From dc22f7c5b705e2a53bd10cc52fdb3f62d609ab6d Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Thu, 25 Sep 2025 15:11:41 +0800 Subject: [PATCH 01/18] sync efficientnet_b4 test --- tests/executables/_utils/get_num_devices.sh | 12 + .../_utils/global_environment_variables.sh | 12 + .../_utils/init_classification_torch.sh | 14 + tests/executables/_utils/install_pip_pkgs.sh | 18 ++ tests/executables/efficientnet/init_torch.sh | 7 + ...ain_efficientb4_imagenet_amp_dist_torch.sh | 51 +++ tests/prepare_dataset.sh | 97 ++++++ tests/prepare_python_environment.sh | 37 +++ tests/prepare_system_environment.sh | 86 +++++ tests/quick_build_environment.sh | 26 ++ tests/tools/dltest/README.md | 121 +++++++ tests/tools/dltest/dltest/__init__.py | 1 + tests/tools/dltest/dltest/cli/__init__.py | 0 tests/tools/dltest/dltest/cli/assert_cli.py | 209 ++++++++++++ tests/tools/dltest/dltest/cli/check_cli.py | 56 ++++ tests/tools/dltest/dltest/cli/entry_points.py | 35 ++ .../tools/dltest/dltest/cli/fetch_log_cli.py | 48 +++ .../dltest/dltest/cli/log_comparator_cli.py | 69 ++++ .../tools/dltest/dltest/cli/log_parser_cli.py | 35 ++ .../dltest/dltest/cli/model_validator_cli.py | 153 +++++++++ tests/tools/dltest/dltest/log_comparator.py | 101 ++++++ tests/tools/dltest/dltest/log_parser.py | 172 ++++++++++ .../dltest/dltest/model_compare_config.py | 306 ++++++++++++++++++ tests/tools/dltest/dltest/utils/__init__.py | 0 tests/tools/dltest/dltest/utils/base_cli.py | 44 +++ tests/tools/dltest/dltest/utils/get_env.py | 65 ++++ tests/tools/dltest/dltest/utils/misc.py | 41 +++ .../dltest/dltest/utils/real_tempfile.py | 64 ++++ .../dltest/dltest/utils/subprocess_tools.py | 84 +++++ .../dltest/dltest/utils/training_args.py | 87 +++++ tests/tools/dltest/setup.py | 27 ++ 31 files changed, 2078 insertions(+) create mode 100644 tests/executables/_utils/get_num_devices.sh create mode 100644 tests/executables/_utils/global_environment_variables.sh create mode 100644 tests/executables/_utils/init_classification_torch.sh create mode 100644 tests/executables/_utils/install_pip_pkgs.sh create mode 100644 tests/executables/efficientnet/init_torch.sh create mode 100644 tests/executables/efficientnet/train_efficientb4_imagenet_amp_dist_torch.sh create mode 100644 tests/prepare_dataset.sh create mode 100644 tests/prepare_python_environment.sh create mode 100644 tests/prepare_system_environment.sh create mode 100644 tests/quick_build_environment.sh create mode 100644 tests/tools/dltest/README.md create mode 100644 tests/tools/dltest/dltest/__init__.py create mode 100644 tests/tools/dltest/dltest/cli/__init__.py create mode 100644 tests/tools/dltest/dltest/cli/assert_cli.py create mode 100644 tests/tools/dltest/dltest/cli/check_cli.py create mode 100644 tests/tools/dltest/dltest/cli/entry_points.py create mode 100644 tests/tools/dltest/dltest/cli/fetch_log_cli.py create mode 100644 tests/tools/dltest/dltest/cli/log_comparator_cli.py create mode 100644 tests/tools/dltest/dltest/cli/log_parser_cli.py create mode 100644 tests/tools/dltest/dltest/cli/model_validator_cli.py create mode 100644 tests/tools/dltest/dltest/log_comparator.py create mode 100644 tests/tools/dltest/dltest/log_parser.py create mode 100644 tests/tools/dltest/dltest/model_compare_config.py create mode 100644 tests/tools/dltest/dltest/utils/__init__.py create mode 100644 tests/tools/dltest/dltest/utils/base_cli.py create mode 100644 tests/tools/dltest/dltest/utils/get_env.py create mode 100644 tests/tools/dltest/dltest/utils/misc.py create mode 100644 
tests/tools/dltest/dltest/utils/real_tempfile.py create mode 100644 tests/tools/dltest/dltest/utils/subprocess_tools.py create mode 100644 tests/tools/dltest/dltest/utils/training_args.py create mode 100644 tests/tools/dltest/setup.py diff --git a/tests/executables/_utils/get_num_devices.sh b/tests/executables/_utils/get_num_devices.sh new file mode 100644 index 000000000..f0701ce8d --- /dev/null +++ b/tests/executables/_utils/get_num_devices.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +devices=$CUDA_VISIBLE_DEVICES +if [ -n "$devices" ]; then + _devices=(${devices//,/ }) + num_devices=${#_devices[@]} +else + num_devices=2 + export CUDA_VISIBLE_DEVICES=0,1 + echo "Not found CUDA_VISIBLE_DEVICES, set nproc_per_node = ${num_devices}" +fi +export IX_NUM_CUDA_VISIBLE_DEVICES=${num_devices} diff --git a/tests/executables/_utils/global_environment_variables.sh b/tests/executables/_utils/global_environment_variables.sh new file mode 100644 index 000000000..4be552ede --- /dev/null +++ b/tests/executables/_utils/global_environment_variables.sh @@ -0,0 +1,12 @@ +export PROJECT_DIR=../../ +export DRT_MEMCPYUSEKERNEL=20000000000 +: ${RUN_MODE:="strict"} +: ${NONSTRICT_EPOCH:=5} + +EXIT_STATUS=0 +check_status() +{ + if ((${PIPESTATUS[0]} != 0)); then + EXIT_STATUS=1 + fi +} diff --git a/tests/executables/_utils/init_classification_torch.sh b/tests/executables/_utils/init_classification_torch.sh new file mode 100644 index 000000000..fce2ca6a9 --- /dev/null +++ b/tests/executables/_utils/init_classification_torch.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [ -n "$1" ]; then + _UTILS_DIR=$1 +else + _UTILS_DIR='../_utils' +fi + +# Install packages +. $_UTILS_DIR/install_pip_pkgs.sh + +pkgs=('scipy' 'easydict' 'tqdm') + +install_pip_pkgs "${pkgs[@]}" \ No newline at end of file diff --git a/tests/executables/_utils/install_pip_pkgs.sh b/tests/executables/_utils/install_pip_pkgs.sh new file mode 100644 index 000000000..e49841657 --- /dev/null +++ b/tests/executables/_utils/install_pip_pkgs.sh @@ -0,0 +1,18 @@ +#!/bin/bash +PIPCMD=pip3 +: ${PKGS_CACHE_DIR:="__null__"} + +function install_pip_pkgs() { + for pkg in "$@" + do + if [ ! -d $PKGS_CACHE_DIR ]; then + $PIPCMD install $pkg + else + $PIPCMD install --no-index --find-links=$PKGS_CACHE_DIR $pkg + fi + done +} + +# Exeample +# pkgs=(1 2 3) +# install_pip_pkgs "${pkgs[@]}" \ No newline at end of file diff --git a/tests/executables/efficientnet/init_torch.sh b/tests/executables/efficientnet/init_torch.sh new file mode 100644 index 000000000..16ec233f5 --- /dev/null +++ b/tests/executables/efficientnet/init_torch.sh @@ -0,0 +1,7 @@ +bash ../_utils/init_classification_torch.sh ../_utils + +if [ "$?" 
!= "0" ]; then + exit 1 +fi + +exit 0 \ No newline at end of file diff --git a/tests/executables/efficientnet/train_efficientb4_imagenet_amp_dist_torch.sh b/tests/executables/efficientnet/train_efficientb4_imagenet_amp_dist_torch.sh new file mode 100644 index 000000000..382e3ec5f --- /dev/null +++ b/tests/executables/efficientnet/train_efficientb4_imagenet_amp_dist_torch.sh @@ -0,0 +1,51 @@ +source ../_utils/global_environment_variables.sh + +: ${BATCH_SIZE:=128} + +IMAGENET_PATH="`pwd`/../../data/datasets/imagenet" + +OUTPUT_PATH="`pwd`/work_dir/efficient_b4" +mkdir -p ${OUTPUT_PATH} + +source ../_utils/get_num_devices.sh + +EXIT_STATUS=0 +check_status() +{ + if ((${PIPESTATUS[0]} != 0)); then + EXIT_STATUS=1 + fi +} + +cd ../../../cv/classification/efficientnet_b4/pytorch + +export PYTHONPATH=./:$PYTHONPATH + +: "${HOST_MASTER_ADDR:="127.0.0.1"}" +: "${HOST_MASTER_PORT:=20060}" +: "${HOST_NNODES:=1}" +: "${HOST_NODE_RANK:=0}" + +extra_params="--epoch ${NONSTRICT_EPOCH}" +if [ "${RUN_MODE}" == "strict" ]; then + extra_params="--acc-thresh 75.0" +fi + +python3 -m torch.distributed.launch --master_addr ${HOST_MASTER_ADDR} \ +--master_port ${HOST_MASTER_PORT} \ +--nproc_per_node=$IX_NUM_CUDA_VISIBLE_DEVICES \ +--nnodes ${HOST_NNODES} \ +--node_rank ${HOST_NODE_RANK} \ +--use_env \ +train.py \ +--model efficientnet_b4 \ +--data-path ${IMAGENET_PATH} \ +--batch-size ${BATCH_SIZE} \ +--acc-thresh 75.0 \ +--amp \ +--output-dir ${OUTPUT_PATH} ${extra_params} \ +"$@";check_status + + +exit ${EXIT_STATUS} + diff --git a/tests/prepare_dataset.sh b/tests/prepare_dataset.sh new file mode 100644 index 000000000..4826bce2b --- /dev/null +++ b/tests/prepare_dataset.sh @@ -0,0 +1,97 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + +PROJ_DIR=$(cd `dirname $0`; pwd) +DATASET_DIR="${PROJ_DIR}/data/datasets" +MODEL_ZOO_DIR="${PROJ_DIR}/data/model_zoo" + + +# Unarchive datas +if [ -f "datasets.tgz" ]; then + tar -zxf "datasets.tgz" +fi + +# Prepare coco +cd ${DATASET_DIR}/coco2017 +if [[ -f "annotations_trainval2017.zip" ]]; then + echo "Unarchive annotations_trainval2017.zip" + unzip -q annotations_trainval2017.zip +fi +if [[ -f "train2017.zip" ]]; then + if [[ ! -d "${DATASET_DIR}/coco2017/train2017" ]]; then + echo "Unarchive train2017.zip" + unzip -q train2017.zip + fi +fi +if [[ -f "val2017.zip" ]]; then + if [[ ! -d "${DATASET_DIR}/coco2017/val2017" ]]; then + echo "Unarchive val2017.zip" + unzip -q val2017.zip + fi +fi +if [[ -f "val2017_mini.zip" ]]; then + if [[ ! -d "${DATASET_DIR}/coco2017/val2017_mini" ]]; then + echo "Unarchive val2017_mini.zip" + unzip -q val2017_mini.zip + fi +fi + +cd ${DATASET_DIR} +# TGZS=`find . 
-iname 'CamVid*.tgz' -or -iname '*VOC*.tgz' -or -iname 'imagenet*.tgz' -or -iname 'coco*.tgz'` +TGZS=$(ls -al | grep -oE 'CamVid[^ ]*.tgz|[^ ]*VOC[^ ]*.tgz|imagenet[^ ]*.tgz|coco[^ ]*.tgz') +for path in $TGZS; do + data_name=`echo "${path}" | cut -f2 -d'/' | cut -f1 -d'.'` + if [[ -d "${data_name}" ]]; then + echo "Skip ${path}" + continue + fi + + echo "Unarchive ${path}" + cd ${path%/*} + if [ -w "${path##*/}" ]; then + echo "The file is writable." + else + echo "The file is not writable, skip it." + continue + fi + tar zxf ${path##*/} + cd ${DATASET_DIR} +done + + +# Prepare pretrained data +cd ${DATASET_DIR}/bert_mini +if [[ ! -d "${DATASET_DIR}/bert_mini/2048_shards_uncompressed" ]]; then + echo "Unarchive 2048_shards_uncompressed_mini" + tar -xzf 2048_shards_uncompressed_mini.tar.gz +fi +if [[ ! -d "${DATASET_DIR}/bert_mini/eval_set_uncompressed" ]]; then + echo "Unarchive eval_set_uncompressed.tar.gz" + tar -xzf eval_set_uncompressed.tar.gz +fi +cd ../../../ + + +# Prepare model's checkpoint +if [ ! -d "${HOME}/.cache/torch/hub/checkpoints/" ]; then + echo "Create checkpoints dir" + mkdir -p ${HOME}/.cache/torch/hub/checkpoints/ +fi + + +if [ -d "${MODEL_ZOO_DIR}" ]; then + cd ${MODEL_ZOO_DIR} + checkpoints=`find . -name '*.pth' -or -name '*.pt'` + for cpt in $checkpoints; do + if [[ ! -f "${HOME}/.cache/torch/hub/checkpoints/${cpt}" ]]; then + echo "Copy $cpt to ${HOME}/.cache/torch/hub/checkpoints/" + cp $cpt ${HOME}/.cache/torch/hub/checkpoints/ + fi + done +fi diff --git a/tests/prepare_python_environment.sh b/tests/prepare_python_environment.sh new file mode 100644 index 000000000..0d7e86995 --- /dev/null +++ b/tests/prepare_python_environment.sh @@ -0,0 +1,37 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +PIPCMD=pip3 + +PROJ_DIR=$(cd `dirname $0`; pwd) +PACKAGES_DIR="${PROJ_DIR}/data/packages" + +if [ -d "${PACKAGES_DIR}" ]; then + #$PIPCMD install --no-index --find-links=./packages numpy==1.19.5 + $PIPCMD install --no-index --find-links=${PACKAGES_DIR} scikit-build +else + $PIPCMD install scikit-build + #$PIPCMD install numpy==1.19.5 +fi + + +# determine whether the user is root mode to execute this script +prefix_sudo="" +current_user=$(whoami) +if [ "$current_user" != "root" ]; then + echo "User $current_user need to add sudo permission keywords" + prefix_sudo="sudo" +fi + +echo "prefix_sudo= $prefix_sudo" + +cd "${PROJ_DIR}/tools/dltest" +# $prefix_sudo python3 setup.py install +$prefix_sudo $PIPCMD install . \ No newline at end of file diff --git a/tests/prepare_system_environment.sh b/tests/prepare_system_environment.sh new file mode 100644 index 000000000..7421376ad --- /dev/null +++ b/tests/prepare_system_environment.sh @@ -0,0 +1,86 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved.
+# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +PIPCMD=pip3 + +# determine whether the user is root mode to execute this script +prefix_sudo="" +current_user=$(whoami) +if [ "$current_user" != "root" ]; then + echo "User $current_user need to add sudo permission keywords" + prefix_sudo="sudo" +fi + +echo "prefix_sudo= $prefix_sudo" + +## Install packages +ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') +if [[ ${ID} == "ubuntu" ]]; then + $prefix_sudo apt update + sudo_path='command -v sudo' + if [ -z "${sudo_path}" ]; then + echo "Install sudo" + $prefix_sudo apt install -y sudo + fi + cmake_path=`command -v cmake` + if [ -z "${cmake_path}" ]; then + echo "Install cmake" + $prefix_sudo apt install -y cmake + fi + unzip_path=`command -v unzip` + if [ -z "${unzip_path}" ]; then + echo "Install unzip" + $prefix_sudo apt install -y unzip + fi + $prefix_sudo apt -y install libgl1-mesa-glx + pyver=`python3 -c 'import sys; print(sys.version_info[:][0])'` + pysubver=`python3 -c 'import sys; print(sys.version_info[:][1])'` + $prefix_sudo apt -y install python${pyver}.${pysubver}-dev +elif [[ ${ID} == "centos" ]]; then + sudo_path='command -v sudo' + if [ -z "${sudo_path}" ]; then + echo "Install sudo" + $prefix_sudo yum install -y sudo + fi + cmake_path=`command -v cmake` + if [ -z "${cmake_path}" ]; then + echo "Install cmake" + $prefix_sudo yum install -y cmake + fi + unzip_path=`command -v unzip` + if [ -z "${unzip_path}" ]; then + echo "Install unzip" + $prefix_sudo yum install -y unzip + fi + $prefix_sudo yum -y install mesa-libGL +else + sudo_path='command -v sudo' + if [ -z "${sudo_path}" ]; then + echo "Install sudo" + $prefix_sudo yum install -y sudo + fi + cmake_path=`command -v cmake` + if [ -z "${cmake_path}" ]; then + echo "Install cmake" + $prefix_sudo yum install -y cmake + fi + unzip_path=`command -v unzip` + if [ -z "${unzip_path}" ]; then + echo "Install unzip" + $prefix_sudo yum install -y unzip + fi + $prefix_sudo yum -y install mesa-libGL +fi + +# Fix No module named 'urllib3.packages.six' +sys_name_str=`uname -a` +if [[ "${sys_name_str}" =~ "aarch64" ]]; then + pip3 install urllib3 requests --upgrade +fi diff --git a/tests/quick_build_environment.sh b/tests/quick_build_environment.sh new file mode 100644 index 000000000..ba2b6991a --- /dev/null +++ b/tests/quick_build_environment.sh @@ -0,0 +1,26 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. 
No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +# check current directory +: "${PROJ_DIR:=$(cd `dirname $0`; pwd)}" +if [ ! -d "${PROJ_DIR}/executables" ]; then + echo "CurrentDirectory = ${PROJ_DIR}" + echo "ERROR: The executables directory was not found under the current directory, exit 1." + echo "Please set PROJ_DIR to the deeplearningsamples directory: PROJ_DIR=<path> bash quick_build_environment.sh" + exit 1 +fi + +cd ${PROJ_DIR} + +echo "Current directory: `pwd`" + +bash ./prepare_dataset.sh +bash ./prepare_system_environment.sh +bash ./prepare_python_environment.sh \ No newline at end of file diff --git a/tests/tools/dltest/README.md b/tests/tools/dltest/README.md new file mode 100644 index 000000000..0f89071b9 --- /dev/null +++ b/tests/tools/dltest/README.md @@ -0,0 +1,121 @@ +### 1. Install dltest tool + + python setup.py develop + +### 2. Usage + +#### 2.1. Fetch log + +Command: + +```shell +ixdltest-fetch args ${log_path} +``` + +Arguments: + +- p or patterns, The patterns to fetch from the log; +- pn or pattern_names, The names of the patterns; +- use_re, Whether to use regular expressions; +- d or nearest_distance, default=10, The maximum distance between a matched pattern and its value; +- start_flag, The flag that starts recording the log; +- end_flag, The flag that stops recording the log; +- split_pattern, The pattern used to match a line; if the line matches, `split_sep` is used to split it; +- split_sep, The separator used to split the line; +- split_idx, The index of the split field; +- saved, Save the result to this path; +- log, Log path. + +> Examples + +2.1.1. Fetch accuracy from a ResNet log by pattern matching. + +```shell +ixdltest-fetch nv-train_resnet50_torch.sh.epoch_5.log -p "Acc@1" "Acc@5" + +# Output: +# {'results': [{'Acc@1': [9.682], 'Acc@5': [50.293]}, {'Acc@1': [19.541], 'Acc@5': [61.096]}, +# {'Acc@1': [21.35], 'Acc@5': [67.338]}, {'Acc@1': [21.197], 'Acc@5': [67.083]}, +# {'Acc@1': [24.586], 'Acc@5': [67.949]}]} +``` + +2.1.2. Fetch mAP from a YOLOv5 log by splitting. + +```shell +ixdltest-fetch nv-train_yolov5s_coco_torch.sh.epoch_5.log \ +-p "Average Precision \(AP\) @\[ IoU=0.50:0.95 \| area= all \| maxDets=100 \] =" \ +-pn "mAP" + +# Output: +# {'results': [{'mAP': [0.359]}, {'mAP': [0.359]}, {'mAP': [0.359]}]} ``` + + +#### 2.2. Compare logs + +```shell +ixdltest-compare --log1 ${log_path1} --log2 ${log_path2} args +``` + +Arguments: + +- log1, First log; +- log2, Second log; +- threshold, Threshold; +- only_last, Whether to compare only the last result; +- print_result, Whether to print the result; +- allow_greater_than, Allow the metrics in log1 to be greater than those in log2; +- p or patterns, The patterns to fetch from the log; +- pn or pattern_names, The names of the patterns; +- use_re, Whether to use regular expressions; +- d or nearest_distance, default=10, The maximum distance between a matched pattern and its value; +- start_flag, The flag that starts recording the log; +- end_flag, The flag that stops recording the log; +- split_pattern, The pattern used to match a line; if the line matches, `split_sep` is used to split it; +- split_sep, The separator used to split the line; +- split_idx, The index of the split field; +- saved, Save the result to this path. + +> Examples + +2.2.1. Compare two logs + +```shell +ixdltest-compare \ +--log1 nv-train_resnet50_torch.sh.epoch_5.log \ +--log2 nv-train_resnet50_torch.sh.epoch_default.log -p "Acc@1" \ +--threshold 0.02 + +# Output: +# FAIL +``` + +#### 2.3.
Validate model + +```shell +ixdltest-validate args ${script} ${script_args} +``` + +Arguments: + +- l or compare_log, If not given, a comparable log is searched for under `deeplearningsamples/runing_logs`; +- saved, Save the result to this path; +- with_exit_code, Exit with a code that reflects the comparison result; +- print_result, Whether to print the result; +- capture_output, optional values ['pipe', 'tempfile'], The method used to capture output; +- run_script, A runnable script with its arguments. + + +> Examples + +2.3.1. Validate model + +```shell +ixdltest-validate bash train_shufflenetv2_x0_5_torch.sh --epochs 5 + +# Output: +# SUCCESS + +``` + + + diff --git a/tests/tools/dltest/dltest/__init__.py b/tests/tools/dltest/dltest/__init__.py new file mode 100644 index 000000000..e6f4d6ab9 --- /dev/null +++ b/tests/tools/dltest/dltest/__init__.py @@ -0,0 +1 @@ +from .utils.training_args import show_training_arguments \ No newline at end of file diff --git a/tests/tools/dltest/dltest/cli/__init__.py b/tests/tools/dltest/dltest/cli/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/tools/dltest/dltest/cli/assert_cli.py b/tests/tools/dltest/dltest/cli/assert_cli.py new file mode 100644 index 000000000..27ae1b6bd --- /dev/null +++ b/tests/tools/dltest/dltest/cli/assert_cli.py @@ -0,0 +1,209 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + +import os +from typing import List, Iterable, Optional + +from dltest.cli.log_parser_cli import LogParserCLI +from dltest.log_parser import LogParser +from dltest.model_compare_config import get_compare_config_with_full_path +from dltest.utils.misc import get_full_path +from dltest.utils.subprocess_tools import get_output +from dltest.model_compare_config import ComparatorConfig + + +FRAMEWORKS = list(ComparatorConfig.get_frameworks()) +
+REMAINDER = '...'
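For context, the `REMAINDER = '...'` constant above is the value of `argparse.REMAINDER`: everything after the recognized options is collected verbatim into `run_script`. A minimal standalone sketch (the invocation below is hypothetical, not taken from the patch):

```python
import argparse

# '...' is argparse.REMAINDER; the trailing command line is captured as-is.
parser = argparse.ArgumentParser()
parser.add_argument("--print_result", action="store_true")
parser.add_argument("run_script", nargs="...")

args = parser.parse_args(["--print_result", "bash", "train_resnet50_torch.sh", "--epochs", "5"])
print(args.run_script)  # ['bash', 'train_resnet50_torch.sh', '--epochs', '5']
```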
+ +assertion_expr_factory = dict( + eq = "a == b", + ne = "a != b", + ge = "a >= b", + le = "a <= b", + gt = "a > b", + lt = "a < b", +) + + +class AssertCLI(LogParserCLI): + + def command_name(self): + return "assert" + + def predefine_args(self): + super(AssertCLI, self).predefine_args() + self.parser.add_argument('-b', '--assertion_second_value', type=float, default=None, + help='It is used in assertion expression.') + self.parser.add_argument('--print_result', action="store_true", default=False, + help='Whether print result') + self.parser.add_argument('--capture_output', type=str, default='pipe', choices=['pipe', 'tempfile'], + help='The method of capture output') + # FIXME: Using store_action to replase it + self.parser.add_argument('--only_last', type=int, default=0, + help='Whether use the last result to compare') + self.parser.add_argument('--expr', type=str, default="ge", + help=f"Assertion expression, option keys: {', '.join(assertion_expr_factory.keys())}" + + ", or a executable code, such as `a > b`, `a > 1`, ...") + self.parser.add_argument('--use_predefined_parser_rules', action="store_true", default=False, + help='Whether use predefined args of parser.') + self.parser.add_argument('--log', type=str, default=None, help="Log path") + self.parser.add_argument("--run_script", default=[], nargs=REMAINDER) + + def parse_args(self, *args, **kwargs): + args = super(AssertCLI, self).parse_args() + args.only_last = args.only_last > 0 + if len(args.run_script) == 0 and args.log is None: + raise ValueError("The one of `--run_script` or `--log` must be given.") + + if args.assertion_second_value is None: + if args.expr is None: + raise ValueError("The one of `--assertion_second_value` or `--expr` must be given.") + + if args.expr in assertion_expr_factory: + raise ValueError( + "The comparison operators depend on the argument `assertion_second_value`." 
+ ) + + return args + + def create_parser(self, args): + if args.use_predefined_parser_rules: + script_path = self._get_script_path(args.run_script) + config = get_compare_config_with_full_path(script_path, to_dict=False) + + return LogParser( + patterns=config.patterns, pattern_names=config.pattern_names, + use_re=config.use_re, nearest_distance=config.nearest_distance, + start_line_pattern_flag=config.start_line_pattern_flag, + end_line_pattern_flag=config.end_line_pattern_flag, + split_pattern=config.split_pattern, + split_sep=config.split_sep, + split_idx=config.split_idx + ) + + return LogParser( + patterns=args.patterns, pattern_names=args.pattern_names, + use_re=args.use_re, nearest_distance=args.nearest_distance, + start_line_pattern_flag=args.start_flag, + end_line_pattern_flag=args.end_flag, + split_pattern=args.split_pattern, + split_sep=args.split_sep, + split_idx=args.split_idx + ) + + def run(self): + args = self.parse_args() + parser = self.create_parser(args) + + if args.print_result: + print(args) + + output = self.get_log(args) + parsed_logs = self.parser_log(parser, output, args) + self.check_logs(parsed_logs, args) + + def get_log(self, args): + if len(args.run_script) == 0: + try: + with open(args.log) as f: + return f.readlines() + except: + print(f"ERROR: Read log fail in {args.log}") + exit(1) + else: + return get_output(args.run_script, capture_output_method=args.capture_output) + + def parser_log(self, parser, output, args) -> List[float]: + results = parser.parse(output) + if args.only_last: + results = results[-1:] + + if len(results) == 0: + raise ValueError("The parsed results is empty, please check patterns.") + if isinstance(results[0], dict): + if len(results[0]) == 0: + raise ValueError("The parsed results is empty, please check patterns.") + key = list(results[0].keys())[0] + results = [result[key] for result in results] + + if isinstance(results[0], Iterable): + results = [result[0] for result in results] + + return results + + def check_logs(self, parsed_logs, args): + if args.print_result: + print("Parsed result:", parsed_logs) + + assertion_expr = assertion_expr_factory.get(args.expr, args.expr) + + assert_results = [] + b = args.assertion_second_value + for a in parsed_logs: + assert_results.append(eval(assertion_expr)) + + if args.print_result: + print("The result of assertion expression:", assert_results) + + if any(assert_results): + print("SUCCESS") + exit(0) + print("FAIL") + exit(1) + + def _get_script_path(self, run_script: List[str]): + # Find shell script by current run_script + def _find_real_shell_script(cmd: List[str]): + for i, field in enumerate(cmd): + if field.endswith('.sh') and self._get_framework(field) in FRAMEWORKS: + return field + + real_shell_script = _find_real_shell_script(run_script) + + # Find shell script by parent process + if real_shell_script is None: + ppid = os.getppid() + import psutil + pproc = psutil.Process(ppid) + pproc_cmd = pproc.cmdline() + real_shell_script = _find_real_shell_script(pproc_cmd) + + if real_shell_script is not None: + real_shell_script = self._get_script_abs_path(real_shell_script) + return real_shell_script + + raise RuntimeError("The script is not named correctly, " + \ + "please use a script name ending with the framework, " + \ + f"got `{' '.join(run_script)}`, " + \ + "e.g. 
train_resnet50_torch.sh") + + def _get_framework(self, shell_script: str) -> Optional[str]: + try: + return shell_script.split('.')[-2].split('_')[-1] + except: + return None + + def _get_script_abs_path(self, run_script): + real_run_script = os.path.realpath(run_script) + if os.path.exists(real_run_script): + return real_run_script + + if "MODEL_DIR" in os.environ: + return os.path.join(os.environ["MODEL_DIR"], run_script) + + if "OLDPWD" in os.environ: + real_run_script = os.path.join(os.environ["OLDPWD"], run_script) + if os.path.exists(real_run_script): + return real_run_script + + raise FileNotFoundError("Not found running script path, " + \ + "please set environment variable `MODEL_DIR`, " + \ + "e.g /path/to/deeplearningsamples/executables/resnet.") + diff --git a/tests/tools/dltest/dltest/cli/check_cli.py b/tests/tools/dltest/dltest/cli/check_cli.py new file mode 100644 index 000000000..f2fc9f9ef --- /dev/null +++ b/tests/tools/dltest/dltest/cli/check_cli.py @@ -0,0 +1,56 @@ +import os + +from .assert_cli import AssertCLI +from ..utils.subprocess_tools import execute_shell + +RUN_MODE_KEY = "RUN_MODE" +RUN_MODE_STRICT = "strict" + + +class CheckCli(AssertCLI): + + def __init__(self, *args, **kwargs): + super(CheckCli, self).__init__(*args, **kwargs) + self.args = None + + def command_name(self): + return "check" + + def predefine_args(self): + self.parser.add_argument("--check_mode", type=str, default="no", + choices=["all", "strict", "nonstrict", "no"], + help="which running mode needs to be checked") + self.parser.add_argument("--nonstrict_mode_args", type=str, default="", + help="the arguments are used with nonstric testing") + super(CheckCli, self).predefine_args() + + def parse_args(self, *args, **kwargs): + if self.args is None: + args = super(CheckCli, self).parse_args(*args, **kwargs) + args.use_predefined_parser_rules = True + args.nonstrict_mode_args = args.nonstrict_mode_args.split(" ") + + if not self.is_strict_testing(): + args.run_script.extend(args.nonstrict_mode_args) + + if args.check_mode == "all": + args.check_mode = self.current_running_mode() + + self.args = args + return self.args + + def run(self): + args = self.parse_args() + if args.check_mode == self.current_running_mode(): + return super(CheckCli, self).run() + else: + res = execute_shell(args.run_script) + exit(res.returncode) + + def current_running_mode(self): + return os.environ.get(RUN_MODE_KEY, RUN_MODE_STRICT) + + def is_strict_testing(self): + return self.current_running_mode() == RUN_MODE_STRICT + + diff --git a/tests/tools/dltest/dltest/cli/entry_points.py b/tests/tools/dltest/dltest/cli/entry_points.py new file mode 100644 index 000000000..32225676d --- /dev/null +++ b/tests/tools/dltest/dltest/cli/entry_points.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
+ + +from dltest.cli.assert_cli import AssertCLI +from dltest.cli.log_comparator_cli import LogComparatorCLI +from dltest.cli.model_validator_cli import ModelValidatorCLI +from dltest.cli.fetch_log_cli import FetchLog +from dltest.cli.check_cli import CheckCli + + +log_comparator_cli = LogComparatorCLI() +model_validator_cli = ModelValidatorCLI() +fetch_log_cli = FetchLog() +assert_cli = AssertCLI() +check_cli = CheckCli() + + +def make_execute_path(): + preffix = "dltest.cli.entry_points" + clis = [] + for cli_var in globals(): + if cli_var.endswith('_cli'): + cmd_name = globals()[cli_var].command_name() + clis.append(f"ixdltest-{cmd_name}={preffix}:{cli_var}") + + return clis + + diff --git a/tests/tools/dltest/dltest/cli/fetch_log_cli.py b/tests/tools/dltest/dltest/cli/fetch_log_cli.py new file mode 100644 index 000000000..196b982f4 --- /dev/null +++ b/tests/tools/dltest/dltest/cli/fetch_log_cli.py @@ -0,0 +1,48 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +import json +from typing import Mapping + +from dltest.log_parser import LogParser +from dltest.cli.log_parser_cli import LogParserCLI + + +class FetchLog(LogParserCLI): + + def command_name(self): + return "fetch" + + def predefine_args(self): + super(FetchLog, self).predefine_args() + self.parser.add_argument('log', type=str, help="Log path") + self.parser.add_argument('--saved', type=str, default=None, help='Save to path') + + def run(self): + args = self.parse_args() + parser = LogParser( + patterns=args.patterns, pattern_names=args.pattern_names, + use_re=args.use_re, nearest_distance=args.nearest_distance, + start_line_pattern_flag=args.start_flag, + end_line_pattern_flag=args.end_flag, + split_pattern=args.split_pattern, + split_sep=args.split_sep, + split_idx=args.split_idx + ) + + results = parser.parse(args.log) + if not isinstance(results, Mapping): + results = dict(results=results) + print(results) + + if args.saved is not None: + with open(args.saved, 'w') as f: + json.dump(results, f) + diff --git a/tests/tools/dltest/dltest/cli/log_comparator_cli.py b/tests/tools/dltest/dltest/cli/log_comparator_cli.py new file mode 100644 index 000000000..d5befc704 --- /dev/null +++ b/tests/tools/dltest/dltest/cli/log_comparator_cli.py @@ -0,0 +1,69 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
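The `make_execute_path()` helper above yields console-script specs of the form `ixdltest-<command>=dltest.cli.entry_points:<cli_var>`. A hedged sketch of how such a list is typically consumed; the actual `tests/tools/dltest/setup.py` is not shown in this section, so treat the wiring below as an assumption:

```python
# Hypothetical setup.py wiring for the generated entry points (illustrative only).
from setuptools import setup, find_packages

from dltest.cli.entry_points import make_execute_path

setup(
    name="dltest",
    packages=find_packages(),
    entry_points={"console_scripts": make_execute_path()},
)
```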
+ + +import json +from pprint import pprint + +from dltest.cli.log_parser_cli import LogParserCLI +from dltest.log_comparator import compare_logs_with_paths, DEFAULT_NEAREST_MATCH_CHARS + + +class LogComparatorCLI(LogParserCLI): + + def command_name(self): + return "compare" + + def predefine_args(self): + super(LogComparatorCLI, self).predefine_args() + self.parser.add_argument('--log1', type=str, help="First log") + self.parser.add_argument('--log2', type=str, help="Second log") + self.parser.add_argument('--threshold', type=float, default=0.0001, help="Threshold") + self.parser.add_argument('--only_last', type=int, default=1, help='Whether use the last result to compare') + self.parser.add_argument('--saved', type=str, default=None, help='Save to path') + self.parser.add_argument('--print_result', action="store_true", default=False, help='Whether print result') + self.parser.add_argument('--allow_greater_than', action="store_true", default=False, help='Allow log1 greater than log2') + + def parse_args(self, *args, **kwargs): + args = super(LogComparatorCLI, self).parse_args(*args, **kwargs) + args.only_last = args.only_last >= 1 + + return args + + def run(self): + args = self.parse_args() + satisfied, results = compare_logs_with_paths( + log1=args.log1, log2=args.log2, + threshold=args.threshold, + patterns=args.patterns, pattern_names=args.pattern_names, + use_re=args.use_re, nearest_distance=args.nearest_distance, + start_line_pattern_flag=args.start_flag, + end_line_pattern_flag=args.end_flag, + only_last=args.only_last, + split_pattern=args.split_pattern, + split_sep=args.split_sep, + split_idx=args.split_idx, + allow_greater_than=True + ) + + if args.print_result: + pprint(results) + + if satisfied: + print("SUCCESS") + else: + print("FAIL") + + if args.saved is not None: + with open(args.saved, 'w') as f: + json.dump(results, f) + + + + diff --git a/tests/tools/dltest/dltest/cli/log_parser_cli.py b/tests/tools/dltest/dltest/cli/log_parser_cli.py new file mode 100644 index 000000000..936a67606 --- /dev/null +++ b/tests/tools/dltest/dltest/cli/log_parser_cli.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
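For reference, `LogComparatorCLI` above is a thin wrapper around `compare_logs_with_paths`, which is defined later in this patch in `dltest/log_comparator.py`. A minimal programmatic sketch with placeholder log paths:

```python
from dltest.log_comparator import compare_logs_with_paths

# Placeholder log paths; the pattern follows the README examples.
satisfied, results = compare_logs_with_paths(
    log1="nv-train_resnet50_torch.sh.epoch_5.log",
    log2="nv-train_resnet50_torch.sh.epoch_default.log",
    threshold=0.02,
    patterns=["Acc@1"],
    only_last=True,
)
print("SUCCESS" if satisfied else "FAIL")
print(results["errors"])  # per-metric differences between the two logs
```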
+ + +import json +from typing import Mapping + +from dltest.log_parser import LogParser, DEFAULT_NEAREST_MATCH_CHARS +from dltest.utils.base_cli import BaseCLI + + +class LogParserCLI(BaseCLI): + + def predefine_args(self): + self.parser.add_argument('-p', '--patterns', nargs="*", type=str, default=None, help='Fetched patterns') + self.parser.add_argument('-pn', '--pattern_names', nargs="*", type=str, default=None, help='The name of pattern') + self.parser.add_argument('--use_re', action="store_true", default=False, help='Whether use regular expression') + self.parser.add_argument('-d', '--nearest_distance', type=int, default=DEFAULT_NEAREST_MATCH_CHARS, help='The nearest distance of matched pattern') + self.parser.add_argument('--start_flag', type=str, default=None, help='The flag of start to record log') + self.parser.add_argument('--end_flag', type=str, default=None, help='The flag of stop to record log') + self.parser.add_argument('--split_pattern', type=str, default=None, help='The pattern is used to match line') + self.parser.add_argument('--split_sep', nargs="*", type=str, default=None, help='The seperator is used to split line') + self.parser.add_argument('--split_idx', nargs="*", type=int, default=None, help='The index of split line') + + def parse_args(self, *args, **kwargs): + args = super(LogParserCLI, self).parse_args(*args, **kwargs) + + return args + diff --git a/tests/tools/dltest/dltest/cli/model_validator_cli.py b/tests/tools/dltest/dltest/cli/model_validator_cli.py new file mode 100644 index 000000000..973142a03 --- /dev/null +++ b/tests/tools/dltest/dltest/cli/model_validator_cli.py @@ -0,0 +1,153 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +import json +import os +import os.path as ospath +from pprint import pprint +from typing import List, Union + +from dltest.utils.base_cli import BaseCLI +from dltest.utils.get_env import get_gpu_type +from dltest.utils.misc import get_full_path +from dltest.model_compare_config import get_compare_config_with_full_path +from dltest.log_comparator import compare_logs_with_paths +from dltest.utils.subprocess_tools import get_output + + +REMAINDER = '...' 
+ + +class ModelValidatorCLI(BaseCLI): + + def command_name(self): + return "validate" + + def predefine_args(self): + super(ModelValidatorCLI, self).predefine_args() + self.parser.add_argument('-l', '--compare_log', type=str, default=None, help="Compare log") + self.parser.add_argument('--saved', type=str, default=None, help='Save to path') + self.parser.add_argument('--with_exit_code', type=int, default=1, help="Add exit code for the result of compared") + self.parser.add_argument('--print_result', action="store_true", default=False, help='Whether print result') + self.parser.add_argument('--capture_output', type=str, default='pipe', choices=['pipe', 'tempfile'], help='The method of capture output') + self.parser.add_argument("run_script", nargs=REMAINDER) + + def parse_args(self, *args, **kwargs): + args = super(ModelValidatorCLI, self).parse_args() + if len(args.run_script) == 0: + print("ERROR: Invalid run_script") + exit(1) + + return args + + def run(self): + args = self.parse_args() + output = self._run_script(args.run_script, capture_output_method=args.capture_output) + self.compare_logs( + output, args.compare_log, args.run_script, + args.saved, args.with_exit_code, + args.print_result + ) + + def compare_logs(self, output: List, compare_log: str, + run_script: List[str], saved: str=None, + with_exit_code: int=1, print_result=False): + script_path = self._get_script_path(run_script) + script_path = get_full_path(script_path) + compare_args = get_compare_config_with_full_path(script_path) + + if compare_log is None: + epoch = self._get_epoch(run_script) + script_name = ospath.basename(script_path) + dist_tag = self._get_dist_tag(script_name) + compare_log = self._find_comparable_log(script_path, epoch, dist_tag) + + if not ospath.exists(compare_log): + print(f"ERROR: {compare_log} not exist. 
Or please use argument `l` to locate log.") + exit(1) + + compare_args['log1'] = output + compare_args['log2'] = compare_log + + satisfied, results = compare_logs_with_paths(**compare_args) + + if print_result: + pprint(results) + + if satisfied: + print("SUCCESS") + else: + print("FAIL") + + if saved is not None: + with open(saved, 'w') as f: + json.dump(results, f) + + if with_exit_code: + if satisfied: + exit(0) + else: + exit(1) + + def _run_script(self, command: List, capture_output_method: str='tempfile'): + return get_output(command, capture_output_method=capture_output_method) + + def _get_script_path(self, run_script: List[str]): + for i, field in enumerate(run_script): + if field.endswith('.py') or field.endswith('.sh'): + return field + + raise RuntimeError("Not found the name of script, " + + "only support python or `sh` script, but got {}.".format(run_script)) + + def _find_comparable_log(self, script_path: str, epoch: Union[str, int], dist_tag: str): + gpu_type = get_gpu_type().lower() + + # Get the platform of trained log + if gpu_type == "nv": + gpu_type = 'bi' + else: + gpu_type = 'nv' + + script_path = get_full_path(script_path) + project_dir = self._get_project_dir(script_path) + script_name = ospath.basename(script_path) + + log_path = f"{project_dir}/runing_logs/{gpu_type}/{gpu_type}-{script_name}.epoch_{epoch}{dist_tag}.log" + + return log_path + + + def _get_epoch(self, run_script: List[str]): + for i, field in enumerate(run_script): + if "--epoch" in field: + if "=" in field: + return field.split("=")[1] + else: + return run_script[i + 1] + + return 'default' + + def _get_dist_tag(self, script_name: str): + try: + import torch + num_gpus = torch.cuda.device_count() + except: + num_gpus = os.environ.get("CUDA_VISIBLE_DEVICES", "all") + + if '_dist_' in script_name or '_multigpu_' in script_name: + return f".{num_gpus}card" + return "" + + def _get_project_dir(self, abs_path): + abs_path = ospath.abspath(abs_path) + script_dir = ospath.dirname(abs_path) + executables_dir = ospath.dirname(script_dir) + return ospath.dirname(executables_dir) diff --git a/tests/tools/dltest/dltest/log_comparator.py b/tests/tools/dltest/dltest/log_comparator.py new file mode 100644 index 000000000..0da94100e --- /dev/null +++ b/tests/tools/dltest/dltest/log_comparator.py @@ -0,0 +1,101 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
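To make the lookup in `_find_comparable_log` above concrete, here is the path layout it produces; all values below are hypothetical examples:

```python
# Illustrative reconstruction of the reference-log path built by _find_comparable_log().
project_dir = "/path/to/deeplearningsamples"   # three levels above the training script
gpu_type = "nv"        # the opposite platform of the current run (nv <-> bi)
script_name = "train_resnet50_torch.sh"
epoch = 5              # taken from --epoch, or "default" if absent
dist_tag = ".8card"    # only added for *_dist_* / *_multigpu_* scripts

log_path = f"{project_dir}/runing_logs/{gpu_type}/{gpu_type}-{script_name}.epoch_{epoch}{dist_tag}.log"
print(log_path)
# /path/to/deeplearningsamples/runing_logs/nv/nv-train_resnet50_torch.sh.epoch_5.8card.log
```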
+ + +from typing import List, Mapping, Union, Tuple +from .log_parser import LogParser, DEFAULT_NEAREST_MATCH_CHARS + +LogLines = List[Mapping] +CompareResult = Tuple[bool, Union[List, Mapping]] + + +def _compute_errors(value1: Mapping, value2: Mapping, threshold: Mapping, allow_greater_than=False) -> CompareResult: + if not isinstance(threshold, Mapping): + _thds = dict() + for key in value1.keys(): + _thds[key] = threshold + threshold = _thds + + result = dict() + satisfied = True + for key, _thd in threshold.items(): + v1, v2 = value1[key], value2[key] + origin_value_type = list + if not isinstance(v1, (tuple, list)): + origin_value_type = float + v1 = [v1] + v2 = [v2] + + real_errors = [] + for v1_i, v2_i in zip(v1, v2): + real_error = v1_i - v2_i + real_errors.append(real_error) + if satisfied and abs(real_error) > _thd: + if allow_greater_than and real_error > 0: + continue + satisfied = False + + if origin_value_type is float and len(real_errors) > 0: + real_errors = real_errors[0] + + result[key] = real_errors + + return satisfied, result + + +def compare_logs(log1: LogLines, log2: LogLines, threshold: Union[float, Mapping], allow_greater_than=False) -> CompareResult: + total_lines = len(log1[0]) + real_errors = [] + satisfied = True + for line_idx in range(total_lines): + _satisfied, _error = _compute_errors(log1[line_idx], log2[line_idx], threshold, allow_greater_than=allow_greater_than) + real_errors.append(_error) + if satisfied and not _satisfied: + satisfied = False + + return satisfied, real_errors + + +def compare_logs_by_last_result(log1: LogLines, log2: LogLines, threshold: Union[float, Mapping], allow_greater_than=False) -> CompareResult: + if len(log1) == 0 or len(log2) == 0: + return False, [] + return _compute_errors(log1[-1], log2[-1], threshold, allow_greater_than=allow_greater_than) + + +def compare_logs_with_paths(log1, log2, threshold: Union[float, Mapping], + patterns: List[str], + pattern_names: List[str] = None, + use_re: bool = False, + nearest_distance: int = DEFAULT_NEAREST_MATCH_CHARS, + start_line_pattern_flag: str = None, + end_line_pattern_flag: str = None, + only_last: bool=True, + split_pattern: Union[str, List] = None, + split_sep: List = None, + split_idx: List = None, + allow_greater_than: bool = False): + parser = LogParser( + patterns=patterns, pattern_names=pattern_names, + use_re=use_re, nearest_distance=nearest_distance, + start_line_pattern_flag=start_line_pattern_flag, + end_line_pattern_flag=end_line_pattern_flag, + split_pattern=split_pattern, + split_sep=split_sep, + split_idx=split_idx + ) + + log1 = parser.parse(log1) + log2 = parser.parse(log2) + + if only_last: + compare_result = compare_logs_by_last_result(log1, log2, threshold, allow_greater_than=allow_greater_than) + else: + compare_result = compare_logs(log1, log2, threshold, allow_greater_than=allow_greater_than) + + return compare_result[0], dict(log1=log1, log2=log2, errors=compare_result[-1]) diff --git a/tests/tools/dltest/dltest/log_parser.py b/tests/tools/dltest/dltest/log_parser.py new file mode 100644 index 000000000..e7a7a0db5 --- /dev/null +++ b/tests/tools/dltest/dltest/log_parser.py @@ -0,0 +1,172 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. 
and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +from typing import List, Optional, Union, Mapping +import re + + +DEFAULT_NEAREST_MATCH_CHARS = 10 + + +def postprocess_search_result(results: List[str]) -> List[float]: + if len(results) != 0: + results = list(map(float, results)) + return results + + +def extract_nearest_value_by_key_inline(content: str, key: str, + nearest_distance: int=DEFAULT_NEAREST_MATCH_CHARS) -> List[float]: + pattern = "%s[\s\S]{0,%d}?(\d+(?:\.\d+)?)" % (key, nearest_distance) + return extract_value_by_pattern_inline(content, pattern) + + +def extract_value_by_pattern_inline(content: str, pattern: str) -> List[float]: + results = re.findall(pattern, content) + return postprocess_search_result(results) + + +def extract_value(content: str, pattern: str, + inline=True, use_re=False, + nearest_distance: int=DEFAULT_NEAREST_MATCH_CHARS) -> List[float]: + if inline: + if use_re: + return extract_value_by_pattern_inline(content, pattern) + else: + return extract_nearest_value_by_key_inline(content, pattern, nearest_distance) + else: + raise NotImplementedError() + + +class LogParser: + + def __init__(self, + patterns: List[str]=None, + pattern_names: List[str]=None, + use_re: bool=False, + nearest_distance: int=DEFAULT_NEAREST_MATCH_CHARS, + start_line_pattern_flag: str=None, + end_line_pattern_flag: str=None, + split_pattern: Union[str, List]=None, + split_sep: List[str]=None, + split_idx: List[int]=None): + if patterns is None and split_sep is None: + raise ValueError("The one of argument `patterns` or `split_sep` must be given.") + + if pattern_names is not None: + if isinstance(patterns, (tuple, list)) and patterns is not None and len(patterns) != len(pattern_names): + raise ValueError("The length of `pattern_names` argument not equal to `patterns`.") + if isinstance(split_sep, (tuple, list)) and split_sep is not None and len(split_sep) != len(pattern_names): + raise ValueError("The length of `pattern_names` argument not equal to `split_sep`.") + + if split_sep is not None and (split_idx is None or not isinstance(split_idx, (int, tuple, list))): + raise ValueError("Invalid index to split text, got {}.".format(split_idx)) + + if split_sep is not None and split_pattern is None: + raise ValueError("Invalid pattern to split text, got {}.".format(split_pattern)) + + self.patterns = patterns + self.use_re = use_re + self.nearest_distance = nearest_distance + self.start_line_pattern_flag = start_line_pattern_flag + self.end_line_pattern_flag = end_line_pattern_flag + + if not isinstance(split_sep, (tuple, list)) and split_sep is not None: + split_sep = [split_sep] + + if not isinstance(split_idx, (tuple, list)): + split_idx = [split_idx] + + self.split_sep = split_sep + self.split_idx = split_idx + + if pattern_names is None: + if patterns is None: + pattern_names = split_idx + else: + pattern_names = patterns + self.pattern_names = pattern_names + + if not isinstance(split_pattern, (tuple, list)) and split_sep is not None: + split_pattern = [split_pattern] * len(split_sep) + self.split_pattern = split_pattern + + self.start_record = start_line_pattern_flag is None + + def parse(self, path_or_logs: Union[str, List]) -> List[dict]: 
+ """ + : return: [{matric_name: value}, ...] + """ + + if isinstance(path_or_logs, str): + with open(path_or_logs, encoding="utf8") as log_file: + path_or_logs = log_file.readlines() + + ret = [] + for line in path_or_logs: + result = self.parse_inline(line) + if len(result) == 0: + continue + ret.append(result) + return ret + + def parse_inline(self, line) -> dict: + if not self.can_record(line): + return {} + + if self.split_sep is None: + return self._parse_inline_by_match(line) + return self._parse_inline_by_split(line) + + def _parse_inline_by_match(self, line: str): + ret = {} + for name, pattern in zip(self.pattern_names, self.patterns): + result = extract_value( + line, pattern, inline=True, use_re=self.use_re, + nearest_distance=self.nearest_distance + ) + if len(result) == 0: + continue + ret[name] = result + return ret + + def _parse_inline_by_split(self, line: str, to_type=float): + ret = {} + for name, sep, idx, pattern in zip(self.pattern_names, + self.split_sep, + self.split_idx, + self.split_pattern): + if not self.can_matched(line, pattern): + continue + if '\t' in sep: + segs = line.strip().split(sep) + else: + segs = line.strip().replace('\t', ' ').split(sep) + segs = list(filter(lambda kv: kv.strip() not in ["", " ", None], segs)) + if len(segs) <= idx: + continue + ret[name] = to_type(segs[idx]) + return ret + + def can_record(self, line: str): + if self.start_line_pattern_flag is None: + self.start_record = True + elif not self.start_record: + self.start_record = self.can_matched(line, self.start_line_pattern_flag) + + if self.start_record: + if self.end_line_pattern_flag is not None and self.can_matched(line, self.end_line_pattern_flag): + self.start_record = False + + return self.start_record + + def can_matched(self, content: str, pattern: str): + result = re.findall(pattern, content) + return len(result) != 0 + diff --git a/tests/tools/dltest/dltest/model_compare_config.py b/tests/tools/dltest/dltest/model_compare_config.py new file mode 100644 index 000000000..4b6d9a471 --- /dev/null +++ b/tests/tools/dltest/dltest/model_compare_config.py @@ -0,0 +1,306 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
+ + +import os.path as ospath + +from typing import NamedTuple, Union, List, Mapping + +from dltest.log_parser import DEFAULT_NEAREST_MATCH_CHARS + + +class LogComparatorArgs(NamedTuple): + threshold: Union[float, Mapping] + patterns: List[str] = None + pattern_names: List[str] = None + use_re: bool = False + nearest_distance: int = DEFAULT_NEAREST_MATCH_CHARS + start_line_pattern_flag: str = None + end_line_pattern_flag: str = None + split_pattern: Union[str, List] = None + split_sep: List = None + split_idx: List = None + only_last: bool = True + allow_greater_than: bool = True + + def to_dict(self): + return self._asdict() + + +class ArgsModelsTuple(NamedTuple): + + args: LogComparatorArgs + models: List[str] + + +class BaseConfig: + + def __getitem__(self, item): + return self.__class__.__dict__[item] + + def __getattr__(self, item): + return self.__class__.__dict__[item] + + def __iter__(self): + for attr, value in self.__class__.__dict__.items(): + if isinstance(value, ArgsModelsTuple): + yield attr + + def iter_items(self): + for attr, value in self.__class__.__dict__.items(): + if isinstance(value, ArgsModelsTuple): + yield attr, value + + +class _TFComparatorConfig(BaseConfig): + + cnn_benchmarks = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + patterns=["Accuracy @ 1 =", "Accuracy @ 5 ="], + pattern_names=["Acc@1", "Acc@5"] + ), + models=["alexnet", "inceptionv3", "resnet50", "resnet101", "vgg16"] + ) + + dist_cnn_becnmarks = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + split_sep=[' ', ' '], + split_idx=[9, 10], + split_pattern="[\s\S]*?images/sec:[\s\S]*?jitter", + pattern_names=['Acc@1', 'Acc@5'] + ), + models=[ + "alexnet_dist", "inceptionv3_dist", "resnet50_dist", "resnet101_dist", "vgg16_dist" + ] + ) + + bert = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + patterns=["eval_accuracy ="], + pattern_names=["Accuracy"] + ), + models=["bert"] + ) + + ssd = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + patterns=["acc="], + pattern_names=["Acc@1"] + ), + models=["ssd"] + ) + + yolov3 = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.8, + patterns=["mAP"] + ), + models=["yolov3"] + ) + + vnet = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + patterns=["background_dice", "anterior_dice", "posterior_dice"] + ), + models=["vnet"] + ) + + +class _TorchComparatorConfig(BaseConfig): + classification = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=8.0, patterns=['Acc@1', 'Acc@5'], + start_line_pattern_flag="Start training", + ), + models=[ + 'googlenet', 'inceptionv3', 'mobilenetv3', 'resnet', 'shufflenetv2', + 'vgg', 'resnet50_dali', 'resnext', 'densenet' + ] + ) + + detection = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.03, + patterns=[ + "Average Precision \(AP\) @\[ IoU=0.50:0.95 \| area= all \| maxDets=100 \] =" + ], + pattern_names=["mAP"], + start_line_pattern_flag="IoU metric: bbox", + end_line_pattern_flag="IoU metric: segm" + ), + models=[ + 'maskrcnn', 'retinanet', 'ssd' + ] + ) + + bert_cola = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + patterns=['mcc'] + ), + models=['bert_cola'] + ) + + bert_mrpc = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + patterns=['acc'] + ), + models=['bert_mrpc'] + ) + + bert_pretrain_apex = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + patterns=['eval_mlm_accaracy'] + ), + models=['bert_pretrain_apex'] + ) + + segmentation = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=8.0, + 
patterns=['mean IoU:'], + pattern_names=['mIoU'] + ), + models=[ + 'deeplabv3', 'fcn' + ] + ) + + t5 = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=5.0, + split_pattern="eval_bleu[\s\S]*?=", + split_sep=["="], + split_idx=[1], + pattern_names=['EvalBleu'] + ), + models=['t5'] + ) + + yolov3 = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + patterns=["mAP"] + ), + models=['yolov3'] + ) + + yolov5 = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + patterns=[ + "Average Precision \(AP\) @\[ IoU=0.50:0.95 \| area= all \| maxDets=100 \] =" + ], + pattern_names=["mAP"], + ), + models=['yolov5'], + ) + + yolov5s_coco128 = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + split_pattern="[\s]+?all[\s\S]*?[1-9]\d*[\s]+?[1-9]\d*", + split_sep=[" ", " "], + split_idx=[5, 6], + pattern_names=["AP50", "mAP"] + ), + models=['yolov5s_coco128'] + ) + + centernet_resnet18 = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + split_pattern="[\s]+?all[\s\S]*?[1-9]\d*[\s]+?[1-9]\d*", + split_sep=[" ", " "], + split_idx=[5, 6], + pattern_names=["AP50", "mAP"] + ), + models=['centernet_resnet18'] + ) + + fcos_resnet50_fpn = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.08, + split_pattern="[\s]+?all[\s\S]*?[1-9]\d*[\s]+?[1-9]\d*", + split_sep=[" ", " "], + split_idx=[5, 6], + pattern_names=["AP50", "mAP"] + ), + models=['fcos_resnet50_fpn'] + ) + + ocr_recognition = ArgsModelsTuple( + args=LogComparatorArgs( + threshold=0.5, patterns=["0_word_acc"], + ), + models=[ + "sar", "satrn" + ] + ) + + + +class ComparatorConfig: + + _configs = dict(tf=_TFComparatorConfig(), torch=_TorchComparatorConfig()) + + @classmethod + def get_frameworks(cls) -> List: + return list(cls._configs.keys()) + + @classmethod + def get(cls, tf_or_torch, name, default=None): + for model_kind, comb in cls._configs[tf_or_torch].iter_items(): + if name in comb.models: + return comb.args + if default is not None: + return default + raise KeyError("Not found config, but got {name} for {fw}".format(name=name, fw=tf_or_torch)) + + @classmethod + def find_config(cls, script_path: str) -> LogComparatorArgs: + tf_or_torch = script_path.split('.')[-2].split('_')[-1] + + # Find by the name of script + script_name = ospath.basename(script_path).rsplit('.', maxsplit=1)[0] + if script_name.startswith('train_'): + script_name = script_name.replace("train_", "", 1) + while script_name not in [None, "", "/", "\\"]: + try: + config = cls.get(tf_or_torch, script_name) + return config + except: + pass + script_name = script_name.rsplit('_', maxsplit=1) + if len(script_name) <= 1: + break + script_name = script_name[0] + + # Find by the name of model's dir + model_dir_name = ospath.basename(ospath.dirname(script_path)) + try: + config = cls.get(tf_or_torch, model_dir_name) + return config + except: + raise RuntimeError("Not found for", script_path) + + +def get_compare_config_with_full_path(script_path: str, to_dict=True): + config = ComparatorConfig.find_config(script_path) + if to_dict: + return config.to_dict() + return config + diff --git a/tests/tools/dltest/dltest/utils/__init__.py b/tests/tools/dltest/dltest/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/tools/dltest/dltest/utils/base_cli.py b/tests/tools/dltest/dltest/utils/base_cli.py new file mode 100644 index 000000000..faf71ef39 --- /dev/null +++ b/tests/tools/dltest/dltest/utils/base_cli.py @@ -0,0 +1,44 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. 
+# Copyright Declaration: This software, including all of its code and documentation,
+# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX
+# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright
+# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar
+# CoreX. No user of this software shall have any right, ownership or interest in this software and
+# any use of this software shall be in compliance with the terms and conditions of the End User
+# License Agreement.
+
+
+from argparse import ArgumentParser
+from abc import abstractmethod
+
+
+class BaseCLI:
+
+    def __init__(self, parser=None, *args, **kwargs):
+        self.parser = parser if parser is not None else ArgumentParser(
+            description=self.description, *args, **kwargs)
+
+    def __call__(self):
+        self.run()
+
+    @property
+    def description(self):
+        return None
+
+    @abstractmethod
+    def command_name(self):
+        pass
+
+    def predefine_args(self):
+        pass
+
+    def parse_args(self, *args, **kwargs):
+        self.predefine_args()
+        return self.parser.parse_args(*args, **kwargs)
+
+    @abstractmethod
+    def run(self):
+        pass
+
+
+
diff --git a/tests/tools/dltest/dltest/utils/get_env.py b/tests/tools/dltest/dltest/utils/get_env.py
new file mode 100644
index 000000000..673a17990
--- /dev/null
+++ b/tests/tools/dltest/dltest/utils/get_env.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2022 Iluvatar CoreX. All rights reserved.
+# Copyright Declaration: This software, including all of its code and documentation,
+# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX
+# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright
+# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar
+# CoreX. No user of this software shall have any right, ownership or interest in this software and
+# any use of this software shall be in compliance with the terms and conditions of the End User
+# License Agreement.
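A minimal usage sketch for the BaseCLI helper above, before get_env.py continues below. It is illustrative only and not part of the patch payload: the concrete subcommands live under dltest/cli/, and the command name used here is hypothetical.

    # Hypothetical BaseCLI subclass showing the intended flow:
    # predefine_args() registers options, parse_args() parses them, run() does the work.
    from dltest.utils.base_cli import BaseCLI


    class EchoCLI(BaseCLI):

        @property
        def description(self):
            return "Echo a message (illustration only)"

        def command_name(self):
            return "ixdltest-echo"  # assumed name, not a real entry point

        def predefine_args(self):
            self.parser.add_argument("--message", type=str, default="hello")

        def run(self):
            args = self.parse_args()
            print(args.message)


    if __name__ == "__main__":
        EchoCLI()()  # __call__ dispatches to run()
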
+import os +from collections import defaultdict +import os.path as osp +import subprocess +import sys + + +def get_envinfo(): + import torch + env_info = {} + env_info['sys.platform'] = sys.platform + env_info['Python'] = sys.version.replace('\n', '') + + cuda_available = torch.cuda.is_available() + env_info['CUDA available'] = cuda_available + if cuda_available: + from torch.utils.cpp_extension import CUDA_HOME + env_info['CUDA_HOME'] = CUDA_HOME + if CUDA_HOME is not None and osp.isdir(CUDA_HOME): + try: + nvcc = osp.join(CUDA_HOME, 'bin/nvcc') + nvcc = subprocess.check_output( + f'"{nvcc}" -V | tail -n1', shell=True) + nvcc = nvcc.decode('utf-8').strip() + except subprocess.SubprocessError: + nvcc = 'Not Available' + env_info['NVCC'] = nvcc + + devices = defaultdict(list) + for k in range(torch.cuda.device_count()): + devices[torch.cuda.get_device_name(k)].append(str(k)) + for name, devids in devices.items(): + env_info['GPU ' + ','.join(devids)] = name + + gcc = subprocess.check_output('gcc --version | head -n1', shell=True) + gcc = gcc.decode('utf-8').strip() + env_info['GCC'] = gcc + + env_info['PyTorch'] = torch.__version__ + + return env_info + + +def get_gpu_type(): + import torch + if "DEBUG_GPU_TYPE" in os.environ: + return os.environ["DEBUG_GPU_TYPE"] + + if not torch.cuda.is_available(): + return "BI" + dev_name = torch.cuda.get_device_name(0) + if 'IX BI' in dev_name or getattr(torch, "corex", False): + _type = "BI" + else: + _type = "NV" + + return _type diff --git a/tests/tools/dltest/dltest/utils/misc.py b/tests/tools/dltest/dltest/utils/misc.py new file mode 100644 index 000000000..accf259f7 --- /dev/null +++ b/tests/tools/dltest/dltest/utils/misc.py @@ -0,0 +1,41 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. +import copy +import os + + +def get_full_path(fname): + pwd = os.getcwd() + if fname.startswith('/'): + return fname + return os.path.join(pwd, fname) + + +def is_main_proc(rank): + return str(rank) in ["None", "-1", "0"] + + +def main_proc_print(*args, **kwargs): + if "RANK" in os.environ: + if is_main_proc(os.environ["RANK"]): + print(*args, **kwargs) + return + + if "LOCAL_RANK" in os.environ: + if is_main_proc(os.environ["LOCAL_RANK"]): + print(*args, **kwargs) + return + + print(*args, **kwargs) + + +def create_subproc_env(): + env = copy.copy(os.environ) + env["USE_DLTEST"] = "1" + return env \ No newline at end of file diff --git a/tests/tools/dltest/dltest/utils/real_tempfile.py b/tests/tools/dltest/dltest/utils/real_tempfile.py new file mode 100644 index 000000000..cace5d1e8 --- /dev/null +++ b/tests/tools/dltest/dltest/utils/real_tempfile.py @@ -0,0 +1,64 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. 
and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +import os +import os.path as ospath +from pathlib import Path +import tempfile + + +class TemporaryFile: + + def __init__(self, with_open=False, mode='r'): + self.name = None + self.with_open = with_open + self.mode = mode + + self.file = None + + def create(self): + self.name = tempfile.mktemp() + file_path = Path(self.name) + file_path.touch() + + def delete(self): + if self.name is not None and ospath.exists(self.name): + os.unlink(self.name) + + def read(self): + self._check_file_status() + return self.file.read() + + def readlines(self): + self._check_file_status() + return self.file.readlines() + + def _check_file_status(self): + if self.file is None: + raise RuntimeError("File is closed, please reopen it.") + + def __enter__(self): + self.create() + if self.with_open: + self.file = open(self.name, mode=self.mode) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.with_open: + self.file.close() + self.delete() + + + + + + + + diff --git a/tests/tools/dltest/dltest/utils/subprocess_tools.py b/tests/tools/dltest/dltest/utils/subprocess_tools.py new file mode 100644 index 000000000..2b3fe3e7f --- /dev/null +++ b/tests/tools/dltest/dltest/utils/subprocess_tools.py @@ -0,0 +1,84 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
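The TemporaryFile helper above is what subprocess_tools.py (next) builds on: a command's stdout is tee'd into the temporary file and read back afterwards. A small sketch of that pattern, assuming a POSIX shell and the dltest package from this series installed (illustration only, not part of the patch):

    import subprocess

    from dltest.utils.real_tempfile import TemporaryFile

    with TemporaryFile(with_open=True) as tmp:
        # tee duplicates the pipeline's stdout into tmp.name, so the lines
        # can be read back through the already-opened file object.
        subprocess.run(f"echo hello | tee {tmp.name}", shell=True, check=True)
        captured = tmp.readlines()

    print(captured)  # ['hello\n']; the temporary file is deleted on exit
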
+ + +import subprocess +from typing import Callable, Union, List + +from dltest.utils.real_tempfile import TemporaryFile +from dltest.utils import misc + + +def get_output_with_pipe(command, shell=None, callback: Callable[[list], None]=None, *args, **kwargs): + if shell is None: + shell = True + + if shell and not isinstance(command, str): + command = " ".join(command) + + stream = subprocess.Popen( + command, shell=shell, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + *args, **kwargs + ) + outputs = [] + while 1: + exit_code = stream.poll() + if exit_code is None: + if stream.stdout.readable(): + outputs.append(stream.stdout.readline().decode("utf8").rstrip()) + if callback is not None: + callback(outputs[-1:]) + print(outputs[-1]) + else: + if stream.stdout.readable(): + lines = stream.stdout.readlines() + lines = [line.decode("utf8".rstrip()) for line in lines] + outputs.extend(lines) + if callback is not None: + callback(outputs[-1:]) + print('\n'.join(lines)) + break + + return outputs + + +def get_output_with_tempfile(command, *args, **kwargs): + if not isinstance(command, (list, tuple)): + command = [command] + stdout = None + with TemporaryFile(with_open=True) as file: + command.extend(['|', 'tee', file.name]) + command = " ".join(command) + + res = subprocess.run(command, stdout=stdout, stderr=subprocess.STDOUT, shell=True, *args, **kwargs) + output = file.readlines() + + return output + +def execute_shell(command, *args, **kwargs): + if "env" not in kwargs: + kwargs["env"] = misc.create_subproc_env() + + if not isinstance(command, (list, tuple)): + command = [command] + + command = " ".join(command) + res = subprocess.run(command, + shell=True, *args, **kwargs) + return res + +def get_output(command: List, capture_output_method: str = 'tempfile', *args, **kwargs): + if "env" not in kwargs: + kwargs["env"] = misc.create_subproc_env() + + if capture_output_method == "tempfile": + return get_output_with_tempfile(command, *args, **kwargs) + return get_output_with_pipe(command, *args, **kwargs) \ No newline at end of file diff --git a/tests/tools/dltest/dltest/utils/training_args.py b/tests/tools/dltest/dltest/utils/training_args.py new file mode 100644 index 000000000..f096c2462 --- /dev/null +++ b/tests/tools/dltest/dltest/utils/training_args.py @@ -0,0 +1,87 @@ +import os + +from typing import Union, List, Dict, Any, Mapping +from argparse import Namespace, ArgumentParser +import json + + +def _obj_to_dict(obj) -> Dict: + if isinstance(obj, Mapping): + return obj + + try: + from absl import flags + if isinstance(obj, flags.FlagValues): + return obj.flag_values_dict() + except: + pass + if isinstance(obj, Namespace): + return obj.__dict__ + elif isinstance(obj, List): + new_obj = dict() + for _o in obj: + _o_dict = _obj_to_dict(_o) + new_obj.update(_o_dict) + return new_obj + elif not isinstance(obj, Dict): + if hasattr(obj, "__dict__"): + return obj.__dict__ + try: + typename = type(obj).__name__ + except: + typename = str(obj) + return {typename: str(obj)} + + +def json_dump_obj(o): + if hasattr(o, "__name__"): + return o.__name__ + return str(o) + + +def show_training_arguments(args: Union[List, Dict, Any]): + """ print running arguments + Example 1: For ArgumentParser + >>> parser = ArgumentParser("Test") + >>> parser.add_argument("--arg0", type=str) + >>> args = parser.parse_args() + >>> show_training_arguments(args) + + Example 2: For dict + >>> args = dict(arg=1) + >>> show_training_arguments(args) + + Example 3: For custom object + >>> from collections import 
namedtuple + >>> ArgsType = namedtuple("ArgsType", ["arg"]) + >>> args = ArgsType(arg=123) + >>> show_training_arguments(args) + + Example 4: For absl + >>> from absl import flags + >>> flags.DEFINE_string("arg", "123", "test") + >>> show_training_arguments(flags.FLAGS) + + Example 5: For multi args + >>> args1 = dict(a=1) + >>> args2 = dict(b=2) + >>> show_training_arguments([args1, args2]) + + """ + if not "SHOW_RUNNING_ARGS" in os.environ: + return + + if os.environ["SHOW_RUNNING_ARGS"].lower() in ["0", "f", "false"]: + return + + if "LOCAL_RANK" in os.environ: + if os.environ["LOCAL_RANK"] != "0": + return + args = _obj_to_dict(args) + args = json.dumps(args, default=json_dump_obj) + print("[RunningArguments]", args) + + +if __name__ == '__main__': + os.environ["SHOW_RUNNING_ARGS"] = "1" + show_training_arguments([dict(a=1), dict(b=1), object()]) \ No newline at end of file diff --git a/tests/tools/dltest/setup.py b/tests/tools/dltest/setup.py new file mode 100644 index 000000000..dc81c4daf --- /dev/null +++ b/tests/tools/dltest/setup.py @@ -0,0 +1,27 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +from setuptools import setup, find_packages +from dltest.cli.entry_points import make_execute_path + +setup( + name="dltest", + version="0.1", + description='Iluvatar Corex AI Toolbox', + packages=find_packages(exclude=('examples')), + include_package_data=True, + zip_safe=False, + entry_points = { + 'console_scripts': make_execute_path(), + }, + install_requires=[ + 'psutil' + ] +) -- Gitee From 5a6bb123f9a80a54224b55543b9925f167a6e77c Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 10:49:44 +0800 Subject: [PATCH 02/18] sync fairmot --- tests/executables/fairmot/init_torch.sh | 14 +++++++++++ .../train_fairmot_hrnet32_dist_torch.sh | 23 +++++++++++++++++++ 2 files changed, 37 insertions(+) create mode 100644 tests/executables/fairmot/init_torch.sh create mode 100644 tests/executables/fairmot/train_fairmot_hrnet32_dist_torch.sh diff --git a/tests/executables/fairmot/init_torch.sh b/tests/executables/fairmot/init_torch.sh new file mode 100644 index 000000000..6ec2c63c7 --- /dev/null +++ b/tests/executables/fairmot/init_torch.sh @@ -0,0 +1,14 @@ +CURRENT_DIR=$(cd `dirname $0`; pwd) + +ROOT_DIR=${CURRENT_DIR}/../.. 
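One note on the show_training_arguments helper added in dltest above: it prints nothing unless SHOW_RUNNING_ARGS is set to a truthy value, and in distributed runs only LOCAL_RANK 0 prints. A minimal sketch of enabling it (illustrative only, not part of the patch):

    import os
    from argparse import ArgumentParser

    from dltest.utils.training_args import show_training_arguments

    os.environ["SHOW_RUNNING_ARGS"] = "1"  # "0", "f" or "false" keep it silent

    parser = ArgumentParser()
    parser.add_argument("--lr", type=float, default=0.1)
    args = parser.parse_args([])  # empty argv for the demo

    show_training_arguments(args)  # prints: [RunningArguments] {"lr": 0.1}
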
+ +cd ${ROOT_DIR}/data/datasets +unzip -q MOT17.zip +mkdir MOT17/images && mkdir MOT17/labels_with_ids +mv ./MOT17/train ./MOT17/images/ && mv ./MOT17/test ./MOT17/images/ + +cd ${ROOT_DIR}/cv/multi_object_tracking/fairmot/pytorch/ +pip3 install Cython +pip3 install -r requirements.txt + +python3 src/gen_labels_17.py \ No newline at end of file diff --git a/tests/executables/fairmot/train_fairmot_hrnet32_dist_torch.sh b/tests/executables/fairmot/train_fairmot_hrnet32_dist_torch.sh new file mode 100644 index 000000000..748020898 --- /dev/null +++ b/tests/executables/fairmot/train_fairmot_hrnet32_dist_torch.sh @@ -0,0 +1,23 @@ + +source ../_utils/get_num_devices.sh + +CURRENT_DIR=$(cd `dirname $0`; pwd) +source ../_utils/global_environment_variables.sh + +: ${BATCH_SIZE:=3} + +nonstrict_mode_args="" +if [ "${RUN_MODE}" != "strict" ]; then + nonstrict_mode_args="--num_epochs 1 --num_iters 300" +fi + +ROOT_DIR=${CURRENT_DIR}/../.. +DADASAT_PATH=${ROOT_DIR}/data/datasets/MOT17 + +cd ${ROOT_DIR}/cv/multi_object_tracking/fairmot/pytorch/ + +bash train_hrnet32_mot17.sh --batch_size $((IX_NUM_CUDA_VISIBLE_DEVICES*BATCH_SIZE)) \ + --lr 0.001 \ + --gpus $(seq -s "," 0 $(($IX_NUM_CUDA_VISIBLE_DEVICES-1))) ${nonstrict_mode_args} --target_loss 20 "$@";check_status + +exit ${EXIT_STATUS} -- Gitee From 8ab3678e5269db592fde5e24f98781c1b488ecdf Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 10:56:30 +0800 Subject: [PATCH 03/18] sync maskrcnn --- tests/executables/maskrcnn/init_torch.sh | 17 +++++++++++++ .../train_maskrcnn_resnet50_amp_torch.sh | 25 +++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 tests/executables/maskrcnn/init_torch.sh create mode 100644 tests/executables/maskrcnn/train_maskrcnn_resnet50_amp_torch.sh diff --git a/tests/executables/maskrcnn/init_torch.sh b/tests/executables/maskrcnn/init_torch.sh new file mode 100644 index 000000000..0e6c0becd --- /dev/null +++ b/tests/executables/maskrcnn/init_torch.sh @@ -0,0 +1,17 @@ +bash ../_utils/init_detection_torch.sh ../_utils + +CURRENT_MODEL_DIR=$(cd `dirname $0`; pwd) +PROJ_DIR="${CURRENT_MODEL_DIR}/../../" +PROJECT_DATA="${PROJ_DIR}/data/datasets" + +if [[ ! 
-d "${PROJECT_DATA}/VOC2012_sample" ]]; then + tar zxf ${PROJECT_DATA}/VOC2012_sample.tgz -C ${PROJECT_DATA} +fi + +cd ${PROJ_DIR}/cv/detection/maskrcnn/pytorch/ +OSNAME=$(cat /proc/version) +# install the requirement +if [[ "${OSNAME}" == *"aarch64"* ]] +then + pip3 install -r requirements_aarch64.txt +fi \ No newline at end of file diff --git a/tests/executables/maskrcnn/train_maskrcnn_resnet50_amp_torch.sh b/tests/executables/maskrcnn/train_maskrcnn_resnet50_amp_torch.sh new file mode 100644 index 000000000..860d6a0a2 --- /dev/null +++ b/tests/executables/maskrcnn/train_maskrcnn_resnet50_amp_torch.sh @@ -0,0 +1,25 @@ +source ../_utils/global_environment_variables.sh + +: ${BATCH_SIZE:=1} + +export PYTORCH_DISABLE_VEC_KERNEL=1 +export PT_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT=1 + +OUTPUT_DIR=${PROJECT_DIR}/output/maskrcnn/$0 +if [[ -d ${OUTPUT_DIR} ]]; then + mkdir -p ${OUTPUT_DIR} +fi + + +ixdltest-check --nonstrict_mode_args="--epoch ${NONSTRICT_EPOCH}" -b 10 --run_script \ +python3 ${PROJECT_DIR}/cv/detection/maskrcnn/pytorch/train.py \ +--model maskrcnn_resnet50_fpn \ +--data-path ${PROJECT_DIR}/data/datasets/VOC2012_sample \ +--amp \ +--lr 0.001 \ +--batch-size ${BATCH_SIZE} \ +--output-dir ${OUTPUT_DIR} \ +"$@"; check_status + +rm -fr ${OUTPUT_DIR} +exit ${EXIT_STATUS} -- Gitee From 8434d476d2ae46d9a549f3776b3042a7d3ad0778 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 11:00:11 +0800 Subject: [PATCH 04/18] sync mobilenetv3 --- tests/executables/mobilenetv3/init_torch.sh | 15 +++++++++++ .../train_mobilenetv3_large_amp_torch.sh | 25 +++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 tests/executables/mobilenetv3/init_torch.sh create mode 100644 tests/executables/mobilenetv3/train_mobilenetv3_large_amp_torch.sh diff --git a/tests/executables/mobilenetv3/init_torch.sh b/tests/executables/mobilenetv3/init_torch.sh new file mode 100644 index 000000000..e8d2163e7 --- /dev/null +++ b/tests/executables/mobilenetv3/init_torch.sh @@ -0,0 +1,15 @@ +bash ../_utils/init_classification_torch.sh ../_utils + +# determine whether the user is root mode to execute this script +prefix_sudo="" +current_user=$(whoami) +if [ "$current_user" != "root" ]; then + echo "User $current_user need to add sudo permission keywords" + prefix_sudo="sudo" +fi + +echo "prefix_sudo= $prefix_sudo" + +command -v yum >/dev/null && $prefix_sudo yum install -y numactl || $prefix_sudo apt install -y numactl + +pip3 install -r ../../cv/classification/mobilenetv3/pytorch/requirements.txt \ No newline at end of file diff --git a/tests/executables/mobilenetv3/train_mobilenetv3_large_amp_torch.sh b/tests/executables/mobilenetv3/train_mobilenetv3_large_amp_torch.sh new file mode 100644 index 000000000..13a46dc79 --- /dev/null +++ b/tests/executables/mobilenetv3/train_mobilenetv3_large_amp_torch.sh @@ -0,0 +1,25 @@ + +CURRENT_DIR=$(cd `dirname $0`; pwd) +source ../_utils/get_num_devices.sh +source ../_utils/global_environment_variables.sh + +: ${BATCH_SIZE:=32} + +ROOT_DIR=${CURRENT_DIR}/../.. 
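The comparator config that dltest applies to a script such as train_mobilenetv3_large_amp_torch.sh is resolved from the file name: the framework comes from the trailing _torch/_tf token, the train_ prefix is dropped, and suffix tokens are stripped (mobilenetv3_large_amp_torch -> mobilenetv3_large_amp -> mobilenetv3_large -> mobilenetv3) until a registered model name matches, here in the torch classification group. A sketch of that lookup, assuming the dltest package from this series is installed (illustration only, not part of the patch):

    from dltest.model_compare_config import (
        ComparatorConfig,
        get_compare_config_with_full_path,
    )

    path = "tests/executables/mobilenetv3/train_mobilenetv3_large_amp_torch.sh"

    args = ComparatorConfig.find_config(path)
    print(args.threshold)  # 8.0 for the torch classification group
    print(args.patterns)   # ['Acc@1', 'Acc@5']

    # Same lookup, returned as a plain dict for callers that expect kwargs.
    print(get_compare_config_with_full_path(path))
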
+DATA_DIR=${ROOT_DIR}/data/datasets/imagenette + +EXIT_STATUS=0 +check_status() +{ + if ((${PIPESTATUS[0]} != 0)); then + EXIT_STATUS=1 + fi +} + +cd $CURRENT_DIR/../../cv/classification/mobilenetv3/pytorch/ +ixdltest-check --nonstrict_mode_args="--epoch ${NONSTRICT_EPOCH}" -b 10 --run_script \ +python3 train.py --model mobilenet_v3_large --data-path "${DATA_DIR}" \ + --epochs 600 --batch-size ${BATCH_SIZE} --opt sgd --lr 0.1 \ + --wd 0.00001 --lr-step-size 2 --lr-gamma 0.973 --auto-augment imagenet --random-erase 0.2 \ + --output-dir . --amp "$@"; check_status +exit ${EXIT_STATUS} -- Gitee From e3d50a1f305c5c2a726fdc87e32d3cc96a159e47 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 11:22:45 +0800 Subject: [PATCH 05/18] sync resnet torch --- .../resnet50/pytorch/_torchvision/__init__.py | 11 - .../_internally_replaced_utils.py | 66 --- .../pytorch/_torchvision/models/__init__.py | 9 - .../pytorch/_torchvision/models/resnet.py | 421 ------------- .../pytorch/_torchvision/ops/__init__.py | 12 - .../resnet50/pytorch/_torchvision/ops/misc.py | 171 ------ .../_torchvision/ops/stochastic_depth.py | 74 --- .../resnet50/pytorch/_torchvision/utils.py | 556 ------------------ cv/classification/resnet50/pytorch/utils.py | 160 ----- tests/executables/resnet/init_paddle.sh | 29 + tests/executables/resnet/init_tf.sh | 25 + tests/executables/resnet/init_torch.sh | 7 + .../resnet/train_resnet50_amp_torch.sh | 21 + .../resnet/train_resnet50_dist_paddle.sh | 19 + .../resnet/train_resnet50_dist_tf.sh | 23 + 15 files changed, 124 insertions(+), 1480 deletions(-) delete mode 100644 cv/classification/resnet50/pytorch/_torchvision/__init__.py delete mode 100644 cv/classification/resnet50/pytorch/_torchvision/_internally_replaced_utils.py delete mode 100644 cv/classification/resnet50/pytorch/_torchvision/models/__init__.py delete mode 100644 cv/classification/resnet50/pytorch/_torchvision/models/resnet.py delete mode 100644 cv/classification/resnet50/pytorch/_torchvision/ops/__init__.py delete mode 100644 cv/classification/resnet50/pytorch/_torchvision/ops/misc.py delete mode 100644 cv/classification/resnet50/pytorch/_torchvision/ops/stochastic_depth.py delete mode 100644 cv/classification/resnet50/pytorch/_torchvision/utils.py delete mode 100644 cv/classification/resnet50/pytorch/utils.py create mode 100644 tests/executables/resnet/init_paddle.sh create mode 100644 tests/executables/resnet/init_tf.sh create mode 100644 tests/executables/resnet/init_torch.sh create mode 100644 tests/executables/resnet/train_resnet50_amp_torch.sh create mode 100644 tests/executables/resnet/train_resnet50_dist_paddle.sh create mode 100644 tests/executables/resnet/train_resnet50_dist_tf.sh diff --git a/cv/classification/resnet50/pytorch/_torchvision/__init__.py b/cv/classification/resnet50/pytorch/_torchvision/__init__.py deleted file mode 100644 index 725db7dae..000000000 --- a/cv/classification/resnet50/pytorch/_torchvision/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. -# Copyright Declaration: This software, including all of its code and documentation, -# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX -# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright -# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar -# CoreX. 
No user of this software shall have any right, ownership or interest in this software and -# any use of this software shall be in compliance with the terms and conditions of the End User -# License Agreement. -from . import models - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/cv/classification/resnet50/pytorch/_torchvision/_internally_replaced_utils.py b/cv/classification/resnet50/pytorch/_torchvision/_internally_replaced_utils.py deleted file mode 100644 index fce11d9b5..000000000 --- a/cv/classification/resnet50/pytorch/_torchvision/_internally_replaced_utils.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. -# Copyright Declaration: This software, including all of its code and documentation, -# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX -# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright -# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar -# CoreX. No user of this software shall have any right, ownership or interest in this software and -# any use of this software shall be in compliance with the terms and conditions of the End User -# License Agreement. -import importlib.machinery -import os - -from torch.hub import _get_torch_home - - -_HOME = os.path.join(_get_torch_home(), "datasets", "vision") -_USE_SHARDED_DATASETS = False - - -def _download_file_from_remote_location(fpath: str, url: str) -> None: - pass - - -def _is_remote_location_available() -> bool: - return False - - -try: - from torch.hub import load_state_dict_from_url # noqa: 401 -except ImportError: - from torch.utils.model_zoo import load_url as load_state_dict_from_url # noqa: 401 - - -def _get_extension_path(lib_name): - - lib_dir = os.path.dirname(__file__) - if os.name == "nt": - # Register the main torchvision library location on the default DLL path - import ctypes - import sys - - kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True) - with_load_library_flags = hasattr(kernel32, "AddDllDirectory") - prev_error_mode = kernel32.SetErrorMode(0x0001) - - if with_load_library_flags: - kernel32.AddDllDirectory.restype = ctypes.c_void_p - - if sys.version_info >= (3, 8): - os.add_dll_directory(lib_dir) - elif with_load_library_flags: - res = kernel32.AddDllDirectory(lib_dir) - if res is None: - err = ctypes.WinError(ctypes.get_last_error()) - err.strerror += f' Error adding "{lib_dir}" to the DLL directories.' - raise err - - kernel32.SetErrorMode(prev_error_mode) - - loader_details = (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES) - - extfinder = importlib.machinery.FileFinder(lib_dir, loader_details) - ext_specs = extfinder.find_spec(lib_name) - if ext_specs is None: - raise ImportError - - return ext_specs.origin diff --git a/cv/classification/resnet50/pytorch/_torchvision/models/__init__.py b/cv/classification/resnet50/pytorch/_torchvision/models/__init__.py deleted file mode 100644 index b40d8f4b4..000000000 --- a/cv/classification/resnet50/pytorch/_torchvision/models/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. -# Copyright Declaration: This software, including all of its code and documentation, -# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX -# Semiconductor Co., Ltd. 
and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright -# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar -# CoreX. No user of this software shall have any right, ownership or interest in this software and -# any use of this software shall be in compliance with the terms and conditions of the End User -# License Agreement. -from .resnet import * diff --git a/cv/classification/resnet50/pytorch/_torchvision/models/resnet.py b/cv/classification/resnet50/pytorch/_torchvision/models/resnet.py deleted file mode 100644 index 2a39be3bc..000000000 --- a/cv/classification/resnet50/pytorch/_torchvision/models/resnet.py +++ /dev/null @@ -1,421 +0,0 @@ -# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. -# Copyright Declaration: This software, including all of its code and documentation, -# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX -# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright -# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar -# CoreX. No user of this software shall have any right, ownership or interest in this software and -# any use of this software shall be in compliance with the terms and conditions of the End User -# License Agreement. -from typing import Type, Any, Callable, Union, List, Optional - -import torch -import torch.nn as nn -from torch import Tensor - -from .._internally_replaced_utils import load_state_dict_from_url -from ..utils import _log_api_usage_once - - -__all__ = [ - "ResNet", - "resnet18", - "resnet34", - "resnet50", - "resnet101", - "resnet152", - "resnext50_32x4d", - "resnext101_32x8d", - "wide_resnet50_2", - "wide_resnet101_2", -] - - -model_urls = { - "resnet18": "https://download.pytorch.org/models/resnet18-f37072fd.pth", - "resnet34": "https://download.pytorch.org/models/resnet34-b627a593.pth", - "resnet50": "https://download.pytorch.org/models/resnet50-0676ba61.pth", - "resnet101": "https://download.pytorch.org/models/resnet101-63fe2227.pth", - "resnet152": "https://download.pytorch.org/models/resnet152-394f9c45.pth", - "resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth", - "resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth", - "wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth", - "wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth", -} - - -def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d: - """3x3 convolution with padding""" - return nn.Conv2d( - in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=dilation, - groups=groups, - bias=False, - dilation=dilation, - ) - - -def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d: - """1x1 convolution""" - return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) - - -class BasicBlock(nn.Module): - expansion: int = 1 - - def __init__( - self, - inplanes: int, - planes: int, - stride: int = 1, - downsample: Optional[nn.Module] = None, - groups: int = 1, - base_width: int = 64, - dilation: int = 1, - norm_layer: Optional[Callable[..., nn.Module]] = None, - ) -> None: - super().__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - if groups != 1 or base_width != 64: - raise ValueError("BasicBlock only supports groups=1 
and base_width=64") - if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in BasicBlock") - # Both self.conv1 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = norm_layer(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = norm_layer(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x: Tensor) -> Tensor: - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) - # while original implementation places the stride at the first 1x1 convolution(self.conv1) - # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. - # This variant is also known as ResNet V1.5 and improves accuracy according to - # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. - - expansion: int = 4 - - def __init__( - self, - inplanes: int, - planes: int, - stride: int = 1, - downsample: Optional[nn.Module] = None, - groups: int = 1, - base_width: int = 64, - dilation: int = 1, - norm_layer: Optional[Callable[..., nn.Module]] = None, - ) -> None: - super().__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - width = int(planes * (base_width / 64.0)) * groups - # Both self.conv2 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv1x1(inplanes, width) - self.bn1 = norm_layer(width) - self.conv2 = conv3x3(width, width, stride, groups, dilation) - self.bn2 = norm_layer(width) - self.conv3 = conv1x1(width, planes * self.expansion) - self.bn3 = norm_layer(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x: Tensor) -> Tensor: - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - def __init__( - self, - block: Type[Union[BasicBlock, Bottleneck]], - layers: List[int], - num_classes: int = 1000, - zero_init_residual: bool = False, - groups: int = 1, - width_per_group: int = 64, - replace_stride_with_dilation: Optional[List[bool]] = None, - norm_layer: Optional[Callable[..., nn.Module]] = None, - ) -> None: - super().__init__() - _log_api_usage_once(self) - if norm_layer is None: - norm_layer = nn.BatchNorm2d - self._norm_layer = norm_layer - - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - # each element in the tuple indicates if we should replace - # the 2x2 stride with a dilated convolution instead - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError( - "replace_stride_with_dilation should be None " - f"or a 3-element tuple, got {replace_stride_with_dilation}" - ) - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False) - self.bn1 = 
norm_layer(self.inplanes) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]) - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.fc = nn.Linear(512 * block.expansion, num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - # Zero-initialize the last BN in each residual branch, - # so that the residual branch starts with zeros, and each residual block behaves like an identity. - # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 - if zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type] - elif isinstance(m, BasicBlock): - nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type] - - def _make_layer( - self, - block: Type[Union[BasicBlock, Bottleneck]], - planes: int, - blocks: int, - stride: int = 1, - dilate: bool = False, - ) -> nn.Sequential: - norm_layer = self._norm_layer - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - norm_layer(planes * block.expansion), - ) - - layers = [] - layers.append( - block( - self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer - ) - ) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append( - block( - self.inplanes, - planes, - groups=self.groups, - base_width=self.base_width, - dilation=self.dilation, - norm_layer=norm_layer, - ) - ) - - return nn.Sequential(*layers) - - def _forward_impl(self, x: Tensor) -> Tensor: - # See note [TorchScript super()] - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.avgpool(x) - x = torch.flatten(x, 1) - x = self.fc(x) - - return x - - def forward(self, x: Tensor) -> Tensor: - return self._forward_impl(x) - - -def _resnet( - arch: str, - block: Type[Union[BasicBlock, Bottleneck]], - layers: List[int], - pretrained: bool, - progress: bool, - **kwargs: Any, -) -> ResNet: - model = ResNet(block, layers, **kwargs) - if pretrained: - state_dict = load_state_dict_from_url(model_urls[arch], progress=progress) - model.load_state_dict(state_dict) - return model - - -def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNet-18 model from - `"Deep Residual Learning for Image Recognition" `_. 
- - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet("resnet18", BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs) - - -def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNet-34 model from - `"Deep Residual Learning for Image Recognition" `_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet("resnet34", BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs) - - -def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNet-50 model from - `"Deep Residual Learning for Image Recognition" `_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet("resnet50", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs) - - -def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNet-101 model from - `"Deep Residual Learning for Image Recognition" `_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet("resnet101", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) - - -def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNet-152 model from - `"Deep Residual Learning for Image Recognition" `_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet("resnet152", Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs) - - -def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNeXt-50 32x4d model from - `"Aggregated Residual Transformation for Deep Neural Networks" `_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs["groups"] = 32 - kwargs["width_per_group"] = 4 - return _resnet("resnext50_32x4d", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs) - - -def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNeXt-101 32x8d model from - `"Aggregated Residual Transformation for Deep Neural Networks" `_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs["groups"] = 32 - kwargs["width_per_group"] = 8 - return _resnet("resnext101_32x8d", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) - - -def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""Wide ResNet-50-2 model from - `"Wide Residual Networks" `_. - - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 - channels, and in Wide ResNet-50-2 has 2048-1024-2048. 
- - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs["width_per_group"] = 64 * 2 - return _resnet("wide_resnet50_2", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs) - - -def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""Wide ResNet-101-2 model from - `"Wide Residual Networks" `_. - - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 - channels, and in Wide ResNet-50-2 has 2048-1024-2048. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs["width_per_group"] = 64 * 2 - return _resnet("wide_resnet101_2", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) diff --git a/cv/classification/resnet50/pytorch/_torchvision/ops/__init__.py b/cv/classification/resnet50/pytorch/_torchvision/ops/__init__.py deleted file mode 100644 index 5fbaff88c..000000000 --- a/cv/classification/resnet50/pytorch/_torchvision/ops/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. -# Copyright Declaration: This software, including all of its code and documentation, -# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX -# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright -# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar -# CoreX. No user of this software shall have any right, ownership or interest in this software and -# any use of this software shall be in compliance with the terms and conditions of the End User -# License Agreement. -from .misc import FrozenBatchNorm2d, ConvNormActivation, SqueezeExcitation -from .stochastic_depth import stochastic_depth, StochasticDepth - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/cv/classification/resnet50/pytorch/_torchvision/ops/misc.py b/cv/classification/resnet50/pytorch/_torchvision/ops/misc.py deleted file mode 100644 index a2c5a37c9..000000000 --- a/cv/classification/resnet50/pytorch/_torchvision/ops/misc.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. -# Copyright Declaration: This software, including all of its code and documentation, -# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX -# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright -# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar -# CoreX. No user of this software shall have any right, ownership or interest in this software and -# any use of this software shall be in compliance with the terms and conditions of the End User -# License Agreement. 
-from typing import Callable, List, Optional - -import torch -from torch import Tensor - -from ..utils import _log_api_usage_once - - -interpolate = torch.nn.functional.interpolate - - -# This is not in nn -class FrozenBatchNorm2d(torch.nn.Module): - """ - BatchNorm2d where the batch statistics and the affine parameters are fixed - - Args: - num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)`` - eps (float): a value added to the denominator for numerical stability. Default: 1e-5 - """ - - def __init__( - self, - num_features: int, - eps: float = 1e-5, - ): - super().__init__() - _log_api_usage_once(self) - self.eps = eps - self.register_buffer("weight", torch.ones(num_features)) - self.register_buffer("bias", torch.zeros(num_features)) - self.register_buffer("running_mean", torch.zeros(num_features)) - self.register_buffer("running_var", torch.ones(num_features)) - - def _load_from_state_dict( - self, - state_dict: dict, - prefix: str, - local_metadata: dict, - strict: bool, - missing_keys: List[str], - unexpected_keys: List[str], - error_msgs: List[str], - ): - num_batches_tracked_key = prefix + "num_batches_tracked" - if num_batches_tracked_key in state_dict: - del state_dict[num_batches_tracked_key] - - super()._load_from_state_dict( - state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs - ) - - def forward(self, x: Tensor) -> Tensor: - # move reshapes to the beginning - # to make it fuser-friendly - w = self.weight.reshape(1, -1, 1, 1) - b = self.bias.reshape(1, -1, 1, 1) - rv = self.running_var.reshape(1, -1, 1, 1) - rm = self.running_mean.reshape(1, -1, 1, 1) - scale = w * (rv + self.eps).rsqrt() - bias = b - rm * scale - return x * scale + bias - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps})" - - -class ConvNormActivation(torch.nn.Sequential): - """ - Configurable block used for Convolution-Normalzation-Activation blocks. - - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the Convolution-Normalzation-Activation block - kernel_size: (int, optional): Size of the convolving kernel. Default: 3 - stride (int, optional): Stride of the convolution. Default: 1 - padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in wich case it will calculated as ``padding = (kernel_size - 1) // 2 * dilation`` - groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1 - norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolutiuon layer. If ``None`` this layer wont be used. Default: ``torch.nn.BatchNorm2d`` - activation_layer (Callable[..., torch.nn.Module], optinal): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer wont be used. Default: ``torch.nn.ReLU`` - dilation (int): Spacing between kernel elements. Default: 1 - inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True`` - bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``. 
- - """ - - def __init__( - self, - in_channels: int, - out_channels: int, - kernel_size: int = 3, - stride: int = 1, - padding: Optional[int] = None, - groups: int = 1, - norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d, - activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU, - dilation: int = 1, - inplace: Optional[bool] = True, - bias: Optional[bool] = None, - ) -> None: - if padding is None: - padding = (kernel_size - 1) // 2 * dilation - if bias is None: - bias = norm_layer is None - layers = [ - torch.nn.Conv2d( - in_channels, - out_channels, - kernel_size, - stride, - padding, - dilation=dilation, - groups=groups, - bias=bias, - ) - ] - if norm_layer is not None: - layers.append(norm_layer(out_channels)) - if activation_layer is not None: - params = {} if inplace is None else {"inplace": inplace} - layers.append(activation_layer(**params)) - super().__init__(*layers) - _log_api_usage_once(self) - self.out_channels = out_channels - - -class SqueezeExcitation(torch.nn.Module): - """ - This block implements the Squeeze-and-Excitation block from https://arxiv.org/abs/1709.01507 (see Fig. 1). - Parameters ``activation``, and ``scale_activation`` correspond to ``delta`` and ``sigma`` in in eq. 3. - - Args: - input_channels (int): Number of channels in the input image - squeeze_channels (int): Number of squeeze channels - activation (Callable[..., torch.nn.Module], optional): ``delta`` activation. Default: ``torch.nn.ReLU`` - scale_activation (Callable[..., torch.nn.Module]): ``sigma`` activation. Default: ``torch.nn.Sigmoid`` - """ - - def __init__( - self, - input_channels: int, - squeeze_channels: int, - activation: Callable[..., torch.nn.Module] = torch.nn.ReLU, - scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid, - ) -> None: - super().__init__() - _log_api_usage_once(self) - self.avgpool = torch.nn.AdaptiveAvgPool2d(1) - self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1) - self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1) - self.activation = activation() - self.scale_activation = scale_activation() - - def _scale(self, input: Tensor) -> Tensor: - scale = self.avgpool(input) - scale = self.fc1(scale) - scale = self.activation(scale) - scale = self.fc2(scale) - return self.scale_activation(scale) - - def forward(self, input: Tensor) -> Tensor: - scale = self._scale(input) - return scale * input diff --git a/cv/classification/resnet50/pytorch/_torchvision/ops/stochastic_depth.py b/cv/classification/resnet50/pytorch/_torchvision/ops/stochastic_depth.py deleted file mode 100644 index 167124cbf..000000000 --- a/cv/classification/resnet50/pytorch/_torchvision/ops/stochastic_depth.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. -# Copyright Declaration: This software, including all of its code and documentation, -# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX -# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright -# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar -# CoreX. No user of this software shall have any right, ownership or interest in this software and -# any use of this software shall be in compliance with the terms and conditions of the End User -# License Agreement. 
-import torch -import torch.fx -from torch import nn, Tensor - -from ..utils import _log_api_usage_once - - -def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True) -> Tensor: - """ - Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth" - `_ used for randomly dropping residual - branches of residual architectures. - - Args: - input (Tensor[N, ...]): The input tensor or arbitrary dimensions with the first one - being its batch i.e. a batch with ``N`` rows. - p (float): probability of the input to be zeroed. - mode (str): ``"batch"`` or ``"row"``. - ``"batch"`` randomly zeroes the entire input, ``"row"`` zeroes - randomly selected rows from the batch. - training: apply stochastic depth if is ``True``. Default: ``True`` - - Returns: - Tensor[N, ...]: The randomly zeroed tensor. - """ - if not torch.jit.is_scripting() and not torch.jit.is_tracing(): - _log_api_usage_once(stochastic_depth) - if p < 0.0 or p > 1.0: - raise ValueError(f"drop probability has to be between 0 and 1, but got {p}") - if mode not in ["batch", "row"]: - raise ValueError(f"mode has to be either 'batch' or 'row', but got {mode}") - if not training or p == 0.0: - return input - - survival_rate = 1.0 - p - if mode == "row": - size = [input.shape[0]] + [1] * (input.ndim - 1) - else: - size = [1] * input.ndim - noise = torch.empty(size, dtype=input.dtype, device=input.device) - noise = noise.bernoulli_(survival_rate) - if survival_rate > 0.0: - noise.div_(survival_rate) - return input * noise - - -torch.fx.wrap("stochastic_depth") - - -class StochasticDepth(nn.Module): - """ - See :func:`stochastic_depth`. - """ - - def __init__(self, p: float, mode: str) -> None: - super().__init__() - _log_api_usage_once(self) - self.p = p - self.mode = mode - - def forward(self, input: Tensor) -> Tensor: - return stochastic_depth(input, self.p, self.mode, self.training) - - def __repr__(self) -> str: - s = f"{self.__class__.__name__}(p={self.p}, mode={self.mode})" - return s diff --git a/cv/classification/resnet50/pytorch/_torchvision/utils.py b/cv/classification/resnet50/pytorch/_torchvision/utils.py deleted file mode 100644 index 23bb8b54c..000000000 --- a/cv/classification/resnet50/pytorch/_torchvision/utils.py +++ /dev/null @@ -1,556 +0,0 @@ -# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. -# Copyright Declaration: This software, including all of its code and documentation, -# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX -# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright -# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar -# CoreX. No user of this software shall have any right, ownership or interest in this software and -# any use of this software shall be in compliance with the terms and conditions of the End User -# License Agreement. 
-import math -import pathlib -import warnings -from types import FunctionType -from typing import Any, BinaryIO, List, Optional, Tuple, Union - -import numpy as np -import torch -from PIL import Image, ImageColor, ImageDraw, ImageFont - -__all__ = [ - "make_grid", - "save_image", - "draw_bounding_boxes", - "draw_segmentation_masks", - "draw_keypoints", - "flow_to_image", -] - - -@torch.no_grad() -def make_grid( - tensor: Union[torch.Tensor, List[torch.Tensor]], - nrow: int = 8, - padding: int = 2, - normalize: bool = False, - value_range: Optional[Tuple[int, int]] = None, - scale_each: bool = False, - pad_value: float = 0.0, - **kwargs, -) -> torch.Tensor: - """ - Make a grid of images. - - Args: - tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W) - or a list of images all of the same size. - nrow (int, optional): Number of images displayed in each row of the grid. - The final grid size is ``(B / nrow, nrow)``. Default: ``8``. - padding (int, optional): amount of padding. Default: ``2``. - normalize (bool, optional): If True, shift the image to the range (0, 1), - by the min and max values specified by ``value_range``. Default: ``False``. - value_range (tuple, optional): tuple (min, max) where min and max are numbers, - then these numbers are used to normalize the image. By default, min and max - are computed from the tensor. - range (tuple. optional): - .. warning:: - This parameter was deprecated in ``0.12`` and will be removed in ``0.14``. Please use ``value_range`` - instead. - scale_each (bool, optional): If ``True``, scale each image in the batch of - images separately rather than the (min, max) over all images. Default: ``False``. - pad_value (float, optional): Value for the padded pixels. Default: ``0``. - - Returns: - grid (Tensor): the tensor containing grid of images. - """ - if not torch.jit.is_scripting() and not torch.jit.is_tracing(): - _log_api_usage_once(make_grid) - if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): - raise TypeError(f"tensor or list of tensors expected, got {type(tensor)}") - - if "range" in kwargs.keys(): - warnings.warn( - "The parameter 'range' is deprecated since 0.12 and will be removed in 0.14. " - "Please use 'value_range' instead." - ) - value_range = kwargs["range"] - - # if list of tensors, convert to a 4D mini-batch Tensor - if isinstance(tensor, list): - tensor = torch.stack(tensor, dim=0) - - if tensor.dim() == 2: # single image H x W - tensor = tensor.unsqueeze(0) - if tensor.dim() == 3: # single image - if tensor.size(0) == 1: # if single-channel, convert to 3-channel - tensor = torch.cat((tensor, tensor, tensor), 0) - tensor = tensor.unsqueeze(0) - - if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images - tensor = torch.cat((tensor, tensor, tensor), 1) - - if normalize is True: - tensor = tensor.clone() # avoid modifying tensor in-place - if value_range is not None: - assert isinstance( - value_range, tuple - ), "value_range has to be a tuple (min, max) if specified. 
min and max are numbers" - - def norm_ip(img, low, high): - img.clamp_(min=low, max=high) - img.sub_(low).div_(max(high - low, 1e-5)) - - def norm_range(t, value_range): - if value_range is not None: - norm_ip(t, value_range[0], value_range[1]) - else: - norm_ip(t, float(t.min()), float(t.max())) - - if scale_each is True: - for t in tensor: # loop over mini-batch dimension - norm_range(t, value_range) - else: - norm_range(tensor, value_range) - - assert isinstance(tensor, torch.Tensor) - if tensor.size(0) == 1: - return tensor.squeeze(0) - - # make the mini-batch of images into a grid - nmaps = tensor.size(0) - xmaps = min(nrow, nmaps) - ymaps = int(math.ceil(float(nmaps) / xmaps)) - height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding) - num_channels = tensor.size(1) - grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value) - k = 0 - for y in range(ymaps): - for x in range(xmaps): - if k >= nmaps: - break - # Tensor.copy_() is a valid method but seems to be missing from the stubs - # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_ - grid.narrow(1, y * height + padding, height - padding).narrow( # type: ignore[attr-defined] - 2, x * width + padding, width - padding - ).copy_(tensor[k]) - k = k + 1 - return grid - - -@torch.no_grad() -def save_image( - tensor: Union[torch.Tensor, List[torch.Tensor]], - fp: Union[str, pathlib.Path, BinaryIO], - format: Optional[str] = None, - **kwargs, -) -> None: - """ - Save a given Tensor into an image file. - - Args: - tensor (Tensor or list): Image to be saved. If given a mini-batch tensor, - saves the tensor as a grid of images by calling ``make_grid``. - fp (string or file object): A filename or a file object - format(Optional): If omitted, the format to use is determined from the filename extension. - If a file object was used instead of a filename, this parameter should always be used. - **kwargs: Other arguments are documented in ``make_grid``. - """ - - if not torch.jit.is_scripting() and not torch.jit.is_tracing(): - _log_api_usage_once(save_image) - grid = make_grid(tensor, **kwargs) - # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer - ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy() - im = Image.fromarray(ndarr) - im.save(fp, format=format) - - -@torch.no_grad() -def draw_bounding_boxes( - image: torch.Tensor, - boxes: torch.Tensor, - labels: Optional[List[str]] = None, - colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None, - fill: Optional[bool] = False, - width: int = 1, - font: Optional[str] = None, - font_size: int = 10, -) -> torch.Tensor: - - """ - Draws bounding boxes on given image. - The values of the input image should be uint8 between 0 and 255. - If fill is True, Resulting Tensor should be saved as PNG image. - - Args: - image (Tensor): Tensor of shape (C x H x W) and dtype uint8. - boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that - the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and - `0 <= ymin < ymax < H`. - labels (List[str]): List containing the labels of bounding boxes. - colors (color or list of colors, optional): List containing the colors - of the boxes or single color for all boxes. The color can be represented as - PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``. 
- By default, random colors are generated for boxes. - fill (bool): If `True` fills the bounding box with specified color. - width (int): Width of bounding box. - font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may - also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`, - `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS. - font_size (int): The requested font size in points. - - Returns: - img (Tensor[C, H, W]): Image Tensor of dtype uint8 with bounding boxes plotted. - """ - - if not torch.jit.is_scripting() and not torch.jit.is_tracing(): - _log_api_usage_once(draw_bounding_boxes) - if not isinstance(image, torch.Tensor): - raise TypeError(f"Tensor expected, got {type(image)}") - elif image.dtype != torch.uint8: - raise ValueError(f"Tensor uint8 expected, got {image.dtype}") - elif image.dim() != 3: - raise ValueError("Pass individual images, not batches") - elif image.size(0) not in {1, 3}: - raise ValueError("Only grayscale and RGB images are supported") - - num_boxes = boxes.shape[0] - - if labels is None: - labels: Union[List[str], List[None]] = [None] * num_boxes # type: ignore[no-redef] - elif len(labels) != num_boxes: - raise ValueError( - f"Number of boxes ({num_boxes}) and labels ({len(labels)}) mismatch. Please specify labels for each box." - ) - - if colors is None: - colors = _generate_color_palette(num_boxes) - elif isinstance(colors, list): - if len(colors) < num_boxes: - raise ValueError(f"Number of colors ({len(colors)}) is less than number of boxes ({num_boxes}). ") - else: # colors specifies a single color for all boxes - colors = [colors] * num_boxes - - colors = [(ImageColor.getrgb(color) if isinstance(color, str) else color) for color in colors] - - # Handle Grayscale images - if image.size(0) == 1: - image = torch.tile(image, (3, 1, 1)) - - ndarr = image.permute(1, 2, 0).cpu().numpy() - img_to_draw = Image.fromarray(ndarr) - img_boxes = boxes.to(torch.int64).tolist() - - if fill: - draw = ImageDraw.Draw(img_to_draw, "RGBA") - else: - draw = ImageDraw.Draw(img_to_draw) - - txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size) - - for bbox, color, label in zip(img_boxes, colors, labels): # type: ignore[arg-type] - if fill: - fill_color = color + (100,) - draw.rectangle(bbox, width=width, outline=color, fill=fill_color) - else: - draw.rectangle(bbox, width=width, outline=color) - - if label is not None: - margin = width + 1 - draw.text((bbox[0] + margin, bbox[1] + margin), label, fill=color, font=txt_font) - - return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8) - - -@torch.no_grad() -def draw_segmentation_masks( - image: torch.Tensor, - masks: torch.Tensor, - alpha: float = 0.8, - colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None, -) -> torch.Tensor: - - """ - Draws segmentation masks on given RGB image. - The values of the input image should be uint8 between 0 and 255. - - Args: - image (Tensor): Tensor of shape (3, H, W) and dtype uint8. - masks (Tensor): Tensor of shape (num_masks, H, W) or (H, W) and dtype bool. - alpha (float): Float number between 0 and 1 denoting the transparency of the masks. - 0 means full transparency, 1 means no transparency. - colors (color or list of colors, optional): List containing the colors - of the masks or single color for all masks. The color can be represented as - PIL strings e.g. 
"red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``. - By default, random colors are generated for each mask. - - Returns: - img (Tensor[C, H, W]): Image Tensor, with segmentation masks drawn on top. - """ - - if not torch.jit.is_scripting() and not torch.jit.is_tracing(): - _log_api_usage_once(draw_segmentation_masks) - if not isinstance(image, torch.Tensor): - raise TypeError(f"The image must be a tensor, got {type(image)}") - elif image.dtype != torch.uint8: - raise ValueError(f"The image dtype must be uint8, got {image.dtype}") - elif image.dim() != 3: - raise ValueError("Pass individual images, not batches") - elif image.size()[0] != 3: - raise ValueError("Pass an RGB image. Other Image formats are not supported") - if masks.ndim == 2: - masks = masks[None, :, :] - if masks.ndim != 3: - raise ValueError("masks must be of shape (H, W) or (batch_size, H, W)") - if masks.dtype != torch.bool: - raise ValueError(f"The masks must be of dtype bool. Got {masks.dtype}") - if masks.shape[-2:] != image.shape[-2:]: - raise ValueError("The image and the masks must have the same height and width") - - num_masks = masks.size()[0] - if colors is not None and num_masks > len(colors): - raise ValueError(f"There are more masks ({num_masks}) than colors ({len(colors)})") - - if colors is None: - colors = _generate_color_palette(num_masks) - - if not isinstance(colors, list): - colors = [colors] - if not isinstance(colors[0], (tuple, str)): - raise ValueError("colors must be a tuple or a string, or a list thereof") - if isinstance(colors[0], tuple) and len(colors[0]) != 3: - raise ValueError("It seems that you passed a tuple of colors instead of a list of colors") - - out_dtype = torch.uint8 - - colors_ = [] - for color in colors: - if isinstance(color, str): - color = ImageColor.getrgb(color) - colors_.append(torch.tensor(color, dtype=out_dtype)) - - img_to_draw = image.detach().clone() - # TODO: There might be a way to vectorize this - for mask, color in zip(masks, colors_): - img_to_draw[:, mask] = color[:, None] - - out = image * (1 - alpha) + img_to_draw * alpha - return out.to(out_dtype) - - -@torch.no_grad() -def draw_keypoints( - image: torch.Tensor, - keypoints: torch.Tensor, - connectivity: Optional[List[Tuple[int, int]]] = None, - colors: Optional[Union[str, Tuple[int, int, int]]] = None, - radius: int = 2, - width: int = 3, -) -> torch.Tensor: - - """ - Draws Keypoints on given RGB image. - The values of the input image should be uint8 between 0 and 255. - - Args: - image (Tensor): Tensor of shape (3, H, W) and dtype uint8. - keypoints (Tensor): Tensor of shape (num_instances, K, 2) the K keypoints location for each of the N instances, - in the format [x, y]. - connectivity (List[Tuple[int, int]]]): A List of tuple where, - each tuple contains pair of keypoints to be connected. - colors (str, Tuple): The color can be represented as - PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``. - radius (int): Integer denoting radius of keypoint. - width (int): Integer denoting width of line connecting keypoints. - - Returns: - img (Tensor[C, H, W]): Image Tensor of dtype uint8 with keypoints drawn. 
- """ - - if not torch.jit.is_scripting() and not torch.jit.is_tracing(): - _log_api_usage_once(draw_keypoints) - if not isinstance(image, torch.Tensor): - raise TypeError(f"The image must be a tensor, got {type(image)}") - elif image.dtype != torch.uint8: - raise ValueError(f"The image dtype must be uint8, got {image.dtype}") - elif image.dim() != 3: - raise ValueError("Pass individual images, not batches") - elif image.size()[0] != 3: - raise ValueError("Pass an RGB image. Other Image formats are not supported") - - if keypoints.ndim != 3: - raise ValueError("keypoints must be of shape (num_instances, K, 2)") - - ndarr = image.permute(1, 2, 0).cpu().numpy() - img_to_draw = Image.fromarray(ndarr) - draw = ImageDraw.Draw(img_to_draw) - img_kpts = keypoints.to(torch.int64).tolist() - - for kpt_id, kpt_inst in enumerate(img_kpts): - for inst_id, kpt in enumerate(kpt_inst): - x1 = kpt[0] - radius - x2 = kpt[0] + radius - y1 = kpt[1] - radius - y2 = kpt[1] + radius - draw.ellipse([x1, y1, x2, y2], fill=colors, outline=None, width=0) - - if connectivity: - for connection in connectivity: - start_pt_x = kpt_inst[connection[0]][0] - start_pt_y = kpt_inst[connection[0]][1] - - end_pt_x = kpt_inst[connection[1]][0] - end_pt_y = kpt_inst[connection[1]][1] - - draw.line( - ((start_pt_x, start_pt_y), (end_pt_x, end_pt_y)), - width=width, - ) - - return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8) - - -# Flow visualization code adapted from https://github.com/tomrunia/OpticalFlow_Visualization -@torch.no_grad() -def flow_to_image(flow: torch.Tensor) -> torch.Tensor: - - """ - Converts a flow to an RGB image. - - Args: - flow (Tensor): Flow of shape (N, 2, H, W) or (2, H, W) and dtype torch.float. - - Returns: - img (Tensor): Image Tensor of dtype uint8 where each color corresponds - to a given flow direction. Shape is (N, 3, H, W) or (3, H, W) depending on the input. - """ - - if flow.dtype != torch.float: - raise ValueError(f"Flow should be of dtype torch.float, got {flow.dtype}.") - - orig_shape = flow.shape - if flow.ndim == 3: - flow = flow[None] # Add batch dim - - if flow.ndim != 4 or flow.shape[1] != 2: - raise ValueError(f"Input flow should have shape (2, H, W) or (N, 2, H, W), got {orig_shape}.") - - max_norm = torch.sum(flow ** 2, dim=1).sqrt().max() - epsilon = torch.finfo((flow).dtype).eps - normalized_flow = flow / (max_norm + epsilon) - img = _normalized_flow_to_image(normalized_flow) - - if len(orig_shape) == 3: - img = img[0] # Remove batch dim - return img - - -@torch.no_grad() -def _normalized_flow_to_image(normalized_flow: torch.Tensor) -> torch.Tensor: - - """ - Converts a batch of normalized flow to an RGB image. - - Args: - normalized_flow (torch.Tensor): Normalized flow tensor of shape (N, 2, H, W) - Returns: - img (Tensor(N, 3, H, W)): Flow visualization image of dtype uint8. 
- """ - - N, _, H, W = normalized_flow.shape - flow_image = torch.zeros((N, 3, H, W), dtype=torch.uint8) - colorwheel = _make_colorwheel() # shape [55x3] - num_cols = colorwheel.shape[0] - norm = torch.sum(normalized_flow ** 2, dim=1).sqrt() - a = torch.atan2(-normalized_flow[:, 1, :, :], -normalized_flow[:, 0, :, :]) / torch.pi - fk = (a + 1) / 2 * (num_cols - 1) - k0 = torch.floor(fk).to(torch.long) - k1 = k0 + 1 - k1[k1 == num_cols] = 0 - f = fk - k0 - - for c in range(colorwheel.shape[1]): - tmp = colorwheel[:, c] - col0 = tmp[k0] / 255.0 - col1 = tmp[k1] / 255.0 - col = (1 - f) * col0 + f * col1 - col = 1 - norm * (1 - col) - flow_image[:, c, :, :] = torch.floor(255 * col) - return flow_image - - -def _make_colorwheel() -> torch.Tensor: - """ - Generates a color wheel for optical flow visualization as presented in: - Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) - URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf. - - Returns: - colorwheel (Tensor[55, 3]): Colorwheel Tensor. - """ - - RY = 15 - YG = 6 - GC = 4 - CB = 11 - BM = 13 - MR = 6 - - ncols = RY + YG + GC + CB + BM + MR - colorwheel = torch.zeros((ncols, 3)) - col = 0 - - # RY - colorwheel[0:RY, 0] = 255 - colorwheel[0:RY, 1] = torch.floor(255 * torch.arange(0, RY) / RY) - col = col + RY - # YG - colorwheel[col : col + YG, 0] = 255 - torch.floor(255 * torch.arange(0, YG) / YG) - colorwheel[col : col + YG, 1] = 255 - col = col + YG - # GC - colorwheel[col : col + GC, 1] = 255 - colorwheel[col : col + GC, 2] = torch.floor(255 * torch.arange(0, GC) / GC) - col = col + GC - # CB - colorwheel[col : col + CB, 1] = 255 - torch.floor(255 * torch.arange(CB) / CB) - colorwheel[col : col + CB, 2] = 255 - col = col + CB - # BM - colorwheel[col : col + BM, 2] = 255 - colorwheel[col : col + BM, 0] = torch.floor(255 * torch.arange(0, BM) / BM) - col = col + BM - # MR - colorwheel[col : col + MR, 2] = 255 - torch.floor(255 * torch.arange(MR) / MR) - colorwheel[col : col + MR, 0] = 255 - return colorwheel - - -def _generate_color_palette(num_objects: int): - palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) - return [tuple((i * palette) % 255) for i in range(num_objects)] - - -def _log_api_usage_once(obj: Any) -> None: - - """ - Logs API usage(module and name) within an organization. - In a large ecosystem, it's often useful to track the PyTorch and - TorchVision APIs usage. This API provides the similar functionality to the - logging module in the Python stdlib. It can be used for debugging purpose - to log which methods are used and by default it is inactive, unless the user - manually subscribes a logger via the `SetAPIUsageLogger method `_. - Please note it is triggered only once for the same API call within a process. - It does not collect any data from open-source users since it is no-op by default. - For more information, please refer to - * PyTorch note: https://pytorch.org/docs/stable/notes/large_scale_deployments.html#api-usage-logging; - * Logging policy: https://github.com/pytorch/vision/issues/5052; - - Args: - obj (class instance or method): an object to extract info from. 
- """ - if not obj.__module__.startswith("torchvision"): - return - name = obj.__class__.__name__ - if isinstance(obj, FunctionType): - name = obj.__name__ - torch._C._log_api_usage_once(f"{obj.__module__}.{name}") diff --git a/cv/classification/resnet50/pytorch/utils.py b/cv/classification/resnet50/pytorch/utils.py deleted file mode 100644 index 08ff4fa53..000000000 --- a/cv/classification/resnet50/pytorch/utils.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. -# Copyright Declaration: This software, including all of its code and documentation, -# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX -# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright -# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar -# CoreX. No user of this software shall have any right, ownership or interest in this software and -# any use of this software shall be in compliance with the terms and conditions of the End User -# License Agreement. -from collections import defaultdict, deque, OrderedDict -import copy -import datetime -import hashlib -import time -import torch -import torch.distributed as dist - -import errno -import os - -from common_utils import * - - -def accuracy(output, target, topk=(1,)): - """Computes the accuracy over the k top predictions for the specified values of k""" - with torch.no_grad(): - maxk = max(topk) - batch_size = target.size(0) - - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target[None]) - - res = [] - for k in topk: - correct_k = correct[:k].flatten().sum(dtype=torch.float32) - res.append(correct_k * (100.0 / batch_size)) - return res - - -def average_checkpoints(inputs): - """Loads checkpoints from inputs and returns a model with averaged weights. Original implementation taken from: - https://github.com/pytorch/fairseq/blob/a48f235636557b8d3bc4922a6fa90f3a0fa57955/scripts/average_checkpoints.py#L16 - - Args: - inputs (List[str]): An iterable of string paths of checkpoints to load from. - Returns: - A dict of string keys mapping to various values. The 'model' key - from the returned dict should correspond to an OrderedDict mapping - string parameter names to torch Tensors. 
- """ - params_dict = OrderedDict() - params_keys = None - new_state = None - num_models = len(inputs) - for fpath in inputs: - with open(fpath, "rb") as f: - state = torch.load( - f, - map_location=( - lambda s, _: torch.serialization.default_restore_location(s, "cpu") - ), - ) - # Copies over the settings from the first checkpoint - if new_state is None: - new_state = state - model_params = state["model"] - model_params_keys = list(model_params.keys()) - if params_keys is None: - params_keys = model_params_keys - elif params_keys != model_params_keys: - raise KeyError( - "For checkpoint {}, expected list of params: {}, " - "but found: {}".format(f, params_keys, model_params_keys) - ) - for k in params_keys: - p = model_params[k] - if isinstance(p, torch.HalfTensor): - p = p.float() - if k not in params_dict: - params_dict[k] = p.clone() - # NOTE: clone() is needed in case of p is a shared parameter - else: - params_dict[k] += p - averaged_params = OrderedDict() - for k, v in params_dict.items(): - averaged_params[k] = v - if averaged_params[k].is_floating_point(): - averaged_params[k].div_(num_models) - else: - averaged_params[k] //= num_models - new_state["model"] = averaged_params - return new_state - - -def store_model_weights(model, checkpoint_path, checkpoint_key='model', strict=True): - """ - This method can be used to prepare weights files for new models. It receives as - input a model architecture and a checkpoint from the training script and produces - a file with the weights ready for release. - - Examples: - from torchvision import models as M - - # Classification - model = M.mobilenet_v3_large(pretrained=False) - print(store_model_weights(model, './class.pth')) - - # Quantized Classification - model = M.quantization.mobilenet_v3_large(pretrained=False, quantize=False) - model.fuse_model() - model.qconfig = torch.quantization.get_default_qat_qconfig('qnnpack') - _ = torch.quantization.prepare_qat(model, inplace=True) - print(store_model_weights(model, './qat.pth')) - - # Object Detection - model = M.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, pretrained_backbone=False) - print(store_model_weights(model, './obj.pth')) - - # Segmentation - model = M.segmentation.deeplabv3_mobilenet_v3_large(pretrained=False, pretrained_backbone=False, aux_loss=True) - print(store_model_weights(model, './segm.pth', strict=False)) - - Args: - model (pytorch.nn.Module): The model on which the weights will be loaded for validation purposes. - checkpoint_path (str): The path of the checkpoint we will load. - checkpoint_key (str, optional): The key of the checkpoint where the model weights are stored. - Default: "model". - strict (bool): whether to strictly enforce that the keys - in :attr:`state_dict` match the keys returned by this module's - :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` - - Returns: - output_path (str): The location where the weights are saved. - """ - # Store the new model next to the checkpoint_path - checkpoint_path = os.path.abspath(checkpoint_path) - output_dir = os.path.dirname(checkpoint_path) - - # Deep copy to avoid side-effects on the model object. 
- model = copy.deepcopy(model) - checkpoint = torch.load(checkpoint_path, map_location='cpu') - - # Load the weights to the model to validate that everything works - # and remove unnecessary weights (such as auxiliaries, etc) - model.load_state_dict(checkpoint[checkpoint_key], strict=strict) - - tmp_path = os.path.join(output_dir, str(model.__hash__())) - torch.save(model.state_dict(), tmp_path) - - sha256_hash = hashlib.sha256() - with open(tmp_path, "rb") as f: - # Read and update hash string value in blocks of 4K - for byte_block in iter(lambda: f.read(4096), b""): - sha256_hash.update(byte_block) - hh = sha256_hash.hexdigest() - - output_path = os.path.join(output_dir, "weights-" + str(hh[:8]) + ".pth") - os.replace(tmp_path, output_path) - - return output_path diff --git a/tests/executables/resnet/init_paddle.sh b/tests/executables/resnet/init_paddle.sh new file mode 100644 index 000000000..39528b7a4 --- /dev/null +++ b/tests/executables/resnet/init_paddle.sh @@ -0,0 +1,29 @@ +#!/bin/bash +bash ../_utils/init_classification_paddle.sh ../_utils + +export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python +pip3 install protobuf==3.20.3 +pip3 install pyyaml + +CUR_DIR=$(cd `dirname $0`; pwd) +PRJ_DIR=${CUR_DIR}/../.. +DATASET_DIR=${PRJ_DIR}/data/datasets + +if [ ! -d "${DATASET_DIR}/flowers102" ]; then + tar zxf ${DATASET_DIR}/flowers102.tgz -C ${DATASET_DIR} +fi + +RESNET_PADDLE_DIR=${PRJ_DIR}/official/cv/classification/resnet/paddle +cd ${RESNET_PADDLE_DIR} +pip3 install -r requirements.txt + +a=$(pip3 show paddlepaddle|awk '/Version:/ {print $NF}'); b=(${a//+/ }); c=(${b//./ }) +if [[ ${c[0]} -eq 2 && ${c[1]} -le 5 ]]; then + rm -rf ppcls && ln -s ppcls_2.5 ppcls + mkdir -p data/datasets + ln -s ${DATASET_DIR}/flowers102 ${RESNET_PADDLE_DIR}/data/datasets/flowers102 +else + rm -rf ppcls && ln -s ppcls_2.6 ppcls + mkdir -p dataset + ln -s ${DATASET_DIR}/flowers102 ${RESNET_PADDLE_DIR}/dataset/flowers102 +fi diff --git a/tests/executables/resnet/init_tf.sh b/tests/executables/resnet/init_tf.sh new file mode 100644 index 000000000..e03a185e2 --- /dev/null +++ b/tests/executables/resnet/init_tf.sh @@ -0,0 +1,25 @@ +bash ../_utils/init_tf_cnn_benckmark.sh ../_utils + +CURRENT_DIR=$(cd `dirname $0`; pwd) +ROOT_DIR=${CURRENT_DIR}/../.. +DATA_DIR=${ROOT_DIR}/data/packages + +# sys_name_str=`uname -a` +# if [[ "${sys_name_str}" =~ "aarch64" ]]; then +# pip3 install ${DATA_DIR}/addons/tensorflow_addons*.whl +# fi + +# pip3 install gin-config tensorflow_addons tensorflow_datasets tensorflow_model_optimization + +pip3 install gin-config tensorflow_datasets tensorflow_model_optimization + +pip3 uninstall -y protobuf +pip3 install "protobuf<4.0.0" + +python_version=$(python3 --version 2>&1 |awk '{print $2}'|awk -F '.' '{printf "%d.%d", $1,$2}') +if [ $python_version == 3.7 ]; then + pip3 install numpy==1.21.6 +else + pip3 install numpy==1.23.3 +fi + diff --git a/tests/executables/resnet/init_torch.sh b/tests/executables/resnet/init_torch.sh new file mode 100644 index 000000000..16ec233f5 --- /dev/null +++ b/tests/executables/resnet/init_torch.sh @@ -0,0 +1,7 @@ +bash ../_utils/init_classification_torch.sh ../_utils + +if [ "$?" 
!= "0" ]; then + exit 1 +fi + +exit 0 \ No newline at end of file diff --git a/tests/executables/resnet/train_resnet50_amp_torch.sh b/tests/executables/resnet/train_resnet50_amp_torch.sh new file mode 100644 index 000000000..65132746b --- /dev/null +++ b/tests/executables/resnet/train_resnet50_amp_torch.sh @@ -0,0 +1,21 @@ + +source ../_utils/global_environment_variables.sh + +: ${BATCH_SIZE:=256} + +OUTPUT_DIR=${PROJECT_DIR}/output/resnet/$0 +if [[ -d ${OUTPUT_DIR} ]]; then + mkdir -p ${OUTPUT_DIR} +fi + +ixdltest-check --nonstrict_mode_args="--epoch ${NONSTRICT_EPOCH}" -b 8 --run_script \ +python3 ${PROJECT_DIR}/cv/classification/resnet50/pytorch/train.py \ +--data-path ${PROJECT_DIR}/data/datasets/imagenette \ +--batch-size ${BATCH_SIZE} \ +--lr 0.01 \ +--amp \ +--output-dir ${OUTPUT_DIR} \ +"$@" ;check_status + +rm -fr ${OUTPUT_DIR} +exit ${EXIT_STATUS} diff --git a/tests/executables/resnet/train_resnet50_dist_paddle.sh b/tests/executables/resnet/train_resnet50_dist_paddle.sh new file mode 100644 index 000000000..e3aaf503c --- /dev/null +++ b/tests/executables/resnet/train_resnet50_dist_paddle.sh @@ -0,0 +1,19 @@ +#!/bin/bash +source ../_utils/global_environment_variables.sh +source ../_utils/set_paddle_environment_variables.sh +source ../_utils/get_num_devices.sh + +OUTPUT_DIR=${PROJECT_DIR}/output/resnet/$0 +if [[ -d ${OUTPUT_DIR} ]]; then + mkdir -p ${OUTPUT_DIR} +fi + +RESNET_PADDLE_DIR=${PROJECT_DIR}/official/cv/classification/resnet/paddle +cd ${RESNET_PADDLE_DIR} + +ixdltest-check --nonstrict_mode_args="--epoch ${NONSTRICT_EPOCH}" -b 8 --run_script \ +bash run_resnet50_dist.sh \ +"$@" ;check_status + +rm -fr ${OUTPUT_DIR} +exit ${EXIT_STATUS} \ No newline at end of file diff --git a/tests/executables/resnet/train_resnet50_dist_tf.sh b/tests/executables/resnet/train_resnet50_dist_tf.sh new file mode 100644 index 000000000..64fd864c3 --- /dev/null +++ b/tests/executables/resnet/train_resnet50_dist_tf.sh @@ -0,0 +1,23 @@ +CURRENT_DIR=$(cd `dirname $0`; pwd) + +source ../_utils/global_environment_variables.sh + +unset http_proxy +unset https_proxy + +ROOT_DIR=${CURRENT_DIR}/../.. 
+export DATA_DIR=${ROOT_DIR}/data/datasets + +EXIT_STATUS=0 +check_status() +{ + if ((${PIPESTATUS[0]} != 0)); then + EXIT_STATUS=1 + fi +} + +cd ${ROOT_DIR}/official/cv/classification/resnet/tensorflow +ixdltest-check --nonstrict_mode_args="--epoch ${NONSTRICT_EPOCH}" -b 0.01 --run_script \ +bash run_train_resnet50_distributed_imagenette.sh "$@"; check_status + +exit ${EXIT_STATUS} \ No newline at end of file -- Gitee From 7054f99c1643831b00cfe6ba08a143259d386ec7 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 11:25:28 +0800 Subject: [PATCH 06/18] sync resnet tf --- tests/executables/resnet/train_resnet50_dist_tf.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/executables/resnet/train_resnet50_dist_tf.sh b/tests/executables/resnet/train_resnet50_dist_tf.sh index 64fd864c3..e6b121f0a 100644 --- a/tests/executables/resnet/train_resnet50_dist_tf.sh +++ b/tests/executables/resnet/train_resnet50_dist_tf.sh @@ -16,7 +16,7 @@ check_status() fi } -cd ${ROOT_DIR}/official/cv/classification/resnet/tensorflow +cd ${ROOT_DIR}/cv/classification/resnet50/tensorflow/ ixdltest-check --nonstrict_mode_args="--epoch ${NONSTRICT_EPOCH}" -b 0.01 --run_script \ bash run_train_resnet50_distributed_imagenette.sh "$@"; check_status -- Gitee From 7500ddec38be9983a1cdd1c91a89092bd73d8d14 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 11:32:09 +0800 Subject: [PATCH 07/18] sync retinanet --- tests/executables/retinanet/init_torch.sh | 1 + .../retinanet/train_retinanet_amp_torch.sh | 26 +++++++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 tests/executables/retinanet/init_torch.sh create mode 100644 tests/executables/retinanet/train_retinanet_amp_torch.sh diff --git a/tests/executables/retinanet/init_torch.sh b/tests/executables/retinanet/init_torch.sh new file mode 100644 index 000000000..1f8f08793 --- /dev/null +++ b/tests/executables/retinanet/init_torch.sh @@ -0,0 +1 @@ +bash ../_utils/init_detection_torch.sh ../_utils \ No newline at end of file diff --git a/tests/executables/retinanet/train_retinanet_amp_torch.sh b/tests/executables/retinanet/train_retinanet_amp_torch.sh new file mode 100644 index 000000000..a3431f8f7 --- /dev/null +++ b/tests/executables/retinanet/train_retinanet_amp_torch.sh @@ -0,0 +1,26 @@ +source ../_utils/global_environment_variables.sh +source ../_utils/get_num_devices.sh + +: ${BATCH_SIZE:=8} + +CURRENT_DIR=$(cd `dirname $0`; pwd) +ROOT_DIR=${CURRENT_DIR}/../.. 
+DATA_DIR=${ROOT_DIR}/data/datasets/VOC2012_sample + +EXIT_STATUS=0 +check_status() +{ + if ((${PIPESTATUS[0]} != 0)); then + EXIT_STATUS=1 + fi +} + +ixdltest-check --nonstrict_mode_args="--epoch ${NONSTRICT_EPOCH}" -b 0 --run_script \ +python3 ../../cv/detection/retinanet/pytorch/train.py \ +--model retinanet_resnet50_fpn \ +--lr 0.01 \ +--data-path ${DATA_DIR} \ +--batch-size ${BATCH_SIZE} \ +--amp "$@"; check_status + +exit ${EXIT_STATUS} -- Gitee From 1011445b147e7507cab53fdf73acb839dc070006 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 11:40:48 +0800 Subject: [PATCH 08/18] sync ssd tf --- tests/executables/ssd/init_tf.sh | 67 +++++++++++++++++++++++ tests/executables/ssd/train_ssd_amp_tf.sh | 40 ++++++++++++++ 2 files changed, 107 insertions(+) create mode 100644 tests/executables/ssd/init_tf.sh create mode 100644 tests/executables/ssd/train_ssd_amp_tf.sh diff --git a/tests/executables/ssd/init_tf.sh b/tests/executables/ssd/init_tf.sh new file mode 100644 index 000000000..a82a9f8f9 --- /dev/null +++ b/tests/executables/ssd/init_tf.sh @@ -0,0 +1,67 @@ +#!/bin/bash +CUR_DIR=$(cd "$(dirname "$0")";pwd) +PROJECT_ROOT="${CUR_DIR}/../.." +DATASET_DIR="${PROJECT_ROOT}/data/datasets" +MODEL_CPT_DIR="${PROJECT_ROOT}/data/model_zoo/ssd_tf" +VOC_RECORD_DIR="${DATASET_DIR}/tf_ssd_voc_record" +SSD_ROOT="${PROJECT_ROOT}/cv/detection/ssd/tensorflow" + +# determine whether the user is root mode to execute this script +prefix_sudo="" +current_user=$(whoami) +if [ "$current_user" != "root" ]; then + echo "User $current_user need to add sudo permission keywords" + prefix_sudo="sudo" +fi + +echo "prefix_sudo= $prefix_sudo" + +# pip3 install --upgrade tf_slim +pip3 uninstall -y protobuf +pip3 install "protobuf<4.0.0" +source $(cd `dirname $0`; pwd)/../_utils/which_install_tool.sh +if command_exists apt; then + $prefix_sudo apt install -y git numactl +elif command_exists dnf; then + $prefix_sudo dnf install -y git numactl +else + $prefix_sudo yum install -y git numactl +fi + +# Prepare checkpoint +echo "Prepare SSD's checkpoint" +if [ -d "$MODEL_CPT_DIR" ]; then + rm -rf $MODEL_CPT_DIR +fi +mkdir -p $MODEL_CPT_DIR + +echo "Unarchive model checkpoint" +tar -xzvf "${MODEL_CPT_DIR}.tar" -C "${MODEL_CPT_DIR}/../" +if [ -d "$SSD_ROOT/model" ]; then + rm "$SSD_ROOT/model" +fi +ln -s ${MODEL_CPT_DIR} "$SSD_ROOT/model" +echo "Make soft link from ${MODEL_CPT_DIR} to $SSD_ROOT/model" + +# Prepare voc dataset +echo "Start make SSD's dataset" +if [ -d $VOC_RECORD_DIR ]; then + rm -rf $VOC_RECORD_DIR +fi + +mkdir $VOC_RECORD_DIR + +cd $SSD_ROOT +python3 dataset/convert_voc_sample_tfrecords.py \ +--dataset_directory=$DATASET_DIR \ +--output_directory=$VOC_RECORD_DIR \ +--train_splits VOC2012_sample \ +--validation_splits VOC2012_sample + +if [ -d "dataset/tfrecords" ]; then + rm "dataset/tfrecords" +fi + +ln -s $VOC_RECORD_DIR "./dataset/tfrecords" +echo "End make SSD's dataset" +cd $CUR_DIR diff --git a/tests/executables/ssd/train_ssd_amp_tf.sh b/tests/executables/ssd/train_ssd_amp_tf.sh new file mode 100644 index 000000000..e7aa985f3 --- /dev/null +++ b/tests/executables/ssd/train_ssd_amp_tf.sh @@ -0,0 +1,40 @@ +source ../_utils/global_environment_variables.sh + +: ${BATCH_SIZE:=16} + +CURTIME=`date --utc +%Y%m%d%H%M%S` +CURRENT_DIR=$(cd `dirname $0`; pwd) +MODEL_NAME=`basename ${CURRENT_DIR}` + +ROOT_DIR="${CURRENT_DIR}/../.." 
+DATASET_PATH="${ROOT_DIR}/data/datasets/imagenette" +MODEL_ZOO="${ROOT_DIR}/data/model_zoo" +WORKSPACE="${ROOT_DIR}/output/${MODEL_NAME}/$0/${CURTIME}" +SRC_DIR="${ROOT_DIR}/cv/detection/ssd/tensorflow" + +EXIT_STATUS=0 +check_status() +{ + if ((${PIPESTATUS[0]} != 0)); then + EXIT_STATUS=1 + fi +} + +cd ${SRC_DIR} + + +if [[ -d "./logs" ]]; then + rm -rf ./logs +fi + +: ${CUDA_VISIBLE_DEVICES:="0"} +CUDA_VISIBLE_DEVICES=(${CUDA_VISIBLE_DEVICES//,/ }) +CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES[0]} + +ixdltest-check --nonstrict_mode_args="--train_epochs ${NONSTRICT_EPOCH}" -b 0. --run_script \ +CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES} PYTHONPATH=$PYTHONPATH:${SRC_DIR} python3 train_ssd.py --batch_size ${BATCH_SIZE} --multi_gpu=False \ + --use_amp "$@"; check_status + + +cd - +exit ${EXIT_STATUS} -- Gitee From f6dbca4b1ab1dc054d8b2c2d7026962a5a1800f6 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 11:44:26 +0800 Subject: [PATCH 09/18] sync unet3d --- tests/executables/unet3d/init_torch.sh | 1 + ...ain_unet3d_kits19_stage3_dist_1x8_torch.sh | 50 +++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 tests/executables/unet3d/init_torch.sh create mode 100644 tests/executables/unet3d/train_unet3d_kits19_stage3_dist_1x8_torch.sh diff --git a/tests/executables/unet3d/init_torch.sh b/tests/executables/unet3d/init_torch.sh new file mode 100644 index 000000000..eb598caec --- /dev/null +++ b/tests/executables/unet3d/init_torch.sh @@ -0,0 +1 @@ +bash ../_utils/init_segmentation_torch.sh ../_utils \ No newline at end of file diff --git a/tests/executables/unet3d/train_unet3d_kits19_stage3_dist_1x8_torch.sh b/tests/executables/unet3d/train_unet3d_kits19_stage3_dist_1x8_torch.sh new file mode 100644 index 000000000..0172b5720 --- /dev/null +++ b/tests/executables/unet3d/train_unet3d_kits19_stage3_dist_1x8_torch.sh @@ -0,0 +1,50 @@ +# export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +source ../_utils/get_num_devices.sh + +CURRENT_DIR=$(cd `dirname $0`; pwd) +ROOT_DIR=${CURRENT_DIR}/../.. +DATA_DIR=${ROOT_DIR}/data/datasets/kits19/train +RESUME=${ROOT_DIR}/data/model_zoo/unet3d/model_3620_stage3_start.pth + +SEED=1234 +MAX_EPOCHS=4200 +QUALITY_THRESHOLD="0.908" +START_EVAL_AT=3640 +EVALUATE_EVERY=5 +LEARNING_RATE="0.8" +LR_WARMUP_EPOCHS=200 +: ${BATCH_SIZE:=4} +GRADIENT_ACCUMULATION_STEPS=1 +SAVE_CKPT="./ckpt_stage3" +LOG_NAME='train_log_stage3.json' + +cd ../../cv/semantic_segmentation/unet3d/pytorch +if [ ! -d ${SAVE_CKPT} ]; then + mkdir ${SAVE_CKPT}; +fi + +python3 -u -m torch.distributed.launch --nproc_per_node=$IX_NUM_CUDA_VISIBLE_DEVICES \ +main.py --data_dir ${DATA_DIR} \ +--epochs ${MAX_EPOCHS} \ +--evaluate_every ${EVALUATE_EVERY} \ +--start_eval_at ${START_EVAL_AT} \ +--quality_threshold ${QUALITY_THRESHOLD} \ +--batch_size ${BATCH_SIZE} \ +--optimizer sgd \ +--ga_steps ${GRADIENT_ACCUMULATION_STEPS} \ +--learning_rate ${LEARNING_RATE} \ +--seed ${SEED} \ +--lr_warmup_epochs ${LR_WARMUP_EPOCHS} \ +--output-dir ${SAVE_CKPT} \ +--log_name ${LOG_NAME} \ +--resume ${RESUME} +"$@" + +if [ $? 
-eq 0 ];then + echo 'converged to the target value 0.908 of epoch 3820 in full train, stage-wise training succeed' + exit 0 +else + echo 'not converged to the target value, training fail' + exit 1 +fi + -- Gitee From a2ea0478cbc846da24d6d5c63186efa0c715f8cf Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 13:59:21 +0800 Subject: [PATCH 10/18] sync conformer all --- .../conformer/pytorch/bind_pyt.py | 137 +++++ .../pytorch/configs/conformer_lstm.json | 98 ++++ .../conformer/pytorch/dataloader.py | 120 +++++ .../conformer/pytorch/openspeech/README.md | 248 +++++++++ .../conformer/pytorch/openspeech/__init__.py | 33 ++ .../pytorch/openspeech/criterion/__init__.py | 103 ++++ .../criterion/cross_entropy/__init__.py | 21 + .../criterion/cross_entropy/configuration.py | 48 ++ .../criterion/cross_entropy/cross_entropy.py | 126 +++++ .../openspeech/criterion/ctc/__init__.py | 21 + .../openspeech/criterion/ctc/configuration.py | 51 ++ .../pytorch/openspeech/criterion/ctc/ctc.py | 148 ++++++ .../joint_ctc_cross_entropy/__init__.py | 21 + .../joint_ctc_cross_entropy/configuration.py | 63 +++ .../joint_ctc_cross_entropy.py | 117 +++++ .../label_smoothed_cross_entropy/__init__.py | 21 + .../configuration.py | 51 ++ .../label_smoothed_cross_entropy.py | 94 ++++ .../criterion/perplexity/__init__.py | 21 + .../criterion/perplexity/configuration.py | 48 ++ .../criterion/perplexity/perplexity.py | 78 +++ .../criterion/transducer/__init__.py | 21 + .../criterion/transducer/configuration.py | 51 ++ .../criterion/transducer/transducer.py | 91 ++++ .../pytorch/openspeech/data/__init__.py | 71 +++ .../pytorch/openspeech/data/audio/__init__.py | 21 + .../pytorch/openspeech/data/audio/augment.py | 193 +++++++ .../openspeech/data/audio/data_loader.py | 130 +++++ .../pytorch/openspeech/data/audio/dataset.py | 219 ++++++++ .../data/audio/filter_bank/__init__.py | 21 + .../data/audio/filter_bank/configuration.py | 79 +++ .../data/audio/filter_bank/filter_bank.py | 71 +++ .../pytorch/openspeech/data/audio/load.py | 57 +++ .../data/audio/melspectrogram/__init__.py | 21 + .../audio/melspectrogram/configuration.py | 79 +++ .../audio/melspectrogram/melspectrogram.py | 72 +++ .../openspeech/data/audio/mfcc/__init__.py | 21 + .../data/audio/mfcc/configuration.py | 79 +++ .../openspeech/data/audio/mfcc/mfcc.py | 77 +++ .../data/audio/spectrogram/__init__.py | 21 + .../data/audio/spectrogram/configuration.py | 80 +++ .../data/audio/spectrogram/spectrogram.py | 65 +++ .../pytorch/openspeech/data/sampler.py | 95 ++++ .../openspeech/data/text/data_loader.py | 87 ++++ .../pytorch/openspeech/data/text/dataset.py | 77 +++ .../pytorch/openspeech/dataclass/__init__.py | 87 ++++ .../openspeech/dataclass/configurations.py | 481 ++++++++++++++++++ .../openspeech/dataclass/initialize.py | 96 ++++ .../pytorch/openspeech/datasets/__init__.py | 60 +++ .../datasets/librispeech/__init__.py | 21 + .../datasets/librispeech/lit_data_module.py | 169 ++++++ .../librispeech/preprocess/__init__.py | 0 .../librispeech/preprocess/character.py | 116 +++++ .../librispeech/preprocess/preprocess.py | 60 +++ .../librispeech/preprocess/subword.py | 92 ++++ .../pytorch/openspeech/decoders/__init__.py | 27 + .../decoders/lstm_attention_decoder.py | 251 +++++++++ .../openspeech/decoders/openspeech_decoder.py | 42 ++ .../decoders/rnn_transducer_decoder.py | 128 +++++ .../decoders/transformer_decoder.py | 275 ++++++++++ .../transformer_transducer_decoder.py | 156 ++++++ .../pytorch/openspeech/encoders/__init__.py | 32 ++ 
.../openspeech/encoders/conformer_encoder.py | 141 +++++ .../openspeech/encoders/contextnet_encoder.py | 123 +++++ .../encoders/convolutional_lstm_encoder.py | 134 +++++ .../convolutional_transformer_encoder.py | 147 ++++++ .../openspeech/encoders/deepspeech2.py | 123 +++++ .../pytorch/openspeech/encoders/jasper.py | 182 +++++++ .../openspeech/encoders/lstm_encoder.py | 128 +++++ .../openspeech/encoders/openspeech_encoder.py | 72 +++ .../pytorch/openspeech/encoders/quartznet.py | 120 +++++ .../encoders/rnn_transducer_encoder.py | 112 ++++ .../encoders/transformer_encoder.py | 205 ++++++++ .../transformer_transducer_encoder.py | 177 +++++++ .../pytorch/openspeech/lm/__init__.py | 24 + .../pytorch/openspeech/lm/lstm_lm.py | 158 ++++++ .../pytorch/openspeech/lm/openspeech_lm.py | 45 ++ .../pytorch/openspeech/lm/transformer_lm.py | 171 +++++++ .../conformer/pytorch/openspeech/metrics.py | 156 ++++++ .../pytorch/openspeech/models/README.md | 27 + .../pytorch/openspeech/models/__init__.py | 208 ++++++++ .../openspeech/models/conformer/__init__.py | 34 ++ .../models/conformer/configurations.py | 368 ++++++++++++++ .../openspeech/models/conformer/model.py | 328 ++++++++++++ .../openspeech/models/contextnet/__init__.py | 32 ++ .../models/contextnet/configurations.py | 215 ++++++++ .../openspeech/models/contextnet/model.py | 255 ++++++++++ .../openspeech/models/deepspeech2/__init__.py | 28 + .../models/deepspeech2/configurations.py | 71 +++ .../openspeech/models/deepspeech2/model.py | 116 +++++ .../openspeech/models/jasper/__init__.py | 30 ++ .../models/jasper/configurations.py | 135 +++++ .../pytorch/openspeech/models/jasper/model.py | 79 +++ .../models/listen_attend_spell/__init__.py | 36 ++ .../listen_attend_spell/configurations.py | 383 ++++++++++++++ .../models/listen_attend_spell/model.py | 324 ++++++++++++ .../openspeech/models/lstm_lm/__init__.py | 28 + .../models/lstm_lm/configurations.py | 71 +++ .../openspeech/models/lstm_lm/model.py | 62 +++ .../openspeech/models/openspeech_ctc_model.py | 174 +++++++ .../openspeech_encoder_decoder_model.py | 225 ++++++++ .../models/openspeech_language_model.py | 168 ++++++ .../openspeech/models/openspeech_model.py | 188 +++++++ .../models/openspeech_transducer_model.py | 287 +++++++++++ .../openspeech/models/quartznet/__init__.py | 32 ++ .../models/quartznet/configurations.py | 193 +++++++ .../openspeech/models/quartznet/model.py | 103 ++++ .../models/rnn_transducer/__init__.py | 24 + .../models/rnn_transducer/configurations.py | 83 +++ .../openspeech/models/rnn_transducer/model.py | 75 +++ .../models/transformer_lm/__init__.py | 24 + .../models/transformer_lm/configurations.py | 71 +++ .../openspeech/models/transformer_lm/model.py | 62 +++ .../models/transformer_transducer/__init__.py | 24 + .../transformer_transducer/configurations.py | 92 ++++ .../models/transformer_transducer/model.py | 119 +++++ .../pytorch/openspeech/modules/__init__.py | 97 ++++ .../openspeech/modules/add_normalization.py | 44 ++ .../openspeech/modules/additive_attention.py | 63 +++ .../openspeech/modules/batchnorm_relu_rnn.py | 84 +++ .../modules/conformer_attention_module.py | 81 +++ .../openspeech/modules/conformer_block.py | 110 ++++ .../modules/conformer_convolution_module.py | 83 +++ .../modules/conformer_feed_forward_module.py | 73 +++ .../openspeech/modules/contextnet_block.py | 199 ++++++++ .../openspeech/modules/contextnet_module.py | 182 +++++++ .../openspeech/modules/conv2d_extractor.py | 107 ++++ .../openspeech/modules/conv2d_subsampling.py | 71 +++ 
.../pytorch/openspeech/modules/conv_base.py | 38 ++ .../openspeech/modules/conv_group_shuffle.py | 42 ++ .../modules/deepspeech2_extractor.py | 73 +++ .../openspeech/modules/depthwise_conv1d.py | 74 +++ .../modules/dot_product_attention.py | 80 +++ .../pytorch/openspeech/modules/glu.py | 38 ++ .../openspeech/modules/jasper_block.py | 109 ++++ .../openspeech/modules/jasper_subblock.py | 115 +++++ .../modules/location_aware_attention.py | 92 ++++ .../pytorch/openspeech/modules/mask.py | 60 +++ .../pytorch/openspeech/modules/mask_conv1d.py | 89 ++++ .../pytorch/openspeech/modules/mask_conv2d.py | 98 ++++ .../modules/multi_head_attention.py | 89 ++++ .../openspeech/modules/pointwise_conv1d.py | 66 +++ .../openspeech/modules/positional_encoding.py | 50 ++ .../modules/positionwise_feed_forward.py | 47 ++ .../openspeech/modules/quartznet_block.py | 104 ++++ .../openspeech/modules/quartznet_subblock.py | 96 ++++ .../modules/relative_multi_head_attention.py | 121 +++++ .../modules/residual_connection_module.py | 48 ++ .../pytorch/openspeech/modules/swish.py | 37 ++ .../modules/time_channel_separable_conv1d.py | 61 +++ .../modules/transformer_embedding.py | 60 +++ .../openspeech/modules/vgg_extractor.py | 81 +++ .../pytorch/openspeech/modules/wrapper.py | 64 +++ .../pytorch/openspeech/optim/__init__.py | 44 ++ .../pytorch/openspeech/optim/adamp.py | 109 ++++ .../pytorch/openspeech/optim/novograd.py | 127 +++++ .../pytorch/openspeech/optim/optimizer.py | 83 +++ .../pytorch/openspeech/optim/radam.py | 118 +++++ .../openspeech/optim/scheduler/__init__.py | 59 +++ .../optim/scheduler/lr_scheduler.py | 47 ++ .../reduce_lr_on_plateau_scheduler.py | 82 +++ .../scheduler/transformer_lr_scheduler.py | 109 ++++ .../optim/scheduler/tri_stage_lr_scheduler.py | 154 ++++++ .../warmup_reduce_lr_on_plateau_scheduler.py | 102 ++++ .../optim/scheduler/warmup_scheduler.py | 82 +++ .../pytorch/openspeech/search/__init__.py | 28 + .../openspeech/search/beam_search_base.py | 134 +++++ .../openspeech/search/beam_search_ctc.py | 84 +++ .../openspeech/search/beam_search_lstm.py | 154 ++++++ .../search/beam_search_rnn_transducer.py | 156 ++++++ .../search/beam_search_transformer.py | 155 ++++++ .../beam_search_transformer_transducer.py | 156 ++++++ .../openspeech/search/ensemble_search.py | 96 ++++ .../pytorch/openspeech/tokenizers/__init__.py | 75 +++ .../openspeech/tokenizers/aishell/__init__.py | 21 + .../tokenizers/aishell/character.py | 138 +++++ .../tokenizers/ksponspeech/__init__.py | 21 + .../tokenizers/ksponspeech/character.py | 134 +++++ .../tokenizers/ksponspeech/grapheme.py | 134 +++++ .../tokenizers/ksponspeech/subword.py | 100 ++++ .../tokenizers/librispeech/__init__.py | 21 + .../tokenizers/librispeech/character.py | 135 +++++ .../tokenizers/librispeech/subword.py | 95 ++++ .../openspeech/tokenizers/tokenizer.py | 44 ++ .../conformer/pytorch/openspeech/utils.py | 211 ++++++++ .../conformer/pytorch/requirements.txt | 6 + .../conformer/pytorch/run_training.sh | 28 + .../pytorch/test/test_build_conformer.py | 32 ++ .../conformer/pytorch/test/test_dataloader.py | 43 ++ .../conformer/pytorch/train.py | 320 ++++++++++++ .../conformer/pytorch/utils/__init__.py | 24 + .../conformer/pytorch/utils/config.py | 19 + .../conformer/pytorch/utils/dist.py | 203 ++++++++ .../conformer/pytorch/utils/logger.py | 1 + .../conformer/pytorch/utils/misc.py | 20 + tests/executables/conformer/init_torch.sh | 50 ++ ...in_conformer_librispeech_dist_1x8_torch.sh | 31 ++ tests/executables/ssd/init_torch.sh | 63 +++ 
tests/executables/ssd/train_ssd_amp_torch.sh | 25 + 199 files changed, 19651 insertions(+) create mode 100644 audio/speech_recognition/conformer/pytorch/bind_pyt.py create mode 100644 audio/speech_recognition/conformer/pytorch/configs/conformer_lstm.json create mode 100644 audio/speech_recognition/conformer/pytorch/dataloader.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/README.md create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/cross_entropy/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/cross_entropy/configuration.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/cross_entropy/cross_entropy.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/ctc/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/ctc/configuration.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/ctc/ctc.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/joint_ctc_cross_entropy/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/joint_ctc_cross_entropy/configuration.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/joint_ctc_cross_entropy/joint_ctc_cross_entropy.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/label_smoothed_cross_entropy/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/label_smoothed_cross_entropy/configuration.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/label_smoothed_cross_entropy/label_smoothed_cross_entropy.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/perplexity/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/perplexity/configuration.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/perplexity/perplexity.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/transducer/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/transducer/configuration.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/criterion/transducer/transducer.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/augment.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/data_loader.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/dataset.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/filter_bank/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/filter_bank/configuration.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/filter_bank/filter_bank.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/load.py 
create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/melspectrogram/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/melspectrogram/configuration.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/melspectrogram/melspectrogram.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/mfcc/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/mfcc/configuration.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/mfcc/mfcc.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/spectrogram/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/spectrogram/configuration.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/audio/spectrogram/spectrogram.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/sampler.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/text/data_loader.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/data/text/dataset.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/dataclass/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/dataclass/configurations.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/dataclass/initialize.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/datasets/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/lit_data_module.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/character.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/preprocess.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/subword.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/decoders/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/decoders/lstm_attention_decoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/decoders/openspeech_decoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/decoders/rnn_transducer_decoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/decoders/transformer_decoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/decoders/transformer_transducer_decoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/encoders/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/encoders/conformer_encoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/encoders/contextnet_encoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/encoders/convolutional_lstm_encoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/encoders/convolutional_transformer_encoder.py create mode 100644 
audio/speech_recognition/conformer/pytorch/openspeech/encoders/deepspeech2.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/encoders/jasper.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/encoders/lstm_encoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/encoders/openspeech_encoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/encoders/quartznet.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/encoders/rnn_transducer_encoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/encoders/transformer_encoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/encoders/transformer_transducer_encoder.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/lm/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/lm/lstm_lm.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/lm/openspeech_lm.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/lm/transformer_lm.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/metrics.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/README.md create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/conformer/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/conformer/configurations.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/conformer/model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/contextnet/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/contextnet/configurations.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/contextnet/model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/deepspeech2/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/deepspeech2/configurations.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/deepspeech2/model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/jasper/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/jasper/configurations.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/jasper/model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/listen_attend_spell/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/listen_attend_spell/configurations.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/listen_attend_spell/model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/lstm_lm/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/lstm_lm/configurations.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/lstm_lm/model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_ctc_model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_encoder_decoder_model.py create mode 100644 
audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_language_model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_transducer_model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/quartznet/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/quartznet/configurations.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/quartznet/model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/rnn_transducer/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/rnn_transducer/configurations.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/rnn_transducer/model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_lm/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_lm/configurations.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_lm/model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_transducer/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_transducer/configurations.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_transducer/model.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/add_normalization.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/additive_attention.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/batchnorm_relu_rnn.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_attention_module.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_block.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_convolution_module.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_feed_forward_module.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/contextnet_block.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/contextnet_module.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/conv2d_extractor.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/conv2d_subsampling.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/conv_base.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/conv_group_shuffle.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/deepspeech2_extractor.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/depthwise_conv1d.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/dot_product_attention.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/glu.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/jasper_block.py create 
mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/jasper_subblock.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/location_aware_attention.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/mask.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/mask_conv1d.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/mask_conv2d.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/multi_head_attention.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/pointwise_conv1d.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/positional_encoding.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/positionwise_feed_forward.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/quartznet_block.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/quartznet_subblock.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/relative_multi_head_attention.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/residual_connection_module.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/swish.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/time_channel_separable_conv1d.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/transformer_embedding.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/vgg_extractor.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/modules/wrapper.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/optim/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/optim/adamp.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/optim/novograd.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/optim/optimizer.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/optim/radam.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/lr_scheduler.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/reduce_lr_on_plateau_scheduler.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/transformer_lr_scheduler.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/tri_stage_lr_scheduler.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/warmup_reduce_lr_on_plateau_scheduler.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/warmup_scheduler.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/search/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_base.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_ctc.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_lstm.py create mode 100644 
audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_rnn_transducer.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_transformer.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_transformer_transducer.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/search/ensemble_search.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/aishell/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/aishell/character.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/character.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/grapheme.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/subword.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/librispeech/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/librispeech/character.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/librispeech/subword.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/tokenizer.py create mode 100644 audio/speech_recognition/conformer/pytorch/openspeech/utils.py create mode 100644 audio/speech_recognition/conformer/pytorch/requirements.txt create mode 100644 audio/speech_recognition/conformer/pytorch/run_training.sh create mode 100644 audio/speech_recognition/conformer/pytorch/test/test_build_conformer.py create mode 100644 audio/speech_recognition/conformer/pytorch/test/test_dataloader.py create mode 100644 audio/speech_recognition/conformer/pytorch/train.py create mode 100644 audio/speech_recognition/conformer/pytorch/utils/__init__.py create mode 100644 audio/speech_recognition/conformer/pytorch/utils/config.py create mode 100644 audio/speech_recognition/conformer/pytorch/utils/dist.py create mode 100644 audio/speech_recognition/conformer/pytorch/utils/logger.py create mode 100644 audio/speech_recognition/conformer/pytorch/utils/misc.py create mode 100644 tests/executables/conformer/init_torch.sh create mode 100644 tests/executables/conformer/train_conformer_librispeech_dist_1x8_torch.sh create mode 100644 tests/executables/ssd/init_torch.sh create mode 100644 tests/executables/ssd/train_ssd_amp_torch.sh diff --git a/audio/speech_recognition/conformer/pytorch/bind_pyt.py b/audio/speech_recognition/conformer/pytorch/bind_pyt.py new file mode 100644 index 000000000..1a4e44cfa --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/bind_pyt.py @@ -0,0 +1,137 @@ +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import subprocess +import os +from argparse import ArgumentParser + + +def parse_args(): + parser = ArgumentParser(description="PyTorch distributed training launch " + "helper utility that will spawn up " + "multiple distributed processes") + # Optional arguments for the launch helper + parser.add_argument("--nnodes", type=int, default=1, + help="The number of nodes to use for distributed " + "training") + parser.add_argument("--nproc_per_node", type=int, default=1, + help="The number of processes to launch on each node, " + "for GPU training, this is recommended to be set " + "to the number of GPUs in your system so that " + "each process can be bound to a single GPU.") + parser.add_argument("--node_rank", type=int, default=0, + help="The rank of the node for multi-node distributed " + "training") + parser.add_argument("--master_addr", default="127.0.0.1", type=str, + help="Master node (rank 0)'s address, should be either " + "the IP address or the hostname of node 0, for " + "single node multi-proc training, the " + "--master_addr can simply be 127.0.0.1") + parser.add_argument("--master_port", default=29500, type=int, + help="Master node (rank 0)'s free port that needs to " + "be used for communication during distributed " + "training") + parser.add_argument('--no_hyperthreads', action='store_true', + help='Flag to disable binding to hyperthreads') + parser.add_argument('--no_membind', action='store_true', + help='Flag to disable memory binding') + # non-optional arguments for binding + parser.add_argument("--nsockets_per_node", type=int, required=True, + help="Number of CPU sockets on a node") + parser.add_argument("--ncores_per_socket", type=int, required=True, + help="Number of CPU cores per socket") + # positional + parser.add_argument("training_script", type=str, + help="The full path to the single GPU training " + "program/script to be launched in parallel, " + "followed by all the arguments for the " + "training script") + + args, unparsed = parser.parse_known_args() + args.training_script_args = unparsed + + return args + + +def get_cuda_visible_devices(gpus=1): + if "CUDA_VISIBLE_DEVICES" in os.environ: + return os.environ['CUDA_VISIBLE_DEVICES'] + return ','.join([str(gpu_id) for gpu_id in range(gpus)]) + + +def main(): + args = parse_args() + + # set PyTorch distributed related environment variables + current_env = os.environ.copy() + current_env["MASTER_ADDR"] = args.master_addr + current_env["MASTER_PORT"] = str(args.master_port) + current_env["NODE_RANK"] = str(args.node_rank) + current_env["CUDA_VISIBLE_DEVICES"] = get_cuda_visible_devices(args.nproc_per_node) + + gpu_ids = current_env["CUDA_VISIBLE_DEVICES"].split(',') + args.nproc_per_node = len(gpu_ids) + + # world size in terms of number of processes + dist_world_size = args.nproc_per_node * args.nnodes + current_env["WORLD_SIZE"] = str(dist_world_size) + + # variables for numactl binding + NSOCKETS = args.nsockets_per_node + NGPUS_PER_SOCKET = (args.nproc_per_node // args.nsockets_per_node) + \ + (1 if (args.nproc_per_node % args.nsockets_per_node) else 0) + NCORES_PER_GPU = args.ncores_per_socket // NGPUS_PER_SOCKET + + processes = [] + + for local_rank in range(0, args.nproc_per_node): + # each process's rank + dist_rank = args.nproc_per_node * args.node_rank + local_rank + current_env["RANK"] = str(dist_rank) + current_env["LOCAL_RANK"] = str(local_rank) + + # form numactl binding command + 
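To make the core-binding arithmetic easier to follow, here is a small worked example (not part of the patch) that evaluates the same expressions the launcher uses, with assumed hardware values: 2 CPU sockets, 32 cores per socket, and 8 processes (one per GPU). The trailing comments show what the formulas produce for ranks 0 and 7.

```python
# Illustration only: the binding arithmetic from bind_pyt.py, with assumed values.
nproc_per_node = 8
nsockets_per_node = 2
ncores_per_socket = 32

NSOCKETS = nsockets_per_node
NGPUS_PER_SOCKET = (nproc_per_node // nsockets_per_node) + \
    (1 if (nproc_per_node % nsockets_per_node) else 0)    # -> 4 GPUs per socket
NCORES_PER_GPU = ncores_per_socket // NGPUS_PER_SOCKET    # -> 8 cores per GPU

for local_rank in range(nproc_per_node):
    # Physical cores for this rank plus the matching hyperthread siblings,
    # mirroring the cpu_ranges list built just below in main().
    cpu_ranges = [local_rank * NCORES_PER_GPU,
                  (local_rank + 1) * NCORES_PER_GPU - 1,
                  local_rank * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS),
                  (local_rank + 1) * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS) - 1]
    membind = local_rank // NGPUS_PER_SOCKET
    print("rank {}: --physcpubind={}-{},{}-{} --membind={}".format(local_rank, *cpu_ranges, membind))

# e.g. rank 0: --physcpubind=0-7,64-71 --membind=0
#      rank 7: --physcpubind=56-63,120-127 --membind=1
```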
cpu_ranges = [local_rank * NCORES_PER_GPU, + (local_rank + 1) * NCORES_PER_GPU - 1, + local_rank * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS), + (local_rank + 1) * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS) - 1] + + numactlargs = [] + if args.no_hyperthreads: + numactlargs += [ "--physcpubind={}-{}".format(*cpu_ranges[0:2]) ] + else: + numactlargs += [ "--physcpubind={}-{},{}-{}".format(*cpu_ranges) ] + + if not args.no_membind: + memnode = local_rank // NGPUS_PER_SOCKET + numactlargs += [ "--membind={}".format(memnode) ] + + # spawn the processes + cmd = [ "/usr/bin/numactl" ] \ + + numactlargs \ + + [ sys.executable, + "-u", + args.training_script, + "--local_rank={}".format(local_rank) + ] \ + + args.training_script_args + + process = subprocess.Popen(cmd, env=current_env) + processes.append(process) + + for process in processes: + process.wait() + + +if __name__ == "__main__": + main() diff --git a/audio/speech_recognition/conformer/pytorch/configs/conformer_lstm.json b/audio/speech_recognition/conformer/pytorch/configs/conformer_lstm.json new file mode 100644 index 000000000..87b5a35a3 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/configs/conformer_lstm.json @@ -0,0 +1,98 @@ +{ + "audio": { + "name": "fbank", + "sample_rate": 16000, + "frame_length": 20.0, + "frame_shift": 10.0, + "del_silence": false, + "num_mels": 80, + "apply_spec_augment": true, + "apply_noise_augment": false, + "apply_time_stretch_augment": false, + "apply_joining_augment": false + }, + "augment": { + "apply_spec_augment": false, + "apply_noise_augment": false, + "apply_joining_augment": false, + "apply_time_stretch_augment": false, + "freq_mask_para": 27, + "freq_mask_num": 2, + "time_mask_num": 4, + "noise_dataset_dir": null, + "noise_level": 0.7, + "time_stretch_min_rate": 0.7, + "time_stretch_max_rate": 1.4 + }, + "dataset": { + "dataset": "librispeech", + "dataset_path": "datasets/LibriSpeech", + "train_parts": ["train-clean-100"], + "eval_parts": ["dev-clean"], + "dataset_download": false, + "train_manifest_file": "datasets/LibriSpeech/libri_subword_train_manifest.txt", + "eval_manifest_file": "datasets/LibriSpeech/libri_subword_dev_manifest.txt" + }, + "criterion": { + "criterion_name": "cross_entropy", + "reduction": "mean" + }, + "lr_scheduler": { + "lr": 0.0001, + "scheduler_name": "warmup_reduce_lr_on_plateau", + "lr_patience": 1, + "lr_factor": 0.3, + "peak_lr": 0.0001, + "init_lr": 1e-10, + "warmup_steps": 4000 + }, + "model": { + "model_name": "conformer_lstm", + "encoder_dim": 512, + "num_encoder_layers": 17, + "num_attention_heads": 8, + "feed_forward_expansion_factor": 4, + "conv_expansion_factor": 2, + "input_dropout_p": 0.1, + "feed_forward_dropout_p": 0.1, + "attention_dropout_p": 0.1, + "conv_dropout_p": 0.1, + "conv_kernel_size": 31, + "half_step_residual": true, + "num_decoder_layers": 2, + "decoder_dropout_p": 0.1, + "max_length": 128, + "teacher_forcing_ratio": 1.0, + "rnn_type": "lstm", + "decoder_attn_mechanism": "loc", + "optimizer": "adam" + }, + "trainer": { + "seed": 1, + "accelerator": "ddp", + "accumulate_grad_batches": 1, + "num_workers": 4, + "batch_size": 8, + "check_val_every_n_epoch": 1, + "gradient_clip_val": 5.0, + "logger": "wandb", + "max_epochs": 50, + "save_checkpoint_n_steps": 10000, + "auto_scale_batch_size": "binsearch", + "sampler": "random", + "name": "gpu", + "device": "gpu", + "use_cuda": true, + "auto_select_gpus": true + }, + "tokenizer": { + "sos_token": "", + "eos_token": "", + "pad_token": "", + 
"blank_token": "", + "encoding": "utf-8", + "unit": "libri_subword", + "vocab_size": 1023, + "vocab_path": "model_zoo/sentencepieces" + } +} diff --git a/audio/speech_recognition/conformer/pytorch/dataloader.py b/audio/speech_recognition/conformer/pytorch/dataloader.py new file mode 100644 index 000000000..d3829b44c --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/dataloader.py @@ -0,0 +1,120 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + +import numpy as np +import random + +import torch +from torch.utils.data import DataLoader, RandomSampler, SequentialSampler +from torch.utils.data.distributed import DistributedSampler + + +def get_sampler(dataset, sampler_type): + return dict( + random=RandomSampler, + sequential=SequentialSampler, + distributed=DistributedSampler + )[sampler_type.lower()](dataset) + + +class WorkerInitializer(object): + + _instance = None + + def __init__(self, seed): + self.seed = seed + + def __call__(self, idx): + np.random.seed(seed=self.seed + idx) + random.seed(self.seed + idx) + + @classmethod + def default(cls, seed=0): + if cls._instance is None: + cls._instance = cls(seed) + return cls._instance + + +# sampler: Random | Sequential | Distributed +def create_dataloader( + dataset, + batch_size, + worker_init_fn: WorkerInitializer=None, + sampler_type='Random', + pin_memory=True +): + if worker_init_fn is None: + worker_init_fn = WorkerInitializer.default() + sampler = get_sampler(dataset, sampler_type) + dataloader = DataLoader( + dataset, + sampler=sampler, + batch_size=batch_size, + num_workers=0 if batch_size <= 8 else 4, + worker_init_fn=worker_init_fn, + pin_memory=pin_memory, + collate_fn=padding_collate_fn + ) + + return dataloader + + +def padding_collate_fn(batch, pad_id: int = 0): + r""" + Functions that pad to the maximum sequence length + + Args: + batch (tuple): tuple contains input and target tensors + pad_id (int): identification of pad token + + Returns: + seqs (torch.FloatTensor): tensor contains input sequences. + target (torch.IntTensor): tensor contains target sequences. 
+ seq_lengths (torch.IntTensor): tensor contains input sequence lengths + target_lengths (torch.IntTensor): tensor contains target sequence lengths + """ + def seq_length_(p): + return len(p[0]) + + def target_length_(p): + return len(p[1]) + + # sort by sequence length for rnn.pack_padded_sequence() + batch = sorted(batch, key=lambda sample: sample[0].size(0), reverse=True) + + seq_lengths = [len(s[0]) for s in batch] + target_lengths = [len(s[1]) - 1 for s in batch] + + max_seq_sample = max(batch, key=seq_length_)[0] + max_target_sample = max(batch, key=target_length_)[1] + + max_seq_size = max_seq_sample.size(0) + max_target_size = len(max_target_sample) + + feat_size = max_seq_sample.size(1) + batch_size = len(batch) + + seqs = torch.zeros(batch_size, max_seq_size, feat_size) + + targets = torch.zeros(batch_size, max_target_size).to(torch.long) + targets.fill_(pad_id) + + for x in range(batch_size): + sample = batch[x] + tensor = sample[0] + target = sample[1] + seq_length = tensor.size(0) + + seqs[x].narrow(0, 0, seq_length).copy_(tensor) + targets[x].narrow(0, 0, len(target)).copy_(torch.LongTensor(target)) + + seq_lengths = torch.IntTensor(seq_lengths) + target_lengths = torch.IntTensor(target_lengths) + + return seqs, targets, seq_lengths, target_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/README.md b/audio/speech_recognition/conformer/pytorch/openspeech/README.md new file mode 100644 index 000000000..2abcfa9ca --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/README.md @@ -0,0 +1,248 @@ +## OpenSpeech Structure. + +``` +. +├── README.md +├── __init__.py +├── criterion +│   ├── __init__.py +│   ├── cross_entropy +│   │   ├── __init__.py +│   │   ├── configuration.py +│   │   └── cross_entropy.py +│   ├── ctc +│   │   ├── __init__.py +│   │   ├── configuration.py +│   │   └── ctc.py +│   ├── joint_ctc_cross_entropy +│   │   ├── __init__.py +│   │   ├── configuration.py +│   │   └── joint_ctc_cross_entropy.py +│   ├── label_smoothed_cross_entropy +│   │   ├── __init__.py +│   │   ├── configuration.py +│   │   └── label_smoothed_cross_entropy.py +│   ├── perplexity +│   │   ├── __init__.py +│   │   ├── configuration.py +│   │   └── perplexity.py +│   └── transducer +│   ├── __init__.py +│   ├── configuration.py +│   └── transducer.py +├── data +│   ├── __init__.py +│   ├── audio +│   │   ├── __init__.py +│   │   ├── augment.py +│   │   ├── data_loader.py +│   │   ├── dataset.py +│   │   ├── filter_bank +│   │   │   ├── __init__.py +│   │   │   ├── configuration.py +│   │   │   └── filter_bank.py +│   │   ├── load.py +│   │   ├── melspectrogram +│   │   │   ├── __init__.py +│   │   │   ├── configuration.py +│   │   │   └── melspectrogram.py +│   │   ├── mfcc +│   │   │   ├── __init__.py +│   │   │   ├── configuration.py +│   │   │   └── mfcc.py +│   │   └── spectrogram +│   │   ├── __init__.py +│   │   ├── configuration.py +│   │   └── spectrogram.py +│   ├── sampler.py +│   └── text +│   ├── data_loader.py +│   └── dataset.py +├── dataclass +│   ├── __init__.py +│   ├── configurations.py +│   └── initialize.py +├── datasets +│   ├── README.md +│   ├── __init__.py +│   ├── aishell +│   │   ├── __init__.py +│   │   ├── lit_data_module.py +│   │   └── preprocess.py +│   ├── ksponspeech +│   │   ├── __init__.py +│   │   ├── lit_data_module.py +│   │   └── preprocess +│   │   ├── __init__.py +│   │   ├── character.py +│   │   ├── grapheme.py +│   │   ├── preprocess.py +│   │   └── subword.py +│   ├── language_model +│   │   ├── 
__init__.py +│   │   └── lit_data_module.py +│   └── librispeech +│   ├── __init__.py +│   ├── lit_data_module.py +│   └── preprocess +│   ├── __init__.py +│   ├── character.py +│   ├── preprocess.py +│   └── subword.py +├── decoders +│   ├── __init__.py +│   ├── lstm_attention_decoder.py +│   ├── openspeech_decoder.py +│   ├── rnn_transducer_decoder.py +│   ├── transformer_decoder.py +│   └── transformer_transducer_decoder.py +├── encoders +│   ├── __init__.py +│   ├── conformer_encoder.py +│   ├── contextnet_encoder.py +│   ├── convolutional_lstm_encoder.py +│   ├── convolutional_transformer_encoder.py +│   ├── deepspeech2.py +│   ├── jasper.py +│   ├── lstm_encoder.py +│   ├── openspeech_encoder.py +│   ├── quartznet.py +│   ├── rnn_transducer_encoder.py +│   ├── transformer_encoder.py +│   └── transformer_transducer_encoder.py +├── lm +│   ├── __init__.py +│   ├── lstm_lm.py +│   ├── openspeech_lm.py +│   └── transformer_lm.py +├── metrics.py +├── models +│   ├── README.md +│   ├── __init__.py +│   ├── conformer +│   │   ├── __init__.py +│   │   ├── configurations.py +│   │   └── model.py +│   ├── contextnet +│   │   ├── __init__.py +│   │   ├── configurations.py +│   │   └── model.py +│   ├── deepspeech2 +│   │   ├── __init__.py +│   │   ├── configurations.py +│   │   └── model.py +│   ├── jasper +│   │   ├── __init__.py +│   │   ├── configurations.py +│   │   └── model.py +│   ├── listen_attend_spell +│   │   ├── __init__.py +│   │   ├── configurations.py +│   │   └── model.py +│   ├── lstm_lm +│   │   ├── __init__.py +│   │   ├── configurations.py +│   │   └── model.py +│   ├── openspeech_ctc_model.py +│   ├── openspeech_encoder_decoder_model.py +│   ├── openspeech_language_model.py +│   ├── openspeech_model.py +│   ├── openspeech_transducer_model.py +│   ├── quartznet +│   │   ├── __init__.py +│   │   ├── configurations.py +│   │   └── model.py +│   ├── rnn_transducer +│   │   ├── __init__.py +│   │   ├── configurations.py +│   │   └── model.py +│   ├── transformer +│   │   ├── __init__.py +│   │   ├── configurations.py +│   │   └── model.py +│   ├── transformer_lm +│   │   ├── __init__.py +│   │   ├── configurations.py +│   │   └── model.py +│   └── transformer_transducer +│   ├── __init__.py +│   ├── configurations.py +│   └── model.py +├── modules +│   ├── __init__.py +│   ├── add_normalization.py +│   ├── additive_attention.py +│   ├── batchnorm_relu_rnn.py +│   ├── conformer_attention_module.py +│   ├── conformer_block.py +│   ├── conformer_convolution_module.py +│   ├── conformer_feed_forward_module.py +│   ├── contextnet_block.py +│   ├── contextnet_module.py +│   ├── conv2d_extractor.py +│   ├── conv2d_subsampling.py +│   ├── conv_base.py +│   ├── conv_group_shuffle.py +│   ├── deepspeech2_extractor.py +│   ├── depthwise_conv1d.py +│   ├── dot_product_attention.py +│   ├── glu.py +│   ├── jasper_block.py +│   ├── jasper_subblock.py +│   ├── location_aware_attention.py +│   ├── mask.py +│   ├── mask_conv1d.py +│   ├── mask_conv2d.py +│   ├── multi_head_attention.py +│   ├── pointwise_conv1d.py +│   ├── positional_encoding.py +│   ├── positionwise_feed_forward.py +│   ├── quartznet_block.py +│   ├── quartznet_subblock.py +│   ├── relative_multi_head_attention.py +│   ├── residual_connection_module.py +│   ├── swish.py +│   ├── time_channel_separable_conv1d.py +│   ├── transformer_embedding.py +│   ├── vgg_extractor.py +│   └── wrapper.py +├── optim +│   ├── __init__.py +│   ├── adamp.py +│   ├── novograd.py +│   ├── optimizer.py +│   ├── radam.py +│   └── scheduler +│   ├── 
__init__.py +│   ├── lr_scheduler.py +│   ├── reduce_lr_on_plateau_scheduler.py +│   ├── transformer_lr_scheduler.py +│   ├── tri_stage_lr_scheduler.py +│   ├── warmup_reduce_lr_on_plateau_scheduler.py +│   └── warmup_scheduler.py +├── search +│   ├── __init__.py +│   ├── beam_search_base.py +│   ├── beam_search_ctc.py +│   ├── beam_search_lstm.py +│   ├── beam_search_rnn_transducer.py +│   ├── beam_search_transformer.py +│   ├── beam_search_transformer_transducer.py +│   └── ensemble_search.py +├── tokenizers +│   ├── __init__.py +│   ├── aishell +│   │   ├── __init__.py +│   │   └── character.py +│   ├── ksponspeech +│   │   ├── __init__.py +│   │   ├── character.py +│   │   ├── grapheme.py +│   │   └── subword.py +│   ├── librispeech +│   │   ├── __init__.py +│   │   ├── character.py +│   │   └── subword.py +│   └── tokenizer.py +└── utils.py +``` diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/__init__.py new file mode 100644 index 000000000..1b66c5495 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/__init__.py @@ -0,0 +1,33 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
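As a quick aside on the data pipeline: dataloader.py, added earlier in this patch, builds a DataLoader whose padding_collate_fn sorts the batch by input length, pads features and targets to the longest sample, and records target lengths as len(tokens) - 1. The sketch below is illustrative only; it assumes the conformer/pytorch directory is on PYTHONPATH so that dataloader is importable, and the toy dataset is hypothetical.

```python
import torch
from torch.utils.data import Dataset

from dataloader import create_dataloader, WorkerInitializer  # from this patch


class ToySpeechDataset(Dataset):
    """Yields (features [T, 80], token_id_list) pairs of varying length."""
    def __init__(self):
        self.lengths = [37, 52, 41]

    def __len__(self):
        return len(self.lengths)

    def __getitem__(self, idx):
        feats = torch.randn(self.lengths[idx], 80)            # fbank-like features
        tokens = torch.randint(1, 100, (idx + 5,)).tolist()   # variable-length targets
        return feats, tokens


loader = create_dataloader(
    ToySpeechDataset(),
    batch_size=3,
    worker_init_fn=WorkerInitializer.default(seed=1),
    sampler_type='sequential',
    pin_memory=False,   # keep the example CPU-friendly
)

seqs, targets, seq_lengths, target_lengths = next(iter(loader))
print(seqs.shape)        # torch.Size([3, 52, 80]): padded to the longest utterance
print(seq_lengths)       # tensor([52, 41, 37], dtype=torch.int32): length-sorted order
print(target_lengths)    # len(tokens) - 1 per sample, as computed by padding_collate_fn
```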
+ +import openspeech.criterion +import openspeech.datasets +import openspeech.data +import openspeech.dataclass +import openspeech.encoders +import openspeech.decoders +import openspeech.models +import openspeech.search +import openspeech.optim +import openspeech.tokenizers +import openspeech.metrics diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/__init__.py new file mode 100644 index 000000000..0e9e9c734 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/__init__.py @@ -0,0 +1,103 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import importlib + +CRITERION_REGISTRY = dict() +CRITERION_DATACLASS_REGISTRY = dict() + + +def register_criterion(name: str, dataclass=None): + r""" + New criterion types can be added to OpenSpeech with the :func:`register_criterion` function decorator. + + For example:: + @register_criterion('label_smoothed_cross_entropy') + class LabelSmoothedCrossEntropyLoss(nn.Module): + (...) + + .. note:: All criterion must implement the :class:`cls.__name__` interface. 
+ + Args: + name (str): the name of the criterion + dataclass (Optional, str): the dataclass of the criterion (default: None) + """ + + def register_criterion_cls(cls): + if name in CRITERION_REGISTRY: + raise ValueError(f"Cannot register duplicate criterion ({name})") + + CRITERION_REGISTRY[name] = cls + + cls.__dataclass = dataclass + if dataclass is not None: + if name in CRITERION_DATACLASS_REGISTRY: + raise ValueError(f"Cannot register duplicate criterion ({name})") + CRITERION_DATACLASS_REGISTRY[name] = dataclass + + return cls + + return register_criterion_cls + + +criterion_dir = os.path.dirname(__file__) +for file in os.listdir(criterion_dir): + if os.path.isdir(os.path.join(criterion_dir, file)) and not file.startswith('__'): + for subfile in os.listdir(os.path.join(criterion_dir, file)): + path = os.path.join(criterion_dir, file, subfile) + if subfile.endswith(".py"): + python_file = subfile[: subfile.find(".py")] if subfile.endswith(".py") else subfile + module = importlib.import_module(f"openspeech.criterion.{file}.{python_file}") + continue + + path = os.path.join(criterion_dir, file) + if file.endswith(".py"): + criterion_name = file[: file.find(".py")] if file.endswith(".py") else file + module = importlib.import_module(f"openspeech.criterion.{criterion_name}") + + +from .cross_entropy.configuration import CrossEntropyLossConfigs +from .ctc.configuration import CTCLossConfigs +from .joint_ctc_cross_entropy.configuration import JointCTCCrossEntropyLossConfigs +from .label_smoothed_cross_entropy.configuration import LabelSmoothedCrossEntropyLossConfigs +from .transducer.configuration import TransducerLossConfigs +from .perplexity.perplexity import PerplexityLossConfigs +from .cross_entropy.cross_entropy import CrossEntropyLoss +from .ctc.ctc import CTCLoss +from .joint_ctc_cross_entropy.joint_ctc_cross_entropy import JointCTCCrossEntropyLoss +from .label_smoothed_cross_entropy.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyLoss +from .transducer.transducer import TransducerLoss +from .perplexity.perplexity import Perplexity + +__all__ = [ + "CrossEntropyLossConfigs", + "CTCLossConfigs", + "JointCTCCrossEntropyLossConfigs", + "LabelSmoothedCrossEntropyLossConfigs", + "TransducerLossConfigs", + "CrossEntropyLoss", + "CTCLoss", + "JointCTCCrossEntropyLoss", + "LabelSmoothedCrossEntropyLoss", + "TransducerLoss", +] diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/cross_entropy/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/cross_entropy/__init__.py new file mode 100644 index 000000000..9a083c67f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/cross_entropy/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
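The registry above is what ties the criterion_name strings in the configs to their implementations. The following is a minimal sketch, not from the patch, of registering a custom criterion: MyLoss and MyLossConfigs are hypothetical names used only for illustration, and the snippet assumes the openspeech package added by this patch is importable.

```python
from dataclasses import dataclass, field

import torch.nn as nn

from openspeech.criterion import (
    CRITERION_DATACLASS_REGISTRY,
    CRITERION_REGISTRY,
    register_criterion,
)


@dataclass
class MyLossConfigs:
    criterion_name: str = field(default="my_loss")
    reduction: str = field(default="mean")


@register_criterion("my_loss", dataclass=MyLossConfigs)
class MyLoss(nn.Module):
    """Thin wrapper around nn.CrossEntropyLoss, following the sibling criteria."""

    def __init__(self, configs, tokenizer) -> None:
        super().__init__()
        self.inner = nn.CrossEntropyLoss(
            reduction=configs.criterion.reduction,  # reads the `criterion` config section
            ignore_index=tokenizer.pad_id,
        )

    def forward(self, logits, targets):
        return self.inner(logits.view(-1, logits.size(-1)), targets.view(-1))


# The decorator populates both registries, keyed by the criterion name.
assert CRITERION_REGISTRY["my_loss"] is MyLoss
assert CRITERION_DATACLASS_REGISTRY["my_loss"] is MyLossConfigs
```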
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/cross_entropy/configuration.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/cross_entropy/configuration.py new file mode 100644 index 000000000..39464bcbb --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/cross_entropy/configuration.py @@ -0,0 +1,48 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from ...dataclass.configurations import OpenspeechDataclass + + +@dataclass +class CrossEntropyLossConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of a + :class: `~openspeech.criterion.CrossEntropyLoss`. + + It is used to initiated an `CrossEntropyLoss` criterion. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. 
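Quick aside, not part of the patch: the defaults of this dataclass mirror the "criterion" block of configs/conformer_lstm.json shown earlier, as the short check below illustrates (it assumes the openspeech package from this patch is importable).

```python
from openspeech.criterion.cross_entropy.configuration import CrossEntropyLossConfigs

cfg = CrossEntropyLossConfigs()
print(cfg.criterion_name, cfg.reduction)   # cross_entropy mean
# configs/conformer_lstm.json:
#   "criterion": {"criterion_name": "cross_entropy", "reduction": "mean"}
```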
+ + Configurations: + criterion_name (str): name of criterion (default: cross_entropy) + reduction (str): reduction method of criterion (default: mean) + """ + criterion_name: str = field( + default="cross_entropy", metadata={"help": "Criterion name for training"} + ) + reduction: str = field( + default="mean", metadata={"help": "Reduction method of criterion"} + ) + diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/cross_entropy/cross_entropy.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/cross_entropy/cross_entropy.py new file mode 100644 index 000000000..0355c5672 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/cross_entropy/cross_entropy.py @@ -0,0 +1,126 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor + +from .. import register_criterion +from ..cross_entropy.configuration import CrossEntropyLossConfigs +from ...tokenizers.tokenizer import Tokenizer + + +@register_criterion("cross_entropy", dataclass=CrossEntropyLossConfigs) +class CrossEntropyLoss(nn.Module): + r""" + The negative log likelihood loss. It is useful to train a classification + problem with `C` classes. + + If provided, the optional argument :attr:`weight` should be a 1D Tensor assigning + weight to each of the classes. This is particularly useful when you have an + unbalanced training set. + + The `input` given through a forward call is expected to contain + log-probabilities of each class. `input` has to be a Tensor of size either + :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)` + with :math:`K \geq 1` for the `K`-dimensional case (described later). + + Obtaining log-probabilities in a neural network is easily achieved by + adding a `LogSoftmax` layer in the last layer of your network. + You may use `CrossEntropyLoss` instead, if you prefer not to add an extra + layer. + + The `target` that this loss expects should be a class index in the range :math:`[0, C-1]` + where `C = number of classes`; if `ignore_index` is specified, this loss also accepts + this class index (this index may not necessarily be in the class range). + + The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: + + .. 
math:: + \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad + l_n = - w_{y_n} x_{n,y_n}, \quad + w_{c} = \text{weight}[c] \cdot \mathbb{1}\{c \not= \text{ignore\_index}\}, + + where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight, and + :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` + (default ``'mean'``), then + + .. math:: + \ell(x, y) = \begin{cases} + \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n}} l_n, & + \text{if reduction} = \text{`mean';}\\ + \sum_{n=1}^N l_n, & + \text{if reduction} = \text{`sum'.} + \end{cases} + + Can also be used for higher dimension inputs, such as 2D images, by providing + an input of size :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`, + where :math:`K` is the number of dimensions, and a target of appropriate shape + (see below). In the case of images, it computes NLL loss per-pixel. + + Args: + configs (DictConfig): hydra configuration set + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: logits, targets + - logits (torch.FloatTensor): probability distribution value from model and it has a logarithm shape. + The `FloatTensor` of size ``(batch, seq_length, num_classes)`` + - targets (torch.LongTensor): ground-truth encoded to integers which directly point a word in label. + The `LongTensor` of size ``(batch, target_length)`` + + Returns: loss + * loss (float): loss for training + + Examples:: + + >>> B, T1, C, T2 = 3, 128, 4, 10 + >>> loss = CrossEntropyLoss() + >>> inputs = torch.randn(B, T1, C, requires_grad=True) + >>> targets = torch.empty(B, T2, dtype=torch.long).random_(T2) + >>> outputs = loss(inputs, targets) + >>> outputs.backward() + """ + def __init__( + self, + configs, + tokenizer: Tokenizer, + ) -> None: + super(CrossEntropyLoss, self).__init__() + self.cross_entropy_loss = nn.CrossEntropyLoss( + reduction=configs.criterion.reduction, + ignore_index=tokenizer.pad_id, + ) + + def forward(self, logits: Tensor, targets: Tensor) -> Tensor: + max_target_length = targets.size(1) + max_logits_length = logits.size(1) + + if max_logits_length > max_target_length: + logits = logits[:, :max_target_length, :] + elif max_target_length > max_logits_length: + targets = targets[:, :max_logits_length] + + logits = logits.contiguous().view(-1, logits.size(-1)) + + return self.cross_entropy_loss( + logits.contiguous().view(-1, logits.size(-1)), + targets.contiguous().view(-1), + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/ctc/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/ctc/__init__.py new file mode 100644 index 000000000..9a083c67f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/ctc/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/ctc/configuration.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/ctc/configuration.py new file mode 100644 index 000000000..83cce6ee8 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/ctc/configuration.py @@ -0,0 +1,51 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from ...dataclass.configurations import OpenspeechDataclass + + +@dataclass +class CTCLossConfigs(OpenspeechDataclass): + """ + This is the configuration class to store the configuration of + a :class:`~openspeech.criterion.CTCLoss`. + + It is used to initiated an `CTCLoss` criterion. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Configurations: + criterion_name (str): name of criterion. (default: ctc) + reduction (str): reduction method of criterion. (default: mean) + zero_infibity (bool): whether to zero infinite losses and the associated gradients. 
(default: True) + """ + criterion_name: str = field( + default="ctc", metadata={"help": "Criterion name for training"} + ) + reduction: str = field( + default="mean", metadata={"help": "Reduction method of criterion"} + ) + zero_infinity: bool = field( + default=True, metadata={"help": "Whether to zero infinite losses and the associated gradients."} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/ctc/ctc.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/ctc/ctc.py new file mode 100644 index 000000000..7e58a38d9 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/ctc/ctc.py @@ -0,0 +1,148 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor + +from .. import register_criterion +from ..ctc.configuration import CTCLossConfigs +from ...tokenizers.tokenizer import Tokenizer + + +@register_criterion("ctc", dataclass=CTCLossConfigs) +class CTCLoss(nn.Module): + r""" + The Connectionist Temporal Classification loss. + + Calculates loss between a continuous (unsegmented) time series and a target sequence. CTCLoss sums over the + probability of possible alignments of input to target, producing a loss value which is differentiable + with respect to each input node. The alignment of input to target is assumed to be "many-to-one", which + limits the length of the target sequence such that it must be :math:`\leq` the input length. + + Args: + configs (DictConfig): hydra configuration set + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: log_probs, targets, input_lengths, target_lengths + - Log_probs: Tensor of size :math:`(T, N, C)`, + where :math:`T = \text{input length}`, + :math:`N = \text{batch size}`, and + :math:`C = \text{number of classes (including blank)}`. + The logarithmized probabilities of the outputs (e.g. obtained with + :func:`torch.nn.functional.log_softmax`). + - Targets: Tensor of size :math:`(N, S)` or + :math:`(\operatorname{sum}(\text{target\_lengths}))`, + where :math:`N = \text{batch size}` and + :math:`S = \text{max target length, if shape is } (N, S)`. + It represent the target sequences. Each element in the target + sequence is a class index. And the target index cannot be blank (default=0). + In the :math:`(N, S)` form, targets are padded to the + length of the longest sequence, and stacked. 
+ In the :math:`(\operatorname{sum}(\text{target\_lengths}))` form, + the targets are assumed to be un-padded and + concatenated within 1 dimension. + - Input_lengths: Tuple or tensor of size :math:`(N)`, + where :math:`N = \text{batch size}`. It represent the lengths of the + inputs (must each be :math:`\leq T`). And the lengths are specified + for each sequence to achieve masking under the assumption that sequences + are padded to equal lengths. + - Target_lengths: Tuple or tensor of size :math:`(N)`, + where :math:`N = \text{batch size}`. It represent lengths of the targets. + Lengths are specified for each sequence to achieve masking under the + assumption that sequences are padded to equal lengths. If target shape is + :math:`(N,S)`, target_lengths are effectively the stop index + :math:`s_n` for each target sequence, such that ``target_n = targets[n,0:s_n]`` for + each target in a batch. Lengths must each be :math:`\leq S` + If the targets are given as a 1d tensor that is the concatenation of individual + targets, the target_lengths must add up to the total length of the tensor. + + Returns: loss + * loss (float): loss for training + + Examples:: + + >>> # Target are to be padded + >>> T = 50 # Input sequence length + >>> C = 20 # Number of classes (including blank) + >>> N = 16 # Batch size + >>> S = 30 # Target sequence length of longest target in batch (padding length) + >>> S_min = 10 # Minimum target length, for demonstration purposes + >>> + >>> # Initialize random batch of input vectors, for *size = (T,N,C) + >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_() + >>> + >>> # Initialize random batch of targets (0 = blank, 1:C = classes) + >>> target = torch.randint(low=1, high=C, size=(N, S), dtype=torch.long) + >>> + >>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long) + >>> target_lengths = torch.randint(low=S_min, high=S, size=(N,), dtype=torch.long) + >>> ctc_loss = nn.CTCLoss() + >>> loss = ctc_loss(input, target, input_lengths, target_lengths) + >>> loss.backward() + >>> + >>> + >>> # Target are to be un-padded + >>> T = 50 # Input sequence length + >>> C = 20 # Number of classes (including blank) + >>> N = 16 # Batch size + >>> + >>> # Initialize random batch of input vectors, for *size = (T,N,C) + >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_() + >>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long) + >>> + >>> # Initialize random batch of targets (0 = blank, 1:C = classes) + >>> target_lengths = torch.randint(low=1, high=T, size=(N,), dtype=torch.long) + >>> target = torch.randint(low=1, high=C, size=(sum(target_lengths),), dtype=torch.long) + >>> ctc_loss = CTCLoss() + >>> loss = ctc_loss(input, target, input_lengths, target_lengths) + >>> loss.backward() + + Reference: + A. 
Graves et al.: Connectionist Temporal Classification: + Labelling Unsegmented Sequence Data with Recurrent Neural Networks: + https://www.cs.toronto.edu/~graves/icml_2006.pdf + """ + def __init__( + self, + configs, + tokenizer: Tokenizer, + ) -> None: + super(CTCLoss, self).__init__() + self.ctc_loss = nn.CTCLoss( + blank=tokenizer.blank_id, + reduction=configs.criterion.reduction, + zero_infinity=configs.criterion.zero_infinity, + ) + + def forward( + self, + log_probs: Tensor, + input_lengths: Tensor, + targets: Tensor, + target_lengths: Tensor, + ): + return self.ctc_loss( + log_probs, + targets, + input_lengths, + target_lengths, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/joint_ctc_cross_entropy/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/joint_ctc_cross_entropy/__init__.py new file mode 100644 index 000000000..9a083c67f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/joint_ctc_cross_entropy/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/joint_ctc_cross_entropy/configuration.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/joint_ctc_cross_entropy/configuration.py new file mode 100644 index 000000000..123558ca5 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/joint_ctc_cross_entropy/configuration.py @@ -0,0 +1,63 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from ...dataclass.configurations import OpenspeechDataclass + + +@dataclass +class JointCTCCrossEntropyLossConfigs(OpenspeechDataclass): + """ + This is the configuration class to store the configuration of + a :class:`~openspeech.criterion.JointCTCCrossEntropyLoss`. + + It is used to initiated an `CTCLoss` criterion. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Configurations: + criterion_name (str): name of criterion. (default: joint_ctc_cross_entropy) + reduction (str): reduction method of criterion. (default: mean) + ctc_weight (float): weight of ctc loss for training. (default: 0.3) + cross_entropy_weight (float): weight of cross entropy loss for training. (default: 0.7) + smoothing (float): ratio of smoothing loss (confidence = 1.0 - smoothing) (default: 0.0) + zero_infibity (bool): whether to zero infinite losses and the associated gradients. (default: True) + """ + criterion_name: str = field( + default="joint_ctc_cross_entropy", metadata={"help": "Criterion name for training."} + ) + reduction: str = field( + default="mean", metadata={"help": "Reduction method of criterion"} + ) + ctc_weight: float = field( + default=0.3, metadata={"help": "Weight of ctc loss for training."} + ) + cross_entropy_weight: float = field( + default=0.7, metadata={"help": "Weight of cross entropy loss for training."} + ) + smoothing: float = field( + default=0.0, metadata={"help": "Ratio of smoothing loss (confidence = 1.0 - smoothing)"} + ) + zero_infinity: bool = field( + default=True, metadata={"help": "Whether to zero infinite losses and the associated gradients."} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/joint_ctc_cross_entropy/joint_ctc_cross_entropy.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/joint_ctc_cross_entropy/joint_ctc_cross_entropy.py new file mode 100644 index 000000000..dce96ea9d --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/joint_ctc_cross_entropy/joint_ctc_cross_entropy.py @@ -0,0 +1,117 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from typing import Tuple +from torch import Tensor + +from .. import register_criterion +from ..joint_ctc_cross_entropy.configuration import JointCTCCrossEntropyLossConfigs +from ..label_smoothed_cross_entropy.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyLoss +from ...tokenizers.tokenizer import Tokenizer + + +@register_criterion("joint_ctc_cross_entropy", dataclass=JointCTCCrossEntropyLossConfigs) +class JointCTCCrossEntropyLoss(nn.Module): + r""" + Privides Joint CTC-CrossEntropy Loss function. The logit from the encoder applies CTC Loss, and the logit + from the decoder applies Cross Entropy. This loss makes the encoder more robust. + + Args: + configs (DictConfig): hydra configuration set + num_classes (int): the number of classfication + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: encoder_logits, logits, output_lengths, targets, target_lengths + - encoder_logits (torch.FloatTensor): probability distribution value from encoder and it has a logarithm shape. + The `FloatTensor` of size ``(input_length, batch, num_classes)`` + - logits (torch.FloatTensor): probability distribution value from model and it has a logarithm shape. + The `FloatTensor` of size ``(batch, seq_length, num_classes)`` + - output_lengths (torch.LongTensor): length of model's outputs. + The `LongTensor` of size ``(batch)`` + - targets (torch.LongTensor): ground-truth encoded to integers which directly point a word in label. + The `LongTensor` of size ``(batch, target_length)`` + - target_lengths (torch.LongTensor): length of targets. 
+ The `LongTensor` of size ``(batch)`` + + Returns: loss, ctc_loss, cross_entropy_loss + - loss (float): loss for training + - ctc_loss (float): ctc loss for training + - cross_entropy_loss (float): cross entropy loss for training + + Reference: + Suyoun Kim et al.: Joint CTC-Attention based End-to-End Speech Recognition using Multi-task Learning: + https://arxiv.org/abs/1609.06773 + """ + + def __init__( + self, + configs, + num_classes: int, + tokenizer: Tokenizer, + ) -> None: + super(JointCTCCrossEntropyLoss, self).__init__() + self.num_classes = num_classes + self.dim = -1 + self.ignore_index = tokenizer.pad_id + self.reduction = configs.criterion.reduction.lower() + self.ctc_weight = configs.criterion.ctc_weight + self.cross_entropy_weight = configs.criterion.cross_entropy_weight + self.ctc_loss = nn.CTCLoss( + blank=tokenizer.blank_id, + reduction=self.reduction, + zero_infinity=configs.criterion.zero_infinity, + ) + if configs.criterion.smoothing > 0.0: + self.cross_entropy_loss = LabelSmoothedCrossEntropyLoss( + configs=configs, + num_classes=num_classes, + tokenizer=tokenizer, + ) + else: + self.cross_entropy_loss = nn.CrossEntropyLoss(reduction=self.reduction, ignore_index=self.ignore_index) + + def forward( + self, + encoder_logits: Tensor, + logits: Tensor, + output_lengths: Tensor, + targets: Tensor, + target_lengths: Tensor, + ) -> Tuple[Tensor, Tensor, Tensor]: + max_target_length = targets.size(1) + max_logits_length = logits.size(1) + + if max_logits_length > max_target_length: + logits = logits[:, :max_target_length, :] + cross_entropy_targets = targets.clone() + elif max_target_length > max_logits_length: + cross_entropy_targets = targets[:, :max_logits_length].clone() + else: + cross_entropy_targets = targets.clone() + + logits = logits.contiguous().view(-1, logits.size(-1)) + + ctc_loss = self.ctc_loss(encoder_logits, targets, output_lengths, target_lengths) + cross_entropy_loss = self.cross_entropy_loss(logits, cross_entropy_targets.contiguous().view(-1)) + loss = cross_entropy_loss * self.cross_entropy_weight + ctc_loss * self.ctc_weight + return loss, ctc_loss, cross_entropy_loss diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/label_smoothed_cross_entropy/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/label_smoothed_cross_entropy/__init__.py new file mode 100644 index 000000000..9a083c67f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/label_smoothed_cross_entropy/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
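A minimal sketch of the weighted combination computed above, using the default weights (cross_entropy_weight=0.7, ctc_weight=0.3) and dummy tensors; blank_id and pad_id stand in for the tokenizer attributes:

import torch
import torch.nn as nn

T, N, U, C = 50, 4, 12, 20
blank_id, pad_id = 0, 3

encoder_log_probs = torch.randn(T, N, C).log_softmax(2)   # encoder output, time-major
decoder_logits = torch.randn(N, U, C)                     # decoder output, batch-major
targets = torch.randint(4, C, (N, U), dtype=torch.long)
output_lengths = torch.full((N,), T, dtype=torch.long)
target_lengths = torch.full((N,), U, dtype=torch.long)

ctc = nn.CTCLoss(blank=blank_id, reduction="mean", zero_infinity=True)
ce = nn.CrossEntropyLoss(reduction="mean", ignore_index=pad_id)

ctc_loss = ctc(encoder_log_probs, targets, output_lengths, target_lengths)
ce_loss = ce(decoder_logits.reshape(-1, C), targets.reshape(-1))
loss = 0.7 * ce_loss + 0.3 * ctc_loss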
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/label_smoothed_cross_entropy/configuration.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/label_smoothed_cross_entropy/configuration.py new file mode 100644 index 000000000..2ab6afab9 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/label_smoothed_cross_entropy/configuration.py @@ -0,0 +1,51 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from ...dataclass.configurations import OpenspeechDataclass + + +@dataclass +class LabelSmoothedCrossEntropyLossConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.criterion.LabelSmoothedCrossEntropyLoss`. + + It is used to initiated an `LabelSmoothedCrossEntropyLoss` criterion. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Configurations: + criterion_name (str): name of criterion. (default: label_smoothed_cross_entropy) + reduction (str): reduction method of criterion. 
(default: mean) + smoothing (float): ratio of smoothing loss (confidence = 1.0 - smoothing) (default: 0.1) + """ + criterion_name: str = field( + default="label_smoothed_cross_entropy", metadata={"help": "Criterion name for training."} + ) + reduction: str = field( + default="mean", metadata={"help": "Reduction method of criterion"} + ) + smoothing: float = field( + default=0.1, metadata={"help": "Ratio of smoothing loss (confidence = 1.0 - smoothing)"} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/label_smoothed_cross_entropy/label_smoothed_cross_entropy.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/label_smoothed_cross_entropy/label_smoothed_cross_entropy.py new file mode 100644 index 000000000..28c9ae6ff --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/label_smoothed_cross_entropy/label_smoothed_cross_entropy.py @@ -0,0 +1,94 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from .. import register_criterion +from ..label_smoothed_cross_entropy.configuration import LabelSmoothedCrossEntropyLossConfigs +from ...tokenizers.tokenizer import Tokenizer + + +@register_criterion("label_smoothed_cross_entropy", dataclass=LabelSmoothedCrossEntropyLossConfigs) +class LabelSmoothedCrossEntropyLoss(nn.Module): + r""" + Label smoothed cross entropy loss function. + + Args: + configs (DictConfig): hydra configuration set + num_classes (int): the number of classfication + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: logits, targets + - **logits** (torch.FloatTensor): probability distribution value from model and it has a logarithm shape. 
+ The `FloatTensor` of size ``(batch, seq_length, num_classes)`` + - **targets** (torch.LongTensor): ground-truth encoded to integers which directly point a word in label + The `LongTensor` of size ``(batch, target_length)`` + + Returns: loss + * loss (float): loss for training + """ + def __init__( + self, + configs, + num_classes: int, + tokenizer: Tokenizer, + ) -> None: + super(LabelSmoothedCrossEntropyLoss, self).__init__() + self.confidence = 1.0 - configs.criterion.smoothing + self.smoothing = configs.criterion.smoothing + self.num_classes = num_classes + self.dim = -1 + self.ignore_index = tokenizer.pad_id + self.reduction = configs.criterion.reduction.lower() + + if self.reduction == 'sum': + self.reduction_method = torch.sum + elif self.reduction == 'mean': + self.reduction_method = torch.mean + else: + raise ValueError(f"Unsupported reduction method {configs.criterion.reduction}") + + def forward(self, logits: Tensor, targets: Tensor) -> Tensor: + # If predict longer than the target size, won't be able to calculate the cross entropy + max_target_length = targets.size(1) + max_logits_length = logits.size(1) + + if max_logits_length > max_target_length: + logits = logits[:, :max_target_length, :] + elif max_target_length > max_logits_length: + targets = targets[:, :max_logits_length] + + logits = logits.contiguous().view(-1, logits.size(-1)) + targets = targets.contiguous().view(-1) + + if self.smoothing > 0.0: + with torch.no_grad(): + label_smoothed = torch.zeros_like(logits) + label_smoothed.fill_(self.smoothing / (self.num_classes - 1)) + label_smoothed.scatter_(1, targets.data.unsqueeze(1), self.confidence) + label_smoothed[targets == self.ignore_index, :] = 0 + return self.reduction_method(-label_smoothed * logits) + + return F.cross_entropy(logits, targets, ignore_index=self.ignore_index, reduction=self.reduction) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/perplexity/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/perplexity/__init__.py new file mode 100644 index 000000000..9a083c67f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/perplexity/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
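A minimal sketch of the label-smoothing step above with dummy values: the smoothing mass is spread uniformly over the non-target classes, the target class keeps confidence = 1 - smoothing, and padded positions are zeroed out before reduction:

import torch

num_classes, smoothing, ignore_index = 5, 0.1, 0
confidence = 1.0 - smoothing

log_probs = torch.randn(3, num_classes).log_softmax(dim=-1)  # (batch * seq, num_classes)
targets = torch.tensor([1, 4, 0])                            # last position is padding

label_smoothed = torch.zeros_like(log_probs)
label_smoothed.fill_(smoothing / (num_classes - 1))
label_smoothed.scatter_(1, targets.unsqueeze(1), confidence)
label_smoothed[targets == ignore_index, :] = 0               # mask padded positions

loss = torch.mean(-label_smoothed * log_probs)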
\ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/perplexity/configuration.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/perplexity/configuration.py new file mode 100644 index 000000000..7ce7dfa6b --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/perplexity/configuration.py @@ -0,0 +1,48 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from ...dataclass.configurations import OpenspeechDataclass + + +@dataclass +class PerplexityLossConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of a + :class: `~openspeech.criterion.Perplexity`. + + It is used to initiated an `Perplexity` criterion. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Configurations: + criterion_name (str): name of criterion (default: perplexity) + reduction (str): reduction method of criterion (default: mean) + """ + criterion_name: str = field( + default="perplexity", metadata={"help": "Criterion name for training"} + ) + reduction: str = field( + default="mean", metadata={"help": "Reduction method of criterion"} + ) + diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/perplexity/perplexity.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/perplexity/perplexity.py new file mode 100644 index 000000000..f0fcbee8e --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/perplexity/perplexity.py @@ -0,0 +1,78 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from torch import Tensor + +from .. import register_criterion +from ..perplexity.configuration import PerplexityLossConfigs +from ...tokenizers.tokenizer import Tokenizer + + +@register_criterion("perplexity", dataclass=PerplexityLossConfigs) +class Perplexity(nn.Module): + r""" + Language model perplexity loss. + Perplexity is the token averaged likelihood. When the averaging options are the + same, it is the exponential of negative log-likelihood. + + Args: + configs (DictConfig): hydra configuration set + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: logits, targets + - **logits** (torch.FloatTensor): probability distribution value from model and it has a logarithm shape. + The `FloatTensor` of size ``(batch, seq_length, num_classes)`` + - **targets** (torch.LongTensor): ground-truth encoded to integers which directly point a word in label + The `LongTensor` of size ``(batch, target_length)`` + + Returns: loss + - loss (float): loss for training + """ + def __init__( + self, + configs, + tokenizer: Tokenizer, + ) -> None: + super(Perplexity, self).__init__() + self.cross_entropy_loss = nn.CrossEntropyLoss( + reduction=configs.criterion.reduction, + ignore_index=tokenizer.pad_id, + ) + + def forward(self, logits: Tensor, targets: Tensor) -> Tensor: + max_target_length = targets.size(1) + max_logits_length = logits.size(1) + + if max_logits_length > max_target_length: + logits = logits[:, :max_target_length, :] + elif max_target_length > max_logits_length: + targets = targets[:, :max_logits_length] + + logits = logits.contiguous().view(-1, logits.size(-1)) + + cross_entropy_loss = self.cross_entropy_loss( + logits.contiguous().view(-1, logits.size(-1)), + targets.contiguous().view(-1), + ) + return torch.exp(cross_entropy_loss) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/transducer/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/transducer/__init__.py new file mode 100644 index 000000000..9a083c67f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/transducer/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
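A minimal sketch of what the forward pass above computes with dummy tensors: perplexity is simply the exponential of the mean-reduced cross entropy, with padded positions ignored via pad_id (a hypothetical value here):

import torch
import torch.nn as nn

pad_id = 0
logits = torch.randn(4, 7, 10)                            # (batch, seq_length, num_classes)
targets = torch.randint(1, 10, (4, 7), dtype=torch.long)

ce = nn.CrossEntropyLoss(reduction="mean", ignore_index=pad_id)
cross_entropy = ce(logits.reshape(-1, 10), targets.reshape(-1))
perplexity = torch.exp(cross_entropy)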
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/transducer/configuration.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/transducer/configuration.py new file mode 100644 index 000000000..a217a3672 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/transducer/configuration.py @@ -0,0 +1,51 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from ...dataclass.configurations import OpenspeechDataclass + + +@dataclass +class TransducerLossConfigs(OpenspeechDataclass): + """ + This is the configuration class to store the configuration of + a :class:`~openspeech.criterion.TransducerLoss`. + + It is used to initiated an `TransducerLoss` criterion. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Configurations: + criterion_name (str): name of criterion. (default: label_smoothed_cross_entropy) + reduction (str): reduction method of criterion. (default: mean) + gather (bool): reduce memory consumption. 
(default: True) + """ + criterion_name: str = field( + default="transducer", metadata={"help": "Criterion name for training."} + ) + reduction: str = field( + default="mean", metadata={"help": "Reduction method of criterion"} + ) + gather: bool = field( + default=True, metadata={"help": "Reduce memory consumption."} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/criterion/transducer/transducer.py b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/transducer/transducer.py new file mode 100644 index 000000000..d6ff0535c --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/criterion/transducer/transducer.py @@ -0,0 +1,91 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn + +from .. import register_criterion +from ..transducer.configuration import TransducerLossConfigs +from ...utils import WARPRNNT_IMPORT_ERROR +from ...tokenizers.tokenizer import Tokenizer + + +@register_criterion("transducer", dataclass=TransducerLossConfigs) +class TransducerLoss(nn.Module): + r""" + Compute path-aware regularization transducer loss. + + Args: + configs (DictConfig): hydra configuration set + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + logits (torch.FloatTensor): Input tensor with shape (N, T, U, V) + where N is the minibatch size, T is the maximum number of + input frames, U is the maximum number of output labels and V is + the vocabulary of labels (including the blank). + targets (torch.IntTensor): Tensor with shape (N, U-1) representing the + reference labels for all samples in the minibatch. + input_lengths (torch.IntTensor): Tensor with shape (N,) representing the + number of frames for each sample in the minibatch. + target_lengths (torch.IntTensor): Tensor with shape (N,) representing the + length of the transcription for each sample in the minibatch. + + Returns: + - loss (torch.FloatTensor): transducer loss + + Reference: + A. 
Graves: Sequence Transduction with Recurrent Neural Networks: + https://arxiv.org/abs/1211.3711.pdf + """ + + def __init__( + self, + configs, + tokenizer: Tokenizer, + ) -> None: + super().__init__() + try: + from warp_rnnt import rnnt_loss + except ImportError: + raise ImportError(WARPRNNT_IMPORT_ERROR) + self.rnnt_loss = rnnt_loss + self.blank_id = tokenizer.blank_id + self.reduction = configs.criterion.reduction + self.gather = configs.criterion.gather + + def forward( + self, + logits: torch.FloatTensor, + targets: torch.IntTensor, + input_lengths: torch.IntTensor, + target_lengths: torch.IntTensor, + ) -> torch.FloatTensor: + return self.rnnt_loss( + logits, + targets, + input_lengths, + target_lengths, + reduction=self.reduction, + blank=self.blank_id, + gather=self.gather, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/__init__.py new file mode 100644 index 000000000..56d4c3f6e --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/__init__.py @@ -0,0 +1,71 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import importlib + +AUDIO_FEATURE_TRANSFORM_REGISTRY = dict() +AUDIO_FEATURE_TRANSFORM_DATACLASS_REGISTRY = dict() + + +def register_audio_feature_transform(name: str, dataclass=None): + r""" + New dataset types can be added to OpenSpeech with the :func:`register_dataset` function decorator. + + For example:: + @register_audio_feature_transform("fbank", dataclass=FilterBankConfigs) + class FilterBankFeatureTransform(object): + (...) + + .. note:: All dataset must implement the :class:`cls.__name__` interface. 
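A minimal sketch of the tensor shapes the transducer loss above expects, with dummy values; U is the maximum target length plus one because the joint-network output includes the blank step. The guarded call mirrors the one in the class (warp_rnnt is an optional dependency and typically requires CUDA tensors for a real run):

import torch

N, T, U, V = 4, 50, 11, 20        # batch, frames, target_len + 1, vocab (incl. blank)
logits = torch.randn(N, T, U, V).log_softmax(dim=-1)
targets = torch.randint(1, V, (N, U - 1), dtype=torch.int32)
input_lengths = torch.full((N,), T, dtype=torch.int32)
target_lengths = torch.full((N,), U - 1, dtype=torch.int32)

try:
    from warp_rnnt import rnnt_loss
    # Same keyword arguments as the class above; move tensors to GPU for a real run.
    loss = rnnt_loss(logits, targets, input_lengths, target_lengths,
                     reduction="mean", blank=0, gather=True)
except ImportError:
    loss = None  # warp_rnnt not installed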
+ + Args: + name (str): the name of the dataset + dataclass (Optional, str): the dataclass of the dataset (default: None) + """ + + def register_audio_feature_transform_cls(cls): + if name in AUDIO_FEATURE_TRANSFORM_REGISTRY: + raise ValueError(f"Cannot register duplicate audio ({name})") + + AUDIO_FEATURE_TRANSFORM_REGISTRY[name] = cls + + cls.__dataclass = dataclass + if dataclass is not None: + if name in AUDIO_FEATURE_TRANSFORM_DATACLASS_REGISTRY: + raise ValueError(f"Cannot register duplicate dataclass ({name})") + AUDIO_FEATURE_TRANSFORM_DATACLASS_REGISTRY[name] = dataclass + + return cls + + return register_audio_feature_transform_cls + + +data_dir = os.path.dirname(__file__) +for file in os.listdir(f"{data_dir}/audio"): + if os.path.isdir(f"{data_dir}/audio/{file}") and not file.startswith('__'): + path = f"{data_dir}/audio/{file}" + for module_file in os.listdir(path): + path = os.path.join(path, module_file) + if module_file.endswith(".py"): + module_name = module_file[: module_file.find(".py")] if module_file.endswith(".py") else module_file + module = importlib.import_module(f"openspeech.data.audio.{file}.{module_name}") diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/__init__.py new file mode 100644 index 000000000..9a083c67f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
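A minimal sketch of how the registry above is used, with the hypothetical names my_transform/MyTransform; a feature transform is later looked up by the same string through AUDIO_FEATURE_TRANSFORM_REGISTRY:

from openspeech.data import AUDIO_FEATURE_TRANSFORM_REGISTRY, register_audio_feature_transform

@register_audio_feature_transform("my_transform")
class MyTransform(object):
    def __init__(self, configs):
        self.configs = configs

    def __call__(self, signal):
        return signal  # no-op transform, for illustration only

transform_cls = AUDIO_FEATURE_TRANSFORM_REGISTRY["my_transform"]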
\ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/augment.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/augment.py new file mode 100644 index 000000000..c8baabe04 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/augment.py @@ -0,0 +1,193 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import numpy as np +import random +import logging +import librosa +from torch import Tensor + +from ..audio.load import load_audio + +logger = logging.getLogger(__name__) + + +class SpecAugment(object): + """ + Provides Spec Augment. A simple data augmentation method for speech recognition. + This concept proposed in https://arxiv.org/abs/1904.08779 + + Args: + freq_mask_para (int): maximum frequency masking length + time_mask_num (int): how many times to apply time masking + freq_mask_num (int): how many times to apply frequency masking + + Inputs: feature_vector + - **feature_vector** (torch.FloatTensor): feature vector from audio file. + + Returns: feature_vector: + - **feature_vector**: masked feature vector. + """ + def __init__(self, freq_mask_para: int = 18, time_mask_num: int = 10, freq_mask_num: int = 2) -> None: + self.freq_mask_para = freq_mask_para + self.time_mask_num = time_mask_num + self.freq_mask_num = freq_mask_num + + def __call__(self, feature: Tensor) -> Tensor: + """ Provides SpecAugmentation for audio """ + time_axis_length = feature.size(0) + freq_axis_length = feature.size(1) + time_mask_para = time_axis_length / 20 # Refer to "Specaugment on large scale dataset" paper + + # time mask + for _ in range(self.time_mask_num): + t = int(np.random.uniform(low=0.0, high=time_mask_para)) + t0 = random.randint(0, time_axis_length - t) + feature[t0: t0 + t, :] = 0 + + # freq mask + for _ in range(self.freq_mask_num): + f = int(np.random.uniform(low=0.0, high=self.freq_mask_para)) + f0 = random.randint(0, freq_axis_length - f) + feature[:, f0: f0 + f] = 0 + + return feature + + +class NoiseInjector(object): + """ + Provides noise injection for noise augmentation. 
+ + The noise augmentation process is as follows: + 1: Randomly sample audios by `noise_size` from dataset + 2: Extract noise from `audio_paths` + 3: Add noise to sound + + Args: + noise_dataset_dir (str): path of noise dataset + sample_rate (int): sampling rate + noise_level (float): level of noise + + Inputs: signal + - **signal**: signal from audio file + + Returns: signal + - **signal**: noise added signal + """ + def __init__( + self, + noise_dataset_dir: str, + sample_rate: int = 16000, + noise_level: float = 0.7, + ) -> None: + if not os.path.exists(noise_dataset_dir): + logger.info("Directory doesn`t exist: {0}".format(noise_dataset_dir)) + raise IOError + + logger.info("Create Noise injector...") + + self.sample_rate = sample_rate + self.noise_level = noise_level + self._load_audio = load_audio + self.audio_paths = self.create_audio_paths(noise_dataset_dir) + self.dataset = self.create_noiseset(noise_dataset_dir) + + logger.info("Create Noise injector complete !!") + + def __call__(self, signal): + noise = np.random.choice(self.dataset) + noise_level = np.random.uniform(0, self.noise_level) + + signal_length = len(signal) + noise_length = len(noise) + + if signal_length >= noise_length: + noise_start = int(np.random.rand() * (signal_length - noise_length)) + noise_end = int(noise_start + noise_length) + signal[noise_start: noise_end] += noise * noise_level + + else: + signal += noise[:signal_length] * noise_level + + return signal + + def create_audio_paths(self, dataset_path) -> list: + audio_paths = list() + noise_audio_paths = os.listdir(dataset_path) + num_noise_audio_data = len(noise_audio_paths) + + for idx in range(num_noise_audio_data): + if noise_audio_paths[idx].endswith('.pcm') \ + or noise_audio_paths[idx].endswith('.wav') \ + or noise_audio_paths[idx].endswith('.flac'): + audio_paths.append(noise_audio_paths[idx]) + + return audio_paths + + def create_noiseset(self, dataset_path): + dataset = list() + + for audio_path in self.audio_paths: + audio_path = os.path.join(dataset_path, audio_path) + noise = self._load_audio(audio_path, self.sample_rate, del_silence=False) + + if noise is not None: + dataset.append(noise) + + return dataset + + +class TimeStretchAugment(object): + """ + Time-stretch an audio series by a fixed rate. 
+ + Inputs: + signal: np.ndarray [shape=(n,)] audio time series + + Returns: + y_stretch: np.ndarray [shape=(round(n/rate),)] audio time series stretched by the specified rate + """ + def __init__(self, min_rate: float = 0.7, max_rate: float = 1.4): + super(TimeStretchAugment, self).__init__() + self.min_rate = min_rate + self.max_rate = max_rate + + def __call__(self, signal: np.array): + return librosa.effects.time_stretch(signal, random.uniform(self.min_rate, self.max_rate)) + + +class JoiningAugment(object): + """ + Data augment by concatenating audio signals + + Inputs: + signal: np.ndarray [shape=(n,)] audio time series + + Returns: signal + - **signal**: concatenated signal + """ + def __init__(self): + super(JoiningAugment, self).__init__() + + def __call__(self, signals: tuple): + return np.concatenate([signal for signal in signals]) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/data_loader.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/data_loader.py new file mode 100644 index 000000000..45a26fa08 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/data_loader.py @@ -0,0 +1,130 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import numpy as np +from typing import Tuple +from torch.utils.data import DataLoader, Sampler + + +def _collate_fn(batch, pad_id: int = 0): + r""" + Functions that pad to the maximum sequence length + + Args: + batch (tuple): tuple contains input and target tensors + pad_id (int): identification of pad token + + Returns: + seqs (torch.FloatTensor): tensor contains input sequences. + target (torch.IntTensor): tensor contains target sequences. 
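A minimal sketch of applying the SpecAugment transform defined above to a dummy (time, frequency) feature; the randomly chosen time and frequency bands are zeroed in place:

import torch
from openspeech.data.audio.augment import SpecAugment

feature = torch.randn(400, 80)   # (time_steps, num_mels)
spec_augment = SpecAugment(freq_mask_para=18, time_mask_num=10, freq_mask_num=2)
masked = spec_augment(feature)   # same shape, with zeroed time/frequency bands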
+ seq_lengths (torch.IntTensor): tensor contains input sequence lengths + target_lengths (torch.IntTensor): tensor contains target sequence lengths + """ + def seq_length_(p): + return len(p[0]) + + def target_length_(p): + return len(p[1]) + + # sort by sequence length for rnn.pack_padded_sequence() + batch = sorted(batch, key=lambda sample: sample[0].size(0), reverse=True) + + seq_lengths = [len(s[0]) for s in batch] + target_lengths = [len(s[1]) - 1 for s in batch] + + max_seq_sample = max(batch, key=seq_length_)[0] + max_target_sample = max(batch, key=target_length_)[1] + + max_seq_size = max_seq_sample.size(0) + max_target_size = len(max_target_sample) + + feat_size = max_seq_sample.size(1) + batch_size = len(batch) + + seqs = torch.zeros(batch_size, max_seq_size, feat_size) + + targets = torch.zeros(batch_size, max_target_size).to(torch.long) + targets.fill_(pad_id) + + for x in range(batch_size): + sample = batch[x] + tensor = sample[0] + target = sample[1] + seq_length = tensor.size(0) + + seqs[x].narrow(0, 0, seq_length).copy_(tensor) + targets[x].narrow(0, 0, len(target)).copy_(torch.LongTensor(target)) + + seq_lengths = torch.IntTensor(seq_lengths) + target_lengths = torch.IntTensor(target_lengths) + + return seqs, targets, seq_lengths, target_lengths + + +class AudioDataLoader(DataLoader): + r""" + Audio Data Loader + + Args: + dataset (torch.utils.data.Dataset): dataset from which to load the data. + num_workers (int): how many subprocesses to use for data loading. + batch_sampler (torch.utils.data.sampler.Sampler): defines the strategy to draw samples from the dataset. + """ + def __init__( + self, + dataset: torch.utils.data.Dataset, + num_workers: int, + batch_sampler: torch.utils.data.sampler.Sampler, + **kwargs, + ) -> None: + super(AudioDataLoader, self).__init__( + dataset=dataset, + num_workers=num_workers, + batch_sampler=batch_sampler, + **kwargs, + ) + self.collate_fn = _collate_fn + + +def load_dataset(manifest_file_path: str) -> Tuple[list, list]: + """ + Provides dictionary of filename and labels. + + Args: + manifest_file_path (str): evaluation manifest file path. 
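A minimal sketch of what _collate_fn above produces for a tiny batch of variable-length (feature, transcript) pairs; features are zero-padded, transcripts are padded with pad_id, and target_lengths exclude the trailing token:

import torch
from openspeech.data.audio.data_loader import _collate_fn

batch = [
    (torch.randn(120, 80), [1, 5, 6, 7, 2]),   # (feature, transcript incl. sos/eos)
    (torch.randn(90, 80), [1, 8, 9, 2]),
]
seqs, targets, seq_lengths, target_lengths = _collate_fn(batch, pad_id=0)
# seqs: (2, 120, 80), targets: (2, 5), seq_lengths: [120, 90], target_lengths: [4, 3]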
+ + Returns: target_dict + * target_dict (dict): dictionary of filename and labels + """ + audio_paths = list() + transcripts = list() + + with open(manifest_file_path) as f: + for idx, line in enumerate(f.readlines()): + audio_path, korean_transcript, transcript = line.split('\t') + transcript = transcript.replace('\n', '') + + audio_paths.append(audio_path) + transcripts.append(transcript) + + return audio_paths, transcripts diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/dataset.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/dataset.py new file mode 100644 index 000000000..7deeecf35 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/dataset.py @@ -0,0 +1,219 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import random +import torch +import numpy as np +import logging +from torch import Tensor +from torch.utils.data import Dataset + +from openspeech.data import AUDIO_FEATURE_TRANSFORM_REGISTRY +from openspeech.data.audio.augment import JoiningAugment, NoiseInjector, SpecAugment, TimeStretchAugment +from openspeech.data.audio.load import load_audio + +logger = logging.getLogger(__name__) + + +class SpeechToTextDataset(Dataset): + r""" + Dataset for audio & transcript matching + + Note: + Do not use this class directly, use one of the sub classes. 
+ + Args: + dataset_path (str): path of librispeech dataset + audio_paths (list): list of audio path + transcripts (list): list of transript + sos_id (int): identification of + eos_id (int): identification of + del_silence (bool): flag indication whether to apply delete silence or not + apply_spec_augment (bool): flag indication whether to apply spec augment or not + apply_noise_augment (bool): flag indication whether to apply noise augment or not + apply_time_stretch_augment (bool): flag indication whether to apply time stretch augment or not + apply_joining_augment (bool): flag indication whether to apply audio joining augment or not + """ + NONE_AUGMENT = 0 + SPEC_AUGMENT = 1 + NOISE_AUGMENT = 2 + TIME_STRETCH = 3 + AUDIO_JOINING = 4 + + def __init__( + self, + configs, + dataset_path: str, + audio_paths: list, + transcripts: list, + sos_id: int = 1, + eos_id: int = 2, + del_silence: bool = False, + apply_spec_augment: bool = False, + apply_noise_augment: bool = False, + apply_time_stretch_augment: bool = False, + apply_joining_augment: bool = False, + ) -> None: + super(SpeechToTextDataset, self).__init__() + self.dataset_path = dataset_path + self.audio_paths = list(audio_paths) + self.transcripts = list(transcripts) + self.augments = [self.NONE_AUGMENT] * len(self.audio_paths) + self.dataset_size = len(self.audio_paths) + self.sos_id = sos_id + self.eos_id = eos_id + self.sample_rate = configs.audio.sample_rate + self.num_mels = configs.audio.num_mels + self.del_silence = del_silence + self.apply_spec_augment = apply_spec_augment + self.apply_noise_augment = apply_noise_augment + self.apply_time_stretch_augment = apply_time_stretch_augment + self.apply_joining_augment = apply_joining_augment + self.transforms = AUDIO_FEATURE_TRANSFORM_REGISTRY[configs.audio.name](configs) + self._load_audio = load_audio + + if self.apply_spec_augment: + self._spec_augment = SpecAugment( + freq_mask_para=configs.augment.freq_mask_para, + freq_mask_num=configs.augment.freq_mask_num, + time_mask_num=configs.augment.time_mask_num, + ) + for idx in range(self.dataset_size): + self.audio_paths.append(self.audio_paths[idx]) + self.transcripts.append(self.transcripts[idx]) + self.augments.append(self.SPEC_AUGMENT) + + if self.apply_noise_augment: + if eval(configs.augment.noise_dataset_dir) is None: + raise ValueError("`noise_dataset_dir` should be contain audio files.") + + self._noise_injector = NoiseInjector( + noise_dataset_dir=configs.augment.noise_dataset_dir, + sample_rate=configs.augment.noise_sample_rate, + noise_level=configs.augment.noise_level, + ) + for idx in range(self.dataset_size): + self.audio_paths.append(self.audio_paths[idx]) + self.transcripts.append(self.transcripts[idx]) + self.augments.append(self.NONE_AUGMENT) + + if self.apply_time_stretch_augment: + self._time_stretch_augment = TimeStretchAugment( + min_rate=configs.time_stretch_min_rate, + max_rate=configs.time_stretch_max_rate, + ) + for idx in range(self.dataset_size): + self.audio_paths.append(self.audio_paths[idx]) + self.transcripts.append(self.transcripts[idx]) + self.augments.append(self.TIME_STRETCH) + + if self.apply_joining_augment: + self._joining_augment = JoiningAugment() + for idx in range(self.dataset_size): + self.audio_paths.append(self.audio_paths[idx]) + self.transcripts.append(self.transcripts[idx]) + self.augments.append(self.AUDIO_JOINING) + + self.total_size = len(self.audio_paths) + + tmp = list(zip(self.audio_paths, self.transcripts, self.augments)) + random.shuffle(tmp) + self.audio_paths, 
self.transcripts, self.augments = zip(*tmp) + + def _parse_audio(self, audio_path: str, augment: int = None, joining_idx: int = 0) -> Tensor: + """ + Parses audio. + + Args: + audio_path (str): path of audio file + augment (int): augmentation identification + + Returns: + feature (np.ndarray): feature extract by sub-class + """ + signal = self._load_audio(audio_path, sample_rate=self.sample_rate, del_silence=self.del_silence) + + if signal is None: + logger.warning(f"{audio_path} is not Valid!!") + return torch.zeros(1000, self.num_mels) + + if augment == self.AUDIO_JOINING: + joining_signal = self._load_audio(self.audio_paths[joining_idx], sample_rate=self.sample_rate) + signal = self._joining_augment((signal, joining_signal)) + + if augment == self.TIME_STRETCH: + signal = self._time_stretch_augment(signal) + + if augment == self.NOISE_AUGMENT: + signal = self._noise_injector(signal) + + feature = self.transforms(signal) + + feature -= feature.mean() + feature /= np.std(feature) + + feature = torch.FloatTensor(feature).transpose(0, 1) + + if augment == self.SPEC_AUGMENT: + feature = self._spec_augment(feature) + + return feature + + def _parse_transcript(self, transcript: str) -> list: + """ + Parses transcript + Args: + transcript (str): transcript of audio file + Returns + transcript (list): transcript that added and tokens + """ + tokens = transcript.split(' ') + transcript = list() + + transcript.append(int(self.sos_id)) + for token in tokens: + transcript.append(int(token)) + transcript.append(int(self.eos_id)) + + return transcript + + def __getitem__(self, idx): + """ Provides paif of audio & transcript """ + audio_path = os.path.join(self.dataset_path, self.audio_paths[idx]) + + if self.augments[idx] == self.AUDIO_JOINING: + joining_idx = random.randint(0, self.total_size) + feature = self._parse_audio(audio_path, self.augments[idx], joining_idx) + transcript = self._parse_transcript(f"{self.transcripts[idx]} {self.transcripts[joining_idx]}") + + else: + feature = self._parse_audio(audio_path, self.augments[idx]) + transcript = self._parse_transcript(self.transcripts[idx]) + + return feature, transcript + + def __len__(self): + return len(self.audio_paths) + + def count(self): + return len(self.audio_paths) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/filter_bank/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/filter_bank/__init__.py new file mode 100644 index 000000000..9a083c67f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/filter_bank/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
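A minimal sketch of the transcript parsing done above: a space-separated string of token ids is wrapped with the sos and eos ids (1 and 2 by default):

transcript = "27 654 12 9"
tokens = [1] + [int(token) for token in transcript.split(" ")] + [2]
# -> [1, 27, 654, 12, 9, 2]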
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/filter_bank/configuration.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/filter_bank/configuration.py new file mode 100644 index 000000000..c60e5e1b6 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/filter_bank/configuration.py @@ -0,0 +1,79 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from ....dataclass.configurations import OpenspeechDataclass + + +@dataclass +class FilterBankConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.data.audio.FilterBankFeatureTransform`. + + It is used to initiated an `FilterBankFeatureTransform` feature transform. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + name (str): name of feature transform. (default: fbank) + sample_rate (int): sampling rate of audio (default: 16000) + frame_length (float): frame length for spectrogram (default: 20.0) + frame_shift (float): length of hop between STFT (default: 10.0) + del_silence (bool): flag indication whether to apply delete silence or not (default: False) + num_mels (int): the number of mfc coefficients to retain. 
(default: 80) + apply_spec_augment (bool): flag indication whether to apply spec augment or not (default: True) + apply_noise_augment (bool): flag indication whether to apply noise augment or not (default: False) + apply_time_stretch_augment (bool): flag indication whether to apply time stretch augment or not (default: False) + apply_joining_augment (bool): flag indication whether to apply audio joining augment or not (default: False) + """ + name: str = field( + default="fbank", metadata={"help": "Name of dataset."} + ) + sample_rate: int = field( + default=16000, metadata={"help": "Sampling rate of audio"} + ) + frame_length: float = field( + default=20.0, metadata={"help": "Frame length for spectrogram"} + ) + frame_shift: float = field( + default=10.0, metadata={"help": "Length of hop between STFT"} + ) + del_silence: bool = field( + default=False, metadata={"help": "Flag indication whether to apply delete silence or not"} + ) + num_mels: int = field( + default=80, metadata={"help": "The number of mfc coefficients to retain."} + ) + apply_spec_augment: bool = field( + default=True, metadata={"help": "Flag indication whether to apply spec augment or not"} + ) + apply_noise_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply noise augment or not"} + ) + apply_time_stretch_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply time stretch augment or not"} + ) + apply_joining_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply audio joining augment or not"} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/filter_bank/filter_bank.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/filter_bank/filter_bank.py new file mode 100644 index 000000000..8862117e7 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/filter_bank/filter_bank.py @@ -0,0 +1,71 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import numpy as np +from torch import Tensor + +from ... import register_audio_feature_transform +from ...audio.filter_bank.configuration import FilterBankConfigs +from ....utils import TORCHAUDIO_IMPORT_ERROR + + +@register_audio_feature_transform("fbank", dataclass=FilterBankConfigs) +class FilterBankFeatureTransform(object): + r""" + Create a fbank from a raw audio signal. 
This matches the input/output of Kaldi's compute-fbank-feats. + + Args: + configs (DictConfig): hydra configuraion set + + Inputs: + signal (np.ndarray): signal from audio file. + + Returns: + Tensor: A fbank identical to what Kaldi would output. The shape is ``(seq_length, num_mels)`` + """ + def __init__(self, configs) -> None: + super(FilterBankFeatureTransform, self).__init__() + try: + import torchaudio + except ImportError: + raise ImportError(TORCHAUDIO_IMPORT_ERROR) + self.num_mels = configs.audio.num_mels + self.frame_length = configs.audio.frame_length + self.frame_shift = configs.audio.frame_shift + self.function = torchaudio.compliance.kaldi.fbank + + def __call__(self, signal: np.ndarray) -> np.ndarray: + """ + Provides feature extraction + + Inputs: + signal (np.ndarray): audio signal + + Returns: + feature (np.ndarray): feature extract by sub-class + """ + return self.function( + Tensor(signal).unsqueeze(0), + num_mel_bins=self.num_mels, + frame_length=self.frame_length, + frame_shift=self.frame_shift, + ).transpose(0, 1).numpy() diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/load.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/load.py new file mode 100644 index 000000000..032c57d40 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/load.py @@ -0,0 +1,57 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import numpy as np +import librosa +import logging + +logger = logging.getLogger(__name__) + + +def load_audio(audio_path: str, sample_rate: int, del_silence: bool = False) -> np.ndarray: + """ + Load audio file (PCM) to sound. if del_silence is True, Eliminate all sounds below 30dB. + If exception occurs in numpy.memmap(), return None. 
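`FilterBankFeatureTransform.__call__` above is a thin wrapper around `torchaudio.compliance.kaldi.fbank`. A small sketch of the same call with the `FilterBankConfigs` defaults (80 mel bins, 20 ms frames, 10 ms shift); the one-second random waveform is only a placeholder:

import numpy as np
import torch
import torchaudio

signal = np.random.randn(16000).astype(np.float32)  # placeholder: 1 s of 16 kHz audio

feature = torchaudio.compliance.kaldi.fbank(
    torch.Tensor(signal).unsqueeze(0),  # fbank expects a (channel, time) tensor
    num_mel_bins=80,
    frame_length=20.0,  # milliseconds
    frame_shift=10.0,   # milliseconds
).transpose(0, 1).numpy()

print(feature.shape)  # (80, num_frames); _parse_audio later transposes this back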
+ """ + try: + if audio_path.endswith('pcm'): + signal = np.memmap(audio_path, dtype='h', mode='r').astype('float32') + + if del_silence: + non_silence_indices = librosa.effects.split(signal, top_db=30) + signal = np.concatenate([signal[start:end] for start, end in non_silence_indices]) + + return signal / 32767 # normalize audio + + elif audio_path.endswith('wav') or audio_path.endswith('flac'): + signal, _ = librosa.load(audio_path, sr=sample_rate) + return signal + + except ValueError: + logger.debug('ValueError in {0}'.format(audio_path)) + return None + except RuntimeError: + logger.debug('RuntimeError in {0}'.format(audio_path)) + return None + except IOError: + logger.debug('IOError in {0}'.format(audio_path)) + return None diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/melspectrogram/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/melspectrogram/__init__.py new file mode 100644 index 000000000..9a083c67f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/melspectrogram/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/melspectrogram/configuration.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/melspectrogram/configuration.py new file mode 100644 index 000000000..61c04db7c --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/melspectrogram/configuration.py @@ -0,0 +1,79 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
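A short usage sketch of `load_audio` above, assuming the placeholder file paths exist: the PCM branch memory-maps raw 16-bit samples, optionally drops segments quieter than 30 dB with `librosa.effects.split`, and scales by 32767, while `.wav`/`.flac` files go through `librosa.load`:

import numpy as np
import librosa

# PCM branch with del_silence=True, on a hypothetical file.
signal = np.memmap("utterance.pcm", dtype="h", mode="r").astype("float32")
intervals = librosa.effects.split(signal, top_db=30)  # non-silent (start, end) pairs
signal = np.concatenate([signal[start:end] for start, end in intervals])
signal = signal / 32767  # scale the int16 range to roughly [-1, 1]

# wav/flac branch, resampled to the configured sample rate.
wav_signal, _ = librosa.load("utterance.wav", sr=16000)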
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from ....dataclass.configurations import OpenspeechDataclass + + +@dataclass +class MelSpectrogramConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.data.audio.MelSpectrogramFeatureTransform`. + + It is used to initiated an `MelSpectrogramFeatureTransform` feature transform. + + Configuration objects inherit from :class: `~openspeech.dataclass.OpenspeechDataclass`. + + Args: + name (str): name of feature transform. (default: melspectrogram) + sample_rate (int): sampling rate of audio (default: 16000) + frame_length (float): frame length for spectrogram (default: 20.0) + frame_shift (float): length of hop between STFT (default: 10.0) + del_silence (bool): flag indication whether to apply delete silence or not (default: False) + num_mels (int): the number of mfc coefficients to retain. (default: 80) + apply_spec_augment (bool): flag indication whether to apply spec augment or not (default: True) + apply_noise_augment (bool): flag indication whether to apply noise augment or not (default: False) + apply_time_stretch_augment (bool): flag indication whether to apply time stretch augment or not (default: False) + apply_joining_augment (bool): flag indication whether to apply audio joining augment or not (default: False) + """ + name: str = field( + default="melspectrogram", metadata={"help": "Name of dataset."} + ) + sample_rate: int = field( + default=16000, metadata={"help": "Sampling rate of audio"} + ) + frame_length: float = field( + default=20.0, metadata={"help": "Frame length for spectrogram"} + ) + frame_shift: float = field( + default=10.0, metadata={"help": "Length of hop between STFT"} + ) + del_silence: bool = field( + default=False, metadata={"help": "Flag indication whether to apply delete silence or not"} + ) + num_mels: int = field( + default=80, metadata={"help": "The number of mfc coefficients to retain."} + ) + apply_spec_augment: bool = field( + default=True, metadata={"help": "Flag indication whether to apply spec augment or not"} + ) + apply_noise_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply noise augment or not"} + ) + apply_time_stretch_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply time stretch augment or not"} + ) + apply_joining_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply audio joining augment or not"} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/melspectrogram/melspectrogram.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/melspectrogram/melspectrogram.py new file mode 100644 index 000000000..219b9f40e --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/melspectrogram/melspectrogram.py @@ -0,0 +1,72 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any 
person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import numpy as np + +from ... import register_audio_feature_transform +from ...audio.melspectrogram.configuration import MelSpectrogramConfigs +from ....utils import LIBROSA_IMPORT_ERROR + + +@register_audio_feature_transform("melspectrogram", dataclass=MelSpectrogramConfigs) +class MelSpectrogramFeatureTransform(object): + r""" + Create MelSpectrogram for a raw audio signal. This is a composition of Spectrogram and MelScale. + + Args: + configs (DictConfig): configuraion set + + Returns: + Tensor: A mel-spectrogram feature. The shape is ``(seq_length, num_mels)`` + """ + def __init__(self, configs) -> None: + super(MelSpectrogramFeatureTransform, self).__init__() + try: + import librosa + except ImportError: + raise ImportError(LIBROSA_IMPORT_ERROR) + self.sample_rate = configs.audio.sample_rate + self.num_mels = configs.audio.num_mels + self.n_fft = int(round(configs.audio.sample_rate * 0.001 * configs.audio.frame_length)) + self.hop_length = int(round(configs.audio.sample_rate * 0.001 * configs.audio.frame_shift)) + self.function = librosa.feature.melspectrogram + self.power_to_db = librosa.power_to_db + + def __call__(self, signal: np.ndarray) -> np.ndarray: + """ + Provides feature extraction + + Inputs: + signal (np.ndarray): audio signal + + Returns: + feature (np.ndarray): feature extract by sub-class + """ + melspectrogram = self.function( + y=signal, + sr=self.sample_rate, + n_mels=self.num_mels, + n_fft=self.n_fft, + hop_length=self.hop_length, + ) + melspectrogram = self.power_to_db(melspectrogram, ref=np.max) + return melspectrogram diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/mfcc/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/mfcc/__init__.py new file mode 100644 index 000000000..9a083c67f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/mfcc/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included 
in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/mfcc/configuration.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/mfcc/configuration.py new file mode 100644 index 000000000..d20882d8b --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/mfcc/configuration.py @@ -0,0 +1,79 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from ....dataclass.configurations import OpenspeechDataclass + + +@dataclass +class MFCCConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.data.audio.MFCCFeatureTransform`. + + It is used to initiated an `MFCCFeatureTransform` feature transform. + + Configuration objects inherit from :class: `~openspeech.dataclass.OpenspeechDataclass`. + + Args: + name (str): name of feature transform. (default: mfcc) + sample_rate (int): sampling rate of audio (default: 16000) + frame_length (float): frame length for spectrogram (default: 20.0) + frame_shift (float): length of hop between STFT (default: 10.0) + del_silence (bool): flag indication whether to apply delete silence or not (default: False) + num_mels (int): the number of mfc coefficients to retain. 
(default: 40) + apply_spec_augment (bool): flag indication whether to apply spec augment or not (default: True) + apply_noise_augment (bool): flag indication whether to apply noise augment or not (default: False) + apply_time_stretch_augment (bool): flag indication whether to apply time stretch augment or not (default: False) + apply_joining_augment (bool): flag indication whether to apply audio joining augment or not (default: False) + """ + name: str = field( + default="mfcc", metadata={"help": "Name of dataset."} + ) + sample_rate: int = field( + default=16000, metadata={"help": "Sampling rate of audio"} + ) + frame_length: float = field( + default=20.0, metadata={"help": "Frame length for spectrogram"} + ) + frame_shift: float = field( + default=10.0, metadata={"help": "Length of hop between STFT"} + ) + del_silence: bool = field( + default=False, metadata={"help": "Flag indication whether to apply delete silence or not"} + ) + num_mels: int = field( + default=40, metadata={"help": "The number of mfc coefficients to retain."} + ) + apply_spec_augment: bool = field( + default=True, metadata={"help": "Flag indication whether to apply spec augment or not"} + ) + apply_noise_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply noise augment or not"} + ) + apply_time_stretch_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply time stretch augment or not"} + ) + apply_joining_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply audio joining augment or not"} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/mfcc/mfcc.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/mfcc/mfcc.py new file mode 100644 index 000000000..4cfd61003 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/mfcc/mfcc.py @@ -0,0 +1,77 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import numpy as np + +from ... import register_audio_feature_transform +from ...audio.mfcc.configuration import MFCCConfigs +from ....utils import LIBROSA_IMPORT_ERROR + + +@register_audio_feature_transform("mfcc", dataclass=MFCCConfigs) +class MFCCFeatureTransform(object): + r""" + Create the Mel-frequency cepstrum coefficients from an audio signal. + + By default, this calculates the MFCC on the DB-scaled Mel spectrogram. 
+ This is not the textbook implementation, but is implemented here to + give consistency with librosa. + + This output depends on the maximum value in the input spectrogram, and so + may return different values for an audio clip split into snippets vs. a + a full clip. + + Args: + configs (DictConfig): configuraion set + + Returns: + Tensor: A mfcc feature. The shape is ``(seq_length, num_mels)`` + """ + def __init__(self, configs) -> None: + super(MFCCFeatureTransform, self).__init__() + try: + import librosa + except ImportError: + raise ImportError(LIBROSA_IMPORT_ERROR) + self.sample_rate = configs.audio.sample_rate + self.num_mels = configs.audio.num_mels + self.n_fft = int(round(configs.audio.sample_rate * 0.001 * configs.audio.frame_length)) + self.hop_length = int(round(configs.audio.sample_rate * 0.001 * configs.audio.frame_shift)) + self.function = librosa.feature.mfcc + + def __call__(self, signal: np.ndarray) -> np.ndarray: + """ + Provides feature extraction + + Inputs: + signal (np.ndarray): audio signal + + Returns: + feature (np.ndarray): feature extract by sub-class + """ + return self.function( + y=signal, + sr=self.sample_rate, + n_mfcc=self.num_mels, + n_fft=self.n_fft, + hop_length=self.hop_length, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/spectrogram/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/spectrogram/__init__.py new file mode 100644 index 000000000..9a083c67f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/spectrogram/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
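Each transform above converts the millisecond-valued `frame_length`/`frame_shift` settings into sample counts with `int(round(sample_rate * 0.001 * value))`. A minimal sketch of that arithmetic feeding the `librosa.feature.mfcc` call used by `MFCCFeatureTransform`; the waveform is a placeholder:

import numpy as np
import librosa

sample_rate, frame_length_ms, frame_shift_ms = 16000, 20.0, 10.0

# 20 ms at 16 kHz -> 320-sample window; 10 ms -> 160-sample hop.
n_fft = int(round(sample_rate * 0.001 * frame_length_ms))      # 320
hop_length = int(round(sample_rate * 0.001 * frame_shift_ms))  # 160

signal = np.random.randn(16000).astype(np.float32)  # placeholder audio
mfcc = librosa.feature.mfcc(y=signal, sr=sample_rate, n_mfcc=40,
                            n_fft=n_fft, hop_length=hop_length)
print(mfcc.shape)  # (40, num_frames)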
\ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/spectrogram/configuration.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/spectrogram/configuration.py new file mode 100644 index 000000000..436c13e09 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/spectrogram/configuration.py @@ -0,0 +1,80 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from ....dataclass.configurations import OpenspeechDataclass + + +@dataclass +class SpectrogramConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.data.audio.SpectrogramTransform`. + + It is used to initiated an `SpectrogramTransform` feature transform. + + Configuration objects inherit from :class: `~openspeech.dataclass.OpenspeechDataclass`. + + Args: + name (str): name of feature transform. (default: spectrogram) + sample_rate (int): sampling rate of audio (default: 16000) + frame_length (float): frame length for spectrogram (default: 20.0) + frame_shift (float): length of hop between STFT (default: 10.0) + del_silence (bool): flag indication whether to apply delete silence or not (default: False) + num_mels (int): the number of mfc coefficients to retain. 
(default: 161) + apply_spec_augment (bool): flag indication whether to apply spec augment or not (default: True) + apply_noise_augment (bool): flag indication whether to apply noise augment or not (default: False) + apply_time_stretch_augment (bool): flag indication whether to apply time stretch augment or not (default: False) + apply_joining_augment (bool): flag indication whether to apply audio joining augment or not (default: False) + """ + name: str = field( + default="spectrogram", metadata={"help": "Name of dataset."} + ) + sample_rate: int = field( + default=16000, metadata={"help": "Sampling rate of audio"} + ) + frame_length: float = field( + default=20.0, metadata={"help": "Frame length for spectrogram"} + ) + frame_shift: float = field( + default=10.0, metadata={"help": "Length of hop between STFT"} + ) + del_silence: bool = field( + default=False, metadata={"help": "Flag indication whether to apply delete silence or not"} + ) + num_mels: int = field( + default=161, metadata={"help": "Spectrogram is independent of mel, but uses the 'num_mels' variable " + "to unify feature size variables "} + ) + apply_spec_augment: bool = field( + default=True, metadata={"help": "Flag indication whether to apply spec augment or not"} + ) + apply_noise_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply noise augment or not"} + ) + apply_time_stretch_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply time stretch augment or not"} + ) + apply_joining_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply audio joining augment or not"} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/spectrogram/spectrogram.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/spectrogram/spectrogram.py new file mode 100644 index 000000000..89525dc60 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/audio/spectrogram/spectrogram.py @@ -0,0 +1,65 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import numpy as np +import torch +from torch import Tensor + +from ... 
import register_audio_feature_transform +from ...audio.spectrogram.configuration import SpectrogramConfigs + + +@register_audio_feature_transform("spectrogram", dataclass=SpectrogramConfigs) +class SpectrogramFeatureTransform(object): + r""" + Create a spectrogram from a audio signal. + + Args: + configs (DictConfig): configuraion set + + Returns: + Tensor: A spectrogram feature. The shape is ``(seq_length, num_mels)`` + """ + def __init__(self, configs) -> None: + super(SpectrogramFeatureTransform, self).__init__() + self.n_fft = int(round(configs.audio.sample_rate * 0.001 * configs.audio.frame_length)) + self.hop_length = int(round(configs.audio.sample_rate * 0.001 * configs.audio.frame_shift)) + self.function = torch.stft + + def __call__(self, signal: np.ndarray) -> np.ndarray: + """ + Provides feature extraction + + Inputs: + signal (np.ndarray): audio signal + + Returns: + feature (np.ndarray): feature extract by sub-class + """ + spectrogram = self.function( + Tensor(signal), self.n_fft, hop_length=self.hop_length, + win_length=self.n_fft, window=torch.hamming_window(self.n_fft), + center=False, normalized=False, onesided=True + ) + spectrogram = (spectrogram[:, :, 0].pow(2) + spectrogram[:, :, 1].pow(2)).pow(0.5) + spectrogram = np.log1p(spectrogram.numpy()) + return spectrogram diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/sampler.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/sampler.py new file mode 100644 index 000000000..89c64d9d4 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/sampler.py @@ -0,0 +1,95 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import numpy as np +from torch.utils.data import Sampler + +from .audio.load import load_audio + + +class RandomSampler(Sampler): + r""" + Implementation of a Random Sampler for sampling the dataset. 
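One caveat about `SpectrogramFeatureTransform` above: it calls `torch.stft` without `return_complex` and then indexes a trailing real/imaginary dimension, which matches older PyTorch behaviour; recent releases expect `return_complex` to be passed explicitly for real inputs. A hedged sketch of an equivalent magnitude spectrogram with the complex API, using the same 20 ms/10 ms window settings:

import numpy as np
import torch

n_fft, hop_length = 320, 160
signal = torch.randn(16000)  # placeholder waveform

stft = torch.stft(
    signal, n_fft, hop_length=hop_length, win_length=n_fft,
    window=torch.hamming_window(n_fft), center=False,
    normalized=False, onesided=True, return_complex=True,
)
spectrogram = np.log1p(stft.abs().numpy())  # |STFT| = sqrt(re^2 + im^2), then log1p as above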
+ + Args: + data_source (torch.utils.data.Dataset): dataset to sample from + batch_size (int): size of batch + drop_last (bool): flat indication whether to drop last batch or not + """ + def __init__(self, data_source, batch_size: int = 32, drop_last: bool = False) -> None: + super(RandomSampler, self).__init__(data_source) + self.batch_size = batch_size + self.data_source = data_source + ids = list(range(0, len(data_source))) + self.bins = [ids[i:i + batch_size] for i in range(0, len(ids), batch_size)] + self.drop_last = drop_last + + def __iter__(self): + for ids in self.bins: + np.random.shuffle(ids) + yield ids + + def __len__(self): + return len(self.bins) + + def shuffle(self, epoch): + np.random.shuffle(self.bins) + + +class SmartBatchingSampler(Sampler): + """ + Batching with similar sequence length. + + Args: + data_source (torch.utils.data.Dataset): dataset to sample from + batch_size (int): size of batch + drop_last (bool): flat indication whether to drop last batch or not + """ + def __init__(self, data_source, batch_size: int = 32, drop_last: bool = False) -> None: + super(SmartBatchingSampler, self).__init__(data_source) + self.batch_size = batch_size + self.data_source = data_source + + audio_lengths = [self._get_audio_length(audio_path) for audio_path in data_source.audio_paths] + audio_indices = [idx for idx in range(len(data_source.audio_paths))] + + pack_by_length = list(zip(audio_lengths, audio_indices)) + sort_by_length = sorted(pack_by_length) + audio_lengths, audio_indices = zip(*sort_by_length) + + self.bins = [audio_indices[i:i + batch_size] for i in range(0, len(audio_indices), batch_size)] + self.drop_last = drop_last + + def __iter__(self): + for ids in self.bins: + np.random.shuffle(list(ids)) + yield ids + + def _get_audio_length(self, audio_path): + return len(load_audio(os.path.join(self.data_source.dataset_path, audio_path), sample_rate=16000)) + + def __len__(self): + return len(self.bins) + + def shuffle(self, epoch): + np.random.shuffle(self.bins) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/text/data_loader.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/text/data_loader.py new file mode 100644 index 000000000..151af593e --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/text/data_loader.py @@ -0,0 +1,87 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
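Both samplers above yield whole bins of indices rather than single indices, so they are meant to be passed to a `DataLoader` as `batch_sampler` (as `TextDataLoader` below does). A toy usage sketch with an illustrative stand-in dataset, assuming the `openspeech` package from this patch is importable:

import numpy as np
from torch.utils.data import DataLoader, Dataset

from openspeech.data.sampler import RandomSampler  # the class defined above

class ToyDataset(Dataset):
    """Illustrative stand-in: items are 1-D arrays of varying length."""
    def __init__(self, n: int = 10):
        self.items = [np.random.randn(np.random.randint(50, 200)) for _ in range(n)]
    def __getitem__(self, idx):
        return self.items[idx]
    def __len__(self):
        return len(self.items)

dataset = ToyDataset()
sampler = RandomSampler(dataset, batch_size=4)
loader = DataLoader(dataset, batch_sampler=sampler, collate_fn=list)

for batch in loader:
    print(len(batch))  # 4, 4, 2 for the 10-item toy dataset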
+ +import torch +from torch.utils.data import DataLoader + + +def _collate_fn(batch, pad_id: int = 0): + r""" + Functions that pad to the maximum sequence length + + Args: + batch (tuple): tuple contains input and target tensors + pad_id (int): identification of pad token + + Returns: + inputs (torch.FloatTensor): tensor contains input sequences. + """ + def seq_length_(p): + return len(p[0]) + + batch = sorted(batch, key=lambda sample: sample[0].size(0), reverse=True) + + seq_lengths = [len(s[0]) for s in batch] + + max_seq_sample = max(batch, key=seq_length_)[0] + max_seq_size = max_seq_sample.size(0) + + batch_size = len(batch) + + inputs = torch.zeros(batch_size, max_seq_size).fill_(pad_id).long() + targets = torch.zeros(batch_size, max_seq_size).fill_(pad_id).long() + + for x in range(batch_size): + sample = batch[x] + input_var = sample[0] + target = sample[1] + inputs[x].narrow(0, 0, len(input_var)).copy_(torch.LongTensor(input_var)) + targets[x].narrow(0, 0, len(target)).copy_(torch.LongTensor(target)) + + seq_lengths = torch.IntTensor(seq_lengths) + + return inputs, seq_lengths, targets + + +class TextDataLoader(DataLoader): + r""" + Text Data Loader + + Args: + dataset (torch.utils.data.Dataset): dataset from which to load the data. + num_workers (int): how many subprocesses to use for data loading. + batch_sampler (torch.utils.data.sampler.Sampler): defines the strategy to draw samples from the dataset. + """ + def __init__( + self, + dataset: torch.utils.data.Dataset, + num_workers: int, + batch_sampler: torch.utils.data.sampler.Sampler, + **kwargs, + ) -> None: + super(TextDataLoader, self).__init__( + dataset=dataset, + num_workers=num_workers, + batch_sampler=batch_sampler, + **kwargs, + ) + self.collate_fn = _collate_fn diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/data/text/dataset.py b/audio/speech_recognition/conformer/pytorch/openspeech/data/text/dataset.py new file mode 100644 index 000000000..b7e7f879f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/data/text/dataset.py @@ -0,0 +1,77 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import logging + +from torch.utils.data import Dataset + +logger = logging.getLogger(__name__) + + +class TextDataset(Dataset): + """ + Dataset for language modeling. 
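A toy walk-through of `_collate_fn` above: the batch is sorted by input length (longest first), and both inputs and targets are right-padded with `pad_id` up to the longest input in the batch. With two pairs shaped the way `TextDataset` below produces them (`sos_id=1`, `eos_id=2`, other ids arbitrary):

import torch

# _collate_fn refers to the function defined in data_loader.py above.
batch = [
    (torch.LongTensor([1, 5, 6]), torch.LongTensor([5, 6, 2])),        # shorter pair
    (torch.LongTensor([1, 7, 8, 9]), torch.LongTensor([7, 8, 9, 2])),  # longer pair
]

inputs, seq_lengths, targets = _collate_fn(batch, pad_id=0)

print(inputs)       # tensor([[1, 7, 8, 9], [1, 5, 6, 0]])  sorted longest-first, padded
print(seq_lengths)  # tensor([4, 3], dtype=torch.int32)
print(targets)      # tensor([[7, 8, 9, 2], [5, 6, 2, 0]])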
+ + Args: + transcripts (list): list of transcript + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + """ + def __init__(self, transcripts: list, tokenizer): + super(TextDataset, self).__init__() + self.transcripts = transcripts + self.tokenizer = tokenizer + self.sos_id = tokenizer.sos_id + self.eos_id = tokenizer.eos_id + + def _get_inputs(self, transcript): + tokens = transcript.split(' ') + transcript = [int(self.sos_id)] + + for token in tokens: + transcript.append(int(token)) + + return transcript + + def _get_targets(self, transcript): + tokens = transcript.split(' ') + transcript = list() + + for token in tokens: + transcript.append(int(token)) + + transcript.append(int(self.eos_id)) + + return transcript + + def __getitem__(self, idx): + transcript = self.tokenizer(self.transcripts[idx]) + inputs = torch.LongTensor(self._get_inputs(transcript)) + targets = torch.LongTensor(self._get_targets(transcript)) + return inputs, targets + + def __len__(self): + return len(self.transcripts) + + def count(self): + return len(self.transcripts) + diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/dataclass/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/dataclass/__init__.py new file mode 100644 index 000000000..4f160b274 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/dataclass/__init__.py @@ -0,0 +1,87 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
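`TextDataset` above turns each tokenized transcript into an `(inputs, targets)` pair for next-token language-model training: inputs are `<sos>` plus the token ids, targets are the token ids plus `<eos>`, so the two sequences have equal length and targets are the inputs shifted by one position. A toy illustration with hypothetical special-token ids (`sos_id=1`, `eos_id=2`):

transcript = "7 8 9"   # space-separated token ids, as produced by a tokenizer
sos_id, eos_id = 1, 2  # hypothetical special-token ids

tokens = [int(t) for t in transcript.split(' ')]
inputs = [sos_id] + tokens   # [1, 7, 8, 9]  (what _get_inputs builds)
targets = tokens + [eos_id]  # [7, 8, 9, 2]  (what _get_targets builds)

assert len(inputs) == len(targets)  # aligned for next-token prediction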
+ +from .configurations import ( + AugmentConfigs, + LibriSpeechConfigs, + KsponSpeechConfigs, + AIShellConfigs, + LMConfigs, + CPUTrainerConfigs, + GPUTrainerConfigs, + TPUTrainerConfigs, + Fp16GPUTrainerConfigs, + Fp16TPUTrainerConfigs, + Fp64CPUTrainerConfigs, + EvaluationConfigs, + EnsembleEvaluationConfigs, + CPUResumeTrainerConfigs, + GPUResumeTrainerConfigs, + TPUResumeTrainerConfigs, +) + +OPENSPEECH_TRAIN_CONFIGS = [ + "audio", + "augment", + "dataset", + "model", + "criterion", + "lr_scheduler", + "trainer", + "tokenizer", +] + +OPENSPEECH_LM_TRAIN_CONFIGS = [ + "dataset", + "model", + "criterion", + "lr_scheduler", + "trainer", + "tokenizer", +] + +DATASET_DATACLASS_REGISTRY = { + "kspon": KsponSpeechConfigs, + "libri": LibriSpeechConfigs, + "aishell": AIShellConfigs, + "ksponspeech": KsponSpeechConfigs, + "librispeech": LibriSpeechConfigs, + "lm": LMConfigs, +} +TRAINER_DATACLASS_REGISTRY = { + "cpu": CPUTrainerConfigs, + "gpu": GPUTrainerConfigs, + "tpu": TPUTrainerConfigs, + "gpu-fp16": Fp16GPUTrainerConfigs, + "tpu-fp16": Fp16TPUTrainerConfigs, + "cpu-fp64": Fp64CPUTrainerConfigs, + "cpu-resume": CPUResumeTrainerConfigs, + "gpu-resume": GPUResumeTrainerConfigs, + "tpu-resume": TPUResumeTrainerConfigs, +} +AUGMENT_DATACLASS_REGISTRY = { + "default": AugmentConfigs, +} +EVAL_DATACLASS_REGISTRY = { + "default": EvaluationConfigs, + "ensemble": EnsembleEvaluationConfigs, +} \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/dataclass/configurations.py b/audio/speech_recognition/conformer/pytorch/openspeech/dataclass/configurations.py new file mode 100644 index 000000000..571197f4e --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/dataclass/configurations.py @@ -0,0 +1,481 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
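The registries above let a training entry point resolve configuration dataclasses by short name; the classes themselves follow in `configurations.py`. A minimal lookup sketch, assuming the `openspeech` package added by this patch is importable:

from openspeech.dataclass import DATASET_DATACLASS_REGISTRY, TRAINER_DATACLASS_REGISTRY

# Resolve the dataclasses by key and instantiate them with their defaults.
dataset_cfg = DATASET_DATACLASS_REGISTRY["librispeech"]()
trainer_cfg = TRAINER_DATACLASS_REGISTRY["gpu"]()

print(dataset_cfg.manifest_file_path)  # ../../../LibriSpeech/libri_subword_manifest.txt
print(trainer_cfg.batch_size)          # 32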
+ +from dataclasses import dataclass, _MISSING_TYPE, field +from typing import List, Optional, Any + +MISSING = '' + + +@dataclass +class OpenspeechDataclass: + """ OpenSpeech base dataclass that supported fetching attributes and metas """ + + def _get_all_attributes(self) -> List[str]: + return [k for k in self.__dataclass_fields__.keys()] + + def _get_meta(self, attribute_name: str, meta: str, default: Optional[Any] = None) -> Any: + return self.__dataclass_fields__[attribute_name].metadata.get(meta, default) + + def _get_name(self, attribute_name: str) -> str: + return self.__dataclass_fields__[attribute_name].name + + def _get_default(self, attribute_name: str) -> Any: + if hasattr(self, attribute_name): + if str(getattr(self, attribute_name)).startswith("${"): + return str(getattr(self, attribute_name)) + elif str(self.__dataclass_fields__[attribute_name].default).startswith("${"): + return str(self.__dataclass_fields__[attribute_name].default) + elif getattr(self, attribute_name) != self.__dataclass_fields__[attribute_name].default: + return getattr(self, attribute_name) + + f = self.__dataclass_fields__[attribute_name] + if not isinstance(f.default_factory, _MISSING_TYPE): + return f.default_factory() + return f.default + + def _get_type(self, attribute_name: str) -> Any: + return self.__dataclass_fields__[attribute_name].type + + def _get_help(self, attribute_name: str) -> Any: + return self._get_meta(attribute_name, "help") + + +@dataclass +class LibriSpeechConfigs(OpenspeechDataclass): + """ Configuration dataclass that common used """ + dataset: str = field( + default="librispeech", metadata={"help": "Select dataset for training (librispeech, ksponspeech, aishell, lm)"} + ) + dataset_path: str = field( + default=MISSING, metadata={"help": "Path of dataset"} + ) + dataset_download: bool = field( + default=True, metadata={"help": "Flag indication whether to download dataset or not."} + ) + manifest_file_path: str = field( + default="../../../LibriSpeech/libri_subword_manifest.txt", metadata={"help": "Path of manifest file"} + ) + + +@dataclass +class KsponSpeechConfigs(OpenspeechDataclass): + """ Configuration dataclass that common used """ + dataset: str = field( + default="ksponspeech", metadata={"help": "Select dataset for training (librispeech, ksponspeech, aishell, lm)"} + ) + dataset_path: str = field( + default=MISSING, metadata={"help": "Path of dataset"} + ) + test_dataset_path: str = field( + default=MISSING, metadata={"help": "Path of evaluation dataset"} + ) + manifest_file_path: str = field( + default="../../../ksponspeech_manifest.txt", metadata={"help": "Path of manifest file"} + ) + test_manifest_dir: str = field( + default="../../../KsponSpeech_scripts", metadata={"help": "Path of directory contains test manifest files"} + ) + preprocess_mode: str = field( + default="phonetic", metadata={"help": "KsponSpeech preprocess mode {phonetic, spelling}"}, + ) + + +@dataclass +class AIShellConfigs(OpenspeechDataclass): + """ Configuration dataclass that common used """ + dataset: str = field( + default="aishell", metadata={"help": "Select dataset for training (librispeech, ksponspeech, aishell, lm)"} + ) + dataset_path: str = field( + default=MISSING, metadata={"help": "Path of dataset"} + ) + dataset_download: bool = field( + default=True, metadata={"help": "Flag indication whether to download dataset or not."} + ) + manifest_file_path: str = field( + default="../../../data_aishell/aishell_manifest.txt", metadata={"help": "Path of manifest file"} + ) + + +@dataclass 
+class LMConfigs(OpenspeechDataclass): + dataset: str = field( + default="lm", metadata={"help": "Select dataset for training (librispeech, ksponspeech, aishell, lm)"} + ) + dataset_path: str = field( + default=MISSING, metadata={"help": "Path of dataset"} + ) + valid_ratio: float = field( + default=0.05, metadata={"help": "Ratio of validation data"} + ) + test_ratio: float = field( + default=0.05, metadata={"help": "Ratio of test data"} + ) + + +@dataclass +class AugmentConfigs(OpenspeechDataclass): + apply_spec_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply spec augment or not"} + ) + apply_noise_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply noise augment or not " + "Noise augment requires `noise_dataset_path`. " + "`noise_dataset_dir` should be contain audio files."} + ) + apply_joining_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply joining augment or not " + "If true, create a new audio file by connecting two audio randomly"} + ) + apply_time_stretch_augment: bool = field( + default=False, metadata={"help": "Flag indication whether to apply spec augment or not"} + ) + freq_mask_para: int = field( + default=27, metadata={"help": "Hyper Parameter for freq masking to limit freq masking length"} + ) + freq_mask_num: int = field( + default=2, metadata={"help": "How many freq-masked area to make"} + ) + time_mask_num: int = field( + default=4, metadata={"help": "How many time-masked area to make"} + ) + noise_dataset_dir: str = field( + default='None', metadata={"help": "How many time-masked area to make"} + ) + noise_level: float = field( + default=0.7, metadata={"help": "Noise adjustment level"} + ) + time_stretch_min_rate: float = field( + default=0.7, metadata={"help": "Minimum rate of audio time stretch"} + ) + time_stretch_max_rate: float = field( + default=1.4, metadata={"help": "Maximum rate of audio time stretch"} + ) + + +@dataclass +class BaseTrainerConfigs(OpenspeechDataclass): + """ Base trainer dataclass """ + seed: int = field( + default=1, metadata={"help": "Seed for training."} + ) + accelerator: str = field( + default="dp", metadata={"help": "Previously known as distributed_backend (dp, ddp, ddp2, etc…)."} + ) + accumulate_grad_batches: int = field( + default=1, metadata={"help": "Accumulates grads every k batches or as set up in the dict."} + ) + num_workers: int = field( + default=4, metadata={"help": "The number of cpu cores"} + ) + batch_size: int = field( + default=32, metadata={"help": "Size of batch"} + ) + check_val_every_n_epoch: int = field( + default=1, metadata={"help": "Check val every n train epochs."} + ) + gradient_clip_val: float = field( + default=5.0, metadata={"help": "0 means don’t clip."} + ) + logger: str = field( + default="wandb", metadata={"help": "Training logger. {wandb, tensorboard}"} + ) + max_epochs: int = field( + default=20, metadata={"help": "Stop training once this number of epochs is reached."} + ) + save_checkpoint_n_steps: int = field( + default=10000, metadata={"help": "Save a checkpoint every N steps."} + ) + auto_scale_batch_size: str = field( + default="binsearch", metadata={"help": "If set to True, will initially run a batch size finder trying to find " + "the largest batch size that fits into memory."} + ) + sampler: str = field( + default="smart", metadata={"help": "smart: batching with similar sequence length." 
+ "else: random batch"} + ) + + +@dataclass +class CPUResumeTrainerConfigs(BaseTrainerConfigs): + name: str = field( + default="cpu-resume", metadata={"help": "Trainer name"} + ) + checkpoint_path: str = field( + default=MISSING, metadata={"help": "Path of model checkpoint."} + ) + device: str = field( + default="cpu", metadata={"help": "Training device."} + ) + use_cuda: bool = field( + default=False, metadata={"help": "If set True, will train with GPU"} + ) + + +@dataclass +class GPUResumeTrainerConfigs(BaseTrainerConfigs): + name: str = field( + default="gpu-resume", metadata={"help": "Trainer name"} + ) + checkpoint_path: str = field( + default=MISSING, metadata={"help": "Path of model checkpoint."} + ) + device: str = field( + default="gpu", metadata={"help": "Training device."} + ) + use_cuda: bool = field( + default=True, metadata={"help": "If set True, will train with GPU"} + ) + auto_select_gpus: bool = field( + default=True, metadata={"help": "If enabled and gpus is an integer, pick available gpus automatically."} + ) + + +@dataclass +class TPUResumeTrainerConfigs(BaseTrainerConfigs): + name: str = field( + default="tpu-resume", metadata={"help": "Trainer name"} + ) + checkpoint_path: str = field( + default=MISSING, metadata={"help": "Path of model checkpoint."} + ) + device: str = field( + default="tpu", metadata={"help": "Training device."} + ) + use_cuda: bool = field( + default=False, metadata={"help": "If set True, will train with GPU"} + ) + use_tpu: bool = field( + default=True, metadata={"help": "If set True, will train with GPU"} + ) + tpu_cores: int = field( + default=8, metadata={"help": "Number of TPU cores"} + ) + + +@dataclass +class CPUTrainerConfigs(BaseTrainerConfigs): + name: str = field( + default="cpu", metadata={"help": "Trainer name"} + ) + device: str = field( + default="cpu", metadata={"help": "Training device."} + ) + use_cuda: bool = field( + default=False, metadata={"help": "If set True, will train with GPU"} + ) + + +@dataclass +class GPUTrainerConfigs(BaseTrainerConfigs): + """ GPU trainer dataclass """ + name: str = field( + default="gpu", metadata={"help": "Trainer name"} + ) + device: str = field( + default="gpu", metadata={"help": "Training device."} + ) + use_cuda: bool = field( + default=True, metadata={"help": "If set True, will train with GPU"} + ) + auto_select_gpus: bool = field( + default=True, metadata={"help": "If enabled and gpus is an integer, pick available gpus automatically."} + ) + + +@dataclass +class TPUTrainerConfigs(BaseTrainerConfigs): + name: str = field( + default="tpu", metadata={"help": "Trainer name"} + ) + device: str = field( + default="tpu", metadata={"help": "Training device."} + ) + use_cuda: bool = field( + default=False, metadata={"help": "If set True, will train with GPU"} + ) + use_tpu: bool = field( + default=True, metadata={"help": "If set True, will train with GPU"} + ) + tpu_cores: int = field( + default=8, metadata={"help": "Number of TPU cores"} + ) + + +@dataclass +class Fp16GPUTrainerConfigs(GPUTrainerConfigs): + name: str = field( + default="gpu-fp16", metadata={"help": "Trainer name"} + ) + precision: int = field( + default=16, metadata={"help": "Double precision (64), full precision (32) or half precision (16). 
" + "Can be used on CPU, GPU or TPUs."} + ) + amp_backend: str = field( + default="apex", metadata={"help": "The mixed precision backend to use (“native” or “apex”)"} + ) + + +@dataclass +class Fp16TPUTrainerConfigs(TPUTrainerConfigs): + name: str = field( + default="tpu-fp16", metadata={"help": "Trainer name"} + ) + precision: int = field( + default=16, metadata={"help": "Double precision (64), full precision (32) or half precision (16). " + "Can be used on CPU, GPU or TPUs."} + ) + amp_backend: str = field( + default="apex", metadata={"help": "The mixed precision backend to use (“native” or “apex”)"} + ) + + +@dataclass +class Fp64CPUTrainerConfigs(CPUTrainerConfigs): + name: str = field( + default="cpu-fp64", metadata={"help": "Trainer name"} + ) + precision: int = field( + default=64, metadata={"help": "Double precision (64), full precision (32) or half precision (16). " + "Can be used on CPU, GPU or TPUs."} + ) + amp_backend: str = field( + default="apex", metadata={"help": "The mixed precision backend to use (“native” or “apex”)"} + ) + + +@dataclass +class LearningRateSchedulerConfigs(OpenspeechDataclass): + """ Super class of learning rate dataclass """ + lr: float = field( + default=1e-04, metadata={"help": "Learning rate"} + ) + + +@dataclass +class TokenizerConfigs(OpenspeechDataclass): + """ Super class of tokenizer dataclass """ + sos_token: str = field( + default="", metadata={"help": "Start of sentence token"} + ) + eos_token: str = field( + default="", metadata={"help": "End of sentence token"} + ) + pad_token: str = field( + default="", metadata={"help": "Pad token"} + ) + blank_token: str = field( + default="", metadata={"help": "Blank token (for CTC training)"} + ) + encoding: str = field( + default="utf-8", metadata={"help": "Encoding of vocab"} + ) + + +@dataclass +class EvaluationConfigs(OpenspeechDataclass): + model_name: str = field( + default=MISSING, metadata={"help": "Model name."} + ) + dataset_path: str = field( + default=MISSING, metadata={"help": "Path of dataset."} + ) + checkpoint_path: str = field( + default=MISSING, metadata={"help": "Path of model checkpoint."} + ) + manifest_file_path: str = field( + default=MISSING, metadata={"help": "Path of evaluation manifest file."} + ) + num_workers: int = field( + default=4, metadata={"help": "Number of worker."} + ) + batch_size: int = field( + default=32, metadata={"help": "Batch size."} + ) + beam_size: int = field( + default=1, metadata={"help": "Beam size of beam search."} + ) + + +@dataclass +class EnsembleEvaluationConfigs(OpenspeechDataclass): + model_names: str = field( + default=MISSING, metadata={"help": "List of model name."} + ) + dataset_paths: str = field( + default=MISSING, metadata={"help": "Path of dataset."} + ) + checkpoint_paths: str = field( + default=MISSING, metadata={"help": "List of model checkpoint path."} + ) + manifest_file_path: str = field( + default=MISSING, metadata={"help": "Path of evaluation manifest file."} + ) + ensemble_method: str = field( + default="vanilla", metadata={"help": "Method of ensemble (vanilla, weighted)"} + ) + ensemble_weights: str = field( + default="(1.0, 1.0, 1.0 ..)", metadata={"help": "Weights of ensemble models."} + ) + num_workers: int = field( + default=4, metadata={"help": "Number of worker."} + ) + batch_size: int = field( + default=32, metadata={"help": "Batch size."} + ) + beam_size: int = field( + default=1, metadata={"help": "Beam size of beam search."} + ) + + +def generate_openspeech_configs_with_help(): + from openspeech.dataclass import 
OPENSPEECH_TRAIN_CONFIGS, TRAINER_DATACLASS_REGISTRY, AUGMENT_DATACLASS_REGISTRY, \ + DATASET_DATACLASS_REGISTRY + from openspeech.models import MODEL_DATACLASS_REGISTRY + from openspeech.criterion import CRITERION_DATACLASS_REGISTRY + from openspeech.data import AUDIO_FEATURE_TRANSFORM_DATACLASS_REGISTRY + from openspeech.optim.scheduler import SCHEDULER_DATACLASS_REGISTRY + from openspeech.tokenizers import TOKENIZER_DATACLASS_REGISTRY + + registries = { + "audio": AUDIO_FEATURE_TRANSFORM_DATACLASS_REGISTRY, + "augment": AUGMENT_DATACLASS_REGISTRY, + "trainer": TRAINER_DATACLASS_REGISTRY, + "model": MODEL_DATACLASS_REGISTRY, + "criterion": CRITERION_DATACLASS_REGISTRY, + "dataset": DATASET_DATACLASS_REGISTRY, + "lr_scheduler": SCHEDULER_DATACLASS_REGISTRY, + "tokenizer": TOKENIZER_DATACLASS_REGISTRY, + } + + with open("configuration.md", "w") as f: + for group in OPENSPEECH_TRAIN_CONFIGS: + dataclass_registry = registries[group] + + f.write(f"## `{group}`\n") + + for k, v in dataclass_registry.items(): + f.write(f"### `{k}` \n") + v = v() + for kv in v.__dataclass_fields__: + f.write(f"- `{kv}` : {v._get_help(kv)}\n") diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/dataclass/initialize.py b/audio/speech_recognition/conformer/pytorch/openspeech/dataclass/initialize.py new file mode 100644 index 000000000..f3424ebc8 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/dataclass/initialize.py @@ -0,0 +1,96 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from hydra.core.config_store import ConfigStore + + +def hydra_train_init() -> None: + r""" initialize ConfigStore for hydra-train """ + from openspeech.dataclass import OPENSPEECH_TRAIN_CONFIGS, DATASET_DATACLASS_REGISTRY, TRAINER_DATACLASS_REGISTRY + from openspeech.models import MODEL_DATACLASS_REGISTRY + from openspeech.criterion import CRITERION_DATACLASS_REGISTRY + from openspeech.data import AUDIO_FEATURE_TRANSFORM_DATACLASS_REGISTRY + from openspeech.optim.scheduler import SCHEDULER_DATACLASS_REGISTRY + from openspeech.tokenizers import TOKENIZER_DATACLASS_REGISTRY + from openspeech.dataclass import AUGMENT_DATACLASS_REGISTRY + + registries = { + "audio": AUDIO_FEATURE_TRANSFORM_DATACLASS_REGISTRY, + "augment": AUGMENT_DATACLASS_REGISTRY, + "dataset": DATASET_DATACLASS_REGISTRY, + "trainer": TRAINER_DATACLASS_REGISTRY, + "model": MODEL_DATACLASS_REGISTRY, + "criterion": CRITERION_DATACLASS_REGISTRY, + "lr_scheduler": SCHEDULER_DATACLASS_REGISTRY, + "tokenizer": TOKENIZER_DATACLASS_REGISTRY, + } + + cs = ConfigStore.instance() + + for group in OPENSPEECH_TRAIN_CONFIGS: + dataclass_registry = registries[group] + + for k, v in dataclass_registry.items(): + cs.store(group=group, name=k, node=v, provider="openspeech") + + +def hydra_lm_train_init() -> None: + from openspeech.dataclass import OPENSPEECH_LM_TRAIN_CONFIGS, DATASET_DATACLASS_REGISTRY, TRAINER_DATACLASS_REGISTRY + from openspeech.models import MODEL_DATACLASS_REGISTRY + from openspeech.criterion import CRITERION_DATACLASS_REGISTRY + from openspeech.optim.scheduler import SCHEDULER_DATACLASS_REGISTRY + from openspeech.tokenizers import TOKENIZER_DATACLASS_REGISTRY + + registries = { + "dataset": DATASET_DATACLASS_REGISTRY, + "trainer": TRAINER_DATACLASS_REGISTRY, + "model": MODEL_DATACLASS_REGISTRY, + "criterion": CRITERION_DATACLASS_REGISTRY, + "lr_scheduler": SCHEDULER_DATACLASS_REGISTRY, + "tokenizer": TOKENIZER_DATACLASS_REGISTRY, + } + + cs = ConfigStore.instance() + + for group in OPENSPEECH_LM_TRAIN_CONFIGS: + dataclass_registry = registries[group] + + for k, v in dataclass_registry.items(): + cs.store(group=group, name=k, node=v, provider="openspeech") + + +def hydra_eval_init() -> None: + from openspeech.data import AUDIO_FEATURE_TRANSFORM_DATACLASS_REGISTRY + from openspeech.dataclass import EVAL_DATACLASS_REGISTRY + + registries = { + "audio": AUDIO_FEATURE_TRANSFORM_DATACLASS_REGISTRY, + "eval": EVAL_DATACLASS_REGISTRY, + } + + cs = ConfigStore.instance() + + for group in registries.keys(): + dataclass_registry = registries[group] + + for k, v in dataclass_registry.items(): + cs.store(group=group, name=k, node=v, provider="openspeech") \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/datasets/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/__init__.py new file mode 100644 index 000000000..52d38c845 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/__init__.py @@ -0,0 +1,60 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: 
+# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import importlib + +DATA_MODULE_REGISTRY = dict() + + +def register_data_module(name: str): + """ + New data module types can be added to OpenSpeech with the :func:`register_data_module` function decorator. + + For example:: + @register_data_module('ksponspeech') + class LightningKsponSpeechDataModule: + (...) + + .. note:: All vocabs must implement the :class:`cls.__name__` interface. + + Args: + name (str): the name of the vocab + """ + + def register_data_module_cls(cls): + if name in DATA_MODULE_REGISTRY: + raise ValueError(f"Cannot register duplicate data module ({name})") + DATA_MODULE_REGISTRY[name] = cls + return cls + + return register_data_module_cls + + +data_module_dir = os.path.dirname(__file__) +for file in os.listdir(data_module_dir): + if os.path.isdir(os.path.join(data_module_dir, file)) and file != '__pycache__': + for subfile in os.listdir(os.path.join(data_module_dir, file)): + path = os.path.join(data_module_dir, file, subfile) + if subfile.endswith(".py"): + data_module_name = subfile[: subfile.find(".py")] if subfile.endswith(".py") else subfile + module = importlib.import_module(f"openspeech.datasets.{file}.{data_module_name}") diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/__init__.py new file mode 100644 index 000000000..9a083c67f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
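For reference, a minimal usage sketch (not part of the patch) of the `register_data_module` registry defined above; the module name `'toy'` and the bare `configs=None` argument are illustrative assumptions, while real callers register names such as `'librispeech'` and pass a Hydra DictConfig.

# Hypothetical sketch of how the data-module registry above is consumed.
from openspeech.datasets import DATA_MODULE_REGISTRY, register_data_module

@register_data_module('toy')
class ToyDataModule:
    def __init__(self, configs):
        self.configs = configs

data_module_cls = DATA_MODULE_REGISTRY['toy']   # looked up by name, e.g. 'librispeech' in real use
data_module = data_module_cls(configs=None)     # real callers pass the composed Hydra configs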
\ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/lit_data_module.py b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/lit_data_module.py new file mode 100644 index 000000000..396cd964a --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/lit_data_module.py @@ -0,0 +1,169 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import logging +from typing import Tuple, Optional + +from openspeech.data.audio.dataset import SpeechToTextDataset +from openspeech.datasets import register_data_module +from openspeech.tokenizers.tokenizer import Tokenizer + + +@register_data_module('librispeech') +class LibriSpeechDataModule(object): + """ + Data Module for LibriSpeech Dataset. LibriSpeech is a corpus of approximately 1000 hours of read + English speech with sampling rate of 16 kHz, prepared by Vassil Panayotov with the assistance of Daniel Povey. + The data is derived from read audiobooks from the LibriVox project, and has been carefully segmented and aligned. + + Args: + configs (DictConfig): configuraion set + """ + #LIBRISPEECH_TRAIN_NUM = 281241 + #LIBRISPEECH_VALID_NUM = 5567 + #LIBRISPEECH_TEST_NUM = 5559 + LIBRISPEECH_PARTS = [ + 'dev-clean', + 'test-clean', + 'dev-other', + 'test-other', + 'train-clean-100', + 'train-clean-360', + 'train-other-500', + ] + + def __init__(self, configs) -> None: + super().__init__() + self.configs = configs + self.dataset = dict() + self.logger = logging.getLogger(__name__) + + def _parse_manifest_file(self, manifest_file_path: str) -> Tuple[list, list]: + """ Parsing manifest file """ + audio_paths = list() + transcripts = list() + + with open(manifest_file_path, encoding='utf-8') as f: + for idx, line in enumerate(f.readlines()): + audio_path, _, transcript = line.split('\t') + transcript = transcript.replace('\n', '') + + audio_paths.append(audio_path) + transcripts.append(transcript) + + return audio_paths, transcripts + + def prepare_data(self) -> Tokenizer: + """ + Prepare librispeech data + + Returns: + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. 
+ """ + if self.configs.tokenizer.unit == 'libri_subword': + from openspeech.datasets.librispeech.preprocess.subword import generate_manifest_files + elif self.configs.tokenizer.unit == 'libri_character': + from openspeech.datasets.librispeech.preprocess.character import generate_manifest_files + else: + raise ValueError(f"Unsupported vocabulary unit: {self.configs.tokenizer.unit}") + + if self.configs.dataset.dataset_download: + self._download_dataset() + + if not os.path.exists(self.configs.dataset.train_manifest_file): + self.logger.info("Manifest file is not exists !!\n" + "Generate manifest files..") + + if hasattr(self.configs.tokenizer, "vocab_size"): + generate_manifest_files( + dataset_path=self.configs.dataset.dataset_path, + manifest_file_path=self.configs.dataset.train_manifest_file, + vocab_path=self.configs.tokenizer.vocab_path, + vocab_size=self.configs.tokenizer.vocab_size, + librispeech_parts=self.configs.dataset.train_parts + ) + else: + generate_manifest_files( + dataset_path=self.configs.dataset.dataset_path, + manifest_file_path=self.configs.dataset.train_manifest_file, + vocab_path=self.configs.tokenizer.vocab_path, + librispeech_parts=self.configs.dataset.train_parts + ) + + if not os.path.exists(self.configs.dataset.eval_manifest_file): + self.logger.info("Manifest file is not exists !!\n" + "Generate manifest files..") + + if hasattr(self.configs.tokenizer, "vocab_size"): + generate_manifest_files( + dataset_path=self.configs.dataset.dataset_path, + manifest_file_path=self.configs.dataset.eval_manifest_file, + vocab_path=self.configs.tokenizer.vocab_path, + vocab_size=self.configs.tokenizer.vocab_size, + librispeech_parts=self.configs.dataset.eval_parts + ) + else: + generate_manifest_files( + dataset_path=self.configs.dataset.dataset_path, + manifest_file_path=self.configs.dataset.eval_manifest_file, + vocab_path=self.configs.tokenizer.vocab_path, + librispeech_parts=self.configs.dataset.eval_parts + ) + + def setup( + self, + stage: Optional[str] = None, + tokenizer: Tokenizer = None, + num_train_samples: int = None, + num_eval_samples: int = None + ) -> None: + train_audio_paths, train_transcripts = self._parse_manifest_file( + self.configs.dataset.train_manifest_file) + eval_audio_paths, eval_transcripts = self._parse_manifest_file( + self.configs.dataset.eval_manifest_file) + + if num_train_samples is None: + num_train_samples = len(train_audio_paths) + if num_eval_samples is None: + num_eval_samples = len(eval_audio_paths) + + audio_paths = { + "train": train_audio_paths[:num_train_samples], + "val": eval_audio_paths[:num_eval_samples] + } + transcripts = { + "train": train_transcripts[:num_train_samples], + "val": eval_transcripts[:num_eval_samples] + } + + for stage in audio_paths.keys(): + self.dataset[stage] = SpeechToTextDataset( + configs=self.configs, + dataset_path=self.configs.dataset.dataset_path, + audio_paths=audio_paths[stage], + transcripts=transcripts[stage], + sos_id=tokenizer.sos_id, + eos_id=tokenizer.eos_id, + apply_spec_augment=self.configs.audio.apply_spec_augment if stage == 'train' else False, + del_silence=self.configs.audio.del_silence if stage == 'train' else False, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/character.py 
b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/character.py new file mode 100644 index 000000000..3313188c6 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/character.py @@ -0,0 +1,116 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import logging +import pandas as pd +import shutil +from typing import Tuple + +from openspeech.datasets.librispeech.preprocess.preprocess import collect_transcripts + + +logger = logging.getLogger(__name__) + + +def _generate_character_labels(labels_dest): + logger.info('create_char_labels started..') + + tokens = ' ABCDEFGHIJKLMNOPQRSTUVWXYZ' + tokens = list(tokens) + + special_tokens = ['', '', '', ''] + tokens.extend(special_tokens) + + # sort together Using zip + label = { + 'id': [x for x in range(len(tokens))], + 'char': tokens, + } + + label_df = pd.DataFrame(label) + label_df.to_csv(labels_dest, encoding="utf-8", index=False) + + +def _load_label(filepath): + char2id = dict() + id2char = dict() + + ch_labels = pd.read_csv(filepath, encoding="utf-8") + + id_list = ch_labels["id"] + char_list = ch_labels["char"] + + for (id_, char) in zip(id_list, char_list): + char2id[char] = id_ + id2char[id_] = char + return char2id, id2char + + +def sentence_to_target(sentence, char2id): + target = str() + + for ch in sentence: + try: + target += (str(char2id[ch]) + ' ') + except KeyError: + continue + + return target[:-1] + + +def generate_manifest_files( + dataset_path: str, + manifest_file_path: str, + vocab_path: str, + librispeech_parts: list = [ + 'train-clean-100', + 'train-clean-360', + 'train-other-500', + 'dev-clean', + 'dev-other', + 'test-clean', + 'test-other'] +) -> None: + """ + Generate manifest files. 
+ Format: {audio_path}\t{transcript}\t{numerical_label} + + Args: + vocab_size (int): size of subword vocab + + Returns: + None + """ + _generate_character_labels(vocab_path) + char2id, id2char = _load_label(vocab_path) + + transcripts_collection = collect_transcripts(dataset_path, librispeech_parts) + + with open(manifest_file_path, 'w') as f: + for idx, part in enumerate(librispeech_parts): + for transcript in transcripts_collection[idx]: + audio_path, transcript = transcript.split('|') + label = sentence_to_target(transcript, char2id) + f.write(f"{audio_path}\t{transcript}\t{label}\n") + + return diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/preprocess.py b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/preprocess.py new file mode 100644 index 000000000..df4d96383 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/preprocess.py @@ -0,0 +1,60 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
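For reference, an illustrative sketch (not part of the patch) of the character-level manifest line format produced by `generate_manifest_files` above; the audio path and the toy `char2id` mapping are made-up values.

# Toy reproduction of the manifest line format: {audio_path}\t{transcript}\t{numerical_label}
char2id = {' ': 0, 'A': 1, 'B': 2, 'C': 3}

def sentence_to_target(sentence, char2id):
    # same rule as character.py: one id per character, space separated, unknown characters skipped
    return ' '.join(str(char2id[ch]) for ch in sentence if ch in char2id)

transcript = "AB C"
label = sentence_to_target(transcript, char2id)                       # "1 2 0 3"
line = f"dev-clean/84/121123/84-121123-0000.flac\t{transcript}\t{label}"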
+ +import os + + +def collect_transcripts( + dataset_path, + librispeech_parts: list = [ + 'train-clean-100', + 'train-clean-360', + 'train-other-500', + 'dev-clean', + 'dev-other', + 'test-clean', + 'test-other'] +): + """ Collect librispeech transcripts """ + transcripts_collection = list() + + for dataset in librispeech_parts: + dataset_transcripts = list() + + for subfolder1 in os.listdir(os.path.join(dataset_path, dataset)): + for subfolder2 in os.listdir(os.path.join(dataset_path, dataset, subfolder1)): + for file in os.listdir(os.path.join(dataset_path, dataset, subfolder1, subfolder2)): + if file.endswith('txt'): + with open(os.path.join(dataset_path, dataset, subfolder1, subfolder2, file)) as f: + for line in f.readlines(): + tokens = line.split() + audio_path = os.path.join(dataset, subfolder1, subfolder2, tokens[0]) + audio_path = f"{audio_path}.flac" + transcript = " ".join(tokens[1:]) + dataset_transcripts.append('%s|%s' % (audio_path, transcript)) + + else: + continue + + transcripts_collection.append(dataset_transcripts) + + return transcripts_collection diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/subword.py b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/subword.py new file mode 100644 index 000000000..27c99098d --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/datasets/librispeech/preprocess/subword.py @@ -0,0 +1,92 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import os +import sentencepiece as spm +import shutil +from typing import Tuple + +from openspeech.datasets.librispeech.preprocess.preprocess import collect_transcripts + +SENTENCEPIECE_MODEL_NAME = "sp" + + +def _prepare_tokenizer(train_transcripts, vocab_size): + """ Prepare sentencepice tokenizer """ + input_file = 'spm_input.txt' + model_type = 'unigram' + + with open(input_file, 'w') as f: + for transcript in train_transcripts: + f.write(f"{transcript.split('|')[-1]}\n") + + spm.SentencePieceTrainer.Train(f"--input={input_file} " + f"--model_prefix={SENTENCEPIECE_MODEL_NAME} " + f"--vocab_size={vocab_size} " + f"--model_type={model_type} " + f"--pad_id=0 " + f"--bos_id=1 " + f"--eos_id=2 " + f"--unk_id=3 " + f"--user_defined_symbols=") + + +def generate_manifest_files( + dataset_path: str, + manifest_file_path: str, + vocab_path: str, + vocab_size: int, + librispeech_parts: list = [ + 'train-clean-100', + 'train-clean-360', + 'train-other-500', + 'dev-clean', + 'dev-other', + 'test-clean', + 'test-other'] +) -> None: + """ + Generate manifest files. + Format: {audio_path}\t{transcript}\t{numerical_label} + + Args: + vocab_size (int): size of subword vocab + + Returns: + None + """ + transcripts_collection = collect_transcripts(dataset_path, librispeech_parts) + _prepare_tokenizer(transcripts_collection[0], vocab_size) + + shutil.copy(f"{SENTENCEPIECE_MODEL_NAME}.model", os.path.join(vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.model")) + shutil.copy(f"{SENTENCEPIECE_MODEL_NAME}.vocab", os.path.join(vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.vocab")) + + sp = spm.SentencePieceProcessor() + sp.Load(os.path.join(vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.model")) + + with open(manifest_file_path, 'w', encoding='utf-8') as f: + for idx, part in enumerate(librispeech_parts): + for transcript in transcripts_collection[idx]: + audio_path, transcript = transcript.split('|') + text = " ".join(sp.EncodeAsPieces(transcript)) + label = " ".join([str(item) for item in sp.EncodeAsIds(transcript)]) + f.write(f"{audio_path}\t{text}\t{label}\n") diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/decoders/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/decoders/__init__.py new file mode 100644 index 000000000..41bf8e369 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/decoders/__init__.py @@ -0,0 +1,27 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
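For reference, a hedged sketch (not part of the patch) of how the SentencePiece model trained by `_prepare_tokenizer` above is used when writing the subword manifest; the model path, transcript, and audio path are illustrative assumptions.

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("sp.model")  # model written by _prepare_tokenizer and copied into vocab_path

transcript = "HELLO WORLD"
text = " ".join(sp.EncodeAsPieces(transcript))                    # subword-piece column of the manifest
label = " ".join(str(i) for i in sp.EncodeAsIds(transcript))      # numeric id column of the manifest
manifest_line = f"train-clean-100/19/198/19-198-0001.flac\t{text}\t{label}\n"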
+ +from .openspeech_decoder import OpenspeechDecoder +from .lstm_attention_decoder import LSTMAttentionDecoder +from .rnn_transducer_decoder import RNNTransducerDecoder +from .transformer_decoder import TransformerDecoder +from .transformer_transducer_decoder import TransformerTransducerDecoder diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/decoders/lstm_attention_decoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/decoders/lstm_attention_decoder.py new file mode 100644 index 000000000..40c7a784f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/decoders/lstm_attention_decoder.py @@ -0,0 +1,251 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import random +import torch +import torch.nn as nn +from typing import Optional, Tuple, Any + +from openspeech.decoders import OpenspeechDecoder +from openspeech.modules import ( + Linear, + View, + LocationAwareAttention, + MultiHeadAttention, + AdditiveAttention, + DotProductAttention, +) + + +class LSTMAttentionDecoder(OpenspeechDecoder): + r""" + Converts higher level features (from encoders) into output utterances + by specifying a probability distribution over sequences of characters. + + Args: + num_classes (int): number of classification + hidden_state_dim (int): the number of features in the decoders hidden state `h` + num_layers (int, optional): number of recurrent layers (default: 2) + rnn_type (str, optional): type of RNN cell (default: lstm) + pad_id (int, optional): index of the pad symbol (default: 0) + sos_id (int, optional): index of the start of sentence symbol (default: 1) + eos_id (int, optional): index of the end of sentence symbol (default: 2) + attn_mechanism (str, optional): type of attention mechanism (default: multi-head) + num_heads (int, optional): number of attention heads. (default: 4) + dropout_p (float, optional): dropout probability of decoders (default: 0.2) + + Inputs: inputs, encoder_outputs, teacher_forcing_ratio + - **inputs** (batch, seq_len, input_size): list of sequences, whose length is the batch size and within which + each sequence is a list of token IDs. It is used for teacher forcing when provided. (default `None`) + - **encoder_outputs** (batch, seq_len, hidden_state_dim): tensor with containing the outputs of the encoders. + Used for attention mechanism (default is `None`). 
+ - **teacher_forcing_ratio** (float): The probability that teacher forcing will be used. A random number is + drawn uniformly from 0-1 for every decoding token, and if the sample is smaller than the given value, + teacher forcing would be used (default is 0). + + Returns: logits + * logits (torch.FloatTensor) : log probabilities of model's prediction + """ + supported_rnns = { + 'lstm': nn.LSTM, + 'gru': nn.GRU, + 'rnn': nn.RNN, + } + + def __init__( + self, + num_classes: int, + max_length: int = 150, + hidden_state_dim: int = 1024, + pad_id: int = 0, + sos_id: int = 1, + eos_id: int = 2, + attn_mechanism: str = 'multi-head', + num_heads: int = 4, + num_layers: int = 2, + rnn_type: str = 'lstm', + dropout_p: float = 0.3, + ) -> None: + super(LSTMAttentionDecoder, self).__init__() + self.hidden_state_dim = hidden_state_dim + self.num_classes = num_classes + self.num_heads = num_heads + self.num_layers = num_layers + self.max_length = max_length + self.eos_id = eos_id + self.sos_id = sos_id + self.pad_id = pad_id + self.attn_mechanism = attn_mechanism.lower() + self.embedding = nn.Embedding(num_classes, hidden_state_dim) + self.input_dropout = nn.Dropout(dropout_p) + rnn_cell = self.supported_rnns[rnn_type.lower()] + self.rnn = rnn_cell( + input_size=hidden_state_dim, + hidden_size=hidden_state_dim, + num_layers=num_layers, + bias=True, + batch_first=True, + dropout=dropout_p, + bidirectional=False, + ) + + if self.attn_mechanism == 'loc': + self.attention = LocationAwareAttention(hidden_state_dim, attn_dim=hidden_state_dim, smoothing=False) + elif self.attn_mechanism == 'multi-head': + self.attention = MultiHeadAttention(hidden_state_dim, num_heads=num_heads) + elif self.attn_mechanism == 'additive': + self.attention = AdditiveAttention(hidden_state_dim) + elif self.attn_mechanism == 'dot': + self.attention = DotProductAttention(dim=hidden_state_dim) + elif self.attn_mechanism == 'scaled-dot': + self.attention = DotProductAttention(dim=hidden_state_dim, scale=True) + else: + raise ValueError("Unsupported attention: %s".format(attn_mechanism)) + + self.fc = nn.Sequential( + Linear(hidden_state_dim << 1, hidden_state_dim), + nn.Tanh(), + View(shape=(-1, self.hidden_state_dim), contiguous=True), + Linear(hidden_state_dim, num_classes), + ) + + def forward_step( + self, + input_var: torch.Tensor, + hidden_states: Optional[torch.Tensor], + encoder_outputs: torch.Tensor, + attn: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + batch_size, output_lengths = input_var.size(0), input_var.size(1) + + embedded = self.embedding(input_var) + embedded = self.input_dropout(embedded) + + if self.training: + self.rnn.flatten_parameters() + + outputs, hidden_states = self.rnn(embedded, hidden_states) + + if self.attn_mechanism == 'loc': + context, attn = self.attention(outputs, encoder_outputs, attn) + else: + context, attn = self.attention(outputs, encoder_outputs, encoder_outputs) + + outputs = torch.cat((outputs, context), dim=2) + + step_outputs = self.fc(outputs.view(-1, self.hidden_state_dim << 1)).log_softmax(dim=-1) + step_outputs = step_outputs.view(batch_size, output_lengths, -1).squeeze(1) + + return step_outputs, hidden_states, attn + + def forward( + self, + encoder_outputs: torch.Tensor, + targets: Optional[torch.Tensor] = None, + encoder_output_lengths: Optional[torch.Tensor] = None, + teacher_forcing_ratio: float = 1.0, + ) -> torch.Tensor: + """ + Forward propagate a `encoder_outputs` for training. 
+ + Args: + targets (torch.LongTensr): A target sequence passed to decoders. `IntTensor` of size ``(batch, seq_length)`` + encoder_outputs (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size + ``(batch, seq_length, dimension)`` + encoder_output_lengths: The length of encoders outputs. ``(batch)`` + teacher_forcing_ratio (float): ratio of teacher forcing + + Returns: + * logits (torch.FloatTensor): Log probability of model predictions. + """ + logits = list() + hidden_states, attn = None, None + + targets, batch_size, max_length = self.validate_args(targets, encoder_outputs, teacher_forcing_ratio) + use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False + + if use_teacher_forcing: + targets = targets[targets != self.eos_id].view(batch_size, -1) + + if self.attn_mechanism == 'loc' or self.attn_mechanism == 'additive': + for di in range(targets.size(1)): + input_var = targets[:, di].unsqueeze(1) + step_outputs, hidden_states, attn = self.forward_step( + input_var=input_var, + hidden_states=hidden_states, + encoder_outputs=encoder_outputs, + attn=attn, + ) + logits.append(step_outputs) + + else: + step_outputs, hidden_states, attn = self.forward_step( + input_var=targets, + hidden_states=hidden_states, + encoder_outputs=encoder_outputs, + attn=attn, + ) + + for di in range(step_outputs.size(1)): + step_output = step_outputs[:, di, :] + logits.append(step_output) + + else: + input_var = targets[:, 0].unsqueeze(1) + + for di in range(max_length): + step_outputs, hidden_states, attn = self.forward_step( + input_var=input_var, + hidden_states=hidden_states, + encoder_outputs=encoder_outputs, + attn=attn, + ) + logits.append(step_outputs) + input_var = logits[-1].topk(1)[1] + + logits = torch.stack(logits, dim=1) + + return logits + + def validate_args( + self, + targets: Optional[Any] = None, + encoder_outputs: torch.Tensor = None, + teacher_forcing_ratio: float = 1.0, + ) -> Tuple[torch.Tensor, int, int]: + assert encoder_outputs is not None + batch_size = encoder_outputs.size(0) + + if targets is None: # inference + targets = torch.LongTensor([self.sos_id] * batch_size).view(batch_size, 1) + max_length = self.max_length + + if torch.cuda.is_available(): + targets = targets.cuda() + + if teacher_forcing_ratio > 0: + raise ValueError("Teacher forcing has to be disabled (set 0) when no targets is provided.") + + else: + max_length = targets.size(1) - 1 # minus the start of sequence symbol + + return targets, batch_size, max_length diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/decoders/openspeech_decoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/decoders/openspeech_decoder.py new file mode 100644 index 000000000..00a3f3d9d --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/decoders/openspeech_decoder.py @@ -0,0 +1,42 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import torch.nn as nn
+
+
+class OpenspeechDecoder(nn.Module):
+    r""" Interface of OpenSpeech decoder. """
+    def __init__(self):
+        super(OpenspeechDecoder, self).__init__()
+
+    def count_parameters(self) -> int:
+        r""" Count parameters of decoders """
+        return sum([p.numel() for p in self.parameters()])
+
+    def update_dropout(self, dropout_p: float) -> None:
+        r""" Update dropout probability of decoders """
+        for name, child in self.named_children():
+            if isinstance(child, nn.Dropout):
+                child.p = dropout_p
+
+    def forward(self, *args, **kwargs):
+        raise NotImplementedError
diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/decoders/rnn_transducer_decoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/decoders/rnn_transducer_decoder.py
new file mode 100644
index 000000000..3d7ec21f5
--- /dev/null
+++ b/audio/speech_recognition/conformer/pytorch/openspeech/decoders/rnn_transducer_decoder.py
@@ -0,0 +1,128 @@
+# MIT License
+#
+# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import torch
+import torch.nn as nn
+from typing import Tuple
+
+from openspeech.decoders import OpenspeechDecoder
+from openspeech.modules import Linear
+
+
+class RNNTransducerDecoder(OpenspeechDecoder):
+    r"""
+    Decoder of RNN-Transducer
+
+    Args:
+        num_classes (int): number of classes
+        hidden_state_dim (int, optional): hidden state dimension of decoders (default: 512)
+        output_dim (int, optional): output dimension of encoders and decoders (default: 512)
+        num_layers (int, optional): number of decoder layers (default: 1)
+        rnn_type (str, optional): type of rnn cell (default: lstm)
+        pad_id (int, optional): index of the pad symbol (default: 0)
+        sos_id (int, optional): index of the start of sentence symbol (default: 1)
+        eos_id (int, optional): index of the end of sentence symbol (default: 2)
+        dropout_p (float, optional): dropout probability of decoders
+
+    Inputs: inputs, input_lengths
+        inputs (torch.LongTensor): A target sequence passed to decoders. `IntTensor` of size ``(batch, seq_length)``
+        input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
+        hidden_states (torch.FloatTensor): A previous hidden state of decoders. `FloatTensor` of size ``(batch, seq_length, dimension)``
+
+    Returns:
+        (Tensor, Tensor):
+
+        * decoder_outputs (torch.FloatTensor): An output sequence of decoders. `FloatTensor` of size
+            ``(batch, seq_length, dimension)``
+        * hidden_states (torch.FloatTensor): A hidden state of decoders. `FloatTensor` of size
+            ``(batch, seq_length, dimension)``
+
+    Reference:
+        A Graves: Sequence Transduction with Recurrent Neural Networks
+        https://arxiv.org/abs/1211.3711.pdf
+    """
+    supported_rnns = {
+        'lstm': nn.LSTM,
+        'gru': nn.GRU,
+        'rnn': nn.RNN,
+    }
+
+    def __init__(
+            self,
+            num_classes: int,
+            hidden_state_dim: int,
+            output_dim: int,
+            num_layers: int,
+            rnn_type: str = 'lstm',
+            pad_id: int = 0,
+            sos_id: int = 1,
+            eos_id: int = 2,
+            dropout_p: float = 0.2,
+    ):
+        super(RNNTransducerDecoder, self).__init__()
+        self.hidden_state_dim = hidden_state_dim
+        self.pad_id = pad_id
+        self.sos_id = sos_id
+        self.eos_id = eos_id
+        self.embedding = nn.Embedding(num_classes, hidden_state_dim)
+        rnn_cell = self.supported_rnns[rnn_type.lower()]
+        self.rnn = rnn_cell(
+            input_size=hidden_state_dim,
+            hidden_size=hidden_state_dim,
+            num_layers=num_layers,
+            bias=True,
+            batch_first=True,
+            dropout=dropout_p,
+            bidirectional=False,
+        )
+        self.out_proj = Linear(hidden_state_dim, output_dim)
+
+    def forward(
+            self,
+            inputs: torch.Tensor,
+            input_lengths: torch.Tensor = None,
+            hidden_states: torch.Tensor = None,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """
+        Forward propagate `inputs` (targets) for training.
+
+        Inputs:
+            inputs (torch.LongTensor): An input sequence passed to the label encoder. Typically inputs will be a padded `LongTensor` of size ``(batch, target_length)``
+            input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
+            hidden_states (torch.FloatTensor): Previous hidden states.
+
+        Returns:
+            (Tensor, Tensor):
+
+            * outputs (torch.FloatTensor): An output sequence of decoders. `FloatTensor` of size
+                ``(batch, seq_length, dimension)``
+            * hidden_states (torch.FloatTensor): A hidden state of decoders.
`FloatTensor` of size + ``(batch, seq_length, dimension)`` + """ + embedded = self.embedding(inputs) + + if hidden_states is not None: + outputs, hidden_states = self.rnn(embedded, hidden_states) + else: + outputs, hidden_states = self.rnn(embedded) + + outputs = self.out_proj(outputs) + return outputs, hidden_states diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/decoders/transformer_decoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/decoders/transformer_decoder.py new file mode 100644 index 000000000..77bc3fc67 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/decoders/transformer_decoder.py @@ -0,0 +1,275 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import random +import torch +import torch.nn as nn +from torch import Tensor +from typing import Optional, Tuple + +from openspeech.decoders import OpenspeechDecoder +from openspeech.modules import ( + TransformerEmbedding, + PositionalEncoding, + Linear, + get_attn_pad_mask, + get_attn_subsequent_mask, + MultiHeadAttention, + PositionwiseFeedForward, +) + + +class TransformerDecoderLayer(nn.Module): + r""" + DecoderLayer is made up of self-attention, multi-head attention and feedforward network. + This standard decoders layer is based on the paper "Attention Is All You Need". 
+ + Args: + d_model: dimension of model (default: 512) + num_heads: number of attention heads (default: 8) + d_ff: dimension of feed forward network (default: 2048) + dropout_p: probability of dropout (default: 0.3) + + Inputs: + inputs (torch.FloatTensor): input sequence of transformer decoder layer + encoder_outputs (torch.FloatTensor): outputs of encoder + self_attn_mask (torch.BoolTensor): mask of self attention + encoder_output_mask (torch.BoolTensor): mask of encoder outputs + + Returns: + (Tensor, Tensor, Tensor) + + * outputs (torch.FloatTensor): output of transformer decoder layer + * self_attn (torch.FloatTensor): output of self attention + * encoder_attn (torch.FloatTensor): output of encoder attention + + Reference: + Ashish Vaswani et al.: Attention Is All You Need + https://arxiv.org/abs/1706.03762 + """ + def __init__( + self, + d_model: int = 512, + num_heads: int = 8, + d_ff: int = 2048, + dropout_p: float = 0.3, + ) -> None: + super(TransformerDecoderLayer, self).__init__() + self.self_attention_prenorm = nn.LayerNorm(d_model) + self.decoder_attention_prenorm = nn.LayerNorm(d_model) + self.feed_forward_prenorm = nn.LayerNorm(d_model) + self.self_attention = MultiHeadAttention(d_model, num_heads) + self.decoder_attention = MultiHeadAttention(d_model, num_heads) + self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout_p) + + def forward( + self, + inputs: Tensor, + encoder_outputs: Tensor, + self_attn_mask: Optional[Tensor] = None, + encoder_attn_mask: Optional[Tensor] = None, + ) -> Tuple[Tensor, Tensor, Tensor]: + r""" + Forward propagate transformer decoder layer. + + Inputs: + inputs (torch.FloatTensor): input sequence of transformer decoder layer + encoder_outputs (torch.FloatTensor): outputs of encoder + self_attn_mask (torch.BoolTensor): mask of self attention + encoder_output_mask (torch.BoolTensor): mask of encoder outputs + + Returns: + outputs (torch.FloatTensor): output of transformer decoder layer + self_attn (torch.FloatTensor): output of self attention + encoder_attn (torch.FloatTensor): output of encoder attention + """ + residual = inputs + inputs = self.self_attention_prenorm(inputs) + outputs, self_attn = self.self_attention(inputs, inputs, inputs, self_attn_mask) + outputs += residual + + residual = outputs + outputs = self.decoder_attention_prenorm(outputs) + outputs, encoder_attn = self.decoder_attention(outputs, encoder_outputs, encoder_outputs, encoder_attn_mask) + outputs += residual + + residual = outputs + outputs = self.feed_forward_prenorm(outputs) + outputs = self.feed_forward(outputs) + outputs += residual + + return outputs, self_attn, encoder_attn + + +class TransformerDecoder(OpenspeechDecoder): + r""" + The TransformerDecoder is composed of a stack of N identical layers. + Each layer has three sub-layers. The first is a multi-head self-attention mechanism, + and the second is a multi-head attention mechanism, third is a feed-forward network. 
+ + Args: + num_classes: umber of classes + d_model: dimension of model + d_ff: dimension of feed forward network + num_layers: number of layers + num_heads: number of attention heads + dropout_p: probability of dropout + pad_id (int, optional): index of the pad symbol (default: 0) + sos_id (int, optional): index of the start of sentence symbol (default: 1) + eos_id (int, optional): index of the end of sentence symbol (default: 2) + max_length (int): max decoding length + """ + + def __init__( + self, + num_classes: int, + d_model: int = 512, + d_ff: int = 512, + num_layers: int = 6, + num_heads: int = 8, + dropout_p: float = 0.3, + pad_id: int = 0, + sos_id: int = 1, + eos_id: int = 2, + max_length: int = 128, + ) -> None: + super(TransformerDecoder, self).__init__() + self.d_model = d_model + self.num_layers = num_layers + self.num_heads = num_heads + self.max_length = max_length + self.pad_id = pad_id + self.sos_id = sos_id + self.eos_id = eos_id + + self.embedding = TransformerEmbedding(num_classes, pad_id, d_model) + self.positional_encoding = PositionalEncoding(d_model) + self.input_dropout = nn.Dropout(p=dropout_p) + self.layers = nn.ModuleList([ + TransformerDecoderLayer( + d_model=d_model, + num_heads=num_heads, + d_ff=d_ff, + dropout_p=dropout_p, + ) for _ in range(num_layers) + ]) + self.fc = nn.Sequential( + nn.LayerNorm(d_model), + Linear(d_model, d_model, bias=False), + nn.Tanh(), + Linear(d_model, num_classes, bias=False), + ) + + def forward_step( + self, + decoder_inputs: torch.Tensor, + decoder_input_lengths: torch.Tensor, + encoder_outputs: torch.Tensor, + encoder_output_lengths: torch.Tensor, + positional_encoding_length: int, + ) -> torch.Tensor: + dec_self_attn_pad_mask = get_attn_pad_mask( + decoder_inputs, decoder_input_lengths, decoder_inputs.size(1) + ) + dec_self_attn_subsequent_mask = get_attn_subsequent_mask(decoder_inputs) + self_attn_mask = torch.gt((dec_self_attn_pad_mask + dec_self_attn_subsequent_mask), 0) + + encoder_attn_mask = get_attn_pad_mask(encoder_outputs, encoder_output_lengths, decoder_inputs.size(1)) + + outputs = self.embedding(decoder_inputs) + self.positional_encoding(positional_encoding_length) + outputs = self.input_dropout(outputs) + + for layer in self.layers: + outputs, self_attn, memory_attn = layer( + inputs=outputs, + encoder_outputs=encoder_outputs, + self_attn_mask=self_attn_mask, + encoder_attn_mask=encoder_attn_mask, + ) + + return outputs + + def forward( + self, + encoder_outputs: torch.Tensor, + targets: Optional[torch.LongTensor] = None, + encoder_output_lengths: torch.Tensor = None, + target_lengths: torch.Tensor = None, + teacher_forcing_ratio: float = 1.0, + ) -> torch.Tensor: + r""" + Forward propagate a `encoder_outputs` for training. + + Args: + targets (torch.LongTensor): A target sequence passed to decoders. `IntTensor` of size + ``(batch, seq_length)`` + encoder_outputs (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size + ``(batch, seq_length, dimension)`` + encoder_output_lengths (torch.LongTensor): The length of encoders outputs. ``(batch)`` + teacher_forcing_ratio (float): ratio of teacher forcing + + Returns: + * logits (torch.FloatTensor): Log probability of model predictions. 
+ """ + logits = list() + batch_size = encoder_outputs.size(0) + use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False + + if targets is not None and use_teacher_forcing: + targets = targets[targets != self.eos_id].view(batch_size, -1) + target_length = targets.size(1) + + step_outputs = self.forward_step( + decoder_inputs=targets, + decoder_input_lengths=target_lengths, + encoder_outputs=encoder_outputs, + encoder_output_lengths=encoder_output_lengths, + positional_encoding_length=target_length, + ) + step_outputs = self.fc(step_outputs).log_softmax(dim=-1) + + for di in range(step_outputs.size(1)): + step_output = step_outputs[:, di, :] + logits.append(step_output) + + # Inference + else: + input_var = encoder_outputs.new_zeros(batch_size, self.max_length).long() + input_var = input_var.fill_(self.pad_id) + input_var[:, 0] = self.sos_id + + for di in range(1, self.max_length): + input_lengths = torch.IntTensor(batch_size).fill_(di) + + outputs = self.forward_step( + decoder_inputs=input_var[:, :di], + decoder_input_lengths=input_lengths, + encoder_outputs=encoder_outputs, + encoder_output_lengths=encoder_output_lengths, + positional_encoding_length=di, + ) + step_output = self.fc(outputs).log_softmax(dim=-1) + + logits.append(step_output[:, -1, :]) + input_var[:, di] = logits[-1].topk(1)[1].squeeze() + + return torch.stack(logits, dim=1) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/decoders/transformer_transducer_decoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/decoders/transformer_transducer_decoder.py new file mode 100644 index 000000000..ee37cea45 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/decoders/transformer_transducer_decoder.py @@ -0,0 +1,156 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import torch +import torch.nn as nn +import numpy as np +from typing import Tuple + +from openspeech.decoders import OpenspeechDecoder +from openspeech.encoders.transformer_transducer_encoder import TransformerTransducerEncoderLayer +from openspeech.modules import ( + PositionalEncoding, + get_attn_pad_mask, + get_attn_subsequent_mask, +) + + +class TransformerTransducerDecoder(OpenspeechDecoder): + r""" + Converts the label to higher feature values + + Args: + num_classes (int): the number of vocabulary + model_dim (int): the number of features in the label encoder (default : 512) + d_ff (int): the number of features in the feed forward layers (default : 2048) + num_layers (int): the number of label encoder layers (default: 2) + num_heads (int): the number of heads in the multi-head attention (default: 8) + dropout (float): dropout probability of label encoder (default: 0.1) + max_positional_length (int): Maximum length to use for positional encoding (default : 5000) + pad_id (int): index of padding (default: 0) + sos_id (int): index of the start of sentence (default: 1) + eos_id (int): index of the end of sentence (default: 2) + + Inputs: inputs, inputs_lens + - **inputs**: Ground truth of batch size number + - **inputs_lens**: Tensor of target lengths + + Returns: + (torch.FloatTensor, torch.FloatTensor) + + * outputs (torch.FloatTensor): ``(batch, seq_length, dimension)`` + * input_lengths (torch.FloatTensor): ``(batch)`` + + Reference: + Qian Zhang et al.: Transformer Transducer: A Streamable Speech Recognition Model with Transformer Encoders and RNN-T Loss + https://arxiv.org/abs/2002.02562 + """ + def __init__( + self, + num_classes: int, + model_dim: int = 512, + d_ff: int = 2048, + num_layers: int = 2, + num_heads: int = 8, + dropout: float = 0.1, + max_positional_length: int = 5000, + pad_id: int = 0, + sos_id: int = 1, + eos_id: int = 2, + ) -> None: + super(TransformerTransducerDecoder, self).__init__() + self.embedding = nn.Embedding(num_classes, model_dim) + self.scale = np.sqrt(model_dim) + self.positional_encoding = PositionalEncoding(model_dim, max_positional_length) + self.input_dropout = nn.Dropout(p=dropout) + self.pad_id = pad_id + self.sos_id = sos_id + self.eos_id = eos_id + self.decoder_layers = nn.ModuleList([ + TransformerTransducerEncoderLayer( + model_dim, + d_ff, + num_heads, + dropout + ) for _ in range(num_layers) + ]) + + def forward( + self, + inputs: torch.Tensor, + input_lengths: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + r""" + Forward propagate a `inputs` for label encoder. + + Args: + inputs (torch.LongTensor): A input sequence passed to label encoder. Typically inputs will be a padded + `LongTensor` of size ``(batch, target_length)`` + input_lengths (torch.LongTensor): The length of input tensor. 
``(batch)`` + + Returns: + * outputs (Tensor): ``(batch, seq_length, dimension)`` + * output_lengths (Tensor): ``(batch)`` + """ + batch = inputs.size(0) + + if len(inputs.size()) == 1: # validate, evaluation + inputs = inputs.unsqueeze(1) + target_lengths = inputs.size(1) + + outputs = self.forward_step( + decoder_inputs=inputs, + decoder_input_lengths=input_lengths, + positional_encoding_length=target_lengths, + ) + + else: # train + target_lengths = inputs.size(1) + + outputs = self.forward_step( + decoder_inputs=inputs, + decoder_input_lengths=input_lengths, + positional_encoding_length=target_lengths, + ) + + return outputs, input_lengths + + def forward_step( + self, + decoder_inputs: torch.Tensor, + decoder_input_lengths: torch.Tensor, + positional_encoding_length: int = 1, + ) -> torch.Tensor: + dec_self_attn_pad_mask = get_attn_pad_mask(decoder_inputs, decoder_input_lengths, decoder_inputs.size(1)) + dec_self_attn_subsequent_mask = get_attn_subsequent_mask(decoder_inputs) + self_attn_mask = torch.gt((dec_self_attn_pad_mask + dec_self_attn_subsequent_mask), 0) + + embedding_output = self.embedding(decoder_inputs) * self.scale + positional_encoding_output = self.positional_encoding(positional_encoding_length) + inputs = embedding_output + positional_encoding_output + + outputs = self.input_dropout(inputs) + + for decoder_layer in self.decoder_layers: + outputs, _ = decoder_layer(outputs, self_attn_mask) + + return outputs diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/__init__.py new file mode 100644 index 000000000..4d942d7e0 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/__init__.py @@ -0,0 +1,32 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
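Editor's note: both decoders above build their decoder self-attention mask the same way: a padding mask and a causal (subsequent) mask are summed and anything greater than zero is masked, i.e. torch.gt(pad_mask + subsequent_mask, 0). The snippet below is a self-contained illustration of that combination in plain torch; the two helpers are simplified stand-ins for get_attn_pad_mask / get_attn_subsequent_mask, not the patch's implementations.

import torch

def subsequent_mask(seq_len: int) -> torch.Tensor:
    # True above the diagonal: a query position must not attend to future key positions.
    return torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool), diagonal=1)

def pad_mask(lengths: torch.Tensor, seq_len: int) -> torch.Tensor:
    # True where the key position is padding, broadcast over all query positions.
    positions = torch.arange(seq_len).unsqueeze(0)            # (1, seq_len)
    key_is_pad = positions >= lengths.unsqueeze(1)            # (batch, seq_len)
    return key_is_pad.unsqueeze(1).expand(-1, seq_len, -1)    # (batch, seq_len, seq_len)

lengths = torch.tensor([4, 2])
combined = pad_mask(lengths, 5) | subsequent_mask(5)  # logically equivalent to torch.gt(pad + subsequent, 0)
print(combined[1])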
+ +from .openspeech_encoder import OpenspeechEncoder +from .conformer_encoder import ConformerEncoder +from .contextnet_encoder import ContextNetEncoder +from .convolutional_lstm_encoder import ConvolutionalLSTMEncoder +from .convolutional_transformer_encoder import ConvolutionalTransformerEncoder +from .lstm_encoder import LSTMEncoder +from .rnn_transducer_encoder import RNNTransducerEncoder +from .transformer_encoder import TransformerEncoder +from .transformer_transducer_encoder import TransformerTransducerEncoder +from .jasper import Jasper diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/conformer_encoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/conformer_encoder.py new file mode 100644 index 000000000..25aabfdd0 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/conformer_encoder.py @@ -0,0 +1,141 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from typing import Tuple + +from openspeech.encoders.openspeech_encoder import OpenspeechEncoder +from openspeech.modules import Conv2dSubsampling, Linear, ConformerBlock, Transpose + + +class ConformerEncoder(OpenspeechEncoder): + r""" + Transformer models are good at capturing content-based global interactions, while CNNs exploit local features + effectively. Conformer achieves the best of both worlds by studying how to combine convolution neural + networks and transformers to model both local and global dependencies of an audio sequence + in a parameter-efficient way. 
+ + Args: + num_classes (int): Number of classification + input_dim (int, optional): Dimension of input vector + encoder_dim (int, optional): Dimension of conformer encoders + num_layers (int, optional): Number of conformer blocks + num_attention_heads (int, optional): Number of attention heads + feed_forward_expansion_factor (int, optional): Expansion factor of feed forward module + conv_expansion_factor (int, optional): Expansion factor of conformer convolution module + feed_forward_dropout_p (float, optional): Probability of feed forward module dropout + attention_dropout_p (float, optional): Probability of attention module dropout + conv_dropout_p (float, optional): Probability of conformer convolution module dropout + conv_kernel_size (int or tuple, optional): Size of the convolving kernel + half_step_residual (bool): Flag indication whether to use half step residual or not + joint_ctc_attention (bool, optional): flag indication joint ctc attention or not + + Inputs: inputs, input_lengths + - **inputs** (batch, time, dim): Tensor containing input vector + - **input_lengths** (batch): list of sequence input lengths + + Returns: outputs, output_lengths + - **outputs** (batch, out_channels, time): Tensor produces by conformer encoders. + - **output_lengths** (batch): list of sequence output lengths + + Reference: + Anmol Gulati et al: Conformer: Convolution-augmented Transformer for Speech Recognition + https://arxiv.org/abs/2005.08100 + """ + def __init__( + self, + num_classes: int, + input_dim: int = 80, + encoder_dim: int = 512, + num_layers: int = 17, + num_attention_heads: int = 8, + feed_forward_expansion_factor: int = 4, + conv_expansion_factor: int = 2, + input_dropout_p: float = 0.1, + feed_forward_dropout_p: float = 0.1, + attention_dropout_p: float = 0.1, + conv_dropout_p: float = 0.1, + conv_kernel_size: int = 31, + half_step_residual: bool = True, + joint_ctc_attention: bool = True, + ) -> None: + super(ConformerEncoder, self).__init__() + self.joint_ctc_attention = joint_ctc_attention + self.conv_subsample = Conv2dSubsampling(input_dim, in_channels=1, out_channels=encoder_dim) + self.input_projection = nn.Sequential( + Linear(self.conv_subsample.get_output_dim(), encoder_dim), + nn.Dropout(p=input_dropout_p), + ) + self.layers = nn.ModuleList([ + ConformerBlock( + encoder_dim=encoder_dim, + num_attention_heads=num_attention_heads, + feed_forward_expansion_factor=feed_forward_expansion_factor, + conv_expansion_factor=conv_expansion_factor, + feed_forward_dropout_p=feed_forward_dropout_p, + attention_dropout_p=attention_dropout_p, + conv_dropout_p=conv_dropout_p, + conv_kernel_size=conv_kernel_size, + half_step_residual=half_step_residual, + ) for _ in range(num_layers) + ]) + if self.joint_ctc_attention: + self.fc = nn.Sequential( + Transpose(shape=(1, 2)), + nn.Dropout(feed_forward_dropout_p), + Linear(encoder_dim, num_classes, bias=False), + ) + + def forward( + self, + inputs: torch.Tensor, + input_lengths: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + r""" + Forward propagate a `inputs` for encoders training. + + Args: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded + `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + (Tensor, Tensor, Tensor) + + * outputs: A output sequence of encoders. 
`FloatTensor` of size ``(batch, seq_length, dimension)`` + * encoder_logits: Log probability of encoders outputs will be passed to CTC Loss. + If joint_ctc_attention is False, return None. + * output_lengths: The length of encoders outputs. ``(batch)`` + """ + encoder_logits = None + + outputs, output_lengths = self.conv_subsample(inputs, input_lengths) + outputs = self.input_projection(outputs) + + for layer in self.layers: + outputs = layer(outputs) + + if self.joint_ctc_attention: + encoder_logits = self.fc(outputs.transpose(1, 2)).log_softmax(dim=2) + + return outputs, encoder_logits, output_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/contextnet_encoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/contextnet_encoder.py new file mode 100644 index 000000000..76de32883 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/contextnet_encoder.py @@ -0,0 +1,123 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from torch import Tensor +from typing import Tuple + +from openspeech.encoders.openspeech_encoder import OpenspeechEncoder +from openspeech.modules.contextnet_block import ContextNetBlock +from openspeech.modules import Conv2dSubsampling, Linear, ConformerBlock, Transpose + + +class ContextNetEncoder(OpenspeechEncoder): + r""" + ContextNetEncoder goes through 23 convolution blocks to convert to higher feature values. 
+ + Args: + num_classes (int): Number of classification + model_size (str, optional): Size of the model['small', 'medium', 'large'] (default : 'medium') + input_dim (int, optional): Dimension of input vector (default : 80) + num_layers (int, optional): The number of convolutional layers (default : 5) + kernel_size (int, optional): Value of convolution kernel size (default : 5) + num_channels (int, optional): The number of channels in the convolution filter (default: 256) + output_dim (int, optional): Dimension of encoder output vector (default: 640) + joint_ctc_attention (bool, optional): flag indication joint ctc attention or not + + Inputs: inputs, input_lengths + - **inputs**: Parsed audio of batch size number `FloatTensor` of size ``(batch, seq_length, dimension)`` + - **input_lengths**: Tensor representing the sequence length of the input ``(batch)`` + + Returns: output, output_lengths + - **output**: Tensor of encoder output `FloatTensor` of size + ``(batch, seq_length, dimension)`` + - **encoder_logits**: Log probability of encoders outputs will be passed to CTC Loss. + If joint_ctc_attention is False, return None. + - **output_lengths**: Tensor representing the length of the encoder output ``(batch)`` + """ + supported_models = { + 'small': 0.5, + 'medium': 1, + 'large': 2, + } + + def __init__( + self, + num_classes: int, + model_size: str = 'medium', + input_dim: int = 80, + num_layers: int = 5, + kernel_size: int = 5, + num_channels: int = 256, + output_dim: int = 640, + joint_ctc_attention: bool = False, + ) -> None: + super(ContextNetEncoder, self).__init__() + assert model_size in ('small', 'medium', 'large'), f'{model_size} is not supported.' + + alpha = self.supported_models[model_size] + + num_channels = int(num_channels * alpha) + output_dim = int(output_dim * alpha) + + self.joint_ctc_attention = joint_ctc_attention + self.blocks = ContextNetBlock.make_conv_blocks(input_dim, num_layers, kernel_size, num_channels, output_dim) + if self.joint_ctc_attention: + self.fc = nn.Linear(output_dim, num_classes, bias=False) + + def forward( + self, + inputs: Tensor, + input_lengths: Tensor, + ) -> Tuple[Tensor, Tensor, Tensor]: + r""" + Forward propagate a `inputs` for audio encoder. + + Args: + **inputs** (torch.FloatTensor): Parsed audio of batch size number `FloatTensor` of size + ``(batch, seq_length, dimension)`` + **input_lengths** (torch.LongTensor): Tensor representing the sequence length of the input + `LongTensor` of size ``(batch)`` + + Returns: + **output** (torch.FloatTensor): Tensor of encoder output `FloatTensor` of size + ``(batch, seq_length, dimension)`` + **encoder_logits** (torch.FloatTensor): Log probability of encoders outputs will be passed to CTC Loss. + If joint_ctc_attention is False, return None. 
+ **output_lengths** (torch.LongTensor): Tensor representing the length of the encoder output + `LongTensor` of size ``(batch)`` + """ + encoder_logits = None + + output = inputs.transpose(1, 2) + output_lengths = input_lengths + + for block in self.blocks: + output, output_lengths = block(output, output_lengths) + + output = output.transpose(1, 2) + + if self.joint_ctc_attention: + encoder_logits = self.fc(output).log_softmax(dim=2) + + return output, encoder_logits, output_lengths \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/convolutional_lstm_encoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/convolutional_lstm_encoder.py new file mode 100644 index 000000000..74f212911 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/convolutional_lstm_encoder.py @@ -0,0 +1,134 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from typing import Tuple, Optional + +from openspeech.encoders import OpenspeechEncoder +from openspeech.modules import Transpose, Linear + + +class ConvolutionalLSTMEncoder(OpenspeechEncoder): + r""" + Converts low level speech signals into higher level features with convolutional extractor. 
+ + Args: + input_dim (int): dimension of input vector + num_classes (int): number of classification + hidden_state_dim (int): the number of features in the encoders hidden state `h` + num_layers (int, optional): number of recurrent layers (default: 3) + bidirectional (bool, optional): if True, becomes a bidirectional encoders (default: False) + extractor (str): type of CNN extractor (default: vgg) + conv_activation (str): activation function of convolutional extractor (default: hardtanh) + rnn_type (str, optional): type of RNN cell (default: lstm) + dropout_p (float, optional): dropout probability of encoders (default: 0.2) + joint_ctc_attention (bool, optional): flag indication joint ctc attention or not + + Inputs: inputs, input_lengths + - **inputs**: list of sequences, whose length is the batch size and within which each sequence is list of tokens + - **input_lengths**: list of sequence lengths + + Returns: encoder_outputs, encoder_log__probs, output_lengths + - **encoder_outputs**: tensor containing the encoded features of the input sequence + - **encoder_log__probs**: tensor containing log probability for encoder_only loss + - **output_lengths**: list of sequence lengths produced by Listener + """ + supported_rnns = { + 'lstm': nn.LSTM, + 'gru': nn.GRU, + 'rnn': nn.RNN, + } + + def __init__( + self, + input_dim: int, + num_classes: int = None, + hidden_state_dim: int = 512, + dropout_p: float = 0.3, + num_layers: int = 3, + bidirectional: bool = True, + rnn_type: str = 'lstm', + extractor: str = 'vgg', + conv_activation: str = 'hardtanh', + joint_ctc_attention: bool = False, + ) -> None: + super(ConvolutionalLSTMEncoder, self).__init__() + extractor = self.supported_extractors[extractor.lower()] + self.conv = extractor(input_dim=input_dim, activation=conv_activation) + self.conv_output_dim = self.conv.get_output_dim() + + self.num_classes = num_classes + self.joint_ctc_attention = joint_ctc_attention + + self.hidden_state_dim = hidden_state_dim + self.rnn = self.supported_rnns[rnn_type.lower()]( + input_size=self.conv_output_dim, + hidden_size=hidden_state_dim, + num_layers=num_layers, + bias=True, + batch_first=True, + dropout=dropout_p, + bidirectional=bidirectional, + ) + + if self.joint_ctc_attention: + self.fc = nn.Sequential( + Transpose(shape=(1, 2)), + nn.Dropout(dropout_p), + Linear(hidden_state_dim << 1, num_classes, bias=False), + ) + + def forward( + self, + inputs: torch.Tensor, + input_lengths: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: + r""" + Forward propagate a `inputs` for encoders training. + + Args: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded + `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + (Tensor, Tensor, Tensor): + + * outputs: A output sequence of encoders. `FloatTensor` of size ``(batch, seq_length, dimension)`` + * encoder_logits: Log probability of encoders outputs will be passed to CTC Loss. + If joint_ctc_attention is False, return None. + * encoder_output_lengths: The length of encoders outputs. 
``(batch)`` + """ + encoder_logits = None + + conv_outputs, output_lengths = self.conv(inputs, input_lengths) + + conv_outputs = nn.utils.rnn.pack_padded_sequence(conv_outputs.transpose(0, 1), output_lengths.cpu()) + outputs, hidden_states = self.rnn(conv_outputs) + outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs) + outputs = outputs.transpose(0, 1) + + if self.joint_ctc_attention: + encoder_logits = self.fc(outputs.transpose(1, 2)).log_softmax(dim=2) + + return outputs, encoder_logits, output_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/convolutional_transformer_encoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/convolutional_transformer_encoder.py new file mode 100644 index 000000000..3993b5edf --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/convolutional_transformer_encoder.py @@ -0,0 +1,147 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from typing import Tuple + +from openspeech.encoders import OpenspeechEncoder +from openspeech.encoders.transformer_encoder import TransformerEncoderLayer +from openspeech.modules import ( + Linear, + PositionalEncoding, + get_attn_pad_mask, Transpose, +) + + +class ConvolutionalTransformerEncoder(OpenspeechEncoder): + r""" + The TransformerEncoder is composed of a stack of N identical layers. + Each layer has two sub-layers. The first is a multi-head self-attention mechanism, + and the second is a simple, position-wise fully connected feed-forward network. + + Args: + input_dim: dimension of feature vector + extractor (str): convolutional extractor + d_model: dimension of model (default: 512) + d_ff: dimension of feed forward network (default: 2048) + num_layers: number of encoders layers (default: 6) + num_heads: number of attention heads (default: 8) + dropout_p (float, optional): probability of dropout (default: 0.3) + conv_activation (str, optional): activation function of convolutional extractor (default: hardtanh) + joint_ctc_attention (bool, optional): flag indication joint ctc attention or not (default: False) + + Inputs: + - **inputs**: list of sequences, whose length is the batch size and within which each sequence is list of tokens + - **input_lengths**: list of sequence lengths + + Returns: + (Tensor, Tensor, Tensor): + + * outputs: A output sequence of encoders. 
`FloatTensor` of size ``(batch, seq_length, dimension)`` + * encoder_logits: Log probability of encoders outputs will be passed to CTC Loss. + If joint_ctc_attention is False, return None. + * output_lengths: The length of encoders outputs. ``(batch)`` + """ + + def __init__( + self, + num_classes: int, + input_dim: int, + extractor: str = 'vgg', + d_model: int = 512, + d_ff: int = 2048, + num_layers: int = 6, + num_heads: int = 8, + dropout_p: float = 0.3, + conv_activation: str = "relu", + joint_ctc_attention: bool = False, + ) -> None: + super(ConvolutionalTransformerEncoder, self).__init__() + extractor = self.supported_extractors[extractor.lower()] + self.conv = extractor(input_dim=input_dim, activation=conv_activation) + self.conv_output_dim = self.conv.get_output_dim() + + self.num_classes = num_classes + self.joint_ctc_attention = joint_ctc_attention + + self.d_model = d_model + self.num_layers = num_layers + self.num_heads = num_heads + self.input_proj = Linear(self.conv_output_dim, d_model) + self.input_norm = nn.LayerNorm(d_model) + self.input_dropout = nn.Dropout(p=dropout_p) + self.positional_encoding = PositionalEncoding(d_model) + self.layers = nn.ModuleList([ + TransformerEncoderLayer( + d_model=d_model, + num_heads=num_heads, + d_ff=d_ff, + dropout_p=dropout_p, + ) for _ in range(num_layers) + ]) + + if self.joint_ctc_attention: + self.fc = nn.Sequential( + Transpose(shape=(1, 2)), + nn.Dropout(dropout_p), + Linear(d_model, num_classes, bias=False), + ) + + def forward( + self, + inputs: torch.Tensor, + input_lengths: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + r""" + Forward propagate a `inputs` for encoders training. + + Args: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded + `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + (Tensor, Tensor, Tensor): + + * outputs: A output sequence of encoders. `FloatTensor` of size ``(batch, seq_length, dimension)`` + * encoder_logits: Log probability of encoders outputs will be passed to CTC Loss. + If joint_ctc_attention is False, return None. + * output_lengths: The length of encoders outputs. 
``(batch)`` + """ + encoder_logits = None + + conv_outputs, output_lengths = self.conv(inputs, input_lengths) + + self_attn_mask = get_attn_pad_mask(conv_outputs, output_lengths, conv_outputs.size(1)) + + outputs = self.input_norm(self.input_proj(conv_outputs)) + outputs += self.positional_encoding(outputs.size(1)) + outputs = self.input_dropout(outputs) + + for layer in self.layers: + outputs, attn = layer(outputs, self_attn_mask) + + if self.joint_ctc_attention: + encoder_logits = self.fc(outputs.transpose(1, 2)).log_softmax(dim=-1) + + return outputs, encoder_logits, output_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/deepspeech2.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/deepspeech2.py new file mode 100644 index 000000000..358c4c0d9 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/deepspeech2.py @@ -0,0 +1,123 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor +from typing import Tuple + +from openspeech.modules import DeepSpeech2Extractor, BNReluRNN, Linear + + +class DeepSpeech2(nn.Module): + r""" + DeepSpeech2 is a set of speech recognition models based on Baidu DeepSpeech2. DeepSpeech2 is trained with CTC loss. + + Args: + input_dim (int): dimension of input vector + num_classes (int): number of classfication + rnn_type (str, optional): type of RNN cell (default: gru) + num_rnn_layers (int, optional): number of recurrent layers (default: 5) + rnn_hidden_dim (int): the number of features in the hidden state `h` + dropout_p (float, optional): dropout probability (default: 0.1) + bidirectional (bool, optional): if True, becomes a bidirectional encoders (defulat: True) + activation (str): type of activation function (default: hardtanh) + + Inputs: inputs, input_lengths + - **inputs**: list of sequences, whose length is the batch size and within which each sequence is list of tokens + - **input_lengths**: list of sequence lengths + + Returns: + (Tensor, Tensor): + + * predicted_log_prob (torch.FloatTensor)s: Log probability of model predictions. 
+ * output_lengths (torch.LongTensor): The length of output tensor ``(batch)`` + + Reference: + Dario Amodei et al.: Deep Speech 2: End-to-End Speech Recognition in English and Mandarin + https://arxiv.org/abs/1512.02595 + """ + def __init__( + self, + input_dim: int, + num_classes: int, + rnn_type='gru', + num_rnn_layers: int = 5, + rnn_hidden_dim: int = 512, + dropout_p: float = 0.1, + bidirectional: bool = True, + activation: str = 'hardtanh', + ) -> None: + super(DeepSpeech2, self).__init__() + self.conv = DeepSpeech2Extractor(input_dim, activation=activation) + self.rnn_layers = nn.ModuleList() + rnn_output_size = rnn_hidden_dim << 1 if bidirectional else rnn_hidden_dim + + for idx in range(num_rnn_layers): + self.rnn_layers.append( + BNReluRNN( + input_size=self.conv.get_output_dim() if idx == 0 else rnn_output_size, + hidden_state_dim=rnn_hidden_dim, + rnn_type=rnn_type, + bidirectional=bidirectional, + dropout_p=dropout_p, + ) + ) + + self.fc = nn.Sequential( + nn.LayerNorm(rnn_output_size), + Linear(rnn_output_size, num_classes, bias=False), + ) + + def count_parameters(self) -> int: + r""" Count parameters of encoders """ + return sum([p.numel for p in self.parameters()]) + + def update_dropout(self, dropout_p: float) -> None: + r""" Update dropout probability of encoders """ + for name, child in self.named_children(): + if isinstance(child, nn.Dropout): + child.p = dropout_p + + def forward(self, inputs: Tensor, input_lengths: Tensor) -> Tuple[Tensor, Tensor]: + r""" + Forward propagate a `inputs` for encoder_only training. + + Args: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded + `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + (Tensor, Tensor): + + * predicted_log_prob (torch.FloatTensor)s: Log probability of model predictions. + * output_lengths (torch.LongTensor): The length of output tensor ``(batch)`` + """ + outputs, output_lengths = self.conv(inputs, input_lengths) + outputs = outputs.permute(1, 0, 2).contiguous() + + for rnn_layer in self.rnn_layers: + outputs = rnn_layer(outputs, output_lengths) + + outputs = self.fc(outputs.transpose(0, 1)).log_softmax(dim=-1) + + return outputs, output_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/jasper.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/jasper.py new file mode 100644 index 000000000..80de877b2 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/jasper.py @@ -0,0 +1,182 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Tuple + +from openspeech.modules import JasperSubBlock, JasperBlock, MaskConv1d + + +class Jasper(nn.Module): + r""" + Jasper (Just Another Speech Recognizer), an ASR model comprised of 54 layers proposed by NVIDIA. + Jasper achieved sub 3 percent word error rate (WER) on the LibriSpeech dataset. + + Args: + num_classes (int): number of classification + version (str): version of jasper. Marked as BxR: B - number of blocks, R - number of sub-blocks + + Inputs: inputs, input_lengths, residual + - **inputs**: tensor contains input sequence vector + - **input_lengths**: tensor contains sequence lengths + + Returns: + (Tensor, Tensor): + + * outputs (torch.FloatTensor): Log probability of model predictions. ``(batch, seq_length, num_classes)`` + * output_lengths (torch.LongTensor): The length of output tensor ``(batch)`` + + Reference: + Jason Li. et al.: Jasper: An End-to-End Convolutional Neural Acoustic Model + https://arxiv.org/pdf/1904.03288.pdf + """ + + def __init__(self, configs, input_dim: int, num_classes: int) -> None: + super(Jasper, self).__init__() + self.configs = configs + self.layers = nn.ModuleList() + + in_channels = eval(self.configs.in_channels) + out_channels = eval(self.configs.out_channels) + kernel_size = eval(self.configs.kernel_size) + stride = eval(self.configs.stride) + dilation = eval(self.configs.dilation) + dropout_p = eval(self.configs.dropout_p) + + self.layers.append( + JasperSubBlock( + in_channels=input_dim, + out_channels=out_channels[0], + kernel_size=kernel_size[0], + stride=stride[0], + dilation=dilation[0], + dropout_p=dropout_p[0], + activation='relu', + bias=False, + ) + ) + self.layers.extend([ + JasperBlock( + num_sub_blocks=self.configs.num_sub_blocks, + in_channels=in_channels[i], + out_channels=out_channels[i], + kernel_size=kernel_size[i], + dilation=dilation[i], + dropout_p=dropout_p[i], + activation='relu', + bias=False, + ) for i in range(1, self.configs.num_blocks + 1) + ]) + self.postprocess_layers = nn.ModuleList([ + JasperSubBlock( + in_channels=in_channels[i], + out_channels=num_classes if out_channels[i] is None else out_channels[i], + kernel_size=kernel_size[i], + dilation=dilation[i], + dropout_p=dropout_p[i], + activation='relu', + bias=True if i == 2 else False, + ) for i in range(self.configs.num_blocks + 1, self.configs.num_blocks + 4) + ]) + + self.residual_connections = self._create_jasper_dense_residual_connections() + + def count_parameters(self) -> int: + r""" Count parameters of model """ + return sum([p.numel for p in self.parameters()]) + + def update_dropout(self, dropout_p: float) -> None: + r""" Update dropout probability of model """ + for name, child in self.named_children(): + if isinstance(child, nn.Dropout): + child.p = dropout_p + + def forward( + self, + inputs: torch.Tensor, + input_lengths: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + r""" + Forward propagate a `inputs` for encoder_only training. + + Args: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded + `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. 
``(batch)`` + + Returns: + (Tensor, Tensor): + + * outputs (torch.FloatTensor): Log probability of model predictions. ``(batch, seq_length, num_classes)`` + * output_lengths (torch.LongTensor): The length of output tensor ``(batch)`` + """ + residual, prev_outputs, prev_output_lengths = None, list(), list() + inputs = inputs.transpose(1, 2) + + for i, layer in enumerate(self.layers[:-1]): + inputs, input_lengths = layer(inputs, input_lengths, residual) + prev_outputs.append(inputs) + prev_output_lengths.append(input_lengths) + residual = self._get_jasper_dencse_residual(prev_outputs, prev_output_lengths, i) + + outputs, output_lengths = self.layers[-1](inputs, input_lengths, residual) + + for i, layer in enumerate(self.postprocess_layers): + outputs, output_lengths = layer(outputs, output_lengths) + + outputs = F.log_softmax(outputs.transpose(1, 2), dim=-1) + + return outputs, output_lengths + + def _get_jasper_dencse_residual(self, prev_outputs: list, prev_output_lengths: list, index: int): + residual = None + + for item in zip(prev_outputs, prev_output_lengths, self.residual_connections[index]): + prev_output, prev_output_length, residual_modules = item + conv1x1, batch_norm = residual_modules + + if residual is None: + residual = conv1x1(prev_output, prev_output_length)[0] + else: + residual += conv1x1(prev_output, prev_output_length)[0] + + residual = batch_norm(residual) + + return residual + + def _create_jasper_dense_residual_connections(self) -> nn.ModuleList: + residual_connections = nn.ModuleList() + + for i in range(self.configs.num_blocks): + residual_modules = nn.ModuleList() + for j in range(1, i + 2): + residual_modules.append(nn.ModuleList([ + MaskConv1d( + self.configs.in_channels[j], self.configs.out_channels[j], kernel_size=1 + ), + nn.BatchNorm1d(self.configs.out_channels[i], eps=1e-03, momentum=0.1), + ])) + residual_connections.append(residual_modules) + + return residual_connections diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/lstm_encoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/lstm_encoder.py new file mode 100644 index 000000000..4109b0420 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/lstm_encoder.py @@ -0,0 +1,128 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
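Editor's note: the RNN-based encoders in this patch (ConvolutionalLSTMEncoder above, LSTMEncoder and RNNTransducerEncoder below) all follow the same pattern: pack the padded batch with its true lengths, run the RNN, then unpack. The lengths are moved to the CPU first, which is why those encoders call .cpu() on them. A minimal, self-contained sketch of that round trip follows; it uses batch_first=True for brevity, whereas the encoders here work time-major via transpose(0, 1), which is equivalent.

import torch
import torch.nn as nn

batch, max_len, feat_dim, hidden = 3, 7, 16, 32
inputs = torch.randn(batch, max_len, feat_dim)            # (batch, time, feature)
lengths = torch.tensor([7, 5, 2])                         # true lengths, sorted descending

rnn = nn.LSTM(feat_dim, hidden, batch_first=True, bidirectional=True)

packed = nn.utils.rnn.pack_padded_sequence(inputs, lengths.cpu(), batch_first=True)
packed_out, _ = rnn(packed)
outputs, out_lengths = nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=True)

print(outputs.shape)   # (batch, max_len, hidden * 2) -- padded positions are zeros
print(out_lengths)     # tensor([7, 5, 2])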
+ +import torch +import torch.nn as nn +from typing import Tuple, Optional + +from openspeech.encoders import OpenspeechEncoder +from openspeech.modules import Transpose, Linear + + +class LSTMEncoder(OpenspeechEncoder): + r""" + Converts low level speech signals into higher level features + + Args: + input_dim (int): dimension of input vector + num_classes (int): number of classification + hidden_state_dim (int): the number of features in the encoders hidden state `h` + num_layers (int, optional): number of recurrent layers (default: 3) + bidirectional (bool, optional): if True, becomes a bidirectional encoders (default: False) + rnn_type (str, optional): type of RNN cell (default: lstm) + dropout_p (float, optional): dropout probability of encoders (default: 0.2) + joint_ctc_attention (bool, optional): flag indication joint ctc attention or not + + Inputs: inputs, input_lengths + - **inputs**: list of sequences, whose length is the batch size and within which each sequence is list of tokens + - **input_lengths**: list of sequence lengths + + Returns: + (Tensor, Tensor, Tensor): + + * outputs: A output sequence of encoders. `FloatTensor` of size ``(batch, seq_length, dimension)`` + * encoder_logits: Log probability of encoders outputs will be passed to CTC Loss. + If joint_ctc_attention is False, return None. + * encoder_output_lengths: The length of encoders outputs. ``(batch)`` + """ + supported_rnns = { + 'lstm': nn.LSTM, + 'gru': nn.GRU, + 'rnn': nn.RNN, + } + + def __init__( + self, + input_dim: int, + num_classes: int = None, + hidden_state_dim: int = 512, + dropout_p: float = 0.3, + num_layers: int = 3, + bidirectional: bool = True, + rnn_type: str = 'lstm', + joint_ctc_attention: bool = False, + ) -> None: + super(LSTMEncoder, self).__init__() + + self.num_classes = num_classes + self.joint_ctc_attention = joint_ctc_attention + + self.hidden_state_dim = hidden_state_dim + self.rnn = self.supported_rnns[rnn_type.lower()]( + input_size=input_dim, + hidden_size=hidden_state_dim, + num_layers=num_layers, + bias=True, + batch_first=True, + dropout=dropout_p, + bidirectional=bidirectional, + ) + + if self.joint_ctc_attention: + self.fc = nn.Sequential( + Transpose(shape=(1, 2)), + nn.Dropout(dropout_p), + Linear(hidden_state_dim << 1, num_classes, bias=False), + ) + + def forward( + self, + inputs: torch.Tensor, + input_lengths: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: + r""" + Forward propagate a `inputs` for encoders training. + + Args: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded + `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + (Tensor, Tensor, Tensor): + + * outputs: A output sequence of encoders. `FloatTensor` of size ``(batch, seq_length, dimension)`` + * encoder_logits: Log probability of encoders outputs will be passed to CTC Loss. + If joint_ctc_attention is False, return None. + * encoder_output_lengths: The length of encoders outputs. 
``(batch)`` + """ + encoder_logits = None + + conv_outputs = nn.utils.rnn.pack_padded_sequence(inputs.transpose(0, 1), input_lengths.cpu()) + outputs, hidden_states = self.rnn(conv_outputs) + outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs) + outputs = outputs.transpose(0, 1) + + if self.joint_ctc_attention: + encoder_logits = self.fc(outputs.transpose(1, 2)).log_softmax(dim=2) + + return outputs, encoder_logits, input_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/openspeech_encoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/openspeech_encoder.py new file mode 100644 index 000000000..4717d4967 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/openspeech_encoder.py @@ -0,0 +1,72 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor + +from openspeech.modules import DeepSpeech2Extractor, VGGExtractor, Swish, Conv2dSubsampling + + +class OpenspeechEncoder(nn.Module): + r""" + Base Interface of Openspeech Encoder. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + """ + supported_activations = { + 'hardtanh': nn.Hardtanh(0, 20, inplace=True), + 'relu': nn.ReLU(inplace=True), + 'elu': nn.ELU(inplace=True), + 'leaky_relu': nn.LeakyReLU(inplace=True), + 'gelu': nn.GELU(), + 'swish': Swish(), + } + supported_extractors = { + 'ds2': DeepSpeech2Extractor, + 'vgg': VGGExtractor, + 'conv2d_subsample': Conv2dSubsampling, + } + + def __init__(self): + super(OpenspeechEncoder, self).__init__() + + def count_parameters(self) -> int: + r""" Count parameters of encoders """ + return sum([p.numel for p in self.parameters()]) + + def update_dropout(self, dropout_p: float) -> None: + r""" Update dropout probability of encoders """ + for name, child in self.named_children(): + if isinstance(child, nn.Dropout): + child.p = dropout_p + + def forward(self, inputs: Tensor, input_lengths: Tensor): + r""" + Forward propagate for encoders training. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. 
+ input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + """ + raise NotImplementedError diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/quartznet.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/quartznet.py new file mode 100644 index 000000000..bf181306a --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/quartznet.py @@ -0,0 +1,120 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from typing import Tuple + +from openspeech.modules import QuartzNetBlock, JasperSubBlock + + +class QuartzNet(nn.Module): + r""" + QuartzNet is fully convolutional automatic speech recognition model. The model is composed of multiple + blocks with residual connections between them. Each block consists of one or more modules with + 1D time-channel separable convolutional layers, batch normalization, and ReLU layers. + It is trained with CTC loss. + + Args: + configs (DictConfig): hydra configuration set. + input_dim (int): dimension of input. + num_classes (int): number of classification. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + (Tensor, Tensor): + + * outputs (torch.FloatTensor): Log probability of model predictions. ``(batch, seq_length, num_classes)`` + * output_lengths (torch.LongTensor): The length of output tensor ``(batch)`` + + Reference: + Samuel Kriman et al.: QUARTZNET: DEEP AUTOMATIC SPEECH RECOGNITION WITH 1D TIME-CHANNEL SEPARABLE CONVOLUTIONS. 
+ https://arxiv.org/abs/1910.10261.pdf + """ + def __init__(self, configs, input_dim: int, num_classes: int) -> None: + super(QuartzNet, self).__init__() + self.configs = configs + + in_channels = eval(self.configs.model.in_channels) + out_channels = eval(self.configs.model.out_channels) + kernel_size = eval(self.configs.model.kernel_size) + dilation = eval(self.configs.model.dilation) + dropout_p = eval(self.configs.model.dropout_p) + + self.preprocess_layer = JasperSubBlock( + in_channels=input_dim, + out_channels=out_channels[0], + kernel_size=kernel_size[0], + dilation=dilation[0], + dropout_p=dropout_p[0], + activation='relu', + bias=False, + ) + self.layers = nn.ModuleList([ + QuartzNetBlock( + num_sub_blocks=self.configs.model.num_sub_blocks, + in_channels=in_channels[i], + out_channels=out_channels[i], + kernel_size=kernel_size[i], + bias=False, + ) for i in range(1, self.configs.model.num_blocks + 1) + ]) + self.postprocess_layers = nn.ModuleList([ + JasperSubBlock( + in_channels=in_channels[i], + out_channels=num_classes if out_channels[i] is None else out_channels[i], + kernel_size=kernel_size[i], + dilation=dilation[i], + dropout_p=dropout_p[i], + activation='relu', + bias=True if i == 2 else False, + ) for i in range(self.configs.model.num_blocks + 1, self.configs.model.num_blocks + 4) + ]) + + def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + r""" + Forward propagate a `inputs` for encoder_only training. + + Args: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + (Tensor, Tensor): + + * outputs (torch.FloatTensor): Log probability of model predictions. ``(batch, seq_length, num_classes)`` + * output_lengths (torch.LongTensor): The length of output tensor ``(batch)`` + """ + inputs = inputs.transpose(1, 2) + + outputs, output_lengths = self.preprocess_layer(inputs, input_lengths) + + for layer in self.layers: + outputs, output_lengths = layer(outputs, output_lengths) + + for layer in self.postprocess_layers: + outputs, output_lengths = layer(outputs, output_lengths) + + return outputs.transpose(1, 2), output_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/rnn_transducer_encoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/rnn_transducer_encoder.py new file mode 100644 index 000000000..6a5b13802 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/rnn_transducer_encoder.py @@ -0,0 +1,112 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from typing import Tuple + +from openspeech.encoders import OpenspeechEncoder +from openspeech.modules import Linear + + +class RNNTransducerEncoder(OpenspeechEncoder): + r""" + Encoder of RNN-Transducer. + + Args: + input_dim (int): dimension of input vector + hidden_state_dim (int, optional): hidden state dimension of encoders (default: 320) + output_dim (int, optional): output dimension of encoders and decoders (default: 512) + num_layers (int, optional): number of encoders layers (default: 4) + rnn_type (str, optional): type of rnn cell (default: lstm) + bidirectional (bool, optional): if True, becomes a bidirectional encoders (default: True) + + Inputs: inputs, input_lengths + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + (Tensor, Tensor) + + * outputs (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size + ``(batch, seq_length, dimension)`` + * hidden_states (torch.FloatTensor): A hidden state of encoders. `FloatTensor` of size + ``(batch, seq_length, dimension)`` + + Reference: + A Graves: Sequence Transduction with Recurrent Neural Networks + https://arxiv.org/abs/1211.3711.pdf + """ + supported_rnns = { + 'lstm': nn.LSTM, + 'gru': nn.GRU, + 'rnn': nn.RNN, + } + + def __init__( + self, + input_dim: int, + hidden_state_dim: int, + output_dim: int, + num_layers: int, + rnn_type: str = 'lstm', + dropout_p: float = 0.2, + bidirectional: bool = True, + ): + super(RNNTransducerEncoder, self).__init__() + self.hidden_state_dim = hidden_state_dim + rnn_cell = self.supported_rnns[rnn_type.lower()] + self.rnn = rnn_cell( + input_size=input_dim, + hidden_size=hidden_state_dim, + num_layers=num_layers, + bias=True, + batch_first=True, + dropout=dropout_p, + bidirectional=bidirectional, + ) + self.fc = Linear(hidden_state_dim << 1 if bidirectional else hidden_state_dim, output_dim) + + def forward( + self, + inputs: torch.Tensor, + input_lengths: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + r""" + Forward propagate a `inputs` for encoders training. + + Args: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + (Tensor, Tensor) + + * outputs (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size + ``(batch, seq_length, dimension)`` + * output_lengths (torch.LongTensor): The length of output tensor. 
``(batch)`` + """ + inputs = nn.utils.rnn.pack_padded_sequence(inputs.transpose(0, 1), input_lengths.cpu()) + outputs, hidden_states = self.rnn(inputs) + outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs) + outputs = self.fc(outputs.transpose(0, 1)) + return outputs, input_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/transformer_encoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/transformer_encoder.py new file mode 100644 index 000000000..eececf6ec --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/transformer_encoder.py @@ -0,0 +1,205 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from typing import Tuple +from torch import Tensor + +from openspeech.encoders import OpenspeechEncoder +from openspeech.modules import ( + Linear, + PositionalEncoding, + get_attn_pad_mask, + Transpose, + MultiHeadAttention, + PositionwiseFeedForward, +) + + +class TransformerEncoderLayer(nn.Module): + r""" + EncoderLayer is made up of self-attention and feedforward network. + This standard encoders layer is based on the paper "Attention Is All You Need". + + Args: + d_model: dimension of model (default: 512) + num_heads: number of attention heads (default: 8) + d_ff: dimension of feed forward network (default: 2048) + dropout_p: probability of dropout (default: 0.3) + + Inputs: + inputs (torch.FloatTensor): input sequence of transformer encoder layer + self_attn_mask (torch.BoolTensor): mask of self attention + + Returns: + (Tensor, Tensor) + + * outputs (torch.FloatTensor): output of transformer encoder layer + * attn (torch.FloatTensor): attention of transformer encoder layer + """ + + def __init__( + self, + d_model: int = 512, + num_heads: int = 8, + d_ff: int = 2048, + dropout_p: float = 0.3, + ) -> None: + super(TransformerEncoderLayer, self).__init__() + self.attention_prenorm = nn.LayerNorm(d_model) + self.feed_forward_prenorm = nn.LayerNorm(d_model) + self.self_attention = MultiHeadAttention(d_model, num_heads) + self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout_p) + + def forward(self, inputs: Tensor, self_attn_mask: Tensor = None) -> Tuple[Tensor, Tensor]: + r""" + Forward propagate of transformer encoder layer. 
+ + Inputs: + inputs (torch.FloatTensor): input sequence of transformer encoder layer + self_attn_mask (torch.BoolTensor): mask of self attention + + Returns: + outputs (torch.FloatTensor): output of transformer encoder layer + attn (torch.FloatTensor): attention of transformer encoder layer + """ + residual = inputs + inputs = self.attention_prenorm(inputs) + outputs, attn = self.self_attention(inputs, inputs, inputs, self_attn_mask) + outputs += residual + + residual = outputs + outputs = self.feed_forward_prenorm(outputs) + outputs = self.feed_forward(outputs) + outputs += residual + + return outputs, attn + + +class TransformerEncoder(OpenspeechEncoder): + r""" + The TransformerEncoder is composed of a stack of N identical layers. + Each layer has two sub-layers. The first is a multi-head self-attention mechanism, + and the second is a simple, position-wise fully connected feed-forward network. + + Args: + input_dim: dimension of feature vector + d_model: dimension of model (default: 512) + d_ff: dimension of feed forward network (default: 2048) + num_layers: number of encoders layers (default: 6) + num_heads: number of attention heads (default: 8) + dropout_p: probability of dropout (default: 0.3) + joint_ctc_attention (bool, optional): flag indication joint ctc attention or not + + Inputs: + - **inputs**: list of sequences, whose length is the batch size and within which each sequence is list of tokens + - **input_lengths**: list of sequence lengths + + Returns: + (Tensor, Tensor, Tensor): + + * outputs: A output sequence of encoders. `FloatTensor` of size ``(batch, seq_length, dimension)`` + * encoder_logits: Log probability of encoders outputs will be passed to CTC Loss. + If joint_ctc_attention is False, return None. ``(batch, seq_length, num_classes)`` + * output_lengths: The length of encoders outputs. ``(batch)`` + + Reference: + Ashish Vaswani et al.: Attention Is All You Need + https://arxiv.org/abs/1706.03762 + """ + + def __init__( + self, + num_classes: int, + input_dim: int = 80, + d_model: int = 512, + d_ff: int = 2048, + num_layers: int = 6, + num_heads: int = 8, + dropout_p: float = 0.3, + joint_ctc_attention: bool = False, + ) -> None: + super(TransformerEncoder, self).__init__() + + self.num_classes = num_classes + self.joint_ctc_attention = joint_ctc_attention + + self.d_model = d_model + self.num_layers = num_layers + self.num_heads = num_heads + self.input_proj = Linear(input_dim, d_model) + self.input_norm = nn.LayerNorm(d_model) + self.input_dropout = nn.Dropout(p=dropout_p) + self.positional_encoding = PositionalEncoding(d_model) + self.layers = nn.ModuleList([ + TransformerEncoderLayer( + d_model=d_model, + num_heads=num_heads, + d_ff=d_ff, + dropout_p=dropout_p, + ) for _ in range(num_layers) + ]) + + if self.joint_ctc_attention: + self.fc = nn.Sequential( + Transpose(shape=(1, 2)), + nn.Dropout(dropout_p), + Linear(d_model, num_classes, bias=False), + ) + + def forward( + self, + inputs: torch.Tensor, + input_lengths: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + r""" + Forward propagate a `inputs` for encoders training. + + Args: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded + `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + (Tensor, Tensor, Tensor): + + * outputs: A output sequence of encoders. 
`FloatTensor` of size ``(batch, seq_length, dimension)`` + * encoder_logits: Log probability of encoders outputs will be passed to CTC Loss. + If joint_ctc_attention is False, return None. ``(batch, seq_length, num_classes)`` + * output_lengths: The length of encoders outputs. ``(batch)`` + """ + encoder_logits = None + + self_attn_mask = get_attn_pad_mask(inputs, input_lengths, inputs.size(1)) + + outputs = self.input_norm(self.input_proj(inputs)) + outputs += self.positional_encoding(outputs.size(1)) + outputs = self.input_dropout(outputs) + + for layer in self.layers: + outputs, attn = layer(outputs, self_attn_mask) + + if self.joint_ctc_attention: + encoder_logits = self.fc(outputs.transpose(1, 2)).log_softmax(dim=-1) + + return outputs, encoder_logits, input_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/encoders/transformer_transducer_encoder.py b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/transformer_transducer_encoder.py new file mode 100644 index 000000000..b02427282 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/encoders/transformer_transducer_encoder.py @@ -0,0 +1,177 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
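As a quick sanity check, the TransformerEncoder above can be exercised on a dummy batch roughly as follows. This is only a sketch: it assumes the openspeech package added by this patch is importable, keeps the default joint_ctc_attention=False (so the second return value is None), and uses illustrative shapes.

import torch
from openspeech.encoders.transformer_encoder import TransformerEncoder

# toy batch: 4 utterances of 80-dim features padded to 120 frames
encoder = TransformerEncoder(num_classes=10, input_dim=80, d_model=512,
                             d_ff=2048, num_layers=2, num_heads=8)  # num_layers kept small for the sketch
inputs = torch.randn(4, 120, 80)                  # (batch, seq_length, input_dim)
input_lengths = torch.tensor([120, 100, 90, 60])  # (batch)
outputs, encoder_logits, output_lengths = encoder(inputs, input_lengths)
print(outputs.shape)      # expected: torch.Size([4, 120, 512]) -- (batch, seq_length, d_model)
print(encoder_logits)     # None, because joint_ctc_attention=False
print(output_lengths)     # unchanged input lengths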
+ +import torch +import torch.nn as nn +from typing import Tuple, Optional +from torch import Tensor + +from openspeech.encoders import OpenspeechEncoder +from openspeech.modules import ( + PositionalEncoding, + get_attn_pad_mask, + MultiHeadAttention, + PositionwiseFeedForward, +) + + +class TransformerTransducerEncoderLayer(nn.Module): + r""" + Repeated layers common to audio encoders and label encoders + + Args: + model_dim (int): the number of features in the encoder (default : 512) + d_ff (int): the number of features in the feed forward layers (default : 2048) + num_heads (int): the number of heads in the multi-head attention (default: 8) + dropout (float): dropout probability of encoder layer (default: 0.1) + + Inputs: inputs, self_attn_mask + - **inputs**: Audio feature or label feature + - **self_attn_mask**: Self attention mask to use in multi-head attention + + Returns: outputs, attn_distribution + (Tensor, Tensor) + + * outputs (torch.FloatTensor): Tensor containing higher (audio, label) feature values + * attn_distribution (torch.FloatTensor): Attention distribution in multi-head attention + """ + def __init__( + self, + model_dim: int = 512, + d_ff: int = 2048, + num_heads: int = 8, + dropout: float = 0.1, + ) -> None: + super(TransformerTransducerEncoderLayer, self).__init__() + self.layer_norm = nn.LayerNorm(model_dim) + self.self_attention = MultiHeadAttention(model_dim, num_heads) + self.encoder_dropout = nn.Dropout(p=dropout) + self.feed_forward = PositionwiseFeedForward(model_dim, d_ff, dropout) + + def forward( + self, + inputs: Tensor, + self_attn_mask: Optional[Tensor] = None + ) -> Tuple[Tensor, Tensor]: + r""" + Forward propagate a `inputs` for label encoder. + + Args: + inputs : A input sequence passed to encoder layer. ``(batch, seq_length, dimension)`` + self_attn_mask : Self attention mask to cover up padding ``(batch, seq_length, seq_length)`` + + Returns: + **outputs** (Tensor): ``(batch, seq_length, dimension)`` + **attn_distribution** (Tensor): ``(batch, seq_length, seq_length)`` + """ + inputs = self.layer_norm(inputs) + self_attn_output, attn_distribution = self.self_attention(inputs, inputs, inputs, self_attn_mask) + self_attn_output += inputs + + self_attn_output = self.layer_norm(self_attn_output) + ff_output = self.feed_forward(self_attn_output) + output = self.encoder_dropout(ff_output + self_attn_output) + + return output, attn_distribution + + +class TransformerTransducerEncoder(OpenspeechEncoder): + r""" + Converts the audio signal to higher feature values + + Args: + input_size (int): dimension of input vector (default : 80) + model_dim (int): the number of features in the audio encoder (default : 512) + d_ff (int): the number of features in the feed forward layers (default : 2048) + num_layers (int): the number of audio encoder layers (default: 18) + num_heads (int): the number of heads in the multi-head attention (default: 8) + dropout (float): dropout probability of audio encoder (default: 0.1) + max_positional_length (int): Maximum length to use for positional encoding (default : 5000) + + Inputs: inputs, inputs_lens + - **inputs**: Parsed audio of batch size number + - **inputs_lens**: Tensor of sequence lengths + + Returns: + * outputs (torch.FloatTensor): ``(batch, seq_length, dimension)`` + * input_lengths (torch.LongTensor): ``(batch)`` + + Reference: + Qian Zhang et al.: Transformer Transducer: A Streamable Speech Recognition Model with Transformer Encoders and RNN-T Loss + https://arxiv.org/abs/2002.02562 + """ + def __init__( + self, 
+ input_size: int = 80, + model_dim: int = 512, + d_ff: int = 2048, + num_layers: int = 18, + num_heads: int = 8, + dropout: float = 0.1, + max_positional_length: int = 5000, + ) -> None: + super(TransformerTransducerEncoder, self).__init__() + self.input_size = input_size + self.model_dim = model_dim + self.num_layers = num_layers + self.num_heads = num_heads + self.input_dropout = nn.Dropout(p=dropout) + self.layer_norm = nn.LayerNorm(model_dim) + self.positional_encoding = PositionalEncoding(model_dim, max_positional_length) + self.input_fc = nn.Linear(input_size, model_dim) + self.encoder_layers = nn.ModuleList([ + TransformerTransducerEncoderLayer( + model_dim, + d_ff, + num_heads, + dropout + ) for _ in range(num_layers) + ]) + + def forward( + self, + inputs: torch.Tensor, + input_lengths: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + r""" + Forward propagate a `inputs` for audio encoder. + + Args: + inputs (torch.FloatTensor): A input sequence passed to audio encoder. Typically inputs will be a padded + `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + **outputs** (Tensor): ``(batch, seq_length, dimension)`` + ** input_lengths**(Tensor): ``(batch)`` + """ + seq_len = inputs.size(1) + + self_attn_mask = get_attn_pad_mask(inputs, input_lengths, seq_len) + + inputs = self.input_fc(inputs) + self.positional_encoding(seq_len) + outputs = self.input_dropout(inputs) + + for encoder_layer in self.encoder_layers: + outputs, _ = encoder_layer(outputs, self_attn_mask) + + return outputs, input_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/lm/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/lm/__init__.py new file mode 100644 index 000000000..8f3de5ab0 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/lm/__init__.py @@ -0,0 +1,24 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
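The TransformerTransducerEncoder above follows the same (batch, seq_length, dim) convention for inputs and outputs. A minimal sketch of how it might be driven, assuming the openspeech package from this patch is importable and using illustrative shapes:

import torch
from openspeech.encoders.transformer_transducer_encoder import TransformerTransducerEncoder

encoder = TransformerTransducerEncoder(input_size=80, model_dim=512, d_ff=2048,
                                       num_layers=2, num_heads=8)  # num_layers kept small for the sketch
inputs = torch.randn(2, 50, 80)           # (batch, seq_length, input_size)
input_lengths = torch.tensor([50, 35])    # (batch)
outputs, output_lengths = encoder(inputs, input_lengths)
print(outputs.shape)       # expected: torch.Size([2, 50, 512])
print(output_lengths)      # returned unchanged: tensor([50, 35])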
+ +from .lstm_lm import LSTMForLanguageModel +from .transformer_lm import TransformerForLanguageModel diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/lm/lstm_lm.py b/audio/speech_recognition/conformer/pytorch/openspeech/lm/lstm_lm.py new file mode 100644 index 000000000..1eab3508c --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/lm/lstm_lm.py @@ -0,0 +1,158 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +import random + +from openspeech.lm.openspeech_lm import OpenspeechLanguageModelBase +from openspeech.modules import Linear, View +from typing import Optional, Tuple + + +class LSTMForLanguageModel(OpenspeechLanguageModelBase): + """ + Language Modelling is the core problem for a number of of natural language processing tasks such as speech to text, + conversational system, and text summarization. A trained language model learns the likelihood of occurrence + of a word based on the previous sequence of words used in the text. + + Args: + num_classes (int): number of classification + max_length (int): max decoding length (default: 128) + hidden_state_dim (int): dimension of hidden state vector (default: 768) + rnn_type (str, optional): type of RNN cell (default: lstm) + pad_id (int, optional): index of the pad symbol (default: 0) + sos_id (int, optional): index of the start of sentence symbol (default: 1) + eos_id (int, optional): index of the end of sentence symbol (default: 2) + num_layers (int, optional): number of recurrent layers (default: 2) + dropout_p (float, optional): dropout probability of decoders (default: 0.2) + + Inputs: inputs, teacher_forcing_ratio + inputs (torch.LongTensr): A input sequence passed to decoders. `IntTensor` of size ``(batch, seq_length)`` + teacher_forcing_ratio (float): ratio of teacher forcing + + Returns: + * logits (torch.FloatTensor): Log probability of model predictions. 
+ """ + supported_rnns = { + 'lstm': nn.LSTM, + 'gru': nn.GRU, + 'rnn': nn.RNN, + } + + def __init__( + self, + num_classes: int, + max_length: int = 128, + hidden_state_dim: int = 768, + pad_id: int = 0, + sos_id: int = 1, + eos_id: int = 2, + num_layers: int = 2, + rnn_type: str = 'lstm', + dropout_p: float = 0.3, + ) -> None: + super(LSTMForLanguageModel, self).__init__() + self.hidden_state_dim = hidden_state_dim + self.num_classes = num_classes + self.num_layers = num_layers + self.max_length = max_length + self.eos_id = eos_id + self.sos_id = sos_id + self.pad_id = pad_id + self.embedding = nn.Embedding(num_classes, hidden_state_dim) + self.input_dropout = nn.Dropout(dropout_p) + rnn_cell = self.supported_rnns[rnn_type.lower()] + self.rnn = rnn_cell( + input_size=hidden_state_dim, + hidden_size=hidden_state_dim, + num_layers=num_layers, + bias=True, + batch_first=True, + dropout=dropout_p, + bidirectional=False, + ) + + self.fc = nn.Sequential( + Linear(hidden_state_dim, hidden_state_dim), + nn.Tanh(), + View(shape=(-1, self.hidden_state_dim), contiguous=True), + Linear(hidden_state_dim, num_classes), + ) + + def forward_step( + self, + input_var: torch.Tensor, + hidden_states: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + batch_size, output_lengths = input_var.size(0), input_var.size(1) + + embedded = self.embedding(input_var) + embedded = self.input_dropout(embedded) + + if self.training: + self.rnn.flatten_parameters() + + outputs, hidden_states = self.rnn(embedded, hidden_states) + + step_outputs = self.fc(outputs.reshape(-1, self.hidden_state_dim)).log_softmax(dim=-1) + step_outputs = step_outputs.view(batch_size, output_lengths, -1).squeeze(1) + + return step_outputs, hidden_states + + def forward( + self, + inputs: torch.Tensor, + teacher_forcing_ratio: float = 1.0, + ) -> torch.Tensor: + """ + Forward propagate a `encoder_outputs` for training. + + Args: + inputs (torch.LongTensr): A input sequence passed to decoders. `IntTensor` of size ``(batch, seq_length)`` + teacher_forcing_ratio (float): ratio of teacher forcing + + Returns: + * logits (torch.FloatTensor): Log probability of model predictions. 
+ """ + batch_size = inputs.size(0) + logits, hidden_states = list(), None + use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False + + if use_teacher_forcing: + inputs = inputs[inputs != self.eos_id].view(batch_size, -1) + step_outputs, hidden_states = self.forward_step(input_var=inputs, hidden_states=hidden_states) + + for di in range(step_outputs.size(1)): + step_output = step_outputs[:, di, :] + logits.append(step_output) + + else: + input_var = inputs[:, 0].unsqueeze(1) + for di in range(self.max_length): + step_output, hidden_states = self.forward_step(input_var=input_var, hidden_states=hidden_states) + + step_output = step_output.squeeze(1) + logits.append(step_output) + input_var = logits[-1].topk(1)[1] + + return torch.stack(logits, dim=1) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/lm/openspeech_lm.py b/audio/speech_recognition/conformer/pytorch/openspeech/lm/openspeech_lm.py new file mode 100644 index 000000000..61755396e --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/lm/openspeech_lm.py @@ -0,0 +1,45 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn + + +class OpenspeechLanguageModelBase(nn.Module): + r""" Interface of OpenSpeech decoder.
""" + def __init__(self): + super(OpenspeechLanguageModelBase, self).__init__() + + def count_parameters(self) -> int: + r""" Count parameters of decoders """ + return sum([p.numel() for p in self.parameters()]) + + def update_dropout(self, dropout_p: float) -> None: + r""" Update dropout probability of decoders """ + for name, child in self.named_children(): + if isinstance(child, nn.Dropout): + child.p = dropout_p + + def forward_step(self, *args, **kwargs): + raise NotImplementedError + + def forward(self, *args, **kwargs): + raise NotImplementedError diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/lm/transformer_lm.py b/audio/speech_recognition/conformer/pytorch/openspeech/lm/transformer_lm.py new file mode 100644 index 000000000..205982311 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/lm/transformer_lm.py @@ -0,0 +1,171 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE.
+ +import torch +import torch.nn as nn +from typing import Optional, Tuple + +from openspeech.lm.openspeech_lm import OpenspeechLanguageModelBase +from openspeech.modules import ( + TransformerEmbedding, + PositionalEncoding, + Linear, + PositionwiseFeedForward, + MultiHeadAttention, + get_attn_pad_mask, + get_attn_subsequent_mask, +) + + +class TransformerForLanguageModelLayer(nn.Module): + def __init__( + self, + d_model: int = 768, + num_attention_heads: int = 8, + d_ff: int = 2048, + dropout_p: float = 0.3, + ) -> None: + super(TransformerForLanguageModelLayer, self).__init__() + self.attention_prenorm = nn.LayerNorm(d_model) + self.attention = MultiHeadAttention(d_model, num_attention_heads) + self.feed_forward_prenorm = nn.LayerNorm(d_model) + self.feed_forward = PositionwiseFeedForward(d_model=d_model, d_ff=d_ff, dropout_p=dropout_p) + + def forward( + self, + inputs: torch.Tensor, + mask: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + residual = inputs + inputs = self.attention_prenorm(inputs) + outputs, _ = self.attention(inputs, inputs, inputs, mask) + outputs += residual + + residual = outputs + outputs = self.feed_forward_prenorm(outputs) + outputs = self.feed_forward(outputs) + outputs += residual + + return outputs + + +class TransformerForLanguageModel(OpenspeechLanguageModelBase): + """ + Language Modelling is the core problem for a number of of natural language processing tasks such as speech to text, + conversational system, and text summarization. A trained language model learns the likelihood of occurrence + of a word based on the previous sequence of words used in the text. + + Args: + num_classes (int): number of classification + max_length (int): max decoding length (default: 128) + d_model (int): dimension of model (default: 768) + d_ff (int): dimension of feed forward network (default: 1536) + num_attention_heads (int): number of attention heads (default: 8) + pad_id (int, optional): index of the pad symbol (default: 0) + sos_id (int, optional): index of the start of sentence symbol (default: 1) + eos_id (int, optional): index of the end of sentence symbol (default: 2) + num_layers (int, optional): number of transformer layers (default: 2) + dropout_p (float, optional): dropout probability of decoders (default: 0.2) + + Inputs:, inputs, input_lengths + inputs (torch.LongTensor): A input sequence passed to decoders. `IntTensor` of size ``(batch, seq_length)`` + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + * logits (torch.FloatTensor): Log probability of model predictions. 
+ """ + def __init__( + self, + num_classes: int, + max_length: int = 128, + d_model: int = 768, + num_attention_heads: int = 8, + d_ff: int = 1536, + pad_id: int = 0, + sos_id: int = 1, + eos_id: int = 2, + num_layers: int = 2, + dropout_p: float = 0.3, + ): + super(TransformerForLanguageModel, self).__init__() + self.d_model = d_model + self.num_classes = num_classes + self.num_layers = num_layers + self.max_length = max_length + self.eos_id = eos_id + self.sos_id = sos_id + self.pad_id = pad_id + self.embedding = TransformerEmbedding(num_classes, pad_id, d_model) + self.positional_encoding = PositionalEncoding(d_model) + self.input_dropout = nn.Dropout(p=dropout_p) + self.layers = nn.ModuleList([ + TransformerForLanguageModelLayer( + d_model=d_model, + num_attention_heads=num_attention_heads, + d_ff=d_ff, + dropout_p=dropout_p, + ) for _ in range(num_layers) + ]) + self.fc = nn.Sequential( + nn.LayerNorm(d_model), + Linear(d_model, d_model, bias=False), + nn.Tanh(), + Linear(d_model, num_classes, bias=False), + ) + + def forward_step(self, inputs, input_lengths): + pad_mask = get_attn_pad_mask( + inputs, input_lengths, inputs.size(1) + ) + subsequent_mask = get_attn_subsequent_mask(inputs) + mask = torch.gt((pad_mask + subsequent_mask), 0) + + outputs = self.embedding(inputs) + self.positional_encoding(inputs.size(1)) + outputs = self.input_dropout(outputs) + + for layer in self.layers: + outputs = layer(inputs=outputs, mask=mask) + + step_outputs = self.fc(outputs).log_softmax(dim=-1) + + return step_outputs + + def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor) -> torch.Tensor: + """ + Forward propagate a `encoder_outputs` for training. + + Args: + inputs (torch.LongTensor): A input sequence passed to decoders. `IntTensor` of size ``(batch, seq_length)`` + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + * logits (torch.FloatTensor): Log probability of model predictions. + """ + logits = list() + + step_outputs = self.forward_step(inputs, input_lengths) + + for di in range(step_outputs.size(1)): + step_output = step_outputs[:, di, :] + logits.append(step_output) + + return torch.stack(logits, dim=1) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/metrics.py b/audio/speech_recognition/conformer/pytorch/openspeech/metrics.py new file mode 100644 index 000000000..56543b678 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/metrics.py @@ -0,0 +1,156 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import Levenshtein as Lev +from typing import Tuple + + +class ErrorRate(object): + r""" + Provides inteface of error rate calcuation. + + Note: + Do not use this class directly, use one of the sub classes. + """ + + def __init__(self, tokenizer) -> None: + self.total_dist = 0.0 + self.total_length = 0.0 + self.tokenizer = tokenizer + + def __call__(self, targets, y_hats): + r""" + Calculating error rate. + + Args: + targets (torch.Tensor): set of ground truth + y_hats (torch.Tensor): predicted y values (y_hat) by the model + + Returns: + - **cer**: character error rate + """ + dist, length = self._get_distance(targets, y_hats) + self.total_dist += dist + self.total_length += length + return self.total_dist / self.total_length + + def _get_distance(self, targets: torch.Tensor, y_hats: torch.Tensor) -> Tuple[float, int]: + r""" + Provides total character distance between targets & y_hats + + Args: targets, y_hats + targets (torch.Tensor): set of ground truth + y_hats (torch.Tensor): predicted y values (y_hat) by the model + + Returns: total_dist, total_length + - **total_dist**: total distance between targets & y_hats + - **total_length**: total length of targets sequence + """ + total_dist = 0 + total_length = 0 + + for (target, y_hat) in zip(targets, y_hats): + s1 = self.tokenizer.decode(target) + s2 = self.tokenizer.decode(y_hat) + + dist, length = self.metric(s1, s2) + + total_dist += dist + total_length += length + + return total_dist, total_length + + def metric(self, *args, **kwargs) -> Tuple[float, int]: + raise NotImplementedError + + +class CharacterErrorRate(ErrorRate): + r""" + Computes the Character Error Rate, defined as the edit distance between the + two provided sentences after tokenizing to characters. + """ + def __init__(self, tokenizer): + super(CharacterErrorRate, self).__init__(tokenizer) + + def metric(self, s1: str, s2: str) -> Tuple[float, int]: + r""" + Computes the Character Error Rate, defined as the edit distance between the + two provided sentences after tokenizing to characters. + + Args: s1, s2 + s1 (string): space-separated sentence + s2 (string): space-separated sentence + + Returns: dist, length + - **dist**: distance between target & y_hat + - **length**: length of target sequence + """ + s1 = s1.replace(' ', '') + s2 = s2.replace(' ', '') + + # if '_' in sentence, means subword-unit, delete '_' + if '_' in s1: + s1 = s1.replace('_', '') + + if '_' in s2: + s2 = s2.replace('_', '') + + dist = Lev.distance(s2, s1) + length = len(s1.replace(' ', '')) + + return dist, length + + +class WordErrorRate(ErrorRate): + r""" + Computes the Word Error Rate, defined as the edit distance between the + two provided sentences after tokenizing to words. + """ + def __init__(self, tokenizer): + super(WordErrorRate, self).__init__(tokenizer) + + def metric(self, s1: str, s2: str) -> Tuple[float, int]: + r""" + Computes the Word Error Rate, defined as the edit distance between the + two provided sentences after tokenizing to words. 
+ + Args: s1, s2 + s1 (string): space-separated sentence + s2 (string): space-separated sentence + + Returns: dist, length + - **dist**: distance between target & y_hat + - **length**: length of target sequence + """ + b = set(s1.split() + s2.split()) + word2char = dict(zip(b, range(len(b)))) + + # map the words to a char array (Levenshtein packages only accepts + # strings) + w1 = [chr(word2char[w]) for w in s1.split()] + w2 = [chr(word2char[w]) for w in s2.split()] + + dist = Lev.distance(''.join(w1), ''.join(w2)) + length = len(s1.split()) + + return dist, length diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/README.md b/audio/speech_recognition/conformer/pytorch/openspeech/models/README.md new file mode 100644 index 000000000..c71ea9977 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/README.md @@ -0,0 +1,27 @@ +## Model architectures + +We support all the models below. Note that, the important concepts of the model have been implemented to match, but the details of the implementation may vary. + +1. [**DeepSpeech2**](https://sooftware.github.io/openspeech/architectures/DeepSpeech2.html) (from Baidu Research) released with paper [Deep Speech 2: End-to-End Speech Recognition in +English and Mandarin](https://arxiv.org/abs/1512.02595.pdf), by Dario Amodei, Rishita Anubhai, Eric Battenberg, Carl Case, Jared Casper, Bryan Catanzaro, Jingdong Chen, Mike Chrzanowski, Adam Coates, Greg Diamos, Erich Elsen, Jesse Engel, Linxi Fan, Christopher Fougner, Tony Han, Awni Hannun, Billy Jun, Patrick LeGresley, Libby Lin, Sharan Narang, Andrew Ng, Sherjil Ozair, Ryan Prenger, Jonathan Raiman, Sanjeev Satheesh, David Seetapun, Shubho Sengupta, Yi Wang, Zhiqian Wang, Chong Wang, Bo Xiao, Dani Yogatama, Jun Zhan, Zhenyao Zhu. +2. [**RNN-Transducer**](https://sooftware.github.io/openspeech/architectures/RNN%20Transducer.html) (from University of Toronto) released with paper [Sequence Transduction with Recurrent Neural Networks](https://arxiv.org/abs/1211.3711.pdf), by Alex Graves. +3. [**LSTM Language Model**](https://sooftware.github.io/openspeech/architectures/LSTM%20LM.html) (from RWTH Aachen University) released with paper [LSTM Neural Networks for Language Modeling](http://www-i6.informatik.rwth-aachen.de/publications/download/820/Sundermeyer-2012.pdf), by Martin Sundermeyer, Ralf Schluter, and Hermann Ney. +3. [**Listen Attend Spell**](https://sooftware.github.io/openspeech/architectures/Listen%20Attend%20Spell.html) (from Carnegie Mellon University and Google Brain) released with paper [Listen, Attend and Spell](https://arxiv.org/abs/1508.01211), by William Chan, Navdeep Jaitly, Quoc V. Le, Oriol Vinyals. +4. [**Location-aware attention based Listen Attend Spell**](https://sooftware.github.io/openspeech/architectures/Listen%20Attend%20Spell.html) (from University of Wrocław and Jacobs University and Universite de Montreal) released with paper [Attention-Based Models for Speech Recognition](https://arxiv.org/abs/1506.07503), by Jan Chorowski, Dzmitry Bahdanau, Dmitriy Serdyuk, Kyunghyun Cho, Yoshua Bengio. +5. [**Joint CTC-Attention based Listen Attend Spell**](https://sooftware.github.io/openspeech/architectures/Listen%20Attend%20Spell.html) (from Mitsubishi Electric Research Laboratories and Carnegie Mellon University) released with paper [Joint CTC-Attention based End-to-End Speech Recognition using Multi-task Learning](https://arxiv.org/abs/1609.06773), by Suyoun Kim, Takaaki Hori, Shinji Watanabe. +6. 
[**Deep CNN Encoder with Joint CTC-Attention Listen Attend Spell**](https://sooftware.github.io/openspeech/architectures/Listen%20Attend%20Spell.html) (from Mitsubishi Electric Research Laboratories and Massachusetts Institute of Technology and Carnegie Mellon University) released with paper [Advances in Joint CTC-Attention based End-to-End Speech Recognition with a Deep CNN Encoder and RNN-LM](https://arxiv.org/abs/1706.02737), by Takaaki Hori, Shinji Watanabe, Yu Zhang, William Chan. +7. [**Multi-head attention based Listen Attend Spell**](https://sooftware.github.io/openspeech/architectures/Listen%20Attend%20Spell.html) (from Google) released with paper [State-of-the-art Speech Recognition With Sequence-to-Sequence Models](https://arxiv.org/abs/1712.01769), by Chung-Cheng Chiu, Tara N. Sainath, Yonghui Wu, Rohit Prabhavalkar, Patrick Nguyen, Zhifeng Chen, Anjuli Kannan, Ron J. Weiss, Kanishka Rao, Ekaterina Gonina, Navdeep Jaitly, Bo Li, Jan Chorowski, Michiel Bacchiani. +8. [**Speech-Transformer**](https://sooftware.github.io/openspeech/architectures/Transformer.html) (from University of Chinese Academy of Sciences and Institute of Automation and Chinese Academy of Sciences) released with paper [Speech-Transformer: A No-Recurrence Sequence-to-Sequence Model for Speech Recognition](https://ieeexplore.ieee.org/document/8462506), by Linhao Dong; Shuang Xu; Bo Xu. +9. [**VGG-Transformer**](https://sooftware.github.io/openspeech/architectures/Transformer.html) (from Facebook AI Research) released with paper [Transformers with convolutional context for ASR](https://arxiv.org/abs/1904.11660), by Abdelrahman Mohamed, Dmytro Okhonko, Luke Zettlemoyer. +10. [**Transformer with CTC**](https://sooftware.github.io/openspeech/architectures/Transformer.html) (from NTT Communication Science Laboratories, Waseda University, Center for Language and Speech Processing, Johns Hopkins University) released with paper [Improving Transformer-based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration](https://www.isca-speech.org/archive/Interspeech_2019/pdfs/1938.pdf), by Shigeki Karita, Nelson Enrique Yalta Soplin, Shinji Watanabe, Marc Delcroix, Atsunori Ogawa, Tomohiro Nakatani. +11. [**Joint CTC-Attention based Transformer**](https://sooftware.github.io/openspeech/architectures/Transformer.html)(from NTT Corporation) released with paper [Self-Distillation for Improving CTC-Transformer-based ASR Systems](https://www.isca-speech.org/archive/Interspeech_2020/pdfs/1223.pdf), by Takafumi Moriya, Tsubasa Ochiai, Shigeki Karita, Hiroshi Sato, Tomohiro Tanaka, Takanori Ashihara, Ryo Masumura, Yusuke Shinohara, Marc Delcroix. +12. [**Transformer Language Model**](https://sooftware.github.io/openspeech/architectures/Transformer%20LM.html) (from Amazon Web Services) released with paper [Language Models with Transformers](https://arxiv.org/abs/1904.09408), by Chenguang Wang, Mu Li, Alexander J. Smola. +12. [**Jasper**](https://sooftware.github.io/openspeech/modules/Encoders.html#module-openspeech.encoders.jasper) (from NVIDIA and New York University) released with paper [Jasper: An End-to-End Convolutional Neural Acoustic Model](https://arxiv.org/pdf/1904.03288.pdf), by Jason Li, Vitaly Lavrukhin, Boris Ginsburg, Ryan Leary, Oleksii Kuchaiev, Jonathan M. Cohen, Huyen Nguyen, Ravi Teja Gadde. +13. [**QuartzNet**](https://sooftware.github.io/openspeech/modules/Encoders.html#module-openspeech.encoders.quartznet) (from NVIDIA and Univ. of Illinois and Univ. 
of Saint Petersburg) released with paper [QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions](https://arxiv.org/abs/1910.10261.pdf), by Samuel Kriman, Stanislav Beliaev, Boris Ginsburg, Jocelyn Huang, Oleksii Kuchaiev, Vitaly Lavrukhin, Ryan Leary, Jason Li, Yang Zhang. +14. [**Transformer Transducer**](https://sooftware.github.io/openspeech/architectures/Transformer%20Transducer.html) (from Facebook AI) released with paper [Transformer-Transducer: +End-to-End Speech Recognition with Self-Attention](https://arxiv.org/abs/1910.12977.pdf), by Ching-Feng Yeh, Jay Mahadeokar, Kaustubh Kalgaonkar, Yongqiang Wang, Duc Le, Mahaveer Jain, Kjell Schubert, Christian Fuegen, Michael L. Seltzer. +15. [**Conformer**](https://sooftware.github.io/openspeech/architectures/Conformer.html) (from Google) released with paper [Conformer: Convolution-augmented Transformer for Speech Recognition](https://arxiv.org/abs/2005.08100), by Anmol Gulati, James Qin, Chung-Cheng Chiu, Niki Parmar, Yu Zhang, Jiahui Yu, Wei Han, Shibo Wang, Zhengdong Zhang, Yonghui Wu, Ruoming Pang. +16. [**Conformer with CTC**](https://sooftware.github.io/openspeech/architectures/Conformer.html) (from Northwestern Polytechnical University and University of Bordeaux and Johns Hopkins University and Human Dataware Lab and Kyoto University and NTT Corporation and Shanghai Jiao Tong University and Chinese Academy of Sciences) released with paper [Recent Developments on ESPNET Toolkit Boosted by Conformer](https://arxiv.org/abs/2010.13956.pdf), by Pengcheng Guo, Florian Boyer, Xuankai Chang, Tomoki Hayashi, Yosuke Higuchi, Hirofumi Inaguma, Naoyuki Kamo, Chenda Li, Daniel Garcia-Romero, Jiatong Shi, Jing Shi, Shinji Watanabe, Kun Wei, Wangyou Zhang, Yuekai Zhang. +17. [**Conformer with LSTM Decoder**](https://sooftware.github.io/openspeech/architectures/Conformer.html) (from IBM Research AI) released with paper [On the limit of English conversational speech recognition](https://arxiv.org/abs/2105.00982.pdf), by Zoltán Tüske, George Saon, Brian Kingsbury. +18. [**ContextNet**](https://sooftware.github.io/openspeech/architectures/ContextNet.html) (from Google) released with paper [ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context](https://arxiv.org/abs/2005.03191), by Wei Han, Zhengdong Zhang, Yu Zhang, Jiahui Yu, Chung-Cheng Chiu, James Qin, Anmol Gulati, Ruoming Pang, Yonghui Wu. + diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/__init__.py new file mode 100644 index 000000000..65f665c71 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/__init__.py @@ -0,0 +1,208 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import importlib +import os + +from .openspeech_model import OpenspeechModel +from .openspeech_encoder_decoder_model import OpenspeechEncoderDecoderModel +from .openspeech_transducer_model import OpenspeechTransducerModel +from .openspeech_ctc_model import OpenspeechCTCModel + +MODEL_REGISTRY = dict() +MODEL_DATACLASS_REGISTRY = dict() + + +def register_model(name: str, dataclass=None): + r""" + New model types can be added to OpenSpeech with the :func:`register_model` function decorator. + + For example:: + @register_model('conformer_lstm') + class ConformerLSTMModel(OpenspeechModel): + (...) + + .. note:: All models must implement the :class:`cls.__name__` interface. + + Args: + name (str): the name of the model + """ + + def register_model_cls(cls): + if name in MODEL_REGISTRY: + raise ValueError(f"Cannot register duplicate model ({name})") + if not issubclass(cls, OpenspeechModel): + raise ValueError(f"Model ({name}: {cls.__name__}) must extend OpenspeechModel") + + MODEL_REGISTRY[name] = cls + + cls.__dataclass = dataclass + if dataclass is not None: + if name in MODEL_DATACLASS_REGISTRY: + raise ValueError(f"Cannot register duplicate model ({name})") + MODEL_DATACLASS_REGISTRY[name] = dataclass + + return cls + + return register_model_cls + + +# automatically import any Python files in the models/ directory +models_dir = os.path.dirname(__file__) +for file in os.listdir(models_dir): + if os.path.isdir(os.path.join(models_dir, file)) and not file.startswith('__'): + for subfile in os.listdir(os.path.join(models_dir, file)): + path = os.path.join(models_dir, file, subfile) + if subfile.endswith(".py"): + python_file = subfile[: subfile.find(".py")] if subfile.endswith(".py") else subfile + module = importlib.import_module(f"openspeech.models.{file}.{python_file}") + continue + + path = os.path.join(models_dir, file) + if file.endswith(".py"): + model_name = file[: file.find(".py")] if file.endswith(".py") else file + module = importlib.import_module(f"openspeech.models.{model_name}") + + +from .conformer import ( + ConformerLSTMConfigs, + ConformerTransducerConfigs, + ConformerConfigs, + JointCTCConformerLSTMConfigs, + ConformerModel, + ConformerLSTMModel, + ConformerTransducerModel, + JointCTCConformerLSTMModel, +) + +from .contextnet import ( + ContextNetTransducerConfigs, + ContextNetLSTMConfigs, + ContextNetConfigs, + ContextNetModel, + ContextNetTransducerModel, + ContextNetLSTMModel, +) +from .deepspeech2 import ( + DeepSpeech2Model, + DeepSpeech2Configs, +) +from .jasper import ( + Jasper5x3Config, + Jasper10x5Config, + Jasper5x3Model, + Jasper10x5Model, +) +from .listen_attend_spell import ( + ListenAttendSpellConfigs, + ListenAttendSpellWithMultiHeadConfigs, + ListenAttendSpellWithLocationAwareConfigs, + DeepCNNWithJointCTCListenAttendSpellConfigs, + JointCTCListenAttendSpellConfigs, + ListenAttendSpellModel, + ListenAttendSpellWithMultiHeadModel, + ListenAttendSpellWithLocationAwareModel, + JointCTCListenAttendSpellModel, + DeepCNNWithJointCTCListenAttendSpellModel +) +from .quartznet import ( + 
QuartzNet5x5Configs, + QuartzNet15x5Configs, + QuartzNet10x5Configs, + QuartzNet5x5Model, + QuartzNet15x5Model, + QuartzNet10x5Model, +) + +from .rnn_transducer import ( + RNNTransducerModel, + RNNTransducerConfigs, +) +from .lstm_lm import ( + LSTMLanguageModel, + LSTMLanguageModelConfigs, +) +from .transformer_lm import ( + TransformerLanguageModelConfigs, + TransformerLanguageModel +) +from .transformer_transducer import ( + TransformerTransducerModel, + TransformerTransducerConfigs, +) + +__all__ = [ + "OpenspeechModel", + "OpenspeechEncoderDecoderModel", + "OpenspeechTransducerModel", + "OpenspeechCTCModel", + "ConformerModel", + "ConformerConfigs", + "ConformerLSTMModel", + "ConformerLSTMConfigs", + "ConformerTransducerModel", + "ConformerTransducerConfigs", + "ContextNetModel", + "ContextNetConfigs", + "ContextNetLSTMModel", + "ContextNetLSTMConfigs", + "ContextNetTransducerModel", + "ContextNetTransducerConfigs", + "DeepCNNWithJointCTCListenAttendSpellModel", + "DeepCNNWithJointCTCListenAttendSpellConfigs", + "DeepSpeech2Model", + "DeepSpeech2Configs", + "Jasper5x3Model", + "Jasper5x3Config", + "Jasper10x5Model", + "Jasper10x5Config", + "JointCTCConformerLSTMModel", + "JointCTCConformerLSTMConfigs", + "JointCTCListenAttendSpellConfigs", + "JointCTCTransformerConfigs", + "JointCTCTransformerModel", + "JointCTCListenAttendSpellModel", + "ListenAttendSpellConfigs", + "ListenAttendSpellWithMultiHeadConfigs", + "ListenAttendSpellWithLocationAwareConfigs", + "ListenAttendSpellModel", + "ListenAttendSpellWithLocationAwareModel", + "ListenAttendSpellWithMultiHeadModel", + "VGGTransformerConfigs", + "VGGTransformerModel", + "QuartzNet15x5Configs", + "QuartzNet10x5Configs", + "QuartzNet5x5Configs", + "QuartzNet15x5Model", + "QuartzNet10x5Model", + "RNNTransducerConfigs", + "RNNTransducerModel", + "TransformerModel", + "TransformerConfigs", + "TransformerWithCTCConfigs", + "TransformerTransducerConfigs", + "TransformerTransducerModel", + "TransformerWithCTCModel", + "LSTMLanguageModel", + "LSTMLanguageModelConfigs", + "TransformerLanguageModelConfigs", +] diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/conformer/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/conformer/__init__.py new file mode 100644 index 000000000..81778cbfd --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/conformer/__init__.py @@ -0,0 +1,34 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from .configurations import ( + ConformerConfigs, + ConformerLSTMConfigs, + ConformerTransducerConfigs, + JointCTCConformerLSTMConfigs, +) +from .model import ( + ConformerTransducerModel, + ConformerLSTMModel, + ConformerModel, + JointCTCConformerLSTMModel +) \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/conformer/configurations.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/conformer/configurations.py new file mode 100644 index 000000000..46776922b --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/conformer/configurations.py @@ -0,0 +1,368 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import OpenspeechDataclass + + +@dataclass +class ConformerConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.Conformer`. + + It is used to initiated an `Conformer` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: conformer) + encoder_dim (int): Dimension of encoder. (default: 512) + num_encoder_layers (int): The number of encoder layers. (default: 17) + num_attention_heads (int): The number of attention heads. (default: 8) + feed_forward_expansion_factor (int): The expansion factor of feed forward module. (default: 4) + conv_expansion_factor (int): The expansion factor of convolution module. (default: 2) + input_dropout_p (float): The dropout probability of inputs. (default: 0.1) + feed_forward_dropout_p (float): The dropout probability of feed forward module. (default: 0.1) + attention_dropout_p (float): The dropout probability of attention module. (default: 0.1) + conv_dropout_p (float): The dropout probability of convolution module. (default: 0.1) + conv_kernel_size (int): The kernel size of convolution. (default: eq) + half_step_residual (bool): Flag indication whether to use half step residual or not (default: True) + optimizer (str): Optimizer for training. 
(default: adam) + """ + model_name: str = field( + default="conformer", metadata={"help": "Model name"} + ) + encoder_dim: int = field( + default=512, metadata={"help": "Dimension of encoder."} + ) + num_encoder_layers: int = field( + default=17, metadata={"help": "The number of encoder layers."} + ) + num_attention_heads: int = field( + default=8, metadata={"help": "The number of attention heads."} + ) + feed_forward_expansion_factor: int = field( + default=4, metadata={"help": "The expansion factor of feed forward module."} + ) + conv_expansion_factor: int = field( + default=2, metadata={"help": "The expansion factor of convolution module."} + ) + input_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of inputs."} + ) + feed_forward_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of feed forward module."} + ) + attention_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of attention module."} + ) + conv_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of convolution module."} + ) + conv_kernel_size: int = field( + default=31, metadata={"help": "The kernel size of convolution."} + ) + half_step_residual: bool = field( + default=True, metadata={"help": "Flag indication whether to use half step residual or not"} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) + + +@dataclass +class ConformerLSTMConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.ConformerLSTM`. + + It is used to initiated an `ConformerLSTM` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: conformer_lstm) + encoder_dim (int): Dimension of encoder. (default: 512) + num_encoder_layers (int): The number of encoder layers. (default: 17) + num_attention_heads (int): The number of attention heads. (default: 8) + feed_forward_expansion_factor (int): The expansion factor of feed forward module. (default: 4) + conv_expansion_factor (int): The expansion factor of convolution module. (default: 2) + input_dropout_p (float): The dropout probability of inputs. (default: 0.1) + feed_forward_dropout_p (float): The dropout probability of feed forward module. (default: 0.1) + attention_dropout_p (float): The dropout probability of attention module. (default: 0.1) + conv_dropout_p (float): The dropout probability of convolution module. (default: 0.1) + conv_kernel_size (int): The kernel size of convolution. (default: eq) + half_step_residual (bool): Flag indication whether to use half step residual or not (default: True) + num_decoder_layers (int): The number of decoder layers. (default: 2) + decoder_dropout_p (float): The dropout probability of decoder. (default: 0.1) + max_length (int): Max decoding length. (default: 128) + teacher_forcing_ratio (float): The ratio of teacher forcing. (default: 1.0) + rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: lstm) + decoder_attn_mechanism (str): The attention mechanism for decoder. (default: loc) + optimizer (str): Optimizer for training. 
(default: adam) + """ + model_name: str = field( + default="conformer_lstm", metadata={"help": "Model name"} + ) + encoder_dim: int = field( + default=512, metadata={"help": "Dimension of encoder."} + ) + num_encoder_layers: int = field( + default=17, metadata={"help": "The number of encoder layers."} + ) + num_attention_heads: int = field( + default=8, metadata={"help": "The number of attention heads."} + ) + feed_forward_expansion_factor: int = field( + default=4, metadata={"help": "The expansion factor of feed forward module."} + ) + conv_expansion_factor: int = field( + default=2, metadata={"help": "The expansion factor of convolution module."} + ) + input_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of inputs."} + ) + feed_forward_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of feed forward module."} + ) + attention_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of attention module."} + ) + conv_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of convolution module."} + ) + conv_kernel_size: int = field( + default=31, metadata={"help": "The kernel size of convolution."} + ) + half_step_residual: bool = field( + default=True, metadata={"help": "Flag indication whether to use half step residual or not"} + ) + num_decoder_layers: int = field( + default=2, metadata={"help": "The number of decoder layers."} + ) + decoder_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of decoder."} + ) + max_length: int = field( + default=128, metadata={"help": "Max decoding length."} + ) + teacher_forcing_ratio: float = field( + default=1.0, metadata={"help": "The ratio of teacher forcing. "} + ) + rnn_type: str = field( + default="lstm", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"} + ) + decoder_attn_mechanism: str = field( + default="loc", metadata={"help": "The attention mechanism for decoder."} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) + + +@dataclass +class ConformerTransducerConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.ConformerTransducer`. + + It is used to initiated an `ConformerTransducer` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: conformer_transducer) + encoder_dim (int): Dimension of encoder. (default: 512) + num_encoder_layers (int): The number of encoder layers. (default: 17) + num_attention_heads (int): The number of attention heads. (default: 8) + feed_forward_expansion_factor (int): The expansion factor of feed forward module. (default: 4) + conv_expansion_factor (int): The expansion factor of convolution module. (default: 2) + input_dropout_p (float): The dropout probability of inputs. (default: 0.1) + feed_forward_dropout_p (float): The dropout probability of feed forward module. (default: 0.1) + attention_dropout_p (float): The dropout probability of attention module. (default: 0.1) + conv_dropout_p (float): The dropout probability of convolution module. (default: 0.1) + conv_kernel_size (int): The kernel size of convolution. (default: eq) + half_step_residual (bool): Flag indication whether to use half step residual or not (default: True) + num_decoder_layers (int): The number of decoder layers. 
(default: 1) + decoder_dropout_p (float): The dropout probability of decoder. (default: 0.1) + max_length (int): Max decoding length. (default: 128) + teacher_forcing_ratio (float): The ratio of teacher forcing. (default: 1.0) + rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: lstm) + decoder_hidden_state_dim (int): Hidden state dimension of decoder. (default: 640) + decoder_output_dim (int): Output dimension of decoder. (default: 640) + optimizer (str): Optimizer for training. (default: adam) + """ + model_name: str = field( + default="conformer_transducer", metadata={"help": "Model name"} + ) + encoder_dim: int = field( + default=512, metadata={"help": "Dimension of encoder."} + ) + num_encoder_layers: int = field( + default=17, metadata={"help": "The number of encoder layers."} + ) + num_attention_heads: int = field( + default=8, metadata={"help": "The number of attention heads."} + ) + feed_forward_expansion_factor: int = field( + default=4, metadata={"help": "The expansion factor of feed forward module."} + ) + conv_expansion_factor: int = field( + default=2, metadata={"help": "The expansion factor of convolution module."} + ) + input_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of inputs."} + ) + feed_forward_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of feed forward module."} + ) + attention_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of attention module."} + ) + conv_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of convolution module."} + ) + conv_kernel_size: int = field( + default=31, metadata={"help": "The kernel size of convolution."} + ) + half_step_residual: bool = field( + default=True, metadata={"help": "Flag indication whether to use half step residual or not"} + ) + num_decoder_layers: int = field( + default=1, metadata={"help": "The number of decoder layers."} + ) + decoder_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of decoder."} + ) + max_length: int = field( + default=128, metadata={"help": "Max decoding length."} + ) + teacher_forcing_ratio: float = field( + default=1.0, metadata={"help": " The ratio of teacher forcing. "} + ) + rnn_type: str = field( + default="lstm", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"} + ) + decoder_hidden_state_dim: int = field( + default=640, metadata={"help": "Hidden state dimension of decoder."} + ) + decoder_output_dim: int = field( + default=640, metadata={"help": "Output dimension of decoder."} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) + + +@dataclass +class JointCTCConformerLSTMConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.JointCTCConformerLSTM`. + + It is used to initiated an `JointCTCConformerLSTM` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: joint_ctc_conformer_lstm) + encoder_dim (int): Dimension of encoder. (default: 512) + num_encoder_layers (int): The number of encoder layers. (default: 17) + num_attention_heads (int): The number of attention heads. (default: 8) + feed_forward_expansion_factor (int): The expansion factor of feed forward module. (default: 4) + conv_expansion_factor (int): The expansion factor of convolution module. 
(default: 2) + input_dropout_p (float): The dropout probability of inputs. (default: 0.1) + feed_forward_dropout_p (float): The dropout probability of feed forward module. (default: 0.1) + attention_dropout_p (float): The dropout probability of attention module. (default: 0.1) + conv_dropout_p (float): The dropout probability of convolution module. (default: 0.1) + conv_kernel_size (int): The kernel size of convolution. (default: eq) + half_step_residual (bool): Flag indication whether to use half step residual or not (default: True) + num_decoder_layers (int): The number of decoder layers. (default: 2) + decoder_dropout_p (float): The dropout probability of decoder. (default: 0.1) + max_length (int): Max decoding length. (default: 128) + teacher_forcing_ratio (float): The ratio of teacher forcing. (default: 1.0) + rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: lstm) + decoder_attn_mechanism (str): The attention mechanism for decoder. (default: loc) + optimizer (str): Optimizer for training. (default: adam) + """ + model_name: str = field( + default="joint_ctc_conformer_lstm", metadata={"help": "Model name"} + ) + encoder_dim: int = field( + default=512, metadata={"help": "Dimension of encoder."} + ) + num_encoder_layers: int = field( + default=17, metadata={"help": "The number of encoder layers."} + ) + num_attention_heads: int = field( + default=8, metadata={"help": "The number of attention heads."} + ) + feed_forward_expansion_factor: int = field( + default=4, metadata={"help": "The expansion factor of feed forward module."} + ) + conv_expansion_factor: int = field( + default=2, metadata={"help": "The expansion factor of convolution module."} + ) + input_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of inputs."} + ) + feed_forward_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of feed forward module."} + ) + attention_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of attention module."} + ) + conv_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of convolution module."} + ) + conv_kernel_size: int = field( + default=31, metadata={"help": "The kernel size of convolution."} + ) + half_step_residual: bool = field( + default=True, metadata={"help": "Flag indication whether to use half step residual or not"} + ) + num_decoder_layers: int = field( + default=2, metadata={"help": "The number of decoder layers."} + ) + decoder_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of decoder."} + ) + num_decoder_attention_heads: int = field( + default=1, metadata={"help": "The number of decoder attention heads."} + ) + max_length: int = field( + default=128, metadata={"help": "Max decoding length."} + ) + teacher_forcing_ratio: float = field( + default=1.0, metadata={"help": " The ratio of teacher forcing. 
"} + ) + rnn_type: str = field( + default="lstm", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"} + ) + decoder_attn_mechanism: str = field( + default="loc", metadata={"help": "The attention mechanism for decoder."} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/conformer/model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/conformer/model.py new file mode 100644 index 000000000..356d84535 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/conformer/model.py @@ -0,0 +1,328 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from torch import Tensor +from typing import Dict +from collections import OrderedDict + +from openspeech.decoders import LSTMAttentionDecoder, RNNTransducerDecoder +from openspeech.models import register_model, OpenspeechEncoderDecoderModel, OpenspeechTransducerModel +from openspeech.models import OpenspeechCTCModel +from openspeech.encoders import ConformerEncoder +from openspeech.modules.wrapper import Linear +from openspeech.tokenizers.tokenizer import Tokenizer +from openspeech.models.conformer.configurations import ( + ConformerConfigs, + ConformerLSTMConfigs, + ConformerTransducerConfigs, + JointCTCConformerLSTMConfigs, +) + + +@register_model('conformer', dataclass=ConformerConfigs) +class ConformerModel(OpenspeechCTCModel): + r""" + Conformer Encoder Only Model. + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. 
``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(ConformerModel, self).__init__(configs, tokenizer) + self.fc = Linear(self.configs.model.encoder_dim, self.num_classes, bias=False) + + def build_model(self): + self.encoder = ConformerEncoder( + num_classes=self.num_classes, + input_dim=self.configs.audio.num_mels, + encoder_dim=self.configs.model.encoder_dim, + num_layers=self.configs.model.num_encoder_layers, + num_attention_heads=self.configs.model.num_attention_heads, + feed_forward_expansion_factor=self.configs.model.feed_forward_expansion_factor, + conv_expansion_factor=self.configs.model.conv_expansion_factor, + input_dropout_p=self.configs.model.input_dropout_p, + feed_forward_dropout_p=self.configs.model.feed_forward_dropout_p, + attention_dropout_p=self.configs.model.attention_dropout_p, + conv_dropout_p=self.configs.model.conv_dropout_p, + conv_kernel_size=self.configs.model.conv_kernel_size, + half_step_residual=self.configs.model.half_step_residual, + joint_ctc_attention=False, + ) + + def forward(self, inputs: Tensor, input_lengths: Tensor) -> Dict[str, Tensor]: + r""" + Forward propagate a `inputs` and `targets` pair for inference. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + return super(ConformerModel, self).forward(inputs, input_lengths) + + def training_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for training. + + Inputs: + batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + encoder_outputs, encoder_logits, output_lengths = self.encoder(inputs, input_lengths) + logits = self.fc(encoder_outputs).log_softmax(dim=-1) + return self.collect_outputs( + stage='train', + logits=logits, + output_lengths=output_lengths, + targets=targets, + target_lengths=target_lengths, + ) + + def validation_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for validation. + + Inputs: + batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + encoder_outputs, encoder_logits, output_lengths = self.encoder(inputs, input_lengths) + logits = self.fc(encoder_outputs).log_softmax(dim=-1) + return self.collect_outputs( + stage='valid', + logits=logits, + output_lengths=output_lengths, + targets=targets, + target_lengths=target_lengths, + ) + + def test_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for test. 
+ + Inputs: + batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + encoder_outputs, encoder_logits, output_lengths = self.encoder(inputs, input_lengths) + logits = self.fc(encoder_outputs).log_softmax(dim=-1) + return self.collect_outputs( + stage='test', + logits=logits, + output_lengths=output_lengths, + targets=targets, + target_lengths=target_lengths, + ) + + +@register_model('conformer_lstm', dataclass=ConformerLSTMConfigs) +class ConformerLSTMModel(OpenspeechEncoderDecoderModel): + r""" + Conformer encoder + LSTM decoder. + + Args: + configs (DictConfig): configuraion set + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, + `encoder_outputs`, `encoder_logits`, `encoder_output_lengths`. + """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(ConformerLSTMModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = ConformerEncoder( + num_classes=self.num_classes, + input_dim=self.configs.audio.num_mels, + encoder_dim=self.configs.model.encoder_dim, + num_layers=self.configs.model.num_encoder_layers, + num_attention_heads=self.configs.model.num_attention_heads, + feed_forward_expansion_factor=self.configs.model.feed_forward_expansion_factor, + conv_expansion_factor=self.configs.model.conv_expansion_factor, + input_dropout_p=self.configs.model.input_dropout_p, + feed_forward_dropout_p=self.configs.model.feed_forward_dropout_p, + attention_dropout_p=self.configs.model.attention_dropout_p, + conv_dropout_p=self.configs.model.conv_dropout_p, + conv_kernel_size=self.configs.model.conv_kernel_size, + half_step_residual=self.configs.model.half_step_residual, + joint_ctc_attention=False, + ) + self.decoder = LSTMAttentionDecoder( + num_classes=self.num_classes, + max_length=self.configs.model.max_length, + hidden_state_dim=self.configs.model.encoder_dim, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + num_heads=self.configs.model.num_attention_heads, + dropout_p=self.configs.model.decoder_dropout_p, + num_layers=self.configs.model.num_decoder_layers, + attn_mechanism=self.configs.model.decoder_attn_mechanism, + rnn_type=self.configs.model.rnn_type, + ) + + def set_beam_decoder(self, beam_size: int = 3): + """ Setting beam search decoder """ + from openspeech.search import BeamSearchLSTM + self.decoder = BeamSearchLSTM( + decoder=self.decoder, + beam_size=beam_size, + ) + + +@register_model('conformer_transducer', dataclass=ConformerTransducerConfigs) +class ConformerTransducerModel(OpenspeechTransducerModel): + r""" + Conformer: Convolution-augmented Transformer for Speech Recognition + Paper: https://arxiv.org/abs/2005.08100 + + Args: + configs (DictConfig): configuraion set + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. 
Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions. + """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(ConformerTransducerModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = ConformerEncoder( + num_classes=self.num_classes, + input_dim=self.configs.audio.num_mels, + encoder_dim=self.configs.model.encoder_dim, + num_layers=self.configs.model.num_encoder_layers, + num_attention_heads=self.configs.model.num_attention_heads, + feed_forward_expansion_factor=self.configs.model.feed_forward_expansion_factor, + conv_expansion_factor=self.configs.model.conv_expansion_factor, + input_dropout_p=self.configs.model.input_dropout_p, + feed_forward_dropout_p=self.configs.model.feed_forward_dropout_p, + attention_dropout_p=self.configs.model.attention_dropout_p, + conv_dropout_p=self.configs.model.conv_dropout_p, + conv_kernel_size=self.configs.model.conv_kernel_size, + half_step_residual=self.configs.model.half_step_residual, + joint_ctc_attention=False, + ) + self.decoder = RNNTransducerDecoder( + num_classes=self.num_classes, + hidden_state_dim=self.configs.model.decoder_hidden_state_dim, + output_dim=self.configs.model.decoder_output_dim, + num_layers=self.configs.model.num_decoder_layers, + rnn_type=self.configs.model.rnn_type, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + dropout_p=self.configs.model.decoder_dropout_p, + ) + + +@register_model('joint_ctc_conformer_lstm', dataclass=JointCTCConformerLSTMConfigs) +class JointCTCConformerLSTMModel(OpenspeechEncoderDecoderModel): + r""" + Conformer encoder + LSTM decoder. + + Args: + configs (DictConfig): configuraion set + tokenizer (Tokeizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (torch.FloatTensor): Result of model predictions. 
+ """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(JointCTCConformerLSTMModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = ConformerEncoder( + num_classes=self.num_classes, + input_dim=self.configs.audio.num_mels, + encoder_dim=self.configs.model.encoder_dim, + num_layers=self.configs.model.num_encoder_layers, + num_attention_heads=self.configs.model.num_attention_heads, + feed_forward_expansion_factor=self.configs.model.feed_forward_expansion_factor, + conv_expansion_factor=self.configs.model.conv_expansion_factor, + input_dropout_p=self.configs.model.input_dropout_p, + feed_forward_dropout_p=self.configs.model.feed_forward_dropout_p, + attention_dropout_p=self.configs.model.attention_dropout_p, + conv_dropout_p=self.configs.model.conv_dropout_p, + conv_kernel_size=self.configs.model.conv_kernel_size, + half_step_residual=self.configs.model.half_step_residual, + joint_ctc_attention=True, + ) + self.decoder = LSTMAttentionDecoder( + num_classes=self.num_classes, + max_length=self.configs.model.max_length, + hidden_state_dim=self.configs.model.encoder_dim, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + num_heads=self.configs.model.num_decoder_attention_heads, + dropout_p=self.configs.model.decoder_dropout_p, + num_layers=self.configs.model.num_decoder_layers, + attn_mechanism=self.configs.model.decoder_attn_mechanism, + rnn_type=self.configs.model.rnn_type, + ) + + def set_beam_decoder(self, beam_size: int = 3): + """ Setting beam search decoder """ + from openspeech.search import BeamSearchLSTM + self.decoder = BeamSearchLSTM( + decoder=self.decoder, + beam_size=beam_size, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/contextnet/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/contextnet/__init__.py new file mode 100644 index 000000000..8d1ec5dd5 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/contextnet/__init__.py @@ -0,0 +1,32 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
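For reference, the conformer variants above are all driven by the configuration dataclasses defined earlier; the following is a minimal sketch of how their defaults and help strings can be inspected, assuming the module imports as `openspeech.models.conformer.configurations` (the same path used in model.py above) and using only the standard library:

from dataclasses import fields

from openspeech.models.conformer.configurations import ConformerLSTMConfigs

# Walk the dataclass fields and print each default alongside its help metadata.
for f in fields(ConformerLSTMConfigs):
    print(f"{f.name:32s} default={f.default!r}  # {f.metadata.get('help', '')}")

# e.g. conv_kernel_size defaults to 31 and optimizer to 'adam'.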
+ +from .configurations import ( + ContextNetTransducerConfigs, + ContextNetLSTMConfigs, + ContextNetConfigs, +) +from .model import ( + ContextNetTransducerModel, + ContextNetLSTMModel, + ContextNetModel, +) \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/contextnet/configurations.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/contextnet/configurations.py new file mode 100644 index 000000000..99ee77b85 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/contextnet/configurations.py @@ -0,0 +1,215 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import OpenspeechDataclass + + +@dataclass +class ContextNetConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.ContextNet`. + + It is used to initiated an `ContextNet` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: contextnet) + model_size (str, optional): Size of the model['small', 'medium', 'large'] (default : 'medium') + input_dim (int, optional): Dimension of input vector (default : 80) + num_encoder_layers (int, optional): The number of convolution layers (default : 5) + kernel_size (int, optional): Value of convolution kernel size (default : 5) + num_channels (int, optional): The number of channels in the convolution filter (default: 256) + encoder_dim (int, optional): Dimension of encoder output vector (default: 640) + optimizer (str): Optimizer for training. 
(default: adam) + """ + model_name: str = field( + default="contextnet", metadata={"help": "Model name"} + ) + model_size: str = field( + default="medium", metadata={"help": "Model size"} + ) + input_dim: int = field( + default=80, metadata={"help": "Dimension of input vector"} + ) + num_encoder_layers: int = field( + default=5, metadata={"help": "The number of convolution layers"} + ) + kernel_size: int = field( + default=5, metadata={"help": "Value of convolution kernel size"} + ) + num_channels: int = field( + default=256, metadata={"help": "The number of channels in the convolution filter"} + ) + encoder_dim: int = field( + default=640, metadata={"help": "Dimension of encoder output vector"} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training"} + ) + + +@dataclass +class ContextNetLSTMConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.ContextNetLSTM`. + + It is used to initiated an `ContextNetLSTM` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: contextnet_lstm) + model_size (str, optional): Size of the model['small', 'medium', 'large'] (default : 'medium') + input_dim (int, optional): Dimension of input vector (default : 80) + num_encoder_layers (int, optional): The number of convolution layers (default : 5) + num_decoder_layers (int): The number of decoder layers. (default: 2) + kernel_size (int, optional): Value of convolution kernel size (default : 5) + num_channels (int, optional): The number of channels in the convolution filter (default: 256) + encoder_dim (int, optional): Dimension of encoder output vector (default: 640) + num_attention_heads (int): The number of attention heads. (default: 8) + attention_dropout_p (float): The dropout probability of attention module. (default: 0.1) + decoder_dropout_p (float): The dropout probability of decoder. (default: 0.1) + max_length (int): Max decoding length. (default: 128) + teacher_forcing_ratio (float): The ratio of teacher forcing. (default: 1.0) + rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: lstm) + decoder_attn_mechanism (str): The attention mechanism for decoder. (default: loc) + optimizer (str): Optimizer for training. 
(default: adam) + """ + model_name: str = field( + default="contextnet_lstm", metadata={"help": "Model name"} + ) + model_size: str = field( + default="medium", metadata={"help": "Model size"} + ) + input_dim: int = field( + default=80, metadata={"help": "Dimension of input vector"} + ) + num_encoder_layers: int = field( + default=5, metadata={"help": "The number of convolution layers"} + ) + num_decoder_layers: int = field( + default=2, metadata={"help": "The number of decoder layers."} + ) + kernel_size: int = field( + default=5, metadata={"help": "Value of convolution kernel size"} + ) + num_channels: int = field( + default=256, metadata={"help": "The number of channels in the convolution filter"} + ) + encoder_dim: int = field( + default=640, metadata={"help": "Dimension of encoder output vector"} + ) + num_attention_heads: int = field( + default=8, metadata={"help": "The number of attention heads."} + ) + attention_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of attention module."} + ) + decoder_dropout_p: float = field( + default=0.1, metadata={"help": "The dropout probability of decoder."} + ) + max_length: int = field( + default=128, metadata={"help": "Max decoding length."} + ) + teacher_forcing_ratio: float = field( + default=1.0, metadata={"help": "The ratio of teacher forcing. "} + ) + rnn_type: str = field( + default="lstm", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"} + ) + decoder_attn_mechanism: str = field( + default="loc", metadata={"help": "The attention mechanism for decoder."} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) + + +@dataclass +class ContextNetTransducerConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.ContextNetTransducer`. + + It is used to initiated an `ContextNetTransducer` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: contextnet_transducer) + model_size (str, optional): Size of the model['small', 'medium', 'large'] (default : 'medium') + input_dim (int, optional): Dimension of input vector (default : 80) + num_encoder_layers (int, optional): The number of convolution layers (default : 5) + num_decoder_layers (int, optional): The number of rnn layers (default : 1) + kernel_size (int, optional): Value of convolution kernel size (default : 5) + num_channels (int, optional): The number of channels in the convolution filter (default: 256) + hidden_dim (int, optional): The number of features in the decoder hidden state (default : 2048) + encoder_dim (int, optional): Dimension of encoder output vector (default: 640) + decoder_output_dim (int, optional): Dimension of decoder output vector (default: 640) + dropout (float, optional): Dropout probability of decoder (default: 0.1) + rnn_type (str, optional): Type of rnn cell (rnn, lstm, gru) (default: lstm) + optimizer (str): Optimizer for training. 
(default: adam) + """ + model_name: str = field( + default="contextnet_transducer", metadata={"help": "Model name"} + ) + model_size: str = field( + default="medium", metadata={"help": "Model size"} + ) + input_dim: int = field( + default=80, metadata={"help": "Dimension of input vector"} + ) + num_encoder_layers: int = field( + default=5, metadata={"help": "The number of convolution layers"} + ) + num_decoder_layers: int = field( + default=1, metadata={"help": "The number of rnn layers"} + ) + kernel_size: int = field( + default=5, metadata={"help": "Value of convolution kernel size"} + ) + num_channels: int = field( + default=256, metadata={"help": "The number of channels in the convolution filter"} + ) + decoder_hidden_state_dim: int = field( + default=2048, metadata={"help": "The number of features in the decoder hidden state"} + ) + encoder_dim: int = field( + default=640, metadata={"help": "Dimension of encoder output vector"} + ) + decoder_output_dim: int = field( + default=640, metadata={"help": "Dimension of decoder output vector"} + ) + decoder_dropout_p: float = field( + default=0.1, metadata={"help": "Dropout probability of decoder"} + ) + rnn_type: str = field( + default='lstm', metadata={"help": "Type of rnn cell"} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training"} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/contextnet/model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/contextnet/model.py new file mode 100644 index 000000000..2ea7a8778 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/contextnet/model.py @@ -0,0 +1,255 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
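As a side note, the three ContextNet configs above share their encoder fields and differ mainly in decoder settings; a variant can be derived from the declared defaults without mutating the original, for example with `dataclasses.replace`. This is a sketch only, assuming the config instantiates cleanly from its defaults:

from dataclasses import replace

from openspeech.models.contextnet.configurations import ContextNetLSTMConfigs

# Start from the declared defaults, then derive a narrower variant for illustration.
base = ContextNetLSTMConfigs()
narrow = replace(base, model_size="small", num_channels=128, max_length=64)

print(base.model_size, base.num_channels)      # medium 256
print(narrow.model_size, narrow.num_channels)  # small 128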
+ +from torch import Tensor +from typing import Dict +from collections import OrderedDict + +from openspeech.decoders import RNNTransducerDecoder, LSTMAttentionDecoder +from openspeech.models import register_model, OpenspeechTransducerModel, OpenspeechEncoderDecoderModel +from openspeech.models import OpenspeechCTCModel +from openspeech.encoders import ContextNetEncoder +from openspeech.modules.wrapper import Linear +from openspeech.tokenizers.tokenizer import Tokenizer +from openspeech.models.contextnet.configurations import ( + ContextNetConfigs, + ContextNetTransducerConfigs, + ContextNetLSTMConfigs, +) + + +@register_model('contextnet', dataclass=ContextNetConfigs) +class ContextNetModel(OpenspeechCTCModel): + r""" + Conformer Encoder Only Model. + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(ContextNetModel, self).__init__(configs, tokenizer) + supported_models = { + 'small': 0.5, + 'medium': 1, + 'large': 2, + } + alpha = supported_models[self.configs.model.model_size] + self.fc = Linear(int(self.configs.model.encoder_dim * alpha), self.num_classes, bias=False) + + def build_model(self): + self.encoder = ContextNetEncoder( + num_classes=self.num_classes, + model_size=self.configs.model.model_size, + input_dim=self.configs.audio.num_mels, + num_layers=self.configs.model.num_encoder_layers, + kernel_size=self.configs.model.kernel_size, + num_channels=self.configs.model.num_channels, + output_dim=self.configs.model.encoder_dim, + joint_ctc_attention=False, + ) + + def forward(self, inputs: Tensor, input_lengths: Tensor) -> Dict[str, Tensor]: + r""" + Forward propagate a `inputs` and `targets` pair for inference. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + return super(ContextNetModel, self).forward(inputs, input_lengths) + + def training_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for training. + + Inputs: + batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + encoder_outputs, encoder_logits, output_lengths = self.encoder(inputs, input_lengths) + logits = self.fc(encoder_outputs).log_softmax(dim=-1) + return self.collect_outputs( + stage='train', + logits=logits, + output_lengths=output_lengths, + targets=targets, + target_lengths=target_lengths, + ) + + def validation_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for validation. 
+ + Inputs: + batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + encoder_outputs, encoder_logits, output_lengths = self.encoder(inputs, input_lengths) + logits = self.fc(encoder_outputs).log_softmax(dim=-1) + return self.collect_outputs( + stage='valid', + logits=logits, + output_lengths=output_lengths, + targets=targets, + target_lengths=target_lengths, + ) + + def test_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for test. + + Inputs: + batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + encoder_outputs, encoder_logits, output_lengths = self.encoder(inputs, input_lengths) + logits = self.fc(encoder_outputs).log_softmax(dim=-1) + return self.collect_outputs( + stage='test', + logits=logits, + output_lengths=output_lengths, + targets=targets, + target_lengths=target_lengths, + ) + + +@register_model('contextnet_lstm', dataclass=ContextNetLSTMConfigs) +class ContextNetLSTMModel(OpenspeechEncoderDecoderModel): + r""" + ContextNet encoder + LSTM decoder. + + Args: + configs (DictConfig): configuraion set + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, + `encoder_outputs`, `encoder_logits`, `encoder_output_lengths`. 
+ """ + + def __init__(self, configs, tokenizer: Tokenizer, ) -> None: + super(ContextNetLSTMModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = ContextNetEncoder( + num_classes=self.num_classes, + model_size=self.configs.model.model_size, + input_dim=self.configs.audio.num_mels, + num_layers=self.configs.model.num_encoder_layers, + kernel_size=self.configs.model.kernel_size, + num_channels=self.configs.model.num_channels, + output_dim=self.configs.model.encoder_dim, + joint_ctc_attention=False, + ) + self.decoder = LSTMAttentionDecoder( + num_classes=self.num_classes, + max_length=self.configs.model.max_length, + hidden_state_dim=self.configs.model.encoder_dim, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + num_heads=self.configs.model.num_attention_heads, + dropout_p=self.configs.model.decoder_dropout_p, + num_layers=self.configs.model.num_decoder_layers, + attn_mechanism=self.configs.model.decoder_attn_mechanism, + rnn_type=self.configs.model.rnn_type, + ) + + def set_beam_decoder(self, beam_size: int = 3): + """ Setting beam search decoder """ + from openspeech.search import BeamSearchLSTM + self.decoder = BeamSearchLSTM( + decoder=self.decoder, + beam_size=beam_size, + ) + + +@register_model('contextnet_transducer', dataclass=ContextNetTransducerConfigs) +class ContextNetTransducerModel(OpenspeechTransducerModel): + r""" + ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context + Paper: https://arxiv.org/abs/2005.03191 + + Args: + configs (DictConfig): configuraion set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions. 
+ """ + + def __init__(self, configs, tokenizer: Tokenizer, ) -> None: + super(ContextNetTransducerModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = ContextNetEncoder( + num_classes=self.num_classes, + model_size=self.configs.model.model_size, + input_dim=self.configs.audio.num_mels, + num_layers=self.configs.model.num_encoder_layers, + kernel_size=self.configs.model.kernel_size, + num_channels=self.configs.model.num_channels, + output_dim=self.configs.model.encoder_dim, + joint_ctc_attention=False, + ) + self.decoder = RNNTransducerDecoder( + num_classes=self.num_classes, + hidden_state_dim=self.configs.model.decoder_hidden_state_dim, + output_dim=self.configs.model.decoder_output_dim, + num_layers=self.configs.model.num_decoder_layers, + rnn_type=self.configs.model.rnn_type, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + dropout_p=self.configs.model.decoder_dropout_p, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/deepspeech2/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/deepspeech2/__init__.py new file mode 100644 index 000000000..8c40ad5d8 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/deepspeech2/__init__.py @@ -0,0 +1,28 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
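Each model class above is registered under a string name together with its config dataclass via `@register_model`; the actual registry lives elsewhere in openspeech and is not part of this patch, but conceptually the pairing amounts to a name-to-classes mapping, roughly like the hypothetical sketch below (illustrative only, not the package's lookup code):

from openspeech.models.contextnet.configurations import (
    ContextNetConfigs,
    ContextNetLSTMConfigs,
    ContextNetTransducerConfigs,
)
from openspeech.models.contextnet.model import (
    ContextNetModel,
    ContextNetLSTMModel,
    ContextNetTransducerModel,
)

# Hypothetical stand-in for the @register_model registry: name -> (model class, config class).
_CONTEXTNET_VARIANTS = {
    "contextnet": (ContextNetModel, ContextNetConfigs),
    "contextnet_lstm": (ContextNetLSTMModel, ContextNetLSTMConfigs),
    "contextnet_transducer": (ContextNetTransducerModel, ContextNetTransducerConfigs),
}


def lookup_contextnet(name: str):
    """Return the (model class, config dataclass) pair registered under `name`."""
    return _CONTEXTNET_VARIANTS[name]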
+ +from .configurations import ( + DeepSpeech2Configs +) +from .model import ( + DeepSpeech2Model +) \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/deepspeech2/configurations.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/deepspeech2/configurations.py new file mode 100644 index 000000000..e75414bf4 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/deepspeech2/configurations.py @@ -0,0 +1,71 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import OpenspeechDataclass + + +@dataclass +class DeepSpeech2Configs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.DeepSpeech2`. + + It is used to initiate a `DeepSpeech2` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: deepspeech2) + num_rnn_layers (int): The number of rnn layers. (default: 5) + rnn_hidden_dim (int): The hidden state dimension of rnn. (default: 1024) + dropout_p (float): The dropout probability of model. (default: 0.3) + bidirectional (bool): If True, becomes a bidirectional encoder (default: True) + rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: gru) + activation (str): Type of activation function (default: hardtanh) + optimizer (str): Optimizer for training.
(default: adam) + """ + model_name: str = field( + default="deepspeech2", metadata={"help": "Model name"} + ) + rnn_type: str = field( + default="gru", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"} + ) + num_rnn_layers: int = field( + default=5, metadata={"help": "The number of rnn layers"} + ) + rnn_hidden_dim: int = field( + default=1024, metadata={"help": "Hidden state dimenstion of RNN."} + ) + dropout_p: float = field( + default=0.3, metadata={"help": "The dropout probability of model."} + ) + bidirectional: bool = field( + default=True, metadata={"help": "If True, becomes a bidirectional encoders"} + ) + activation: str = field( + default="hardtanh", metadata={"help": "Type of activation function"} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/deepspeech2/model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/deepspeech2/model.py new file mode 100644 index 000000000..3de3c0de0 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/deepspeech2/model.py @@ -0,0 +1,116 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from typing import Dict +from torch import Tensor +from collections import OrderedDict + +from openspeech.models import OpenspeechCTCModel, register_model +from openspeech.encoders.deepspeech2 import DeepSpeech2 +from openspeech.models.deepspeech2.configurations import DeepSpeech2Configs +from openspeech.tokenizers.tokenizer import Tokenizer + + +@register_model('deepspeech2', dataclass=DeepSpeech2Configs) +class DeepSpeech2Model(OpenspeechCTCModel): + r""" + Deep Speech2 model with configurable encoders and decoders. + Paper: https://arxiv.org/abs/1512.02595 + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. 
``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(DeepSpeech2Model, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = DeepSpeech2( + input_dim=self.configs.audio.num_mels, + num_classes=self.num_classes, + rnn_type=self.configs.model.rnn_type, + num_rnn_layers=self.configs.model.num_rnn_layers, + rnn_hidden_dim=self.configs.model.rnn_hidden_dim, + dropout_p=self.configs.model.dropout_p, + bidirectional=self.configs.model.bidirectional, + activation=self.configs.model.activation, + ) + + def forward(self, inputs: Tensor, input_lengths: Tensor) -> Dict[str, Tensor]: + r""" + Forward propagate a `inputs` and `targets` pair for inference. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + return super(DeepSpeech2Model, self).forward(inputs, input_lengths) + + def training_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for training. + + Inputs: + batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + return super(DeepSpeech2Model, self).training_step(batch, batch_idx) + + def validation_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for validation. + + Inputs: + batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + return super(DeepSpeech2Model, self).validation_step(batch, batch_idx) + + def test_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for test. + + Inputs: + batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + return super(DeepSpeech2Model, self).test_step(batch, batch_idx) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/jasper/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/jasper/__init__.py new file mode 100644 index 000000000..79ab64924 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/jasper/__init__.py @@ -0,0 +1,30 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from .configurations import ( + Jasper5x3Config, + Jasper10x5Config, +) +from .model import ( + Jasper5x3Model, + Jasper10x5Model, +) \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/jasper/configurations.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/jasper/configurations.py new file mode 100644 index 000000000..8fe2f8663 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/jasper/configurations.py @@ -0,0 +1,135 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import OpenspeechDataclass + + +@dataclass +class Jasper5x3Config(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.Jasper5x3`. + + It is used to initiate a `Jasper5x3` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: jasper5x3) + num_blocks (int): Number of jasper blocks (default: 5) + num_sub_blocks (int): Number of jasper sub blocks (default: 3) + in_channels (str): Input channels of jasper block's convolution + out_channels (str): Output channels of jasper block's convolution + kernel_size (str): Kernel size of jasper block's convolution + dilation (str): Dilation of jasper block's convolution + dropout_p (str): Dropout probability + optimizer (str): Optimizer for training. (default: novograd)
+ """ + model_name: str = field( + default="jasper5x3", metadata={"help": "Model name"} + ) + num_blocks: int = field( + default=5, metadata={"help": "Number of jasper blocks"} + ) + num_sub_blocks: int = field( + default=3, metadata={"help": "Number of jasper sub blocks"} + ) + in_channels: str = field( + default="(None, 256, 256, 256, 384, 384, 512, 512, 640, 640, 768, 768, 896, 1024)", + metadata={"help": "Input channels of jasper blocks"} + ) + out_channels: str = field( + default="(256, 256, 256, 384, 384, 512, 512, 640, 640, 768, 768, 896, 1024, None)", + metadata={"help": "Output channels of jasper block's convolution"} + ) + kernel_size: str = field( + default="(11, 11, 11, 13, 13, 17, 17, 21, 21, 25, 25, 29, 1, 1)", + metadata={"help": "Kernel size of jasper block's convolution"} + ) + dilation: str = field( + default="(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1)", + metadata={"help": "Dilation of jasper block's convolution"} + ) + dropout_p: str = field( + default="(0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.3, 0.3, 0.4, 0.4, 0.0)", + metadata={"help": "Dropout probability"} + ) + optimizer: str = field( + default="novograd", metadata={"help": "Optimizer for training."} + ) + + +@dataclass +class Jasper10x5Config(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.Jasper10x5`. + + It is used to initiated an `Jasper10x5` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: jasper10x5) + num_blocks (int): Number of jasper blocks (default: 10) + num_sub_blocks (int): Number of jasper sub blocks (default: 5) + in_channels (str): Output channels of jasper block's convolution + out_channels (str): Output channels of jasper block's convolution + kernel_size (str): Kernel size of jasper block's convolution + dilation (str): Dilation of jasper block's convolution + dropout_p (str): Dropout probability + optimizer (str): Optimizer for training. 
+ """ + model_name: str = field( + default="jasper10x5", metadata={"help": "Model name"} + ) + num_blocks: int = field( + default=10, metadata={"help": "Number of jasper blocks"} + ) + num_sub_blocks: int = field( + default=5, metadata={"help": "Number of jasper sub blocks"} + ) + in_channels: str = field( + default="(None, 256, 256, 256, 384, 384, 512, 512, 640, 640, 768, 768, 896, 1024)", + metadata={"help": "Input channels of jasper blocks"} + ) + out_channels: str = field( + default="(256, 256, 256, 384, 384, 512, 512, 640, 640, 768, 768, 768, 896, 1024, None)", + metadata={"help": "Output channels of jasper block's convolution"} + ) + kernel_size: str = field( + default="(11, 11, 11, 13, 13, 17, 17, 21, 21, 25, 25, 29, 1, 1)", + metadata={"help": "Kernel size of jasper block's convolution"} + ) + dilation: str = field( + default="(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1)", + metadata={"help": "Dilation of jasper block's convolution"} + ) + dropout_p: str = field( + default="(0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.3, 0.3, 0.4, 0.4, 0.0)", + metadata={"help": "Dropout probability"} + ) + optimizer: str = field( + default="novograd", metadata={"help": "Optimizer for training."} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/jasper/model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/jasper/model.py new file mode 100644 index 000000000..ea65b96c9 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/jasper/model.py @@ -0,0 +1,79 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +from openspeech.models import register_model +from openspeech.models import OpenspeechCTCModel +from openspeech.encoders import Jasper +from openspeech.models.jasper.configurations import Jasper5x3Config, Jasper10x5Config +from openspeech.tokenizers.tokenizer import Tokenizer + + +@register_model('jasper5x3', dataclass=Jasper5x3Config) +class Jasper5x3Model(OpenspeechCTCModel): + r""" + Jasper: An End-to-End Convolutional Neural Acoustic Model + Paper: https://arxiv.org/pdf/1904.03288.pdf + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. 
+ input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(Jasper5x3Model, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = Jasper( + configs=self.configs, + input_dim=self.configs.audio.num_mels, + num_classes=self.num_classes, + ) + + +@register_model('jasper10x5', dataclass=Jasper10x5Config) +class Jasper10x5Model(Jasper5x3Model): + r""" + Jasper: An End-to-End Convolutional Neural Acoustic Model + Paper: https://arxiv.org/pdf/1904.03288.pdf + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokeizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(Jasper10x5Model, self).__init__(configs, tokenizer) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/listen_attend_spell/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/listen_attend_spell/__init__.py new file mode 100644 index 000000000..d74c1a04a --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/listen_attend_spell/__init__.py @@ -0,0 +1,36 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
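Editor's note: the `@register_model('jasper5x3', dataclass=Jasper5x3Config)` decorator used above ties a model name to its class and configuration dataclass so both can be looked up by name later. A minimal sketch of such a registry is shown below; the dictionary name `MODEL_REGISTRY`, the lookup helper, and the exact behaviour of openspeech's `register_model` are assumptions, not the library's implementation.

```python
# Minimal name -> (model class, config dataclass) registry in the spirit of
# the @register_model decorator used above. MODEL_REGISTRY and the duplicate
# check are assumptions; openspeech's real registry may behave differently.
from dataclasses import dataclass
from typing import Callable, Dict, Tuple, Type

MODEL_REGISTRY: Dict[str, Tuple[Type, Type]] = {}

def register_model(name: str, dataclass: Type) -> Callable[[Type], Type]:
    def wrapper(cls: Type) -> Type:
        if name in MODEL_REGISTRY:
            raise ValueError(f"Model '{name}' is already registered")
        MODEL_REGISTRY[name] = (cls, dataclass)
        return cls
    return wrapper

@dataclass
class DummyConfig:
    hidden_dim: int = 16

@register_model("dummy", dataclass=DummyConfig)
class DummyModel:
    def __init__(self, configs: DummyConfig) -> None:
        self.configs = configs

model_cls, config_cls = MODEL_REGISTRY["dummy"]
print(model_cls(config_cls()).configs.hidden_dim)   # 16
```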
+ +from .configurations import ( + ListenAttendSpellWithLocationAwareConfigs, + ListenAttendSpellWithMultiHeadConfigs, + ListenAttendSpellConfigs, + JointCTCListenAttendSpellConfigs, + DeepCNNWithJointCTCListenAttendSpellConfigs, +) +from .model import ( + ListenAttendSpellWithLocationAwareModel, + ListenAttendSpellWithMultiHeadModel, + ListenAttendSpellModel, + JointCTCListenAttendSpellModel, + DeepCNNWithJointCTCListenAttendSpellModel, +) \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/listen_attend_spell/configurations.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/listen_attend_spell/configurations.py new file mode 100644 index 000000000..884d02289 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/listen_attend_spell/configurations.py @@ -0,0 +1,383 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import OpenspeechDataclass + + +@dataclass +class ListenAttendSpellConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.ListenAttendSpell`. + + It is used to initiated an `ListenAttendSpell` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: listen_attend_spell) + num_encoder_layers (int): The number of encoder layers. (default: 3) + num_decoder_layers (int): The number of decoder layers. (default: 2) + hidden_state_dim (int): The hidden state dimension of encoder. (default: 512) + encoder_dropout_p (float): The dropout probability of encoder. (default: 0.3) + encoder_bidirectional (bool): If True, becomes a bidirectional encoders (default: True) + rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: lstm) + joint_ctc_attention (bool): Flag indication joint ctc attention or not (default: False) + max_length (int): Max decoding length. (default: 128) + num_attention_heads (int): The number of attention heads. (default: 1) + decoder_dropout_p (float): The dropout probability of decoder. (default: 0.2) + decoder_attn_mechanism (str): The attention mechanism for decoder. (default: dot) + teacher_forcing_ratio (float): The ratio of teacher forcing. (default: 1.0) + optimizer (str): Optimizer for training. 
(default: adam) + """ + model_name: str = field( + default="listen_attend_spell", metadata={"help": "Model name"} + ) + num_encoder_layers: int = field( + default=3, metadata={"help": "The number of encoder layers."} + ) + num_decoder_layers: int = field( + default=2, metadata={"help": "The number of decoder layers."} + ) + hidden_state_dim: int = field( + default=512, metadata={"help": "The hidden state dimension of encoder."} + ) + encoder_dropout_p: float = field( + default=0.3, metadata={"help": "The dropout probability of encoder."} + ) + encoder_bidirectional: bool = field( + default=True, metadata={"help": "If True, becomes a bidirectional encoders"} + ) + rnn_type: str = field( + default="lstm", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"} + ) + joint_ctc_attention: bool = field( + default=False, metadata={"help": "Flag indication joint ctc attention or not"} + ) + max_length: int = field( + default=128, metadata={"help": "Max decoding length."} + ) + num_attention_heads: int = field( + default=1, metadata={"help": "The number of attention heads."} + ) + decoder_dropout_p: float = field( + default=0.2, metadata={"help": "The dropout probability of decoder."} + ) + decoder_attn_mechanism: str = field( + default="dot", metadata={"help": "The attention mechanism for decoder."} + ) + teacher_forcing_ratio: float = field( + default=1.0, metadata={"help": "The ratio of teacher forcing. "} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) + + +@dataclass +class ListenAttendSpellWithLocationAwareConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.ListenAttendSpellWithLocationAware`. + + It is used to initiated an `ListenAttendSpellWithLocationAware` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: listen_attend_spell_with_location_aware) + num_encoder_layers (int): The number of encoder layers. (default: 3) + num_decoder_layers (int): The number of decoder layers. (default: 2) + hidden_state_dim (int): The hidden state dimension of encoder. (default: 512) + encoder_dropout_p (float): The dropout probability of encoder. (default: 0.3) + encoder_bidirectional (bool): If True, becomes a bidirectional encoders (default: True) + rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: lstm) + joint_ctc_attention (bool): Flag indication joint ctc attention or not (default: False) + max_length (int): Max decoding length. (default: 128) + num_attention_heads (int): The number of attention heads. (default: 1) + decoder_dropout_p (float): The dropout probability of decoder. (default: 0.2) + decoder_attn_mechanism (str): The attention mechanism for decoder. (default: loc) + teacher_forcing_ratio (float): The ratio of teacher forcing. (default: 1.0) + optimizer (str): Optimizer for training. 
(default: adam) + """ + model_name: str = field( + default="listen_attend_spell_with_location_aware", metadata={"help": "Model name"} + ) + num_encoder_layers: int = field( + default=3, metadata={"help": "The number of encoder layers."} + ) + num_decoder_layers: int = field( + default=2, metadata={"help": "The number of decoder layers."} + ) + hidden_state_dim: int = field( + default=512, metadata={"help": "The hidden state dimension of encoder."} + ) + encoder_dropout_p: float = field( + default=0.3, metadata={"help": "The dropout probability of encoder."} + ) + encoder_bidirectional: bool = field( + default=True, metadata={"help": "If True, becomes a bidirectional encoders"} + ) + rnn_type: str = field( + default="lstm", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"} + ) + joint_ctc_attention: bool = field( + default=False, metadata={"help": "Flag indication joint ctc attention or not"} + ) + max_length: int = field( + default=128, metadata={"help": "Max decoding length."} + ) + num_attention_heads: int = field( + default=1, metadata={"help": "The number of attention heads."} + ) + decoder_dropout_p: float = field( + default=0.2, metadata={"help": "The dropout probability of decoder."} + ) + decoder_attn_mechanism: str = field( + default="loc", metadata={"help": "The attention mechanism for decoder."} + ) + teacher_forcing_ratio: float = field( + default=1.0, metadata={"help": "The ratio of teacher forcing. "} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) + + +@dataclass +class ListenAttendSpellWithMultiHeadConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.ListenAttendSpellWithMultiHead`. + + It is used to initiated an `ListenAttendSpellWithMultiHead` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: listen_attend_spell_with_multi_head) + num_encoder_layers (int): The number of encoder layers. (default: 3) + num_decoder_layers (int): The number of decoder layers. (default: 2) + hidden_state_dim (int): The hidden state dimension of encoder. (default: 512) + encoder_dropout_p (float): The dropout probability of encoder. (default: 0.3) + encoder_bidirectional (bool): If True, becomes a bidirectional encoders (default: True) + rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: lstm) + joint_ctc_attention (bool): Flag indication joint ctc attention or not (default: False) + max_length (int): Max decoding length. (default: 128) + num_attention_heads (int): The number of attention heads. (default: 4) + decoder_dropout_p (float): The dropout probability of decoder. (default: 0.2) + decoder_attn_mechanism (str): The attention mechanism for decoder. (default: multi-head) + teacher_forcing_ratio (float): The ratio of teacher forcing. (default: 1.0) + optimizer (str): Optimizer for training. 
(default: adam) + """ + model_name: str = field( + default="listen_attend_spell_with_multi_head", metadata={"help": "Model name"} + ) + num_encoder_layers: int = field( + default=3, metadata={"help": "The number of encoder layers."} + ) + num_decoder_layers: int = field( + default=2, metadata={"help": "The number of decoder layers."} + ) + hidden_state_dim: int = field( + default=512, metadata={"help": "The hidden state dimension of encoder."} + ) + encoder_dropout_p: float = field( + default=0.3, metadata={"help": "The dropout probability of encoder."} + ) + encoder_bidirectional: bool = field( + default=True, metadata={"help": "If True, becomes a bidirectional encoders"} + ) + rnn_type: str = field( + default="lstm", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"} + ) + joint_ctc_attention: bool = field( + default=False, metadata={"help": "Flag indication joint ctc attention or not"} + ) + max_length: int = field( + default=128, metadata={"help": "Max decoding length."} + ) + num_attention_heads: int = field( + default=4, metadata={"help": "The number of attention heads."} + ) + decoder_dropout_p: float = field( + default=0.2, metadata={"help": "The dropout probability of decoder."} + ) + decoder_attn_mechanism: str = field( + default="multi-head", metadata={"help": "The attention mechanism for decoder."} + ) + teacher_forcing_ratio: float = field( + default=1.0, metadata={"help": "The ratio of teacher forcing. "} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) + + +@dataclass +class JointCTCListenAttendSpellConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.JointCTCListenAttendSpell`. + + It is used to initiated an `JointCTCListenAttendSpell` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: joint_ctc_listen_attend_spell) + num_encoder_layers (int): The number of encoder layers. (default: 3) + num_decoder_layers (int): The number of decoder layers. (default: 2) + hidden_state_dim (int): The hidden state dimension of encoder. (default: 768) + encoder_dropout_p (float): The dropout probability of encoder. (default: 0.3) + encoder_bidirectional (bool): If True, becomes a bidirectional encoders (default: True) + rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: lstm) + joint_ctc_attention (bool): Flag indication joint ctc attention or not (default: True) + max_length (int): Max decoding length. (default: 128) + num_attention_heads (int): The number of attention heads. (default: 1) + decoder_dropout_p (float): The dropout probability of decoder. (default: 0.2) + decoder_attn_mechanism (str): The attention mechanism for decoder. (default: loc) + teacher_forcing_ratio (float): The ratio of teacher forcing. (default: 1.0) + optimizer (str): Optimizer for training. 
(default: adam) + """ + model_name: str = field( + default="joint_ctc_listen_attend_spell", metadata={"help": "Model name"} + ) + num_encoder_layers: int = field( + default=3, metadata={"help": "The number of encoder layers."} + ) + num_decoder_layers: int = field( + default=2, metadata={"help": "The number of decoder layers."} + ) + hidden_state_dim: int = field( + default=768, metadata={"help": "The hidden state dimension of encoder."} + ) + encoder_dropout_p: float = field( + default=0.3, metadata={"help": "The dropout probability of encoder."} + ) + encoder_bidirectional: bool = field( + default=True, metadata={"help": "If True, becomes a bidirectional encoders"} + ) + rnn_type: str = field( + default="lstm", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"} + ) + joint_ctc_attention: bool = field( + default=True, metadata={"help": "Flag indication joint ctc attention or not"} + ) + max_length: int = field( + default=128, metadata={"help": "Max decoding length."} + ) + num_attention_heads: int = field( + default=1, metadata={"help": "The number of attention heads."} + ) + decoder_dropout_p: float = field( + default=0.2, metadata={"help": "The dropout probability of decoder."} + ) + decoder_attn_mechanism: str = field( + default="loc", metadata={"help": "The attention mechanism for decoder."} + ) + teacher_forcing_ratio: float = field( + default=1.0, metadata={"help": "The ratio of teacher forcing. "} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) + + +@dataclass +class DeepCNNWithJointCTCListenAttendSpellConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.DeepCNNWithJointCTCListenAttendSpell`. + + It is used to initiated an `DeepCNNWithJointCTCListenAttendSpell` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: deep_cnn_with_joint_ctc_listen_attend_spell) + num_encoder_layers (int): The number of encoder layers. (default: 3) + num_decoder_layers (int): The number of decoder layers. (default: 2) + hidden_state_dim (int): The hidden state dimension of encoder. (default: 768) + encoder_dropout_p (float): The dropout probability of encoder. (default: 0.3) + encoder_bidirectional (bool): If True, becomes a bidirectional encoders (default: True) + rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: lstm) + extractor (str): The CNN feature extractor. (default: vgg) + activation (str): Type of activation function (default: str) + joint_ctc_attention (bool): Flag indication joint ctc attention or not (default: True) + max_length (int): Max decoding length. (default: 128) + num_attention_heads (int): The number of attention heads. (default: 1) + decoder_dropout_p (float): The dropout probability of decoder. (default: 0.2) + decoder_attn_mechanism (str): The attention mechanism for decoder. (default: loc) + teacher_forcing_ratio (float): The ratio of teacher forcing. (default: 1.0) + optimizer (str): Optimizer for training. 
(default: adam) + """ + model_name: str = field( + default="deep_cnn_with_joint_ctc_listen_attend_spell", metadata={"help": "Model name"} + ) + num_encoder_layers: int = field( + default=3, metadata={"help": "The number of encoder layers."} + ) + num_decoder_layers: int = field( + default=2, metadata={"help": "The number of decoder layers."} + ) + hidden_state_dim: int = field( + default=768, metadata={"help": "The hidden state dimension of encoder."} + ) + encoder_dropout_p: float = field( + default=0.3, metadata={"help": "The dropout probability of encoder."} + ) + encoder_bidirectional: bool = field( + default=True, metadata={"help": "If True, becomes a bidirectional encoders"} + ) + rnn_type: str = field( + default="lstm", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"} + ) + extractor: str = field( + default="vgg", metadata={"help": "The CNN feature extractor."} + ) + activation: str = field( + default="hardtanh", metadata={"help": "Type of activation function"} + ) + joint_ctc_attention: bool = field( + default=True, metadata={"help": "Flag indication joint ctc attention or not"} + ) + max_length: int = field( + default=128, metadata={"help": "Max decoding length."} + ) + num_attention_heads: int = field( + default=1, metadata={"help": "The number of attention heads."} + ) + decoder_dropout_p: float = field( + default=0.2, metadata={"help": "The dropout probability of decoder."} + ) + decoder_attn_mechanism: str = field( + default="loc", metadata={"help": "The attention mechanism for decoder."} + ) + teacher_forcing_ratio: float = field( + default=1.0, metadata={"help": "The ratio of teacher forcing."} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/listen_attend_spell/model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/listen_attend_spell/model.py new file mode 100644 index 000000000..9d8b37ff8 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/listen_attend_spell/model.py @@ -0,0 +1,324 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
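Editor's note: the `decoder_attn_mechanism` option configured above selects between dot-product ("dot"), location-aware ("loc") and multi-head attention in the LSTM decoder. As a reference point, the snippet below implements plain dot-product attention over encoder outputs. It is a generic, standalone illustration of the "dot" choice, not openspeech's `LSTMAttentionDecoder` code, and the tensor sizes are made up.

```python
# Generic dot-product attention over encoder outputs, illustrating the "dot"
# value of decoder_attn_mechanism. Standalone sketch, not openspeech code.
import torch
import torch.nn.functional as F

def dot_attention(query: torch.Tensor, encoder_outputs: torch.Tensor) -> torch.Tensor:
    """query: (batch, 1, dim); encoder_outputs: (batch, seq_len, dim)."""
    scores = torch.bmm(query, encoder_outputs.transpose(1, 2))   # (batch, 1, seq_len)
    attn = F.softmax(scores, dim=-1)                             # attention weights
    context = torch.bmm(attn, encoder_outputs)                   # (batch, 1, dim)
    return context

batch, seq_len, dim = 2, 50, 1024   # 512 * 2 for a bidirectional encoder
context = dot_attention(torch.randn(batch, 1, dim), torch.randn(batch, seq_len, dim))
print(context.shape)                # torch.Size([2, 1, 1024])
```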
+ + +from openspeech.models import register_model, OpenspeechEncoderDecoderModel +from openspeech.decoders import LSTMAttentionDecoder +from openspeech.encoders import LSTMEncoder, ConvolutionalLSTMEncoder +from openspeech.tokenizers.tokenizer import Tokenizer +from openspeech.models.listen_attend_spell.configurations import ( + ListenAttendSpellConfigs, + JointCTCListenAttendSpellConfigs, + ListenAttendSpellWithLocationAwareConfigs, + ListenAttendSpellWithMultiHeadConfigs, + DeepCNNWithJointCTCListenAttendSpellConfigs, +) + + +@register_model('listen_attend_spell', dataclass=ListenAttendSpellConfigs) +class ListenAttendSpellModel(OpenspeechEncoderDecoderModel): + r""" + Listen, Attend and Spell model with configurable encoder and decoder. + Paper: https://arxiv.org/abs/1508.01211 + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokeizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + - **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions. + """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(ListenAttendSpellModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = LSTMEncoder( + input_dim=self.configs.audio.num_mels, + num_layers=self.configs.model.num_encoder_layers, + num_classes=self.num_classes, + hidden_state_dim=self.configs.model.hidden_state_dim, + dropout_p=self.configs.model.encoder_dropout_p, + bidirectional=self.configs.model.encoder_bidirectional, + rnn_type=self.configs.model.rnn_type, + joint_ctc_attention=self.configs.model.joint_ctc_attention, + ) + decoder_hidden_state_dim = self.configs.model.hidden_state_dim << 1 \ + if self.configs.model.encoder_bidirectional \ + else self.configs.model.hidden_state_dim + self.decoder = LSTMAttentionDecoder( + num_classes=self.num_classes, + max_length=self.configs.model.max_length, + hidden_state_dim=decoder_hidden_state_dim, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + num_heads=self.configs.model.num_attention_heads, + dropout_p=self.configs.model.decoder_dropout_p, + num_layers=self.configs.model.num_decoder_layers, + attn_mechanism=self.configs.model.decoder_attn_mechanism, + rnn_type=self.configs.model.rnn_type, + ) + + def set_beam_decoder(self, beam_size: int = 3): + """ Setting beam search decoder """ + from openspeech.search import BeamSearchLSTM + self.decoder = BeamSearchLSTM( + decoder=self.decoder, + beam_size=beam_size, + ) + + +@register_model('listen_attend_spell_with_location_aware', dataclass=ListenAttendSpellWithLocationAwareConfigs) +class ListenAttendSpellWithLocationAwareModel(OpenspeechEncoderDecoderModel): + r""" + Listen, Attend and Spell model with configurable encoder and decoder. + Paper: https://arxiv.org/abs/1508.01211 + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokeizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + - **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions. 
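Editor's note: the `build_model` methods in this file set `decoder_hidden_state_dim = hidden_state_dim << 1` when the encoder is bidirectional, because the forward and backward encoder states are concatenated along the feature dimension. The standalone snippet below shows where the factor of two comes from; the layer sizes are illustrative and this is not openspeech code.

```python
# Why the decoder's hidden dimension is doubled for a bidirectional encoder:
# the forward and backward hidden states are concatenated, so the output
# feature size is hidden_state_dim * 2 (written as hidden_state_dim << 1).
import torch
import torch.nn as nn

hidden_state_dim = 512
encoder = nn.LSTM(input_size=80, hidden_size=hidden_state_dim,
                  num_layers=3, batch_first=True, bidirectional=True)
inputs = torch.randn(4, 100, 80)                     # (batch, seq_length, num_mels)
outputs, _ = encoder(inputs)
print(outputs.shape)                                 # torch.Size([4, 100, 1024])
assert outputs.size(-1) == hidden_state_dim << 1     # same as hidden_state_dim * 2
```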
+ """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(ListenAttendSpellWithLocationAwareModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = LSTMEncoder( + input_dim=self.configs.audio.num_mels, + num_layers=self.configs.model.num_encoder_layers, + num_classes=self.num_classes, + hidden_state_dim=self.configs.model.hidden_state_dim, + dropout_p=self.configs.model.encoder_dropout_p, + bidirectional=self.configs.model.encoder_bidirectional, + rnn_type=self.configs.model.rnn_type, + joint_ctc_attention=self.configs.model.joint_ctc_attention, + ) + decoder_hidden_state_dim = self.configs.model.hidden_state_dim << 1 \ + if self.configs.model.encoder_bidirectional \ + else self.configs.model.hidden_state_dim + self.decoder = LSTMAttentionDecoder( + num_classes=self.num_classes, + max_length=self.configs.model.max_length, + hidden_state_dim=decoder_hidden_state_dim, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + num_heads=self.configs.model.num_attention_heads, + dropout_p=self.configs.model.decoder_dropout_p, + num_layers=self.configs.model.num_decoder_layers, + attn_mechanism=self.configs.model.decoder_attn_mechanism, + rnn_type=self.configs.model.rnn_type, + ) + + def set_beam_decoder(self, beam_size: int = 3): + """ Setting beam search decoder """ + from openspeech.search import BeamSearchLSTM + self.decoder = BeamSearchLSTM( + decoder=self.decoder, + beam_size=beam_size, + ) + + +@register_model('listen_attend_spell_with_multi_head', dataclass=ListenAttendSpellWithMultiHeadConfigs) +class ListenAttendSpellWithMultiHeadModel(OpenspeechEncoderDecoderModel): + r""" + Listen, Attend and Spell model with configurable encoder and decoder. + Paper: https://arxiv.org/abs/1508.01211 + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokeizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + - **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions. 
+ """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(ListenAttendSpellWithMultiHeadModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = LSTMEncoder( + input_dim=self.configs.audio.num_mels, + num_layers=self.configs.model.num_encoder_layers, + num_classes=self.num_classes, + hidden_state_dim=self.configs.model.hidden_state_dim, + dropout_p=self.configs.model.encoder_dropout_p, + bidirectional=self.configs.model.encoder_bidirectional, + rnn_type=self.configs.model.rnn_type, + joint_ctc_attention=self.configs.model.joint_ctc_attention, + ) + decoder_hidden_state_dim = self.configs.model.hidden_state_dim << 1 \ + if self.configs.model.encoder_bidirectional \ + else self.configs.model.hidden_state_dim + self.decoder = LSTMAttentionDecoder( + num_classes=self.num_classes, + max_length=self.configs.model.max_length, + hidden_state_dim=decoder_hidden_state_dim, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + num_heads=self.configs.model.num_attention_heads, + dropout_p=self.configs.model.decoder_dropout_p, + num_layers=self.configs.model.num_decoder_layers, + attn_mechanism=self.configs.model.decoder_attn_mechanism, + rnn_type=self.configs.model.rnn_type, + ) + + def set_beam_decoder(self, beam_size: int = 3): + """ Setting beam search decoder """ + from openspeech.search import BeamSearchLSTM + self.decoder = BeamSearchLSTM( + decoder=self.decoder, + beam_size=beam_size, + ) + + +@register_model('joint_ctc_listen_attend_spell', dataclass=JointCTCListenAttendSpellConfigs) +class JointCTCListenAttendSpellModel(OpenspeechEncoderDecoderModel): + r""" + Joint CTC-Attention Listen, Attend and Spell model with configurable encoder and decoder. + Paper: https://arxiv.org/abs/1609.06773 + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokeizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + - **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions. 
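Editor's note: `set_beam_decoder`, defined above, swaps the greedy decoder for `BeamSearchLSTM` at inference time. The snippet below is a generic, self-contained beam search over a grid of per-step log-probabilities, meant only to illustrate what beam decoding buys over greedy argmax; it is not the `BeamSearchLSTM` implementation, and the toy scores are invented. With these toy scores greedy and beam search agree; the benefit appears when a locally suboptimal token leads to a better full sequence.

```python
# Generic beam search over a (seq_len, vocab) grid of log-probabilities.
# Toy illustration of what swapping in a beam decoder provides; not openspeech code.
import math
from typing import List, Tuple

def beam_search(log_probs: List[List[float]], beam_size: int = 3) -> Tuple[List[int], float]:
    beams: List[Tuple[List[int], float]] = [([], 0.0)]   # (tokens, cumulative log-prob)
    for step_scores in log_probs:
        candidates = [
            (tokens + [tok], score + tok_score)
            for tokens, score in beams
            for tok, tok_score in enumerate(step_scores)
        ]
        beams = sorted(candidates, key=lambda c: c[1], reverse=True)[:beam_size]
    return beams[0]

toy = [[math.log(p) for p in row] for row in
       [[0.6, 0.3, 0.1], [0.1, 0.5, 0.4], [0.2, 0.2, 0.6]]]
print(beam_search(toy, beam_size=3))   # best token sequence and its log-probability
```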
+ """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(JointCTCListenAttendSpellModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = LSTMEncoder( + input_dim=self.configs.audio.num_mels, + num_layers=self.configs.model.num_encoder_layers, + num_classes=self.num_classes, + hidden_state_dim=self.configs.model.hidden_state_dim, + dropout_p=self.configs.model.encoder_dropout_p, + bidirectional=self.configs.model.encoder_bidirectional, + rnn_type=self.configs.model.rnn_type, + joint_ctc_attention=self.configs.model.joint_ctc_attention, + ) + decoder_hidden_state_dim = self.configs.model.hidden_state_dim << 1 \ + if self.configs.model.encoder_bidirectional \ + else self.configs.model.hidden_state_dim + self.decoder = LSTMAttentionDecoder( + num_classes=self.num_classes, + max_length=self.configs.model.max_length, + hidden_state_dim=decoder_hidden_state_dim, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + num_heads=self.configs.model.num_attention_heads, + dropout_p=self.configs.model.decoder_dropout_p, + num_layers=self.configs.model.num_decoder_layers, + attn_mechanism=self.configs.model.decoder_attn_mechanism, + rnn_type=self.configs.model.rnn_type, + ) + + def set_beam_decoder(self, beam_size: int = 3): + """ Setting beam search decoder """ + from openspeech.search.beam_search_lstm import BeamSearchLSTM + self.decoder = BeamSearchLSTM( + decoder=self.decoder, + beam_size=beam_size, + ) + + +@register_model('deep_cnn_with_joint_ctc_listen_attend_spell', dataclass=DeepCNNWithJointCTCListenAttendSpellConfigs) +class DeepCNNWithJointCTCListenAttendSpellModel(OpenspeechEncoderDecoderModel): + r""" + Listen, Attend and Spell model with configurable encoder and decoder. + Paper: https://arxiv.org/abs/1508.01211 + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + - **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions. 
+ """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(DeepCNNWithJointCTCListenAttendSpellModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = ConvolutionalLSTMEncoder( + input_dim=self.configs.audio.num_mels, + num_layers=self.configs.model.num_encoder_layers, + num_classes=self.num_classes, + hidden_state_dim=self.configs.model.hidden_state_dim, + dropout_p=self.configs.model.encoder_dropout_p, + bidirectional=self.configs.model.encoder_bidirectional, + rnn_type=self.configs.model.rnn_type, + joint_ctc_attention=self.configs.model.joint_ctc_attention, + ) + decoder_hidden_state_dim = self.configs.model.hidden_state_dim << 1 \ + if self.configs.model.encoder_bidirectional \ + else self.configs.model.hidden_state_dim + self.decoder = LSTMAttentionDecoder( + num_classes=self.num_classes, + max_length=self.configs.model.max_length, + hidden_state_dim=decoder_hidden_state_dim, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + num_heads=self.configs.model.num_attention_heads, + dropout_p=self.configs.model.decoder_dropout_p, + num_layers=self.configs.model.num_decoder_layers, + attn_mechanism=self.configs.model.decoder_attn_mechanism, + rnn_type=self.configs.model.rnn_type, + ) + + def set_beam_decoder(self, beam_size: int = 3): + """ Setting beam search decoder """ + from openspeech.search import BeamSearchLSTM + self.decoder = BeamSearchLSTM( + decoder=self.decoder, + beam_size=beam_size, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/lstm_lm/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/lstm_lm/__init__.py new file mode 100644 index 000000000..045035ba4 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/lstm_lm/__init__.py @@ -0,0 +1,28 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from .configurations import ( + LSTMLanguageModelConfigs +) +from .model import ( + LSTMLanguageModel +) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/lstm_lm/configurations.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/lstm_lm/configurations.py new file mode 100644 index 000000000..0370862c7 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/lstm_lm/configurations.py @@ -0,0 +1,71 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import OpenspeechDataclass + + +@dataclass +class LSTMLanguageModelConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.LSTMLanguageModel`. + + It is used to initiated an `LSTMLanguageModel` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: lstm_lm) + num_layers (int): The number of lstm layers. (default: 3) + hidden_state_dim (int): The hidden state dimension of model. (default: 512) + dropout_p (float): The dropout probability of encoder. (default: 0.3) + rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: lstm) + max_length (int): Max decoding length. (default: 128) + teacher_forcing_ratio (float): The ratio of teacher forcing. (default: 1.0) + optimizer (str): Optimizer for training. (default: adam) + """ + model_name: str = field( + default="lstm_lm", metadata={"help": "Model name"} + ) + num_layers: int = field( + default=3, metadata={"help": "The number of encoder layers."} + ) + hidden_state_dim: int = field( + default=512, metadata={"help": "The hidden state dimension of encoder."} + ) + dropout_p: float = field( + default=0.3, metadata={"help": "The dropout probability of encoder."} + ) + rnn_type: str = field( + default="lstm", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"} + ) + max_length: int = field( + default=128, metadata={"help": "Max decoding length."} + ) + teacher_forcing_ratio: float = field( + default=1.0, metadata={"help": "The ratio of teacher forcing. 
"} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/lstm_lm/model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/lstm_lm/model.py new file mode 100644 index 000000000..f1a7fe36f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/lstm_lm/model.py @@ -0,0 +1,62 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +from openspeech.lm.lstm_lm import LSTMForLanguageModel +from openspeech.models import register_model +from openspeech.models.lstm_lm.configurations import LSTMLanguageModelConfigs +from openspeech.models.openspeech_language_model import OpenspeechLanguageModel +from openspeech.tokenizers.tokenizer import Tokenizer + + +@register_model('lstm_lm', dataclass=LSTMLanguageModelConfigs) +class LSTMLanguageModel(OpenspeechLanguageModel): + r""" + LSTM language model. + Paper: http://www-i6.informatik.rwth-aachen.de/publications/download/820/Sundermeyer-2012.pdf + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be + a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + + Returns: + outputs (dict): Result of model predictions. 
+ """ + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(LSTMLanguageModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.lm = LSTMForLanguageModel( + num_classes=self.num_classes, + max_length=self.configs.model.max_length, + hidden_state_dim=self.configs.model.hidden_state_dim, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + dropout_p=self.configs.model.dropout_p, + num_layers=self.configs.model.num_layers, + rnn_type=self.configs.model.rnn_type, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_ctc_model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_ctc_model.py new file mode 100644 index 000000000..67eb6f365 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_ctc_model.py @@ -0,0 +1,174 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +from collections import OrderedDict +from typing import Dict + +from openspeech.models import OpenspeechModel +from openspeech.tokenizers.tokenizer import Tokenizer + + +class OpenspeechCTCModel(OpenspeechModel): + r""" + Base class for OpenSpeech's encoder-only models (ctc-model). + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. 
``(batch)`` + + Returns: + ouputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(OpenspeechCTCModel, self).__init__(configs, tokenizer) + self.encoder = None + self.decoder = None + + def set_beam_decoder(self, beam_size: int = 3): + """ Setting beam search decoder """ + from openspeech.search import BeamSearchCTC + self.decoder = BeamSearchCTC( + labels=self.tokenizer.labels, + blank_id=self.tokenizer.blank_id, + beam_size=beam_size, + ) + + def collect_outputs( + self, + stage: str, + logits: torch.FloatTensor, + output_lengths: torch.IntTensor, + targets: torch.IntTensor, + target_lengths: torch.IntTensor, + ) -> OrderedDict: + loss = self.criterion( + log_probs=logits.transpose(0, 1), + targets=targets[:, 1:], + input_lengths=output_lengths, + target_lengths=target_lengths, + ) + predictions = logits.max(-1)[1] + + wer = self.wer_metric(targets[:, 1:], predictions) + cer = self.cer_metric(targets[:, 1:], predictions) + + return OrderedDict({ + "loss": loss, + "wer": wer, + "cer": cer, + }) + + def forward(self, inputs: torch.FloatTensor, input_lengths: torch.IntTensor) -> Dict[str, torch.Tensor]: + r""" + Forward propagate a `inputs` and `targets` pair for inference. + + Args: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.IntTensor): The length of input tensor. ``(batch)`` + + Returns: + ouputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + outputs = self.encoder(inputs, input_lengths) + + if len(outputs) == 2: + logits, output_lengths = outputs + else: + logits, _, output_lengths = outputs + + if self.decoder is not None: + y_hats = self.decoder(logits) + else: + y_hats = logits.max(-1)[1] + return { + "predictions": y_hats, + "logits": logits, + "output_lengths": output_lengths, + } + + def training_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for training. + + Inputs: + train_batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + logits, output_lengths = self.encoder(inputs, input_lengths) + return self.collect_outputs( + stage='train', + logits=logits, + output_lengths=output_lengths, + targets=targets, + target_lengths=target_lengths, + ) + + def validation_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for validation. + + Inputs: + train_batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + logits, output_lengths = self.encoder(inputs, input_lengths) + return self.collect_outputs( + stage='val', + logits=logits, + output_lengths=output_lengths, + targets=targets, + target_lengths=target_lengths, + ) + + def test_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for test. 
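Editor's note: `collect_outputs` above passes `logits.transpose(0, 1)` to the criterion because `torch.nn.CTCLoss` expects time-major `(T, N, C)` log-probabilities, and `forward` falls back to a plain argmax (`logits.max(-1)[1]`) when no beam decoder has been set. The standalone sketch below shows the expected tensor layout plus the standard collapse of repeated labels and blanks that turns an argmax path into a label sequence; the collapse helper is generic CTC practice and `blank_id=0` is an assumption, not necessarily what openspeech's metrics do.

```python
# Standalone CTC sketch: (batch, T, C) log-probs transposed to the (T, N, C)
# layout torch.nn.CTCLoss expects, plus a generic greedy decode that collapses
# repeats and removes blanks. Not openspeech code; blank_id=0 is an assumption.
import torch
import torch.nn as nn

batch, T, num_classes, blank_id = 2, 50, 30, 0
logits = torch.randn(batch, T, num_classes).log_softmax(dim=-1)   # (batch, T, C)
targets = torch.randint(1, num_classes, (batch, 20))
input_lengths = torch.full((batch,), T, dtype=torch.long)
target_lengths = torch.full((batch,), 20, dtype=torch.long)

ctc = nn.CTCLoss(blank=blank_id, zero_infinity=True)
loss = ctc(logits.transpose(0, 1), targets, input_lengths, target_lengths)   # (T, N, C) first arg
print(loss.item())

def greedy_ctc_decode(frame_ids: torch.Tensor, blank_id: int = 0):
    """Collapse repeated frame labels, then drop blanks."""
    decoded, previous = [], None
    for idx in frame_ids.tolist():
        if idx != previous and idx != blank_id:
            decoded.append(idx)
        previous = idx
    return decoded

print(greedy_ctc_decode(logits[0].max(-1)[1], blank_id))
```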
+ + Inputs: + train_batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + logits, output_lengths = self.encoder(inputs, input_lengths) + return self.collect_outputs( + stage='test', + logits=logits, + output_lengths=output_lengths, + targets=targets, + target_lengths=target_lengths, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_encoder_decoder_model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_encoder_decoder_model.py new file mode 100644 index 000000000..7ff4f7bcc --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_encoder_decoder_model.py @@ -0,0 +1,225 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from torch import Tensor +from collections import OrderedDict +from typing import Dict + +from openspeech.models import OpenspeechModel +from openspeech.utils import get_class_name +from openspeech.tokenizers.tokenizer import Tokenizer + + +class OpenspeechEncoderDecoderModel(OpenspeechModel): + r""" + Base class for OpenSpeech's encoder-decoder models. + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + - **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `predictions`, `logits`, `encoder_outputs`, + `encoder_logits`, `encoder_output_lengths`. 
+ """ + + def __init__(self, configs, tokenizer: Tokenizer, ) -> None: + super(OpenspeechEncoderDecoderModel, self).__init__(configs, tokenizer) + self.teacher_forcing_ratio = configs.model.teacher_forcing_ratio + self.encoder = None + self.decoder = None + self.criterion = self.configure_criterion(self.configs.criterion.criterion_name) + + def set_beam_decoder(self, beam_size: int = 3): + raise NotImplementedError + + def collect_outputs( + self, + stage: str, + logits: Tensor, + encoder_logits: Tensor, + encoder_output_lengths: Tensor, + targets: Tensor, + target_lengths: Tensor, + ) -> OrderedDict: + cross_entropy_loss, ctc_loss = None, None + + if get_class_name(self.criterion) == "JointCTCCrossEntropyLoss": + loss, ctc_loss, cross_entropy_loss = self.criterion( + encoder_logits=encoder_logits.transpose(0, 1), + logits=logits, + output_lengths=encoder_output_lengths, + targets=targets[:, 1:], + target_lengths=target_lengths, + ) + elif get_class_name(self.criterion) == "LabelSmoothedCrossEntropyLoss" \ + or get_class_name(self.criterion) == "CrossEntropyLoss": + loss = self.criterion(logits, targets[:, 1:]) + else: + raise ValueError(f"Unsupported criterion: {self.criterion}") + + predictions = logits.max(-1)[1] + + wer = self.wer_metric(targets[:, 1:], predictions) + cer = self.cer_metric(targets[:, 1:], predictions) + + return OrderedDict({ + "loss": loss, + "cross_entropy_loss": cross_entropy_loss, + "ctc_loss": ctc_loss, + "wer": wer, + "cer": cer, + "predictions": predictions, + "targets": targets, + "logits": logits + }) + + def forward(self, inputs: Tensor, input_lengths: Tensor) -> Dict[str, Tensor]: + r""" + Forward propagate a `inputs` and `targets` pair for inference. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `predictions`, `logits`, `encoder_outputs`, + `encoder_logits`, `encoder_output_lengths`. + """ + logits = None + encoder_outputs, encoder_logits, encoder_output_lengths = self.encoder(inputs, input_lengths) + + if get_class_name(self.decoder) in ("BeamSearchLSTM", "BeamSearchTransformer"): + predictions = self.decoder(encoder_outputs, encoder_output_lengths) + else: + logits = self.decoder( + encoder_outputs=encoder_outputs, + encoder_output_lengths=encoder_output_lengths, + teacher_forcing_ratio=0.0, + ) + predictions = logits.max(-1)[1] + return { + "predictions": predictions, + "logits": logits, + "encoder_outputs": encoder_outputs, + "encoder_logits": encoder_logits, + "encoder_output_lengths": encoder_output_lengths, + } + + def training_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for training. 
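Editor's note: `collect_outputs` above routes the encoder and decoder logits through a `JointCTCCrossEntropyLoss`. A common way such a joint criterion is formed is a weighted sum of the two losses, as sketched below; the 0.3/0.7 weighting, the helper name, and the target handling are assumptions rather than openspeech's actual defaults.

```python
# Sketch of a joint CTC + cross-entropy objective as a weighted sum.
# ctc_weight=0.3 and joint_ctc_ce_loss are assumptions; openspeech's
# JointCTCCrossEntropyLoss may weight, shift, or reduce differently.
import torch
import torch.nn as nn

def joint_ctc_ce_loss(encoder_log_probs, decoder_logits, targets,
                      output_lengths, target_lengths, ctc_weight=0.3, blank_id=0):
    ctc = nn.CTCLoss(blank=blank_id, zero_infinity=True)(
        encoder_log_probs, targets, output_lengths, target_lengths)          # (T, N, C) input
    ce = nn.CrossEntropyLoss()(
        decoder_logits.reshape(-1, decoder_logits.size(-1)), targets.reshape(-1))
    return ctc_weight * ctc + (1.0 - ctc_weight) * ce, ctc, ce

T, batch, num_classes, tgt_len = 50, 2, 30, 20
encoder_log_probs = torch.randn(T, batch, num_classes).log_softmax(dim=-1)
decoder_logits = torch.randn(batch, tgt_len, num_classes)
targets = torch.randint(1, num_classes, (batch, tgt_len))
loss, ctc_loss, ce_loss = joint_ctc_ce_loss(
    encoder_log_probs, decoder_logits, targets,
    torch.full((batch,), T), torch.full((batch,), tgt_len))
print(loss.item(), ctc_loss.item(), ce_loss.item())
```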
+ + Inputs: + train_batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + + encoder_outputs, encoder_logits, encoder_output_lengths = self.encoder(inputs, input_lengths) + if get_class_name(self.decoder) == "TransformerDecoder": + logits = self.decoder( + encoder_outputs=encoder_outputs, + targets=targets, + encoder_output_lengths=encoder_output_lengths, + target_lengths=target_lengths, + teacher_forcing_ratio=self.teacher_forcing_ratio, + ) + else: + logits = self.decoder( + encoder_outputs=encoder_outputs, + targets=targets, + encoder_output_lengths=encoder_output_lengths, + teacher_forcing_ratio=self.teacher_forcing_ratio, + ) + + return self.collect_outputs( + stage='train', + logits=logits, + encoder_logits=encoder_logits, + encoder_output_lengths=encoder_output_lengths, + targets=targets, + target_lengths=target_lengths, + ) + + def validation_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for validation. + + Inputs: + train_batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + + encoder_outputs, encoder_logits, encoder_output_lengths = self.encoder(inputs, input_lengths) + logits = self.decoder( + encoder_outputs, + encoder_output_lengths=encoder_output_lengths, + teacher_forcing_ratio=0.0, + ) + return self.collect_outputs( + stage='val', + logits=logits, + encoder_logits=encoder_logits, + encoder_output_lengths=encoder_output_lengths, + targets=targets, + target_lengths=target_lengths, + ) + + def test_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for test. 
+ + Inputs: + train_batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + + encoder_outputs, encoder_logits, encoder_output_lengths = self.encoder(inputs, input_lengths) + logits = self.decoder( + encoder_outputs, + encoder_output_lengths=encoder_output_lengths, + teacher_forcing_ratio=0.0, + ) + return self.collect_outputs( + stage='test', + logits=logits, + encoder_logits=encoder_logits, + encoder_output_lengths=encoder_output_lengths, + targets=targets, + target_lengths=target_lengths, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_language_model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_language_model.py new file mode 100644 index 000000000..b919bc76b --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_language_model.py @@ -0,0 +1,168 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +from collections import OrderedDict +from typing import Dict + +from openspeech.models import OpenspeechModel +from openspeech.tokenizers.tokenizer import Tokenizer +from openspeech.utils import get_class_name + + +class OpenspeechLanguageModel(OpenspeechModel): + r""" + Base class for OpenSpeech's language models. + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + - **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `loss`, `logits`, `targets`, `predictions`. 
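+
+        Example:
+            A minimal sketch. ``MyLanguageModel`` is a hypothetical concrete subclass
+            that builds ``self.lm``; the batch layout follows ``training_step``::
+
+                lm_model = MyLanguageModel(configs, tokenizer)
+                outputs = lm_model(inputs, input_lengths)   # {"predictions": ..., "logits": ...}
+                step_out = lm_model.training_step((inputs, input_lengths, targets), batch_idx)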
+ """ + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(OpenspeechLanguageModel, self).__init__(configs, tokenizer) + + def build_model(self): + raise NotImplementedError + + def collect_outputs( + self, + stage: str, + logits: torch.Tensor, + targets: torch.Tensor, + ) -> OrderedDict: + perplexity = self.criterion(logits, targets[:, 1:]) + predictions = logits.max(-1)[1] + + return OrderedDict({ + "loss": perplexity, + "logits": logits, + "targets": targets, + "predictions": predictions, + }) + + def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor) -> Dict[str, torch.Tensor]: + r""" + Forward propagate a `inputs` and `targets` pair for inference. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `loss`, `logits`, `targets`, `predictions`. + """ + if get_class_name(self.lm) == 'LSTMLanguageModel': + logits = self.lm(inputs, teacher_forcing_ratio=0.0) + elif get_class_name(self.lm) == 'TransformerLanguageModel': + logits = self.lm(inputs, input_lengths) + else: + raise ValueError(f"Unsupported language model class: {get_class_name(self.lm)}") + + predictions = logits.max(-1)[1] + return { + "predictions": predictions, + "logits": logits, + } + + def training_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for training. + + Inputs: + train_batch (tuple): A train batch contains `inputs` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, input_lengths, targets = batch + if get_class_name(self.lm) == 'LSTMLanguageModel': + logits = self.lm(inputs, teacher_forcing_ratio=self.teacher_forcing_ratio) + elif get_class_name(self.lm) == 'TransformerLanguageModel': + logits = self.lm(inputs, input_lengths) + else: + raise ValueError(f"Unsupported language model class: {get_class_name(self.lm)}") + + return self.collect_outputs( + stage='train', + logits=logits, + targets=targets, + ) + + def validation_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for validation. + + Inputs: + train_batch (tuple): A train batch contains `inputs` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, input_lengths, targets = batch + + if get_class_name(self.lm) == 'LSTMLanguageModel': + logits = self.lm(inputs, teacher_forcing_ratio=0.0) + elif get_class_name(self.lm) == 'TransformerLanguageModel': + logits = self.lm(inputs, input_lengths) + else: + raise ValueError(f"Unsupported language model class: {get_class_name(self.lm)}") + + return self.collect_outputs( + stage='val', + logits=logits, + targets=targets, + ) + + def test_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for test. 
+ + Inputs: + train_batch (tuple): A train batch contains `inputs` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, input_lengths, targets = batch + + if get_class_name(self.lm) == 'LSTMLanguageModel': + logits = self.lm(inputs, teacher_forcing_ratio=0.0) + elif get_class_name(self.lm) == 'TransformerLanguageModel': + logits = self.lm(inputs, input_lengths) + else: + raise ValueError(f"Unsupported language model class: {get_class_name(self.lm)}") + + return self.collect_outputs( + stage='test', + logits=logits, + targets=targets, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_model.py new file mode 100644 index 000000000..c6a6ade95 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_model.py @@ -0,0 +1,188 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from typing import Dict +from torch import Tensor +from torch.optim import Adam, Adagrad, Adadelta, Adamax, AdamW, SGD, ASGD + +from openspeech.optim import AdamP, RAdam, Novograd +from openspeech.criterion import CRITERION_REGISTRY +from openspeech.metrics import WordErrorRate, CharacterErrorRate +from openspeech.optim.scheduler import SCHEDULER_REGISTRY +from openspeech.tokenizers.tokenizer import Tokenizer + + +class OpenspeechModel(nn.Module): + r""" + Super class of openspeech models. + + Note: + Do not use this class directly, use one of the sub classes. + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions. 
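+
+        Example:
+            Rough shape of how a concrete subclass (here a hypothetical ``MyModel``)
+            is driven by a training loop::
+
+                model = MyModel(configs, tokenizer)
+                model.build_model()
+                optimizer, scheduler = model.configure_optimizers()
+                step_out = model.training_step(batch, batch_idx)   # subclass-defined, returns the loss dict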
+ """ + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(OpenspeechModel, self).__init__() + self.configs = configs + self.num_classes = len(tokenizer) + self.gradient_clip_val = configs.trainer.gradient_clip_val + self.tokenizer = tokenizer + self.current_val_loss = 100.0 + self.wer_metric = WordErrorRate(tokenizer) + self.cer_metric = CharacterErrorRate(tokenizer) + self.tokenizer = tokenizer + self.criterion = self.configure_criterion(configs.criterion.criterion_name) + + def build_model(self): + raise NotImplementedError + + def set_beam_decoder(self, beam_size: int = 3): + raise NotImplementedError + + def info(self, dictionary: dict) -> None: + r""" + Logging information from dictionary. + + Args: + dictionary (dict): dictionary contains information. + """ + for key, value in dictionary.items(): + print(key, value) + + def forward(self, inputs: torch.FloatTensor, input_lengths: torch.LongTensor) -> Dict[str, Tensor]: + r""" + Forward propagate a `inputs` and `targets` pair for inference. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions. + """ + raise NotImplementedError + + def training_step(self, batch: tuple, batch_idx: int): + r""" + Forward propagate a `inputs` and `targets` pair for training. + + Inputs: + batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + raise NotImplementedError + + def validation_step(self, batch: tuple, batch_idx: int): + r""" + Forward propagate a `inputs` and `targets` pair for validation. + + Inputs: + batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + raise NotImplementedError + + def test_step(self, batch: tuple, batch_idx: int): + r""" + Forward propagate a `inputs` and `targets` pair for test. + + Inputs: + batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + raise NotImplementedError + + def configure_optimizers(self): + r""" + Choose what optimizers and learning-rate schedulers to use in your optimization. + + + Returns: + - **Dictionary** - The first item has multiple optimizers, and the second has multiple LR schedulers (or multiple ``lr_dict``). + """ + SUPPORTED_OPTIMIZERS = { + "adam": Adam, + "adamp": AdamP, + "radam": RAdam, + "adagrad": Adagrad, + "adadelta": Adadelta, + "adamax": Adamax, + "adamw": AdamW, + "sgd": SGD, + "asgd": ASGD, + "novograd": Novograd, + } + + assert self.configs.model.optimizer in SUPPORTED_OPTIMIZERS.keys(), \ + f"Unsupported Optimizer: {self.configs.model.optimizer}\n" \ + f"Supported Optimizers: {SUPPORTED_OPTIMIZERS.keys()}" + + optimizer = SUPPORTED_OPTIMIZERS[self.configs.model.optimizer]( + self.parameters(), + lr=self.configs.lr_scheduler.lr, + ) + scheduler = SCHEDULER_REGISTRY[self.configs.lr_scheduler.scheduler_name]( + optimizer, self.configs) + + return optimizer, scheduler + + def configure_criterion(self, criterion_name: str) -> nn.Module: + r""" + Configure criterion for training. 
+ + Args: + criterion_name (str): name of criterion + + Returns: + criterion (nn.Module): criterion for training + """ + if criterion_name in ('joint_ctc_cross_entropy', 'label_smoothed_cross_entropy'): + return CRITERION_REGISTRY[criterion_name]( + configs=self.configs, + num_classes=self.num_classes, + tokenizer=self.tokenizer, + ) + else: + return CRITERION_REGISTRY[criterion_name]( + configs=self.configs, + tokenizer=self.tokenizer, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_transducer_model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_transducer_model.py new file mode 100644 index 000000000..0b74c7dc6 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/openspeech_transducer_model.py @@ -0,0 +1,287 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +import warnings +from torch import Tensor +from collections import OrderedDict +from typing import Tuple, Dict + +from openspeech.models import OpenspeechModel +from openspeech.search import BeamSearchRNNTransducer +from openspeech.modules import Linear +from openspeech.utils import get_class_name +from openspeech.tokenizers.tokenizer import Tokenizer + + +class OpenspeechTransducerModel(OpenspeechModel): + r""" + Base class for OpenSpeech's transducer models. + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + - **input_lengths** (torch.LongTensor): The length of input tensor. 
``(batch)`` + + Returns: + dict (dict): Result of model predictions that contains `predictions`, `logits`, `encoder_outputs`, `encoder_output_lengths` + """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(OpenspeechTransducerModel, self).__init__(configs, tokenizer) + self.encoder = None + self.decoder = None + self.decode = self.greedy_decode + + if hasattr(self.configs.model, "encoder_dim"): + in_features = self.configs.model.encoder_dim + self.configs.model.decoder_output_dim + elif hasattr(self.configs.model, "output_dim"): + in_features = self.configs.model.output_dim << 1 + else: + raise ValueError("Transducer model must be contain `encoder_dim` or `encoder_hidden_state_dim` config.") + + self.fc = nn.Sequential( + Linear(in_features=in_features, out_features=in_features), + nn.Tanh(), + Linear(in_features=in_features, out_features=self.num_classes), + ) + + def set_beam_decode(self, beam_size: int = 3, expand_beam: float = 2.3, state_beam: float = 4.6): + """ Setting beam search decode """ + self.decode = BeamSearchRNNTransducer( + joint=self.joint, + decoder=self.decoder, + beam_size=beam_size, + expand_beam=expand_beam, + state_beam=state_beam, + blank_id=self.tokenizer.blank_id, + ) + + def collect_outputs( + self, + stage: str, + logits: torch.FloatTensor, + input_lengths: torch.IntTensor, + targets: torch.IntTensor, + target_lengths: torch.IntTensor, + ) -> OrderedDict: + predictions = logits.max(-1)[1] + + loss = self.criterion( + logits=logits, + targets=targets[:, 1:].contiguous().int(), + input_lengths=input_lengths.int(), + target_lengths=target_lengths.int(), + ) + + return OrderedDict({ + "loss": loss, + "predictions": predictions, + "targets": targets, + "logits": logits, + }) + + def _expand_for_joint(self, encoder_outputs: Tensor, decoder_outputs: Tensor) -> Tuple[Tensor, Tensor]: + input_length = encoder_outputs.size(1) + target_length = decoder_outputs.size(1) + + encoder_outputs = encoder_outputs.unsqueeze(2) + decoder_outputs = decoder_outputs.unsqueeze(1) + + encoder_outputs = encoder_outputs.repeat([1, 1, target_length, 1]) + decoder_outputs = decoder_outputs.repeat([1, input_length, 1, 1]) + return encoder_outputs, decoder_outputs + + def joint(self, encoder_outputs: Tensor, decoder_outputs: Tensor) -> Tensor: + r""" + Joint `encoder_outputs` and `decoder_outputs`. + + Args: + encoder_outputs (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size ``(batch, seq_length, dimension)`` + decoder_outputs (torch.FloatTensor): A output sequence of decoders. `FloatTensor` of size ``(batch, seq_length, dimension)`` + + Returns: + outputs (torch.FloatTensor): outputs of joint `encoder_outputs` and `decoder_outputs`.. + """ + if encoder_outputs.dim() == 3 and decoder_outputs.dim() == 3: + encoder_outputs, decoder_outputs = self._expand_for_joint(encoder_outputs, decoder_outputs) + else: + assert encoder_outputs.dim() == decoder_outputs.dim() + + outputs = torch.cat((encoder_outputs, decoder_outputs), dim=-1) + outputs = self.fc(outputs).log_softmax(dim=-1) + + return outputs + + def greedy_decode(self, encoder_outputs: Tensor, max_length: int) -> Tensor: + r""" + Decode `encoder_outputs`. + + Args: + encoder_outputs (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size + ``(batch, seq_length, dimension)`` + max_length (int): max decoding time step + + Returns: + * logits (torch.FloatTensor): Log probability of model predictions. 
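+
+        Example:
+            Sketch of how decoding is invoked from :meth:`forward` (``encoder_outputs``
+            comes from the encoder); the returned tensor holds the greedily predicted
+            token ids per time step::
+
+                max_length = encoder_outputs.size(1)
+                predictions = self.greedy_decode(encoder_outputs, max_length)  # (batch, max_length)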
+ """ + outputs = list() + + for encoder_output in encoder_outputs: + pred_tokens = list() + decoder_input = encoder_output.new_zeros(1, 1).fill_(self.decoder.sos_id).long() + decoder_output, hidden_state = self.decoder(decoder_input) + + for t in range(max_length): + step_output = self.joint(encoder_output[t].view(-1), decoder_output.view(-1)) + + pred_token = step_output.argmax(dim=0) + pred_token = int(pred_token.item()) + pred_tokens.append(pred_token) + + decoder_input = torch.LongTensor([[pred_token]]) + if torch.cuda.is_available(): + decoder_input = decoder_input.cuda() + + decoder_output, hidden_state = self.decoder( + decoder_input, hidden_states=hidden_state + ) + + outputs.append(torch.LongTensor(pred_tokens)) + + return torch.stack(outputs, dim=0) + + def forward(self, inputs: Tensor, input_lengths: Tensor) -> Dict[str, Tensor]: + r""" + Decode `encoder_outputs`. + + Args: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + dict (dict): Result of model predictions that contains `predictions`, + `encoder_outputs`, `encoder_output_lengths` + """ + if get_class_name(self.encoder) in ["ConformerEncoder", "ContextNetEncoder"]: + encoder_outputs, _, output_lengths = self.encoder(inputs, input_lengths) + else: + encoder_outputs, output_lengths = self.encoder(inputs, input_lengths) + max_length = encoder_outputs.size(1) + + predictions = self.decode(encoder_outputs, max_length) + return { + "predictions": predictions, + "encoder_outputs": encoder_outputs, + "encoder_output_lengths": output_lengths, + } + + def training_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for training. + + Inputs: + train_batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + + if get_class_name(self.encoder) in ["ConformerEncoder", "ContextNetEncoder"]: + encoder_outputs, _, output_lengths = self.encoder(inputs, input_lengths) + else: + encoder_outputs, output_lengths = self.encoder(inputs, input_lengths) + + decoder_outputs, _ = self.decoder(targets, target_lengths) + logits = self.joint(encoder_outputs, decoder_outputs) + + return self.collect_outputs( + 'train', + logits=logits, + input_lengths=output_lengths, + targets=targets, + target_lengths=target_lengths, + ) + + def validation_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for validation. 
+ + Inputs: + train_batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + + if get_class_name(self.encoder) in ["ConformerEncoder", "ContextNetEncoder"]: + encoder_outputs, _, output_lengths = self.encoder(inputs, input_lengths) + else: + encoder_outputs, output_lengths = self.encoder(inputs, input_lengths) + + decoder_outputs, _ = self.decoder(targets, target_lengths) + logits = self.joint(encoder_outputs, decoder_outputs) + + return self.collect_outputs( + 'val', + logits=logits, + input_lengths=output_lengths, + targets=targets, + target_lengths=target_lengths, + ) + + def test_step(self, batch: tuple, batch_idx: int) -> OrderedDict: + r""" + Forward propagate a `inputs` and `targets` pair for test. + + Inputs: + train_batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths` + batch_idx (int): The index of batch + + Returns: + loss (torch.Tensor): loss for training + """ + inputs, targets, input_lengths, target_lengths = batch + + if get_class_name(self.encoder) in ["ConformerEncoder", "ContextNetEncoder"]: + encoder_outputs, _, output_lengths = self.encoder(inputs, input_lengths) + else: + encoder_outputs, output_lengths = self.encoder(inputs, input_lengths) + + decoder_outputs, _ = self.decoder(targets, target_lengths) + logits = self.joint(encoder_outputs, decoder_outputs) + + return self.collect_outputs( + 'test', + logits=logits, + input_lengths=output_lengths, + targets=targets, + target_lengths=target_lengths, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/quartznet/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/quartznet/__init__.py new file mode 100644 index 000000000..ca88df541 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/quartznet/__init__.py @@ -0,0 +1,32 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from .configurations import ( + QuartzNet15x5Configs, + QuartzNet5x5Configs, + QuartzNet10x5Configs +) +from .model import ( + QuartzNet10x5Model, + QuartzNet15x5Model, + QuartzNet5x5Model +) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/quartznet/configurations.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/quartznet/configurations.py new file mode 100644 index 000000000..451193691 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/quartznet/configurations.py @@ -0,0 +1,193 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import OpenspeechDataclass + + +@dataclass +class QuartzNet5x5Configs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.QuartzNet5x5`. + + It is used to initiated an `QuartzNet5x5` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: quartznet5x5) + num_blocks (int): Number of quartznet blocks (default: 5) + num_sub_blocks (int): Number of quartznet sub blocks (default: 5) + in_channels (str): Output channels of jasper block's convolution + out_channels (str): Output channels of jasper block's convolution + kernel_size (str): Kernel size of jasper block's convolution + dilation (str): Dilation of jasper block's convolution + dropout_p (str): Dropout probability + optimizer (str): Optimizer for training. 
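+
+    Example:
+        Being a plain dataclass, the configuration can be instantiated directly and
+        individual fields overridden (sketch; unspecified fields keep the defaults
+        listed above, and optimizer support is validated later by the model)::
+
+            configs = QuartzNet5x5Configs(optimizer="sgd")
+            print(configs.model_name, configs.num_blocks)  # quartznet5x5 5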
+ """ + model_name: str = field( + default="quartznet5x5", metadata={"help": "Model name"} + ) + num_blocks: int = field( + default=5, metadata={"help": "Number of quartznet blocks"} + ) + num_sub_blocks: int = field( + default=5, metadata={"help": "Number of quartznet sub blocks"} + ) + in_channels: str = field( + default="(None, 256, 256, 256, 512, 512, 512, 512, 1024)", + metadata={"help": "Input channels of jasper blocks"} + ) + out_channels: str = field( + default="(256, 256, 256, 512, 512, 512, 512, 1024, None)", + metadata={"help": "Output channels of jasper block's convolution"} + ) + kernel_size: str = field( + default="(33, 33, 39, 51, 63, 75, 87, 1, 1)", + metadata={"help": "Kernel size of jasper block's convolution"} + ) + dilation: str = field( + default="(1, 1, 1, 1, 1, 1, 1, 1, 2)", + metadata={"help": "Dilation of jasper block's convolution"} + ) + dropout_p: str = field( + default="(0.2, None, None, None, None, None, 0.2, 0.2, 0.2)", + metadata={"help": "Dropout probability"} + ) + optimizer: str = field( + default="novograd", metadata={"help": "Optimizer for training."} + ) + + +@dataclass +class QuartzNet10x5Configs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.QuartzNet10x5`. + + It is used to initiated an `QuartzNet10x5` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: quartznet5x5) + num_blocks (int): Number of quartznet blocks (default: 10) + num_sub_blocks (int): Number of quartznet sub blocks (default: 5) + in_channels (str): Output channels of jasper block's convolution + out_channels (str): Output channels of jasper block's convolution + kernel_size (str): Kernel size of jasper block's convolution + dilation (str): Dilation of jasper block's convolution + dropout_p (str): Dropout probability + optimizer (str): Optimizer for training. + """ + model_name: str = field( + default="quartznet10x5", metadata={"help": "Model name"} + ) + num_blocks: int = field( + default=10, metadata={"help": "Number of quartznet blocks"} + ) + num_sub_blocks: int = field( + default=5, metadata={"help": "Number of quartznet sub blocks"} + ) + in_channels: str = field( + default="(None, 256, 256, 256, 256, 256, 512, 512, 512, 512, 512, 512, 512, 1024)", + metadata={"help": "Input channels of jasper blocks"} + ) + out_channels: str = field( + default="(256, 256, 256, 256, 256, 512, 512, 512, 512, 512, 512, 512, 1024, None)", + metadata={"help": "Output channels of jasper block's convolution"} + ) + kernel_size: str = field( + default="(33, 33, 33, 39, 39, 51, 51, 63, 63, 75, 75, 87, 1, 1)", + metadata={"help": "Kernel size of jasper block's convolution"} + ) + dilation: str = field( + default="(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2)", + metadata={"help": "Dilation of jasper block's convolution"} + ) + dropout_p: str = field( + default="(0.2, None, None, None, None, None, None, None, None, None, None, 0.2, 0.2, 0.2)", + metadata={"help": "Dropout probability"} + ) + optimizer: str = field( + default="novograd", metadata={"help": "Optimizer for training."} + ) + + +@dataclass +class QuartzNet15x5Configs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.QuartzNet15x5`. + + It is used to initiated an `QuartzNet15x5` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. 
+ + Args: + model_name (str): Model name (default: quartznet15x5) + num_blocks (int): Number of quartznet blocks (default: 15) + num_sub_blocks (int): Number of quartznet sub blocks (default: 5) + in_channels (str): Output channels of jasper block's convolution + out_channels (str): Output channels of jasper block's convolution + kernel_size (str): Kernel size of jasper block's convolution + dilation (str): Dilation of jasper block's convolution + dropout_p (str): Dropout probability + optimizer (str): Optimizer for training. + """ + model_name: str = field( + default="quartznet15x5", metadata={"help": "Model name"} + ) + num_blocks: int = field( + default=15, metadata={"help": "Number of quartznet5x5 blocks"} + ) + num_sub_blocks: int = field( + default=5, metadata={"help": "Number of quartznet5x5 sub blocks"} + ) + in_channels: str = field( + default="(None, 256, 256, 256, 256, 256, 256, 256, " + "512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 1024)", + metadata={"help": "Input channels of jasper blocks"} + ) + out_channels: str = field( + default="(256, 256, 256, 256, 256, 256, 256, " + "512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 1024, None)", + metadata={"help": "Output channels of jasper block's convolution"} + ) + kernel_size: str = field( + default="(33, 33, 33, 33, 39, 39, 39, 51, 51, 51, 63, 63, 63, 75, 75, 75, 87, 1, 1)", + metadata={"help": "Kernel size of jasper block's convolution"} + ) + dilation: str = field( + default="(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2)", + metadata={"help": "Dilation of jasper block's convolution"} + ) + dropout_p: str = field( + default="(0.2, None, None, None, None, None, None, None, None, " + "None, None, None, None, None, None, None, 0.2, 0.2, 0.2)", + metadata={"help": "Dropout probability"} + ) + optimizer: str = field( + default="novograd", metadata={"help": "Optimizer for training."} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/quartznet/model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/quartznet/model.py new file mode 100644 index 000000000..b5673366b --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/quartznet/model.py @@ -0,0 +1,103 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ + +from openspeech.models import register_model +from openspeech.models import OpenspeechCTCModel +from openspeech.encoders.quartznet import QuartzNet +from openspeech.tokenizers.tokenizer import Tokenizer +from openspeech.models.quartznet.configurations import ( + QuartzNet5x5Configs, + QuartzNet10x5Configs, + QuartzNet15x5Configs, +) + + +@register_model('quartznet5x5', dataclass=QuartzNet5x5Configs) +class QuartzNet5x5Model(OpenspeechCTCModel): + r""" + QUARTZNET: DEEP AUTOMATIC SPEECH RECOGNITION WITH 1D TIME-CHANNEL SEPARABLE CONVOLUTIONS + Paper: https://arxiv.org/abs/1910.10261.pdf + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokeizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(QuartzNet5x5Model, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = QuartzNet( + configs=self.configs, + input_dim=self.configs.audio.num_mels, + num_classes=self.num_classes, + ) + + +@register_model('quartznet10x5', dataclass=QuartzNet10x5Configs) +class QuartzNet10x5Model(QuartzNet5x5Model): + r""" + QUARTZNET: DEEP AUTOMATIC SPEECH RECOGNITION WITH 1D TIME-CHANNEL SEPARABLE CONVOLUTIONS + Paper: https://arxiv.org/abs/1910.10261.pdf + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokeizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(QuartzNet10x5Model, self).__init__(configs, tokenizer) + + +@register_model('quartznet15x5', dataclass=QuartzNet15x5Configs) +class QuartzNet15x5Model(QuartzNet5x5Model): + r""" + QUARTZNET: DEEP AUTOMATIC SPEECH RECOGNITION WITH 1D TIME-CHANNEL SEPARABLE CONVOLUTIONS + Paper: https://arxiv.org/abs/1910.10261.pdf + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokeizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. 
``(batch)`` + + Returns: + outputs (dict): Result of model predictions that contains `y_hats`, `logits`, `output_lengths` + """ + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(QuartzNet15x5Model, self).__init__(configs, tokenizer) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/rnn_transducer/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/rnn_transducer/__init__.py new file mode 100644 index 000000000..20514a8b2 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/rnn_transducer/__init__.py @@ -0,0 +1,24 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from .configurations import RNNTransducerConfigs +from .model import RNNTransducerModel \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/rnn_transducer/configurations.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/rnn_transducer/configurations.py new file mode 100644 index 000000000..0efb842a4 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/rnn_transducer/configurations.py @@ -0,0 +1,83 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import OpenspeechDataclass + + +@dataclass +class RNNTransducerConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.RNNTransducer`. + + It is used to initiated an `RNNTransducer` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: transformer_transducer) + encoder_hidden_state_dim (int): Hidden state dimension of encoder (default: 312) + decoder_hidden_state_dim (int): Hidden state dimension of decoder (default: 512) + num_encoder_layers (int): The number of encoder layers. (default: 4) + num_decoder_layers (int): The number of decoder layers. (default: 1) + encoder_dropout_p (float): The dropout probability of encoder. (default: 0.2) + decoder_dropout_p (float): The dropout probability of decoder. (default: 0.2) + bidirectional (bool): If True, becomes a bidirectional encoders (default: True) + rnn_type (str): Type of rnn cell (rnn, lstm, gru) (default: lstm) + output_dim (int): dimension of model output. (default: 512) + optimizer (str): Optimizer for training. (default: adam) + """ + model_name: str = field( + default="rnn_transducer", metadata={"help": "Model name"} + ) + encoder_hidden_state_dim: int = field( + default=320, metadata={"help": "Dimension of encoder."} + ) + decoder_hidden_state_dim: int = field( + default=512, metadata={"help": "Dimension of decoder."} + ) + num_encoder_layers: int = field( + default=4, metadata={"help": "The number of encoder layers."} + ) + num_decoder_layers: int = field( + default=1, metadata={"help": "The number of decoder layers."} + ) + encoder_dropout_p: float = field( + default=0.2, metadata={"help": "The dropout probability of encoder."} + ) + decoder_dropout_p: float = field( + default=0.2, metadata={"help": "The dropout probability of decoder."} + ) + bidirectional: bool = field( + default=True, metadata={"help": "If True, becomes a bidirectional encoders"} + ) + rnn_type: str = field( + default="lstm", metadata={"help": "Type of rnn cell (rnn, lstm, gru)"} + ) + output_dim: int = field( + default=512, metadata={"help": "Dimension of outputs"} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/rnn_transducer/model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/rnn_transducer/model.py new file mode 100644 index 000000000..f3aba75b9 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/rnn_transducer/model.py @@ -0,0 +1,75 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +from openspeech.models import register_model +from openspeech.models import OpenspeechTransducerModel +from openspeech.decoders import RNNTransducerDecoder +from openspeech.encoders import RNNTransducerEncoder +from openspeech.models.rnn_transducer.configurations import RNNTransducerConfigs +from openspeech.tokenizers.tokenizer import Tokenizer + + +@register_model('rnn_transducer', dataclass=RNNTransducerConfigs) +class RNNTransducerModel(OpenspeechTransducerModel): + r""" + RNN-Transducer are a form of sequence-to-sequence models that do not employ attention mechanisms. + Unlike most sequence-to-sequence models, which typically need to process the entire input sequence + (the waveform in our case) to produce an output (the sentence), the RNN-T continuously processes input samples and + streams output symbols, a property that is welcome for speech dictation. In our implementation, + the output symbols are the characters of the alphabet. + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + - **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions. 
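+
+        Example:
+            Minimal sketch, assuming ``configs`` and ``tokenizer`` have been prepared
+            by the usual Openspeech training pipeline::
+
+                model = RNNTransducerModel(configs, tokenizer)
+                model.build_model()                      # instantiates encoder / decoder from configs
+                outputs = model(inputs, input_lengths)   # `predictions`, `encoder_outputs`, `encoder_output_lengths`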
+ """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(RNNTransducerModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = RNNTransducerEncoder( + input_dim=self.configs.audio.num_mels, + hidden_state_dim=self.configs.model.encoder_hidden_state_dim, + output_dim=self.configs.model.output_dim, + num_layers=self.configs.model.num_encoder_layers, + rnn_type=self.configs.model.rnn_type, + dropout_p=self.configs.model.encoder_dropout_p, + ) + self.decoder = RNNTransducerDecoder( + num_classes=self.num_classes, + hidden_state_dim=self.configs.model.decoder_hidden_state_dim, + output_dim=self.configs.model.output_dim, + num_layers=self.configs.model.num_decoder_layers, + rnn_type=self.configs.model.rnn_type, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + dropout_p=self.configs.model.decoder_dropout_p, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_lm/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_lm/__init__.py new file mode 100644 index 000000000..f1a4c1fad --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_lm/__init__.py @@ -0,0 +1,24 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from .configurations import TransformerLanguageModelConfigs +from .model import TransformerLanguageModel diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_lm/configurations.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_lm/configurations.py new file mode 100644 index 000000000..48255d033 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_lm/configurations.py @@ -0,0 +1,71 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import OpenspeechDataclass + + +@dataclass +class TransformerLanguageModelConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.TransformerLanguageModel`. + + It is used to initiated an `TransformerLanguageModel` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: transformer_lm) + num_layers (int): The number of lstm layers. (default: 6) + d_model (int): The dimension of model. (default: 768) + dropout_p (float): The dropout probability of encoder. (default: 0.3) + d_ff (int): Dimenstion of feed forward network. (default: 2048) + num_attention_heads (int): The number of attention heads. (default: 8) + max_length (int): Max decoding length. (default: 128) + optimizer (str): Optimizer for training. 
(default: adam) + """ + model_name: str = field( + default="transformer_lm", metadata={"help": "Model name"} + ) + num_layers: int = field( + default=6, metadata={"help": "The number of encoder layers."} + ) + d_model: int = field( + default=768, metadata={"help": "The dimension of model."} + ) + d_ff: int = field( + default=1536, metadata={"help": "The dimenstion of feed forward network."} + ) + num_attention_heads: int = field( + default=8, metadata={"help": "The number of attention heads."} + ) + dropout_p: float = field( + default=0.3, metadata={"help": "The dropout probability of encoder."} + ) + max_length: int = field( + default=128, metadata={"help": "Max decoding length."} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_lm/model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_lm/model.py new file mode 100644 index 000000000..39938ed84 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_lm/model.py @@ -0,0 +1,62 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +from openspeech.lm.transformer_lm import TransformerForLanguageModel +from openspeech.models import register_model, OpenspeechModel +from openspeech.models.transformer_lm.configurations import TransformerLanguageModelConfigs +from openspeech.tokenizers.tokenizer import Tokenizer + + +@register_model('transformer_lm', dataclass=TransformerLanguageModelConfigs) +class TransformerLanguageModel(OpenspeechModel): + r""" + Transformer language model. + Paper: https://arxiv.org/abs/1904.09408 + + Args: + configs (DictConfig): configuration set. + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + - **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions. 
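+
+        Example:
+            Minimal sketch, assuming ``configs`` and ``tokenizer`` come from the usual
+            Openspeech pipeline::
+
+                lm = TransformerLanguageModel(configs, tokenizer)
+                lm.build_model()                      # builds the underlying TransformerForLanguageModel
+                outputs = lm(inputs, input_lengths)   # {"predictions": ..., "logits": ...}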
+ """ + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(TransformerLanguageModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.lm = TransformerForLanguageModel( + num_classes=self.num_classes, + max_length=self.configs.model.max_length, + d_model=self.configs.model.d_model, + d_ff=self.configs.model.d_ff, + num_attention_heads=self.configs.model.num_attention_heads, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + dropout_p=self.configs.model.dropout_p, + num_layers=self.configs.model.num_layers, + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_transducer/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_transducer/__init__.py new file mode 100644 index 000000000..dc26f6666 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_transducer/__init__.py @@ -0,0 +1,24 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from .configurations import TransformerTransducerConfigs +from .model import TransformerTransducerModel diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_transducer/configurations.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_transducer/configurations.py new file mode 100644 index 000000000..b0796f196 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_transducer/configurations.py @@ -0,0 +1,92 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import OpenspeechDataclass + + +@dataclass +class TransformerTransducerConfigs(OpenspeechDataclass): + r""" + This is the configuration class to store the configuration of + a :class:`~openspeech.models.TransformerTransducer`. + + It is used to initialize a `TransformerTransducer` model. + + Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. + + Args: + model_name (str): Model name (default: transformer_transducer) + encoder_dim (int): Dimension of encoder. (default: 512) + d_ff (int): Dimension of feed forward network. (default: 2048) + num_attention_heads (int): The number of attention heads. (default: 8) + num_audio_layers (int): The number of audio layers. (default: 18) + num_label_layers (int): The number of label layers. (default: 2) + audio_dropout_p (float): The dropout probability of encoder. (default: 0.1) + label_dropout_p (float): The dropout probability of decoder. (default: 0.1) + decoder_hidden_state_dim (int): Hidden state dimension of decoder (default: 512) + decoder_output_dim (int): Dimension of model output. (default: 512) + conv_kernel_size (int): Kernel size of convolution layer. (default: 31) + max_positional_length (int): Max length of positional encoding. (default: 5000) + optimizer (str): Optimizer for training. (default: adam) + """ + model_name: str = field( + default="transformer_transducer", metadata={"help": "Model name"} + ) + encoder_dim: int = field( + default=512, metadata={"help": "Dimension of encoder"} + ) + d_ff: int = field( + default=2048, metadata={"help": "Dimension of feed forward network"} + ) + num_audio_layers: int = field( + default=18, metadata={"help": "Number of audio layers"} + ) + num_label_layers: int = field( + default=2, metadata={"help": "Number of label layers"} + ) + num_attention_heads: int = field( + default=8, metadata={"help": "Number of attention heads"} + ) + audio_dropout_p: float = field( + default=0.1, metadata={"help": "Dropout probability of audio layer"} + ) + label_dropout_p: float = field( + default=0.1, metadata={"help": "Dropout probability of label layer"} + ) + decoder_hidden_state_dim: int = field( + default=512, metadata={"help": "Hidden state dimension of decoder"} + ) + decoder_output_dim: int = field( + default=512, metadata={"help": "Dimension of model output."} + ) + conv_kernel_size: int = field( + default=31, metadata={"help": "Kernel size of convolution layer."} + ) + max_positional_length: int = field( + default=5000, metadata={"help": "Max length of positional encoding."} + ) + optimizer: str = field( + default="adam", metadata={"help": "Optimizer for training."} + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_transducer/model.py b/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_transducer/model.py new file mode 100644 index 000000000..95911e237 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/models/transformer_transducer/model.py @@ -0,0 +1,119 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +#
Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +from torch import Tensor +from typing import Dict +from collections import OrderedDict + +from openspeech.models import register_model, OpenspeechTransducerModel +from openspeech.decoders import TransformerTransducerDecoder +from openspeech.encoders import TransformerTransducerEncoder +from openspeech.search import BeamSearchTransformerTransducer +from openspeech.models.transformer_transducer.configurations import TransformerTransducerConfigs +from openspeech.tokenizers.tokenizer import Tokenizer + + +@register_model('transformer_transducer', dataclass=TransformerTransducerConfigs) +class TransformerTransducerModel(OpenspeechTransducerModel): + r""" + Transformer-Transducer is that every layer is identical for both audio and label encoders. + Unlike the basic transformer structure, the audio encoder and label encoder are separate. + So, the alignment is handled by a separate forward-backward process within the RNN-T architecture. + And we replace the LSTM encoders in RNN-T architecture with Transformer encoders. + + Args: + configs (DictConfig): configuraion set + tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model. + + Inputs: + inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + input_lengths (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + outputs (dict): Result of model predictions. 
+ """ + + def __init__(self, configs, tokenizer: Tokenizer) -> None: + super(TransformerTransducerModel, self).__init__(configs, tokenizer) + + def build_model(self): + self.encoder = TransformerTransducerEncoder( + input_size=self.configs.audio.num_mels, + model_dim=self.configs.model.encoder_dim, + d_ff=self.configs.model.d_ff, + num_layers=self.configs.model.num_audio_layers, + num_heads=self.configs.model.num_attention_heads, + dropout=self.configs.model.audio_dropout_p, + max_positional_length=self.configs.model.max_positional_length, + ) + self.decoder = TransformerTransducerDecoder( + num_classes=self.num_classes, + model_dim=self.configs.model.encoder_dim, + d_ff=self.configs.model.d_ff, + num_layers=self.configs.model.num_label_layers, + num_heads=self.configs.model.num_attention_heads, + dropout=self.configs.model.label_dropout_p, + max_positional_length=self.configs.model.max_positional_length, + pad_id=self.tokenizer.pad_id, + sos_id=self.tokenizer.sos_id, + eos_id=self.tokenizer.eos_id, + ) + + def set_beam_decode(self, beam_size: int = 3, expand_beam: float = 2.3, state_beam: float = 4.6): + """ Setting beam search decode """ + self.decode = BeamSearchTransformerTransducer( + joint=self.joint, + decoder=self.decoder, + beam_size=beam_size, + expand_beam=expand_beam, + state_beam=state_beam, + blank_id=self.tokenizer.blank_id, + ) + + def greedy_decode(self, encoder_outputs: Tensor, max_length: int) -> Tensor: + r""" + Decode `encoder_outputs`. + + Args: + encoder_outputs (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size ``(seq_length, dimension)`` + max_length (int): max decoding time step + + Returns: + y_hats (torch.IntTensor): model's predictions. + """ + batch = encoder_outputs.size(0) + pred_tokens = list() + + targets = encoder_outputs.new_tensor([self.decoder.sos_id] * batch, dtype=torch.long) + + for i in range(max_length): + decoder_output, _ = self.decoder(targets, None) + decoder_output = decoder_output.squeeze(1) + encoder_output = encoder_outputs[:, i, :] + targets = self.joint(encoder_output, decoder_output) + targets = targets.max(1)[1] + pred_tokens.append(targets) + + pred_tokens = torch.stack(pred_tokens, dim=1) + + return torch.LongTensor(pred_tokens) \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/__init__.py new file mode 100644 index 000000000..a8cfa9112 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/__init__.py @@ -0,0 +1,97 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from .wrapper import Linear, View, Transpose +from .additive_attention import AdditiveAttention +from .add_normalization import AddNorm +from .batchnorm_relu_rnn import BNReluRNN +from .conformer_attention_module import MultiHeadedSelfAttentionModule +from .conformer_block import ConformerBlock +from .conformer_convolution_module import ConformerConvModule +from .conformer_feed_forward_module import FeedForwardModule +from .conv2d_extractor import Conv2dExtractor +from .conv2d_subsampling import Conv2dSubsampling +from .deepspeech2_extractor import DeepSpeech2Extractor +from .vgg_extractor import VGGExtractor +from .conv_base import BaseConv1d +from .conv_group_shuffle import ConvGroupShuffle +from .depthwise_conv1d import DepthwiseConv1d +from .glu import GLU +from .jasper_subblock import JasperSubBlock +from .jasper_block import JasperBlock +from .location_aware_attention import LocationAwareAttention +from .mask import get_attn_pad_mask, get_attn_subsequent_mask +from .mask_conv1d import MaskConv1d +from .mask_conv2d import MaskConv2d +from .multi_head_attention import MultiHeadAttention +from .pointwise_conv1d import PointwiseConv1d +from .positional_encoding import PositionalEncoding +from .positionwise_feed_forward import PositionwiseFeedForward +from .quartznet_subblock import QuartzNetSubBlock +from .quartznet_block import QuartzNetBlock +from .relative_multi_head_attention import RelativeMultiHeadAttention +from .residual_connection_module import ResidualConnectionModule +from .dot_product_attention import DotProductAttention +from .swish import Swish +from .time_channel_separable_conv1d import TimeChannelSeparableConv1d +from .transformer_embedding import TransformerEmbedding + + +__all__ = [ + "AdditiveAttention", + "AddNorm", + "BNReluRNN", + "MultiHeadedSelfAttentionModule", + "ConformerBlock", + "ConformerConvModule", + "FeedForwardModule", + "Conv2dExtractor", + "Conv2dSubsampling", + "DeepSpeech2Extractor", + "VGGExtractor", + "BaseConv1d", + "ConvGroupShuffle", + "DepthwiseConv1d", + "GLU", + "JasperSubBlock", + "JasperBlock", + "LocationAwareAttention", + "get_attn_pad_mask", + "get_attn_subsequent_mask", + "MaskConv1d", + "MaskConv2d", + "MultiHeadAttention", + "PointwiseConv1d", + "PositionalEncoding", + "PositionwiseFeedForward", + "QuartzNetSubBlock", + "QuartzNetBlock", + "RelativeMultiHeadAttention", + "ResidualConnectionModule", + "DotProductAttention", + "Swish", + "TimeChannelSeparableConv1d", + "TransformerEmbedding", + "Linear", + "View", + "Transpose", +] diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/add_normalization.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/add_normalization.py new file mode 100644 index 000000000..6d2cd278a --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/add_normalization.py @@ -0,0 +1,44 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +#
copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn + + +class AddNorm(nn.Module): + """ + Add & Normalization layer proposed in "Attention Is All You Need". + Transformer employ a residual connection around each of the two sub-layers, + (Multi-Head Attention & Feed-Forward) followed by layer normalization. + """ + def __init__(self, sublayer: nn.Module, d_model: int = 512) -> None: + super(AddNorm, self).__init__() + self.sublayer = sublayer + self.layer_norm = nn.LayerNorm(d_model) + + def forward(self, *args): + residual = args[0] + outputs = self.sublayer(*args) + + if isinstance(outputs, tuple): + return self.layer_norm(outputs[0] + residual), outputs[1] + + return self.layer_norm(outputs + residual) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/additive_attention.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/additive_attention.py new file mode 100644 index 000000000..cb08716c8 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/additive_attention.py @@ -0,0 +1,63 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from typing import Tuple + +from openspeech.modules.wrapper import Linear + + +class AdditiveAttention(nn.Module): + r""" + Applies a additive attention (bahdanau) mechanism on the output features from the decoders. + Additive attention proposed in "Neural Machine Translation by Jointly Learning to Align and Translate" paper. + + Args: + dim (int): dimension of model + + Inputs: query, key, value + - **query** (batch_size, q_len, hidden_dim): tensor containing the output features from the decoders. 
+ - **key** (batch, k_len, d_model): tensor containing projection vector for encoders. + - **value** (batch_size, v_len, hidden_dim): tensor containing features of the encoded input sequence. + + Returns: context, attn + - **context**: tensor containing the context vector from attention mechanism. + - **attn**: tensor containing the alignment from the encoders outputs. + """ + def __init__(self, dim: int) -> None: + super(AdditiveAttention, self).__init__() + self.query_proj = Linear(dim, dim, bias=False) + self.key_proj = Linear(dim, dim, bias=False) + self.score_proj = Linear(dim, 1) + self.bias = nn.Parameter(torch.rand(dim).uniform_(-0.1, 0.1)) + + def forward(self, query: Tensor, key: Tensor, value: Tensor) -> Tuple[Tensor, Tensor]: + score = self.score_proj(torch.tanh(self.key_proj(key) + self.query_proj(query) + self.bias)).squeeze(-1) + attn = F.softmax(score, dim=-1) + context = torch.bmm(attn.unsqueeze(1), value) + + context += query + + return context, attn diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/batchnorm_relu_rnn.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/batchnorm_relu_rnn.py new file mode 100644 index 000000000..1bdc77a2e --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/batchnorm_relu_rnn.py @@ -0,0 +1,84 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + + +class BNReluRNN(nn.Module): + r""" + Recurrent neural network with batch normalization layer & ReLU activation function. 
+ + Args: + input_size (int): size of input + hidden_state_dim (int): the number of features in the hidden state `h` + rnn_type (str, optional): type of RNN cell (default: gru) + bidirectional (bool, optional): if True, becomes a bidirectional encoders (defulat: True) + dropout_p (float, optional): dropout probability (default: 0.1) + + Inputs: inputs, input_lengths + - **inputs** (batch, time, dim): Tensor containing input vectors + - **input_lengths**: Tensor containing containing sequence lengths + + Returns: outputs + - **outputs**: Tensor produced by the BNReluRNN module + """ + supported_rnns = { + 'lstm': nn.LSTM, + 'gru': nn.GRU, + 'rnn': nn.RNN, + } + + def __init__( + self, + input_size: int, + hidden_state_dim: int = 512, + rnn_type: str = 'gru', + bidirectional: bool = True, + dropout_p: float = 0.1, + ): + super(BNReluRNN, self).__init__() + self.hidden_state_dim = hidden_state_dim + self.batch_norm = nn.BatchNorm1d(input_size) + rnn_cell = self.supported_rnns[rnn_type] + self.rnn = rnn_cell( + input_size=input_size, + hidden_size=hidden_state_dim, + num_layers=1, + bias=True, + batch_first=True, + dropout=dropout_p, + bidirectional=bidirectional, + ) + + def forward(self, inputs: Tensor, input_lengths: Tensor): + total_length = inputs.size(0) + + inputs = F.relu(self.batch_norm(inputs.transpose(1, 2))) + inputs = inputs.transpose(1, 2) + + outputs = nn.utils.rnn.pack_padded_sequence(inputs, input_lengths.cpu()) + outputs, hidden_states = self.rnn(outputs) + outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, total_length=total_length) + + return outputs diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_attention_module.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_attention_module.py new file mode 100644 index 000000000..975c7f812 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_attention_module.py @@ -0,0 +1,81 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
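+
+# Illustrative shape sketch (assumed sizes; any d_model divisible by num_heads behaves the
+# same way): the self-attention module defined below preserves the input shape, e.g.
+#
+#     attention = MultiHeadedSelfAttentionModule(d_model=144, num_heads=4)
+#     outputs = attention(torch.randn(2, 50, 144))   # -> shape (2, 50, 144)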
+ +import torch.nn as nn +from torch import Tensor +from typing import Optional + +from openspeech.modules.relative_multi_head_attention import RelativeMultiHeadAttention +from openspeech.modules.positional_encoding import PositionalEncoding + + +class MultiHeadedSelfAttentionModule(nn.Module): + r""" + Conformer employs multi-headed self-attention (MHSA) while integrating an important technique from Transformer-XL, + the relative sinusoidal positional encoding scheme. The relative positional encoding allows the self-attention + module to generalize better on different input lengths, and the resulting encoder is more robust to the variance of + the utterance length. Conformer uses pre-norm residual units with dropout, which helps train + and regularize deeper models. + + Args: + d_model (int): The dimension of model + num_heads (int): The number of attention heads. + dropout_p (float): probability of dropout + + Inputs: inputs, mask + - **inputs** (batch, time, dim): Tensor containing input vector + - **mask** (batch, 1, time2) or (batch, time1, time2): Tensor containing indices to be masked + + Returns: + - **outputs** (batch, time, dim): Tensor produced by the relative multi-headed self-attention module. + """ + def __init__( + self, + d_model: int, + num_heads: int, + dropout_p: float = 0.1, + ) -> None: + super(MultiHeadedSelfAttentionModule, self).__init__() + self.positional_encoding = PositionalEncoding(d_model) + self.layer_norm = nn.LayerNorm(d_model) + self.attention = RelativeMultiHeadAttention(d_model, num_heads, dropout_p) + self.dropout = nn.Dropout(p=dropout_p) + + def forward(self, inputs: Tensor, mask: Optional[Tensor] = None) -> Tensor: + r""" + Forward propagate of conformer's multi-headed self attention module. + + Inputs: inputs, mask + - **inputs** (batch, time, dim): Tensor containing input vector + - **mask** (batch, 1, time2) or (batch, time1, time2): Tensor containing indices to be masked + + Returns: + - **outputs** (batch, time, dim): Tensor produced by the relative multi-headed self-attention module. + """ + batch_size, seq_length, _ = inputs.size() + pos_embedding = self.positional_encoding(seq_length) + pos_embedding = pos_embedding.repeat(batch_size, 1, 1) + + inputs = self.layer_norm(inputs) + outputs = self.attention(inputs, inputs, inputs, pos_embedding=pos_embedding, mask=mask) + + return self.dropout(outputs) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_block.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_block.py new file mode 100644 index 000000000..585f08286 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_block.py @@ -0,0 +1,110 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor + +from openspeech.modules.conformer_attention_module import MultiHeadedSelfAttentionModule +from openspeech.modules.conformer_convolution_module import ConformerConvModule +from openspeech.modules.conformer_feed_forward_module import FeedForwardModule +from openspeech.modules.residual_connection_module import ResidualConnectionModule + + +class ConformerBlock(nn.Module): + r""" + Conformer block contains two Feed Forward modules sandwiching the Multi-Headed Self-Attention module + and the Convolution module. This sandwich structure is inspired by Macaron-Net, which proposes replacing + the original feed-forward layer in the Transformer block into two half-step feed-forward layers, + one before the attention layer and one after. + + Args: + encoder_dim (int, optional): Dimension of conformer encoders + num_attention_heads (int, optional): Number of attention heads + feed_forward_expansion_factor (int, optional): Expansion factor of feed forward module + conv_expansion_factor (int, optional): Expansion factor of conformer convolution module + feed_forward_dropout_p (float, optional): Probability of feed forward module dropout + attention_dropout_p (float, optional): Probability of attention module dropout + conv_dropout_p (float, optional): Probability of conformer convolution module dropout + conv_kernel_size (int or tuple, optional): Size of the convolving kernel + half_step_residual (bool): Flag indication whether to use half step residual or not + + Inputs: inputs + - **inputs** (batch, time, dim): Tensor containing input vector + + Returns: outputs + - **outputs** (batch, time, dim): Tensor produces by conformer block. 
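+
+    Example (illustrative; the sizes are assumed, and any ``encoder_dim`` divisible by
+    ``num_attention_heads`` behaves the same way):
+        >>> import torch
+        >>> block = ConformerBlock(encoder_dim=144, num_attention_heads=4)
+        >>> outputs = block(torch.randn(2, 50, 144))  # (batch, time, dim) is preserved
+        >>> outputs.shape
+        torch.Size([2, 50, 144])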
+ """ + def __init__( + self, + encoder_dim: int = 512, + num_attention_heads: int = 8, + feed_forward_expansion_factor: int = 4, + conv_expansion_factor: int = 2, + feed_forward_dropout_p: float = 0.1, + attention_dropout_p: float = 0.1, + conv_dropout_p: float = 0.1, + conv_kernel_size: int = 31, + half_step_residual: bool = True, + ) -> None: + super(ConformerBlock, self).__init__() + if half_step_residual: + self.feed_forward_residual_factor = 0.5 + else: + self.feed_forward_residual_factor = 1 + + self.sequential = nn.Sequential( + ResidualConnectionModule( + module=FeedForwardModule( + encoder_dim=encoder_dim, + expansion_factor=feed_forward_expansion_factor, + dropout_p=feed_forward_dropout_p, + ), + module_factor=self.feed_forward_residual_factor, + ), + ResidualConnectionModule( + module=MultiHeadedSelfAttentionModule( + d_model=encoder_dim, + num_heads=num_attention_heads, + dropout_p=attention_dropout_p, + ), + ), + ResidualConnectionModule( + module=ConformerConvModule( + in_channels=encoder_dim, + kernel_size=conv_kernel_size, + expansion_factor=conv_expansion_factor, + dropout_p=conv_dropout_p, + ), + ), + ResidualConnectionModule( + module=FeedForwardModule( + encoder_dim=encoder_dim, + expansion_factor=feed_forward_expansion_factor, + dropout_p=feed_forward_dropout_p, + ), + module_factor=self.feed_forward_residual_factor, + ), + nn.LayerNorm(encoder_dim), + ) + + def forward(self, inputs: Tensor) -> Tensor: + return self.sequential(inputs) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_convolution_module.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_convolution_module.py new file mode 100644 index 000000000..4adf07fbc --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_convolution_module.py @@ -0,0 +1,83 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor + +from openspeech.modules.glu import GLU +from openspeech.modules.swish import Swish +from openspeech.modules.pointwise_conv1d import PointwiseConv1d +from openspeech.modules.depthwise_conv1d import DepthwiseConv1d +from openspeech.modules.wrapper import Transpose + + +class ConformerConvModule(nn.Module): + r""" + Conformer convolution module starts with a pointwise convolution and a gated linear unit (GLU). + This is followed by a single 1-D depthwise convolution layer. 
Batchnorm is deployed just after the convolution + to aid training deep models. + + Args: + in_channels (int): Number of channels in the input + kernel_size (int or tuple, optional): Size of the convolving kernel Default: 31 + dropout_p (float, optional): probability of dropout + + Inputs: inputs + inputs (batch, time, dim): Tensor contains input sequences + + Outputs: outputs + outputs (batch, time, dim): Tensor produces by conformer convolution module. + """ + def __init__( + self, + in_channels: int, + kernel_size: int = 31, + expansion_factor: int = 2, + dropout_p: float = 0.1, + ) -> None: + super(ConformerConvModule, self).__init__() + assert (kernel_size - 1) % 2 == 0, "kernel_size should be a odd number for 'SAME' padding" + assert expansion_factor == 2, "Currently, Only Supports expansion_factor 2" + + self.sequential = nn.Sequential( + nn.LayerNorm(in_channels), + Transpose(shape=(1, 2)), + PointwiseConv1d(in_channels, in_channels * expansion_factor, stride=1, padding=0, bias=True), + GLU(dim=1), + DepthwiseConv1d(in_channels, in_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2), + nn.BatchNorm1d(in_channels), + Swish(), + PointwiseConv1d(in_channels, in_channels, stride=1, padding=0, bias=True), + nn.Dropout(p=dropout_p), + ) + + def forward(self, inputs: Tensor) -> Tensor: + r""" + Forward propagate of conformer's convolution module. + + Inputs: inputs + inputs (batch, time, dim): Tensor contains input sequences + + Outputs: outputs + outputs (batch, time, dim): Tensor produces by conformer convolution module. + """ + return self.sequential(inputs).transpose(1, 2) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_feed_forward_module.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_feed_forward_module.py new file mode 100644 index 000000000..d4382fdd6 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conformer_feed_forward_module.py @@ -0,0 +1,73 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor + +from openspeech.modules.swish import Swish +from openspeech.modules.wrapper import Linear + + +class FeedForwardModule(nn.Module): + r""" + Conformer Feed Forward Module follow pre-norm residual units and apply layer normalization within the residual unit + and on the input before the first linear layer. 
This module also apply Swish activation and dropout, which helps + regularizing the network. + + Args: + encoder_dim (int): Dimension of conformer encoders + expansion_factor (int): Expansion factor of feed forward module. + dropout_p (float): Ratio of dropout + + Inputs: inputs + - **inputs** (batch, time, dim): Tensor contains input sequences + + Outputs: outputs + - **outputs** (batch, time, dim): Tensor produces by feed forward module. + """ + def __init__( + self, + encoder_dim: int = 512, + expansion_factor: int = 4, + dropout_p: float = 0.1, + ) -> None: + super(FeedForwardModule, self).__init__() + self.sequential = nn.Sequential( + nn.LayerNorm(encoder_dim), + Linear(encoder_dim, encoder_dim * expansion_factor, bias=True), + Swish(), + nn.Dropout(p=dropout_p), + Linear(encoder_dim * expansion_factor, encoder_dim, bias=True), + nn.Dropout(p=dropout_p), + ) + + def forward(self, inputs: Tensor) -> Tensor: + r""" + Forward propagate of conformer's feed-forward module. + + Inputs: inputs + - **inputs** (batch, time, dim): Tensor contains input sequences + + Outputs: outputs + - **outputs** (batch, time, dim): Tensor produces by feed forward module. + """ + return self.sequential(inputs) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/contextnet_block.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/contextnet_block.py new file mode 100644 index 000000000..934e03b7d --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/contextnet_block.py @@ -0,0 +1,199 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from typing import Tuple +from torch import Tensor +from openspeech.modules.contextnet_module import ContextNetConvModule, ContextNetSEModule +from openspeech.modules.swish import Swish +import torch.nn as nn + + +class ContextNetBlock(nn.Module): + r""" + Convolution block contains a number of convolutions, each followed by batch normalization and activation. + Squeeze-and-excitation (SE) block operates on the output of the last convolution layer. + Skip connection with projection is applied on the output of the squeeze-and-excitation block. 
+ + Args: + in_channels (int): Input channel in convolutional layer + out_channels (int): Output channel in convolutional layer + num_layers (int, optional): The number of convolutional layers (default : 5) + kernel_size (int, optional): Value of convolution kernel size (default : 5) + stride(int, optional): Value of stride (default : 1) + padding (int, optional): Value of padding (default: 0) + residual (bool, optional): Flag indication residual or not (default : True) + + Inputs: inputs, input_lengths + - **inputs**: Input of convolution block `FloatTensor` of size ``(batch, dimension, seq_length)`` + - **input_lengths**: The length of input tensor. ``(batch)`` + + Returns: output, output_lengths + - **output**: Output of convolution block `FloatTensor` of size + ``(batch, dimension, seq_length)`` + - **output_lengths**: The length of output tensor. ``(batch)`` + """ + def __init__( + self, + in_channels: int, + out_channels: int, + num_layers: int = 5, + kernel_size: int = 5, + stride: int = 1, + padding: int = 0, + residual: bool = True, + ) -> None: + super(ContextNetBlock, self).__init__() + self.num_layers = num_layers + self.swish = Swish() + self.se_layer = ContextNetSEModule(out_channels) + self.residual = None + + if residual: + self.residual = ContextNetConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + activation=False, + ) + + if self.num_layers == 1: + self.conv_layers = ContextNetConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + ) + + else: + stride_list = [1 for _ in range(num_layers - 1)] + [stride] + in_channel_list = [in_channels] + [out_channels for _ in range(num_layers - 1)] + + self.conv_layers = nn.ModuleList(list()) + for in_channels, stride in zip(in_channel_list, stride_list): + self.conv_layers.append( + ContextNetConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + ) + ) + + def forward( + self, + inputs: Tensor, + input_lengths: Tensor, + ) -> Tuple[Tensor, Tensor]: + r""" + Forward propagate a `inputs` for convolution block. + + Args: + **inputs** (torch.FloatTensor): Input of convolution block `FloatTensor` of size + ``(batch, dimension, seq_length)`` + **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + **output** (torch.FloatTensor): Output of convolution block `FloatTensor` of size + ``(batch, dimension, seq_length)`` + **output_lengths** (torch.LongTensor): The length of output tensor. ``(batch)`` + """ + output = inputs + output_lengths = input_lengths + + if self.num_layers == 1: + output, output_lengths = self.conv_layers(output, output_lengths) + else: + for conv_layer in self.conv_layers: + output, output_lengths = conv_layer(output, output_lengths) + + output = self.se_layer(output, output_lengths) + + if self.residual is not None: + residual, _ = self.residual(inputs, input_lengths) + output += residual + + return self.swish(output), output_lengths + + @staticmethod + def make_conv_blocks( + input_dim: int = 80, + num_layers: int = 5, + kernel_size: int = 5, + num_channels: int = 256, + output_dim: int = 640, + ) -> nn.ModuleList: + r""" + Create 23 convolution blocks. 
+ + Args: + input_dim (int, optional): Dimension of input vector (default : 80) + num_layers (int, optional): The number of convolutional layers (default : 5) + kernel_size (int, optional): Value of convolution kernel size (default : 5) + num_channels (int, optional): The number of channels in the convolution filter (default: 256) + output_dim (int, optional): Dimension of encoder output vector (default: 640) + + Returns: + **conv_blocks** (nn.ModuleList): ModuleList with 23 convolution blocks + """ + conv_blocks = nn.ModuleList() + + # C0 : 1 conv layer, init_dim output channels, stride 1, no residual + conv_blocks.append(ContextNetBlock(input_dim, num_channels, 1, kernel_size, 1, 0, False)) + + # C1-2 : 5 conv layers, init_dim output channels, stride 1 + for _ in range(1, 2 + 1): + conv_blocks.append(ContextNetBlock(num_channels, num_channels, num_layers, kernel_size, 1, 0, True)) + + # C3 : 5 conv layer, init_dim output channels, stride 2 + conv_blocks.append(ContextNetBlock(num_channels, num_channels, num_layers, kernel_size, 2, 0, True)) + + # C4-6 : 5 conv layers, init_dim output channels, stride 1 + for _ in range(4, 6 + 1): + conv_blocks.append(ContextNetBlock(num_channels, num_channels, num_layers, kernel_size, 1, 0, True)) + + # C7 : 5 conv layers, init_dim output channels, stride 2 + conv_blocks.append(ContextNetBlock(num_channels, num_channels, num_layers, kernel_size, 2, 0, True)) + + # C8-10 : 5 conv layers, init_dim output channels, stride 1 + for _ in range(8, 10 + 1): + conv_blocks.append(ContextNetBlock(num_channels, num_channels, num_layers, kernel_size, 1, 0, True)) + + # C11-13 : 5 conv layers, middle_dim output channels, stride 1 + conv_blocks.append(ContextNetBlock(num_channels, num_channels << 1, num_layers, kernel_size, 1, 0, True)) + for _ in range(12, 13 + 1): + conv_blocks.append(ContextNetBlock(num_channels << 1, num_channels << 1, num_layers, kernel_size, 1, 0, True)) + + # C14 : 5 conv layers, middle_dim output channels, stride 2 + conv_blocks.append(ContextNetBlock(num_channels << 1, num_channels << 1, num_layers, kernel_size, 2, 0, True)) + + # C15-21 : 5 conv layers, middle_dim output channels, stride 1 + for i in range(15, 21 + 1): + conv_blocks.append(ContextNetBlock(num_channels << 1, num_channels << 1, num_layers, kernel_size, 1, 0, True)) + + # C22 : 1 conv layer, final_dim output channels, stride 1, no residual + conv_blocks.append(ContextNetBlock(num_channels << 1, output_dim, 1, kernel_size, 1, 0, False)) + + return conv_blocks \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/contextnet_module.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/contextnet_module.py new file mode 100644 index 000000000..0540dbb66 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/contextnet_module.py @@ -0,0 +1,182 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of 
the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from torch import Tensor +from typing import Tuple +from openspeech.modules.swish import Swish +import torch.nn as nn + + +class ContextNetSEModule(nn.Module): + r""" + Squeeze-and-excitation module. + + Args: + dim (int): Dimension to be used for two fully connected (FC) layers + + Inputs: inputs, input_lengths + - **inputs**: The output of the last convolution layer. `FloatTensor` of size + ``(batch, dimension, seq_length)`` + - **input_lengths**: The length of input tensor. ``(batch)`` + + Returns: output + - **output**: Output of SELayer `FloatTensor` of size + ``(batch, dimension, seq_length)`` + """ + def __init__(self, dim: int) -> None: + super(ContextNetSEModule, self).__init__() + assert dim % 8 == 0, 'Dimension should be divisible by 8.' + + self.dim = dim + self.sequential = nn.Sequential( + nn.Linear(dim, dim // 8), + Swish(), + nn.Linear(dim // 8, dim), + ) + + def forward( + self, + inputs: Tensor, + input_lengths: Tensor, + ) -> Tuple[Tensor, Tensor]: + r""" + Forward propagate a `inputs` for SE Layer. + + Args: + **inputs** (torch.FloatTensor): The output of the last convolution layer. `FloatTensor` of size + ``(batch, dimension, seq_length)`` + **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + **output** (torch.FloatTensor): Output of SELayer `FloatTensor` of size + ``(batch, dimension, seq_length)`` + """ + residual = inputs + seq_lengths = inputs.size(2) + + inputs = inputs.sum(dim=2) / input_lengths.unsqueeze(1) + output = self.sequential(inputs) + + output = output.sigmoid().unsqueeze(2) + output = output.repeat(1, 1, seq_lengths) + + return output * residual + + +class ContextNetConvModule(nn.Module): + r""" + When the stride is 1, it pads the input so the output has the shape as the input. + And when the stride is 2, it does not pad the input. + + Args: + in_channels (int): Input channel in convolutional layer + out_channels (int): Output channel in convolutional layer + kernel_size (int, optional): Value of convolution kernel size (default : 5) + stride(int, optional): Value of stride (default : 1) + padding (int, optional): Value of padding (default: 0) + activation (bool, optional): Flag indication use activation function or not (default : True) + groups(int, optional): Value of groups (default : 1) + bias (bool, optional): Flag indication use bias or not (default : True) + + Inputs: inputs, input_lengths + - **inputs**: Input of convolution layer `FloatTensor` of size ``(batch, dimension, seq_length)`` + - **input_lengths**: The length of input tensor. ``(batch)`` + + Returns: output, output_lengths + - **output**: Output of convolution layer `FloatTensor` of size + ``(batch, dimension, seq_length)`` + - **output_lengths**: The length of output tensor. 
``(batch)`` + """ + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int = 5, + stride: int = 1, + padding: int = 0, + activation: bool = True, + groups: int = 1, + bias: bool = True, + ): + super(ContextNetConvModule, self).__init__() + assert kernel_size == 5, "The convolution layer in the ContextNet model has 5 kernels." + + if stride == 1: + self.conv = nn.Conv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + dilation=1, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=bias, + ) + elif stride == 2: + self.conv = nn.Conv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + dilation=1, + padding=padding, + groups=groups, + bias=bias, + ) + + self.batch_norm = nn.BatchNorm1d(num_features=out_channels) + self.activation = activation + + if self.activation: + self.swish = Swish() + + def forward( + self, + inputs: Tensor, + input_lengths: Tensor, + ) -> Tuple[Tensor, Tensor]: + r""" + Forward propagate a `inputs` for convolution layer. + + Args: + **inputs** (torch.FloatTensor): Input of convolution layer `FloatTensor` of size + ``(batch, dimension, seq_length)`` + **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + **output** (torch.FloatTensor): Output of convolution layer `FloatTensor` of size + ``(batch, dimension, seq_length)`` + **output_lengths** (torch.LongTensor): The length of output tensor. ``(batch)`` + """ + outputs, output_lengths = self.conv(inputs), self._get_sequence_lengths(input_lengths) + outputs = self.batch_norm(outputs) + + if self.activation: + outputs = self.swish(outputs) + + return outputs, output_lengths + + def _get_sequence_lengths(self, seq_lengths): + return ( + (seq_lengths + 2 * self.conv.padding[0] + - self.conv.dilation[0] * (self.conv.kernel_size[0] - 1) - 1) // self.conv.stride[0] + 1 + ) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/conv2d_extractor.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conv2d_extractor.py new file mode 100644 index 000000000..fa604998c --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conv2d_extractor.py @@ -0,0 +1,107 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
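+
+# Length arithmetic used by the extractors below, worked on assumed numbers for orientation:
+# two stride-2 Conv2d layers with kernel size 3 and no padding shorten a 128-frame input to
+# (128 - 3) // 2 + 1 = 63 frames and then (63 - 3) // 2 + 1 = 31 frames, i.e. roughly 1/4 of
+# the original length, which is what get_output_lengths() computes per Conv2d module.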
+ +import math +import torch +import torch.nn as nn +from torch import Tensor +from typing import Tuple + +from openspeech.modules.swish import Swish +from openspeech.utils import get_class_name + + +class Conv2dExtractor(nn.Module): + r""" + Provides inteface of convolutional extractor. + + Note: + Do not use this class directly, use one of the sub classes. + Define the 'self.conv' class variable. + + Inputs: inputs, input_lengths + - **inputs** (batch, time, dim): Tensor containing input vectors + - **input_lengths**: Tensor containing containing sequence lengths + + Returns: outputs, output_lengths + - **outputs**: Tensor produced by the convolution + - **output_lengths**: Tensor containing sequence lengths produced by the convolution + """ + supported_activations = { + 'hardtanh': nn.Hardtanh(0, 20, inplace=True), + 'relu': nn.ReLU(inplace=True), + 'elu': nn.ELU(inplace=True), + 'leaky_relu': nn.LeakyReLU(inplace=True), + 'gelu': nn.GELU(), + 'swish': Swish(), + } + + def __init__(self, input_dim: int, activation: str = 'hardtanh') -> None: + super(Conv2dExtractor, self).__init__() + self.input_dim = input_dim + self.activation = Conv2dExtractor.supported_activations[activation] + self.conv = None + + def get_output_lengths(self, seq_lengths: torch.Tensor): + assert self.conv is not None, "self.conv should be defined" + + for module in self.conv: + if isinstance(module, nn.Conv2d): + numerator = seq_lengths + 2 * module.padding[1] - module.dilation[1] * (module.kernel_size[1] - 1) - 1 + seq_lengths = numerator.float() / float(module.stride[1]) + seq_lengths = seq_lengths.int() + 1 + + elif isinstance(module, nn.MaxPool2d): + seq_lengths >>= 1 + + return seq_lengths.int() + + def get_output_dim(self): + if get_class_name(self) == "VGGExtractor": + output_dim = (self.input_dim - 1) << 5 if self.input_dim % 2 else self.input_dim << 5 + + elif get_class_name(self) == "DeepSpeech2Extractor": + output_dim = int(math.floor(self.input_dim + 2 * 20 - 41) / 2 + 1) + output_dim = int(math.floor(output_dim + 2 * 10 - 21) / 2 + 1) + output_dim <<= 5 + + elif get_class_name(self) == "Conv2dSubsampling": + factor = ((self.input_dim - 1) // 2 - 1) // 2 + output_dim = self.out_channels * factor + + else: + raise ValueError(f"Unsupported Extractor : {self.extractor}") + + return output_dim + + def forward(self, inputs: Tensor, input_lengths: Tensor) -> Tuple[Tensor, Tensor]: + r""" + inputs: torch.FloatTensor (batch, time, dimension) + input_lengths: torch.IntTensor (batch) + """ + outputs, output_lengths = self.conv(inputs.unsqueeze(1).transpose(2, 3), input_lengths) + + batch_size, channels, dimension, seq_lengths = outputs.size() + outputs = outputs.permute(0, 3, 1, 2) + outputs = outputs.view(batch_size, seq_lengths, channels * dimension) + + return outputs, output_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/conv2d_subsampling.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conv2d_subsampling.py new file mode 100644 index 000000000..b5eac267f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conv2d_subsampling.py @@ -0,0 +1,71 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from typing import Tuple + +from openspeech.modules import Conv2dExtractor + + +class Conv2dSubsampling(Conv2dExtractor): + r""" + Convolutional 2D subsampling (to 1/4 length) + + Args: + input_dim (int): Dimension of input vector + in_channels (int): Number of channels in the input vector + out_channels (int): Number of channels produced by the convolution + activation (str): Activation function + + Inputs: inputs + - **inputs** (batch, time, dim): Tensor containing sequence of inputs + - **input_lengths** (batch): list of sequence input lengths + + Returns: outputs, output_lengths + - **outputs** (batch, time, dim): Tensor produced by the convolution + - **output_lengths** (batch): list of sequence output lengths + """ + def __init__( + self, + input_dim: int, + in_channels: int, + out_channels: int, + activation: str = 'relu', + ) -> None: + super(Conv2dSubsampling, self).__init__(input_dim, activation) + self.in_channels = in_channels + self.out_channels = out_channels + from openspeech.modules import MaskConv2d + self.conv = MaskConv2d( + nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2), + self.activation, + nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=2), + self.activation, + ) + ) + + def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + outputs, output_lengths = super().forward(inputs, input_lengths) + + return outputs, output_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/conv_base.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conv_base.py new file mode 100644 index 000000000..2747e97d6 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conv_base.py @@ -0,0 +1,38 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn + + +class BaseConv1d(nn.Module): + """ Base convolution module. """ + def __init__(self): + super(BaseConv1d, self).__init__() + + def _get_sequence_lengths(self, seq_lengths): + return ( + (seq_lengths + 2 * self.conv.padding[0] + - self.conv.dilation[0] * (self.conv.kernel_size[0] - 1) - 1) // self.conv.stride[0] + 1 + ) + + def forward(self, *args, **kwargs): + raise NotImplementedError diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/conv_group_shuffle.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conv_group_shuffle.py new file mode 100644 index 000000000..704bd6c9e --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/conv_group_shuffle.py @@ -0,0 +1,42 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from torch import Tensor + + +class ConvGroupShuffle(nn.Module): + """ Convolution group shuffle module. 
""" + def __init__(self, groups, channels): + super(ConvGroupShuffle, self).__init__() + self.groups = groups + self.channels_per_group = channels // groups + + def forward(self, x: Tensor): + dim = x.size(-1) + + x = x.view(-1, self.groups, self.channels_per_group, dim) + x = torch.transpose(x, 1, 2).contiguous() + y = x.view(-1, self.groups * self.channels_per_group, dim) + + return y diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/deepspeech2_extractor.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/deepspeech2_extractor.py new file mode 100644 index 000000000..2fcbba414 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/deepspeech2_extractor.py @@ -0,0 +1,73 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import torch +import torch.nn as nn +from typing import Tuple + +from openspeech.modules import Conv2dExtractor + + +class DeepSpeech2Extractor(Conv2dExtractor): + r""" + DeepSpeech2 extractor for automatic speech recognition described in + "Deep Speech 2: End-to-End Speech Recognition in English and Mandarin" paper + - https://arxiv.org/abs/1512.02595 + + Args: + input_dim (int): Dimension of input vector + in_channels (int): Number of channels in the input vector + out_channels (int): Number of channels produced by the convolution + activation (str): Activation function + + Inputs: inputs, input_lengths + - **inputs** (batch, time, dim): Tensor containing input vectors + - **input_lengths**: Tensor containing containing sequence lengths + + Returns: outputs, output_lengths + - **outputs**: Tensor produced by the convolution + - **output_lengths**: Tensor containing sequence lengths produced by the convolution + """ + def __init__( + self, + input_dim: int, + in_channels: int = 1, + out_channels: int = 32, + activation: str = 'hardtanh', + ) -> None: + super(DeepSpeech2Extractor, self).__init__(input_dim=input_dim, activation=activation) + self.in_channels = in_channels + self.out_channels = out_channels + from openspeech.modules import MaskConv2d + self.conv = MaskConv2d( + nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=(41, 11), stride=(2, 2), padding=(20, 5), bias=False), + nn.BatchNorm2d(out_channels), + self.activation, + nn.Conv2d(out_channels, out_channels, kernel_size=(21, 11), stride=(2, 1), padding=(10, 5), bias=False), + nn.BatchNorm2d(out_channels), + self.activation, + ) + ) + + def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + return super().forward(inputs, input_lengths) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/depthwise_conv1d.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/depthwise_conv1d.py new file mode 100644 index 000000000..37c0651c8 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/depthwise_conv1d.py @@ -0,0 +1,74 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
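For a concrete feel of the shapes, the sketch below (illustrative only, not part of the patched sources; an 80-dimensional filterbank input is assumed, and batch norm plus the activation are dropped since they do not change shapes) shows how the two convolutions in DeepSpeech2Extractor above reduce the frequency axis 80 -> 40 -> 20, so the flattened feature size is 20 * 32 = 640, in agreement with Conv2dExtractor.get_output_dim().

import math
import torch
import torch.nn as nn

input_dim, time = 80, 200
x = torch.randn(1, time, input_dim)    # (batch, time, dim) as fed to the extractor
x = x.unsqueeze(1).transpose(2, 3)     # (batch, 1, dim, time) as done inside Conv2dExtractor.forward

conv = nn.Sequential(
    nn.Conv2d(1, 32, kernel_size=(41, 11), stride=(2, 2), padding=(20, 5), bias=False),
    nn.Conv2d(32, 32, kernel_size=(21, 11), stride=(2, 1), padding=(10, 5), bias=False),
)
out = conv(x)
print(out.shape)                       # torch.Size([1, 32, 20, 100])
print(32 * out.size(2))                # 640

# The closed form used by Conv2dExtractor.get_output_dim() gives the same number.
d = int(math.floor(input_dim + 2 * 20 - 41) / 2 + 1)
d = int(math.floor(d + 2 * 10 - 21) / 2 + 1)
print(d << 5)                          # 640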
+ +import torch.nn as nn +from torch import Tensor +from typing import Optional + +from openspeech.modules.conv_base import BaseConv1d + + +class DepthwiseConv1d(BaseConv1d): + r""" + When groups == in_channels and out_channels == K * in_channels, where K is a positive integer, + this operation is termed in literature as depthwise convolution. + + Args: + in_channels (int): Number of channels in the input + out_channels (int): Number of channels produced by the convolution + kernel_size (int or tuple): Size of the convolving kernel + stride (int, optional): Stride of the convolution. Default: 1 + padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0 + bias (bool, optional): If True, adds a learnable bias to the output. Default: True + + Inputs: inputs + - **inputs** (batch, in_channels, time): Tensor containing input vector + + Returns: outputs + - **outputs** (batch, out_channels, time): Tensor produces by depthwise 1-D convolution. + """ + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + padding: int = 0, + bias: bool = False, + ) -> None: + super(DepthwiseConv1d, self).__init__() + assert out_channels % in_channels == 0, "out_channels should be constant multiple of in_channels" + self.conv = nn.Conv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + groups=in_channels, + stride=stride, + padding=padding, + bias=bias, + ) + + def forward(self, inputs: Tensor, input_lengths: Optional[Tensor] = None) -> Tensor: + if input_lengths is None: + return self.conv(inputs) + else: + return self.conv(inputs), self._get_sequence_lengths(input_lengths) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/dot_product_attention.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/dot_product_attention.py new file mode 100644 index 000000000..831d209f0 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/dot_product_attention.py @@ -0,0 +1,80 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
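The point of DepthwiseConv1d above is that groups=in_channels filters every channel independently, which shrinks the weight tensor by a factor of in_channels relative to a dense nn.Conv1d of the same shape. A minimal sketch (not part of the patched sources; channel count and kernel size are arbitrary):

import torch
import torch.nn as nn

in_channels, kernel_size = 256, 31
padding = (kernel_size - 1) // 2

depthwise = nn.Conv1d(in_channels, in_channels, kernel_size,
                      padding=padding, groups=in_channels, bias=False)
dense = nn.Conv1d(in_channels, in_channels, kernel_size,
                  padding=padding, bias=False)

x = torch.randn(4, in_channels, 120)                   # (batch, channels, time)
print(depthwise(x).shape)                              # torch.Size([4, 256, 120])
print(depthwise.weight.numel(), dense.weight.numel())  # 7936 2031616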
+ +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from torch import Tensor +from typing import Tuple, Optional + + +class DotProductAttention(nn.Module): + r""" + Scaled Dot-Product Attention proposed in "Attention Is All You Need" + Compute the dot products of the query with all keys, divide each by sqrt(dim), + and apply a softmax function to obtain the weights on the values + + Args: dim, mask + dim (int): dimension of attention + mask (torch.Tensor): tensor containing indices to be masked + + Inputs: query, key, value, mask + - **query** (batch, q_len, d_model): tensor containing projection vector for decoders. + - **key** (batch, k_len, d_model): tensor containing projection vector for encoders. + - **value** (batch, v_len, d_model): tensor containing features of the encoded input sequence. + - **mask** (-): tensor containing indices to be masked + + Returns: context, attn + - **context**: tensor containing the context vector from attention mechanism. + - **attn**: tensor containing the attention (alignment) from the encoders outputs. + """ + def __init__(self, dim: int, scale: bool = True) -> None: + super(DotProductAttention, self).__init__() + if scale: + self.sqrt_dim = np.sqrt(dim) + else: + self.sqrt_dim = 1 + + def forward( + self, + query: Tensor, + key: Tensor, + value: Tensor, + mask: Optional[Tensor] = None, + ) -> Tuple[Tensor, Tensor]: + if len(query.size()) == 3: + score = torch.bmm(query, key.transpose(1, 2)) / self.sqrt_dim + else: + score = torch.matmul(query, key.transpose(2, 3)) / self.sqrt_dim + + if mask is not None: + score.masked_fill_(mask, -1e4) + + attn = F.softmax(score, -1) + + if len(query.size()) == 3: + context = torch.bmm(attn, value) + else: + context = torch.matmul(attn, value) + + return context, attn diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/glu.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/glu.py new file mode 100644 index 000000000..3e670d6f2 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/glu.py @@ -0,0 +1,38 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
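A minimal usage sketch of DotProductAttention above (not part of the patched sources; it assumes the openspeech package is importable and the sizes are arbitrary): masked key positions have their scores filled with -1e4 before the softmax, so they receive essentially zero attention weight.

import torch
from openspeech.modules.dot_product_attention import DotProductAttention

batch, q_len, kv_len, dim = 2, 5, 7, 64
attention = DotProductAttention(dim, scale=True)

query = torch.randn(batch, q_len, dim)
key = torch.randn(batch, kv_len, dim)
value = torch.randn(batch, kv_len, dim)

# Mark the last two key positions of every sequence as padding.
mask = torch.zeros(batch, q_len, kv_len, dtype=torch.bool)
mask[:, :, -2:] = True

context, attn = attention(query, key, value, mask)
print(context.shape, attn.shape)    # torch.Size([2, 5, 64]) torch.Size([2, 5, 7])
print(attn[..., -2:].max().item())  # ~0.0: padded positions get (almost) no weight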
+ +import torch.nn as nn +from torch import Tensor + + +class GLU(nn.Module): + r""" + The gating mechanism is called Gated Linear Units (GLU), which was first introduced for natural language processing + in the paper “Language Modeling with Gated Convolutional Networks” + """ + def __init__(self, dim: int) -> None: + super(GLU, self).__init__() + self.dim = dim + + def forward(self, inputs: Tensor) -> Tensor: + outputs, gate = inputs.chunk(2, dim=self.dim) + return outputs * gate.sigmoid() diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/jasper_block.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/jasper_block.py new file mode 100644 index 000000000..26fc7fedf --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/jasper_block.py @@ -0,0 +1,109 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor +from typing import Tuple + +from openspeech.modules.jasper_subblock import JasperSubBlock + + +class JasperBlock(nn.Module): + r""" + Jasper Block: The Jasper Block consists of R Jasper sub-block. + + Args: + num_sub_blocks (int): number of sub block + in_channels (int): number of channels in the input feature + out_channels (int): number of channels produced by the convolution + kernel_size (int): size of the convolving kernel + stride (int): stride of the convolution. (default: 1) + dilation (int): spacing between kernel elements. (default: 1) + bias (bool): if True, adds a learnable bias to the output. 
(default: True) + dropout_p (float): probability of dropout + activation (str): activation function + + Inputs: inputs, input_lengths, residual + - **inputs**: tensor contains input sequence vector + - **input_lengths**: tensor contains sequence lengths + - **residual**: tensor contains residual vector + + Returns: output, output_lengths + (torch.FloatTensor, torch.LongTensor) + + * output (torch.FloatTensor): tensor contains output sequence vector + * output_lengths (torch.LongTensor): tensor contains output sequence lengths + """ + def __init__( + self, + num_sub_blocks: int, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + dilation: int = 1, + bias: bool = True, + dropout_p: float = 0.2, + activation: str = 'relu', + ) -> None: + super(JasperBlock, self).__init__() + padding = self._get_same_padding(kernel_size, stride, dilation) + self.layers = nn.ModuleList([ + JasperSubBlock( + in_channels=in_channels if i == 0 else out_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding, + bias=bias, + dropout_p=dropout_p, + activation=activation, + ) for i in range(num_sub_blocks) + ]) + + def _get_same_padding(self, kernel_size: int, stride: int, dilation: int): + if stride > 1 and dilation > 1: + raise ValueError("Only stride OR dilation may be greater than 1") + return (kernel_size // 2) * dilation + + def forward(self, inputs: Tensor, input_lengths: Tensor, residual: Tensor) -> Tuple[Tensor, Tensor]: + r""" + Forward propagate of jasper block. + + Inputs: inputs, input_lengths, residual + - **inputs**: tensor contains input sequence vector + - **input_lengths**: tensor contains sequence lengths + - **residual**: tensor contains residual vector + + Returns: output, output_lengths + (torch.FloatTensor, torch.LongTensor) + + * output (torch.FloatTensor): tensor contains output sequence vector + * output_lengths (torch.LongTensor): tensor contains output sequence lengths + """ + for layer in self.layers[:-1]: + inputs, input_lengths = layer(inputs, input_lengths) + + outputs, output_lengths = self.layers[-1](inputs, input_lengths, residual) + + return outputs, output_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/jasper_subblock.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/jasper_subblock.py new file mode 100644 index 000000000..ae551d609 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/jasper_subblock.py @@ -0,0 +1,115 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from typing import Tuple, Optional +from torch import Tensor + +from openspeech.modules.mask_conv1d import MaskConv1d + + +class JasperSubBlock(nn.Module): + r""" + Jasper sub-block applies the following operations: a 1D-convolution, batch norm, ReLU, and dropout. + + Args: + in_channels (int): number of channels in the input feature + out_channels (int): number of channels produced by the convolution + kernel_size (int): size of the convolving kernel + stride (int): stride of the convolution. (default: 1) + dilation (int): spacing between kernel elements. (default: 1) + padding (int): zero-padding added to both sides of the input. (default: 0) + bias (bool): if True, adds a learnable bias to the output. (default: False) + dropout_p (float): probability of dropout + activation (str): activation function + + Inputs: inputs, input_lengths, residual + - **inputs**: tensor contains input sequence vector + - **input_lengths**: tensor contains sequence lengths + - **residual**: tensor contains residual vector + + Returns: output, output_lengths + * output (torch.FloatTensor): tensor contains output sequence vector + * output_lengths (torch.LongTensor): tensor contains output sequence lengths + """ + supported_activations = { + 'hardtanh': nn.Hardtanh(0, 20, inplace=True), + 'relu': nn.ReLU(inplace=True), + 'elu': nn.ELU(inplace=True), + 'leaky_relu': nn.LeakyReLU(inplace=True), + 'gelu': nn.GELU(), + } + + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + dilation: int = 1, + padding: int = 0, + bias: bool = False, + dropout_p: float = 0.2, + activation: str = 'relu', + ) -> None: + super(JasperSubBlock, self).__init__() + + self.conv = MaskConv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias=bias, + dilation=dilation, + ) + self.batch_norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.1) + self.activation = self.supported_activations[activation] + self.dropout = nn.Dropout(p=dropout_p) + + def forward( + self, + inputs: Tensor, + input_lengths: Tensor, + residual: Optional[Tensor] = None, + ) -> Tuple[Tensor, Tensor]: + r""" + Forward propagate of conformer's subblock. 
+ + Inputs: inputs, input_lengths, residual + - **inputs**: tensor contains input sequence vector + - **input_lengths**: tensor contains sequence lengths + - **residual**: tensor contains residual vector + + Returns: output, output_lengths + * output (torch.FloatTensor): tensor contains output sequence vector + * output_lengths (torch.LongTensor): tensor contains output sequence lengths + """ + outputs, output_lengths = self.conv(inputs, input_lengths) + outputs = self.batch_norm(outputs) + + if residual is not None: + outputs += residual + + outputs = self.dropout(self.activation(outputs)) + + return outputs, output_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/location_aware_attention.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/location_aware_attention.py new file mode 100644 index 000000000..dffd00cfe --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/location_aware_attention.py @@ -0,0 +1,92 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from typing import Tuple + +from openspeech.modules.wrapper import Linear + + +class LocationAwareAttention(nn.Module): + r""" + Applies a location-aware attention mechanism on the output features from the decoders. + Location-aware attention proposed in "Attention-Based Models for Speech Recognition" paper. + The location-aware attention mechanism is performing well in speech recognition tasks. + We refer to implementation of ClovaCall Attention style. + + Args: + dim (int): dimension of model + attn_dim (int): dimension of attention + smoothing (bool): flag indication whether to use smoothing or not. + + Inputs: query, value, last_attn + - **query** (batch, q_len, hidden_dim): tensor containing the output features from the decoders. + - **value** (batch, v_len, hidden_dim): tensor containing features of the encoded input sequence. + - **last_attn** (batch_size, v_len): tensor containing previous timestep`s attention (alignment) + + Returns: output, attn + - **output** (batch, output_len, dimensions): tensor containing the feature from encoders outputs + - **attn** (batch * num_heads, v_len): tensor containing the attention (alignment) from the encoders outputs. + + Reference: + Jan Chorowski et al.: Attention-Based Models for Speech Recognition. 
+ https://arxiv.org/abs/1506.07503 + """ + def __init__(self, dim: int = 1024, attn_dim: int = 1024, smoothing: bool = False) -> None: + super(LocationAwareAttention, self).__init__() + self.location_conv = nn.Conv1d(in_channels=1, out_channels=attn_dim, kernel_size=3, padding=1) + self.query_proj = Linear(dim, attn_dim, bias=False) + self.value_proj = Linear(dim, attn_dim, bias=False) + self.bias = nn.Parameter(torch.rand(attn_dim).uniform_(-0.1, 0.1)) + self.fc = Linear(attn_dim, 1, bias=True) + self.smoothing = smoothing + + def forward(self, query: Tensor, value: Tensor, last_alignment_energy: Tensor) -> Tuple[Tensor, Tensor]: + batch_size, hidden_dim, seq_length = query.size(0), query.size(2), value.size(1) + + if last_alignment_energy is None: + last_alignment_energy = value.new_zeros(batch_size, seq_length) + + last_alignment_energy = self.location_conv(last_alignment_energy.unsqueeze(dim=1)) + last_alignment_energy = last_alignment_energy.transpose(1, 2) + + alignmment_energy = self.fc(torch.tanh( + self.query_proj(query) + + self.value_proj(value) + + last_alignment_energy + + self.bias + )).squeeze(dim=-1) + + if self.smoothing: + alignmment_energy = torch.sigmoid(alignmment_energy) + alignmment_energy = torch.div(alignmment_energy, alignmment_energy.sum(dim=-1).unsqueeze(dim=-1)) + + else: + alignmment_energy = F.softmax(alignmment_energy, dim=-1) + + context = torch.bmm(alignmment_energy.unsqueeze(dim=1), value) + + return context, alignmment_energy + diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/mask.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/mask.py new file mode 100644 index 000000000..00e1268fa --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/mask.py @@ -0,0 +1,60 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
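A minimal usage sketch of LocationAwareAttention above (not part of the patched sources; it assumes the openspeech package is importable and the sizes are arbitrary): the decoder attends one step at a time (q_len == 1), passes None for the alignment on the first step, and feeds each alignment back into the next step, which is what makes the mechanism location aware.

import torch
from openspeech.modules.location_aware_attention import LocationAwareAttention

batch, v_len, dim = 2, 50, 512
attention = LocationAwareAttention(dim=dim, attn_dim=dim, smoothing=False)

query = torch.randn(batch, 1, dim)      # current decoder hidden state
value = torch.randn(batch, v_len, dim)  # encoder outputs

context, alignment = attention(query, value, None)
print(context.shape, alignment.shape)   # torch.Size([2, 1, 512]) torch.Size([2, 50])

# The next step re-uses the previous alignment as the "location" signal.
context, alignment = attention(query, value, alignment)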
+ +import torch +from torch import Tensor + + +def get_attn_pad_mask(inputs, input_lengths, expand_length): + """ mask position is set to 1 """ + + def get_transformer_non_pad_mask(inputs: Tensor, input_lengths: Tensor) -> Tensor: + """ Padding position is set to 0, either use input_lengths or pad_id """ + batch_size = inputs.size(0) + + if len(inputs.size()) == 2: + non_pad_mask = inputs.new_ones(inputs.size()) # B x T + elif len(inputs.size()) == 3: + non_pad_mask = inputs.new_ones(inputs.size()[:-1]) # B x T + else: + raise ValueError(f"Unsupported input shape {inputs.size()}") + + for i in range(batch_size): + non_pad_mask[i, input_lengths[i]:] = 0 + + return non_pad_mask + + non_pad_mask = get_transformer_non_pad_mask(inputs, input_lengths) + pad_mask = non_pad_mask.lt(1) + attn_pad_mask = pad_mask.unsqueeze(1).expand(-1, expand_length, -1) + return attn_pad_mask + + +def get_attn_subsequent_mask(seq): + assert seq.dim() == 2 + attn_shape = [seq.size(0), seq.size(1), seq.size(1)] + subsequent_mask = torch.triu(torch.ones(attn_shape), diagonal=1) + + if seq.is_cuda: + subsequent_mask = subsequent_mask.cuda() + + return subsequent_mask diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/mask_conv1d.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/mask_conv1d.py new file mode 100644 index 000000000..dad6c35fa --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/mask_conv1d.py @@ -0,0 +1,89 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from torch import Tensor +from typing import Tuple + + +class MaskConv1d(nn.Conv1d): + r""" + 1D convolution with masking + + Args: + in_channels (int): Number of channels in the input vector + out_channels (int): Number of channels produced by the convolution + kernel_size (int or tuple): Size of the convolving kernel + stride (int): Stride of the convolution. Default: 1 + padding (int): Zero-padding added to both sides of the input. Default: 0 + dilation (int): Spacing between kernel elements. Default: 1 + groups (int): Number of blocked connections from input channels to output channels. Default: 1 + bias (bool): If True, adds a learnable bias to the output. 
Default: True + + Inputs: inputs, seq_lengths + - **inputs** (torch.FloatTensor): The input of size (batch, dimension, time) + - **seq_lengths** (torch.IntTensor): The actual length of each sequence in the batch + + Returns: output, seq_lengths + - **output**: Masked output from the conv1d + - **seq_lengths**: Sequence length of output from the conv1d + """ + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + padding: int = 0, + dilation: int = 1, + groups: int = 1, + bias: bool = False, + ) -> None: + super(MaskConv1d, self).__init__(in_channels=in_channels, out_channels=out_channels, + kernel_size=kernel_size, stride=stride, padding=padding, + dilation=dilation, groups=groups, bias=bias) + + def _get_sequence_lengths(self, seq_lengths): + return ( + (seq_lengths + 2 * self.padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) // self.stride[0] + 1 + ) + + def forward(self, inputs: Tensor, input_lengths: Tensor) -> Tuple[Tensor, Tensor]: + r""" + inputs: (batch, dimension, time) + input_lengths: (batch) + """ + max_length = inputs.size(2) + + indices = torch.arange(max_length).to(input_lengths.dtype).to(input_lengths.device) + indices = indices.expand(len(input_lengths), max_length) + + mask = indices >= input_lengths.unsqueeze(1) + inputs = inputs.masked_fill(mask.unsqueeze(1).to(device=inputs.device), 0) + + output_lengths = self._get_sequence_lengths(input_lengths) + output = super(MaskConv1d, self).forward(inputs) + + del mask, indices + + return output, output_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/mask_conv2d.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/mask_conv2d.py new file mode 100644 index 000000000..6bbdad6c6 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/mask_conv2d.py @@ -0,0 +1,98 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from torch import Tensor +from typing import Tuple + + +class MaskConv2d(nn.Module): + r""" + Masking Convolutional Neural Network + + Adds padding to the output of the module based on the given lengths. + This is to ensure that the results of the model do not change when batch sizes change during inference. 
+ Input needs to be in the shape of (batch_size, channel, hidden_dim, seq_len) + + Refer to https://github.com/SeanNaren/deepspeech.pytorch/blob/master/model.py + Copyright (c) 2017 Sean Naren + MIT License + + Args: + sequential (torch.nn): sequential list of convolution layer + + Inputs: inputs, seq_lengths + - **inputs** (torch.FloatTensor): The input of size BxCxHxT + - **seq_lengths** (torch.IntTensor): The actual length of each sequence in the batch + + Returns: output, seq_lengths + - **output**: Masked output from the sequential + - **seq_lengths**: Sequence length of output from the sequential + """ + def __init__(self, sequential: nn.Sequential) -> None: + super(MaskConv2d, self).__init__() + self.sequential = sequential + + def forward(self, inputs: Tensor, seq_lengths: Tensor) -> Tuple[Tensor, Tensor]: + output = None + + for module in self.sequential: + output = module(inputs) + mask = torch.BoolTensor(output.size()).fill_(0) + + if output.is_cuda: + mask = mask.cuda() + + seq_lengths = self._get_sequence_lengths(module, seq_lengths) + + for idx, length in enumerate(seq_lengths): + length = length.item() + + if (mask[idx].size(2) - length) > 0: + mask[idx].narrow(dim=2, start=length, length=mask[idx].size(2) - length).fill_(1) + + output = output.masked_fill(mask, 0) + inputs = output + + return output, seq_lengths + + def _get_sequence_lengths(self, module: nn.Module, seq_lengths: Tensor) -> Tensor: + r""" + Calculate convolutional neural network receptive formula + + Args: + module (torch.nn.Module): module of CNN + seq_lengths (torch.IntTensor): The actual length of each sequence in the batch + + Returns: seq_lengths + - **seq_lengths**: Sequence length of output from the module + """ + if isinstance(module, nn.Conv2d): + numerator = seq_lengths + 2 * module.padding[1] - module.dilation[1] * (module.kernel_size[1] - 1) - 1 + seq_lengths = numerator.float() / float(module.stride[1]) + seq_lengths = seq_lengths.int() + 1 + + elif isinstance(module, nn.MaxPool2d): + seq_lengths >>= 1 + + return seq_lengths.int() diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/multi_head_attention.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/multi_head_attention.py new file mode 100644 index 000000000..ca84ef505 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/multi_head_attention.py @@ -0,0 +1,89 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor +from typing import Tuple, Optional + +from openspeech.modules.wrapper import Linear +from openspeech.modules.dot_product_attention import DotProductAttention + + +class MultiHeadAttention(nn.Module): + r""" + Multi-Head Attention proposed in "Attention Is All You Need" + Instead of performing a single attention function with d_model-dimensional keys, values, and queries, + project the queries, keys and values h times with different, learned linear projections to d_head dimensions. + These are concatenated and once again projected, resulting in the final values. + Multi-head attention allows the model to jointly attend to information from different representation + subspaces at different positions. + + MultiHead(Q, K, V) = Concat(head_1, ..., head_h) · W_o + where head_i = Attention(Q · W_q, K · W_k, V · W_v) + + Args: + dim (int): The dimension of model (default: 512) + num_heads (int): The number of attention heads. (default: 8) + + Inputs: query, key, value, mask + - **query** (batch, q_len, d_model): tensor containing projection vector for decoders. + - **key** (batch, k_len, d_model): tensor containing projection vector for encoders. + - **value** (batch, v_len, d_model): tensor containing features of the encoded input sequence. + - **mask** (-): tensor containing indices to be masked + + Returns: output, attn + - **output** (batch, output_len, dimensions): tensor containing the attended output features. + - **attn** (batch * num_heads, v_len): tensor containing the attention (alignment) from the encoders outputs. + """ + def __init__(self, dim: int = 512, num_heads: int = 8) -> None: + super(MultiHeadAttention, self).__init__() + + assert dim % num_heads == 0, "hidden_dim % num_heads should be zero." 
+ + self.d_head = int(dim / num_heads) + self.num_heads = num_heads + self.query_proj = Linear(dim, self.d_head * num_heads) + self.key_proj = Linear(dim, self.d_head * num_heads) + self.value_proj = Linear(dim, self.d_head * num_heads) + self.scaled_dot_attn = DotProductAttention(dim, scale=True) + + def forward( + self, + query: Tensor, + key: Tensor, + value: Tensor, + mask: Optional[Tensor] = None, + ) -> Tuple[Tensor, Tensor]: + batch_size = value.size(0) + + query = self.query_proj(query).view(batch_size, -1, self.num_heads, self.d_head).transpose(1, 2) + key = self.key_proj(key).view(batch_size, -1, self.num_heads, self.d_head).transpose(1, 2) + value = self.value_proj(value).view(batch_size, -1, self.num_heads, self.d_head).transpose(1, 2) + + if mask is not None: + mask = mask.unsqueeze(1).repeat(1, self.num_heads, 1, 1) + + context, attn = self.scaled_dot_attn(query, key, value, mask) + + context = context.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.d_head) + + return context, attn diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/pointwise_conv1d.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/pointwise_conv1d.py new file mode 100644 index 000000000..d272025ac --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/pointwise_conv1d.py @@ -0,0 +1,66 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor + +from openspeech.modules.conv_base import BaseConv1d + + +class PointwiseConv1d(BaseConv1d): + r""" + When kernel size == 1 conv1d, this operation is termed in literature as pointwise convolution. + This operation often used to match dimensions. + + Args: + in_channels (int): Number of channels in the input + out_channels (int): Number of channels produced by the convolution + stride (int, optional): Stride of the convolution. Default: 1 + padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0 + bias (bool, optional): If True, adds a learnable bias to the output. Default: True + + Inputs: inputs + - **inputs** (batch, in_channels, time): Tensor containing input vector + + Returns: outputs + - **outputs** (batch, out_channels, time): Tensor produces by pointwise 1-D convolution. 
+ """ + def __init__( + self, + in_channels: int, + out_channels: int, + stride: int = 1, + padding: int = 0, + bias: bool = True, + ) -> None: + super(PointwiseConv1d, self).__init__() + self.conv = nn.Conv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=stride, + padding=padding, + bias=bias, + ) + + def forward(self, inputs: Tensor) -> Tensor: + return self.conv(inputs) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/positional_encoding.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/positional_encoding.py new file mode 100644 index 000000000..445759d5d --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/positional_encoding.py @@ -0,0 +1,50 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import math +import torch +import torch.nn as nn +from torch import Tensor + + +class PositionalEncoding(nn.Module): + r""" + Positional Encoding proposed in "Attention Is All You Need". + Since transformer contains no recurrence and no convolution, in order for the model to make + use of the order of the sequence, we must add some positional information. 
+ + "Attention Is All You Need" use sine and cosine functions of different frequencies: + PE_(pos, 2i) = sin(pos / power(10000, 2i / d_model)) + PE_(pos, 2i+1) = cos(pos / power(10000, 2i / d_model)) + """ + def __init__(self, d_model: int = 512, max_len: int = 5000) -> None: + super(PositionalEncoding, self).__init__() + pe = torch.zeros(max_len, d_model, requires_grad=False) + position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) + div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0) + self.register_buffer('pe', pe) + + def forward(self, length: int) -> Tensor: + return self.pe[:, :length] diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/positionwise_feed_forward.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/positionwise_feed_forward.py new file mode 100644 index 000000000..9aaa8c36b --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/positionwise_feed_forward.py @@ -0,0 +1,47 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor + +from openspeech.modules.wrapper import Linear + + +class PositionwiseFeedForward(nn.Module): + """ + Position-wise Feedforward Networks proposed in "Attention Is All You Need". + Fully connected feed-forward network, which is applied to each position separately and identically. + This consists of two linear transformations with a ReLU activation in between. + Another way of describing this is as two convolutions with kernel size 1. 
+ """ + def __init__(self, d_model: int = 512, d_ff: int = 2048, dropout_p: float = 0.3) -> None: + super(PositionwiseFeedForward, self).__init__() + self.feed_forward = nn.Sequential( + Linear(d_model, d_ff), + nn.Dropout(dropout_p), + nn.ReLU(), + Linear(d_ff, d_model), + nn.Dropout(dropout_p), + ) + + def forward(self, inputs: Tensor) -> Tensor: + return self.feed_forward(inputs) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/quartznet_block.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/quartznet_block.py new file mode 100644 index 000000000..81d0b9c42 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/quartznet_block.py @@ -0,0 +1,104 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from typing import Tuple + +from openspeech.modules.pointwise_conv1d import PointwiseConv1d +from openspeech.modules.quartznet_subblock import QuartzNetSubBlock + + +class QuartzNetBlock(nn.Module): + r""" + QuartzNet’s design is based on the Jasper architecture, which is a convolutional model trained with + Connectionist Temporal Classification (CTC) loss. The main novelty in QuartzNet’s architecture is that QuartzNet + replaced the 1D convolutions with 1D time-channel separable convolutions, an implementation of depthwise separable + convolutions. 
+ + Inputs: inputs, input_lengths + inputs (torch.FloatTensor): tensor contains input sequence vector + input_lengths (torch.LongTensor): tensor contains sequence lengths + + Returns: output, output_lengths + (torch.FloatTensor, torch.LongTensor) + + * output (torch.FloatTensor): tensor contains output sequence vector + * output_lengths (torch.LongTensor): tensor contains output sequence lengths + """ + supported_activations = { + 'hardtanh': nn.Hardtanh(0, 20, inplace=True), + 'relu': nn.ReLU(inplace=True), + 'elu': nn.ELU(inplace=True), + 'leaky_relu': nn.LeakyReLU(inplace=True), + 'gelu': nn.GELU(), + } + + def __init__( + self, + num_sub_blocks: int, + in_channels: int, + out_channels: int, + kernel_size: int, + bias: bool = True, + ) -> None: + super(QuartzNetBlock, self).__init__() + padding = self._get_same_padding(kernel_size, stride=1, dilation=1) + self.layers = nn.ModuleList([ + QuartzNetSubBlock( + in_channels=in_channels if i == 0 else out_channels, + out_channels=out_channels, + kernel_size=kernel_size, + padding=padding, + bias=bias, + ) for i in range(num_sub_blocks) + ]) + self.conv1x1 = PointwiseConv1d(in_channels, out_channels) + self.batch_norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.1) + + def _get_same_padding(self, kernel_size: int, stride: int, dilation: int): + if stride > 1 and dilation > 1: + raise ValueError("Only stride OR dilation may be greater than 1") + return (kernel_size // 2) * dilation + + def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + r""" + Forward propagate of QuartzNet block. + + Inputs: inputs, input_lengths + inputs (torch.FloatTensor): tensor contains input sequence vector + input_lengths (torch.LongTensor): tensor contains sequence lengths + + Returns: output, output_lengths + (torch.FloatTensor, torch.LongTensor) + + * output (torch.FloatTensor): tensor contains output sequence vector + * output_lengths (torch.LongTensor): tensor contains output sequence lengths + """ + residual = self.batch_norm(self.conv1x1(inputs)) + + for layer in self.layers[:-1]: + inputs, input_lengths = layer(inputs, input_lengths) + + outputs, output_lengths = self.layers[-1](inputs, input_lengths, residual) + + return outputs, output_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/quartznet_subblock.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/quartznet_subblock.py new file mode 100644 index 000000000..2e170c86f --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/quartznet_subblock.py @@ -0,0 +1,96 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from typing import Optional, Tuple + +from openspeech.modules.depthwise_conv1d import DepthwiseConv1d +from openspeech.modules.time_channel_separable_conv1d import TimeChannelSeparableConv1d +from openspeech.modules.conv_group_shuffle import ConvGroupShuffle + + +class QuartzNetSubBlock(nn.Module): + r""" + QuartzNet sub-block applies the following operations: a 1D-convolution, batch norm, ReLU, and dropout. + + Args: + in_channels (int): number of channels in the input feature + out_channels (int): number of channels produced by the convolution + kernel_size (int): size of the convolving kernel + padding (int): zero-padding added to both sides of the input. (default: 0) + bias (bool): if True, adds a learnable bias to the output. (default: False) + + Inputs: inputs, input_lengths, residual + - **inputs**: tensor contains input sequence vector + - **input_lengths**: tensor contains sequence lengths + - **residual**: tensor contains residual vector + + Returns: output, output_lengths + * output (torch.FloatTensor): tensor contains output sequence vector + * output_lengths (torch.LongTensor): tensor contains output sequence lengths + """ + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + bias: bool = False, + padding: int = 0, + groups: int = 1, + ) -> None: + super(QuartzNetSubBlock, self).__init__() + self.depthwise_conf1d = DepthwiseConv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + padding=padding, + ) + self.tcs_conv = TimeChannelSeparableConv1d( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=1, + padding=0, + groups=groups, + bias=bias, + ) + self.group_shuffle = ConvGroupShuffle(groups, out_channels) + self.batch_norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.1) + self.relu = nn.ReLU() + + def forward( + self, + inputs: torch.Tensor, + input_lengths: torch.Tensor, + residual: Optional[torch.Tensor] = None + ) -> Tuple[torch.Tensor, torch.Tensor]: + outputs, output_lengths = self.depthwise_conf1d(inputs, input_lengths) + outputs, output_lengths = self.tcs_conv(outputs, output_lengths) + outputs = self.group_shuffle(outputs) + outputs = self.batch_norm(outputs) + + if residual is not None: + outputs += residual + + outputs = self.relu(outputs) + + return outputs, output_lengths diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/relative_multi_head_attention.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/relative_multi_head_attention.py new file mode 100644 index 000000000..6dafad1c3 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/relative_multi_head_attention.py @@ -0,0 +1,121 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following 
conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from typing import Optional + +from openspeech.modules.wrapper import Linear + + +class RelativeMultiHeadAttention(nn.Module): + r""" + Multi-head attention with relative positional encoding. + This concept was proposed in the "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" + + Args: + dim (int): The dimension of model + num_heads (int): The number of attention heads. + dropout_p (float): probability of dropout + + Inputs: query, key, value, pos_embedding, mask + - **query** (batch, time, dim): Tensor containing query vector + - **key** (batch, time, dim): Tensor containing key vector + - **value** (batch, time, dim): Tensor containing value vector + - **pos_embedding** (batch, time, dim): Positional embedding tensor + - **mask** (batch, 1, time2) or (batch, time1, time2): Tensor containing indices to be masked + + Returns: + - **outputs**: Tensor produces by relative multi head attention module. + """ + def __init__( + self, + dim: int = 512, + num_heads: int = 16, + dropout_p: float = 0.1, + ) -> None: + super(RelativeMultiHeadAttention, self).__init__() + assert dim % num_heads == 0, "d_model % num_heads should be zero." 
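+        # u_bias and v_bias are the learned global biases from Transformer-XL:
+        # u_bias is added to the query for the content-based attention score and
+        # v_bias for the position-based score (see forward below).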
+ + self.dim = dim + self.d_head = int(dim / num_heads) + self.num_heads = num_heads + self.sqrt_dim = math.sqrt(dim) + + self.query_proj = Linear(dim, dim) + self.key_proj = Linear(dim, dim) + self.value_proj = Linear(dim, dim) + self.pos_proj = Linear(dim, dim, bias=False) + + self.dropout = nn.Dropout(p=dropout_p) + self.u_bias = nn.Parameter(torch.Tensor(self.num_heads, self.d_head)) + self.v_bias = nn.Parameter(torch.Tensor(self.num_heads, self.d_head)) + torch.nn.init.xavier_uniform_(self.u_bias) + torch.nn.init.xavier_uniform_(self.v_bias) + + self.out_proj = Linear(dim, dim) + + def forward( + self, + query: Tensor, + key: Tensor, + value: Tensor, + pos_embedding: Tensor, + mask: Optional[Tensor] = None, + ) -> Tensor: + batch_size = value.size(0) + + query = self.query_proj(query).view(batch_size, -1, self.num_heads, self.d_head) + key = self.key_proj(key).view(batch_size, -1, self.num_heads, self.d_head).permute(0, 2, 1, 3) + value = self.value_proj(value).view(batch_size, -1, self.num_heads, self.d_head).permute(0, 2, 1, 3) + pos_embedding = self.pos_proj(pos_embedding).view(batch_size, -1, self.num_heads, self.d_head) + + content_score = torch.matmul((query + self.u_bias).transpose(1, 2), key.transpose(2, 3)) + pos_score = torch.matmul((query + self.v_bias).transpose(1, 2), pos_embedding.permute(0, 2, 3, 1)) + pos_score = self._relative_shift(pos_score) + + score = (content_score + pos_score) / self.sqrt_dim + + if mask is not None: + mask = mask.unsqueeze(1) + score.masked_fill_(mask, -1e4) + + attn = F.softmax(score, -1) + attn = self.dropout(attn) + + context = torch.matmul(attn, value).transpose(1, 2) + context = context.contiguous().view(batch_size, -1, self.dim) + + return self.out_proj(context) + + def _relative_shift(self, pos_score: Tensor) -> Tensor: + batch_size, num_heads, seq_length1, seq_length2 = pos_score.size() + zeros = pos_score.new_zeros(batch_size, num_heads, seq_length1, 1) + padded_pos_score = torch.cat([zeros, pos_score], dim=-1) + + padded_pos_score = padded_pos_score.view(batch_size, num_heads, seq_length2 + 1, seq_length1) + pos_score = padded_pos_score[:, :, 1:].view_as(pos_score) + + return pos_score diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/residual_connection_module.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/residual_connection_module.py new file mode 100644 index 000000000..2bb2178b4 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/residual_connection_module.py @@ -0,0 +1,48 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor +from typing import Optional + + +class ResidualConnectionModule(nn.Module): + r""" + Residual Connection Module. + outputs = (module(inputs) x module_factor + inputs x input_factor) + """ + def __init__( + self, + module: nn.Module, + module_factor: float = 1.0, + input_factor: float = 1.0, + ) -> None: + super(ResidualConnectionModule, self).__init__() + self.module = module + self.module_factor = module_factor + self.input_factor = input_factor + + def forward(self, inputs: Tensor, mask: Optional[Tensor] = None) -> Tensor: + if mask is None: + return (self.module(inputs) * self.module_factor) + (inputs * self.input_factor) + else: + return (self.module(inputs, mask) * self.module_factor) + (inputs * self.input_factor) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/swish.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/swish.py new file mode 100644 index 000000000..d4a41a547 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/swish.py @@ -0,0 +1,37 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor + + +class Swish(nn.Module): + r""" + Swish is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks applied + to a variety of challenging domains such as Image classification and Machine translation. 
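+    Formally, Swish(x) = x * sigmoid(x).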
+ """ + + def __init__(self): + super(Swish, self).__init__() + + def forward(self, inputs: Tensor) -> Tensor: + return inputs * inputs.sigmoid() diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/time_channel_separable_conv1d.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/time_channel_separable_conv1d.py new file mode 100644 index 000000000..2c2224f28 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/time_channel_separable_conv1d.py @@ -0,0 +1,61 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from torch import Tensor +from typing import Optional + +from openspeech.modules.conv_base import BaseConv1d + + +class TimeChannelSeparableConv1d(BaseConv1d): + r""" + The total number of weights for a time-channel separable convolution block is K × cin + cin × cout weights. Since K is + generally several times smaller than cout, most weights are + concentrated in the pointwise convolution part. 
+ """ + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int = 1, + padding: int = 0, + groups: int = 1, + bias: bool = True, + ): + super(TimeChannelSeparableConv1d, self).__init__() + self.conv = nn.Conv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=1, + dilation=1, + padding=padding, + groups=groups, + bias=bias, + ) + + def forward(self, inputs: Tensor, input_lengths: Optional[Tensor] = None) -> Tensor: + if input_lengths is None: + return self.conv(inputs) + else: + return self.conv(inputs), self._get_sequence_lengths(input_lengths) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/transformer_embedding.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/transformer_embedding.py new file mode 100644 index 000000000..79ac64e85 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/transformer_embedding.py @@ -0,0 +1,60 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import math +import torch.nn as nn +from torch import Tensor + + +class TransformerEmbedding(nn.Module): + r""" + Embedding layer. Similarly to other sequence transduction models, transformer use learned embeddings + to convert the input tokens and output tokens to vectors of dimension d_model. + In the embedding layers, transformer multiply those weights by sqrt(d_model) + + Args: + num_embeddings (int): the number of embedding size + pad_id (int): identification of pad token + d_model (int): dimension of model + + Inputs: + inputs (torch.FloatTensor): input of embedding layer + + Returns: + outputs (torch.FloatTensor): output of embedding layer + """ + def __init__(self, num_embeddings: int, pad_id: int, d_model: int = 512) -> None: + super(TransformerEmbedding, self).__init__() + self.sqrt_dim = math.sqrt(d_model) + self.embedding = nn.Embedding(num_embeddings, d_model, padding_idx=pad_id) + + def forward(self, inputs: Tensor) -> Tensor: + r""" + Forward propagate of embedding layer. 
+ + Inputs: + inputs (torch.FloatTensor): input of embedding layer + + Returns: + outputs (torch.FloatTensor): output of embedding layer + """ + return self.embedding(inputs) * self.sqrt_dim diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/vgg_extractor.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/vgg_extractor.py new file mode 100644 index 000000000..4b2af85aa --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/vgg_extractor.py @@ -0,0 +1,81 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from typing import Tuple + +from openspeech.modules import Conv2dExtractor + + +class VGGExtractor(Conv2dExtractor): + r""" + VGG extractor for automatic speech recognition described in + "Advances in Joint CTC-Attention based End-to-End Speech Recognition with a Deep CNN Encoder and RNN-LM" paper + - https://arxiv.org/pdf/1706.02737.pdf + + Args: + input_dim (int): Dimension of input vector + in_channels (int): Number of channels in the input image + out_channels (int or tuple): Number of channels produced by the convolution + activation (str): Activation function + + Inputs: inputs, input_lengths + - **inputs** (batch, time, dim): Tensor containing input vectors + - **input_lengths**: Tensor containing containing sequence lengths + + Returns: outputs, output_lengths + - **outputs**: Tensor produced by the convolution + - **output_lengths**: Tensor containing sequence lengths produced by the convolution + """ + def __init__( + self, + input_dim: int, + in_channels: int = 1, + out_channels: int or tuple = (64, 128), + activation: str = 'hardtanh', + ): + super(VGGExtractor, self).__init__(input_dim=input_dim, activation=activation) + self.in_channels = in_channels + self.out_channels = out_channels + from openspeech.modules import MaskConv2d + self.conv = MaskConv2d( + nn.Sequential( + nn.Conv2d(in_channels, out_channels[0], kernel_size=3, stride=1, padding=1, bias=False), + nn.BatchNorm2d(num_features=out_channels[0]), + self.activation, + nn.Conv2d(out_channels[0], out_channels[0], kernel_size=3, stride=1, padding=1, bias=False), + nn.BatchNorm2d(num_features=out_channels[0]), + self.activation, + nn.MaxPool2d(2, stride=2), + nn.Conv2d(out_channels[0], out_channels[1], kernel_size=3, stride=1, padding=1, bias=False), + nn.BatchNorm2d(num_features=out_channels[1]), + self.activation, + 
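+                # second 3x3 convolution of the second (out_channels[1]) stage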
nn.Conv2d(out_channels[1], out_channels[1], kernel_size=3, stride=1, padding=1, bias=False), + nn.BatchNorm2d(num_features=out_channels[1]), + self.activation, + nn.MaxPool2d(2, stride=2), + ) + ) + + def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + return super().forward(inputs, input_lengths) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/modules/wrapper.py b/audio/speech_recognition/conformer/pytorch/openspeech/modules/wrapper.py new file mode 100644 index 000000000..0aeed9446 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/modules/wrapper.py @@ -0,0 +1,64 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +import torch.nn.init as init +from torch import Tensor + + +class Linear(nn.Module): + r""" + Wrapper class of torch.nn.Linear + Weight initialize by xavier initialization and bias initialize to zeros. + """ + def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None: + super(Linear, self).__init__() + self.linear = nn.Linear(in_features, out_features, bias=bias) + init.xavier_uniform_(self.linear.weight) + if bias: + init.zeros_(self.linear.bias) + + def forward(self, x: Tensor) -> Tensor: + return self.linear(x) + + +class View(nn.Module): + r""" Wrapper class of torch.view() for Sequential module. """ + def __init__(self, shape: tuple, contiguous: bool = False): + super(View, self).__init__() + self.shape = shape + self.contiguous = contiguous + + def forward(self, inputs): + if self.contiguous: + inputs = inputs.contiguous() + return inputs.view(*self.shape) + + +class Transpose(nn.Module): + r""" Wrapper class of torch.transpose() for Sequential module. 
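+    Example: ``Transpose(shape=(1, 2))`` swaps dimensions 1 and 2 of its input.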
""" + def __init__(self, shape: tuple): + super(Transpose, self).__init__() + self.shape = shape + + def forward(self, inputs: Tensor): + return inputs.transpose(*self.shape) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/optim/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/optim/__init__.py new file mode 100644 index 000000000..f225e5179 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/optim/__init__.py @@ -0,0 +1,44 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import importlib + +from openspeech.optim.adamp import AdamP +from openspeech.optim.radam import RAdam +from openspeech.optim.novograd import Novograd + +# automatically import any Python files in the models/ directory +scheduler_dir = os.path.dirname(__file__) +for file in os.listdir(scheduler_dir): + if os.path.isdir(os.path.join(scheduler_dir, file)) and file != '__pycache__': + for subfile in os.listdir(os.path.join(scheduler_dir, file)): + path = os.path.join(scheduler_dir, file, subfile) + if subfile.endswith(".py"): + scheduler_name = subfile[: subfile.find(".py")] if subfile.endswith(".py") else subfile + module = importlib.import_module(f"openspeech.optim.scheduler.{scheduler_name}") + continue + + path = os.path.join(scheduler_dir, file) + if file.endswith(".py"): + scheduler_name = file[: file.find(".py")] if file.endswith(".py") else file + module = importlib.import_module(f"openspeech.optim.{scheduler_name}") diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/optim/adamp.py b/audio/speech_recognition/conformer/pytorch/openspeech/optim/adamp.py new file mode 100644 index 000000000..591e6d9b0 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/optim/adamp.py @@ -0,0 +1,109 @@ +# AdamP +# Copyright (c) 2020-present NAVER Corp. +# MIT license + +import torch +from torch.optim.optimizer import Optimizer +import math + + +class AdamP(Optimizer): + """ + Paper: "AdamP: Slowing Down the Slowdown for Momentum Optimizers on Scale-invariant Weights" + + Copied from https://github.com/clovaai/AdamP/ + Copyright (c) 2020 Naver Corp. 
+ MIT License + """ + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, + delta=delta, wd_ratio=wd_ratio, nesterov=nesterov) + super(AdamP, self).__init__(params, defaults) + + def _channel_view(self, x): + return x.view(x.size(0), -1) + + def _layer_view(self, x): + return x.view(1, -1) + + def _cosine_similarity(self, x, y, eps, view_func): + x = view_func(x) + y = view_func(y) + + x_norm = x.norm(dim=1).add_(eps) + y_norm = y.norm(dim=1).add_(eps) + dot = (x * y).sum(dim=1) + + return dot.abs() / x_norm / y_norm + + def _projection(self, p, grad, perturb, delta, wd_ratio, eps): + wd = 1 + expand_size = [-1] + [1] * (len(p.shape) - 1) + for view_func in [self._channel_view, self._layer_view]: + + cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func) + + if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)): + p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps) + perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size) + wd = wd_ratio + + return perturb, wd + + return perturb, wd + + def step(self, closure=None): + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + grad = p.grad.data + beta1, beta2 = group['betas'] + nesterov = group['nesterov'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p.data) + state['exp_avg_sq'] = torch.zeros_like(p.data) + + # Adam + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + exp_avg.mul_(beta1).add_(1 - beta1, grad) + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + step_size = group['lr'] / bias_correction1 + + if nesterov: + perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom + else: + perturb = exp_avg / denom + + # Projection + wd_ratio = 1 + if len(p.shape) > 1: + perturb, wd_ratio = self._projection(p, grad, perturb, group['delta'], group['wd_ratio'], + group['eps']) + + # Weight decay + if group['weight_decay'] > 0: + p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio) + + # Step + p.data.add_(-step_size, perturb) + + return loss diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/optim/novograd.py b/audio/speech_recognition/conformer/pytorch/openspeech/optim/novograd.py new file mode 100644 index 000000000..e7b78e42d --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/optim/novograd.py @@ -0,0 +1,127 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
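+ # Minimal usage sketch for the AdamP optimizer defined above (illustrative only;
+ # the Linear model and hyper-parameters are placeholders):
+ #
+ #   import torch
+ #   from openspeech.optim.adamp import AdamP
+ #
+ #   model = torch.nn.Linear(128, 10)
+ #   optimizer = AdamP(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-2)
+ #   loss = model(torch.randn(4, 128)).sum()
+ #   loss.backward()
+ #   optimizer.step()
+ #   optimizer.zero_grad()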
+ +import torch +from torch.optim.optimizer import Optimizer + + +class Novograd(Optimizer): + """ + Novograd algorithm. + + Copied from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechRecognition/Jasper/optimizers.py + Copyright (c) 2019 NVIDIA Corp. + Apache-2.0 License + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.95, 0)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging: gradient averaging + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + """ + + def __init__(self, params, lr=1e-3, betas=(0.95, 0), eps=1e-8, + weight_decay=0, grad_averaging=False, amsgrad=False): + if 0.0 > lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if 0.0 > eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, + grad_averaging=grad_averaging, + amsgrad=amsgrad) + + super(Novograd, self).__init__(params, defaults) + + def __setstate__(self, state): + super(Novograd, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('Sparse gradients are not supported.') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + norm = torch.sum(torch.pow(grad, 2)) + + if exp_avg_sq == 0: + exp_avg_sq.copy_(norm) + else: + exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2) + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. 
of gradient + denom = max_exp_avg_sq.sqrt().add_(group['eps']) + else: + denom = exp_avg_sq.sqrt().add_(group['eps']) + + grad.div_(denom) + if group['weight_decay'] != 0: + grad.add_(p.data, alpha=group['weight_decay']) + if group['grad_averaging']: + grad.mul_(1 - beta1) + exp_avg.mul_(beta1).add_(grad) + + p.data.add_(exp_avg, alpha=-group['lr']) + + return loss diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/optim/optimizer.py b/audio/speech_recognition/conformer/pytorch/openspeech/optim/optimizer.py new file mode 100644 index 000000000..39f40f3f3 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/optim/optimizer.py @@ -0,0 +1,83 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch + +from openspeech.optim.scheduler.reduce_lr_on_plateau_scheduler import ReduceLROnPlateauScheduler +from openspeech.optim.scheduler.warmup_reduce_lr_on_plateau_scheduler import WarmupReduceLROnPlateauScheduler + + +class Optimizer(object): + """ + This is wrapper classs of torch.optim.Optimizer. + This class provides functionalities for learning rate scheduling and gradient norm clipping. + + Args: + optim (torch.optim.Optimizer): optimizer object, the parameters to be optimized + should be given when instantiating the object, e.g. 
torch.optim.Adam, torch.optim.SGD + scheduler (openspeech.optim.scheduler, optional): learning rate scheduler + scheduler_period (int, optional): timestep with learning rate scheduler + max_grad_norm (int, optional): value used for gradient norm clipping + """ + def __init__(self, optim, scheduler=None, scheduler_period=None, max_grad_norm=0): + self.optimizer = optim + self.scheduler = scheduler + self.scheduler_period = scheduler_period + self.max_grad_norm = max_grad_norm + self.count = 0 + + def step(self, model): + if self.max_grad_norm > 0: + torch.nn.utils.clip_grad_norm_(model.parameters(), self.max_grad_norm) + self.optimizer.step() + + if self.scheduler is not None: + self.update() + self.count += 1 + + if self.scheduler_period == self.count: + self.scheduler = None + self.scheduler_period = 0 + self.count = 0 + + def set_scheduler(self, scheduler, scheduler_period): + self.scheduler = scheduler + self.scheduler_period = scheduler_period + self.count = 0 + + def update(self, val_loss=None): + if isinstance(self.scheduler, ReduceLROnPlateauScheduler) \ + or isinstance(self.scheduler, WarmupReduceLROnPlateauScheduler): + self.scheduler.step(val_loss) + else: + self.scheduler.step() + + def zero_grad(self): + self.optimizer.zero_grad() + + def get_lr(self): + for g in self.optimizer.param_groups: + return g['lr'] + + def set_lr(self, lr): + for g in self.optimizer.param_groups: + g['lr'] = lr diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/optim/radam.py b/audio/speech_recognition/conformer/pytorch/openspeech/optim/radam.py new file mode 100644 index 000000000..6c973973c --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/optim/radam.py @@ -0,0 +1,118 @@ +# Copyright (c) 2020, LiyuanLucasLiu. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
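+ # Minimal sketch of how the Optimizer wrapper above is driven during training
+ # (illustrative; `model`, `batch_loss` and `configs` are placeholders):
+ #
+ #   import torch
+ #   from openspeech.optim.optimizer import Optimizer
+ #   from openspeech.optim.scheduler.warmup_scheduler import WarmupLRScheduler
+ #
+ #   adam = torch.optim.Adam(model.parameters(), lr=1e-4)
+ #   scheduler = WarmupLRScheduler(adam, configs)
+ #   optimizer = Optimizer(adam, scheduler=scheduler,
+ #                         scheduler_period=configs.lr_scheduler.warmup_steps,
+ #                         max_grad_norm=5.0)
+ #
+ #   batch_loss.backward()
+ #   optimizer.step(model)   # clips gradients, steps Adam, then advances the scheduler
+ #   optimizer.zero_grad()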
+ +import math +import torch +from torch.optim.optimizer import Optimizer + + +class RAdam(Optimizer): + """ + Paper: "On the Variance of the Adaptive Learning Rate and Beyond" + + Refer to https://github.com/LiyuanLucasLiu/RAdam + Copyright (c) LiyuanLucasLiu + Apache 2.0 License + """ + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True): + if lr < 0.0: + raise ValueError("Invalid learning rate: {}".format(lr)) + if eps < 0.0: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + + self.degenerated_to_sgd = degenerated_to_sgd + if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict): + for param in params: + if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]): + param['buffer'] = [[None, None, None] for _ in range(10)] + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, + buffer=[[None, None, None] for _ in range(10)]) + super(RAdam, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RAdam, self).__setstate__(state) + + def step(self, closure=None): + + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data.float() + if grad.is_sparse: + raise RuntimeError('RAdam does not support sparse gradients') + + p_data_fp32 = p.data.float() + + state = self.state[p] + + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p_data_fp32) + state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) + else: + state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) + state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + exp_avg.mul_(beta1).add_(1 - beta1, grad) + + state['step'] += 1 + buffered = group['buffer'][int(state['step'] % 10)] + if state['step'] == buffered[0]: + N_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + N_sma_max = 2 / (1 - beta2) - 1 + N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = N_sma + + # more conservative since it's an approximated value + if N_sma >= 5: + step_size = math.sqrt( + (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / ( + N_sma_max - 2)) / (1 - beta1 ** state['step']) + elif self.degenerated_to_sgd: + step_size = 1.0 / (1 - beta1 ** state['step']) + else: + step_size = -1 + buffered[2] = step_size + + # more conservative since it's an approximated value + if N_sma >= 5: + if group['weight_decay'] != 0: + p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) + denom = exp_avg_sq.sqrt().add_(group['eps']) + p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom) + p.data.copy_(p_data_fp32) + elif step_size > 0: + if group['weight_decay'] != 0: + p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) + p_data_fp32.add_(-step_size * group['lr'], exp_avg) + p.data.copy_(p_data_fp32) + + return loss diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/__init__.py 
b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/__init__.py new file mode 100644 index 000000000..125842f79 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/__init__.py @@ -0,0 +1,59 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import importlib + +SCHEDULER_REGISTRY = {} +SCHEDULER_DATACLASS_REGISTRY = {} + + +def register_scheduler(name: str, dataclass=None): + """ + New scheduler types can be added to OpenSpeech with the :func:`register_scheduler` function decorator. + + For example:: + @register_scheduler('reduce_lr_on_plateau') + class ReduceLROnPlateau: + (...) + + .. note:: All scheduler must implement the :class:`cls.__name__` interface. + + Args: + name (str): the name of the scheduler + """ + + def register_scheduler_cls(cls): + if name in SCHEDULER_REGISTRY: + raise ValueError(f"Cannot register duplicate scheduler ({name})") + + SCHEDULER_REGISTRY[name] = cls + + cls.__dataclass = dataclass + if dataclass is not None: + if name in SCHEDULER_DATACLASS_REGISTRY: + raise ValueError(f"Cannot register duplicate scheduler ({name})") + SCHEDULER_DATACLASS_REGISTRY[name] = dataclass + + return cls + + return register_scheduler_cls diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/lr_scheduler.py b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/lr_scheduler.py new file mode 100644 index 000000000..2e8755246 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/lr_scheduler.py @@ -0,0 +1,47 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from torch.optim.lr_scheduler import _LRScheduler + + +class LearningRateScheduler(_LRScheduler): + r""" + Provides inteface of learning rate scheduler. + + Note: + Do not use this class directly, use one of the sub classes. + """ + def __init__(self, optimizer, init_lr): + self.optimizer = optimizer + self.init_lr = init_lr + + def step(self, *args, **kwargs): + raise NotImplementedError + + @staticmethod + def set_lr(optimizer, lr): + for g in optimizer.param_groups: + g['lr'] = lr + + def get_lr(self): + for g in self.optimizer.param_groups: + return g['lr'] diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/reduce_lr_on_plateau_scheduler.py b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/reduce_lr_on_plateau_scheduler.py new file mode 100644 index 000000000..789281be0 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/reduce_lr_on_plateau_scheduler.py @@ -0,0 +1,82 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from dataclasses import dataclass, field +from torch.optim import Optimizer +from typing import Optional + +from openspeech.dataclass.configurations import LearningRateSchedulerConfigs +from openspeech.optim.scheduler import register_scheduler +from openspeech.optim.scheduler.lr_scheduler import LearningRateScheduler + + +@dataclass +class ReduceLROnPlateauConfigs(LearningRateSchedulerConfigs): + scheduler_name: str = field( + default="reduce_lr_on_plateau", metadata={"help": "Name of learning rate scheduler."} + ) + lr_patience: int = field( + default=1, metadata={"help": "Number of epochs with no improvement after which learning rate will be reduced."} + ) + lr_factor: float = field( + default=0.3, metadata={"help": "Factor by which the learning rate will be reduced. 
new_lr = lr * factor."} + ) + + +@register_scheduler("reduce_lr_on_plateau", dataclass=ReduceLROnPlateauConfigs) +class ReduceLROnPlateauScheduler(LearningRateScheduler): + r""" + Reduce learning rate when a metric has stopped improving. Models often benefit from reducing the learning rate by + a factor of 2-10 once learning stagnates. This scheduler reads a metrics quantity and if no improvement is seen + for a ‘patience’ number of epochs, the learning rate is reduced. + + Args: + optimizer (Optimizer): wrapped optimizer. + configs (DictConfig): configuration set. + """ + def __init__( + self, + optimizer: Optimizer, + configs, + ) -> None: + super(ReduceLROnPlateauScheduler, self).__init__(optimizer, configs.lr_scheduler.lr) + self.lr = configs.lr_scheduler.lr + self.lr_patience = configs.lr_scheduler.lr_patience + self.lr_factor = configs.lr_scheduler.lr_factor + self.val_loss = 100.0 + self.count = 0 + + def step(self, val_loss: Optional[float] = None): + if val_loss is not None: + if self.val_loss < val_loss: + self.count += 1 + self.val_loss = val_loss + else: + self.count = 0 + self.val_loss = val_loss + + if self.lr_patience == self.count: + self.count = 0 + self.lr *= self.lr_factor + self.set_lr(self.optimizer, self.lr) + + return self.lr diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/transformer_lr_scheduler.py b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/transformer_lr_scheduler.py new file mode 100644 index 000000000..bd2fde7d6 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/transformer_lr_scheduler.py @@ -0,0 +1,109 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
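+ # Minimal sketch of the plateau behaviour implemented above (illustrative; the torch
+ # optimizer is a placeholder and the config mirrors ReduceLROnPlateauConfigs defaults):
+ #
+ #   from omegaconf import OmegaConf
+ #   from openspeech.optim.scheduler.reduce_lr_on_plateau_scheduler import ReduceLROnPlateauScheduler
+ #
+ #   configs = OmegaConf.create({"lr_scheduler": {"lr": 1e-3, "lr_patience": 1, "lr_factor": 0.3}})
+ #   scheduler = ReduceLROnPlateauScheduler(torch_optimizer, configs)
+ #   for val_loss in (1.0, 1.2, 1.3):      # two consecutive epochs without improvement
+ #       lr = scheduler.step(val_loss)     # lr drops from 1e-3 to 3e-4, then to 9e-5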
+ +import math +import torch +from typing import Optional +from dataclasses import dataclass, field +from torch.optim import Optimizer + +from openspeech.dataclass.configurations import LearningRateSchedulerConfigs +from openspeech.optim.scheduler import register_scheduler +from openspeech.optim.scheduler.lr_scheduler import LearningRateScheduler + + +@dataclass +class TransformerLRSchedulerConfigs(LearningRateSchedulerConfigs): + scheduler_name: str = field( + default="transformer", metadata={"help": "Name of learning rate scheduler."} + ) + peak_lr: float = field( + default=1e-04, metadata={"help": "Maximum learning rate."} + ) + final_lr: float = field( + default=1e-07, metadata={"help": "Final learning rate."} + ) + final_lr_scale: float = field( + default=0.05, metadata={"help": "Final learning rate scale"} + ) + warmup_steps: int = field( + default=10000, metadata={"help": "Warmup the learning rate linearly for the first N updates"} + ) + decay_steps: int = field( + default=150000, metadata={"help": "Steps in decay stages"} + ) + + +@register_scheduler("transformer", dataclass=TransformerLRSchedulerConfigs) +class TransformerLRScheduler(LearningRateScheduler): + r""" + Transformer Learning Rate Scheduler proposed in "Attention Is All You Need" + + Args: + optimizer (Optimizer): wrapped optimizer. + configs (DictConfig): configuration set. + """ + def __init__( + self, + optimizer: Optimizer, + configs, + ) -> None: + assert isinstance(configs.lr_scheduler.warmup_steps, int), "warmup_steps should be inteager type" + assert isinstance(configs.lr_scheduler.decay_steps, int), "total_steps should be inteager type" + + super(TransformerLRScheduler, self).__init__(optimizer, 0.0) + self.final_lr = configs.lr_scheduler.final_lr + self.peak_lr = configs.lr_scheduler.peak_lr + self.warmup_steps = configs.lr_scheduler.warmup_steps + self.decay_steps = configs.lr_scheduler.decay_steps + + self.warmup_rate = self.peak_lr / self.warmup_steps + self.decay_factor = -math.log(configs.lr_scheduler.final_lr_scale) / self.decay_steps + + self.lr = self.init_lr + self.update_step = 0 + + def _decide_stage(self): + if self.update_step < self.warmup_steps: + return 0, self.update_step + + if self.warmup_steps <= self.update_step < self.warmup_steps + self.decay_steps: + return 1, self.update_step - self.warmup_steps + + return 2, None + + def step(self, val_loss: Optional[torch.FloatTensor] = None): + self.update_step += 1 + stage, steps_in_stage = self._decide_stage() + + if stage == 0: + self.lr = self.update_step * self.warmup_rate + elif stage == 1: + self.lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage) + elif stage == 2: + self.lr = self.final_lr + else: + raise ValueError("Undefined stage") + + self.set_lr(self.optimizer, self.lr) + + return self.lr diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/tri_stage_lr_scheduler.py b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/tri_stage_lr_scheduler.py new file mode 100644 index 000000000..754081ee0 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/tri_stage_lr_scheduler.py @@ -0,0 +1,154 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import math +import torch +from dataclasses import dataclass, field +from typing import Optional +from torch.optim import Optimizer + +from openspeech.dataclass.configurations import LearningRateSchedulerConfigs +from openspeech.optim.scheduler import register_scheduler +from openspeech.optim.scheduler.lr_scheduler import LearningRateScheduler + + +@dataclass +class TriStageLRSchedulerConfigs(LearningRateSchedulerConfigs): + scheduler_name: str = field( + default="tri_stage", metadata={"help": "Name of learning rate scheduler."} + ) + init_lr: float = field( + default=1e-7, metadata={"help": "Initial learning rate."} + ) + init_lr_scale: float = field( + default=0.01, metadata={"help": "Initial learning rate scale."} + ) + final_lr_scale: float = field( + default=0.01, metadata={"help": "Final learning rate scale"} + ) + phase_ratio: str = field( + default="(0.1, 0.4, 0.5)", metadata={"help": "Automatically sets warmup/hold/decay steps to the ratio " + "specified here from max_updates. the ratios must add up to 1.0"} + ) + total_steps: int = field( + default=400000, metadata={"help": "Total training steps."} + ) + + +@register_scheduler("tri_stage", dataclass=TriStageLRSchedulerConfigs) +class TriStageLRScheduler(LearningRateScheduler): + r""" + Tri-Stage Learning Rate Scheduler. Implement the learning rate scheduler in "SpecAugment" + + Similar to inverse_squre_root scheduler, but tri_stage learning rate employs + three stages LR scheduling: + + - warmup stage, starting from `lr` * `init_lr_scale`, linearly + increased to `lr` in `warmup_steps` iterations + - hold stage, after `warmup_steps`, keep the LR as `lr` for `hold_steps` + iterations + - decay stage, after hold stage, decay LR exponetially to + `lr` * `final_lr_scale` in `decay_steps`; + after that LR is keep as `final_lr_scale` * `lr` + + During warmup:: + init_lr = cfg.init_lr_scale * cfg.lr + lrs = torch.linspace(init_lr, cfg.lr, cfg.warmup_steps) + lr = lrs[update_num] + + During hold:: + lr = cfg.lr + + During decay:: + decay_factor = - math.log(cfg.final_lr_scale) / cfg.decay_steps + lr = cfg.lr * exp(- (update_num - warmup_steps - decay_steps) * decay_factor) + + After that:: + lr = cfg.lr * cfg.final_lr_scale + + Args: + optimizer (Optimizer): wrapped optimizer. + configs (DictConfig): configuration set. 
+ """ + def __init__( + self, + optimizer: Optimizer, + configs, + ): + super(TriStageLRScheduler, self).__init__(optimizer, configs.lr_scheduler.init_lr) + + self.phase_ratio = eval(configs.lr_scheduler.phase_ratio) + + self.warmup_steps = int(configs.lr_scheduler.total_steps * self.phase_ratio[0]) + self.hold_steps = int(configs.lr_scheduler.total_steps * self.phase_ratio[1]) + self.decay_steps = int(configs.lr_scheduler.total_steps * self.phase_ratio[2]) + + self.peak_lr = configs.lr_scheduler.lr + self.init_lr = configs.lr_scheduler.init_lr_scale * configs.lr_scheduler.lr + self.final_lr = configs.lr_scheduler.final_lr_scale * configs.lr_scheduler.lr + + self.warmup_rate = ( + (self.peak_lr - self.init_lr) / self.warmup_steps + if self.warmup_steps != 0 + else 0 + ) + self.decay_factor = -math.log(configs.lr_scheduler.final_lr_scale) / self.decay_steps + self.update_step = 0 + self.lr = self.init_lr + + def _decide_stage(self): + if self.update_step < self.warmup_steps: + return 0, self.update_step + + offset = self.warmup_steps + + if self.update_step < offset + self.hold_steps: + return 1, self.update_step - offset + + offset += self.hold_steps + + if self.update_step <= offset + self.decay_steps: + # decay stage + return 2, self.update_step - offset + + offset += self.decay_steps + + return 3, self.update_step - offset + + def step(self, val_loss: Optional[torch.FloatTensor] = None): + stage, steps_in_stage = self._decide_stage() + + if stage == 0: + self.lr = self.init_lr + self.warmup_rate * steps_in_stage + elif stage == 1: + self.lr = self.peak_lr + elif stage == 2: + self.lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage) + elif stage == 3: + self.lr = self.final_lr + else: + raise ValueError("Undefined stage") + + self.set_lr(self.optimizer, self.lr) + self.update_step += 1 + + return self.lr diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/warmup_reduce_lr_on_plateau_scheduler.py b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/warmup_reduce_lr_on_plateau_scheduler.py new file mode 100644 index 000000000..1031d9eef --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/warmup_reduce_lr_on_plateau_scheduler.py @@ -0,0 +1,102 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
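The TransformerLRScheduler and TriStageLRScheduler added above are both piecewise schedules driven by a handful of scalars, so the curve can be sanity-checked without an optimizer. A minimal standalone sketch of the tri-stage curve, using illustrative values rather than the project's configs (the transformer schedule above is essentially the same curve with the hold phase removed):

    import math

    # Illustrative values only; the real ones come from configs.lr_scheduler.
    peak_lr, init_lr_scale, final_lr_scale = 1e-4, 0.01, 0.01
    total_steps, phase_ratio = 400_000, (0.1, 0.4, 0.5)

    warmup = int(total_steps * phase_ratio[0])
    hold = int(total_steps * phase_ratio[1])
    decay = int(total_steps * phase_ratio[2])

    init_lr = peak_lr * init_lr_scale
    final_lr = peak_lr * final_lr_scale
    warmup_rate = (peak_lr - init_lr) / warmup
    decay_factor = -math.log(final_lr_scale) / decay

    def tri_stage_lr(step: int) -> float:
        if step < warmup:                     # stage 0: linear ramp up
            return init_lr + warmup_rate * step
        if step < warmup + hold:              # stage 1: hold at the peak
            return peak_lr
        if step < warmup + hold + decay:      # stage 2: exponential decay
            return peak_lr * math.exp(-decay_factor * (step - warmup - hold))
        return final_lr                       # stage 3: constant floor

    for s in (0, warmup, warmup + hold, total_steps):
        print(s, tri_stage_lr(s))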
+ +from torch.optim import Optimizer +from dataclasses import dataclass, field +from typing import Optional + +from openspeech.dataclass.configurations import LearningRateSchedulerConfigs +from openspeech.optim.scheduler import register_scheduler +from openspeech.optim.scheduler.lr_scheduler import LearningRateScheduler +from openspeech.optim.scheduler.reduce_lr_on_plateau_scheduler import ReduceLROnPlateauScheduler +from openspeech.optim.scheduler.warmup_scheduler import WarmupLRScheduler + + +@dataclass +class WarmupReduceLROnPlateauConfigs(LearningRateSchedulerConfigs): + scheduler_name: str = field( + default="warmup_reduce_lr_on_plateau", metadata={"help": "Name of learning rate scheduler."} + ) + lr_patience: int = field( + default=1, metadata={"help": "Number of epochs with no improvement after which learning rate will be reduced."} + ) + lr_factor: float = field( + default=0.3, metadata={"help": "Factor by which the learning rate will be reduced. new_lr = lr * factor."} + ) + peak_lr: float = field( + default=1e-04, metadata={"help": "Maximum learning rate."} + ) + init_lr: float = field( + default=1e-10, metadata={"help": "Initial learning rate."} + ) + warmup_steps: int = field( + default=4000, metadata={"help": "Warmup the learning rate linearly for the first N updates"} + ) + + +@register_scheduler("warmup_reduce_lr_on_plateau", dataclass=WarmupReduceLROnPlateauConfigs) +class WarmupReduceLROnPlateauScheduler(LearningRateScheduler): + r""" + Warmup learning rate until `warmup_steps` and reduce learning rate on plateau after. + + Args: + optimizer (Optimizer): wrapped optimizer. + configs (DictConfig): configuration set. + """ + def __init__( + self, + optimizer: Optimizer, + configs, + ) -> None: + super(WarmupReduceLROnPlateauScheduler, self).__init__(optimizer, configs.lr_scheduler.lr) + self.warmup_steps = configs.lr_scheduler.warmup_steps + self.update_steps = 0 + self.warmup_rate = (configs.lr_scheduler.peak_lr - configs.lr_scheduler.init_lr) / self.warmup_steps \ + if self.warmup_steps != 0 else 0 + self.schedulers = [ + WarmupLRScheduler( + optimizer, + configs, + ), + ReduceLROnPlateauScheduler( + optimizer, + configs, + ), + ] + + def _decide_stage(self): + if self.update_steps < self.warmup_steps: + return 0, self.update_steps + else: + return 1, None + + def step(self, val_loss: Optional[float] = None): + stage, steps_in_stage = self._decide_stage() + + if stage == 0: + self.schedulers[0].step() + elif stage == 1: + self.schedulers[1].step(val_loss) + + self.update_steps += 1 + + return self.get_lr() diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/warmup_scheduler.py b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/warmup_scheduler.py new file mode 100644 index 000000000..9c589b005 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/optim/scheduler/warmup_scheduler.py @@ -0,0 +1,82 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included 
in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +from dataclasses import dataclass, field +from typing import Optional +from torch.optim import Optimizer + +from openspeech.dataclass.configurations import LearningRateSchedulerConfigs +from openspeech.optim.scheduler import register_scheduler +from openspeech.optim.scheduler.lr_scheduler import LearningRateScheduler + + +@dataclass +class WarmupLRSchedulerConfigs(LearningRateSchedulerConfigs): + scheduler_name: str = field( + default="warmup", metadata={"help": "Name of learning rate scheduler."} + ) + peak_lr: float = field( + default=1e-04, metadata={"help": "Maximum learning rate."} + ) + init_lr: float = field( + default=1e-7, metadata={"help": "Initial learning rate."} + ) + warmup_steps: int = field( + default=4000, metadata={"help": "Warmup the learning rate linearly for the first N updates"} + ) + total_steps: int = field( + default=200000, metadata={"help": "Total training steps."} + ) + + +@register_scheduler("warmup", dataclass=WarmupLRSchedulerConfigs) +class WarmupLRScheduler(LearningRateScheduler): + """ + Warmup learning rate until `total_steps` + + Args: + optimizer (Optimizer): wrapped optimizer. + configs (DictConfig): configuration set. + """ + def __init__( + self, + optimizer: Optimizer, + configs, + ) -> None: + super(WarmupLRScheduler, self).__init__(optimizer, configs.lr_scheduler.init_lr) + if configs.lr_scheduler.warmup_steps != 0: + warmup_rate = configs.lr_scheduler.peak_lr - configs.lr_scheduler.init_lr + self.warmup_rate = warmup_rate / configs.lr_scheduler.warmup_steps + else: + self.warmup_rate = 0 + self.update_steps = 1 + self.lr = configs.lr_scheduler.init_lr + self.warmup_steps = configs.lr_scheduler.warmup_steps + + def step(self, val_loss: Optional[torch.FloatTensor] = None): + if self.update_steps < self.warmup_steps: + lr = self.init_lr + self.warmup_rate * self.update_steps + self.set_lr(self.optimizer, lr) + self.lr = lr + self.update_steps += 1 + return self.lr diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/search/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/search/__init__.py new file mode 100644 index 000000000..c40825eb7 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/search/__init__.py @@ -0,0 +1,28 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
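Both warmup-based schedulers above share the same linear ramp: the learning rate moves from init_lr toward peak_lr over warmup_steps updates, and WarmupReduceLROnPlateauScheduler then hands control to the plateau-based policy. A small self-contained sketch of the ramp with illustrative values:

    def warmup_lr(step: int, init_lr: float, peak_lr: float, warmup_steps: int) -> float:
        # Linear warmup: init_lr at step 0, peak_lr once step reaches warmup_steps.
        if warmup_steps == 0 or step >= warmup_steps:
            return peak_lr
        return init_lr + (peak_lr - init_lr) * step / warmup_steps

    # Halfway through a 4000-step ramp from 1e-7 to 1e-4:
    assert abs(warmup_lr(2000, 1e-7, 1e-4, 4000) - (1e-7 + (1e-4 - 1e-7) * 0.5)) < 1e-12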
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from .beam_search_base import OpenspeechBeamSearchBase +from .beam_search_ctc import BeamSearchCTC +from .beam_search_lstm import BeamSearchLSTM +from .beam_search_rnn_transducer import BeamSearchRNNTransducer +from .beam_search_transformer import BeamSearchTransformer +from .beam_search_transformer_transducer import BeamSearchTransformerTransducer diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_base.py b/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_base.py new file mode 100644 index 000000000..13d8f6d56 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_base.py @@ -0,0 +1,134 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn + + +class OpenspeechBeamSearchBase(nn.Module): + """ + Openspeech's beam-search base class. Implement the methods required for beamsearch. + You have to implement `forward` method. + + Note: + Do not use this class directly, use one of the sub classes. 
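The base class here returns its hypotheses as one padded (batch, max_length) LongTensor (see the _fill_sequence helper below). A self-contained sketch of that padding step, assuming pad_id = 0 for illustration:

    import torch

    def fill_sequence(y_hats, pad_id=0):
        # Pad a list of 1-D LongTensors to a single (batch, max_len) LongTensor.
        max_length = max(len(y) for y in y_hats)
        out = torch.full((len(y_hats), max_length), pad_id, dtype=torch.long)
        for i, y in enumerate(y_hats):
            out[i, : len(y)] = y
        return out

    print(fill_sequence([torch.tensor([5, 6, 2]), torch.tensor([7, 2])]))
    # tensor([[5, 6, 2],
    #         [7, 2, 0]])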
+ """ + def __init__(self, decoder, beam_size: int): + super(OpenspeechBeamSearchBase, self).__init__() + self.decoder = decoder + self.beam_size = beam_size + self.sos_id = decoder.sos_id + self.pad_id = decoder.pad_id + self.eos_id = decoder.eos_id + self.ongoing_beams = None + self.cumulative_ps = None + self.forward_step = decoder.forward_step + + def _inflate(self, tensor: torch.Tensor, n_repeat: int, dim: int) -> torch.Tensor: + repeat_dims = [1] * len(tensor.size()) + repeat_dims[dim] *= n_repeat + return tensor.repeat(*repeat_dims) + + def _get_successor( + self, + current_ps: torch.Tensor, + current_vs: torch.Tensor, + finished_ids: tuple, + num_successor: int, + eos_count: int, + k: int + ) -> int: + finished_batch_idx, finished_idx = finished_ids + + successor_ids = current_ps.topk(k + num_successor)[1] + successor_idx = successor_ids[finished_batch_idx, -1] + + successor_p = current_ps[finished_batch_idx, successor_idx] + successor_v = current_vs[finished_batch_idx, successor_idx] + + prev_status_idx = (successor_idx // k) + prev_status = self.ongoing_beams[finished_batch_idx, prev_status_idx] + prev_status = prev_status.view(-1)[:-1] + + successor = torch.cat([prev_status, successor_v.view(1)]) + + if int(successor_v) == self.eos_id: + self.finished[finished_batch_idx].append(successor) + self.finished_ps[finished_batch_idx].append(successor_p) + eos_count = self._get_successor( + current_ps=current_ps, + current_vs=current_vs, + finished_ids=finished_ids, + num_successor=num_successor + eos_count, + eos_count=eos_count + 1, + k=k, + ) + + else: + self.ongoing_beams[finished_batch_idx, finished_idx] = successor + self.cumulative_ps[finished_batch_idx, finished_idx] = successor_p + + return eos_count + + def _get_hypothesis(self): + predictions = list() + + for batch_idx, batch in enumerate(self.finished): + # if there is no terminated sentences, bring ongoing sentence which has the highest probability instead + if len(batch) == 0: + prob_batch = self.cumulative_ps[batch_idx] + top_beam_idx = int(prob_batch.topk(1)[1]) + predictions.append(self.ongoing_beams[batch_idx, top_beam_idx]) + + # bring highest probability sentence + else: + top_beam_idx = int(torch.FloatTensor(self.finished_ps[batch_idx]).topk(1)[1]) + predictions.append(self.finished[batch_idx][top_beam_idx]) + + predictions = self._fill_sequence(predictions) + return predictions + + def _is_all_finished(self, k: int) -> bool: + for done in self.finished: + if len(done) < k: + return False + + return True + + def _fill_sequence(self, y_hats: list) -> torch.Tensor: + batch_size = len(y_hats) + max_length = -1 + + for y_hat in y_hats: + if len(y_hat) > max_length: + max_length = len(y_hat) + + matched = torch.zeros((batch_size, max_length), dtype=torch.long) + + for batch_idx, y_hat in enumerate(y_hats): + matched[batch_idx, :len(y_hat)] = y_hat + matched[batch_idx, len(y_hat):] = int(self.pad_id) + + return matched + + def forward(self, *args, **kwargs): + raise NotImplementedError diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_ctc.py b/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_ctc.py new file mode 100644 index 000000000..105bb82bf --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_ctc.py @@ -0,0 +1,84 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated 
documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch.nn as nn +from openspeech.utils import CTCDECODE_IMPORT_ERROR + + +class BeamSearchCTC(nn.Module): + r""" + Decodes probability output using ctcdecode package. + + Args: + labels (list): the tokens you used to train your model + lm_path (str): the path to your external kenlm language model(LM). + alpha (int): weighting associated with the LMs probabilities. + beta (int): weight associated with the number of words within our beam + cutoff_top_n (int): cutoff number in pruning. Only the top cutoff_top_n characters with the highest probability + in the vocab will be used in beam search. + cutoff_prob (float): cutoff probability in pruning. 1.0 means no pruning. + beam_size (int): this controls how broad the beam search is. + num_processes (int): parallelize the batch using num_processes workers. + blank_id (int): this should be the index of the CTC blank token + + Inputs: logits, sizes + - logits: Tensor of character probabilities, where probs[c,t] is the probability of character c at time t + - sizes: Size of each sequence in the mini-batch + + Returns: + - outputs: sequences of the model's best prediction + """ + def __init__( + self, + labels: list, + lm_path: str = None, + alpha: int = 0, + beta: int = 0, + cutoff_top_n: int = 40, + cutoff_prob: float = 1.0, + beam_size: int = 3, + num_processes: int = 4, + blank_id: int = 0, + ) -> None: + super(BeamSearchCTC, self).__init__() + try: + from ctcdecode import CTCBeamDecoder + except ImportError: + raise ImportError(CTCDECODE_IMPORT_ERROR) + assert isinstance(labels, list), "labels must instance of list" + self.decoder = CTCBeamDecoder(labels, lm_path, alpha, beta, cutoff_top_n, + cutoff_prob, beam_size, num_processes, blank_id) + + def forward(self, logits, sizes=None): + r""" + Decodes probability output using ctcdecode package. 
+ + Inputs: logits, sizes + logits: Tensor of character probabilities, where probs[c,t] is the probability of character c at time t + sizes: Size of each sequence in the mini-batch + + Returns: + outputs: sequences of the model's best prediction + """ + logits = logits.cpu() + outputs, scores, offsets, seq_lens = self.decoder.decode(logits, sizes) + return outputs diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_lstm.py b/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_lstm.py new file mode 100644 index 000000000..87f92c2ec --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_lstm.py @@ -0,0 +1,154 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch + +from openspeech.search.beam_search_base import OpenspeechBeamSearchBase +from openspeech.decoders import LSTMAttentionDecoder + + +class BeamSearchLSTM(OpenspeechBeamSearchBase): + r""" + LSTM Beam Search Decoder + + Args: decoder, beam_size, batch_size + decoder (DecoderLSTM): base decoder of lstm model. + beam_size (int): size of beam. + + Inputs: encoder_outputs, targets, encoder_output_lengths, teacher_forcing_ratio + encoder_outputs (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size ``(batch, seq_length, dimension)`` + targets (torch.LongTensor): A target sequence passed to decoders. `IntTensor` of size ``(batch, seq_length)`` + encoder_output_lengths (torch.LongTensor): A encoder output lengths sequence. `LongTensor` of size ``(batch)`` + teacher_forcing_ratio (float): Ratio of teacher forcing. + + Returns: + * logits (torch.FloatTensor): Log probability of model predictions. + """ + def __init__(self, decoder: LSTMAttentionDecoder, beam_size: int): + super(BeamSearchLSTM, self).__init__(decoder, beam_size) + self.hidden_state_dim = decoder.hidden_state_dim + self.num_layers = decoder.num_layers + self.validate_args = decoder.validate_args + + def forward( + self, + encoder_outputs: torch.Tensor, + encoder_output_lengths: torch.Tensor, + ) -> torch.Tensor: + r""" + Beam search decoding. + + Inputs: encoder_outputs + encoder_outputs (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size ``(batch, seq_length, dimension)`` + + Returns: + * logits (torch.FloatTensor): Log probability of model predictions. 
+ """ + batch_size, hidden_states = encoder_outputs.size(0), None + + self.finished = [[] for _ in range(batch_size)] + self.finished_ps = [[] for _ in range(batch_size)] + + inputs, batch_size, max_length = self.validate_args(None, encoder_outputs, teacher_forcing_ratio=0.0) + + step_outputs, hidden_states, attn = self.forward_step(inputs, hidden_states, encoder_outputs) + self.cumulative_ps, self.ongoing_beams = step_outputs.topk(self.beam_size) + + self.ongoing_beams = self.ongoing_beams.view(batch_size * self.beam_size, 1) + self.cumulative_ps = self.cumulative_ps.view(batch_size * self.beam_size, 1) + + input_var = self.ongoing_beams + + encoder_dim = encoder_outputs.size(2) + encoder_outputs = self._inflate(encoder_outputs, self.beam_size, dim=0) + encoder_outputs = encoder_outputs.view(self.beam_size, batch_size, -1, encoder_dim) + encoder_outputs = encoder_outputs.transpose(0, 1) + encoder_outputs = encoder_outputs.reshape(batch_size * self.beam_size, -1, encoder_dim) + + if attn is not None: + attn = self._inflate(attn, self.beam_size, dim=0) + + if isinstance(hidden_states, tuple): + hidden_states = tuple([self._inflate(h, self.beam_size, 1) for h in hidden_states]) + else: + hidden_states = self._inflate(hidden_states, self.beam_size, 1) + + for di in range(max_length - 1): + if self._is_all_finished(self.beam_size): + break + + if isinstance(hidden_states, tuple): + tuple(h.view(self.num_layers, batch_size * self.beam_size, self.hidden_state_dim) for h in hidden_states) + else: + hidden_states = hidden_states.view(self.num_layers, batch_size * self.beam_size, self.hidden_state_dim) + step_outputs, hidden_states, attn = self.forward_step(input_var, hidden_states, encoder_outputs, attn) + + step_outputs = step_outputs.view(batch_size, self.beam_size, -1) + current_ps, current_vs = step_outputs.topk(self.beam_size) + + self.cumulative_ps = self.cumulative_ps.view(batch_size, self.beam_size) + self.ongoing_beams = self.ongoing_beams.view(batch_size, self.beam_size, -1) + + current_ps = (current_ps.permute(0, 2, 1) + self.cumulative_ps.unsqueeze(1)).permute(0, 2, 1) + current_ps = current_ps.view(batch_size, self.beam_size ** 2) + current_vs = current_vs.view(batch_size, self.beam_size ** 2) + + self.cumulative_ps = self.cumulative_ps.view(batch_size, self.beam_size) + self.ongoing_beams = self.ongoing_beams.view(batch_size, self.beam_size, -1) + + topk_current_ps, topk_status_ids = current_ps.topk(self.beam_size) + prev_status_ids = (topk_status_ids // self.beam_size) + + topk_current_vs = torch.zeros((batch_size, self.beam_size), dtype=torch.long) + prev_status = torch.zeros(self.ongoing_beams.size(), dtype=torch.long) + + for batch_idx, batch in enumerate(topk_status_ids): + for idx, topk_status_idx in enumerate(batch): + topk_current_vs[batch_idx, idx] = current_vs[batch_idx, topk_status_idx] + prev_status[batch_idx, idx] = self.ongoing_beams[batch_idx, prev_status_ids[batch_idx, idx]] + + self.ongoing_beams = torch.cat([prev_status, topk_current_vs.unsqueeze(2)], dim=2) + self.cumulative_ps = topk_current_ps + + if torch.any(topk_current_vs == self.eos_id): + finished_ids = torch.where(topk_current_vs == self.eos_id) + num_successors = [1] * batch_size + + for (batch_idx, idx) in zip(*finished_ids): + self.finished[batch_idx].append(self.ongoing_beams[batch_idx, idx]) + self.finished_ps[batch_idx].append(self.cumulative_ps[batch_idx, idx]) + + if self.beam_size != 1: + eos_count = self._get_successor( + current_ps=current_ps, + current_vs=current_vs, + finished_ids=(batch_idx, 
idx), + num_successor=num_successors[batch_idx], + eos_count=1, + k=self.beam_size, + ) + num_successors[batch_idx] += eos_count + + input_var = self.ongoing_beams[:, :, -1] + input_var = input_var.view(batch_size * self.beam_size, -1) + + return self._get_hypothesis() diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_rnn_transducer.py b/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_rnn_transducer.py new file mode 100644 index 000000000..f752ee187 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_rnn_transducer.py @@ -0,0 +1,156 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch + +from openspeech.search.beam_search_base import OpenspeechBeamSearchBase +from openspeech.decoders import RNNTransducerDecoder + + +class BeamSearchRNNTransducer(OpenspeechBeamSearchBase): + r""" + RNN Transducer Beam Search + Reference: RNN-T FOR LATENCY CONTROLLED ASR WITH IMPROVED BEAM SEARCH (https://arxiv.org/pdf/1911.01629.pdf) + + Args: joint, decoder, beam_size, expand_beam, state_beam, blank_id + joint: joint `encoder_outputs` and `decoder_outputs` + decoder (TransformerTransducerDecoder): base decoder of transformer transducer model. + beam_size (int): size of beam. + expand_beam (int): The threshold coefficient to limit the number of expanded hypotheses. + state_beam (int): The threshold coefficient to decide if hyps in A (process_hyps) + is likely to compete with hyps in B (ongoing_beams) + blank_id (int): blank id + + Inputs: encoder_output, max_length + encoder_output (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size + ``(seq_length, dimension)`` + max_length (int): max decoding time step + + Returns: + * predictions (torch.LongTensor): model predictions. + """ + def __init__( + self, + joint, + decoder: RNNTransducerDecoder, + beam_size: int = 3, + expand_beam: float = 2.3, + state_beam: float = 4.6, + blank_id: int = 3, + ) -> None: + super(BeamSearchRNNTransducer, self).__init__(decoder, beam_size) + self.joint = joint + self.expand_beam = expand_beam + self.state_beam = state_beam + self.blank_id = blank_id + + def forward(self, encoder_outputs: torch.Tensor, max_length: int): + r""" + Beam search decoding. + + Inputs: encoder_output, max_length + encoder_outputs (torch.FloatTensor): A output sequence of encoders. 
`FloatTensor` of size + ``(batch, seq_length, dimension)`` + max_length (int): max decoding time step + + Returns: + * predictions (torch.LongTensor): model predictions. + """ + hypothesis = list() + hypothesis_score = list() + + for batch_idx in range(encoder_outputs.size(0)): + blank = ( + torch.ones((1, 1), device=encoder_outputs.device, dtype=torch.long) * self.blank_id + ) + step_input = ( + torch.ones((1, 1), device=encoder_outputs.device, dtype=torch.long) * self.sos_id + ) + hyp = { + "prediction": [self.sos_id], + "logp_score": 0.0, + "hidden_states": None, + } + ongoing_beams = [hyp] + + for t_step in range(max_length): + process_hyps = ongoing_beams + ongoing_beams = list() + + while True: + if len(ongoing_beams) >= self.beam_size: + break + + a_best_hyp = max(process_hyps, key=lambda x: x["logp_score"] / len(x["prediction"])) + + if len(ongoing_beams) > 0: + b_best_hyp = max( + ongoing_beams, + key=lambda x: x["logp_score"] / len(x["prediction"]), + ) + + a_best_prob = a_best_hyp["logp_score"] + b_best_prob = b_best_hyp["logp_score"] + + if b_best_prob >= self.state_beam + a_best_prob: + break + + process_hyps.remove(a_best_hyp) + + step_input[0, 0] = a_best_hyp["prediction"][-1] + + step_outputs, hidden_states = self.decoder(step_input, a_best_hyp["hidden_states"]) + log_probs = self.joint(encoder_outputs[batch_idx, t_step, :], step_outputs.view(-1)) + + topk_targets, topk_idx = log_probs.topk(k=self.beam_size) + + if topk_idx[0] != blank: + best_logp = topk_targets[0] + else: + best_logp = topk_targets[1] + + for j in range(topk_targets.size(0)): + topk_hyp = { + "prediction": a_best_hyp["prediction"][:], + "logp_score": a_best_hyp["logp_score"] + topk_targets[j], + "hidden_states": a_best_hyp["hidden_states"], + } + + if topk_idx[j] == self.blank_id: + ongoing_beams.append(topk_hyp) + continue + + if topk_targets[j] >= best_logp - self.expand_beam: + topk_hyp["prediction"].append(topk_idx[j].item()) + topk_hyp["hidden_states"] = hidden_states + process_hyps.append(topk_hyp) + + ongoing_beams = sorted( + ongoing_beams, + key=lambda x: x["logp_score"] / len(x["prediction"]), + reverse=True, + )[0] + + hypothesis.append(torch.LongTensor(ongoing_beams["prediction"][1:])) + hypothesis_score.append(ongoing_beams["logp_score"] / len(ongoing_beams["prediction"])) + + return self._fill_sequence(hypothesis) \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_transformer.py b/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_transformer.py new file mode 100644 index 000000000..33e12a784 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_transformer.py @@ -0,0 +1,155 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
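The transducer beam search above ranks hypotheses by length-normalised log-probability (logp_score / len(prediction)), both when choosing which hypothesis to expand next and when returning the final beam. A self-contained sketch of that ranking rule, using made-up hypothesis dicts in the same shape as the code above:

    hyps = [
        {"prediction": [1, 42, 17],        "logp_score": -4.2},
        {"prediction": [1, 42, 17, 30, 9], "logp_score": -6.0},
    ]

    def norm_score(hyp):
        # Length normalisation keeps longer hypotheses competitive with short ones.
        return hyp["logp_score"] / len(hyp["prediction"])

    best = max(hyps, key=norm_score)
    print([round(norm_score(h), 2) for h in hyps], best["prediction"])
    # [-1.4, -1.2] [1, 42, 17, 30, 9]: the longer hypothesis wins despite a lower raw score.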
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch + +from openspeech.search.beam_search_base import OpenspeechBeamSearchBase +from openspeech.decoders import TransformerDecoder + + +class BeamSearchTransformer(OpenspeechBeamSearchBase): + r""" + Transformer Beam Search Decoder + + Args: decoder, beam_size, batch_size + decoder (DecoderLSTM): base decoder of lstm model. + beam_size (int): size of beam. + + Inputs: encoder_outputs, targets, encoder_output_lengths, teacher_forcing_ratio + encoder_outputs (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size ``(batch, seq_length, dimension)`` + targets (torch.LongTensor): A target sequence passed to decoders. `IntTensor` of size ``(batch, seq_length)`` + encoder_output_lengths (torch.LongTensor): A encoder output lengths sequence. `LongTensor` of size ``(batch)`` + teacher_forcing_ratio (float): Ratio of teacher forcing. + + Returns: + * logits (torch.FloatTensor): Log probability of model predictions. + """ + def __init__(self, decoder: TransformerDecoder, beam_size: int = 3) -> None: + super(BeamSearchTransformer, self).__init__(decoder, beam_size) + self.use_cuda = True if torch.cuda.is_available() else False + + def forward( + self, + encoder_outputs: torch.FloatTensor, + encoder_output_lengths: torch.FloatTensor, + ): + batch_size = encoder_outputs.size(0) + + self.finished = [[] for _ in range(batch_size)] + self.finished_ps = [[] for _ in range(batch_size)] + + decoder_inputs = torch.IntTensor(batch_size, self.decoder.max_length).fill_(self.sos_id).long() + decoder_input_lengths = torch.IntTensor(batch_size).fill_(1) + + outputs = self.forward_step( + decoder_inputs=decoder_inputs[:, :1], + decoder_input_lengths=decoder_input_lengths, + encoder_outputs=encoder_outputs, + encoder_output_lengths=encoder_output_lengths, + positional_encoding_length=1, + ) + step_outputs = self.decoder.fc(outputs).log_softmax(dim=-1) + self.cumulative_ps, self.ongoing_beams = step_outputs.topk(self.beam_size) + + self.ongoing_beams = self.ongoing_beams.view(batch_size * self.beam_size, 1) + self.cumulative_ps = self.cumulative_ps.view(batch_size * self.beam_size, 1) + + decoder_inputs = torch.IntTensor(batch_size * self.beam_size, 1).fill_(self.sos_id) + decoder_inputs = torch.cat((decoder_inputs, self.ongoing_beams), dim=-1) # bsz * beam x 2 + + encoder_dim = encoder_outputs.size(2) + encoder_outputs = self._inflate(encoder_outputs, self.beam_size, dim=0) + encoder_outputs = encoder_outputs.view(self.beam_size, batch_size, -1, encoder_dim) + encoder_outputs = encoder_outputs.transpose(0, 1) + encoder_outputs = encoder_outputs.reshape(batch_size * self.beam_size, -1, encoder_dim) + + encoder_output_lengths = encoder_output_lengths.unsqueeze(1).repeat(1, self.beam_size).view(-1) + + for di in range(2, self.decoder.max_length): + if self._is_all_finished(self.beam_size): + break + + decoder_input_lengths = torch.LongTensor(batch_size * self.beam_size).fill_(di) + + step_outputs = self.forward_step( + decoder_inputs=decoder_inputs[:, :di], + decoder_input_lengths=decoder_input_lengths, + 
encoder_outputs=encoder_outputs, + encoder_output_lengths=encoder_output_lengths, + positional_encoding_length=di, + ) + step_outputs = self.decoder.fc(step_outputs).log_softmax(dim=-1) + + step_outputs = step_outputs.view(batch_size, self.beam_size, -1, 10) + current_ps, current_vs = step_outputs.topk(self.beam_size) + + # TODO: Check transformer's beam search + current_ps = current_ps[:, :, -1, :] + current_vs = current_vs[:, :, -1, :] + + self.cumulative_ps = self.cumulative_ps.view(batch_size, self.beam_size) + self.ongoing_beams = self.ongoing_beams.view(batch_size, self.beam_size, -1) + + current_ps = (current_ps.permute(0, 2, 1) + self.cumulative_ps.unsqueeze(1)).permute(0, 2, 1) + current_ps = current_ps.view(batch_size, self.beam_size ** 2) + current_vs = current_vs.contiguous().view(batch_size, self.beam_size ** 2) + + self.cumulative_ps = self.cumulative_ps.view(batch_size, self.beam_size) + self.ongoing_beams = self.ongoing_beams.view(batch_size, self.beam_size, -1) + + topk_current_ps, topk_status_ids = current_ps.topk(self.beam_size) + prev_status_ids = (topk_status_ids // self.beam_size) + + topk_current_vs = torch.zeros((batch_size, self.beam_size), dtype=torch.long) + prev_status = torch.zeros(self.ongoing_beams.size(), dtype=torch.long) + + for batch_idx, batch in enumerate(topk_status_ids): + for idx, topk_status_idx in enumerate(batch): + topk_current_vs[batch_idx, idx] = current_vs[batch_idx, topk_status_idx] + prev_status[batch_idx, idx] = self.ongoing_beams[batch_idx, prev_status_ids[batch_idx, idx]] + + self.ongoing_beams = torch.cat([prev_status, topk_current_vs.unsqueeze(2)], dim=2) + self.cumulative_ps = topk_current_ps + + if torch.any(topk_current_vs == self.eos_id): + finished_ids = torch.where(topk_current_vs == self.eos_id) + num_successors = [1] * batch_size + + for (batch_idx, idx) in zip(*finished_ids): + self.finished[batch_idx].append(self.ongoing_beams[batch_idx, idx]) + self.finished_ps[batch_idx].append(self.cumulative_ps[batch_idx, idx]) + + if self.beam_size != 1: + eos_count = self._get_successor( + current_ps=current_ps, + current_vs=current_vs, + finished_ids=(batch_idx, idx), + num_successor=num_successors[batch_idx], + eos_count=1, + k=self.beam_size, + ) + num_successors[batch_idx] += eos_count + + ongoing_beams = self.ongoing_beams.clone().view(batch_size * self.beam_size, -1) + decoder_inputs = torch.cat((decoder_inputs, ongoing_beams[:, :-1]), dim=-1) + + return self._get_hypothesis() diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_transformer_transducer.py b/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_transformer_transducer.py new file mode 100644 index 000000000..8dfc01048 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/search/beam_search_transformer_transducer.py @@ -0,0 +1,156 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
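Both attention-based searches above (BeamSearchLSTM and BeamSearchTransformer) expand the encoder output from (batch, T, D) to (batch * beam, T, D) so that every beam of an utterance shares that utterance's encoder states, via repeat followed by view/transpose/reshape. A standalone sketch of that expansion with illustrative shapes:

    import torch

    batch, beam, t, d = 2, 3, 5, 4
    encoder_outputs = torch.randn(batch, t, d)

    # repeat along dim 0 gives (beam * batch, T, D), grouped by beam copy first ...
    inflated = encoder_outputs.repeat(beam, 1, 1)
    # ... then regroup so rows 0..beam-1 belong to utterance 0, the next beam rows to utterance 1, etc.
    inflated = inflated.view(beam, batch, t, d).transpose(0, 1).reshape(batch * beam, t, d)

    assert torch.equal(inflated[0], encoder_outputs[0])
    assert torch.equal(inflated[beam], encoder_outputs[1])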
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch + +from openspeech.search.beam_search_base import OpenspeechBeamSearchBase +from openspeech.decoders import TransformerTransducerDecoder + + +class BeamSearchTransformerTransducer(OpenspeechBeamSearchBase): + r""" + Transformer Transducer Beam Search + Reference: RNN-T FOR LATENCY CONTROLLED ASR WITH IMPROVED BEAM SEARCH (https://arxiv.org/pdf/1911.01629.pdf) + + Args: joint, decoder, beam_size, expand_beam, state_beam, blank_id + joint: joint `encoder_outputs` and `decoder_outputs` + decoder (TransformerTransducerDecoder): base decoder of transformer transducer model. + beam_size (int): size of beam. + expand_beam (int): The threshold coefficient to limit the number + of expanded hypotheses that are added in A (process_hyp). + state_beam (int): The threshold coefficient in log space to decide if hyps in A (process_hyps) + is likely to compete with hyps in B (ongoing_beams) + blank_id (int): blank id + + Inputs: encoder_outputs, max_length + encoder_outputs (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size + ``(batch, seq_length, dimension)`` + max_length (int): max decoding time step + + Returns: + * predictions (torch.LongTensor): model predictions. + """ + def __init__( + self, + joint, + decoder: TransformerTransducerDecoder, + beam_size: int = 3, + expand_beam: float = 2.3, + state_beam: float = 4.6, + blank_id: int = 3, + ) -> None: + super(BeamSearchTransformerTransducer, self).__init__(decoder, beam_size) + self.joint = joint + self.forward_step = self.decoder.forward_step + self.expand_beam = expand_beam + self.state_beam = state_beam + self.blank_id = blank_id + + def forward(self, encoder_outputs: torch.Tensor, max_length: int): + r""" + Beam search decoding. + + Inputs: encoder_outputs, max_length + encoder_outputs (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size + ``(batch, seq_length, dimension)`` + max_length (int): max decoding time step + + Returns: + * predictions (torch.LongTensor): model predictions. 
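The two thresholds described above act as log-space pruning rules inside the search loop that follows: state_beam stops pulling more candidates out of the expansion set once the ongoing beam is already far ahead, and expand_beam keeps only emissions whose log-probability is close to the best non-blank emission. A compact sketch of both tests with illustrative numbers:

    state_beam, expand_beam = 4.6, 2.3

    def stop_expanding(a_best_logp: float, b_best_logp: float) -> bool:
        # Stop when the best ongoing hypothesis leads the best expandable one by >= state_beam.
        return b_best_logp >= state_beam + a_best_logp

    def keep_emission(logp: float, best_nonblank_logp: float) -> bool:
        # Keep an emission only if it is within expand_beam of the best non-blank emission.
        return logp >= best_nonblank_logp - expand_beam

    print(stop_expanding(-12.0, -6.0))   # True: -6.0 >= 4.6 + (-12.0) = -7.4
    print(keep_emission(-3.0, -1.5))     # True: -3.0 >= -1.5 - 2.3 = -3.8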
+ """ + hypothesis = list() + hypothesis_score = list() + + for batch_idx in range(encoder_outputs.size(0)): + blank = ( + torch.ones((1, 1), device=encoder_outputs.device, dtype=torch.long) * self.blank_id + ) + step_input = ( + torch.ones((1, 1), device=encoder_outputs.device, dtype=torch.long) * self.sos_id + ) + hyp = { + "prediction": [self.sos_id], + "logp_score": 0.0, + } + ongoing_beams = [hyp] + + for t_step in range(max_length): + process_hyps = ongoing_beams + ongoing_beams = list() + + while True: + if len(ongoing_beams) >= self.beam_size: + break + + a_best_hyp = max(process_hyps, key=lambda x: x["logp_score"] / len(x["prediction"])) + + if len(ongoing_beams) > 0: + b_best_hyp = max( + ongoing_beams, + key=lambda x: x["logp_score"] / len(x["prediction"]), + ) + + a_best_prob = a_best_hyp["logp_score"] + b_best_prob = b_best_hyp["logp_score"] + + if b_best_prob >= self.state_beam + a_best_prob: + break + + process_hyps.remove(a_best_hyp) + + step_input[0, 0] = a_best_hyp["prediction"][-1] + step_lengths = encoder_outputs.new_tensor([0], dtype=torch.long) + + step_outputs = self.forward_step(step_input, step_lengths).squeeze(0).squeeze(0) + log_probs = self.joint(encoder_outputs[batch_idx, t_step, :], step_outputs) + + topk_targets, topk_idx = log_probs.topk(k=self.beam_size) + + if topk_idx[0] != blank: + best_logp = topk_targets[0] + else: + best_logp = topk_targets[1] + + for j in range(topk_targets.size(0)): + topk_hyp = { + "prediction": a_best_hyp["prediction"][:], + "logp_score": a_best_hyp["logp_score"] + topk_targets[j], + } + + if topk_idx[j] == self.blank_id: + ongoing_beams.append(topk_hyp) + continue + + if topk_targets[j] >= best_logp - self.expand_beam: + topk_hyp["prediction"].append(topk_idx[j].item()) + process_hyps.append(topk_hyp) + + ongoing_beams = sorted( + ongoing_beams, + key=lambda x: x["logp_score"] / len(x["prediction"]), + reverse=True, + )[0] + + hypothesis.append(torch.LongTensor(ongoing_beams["prediction"][1:])) + hypothesis_score.append(ongoing_beams["logp_score"] / len(ongoing_beams["prediction"])) + + return self._fill_sequence(hypothesis) \ No newline at end of file diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/search/ensemble_search.py b/audio/speech_recognition/conformer/pytorch/openspeech/search/ensemble_search.py new file mode 100644 index 000000000..5114c7207 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/search/ensemble_search.py @@ -0,0 +1,96 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from typing import Union + + +class EnsembleSearch(nn.Module): + """ + Class for ensemble search. + + Args: + models (tuple): list of ensemble model + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be + a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + - **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + * predictions (torch.LongTensor): prediction of ensemble models + """ + def __init__(self, models: Union[list, tuple]): + super(EnsembleSearch, self).__init__() + assert len(models) > 1, "Ensemble search should be multiple models." + self.models = models + + def forward(self, inputs: torch.FloatTensor, input_lengths: torch.LongTensor): + logits = list() + + for model in self.models: + output = model(inputs, input_lengths) + logits.append(output["logits"]) + + output = logits[0] + + for logit in logits[1:]: + output += logit + + return output.max(-1)[1] + + +class WeightedEnsembleSearch(nn.Module): + """ + Args: + models (tuple): list of ensemble model + weights (tuple: list of ensemble's weight + + Inputs: + - **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be + a padded `FloatTensor` of size ``(batch, seq_length, dimension)``. + - **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)`` + + Returns: + * predictions (torch.LongTensor): prediction of ensemble models + """ + def __init__(self, models: Union[list, tuple], weights: Union[list, tuple]): + super(WeightedEnsembleSearch, self).__init__() + assert len(models) > 1, "Ensemble search should be multiple models." + assert len(models) == len(weights), "len(models), len(weight) should be same." + self.models = models + self.weights = weights + + def forward(self, inputs: torch.FloatTensor, input_lengths: torch.LongTensor): + logits = list() + + for model in self.models: + output = model(inputs, input_lengths) + logits.append(output["logits"]) + + output = logits[0] * self.weights[0] + + for idx, logit in enumerate(logits[1:]): + output += logit * self.weights[1] + + return output.max(-1)[1] diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/__init__.py new file mode 100644 index 000000000..792899417 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/__init__.py @@ -0,0 +1,75 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
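EnsembleSearch above sums raw logits across models and takes the argmax; WeightedEnsembleSearch is meant to do the same with one scalar weight per model (the loop above reuses self.weights[1] for every model after the first, which looks unintended). A self-contained sketch of the weighted combination, applying one weight per model:

    import torch

    def weighted_ensemble(logits_per_model, weights):
        # logits_per_model: list of (batch, seq_len, vocab) tensors, one per model.
        assert len(logits_per_model) == len(weights)
        combined = sum(w * logits for w, logits in zip(weights, logits_per_model))
        return combined.max(-1)[1]       # (batch, seq_len) predicted token ids

    logits_a = torch.randn(2, 7, 10)
    logits_b = torch.randn(2, 7, 10)
    preds = weighted_ensemble([logits_a, logits_b], weights=[0.7, 0.3])
    print(preds.shape)                   # torch.Size([2, 7])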
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import importlib + +TOKENIZER_REGISTRY = dict() +TOKENIZER_DATACLASS_REGISTRY = dict() + + +def register_tokenizer(name: str, dataclass=None): + """ + New vocab types can be added to OpenSpeech with the :func:`register_tokenizer` function decorator. + + For example:: + @register_tokenizer('kspon_character') + class KsponSpeechCharacterTokenizer: + (...) + + .. note:: All vocabs must implement the :class:`cls.__name__` interface. + + Args: + name (str): the name of the tokenizer + """ + + def register_tokenizer_cls(cls): + if name in TOKENIZER_REGISTRY: + raise ValueError(f"Cannot register duplicate tokenizer ({name})") + + TOKENIZER_REGISTRY[name] = cls + + cls.__dataclass = dataclass + if dataclass is not None: + if name in TOKENIZER_DATACLASS_REGISTRY: + raise ValueError(f"Cannot register duplicate tokenizer ({name})") + TOKENIZER_DATACLASS_REGISTRY[name] = dataclass + + return cls + + return register_tokenizer_cls + + +tokenizer_dir = os.path.dirname(__file__) +for file in os.listdir(tokenizer_dir): + if os.path.isdir(os.path.join(tokenizer_dir, file)) and file != '__pycache__': + for subfile in os.listdir(os.path.join(tokenizer_dir, file)): + path = os.path.join(tokenizer_dir, file, subfile) + if subfile.endswith(".py"): + tokenizer_name = subfile[: subfile.find(".py")] if subfile.endswith(".py") else subfile + module = importlib.import_module(f"openspeech.tokenizers.{file}.{tokenizer_name}") + continue + + path = os.path.join(tokenizer_dir, file) + if file.endswith(".py"): + vocab_name = file[: file.find(".py")] if file.endswith(".py") else file + module = importlib.import_module(f"openspeech.tokenizers.{vocab_name}") diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/aishell/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/aishell/__init__.py new file mode 100644 index 000000000..2a79fa44c --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/aishell/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/aishell/character.py b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/aishell/character.py new file mode 100644 index 000000000..ebcfb9830 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/aishell/character.py @@ -0,0 +1,138 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import csv +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import TokenizerConfigs +from openspeech.tokenizers import register_tokenizer +from openspeech.tokenizers.tokenizer import Tokenizer + + +@dataclass +class AIShellCharacterTokenizerConfigs(TokenizerConfigs): + unit: str = field( + default="aishell_character", metadata={"help": "Unit of vocabulary."} + ) + vocab_path: str = field( + default="../../../data_aishell/aishell_labels.csv", metadata={"help": "Path of vocabulary file."} + ) + + +@register_tokenizer("aishell_character", dataclass=AIShellCharacterTokenizerConfigs) +class AIShellCharacterTokenizer(Tokenizer): + r""" + Tokenizer class in Character-units for AISHELL. + + Args: + configs (DictConfig): configuration set. + """ + + def __init__(self, configs): + super(AIShellCharacterTokenizer, self).__init__() + self.vocab_dict, self.id_dict = self.load_vocab( + vocab_path=configs.tokenizer.vocab_path, + encoding=configs.tokenizer.encoding, + ) + self.labels = self.vocab_dict.keys() + self.sos_token = configs.tokenizer.sos_token + self.eos_token = configs.tokenizer.eos_token + self.pad_token = configs.tokenizer.pad_token + self.sos_id = int(self.vocab_dict[configs.tokenizer.sos_token]) + self.eos_id = int(self.vocab_dict[configs.tokenizer.eos_token]) + self.pad_id = int(self.vocab_dict[configs.tokenizer.pad_token]) + self.blank_id = int(self.vocab_dict[configs.tokenizer.blank_token]) + self.vocab_path = configs.tokenizer.vocab_path + + def __len__(self): + return len(self.labels) + + def decode(self, labels): + r""" + Converts label to string. 
+ + Args: + labels (numpy.ndarray): number label + + Returns: sentence + - **sentence** (str or list): symbol of labels + """ + if len(labels.shape) == 1: + sentence = str() + for label in labels: + if label.item() == self.eos_id: + break + elif label.item() == self.blank_id: + continue + sentence += self.id_dict[label.item()] + return sentence + + sentences = list() + for batch in labels: + sentence = str() + for label in batch: + if label.item() == self.eos_id: + break + elif label.item() == self.blank_id: + continue + sentence += self.id_dict[label.item()] + sentences.append(sentence) + return sentences + + def encode(self, sentence): + label = str() + + for ch in sentence: + try: + label += (str(self.vocab_dict[ch]) + ' ') + except KeyError: + continue + + return label[:-1] + + def load_vocab(self, vocab_path, encoding='utf-8'): + r""" + Provides char2id, id2char + + Args: + vocab_path (str): csv file with character labels + encoding (str): encoding method + + Returns: unit2id, id2unit + - **unit2id** (dict): unit2id[unit] = id + - **id2unit** (dict): id2unit[id] = unit + """ + unit2id = dict() + id2unit = dict() + + try: + with open(vocab_path, 'r', encoding=encoding) as f: + labels = csv.reader(f, delimiter=',') + next(labels) + + for row in labels: + unit2id[row[1]] = row[0] + id2unit[int(row[0])] = row[1] + + return unit2id, id2unit + except IOError: + raise IOError("Character label file (csv format) doesn`t exist : {0}".format(vocab_path)) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/__init__.py new file mode 100644 index 000000000..2a79fa44c --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
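The character tokenizers in this patch (the AISHELL one above and the KsponSpeech one that follows) assume the same vocabulary CSV layout: a header row, then id,char rows, from which load_vocab builds the char-to-id and id-to-char maps. A tiny self-contained sketch of that format and the resulting encode/decode behaviour, using a hypothetical label file rather than the real aishell_labels.csv / aihub_labels.csv:

    import csv
    import io

    # Hypothetical vocab file: header row, then "id,char" rows.
    vocab_csv = "id,char\n0,<pad>\n1,<sos>\n2,<eos>\n3,<blank>\n4,你\n5,好\n"

    unit2id, id2unit = {}, {}
    rows = csv.reader(io.StringIO(vocab_csv), delimiter=',')
    next(rows)                           # skip the header, as load_vocab does
    for row in rows:
        unit2id[row[1]] = row[0]
        id2unit[int(row[0])] = row[1]

    encoded = ' '.join(unit2id[ch] for ch in "你好")   # "4 5"
    decoded = ''.join(id2unit[i] for i in (4, 5))      # "你好"
    print(encoded, decoded)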
diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/character.py b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/character.py new file mode 100644 index 000000000..935ce99cf --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/character.py @@ -0,0 +1,134 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import csv +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import TokenizerConfigs +from openspeech.tokenizers import register_tokenizer +from openspeech.tokenizers.tokenizer import Tokenizer + + +@dataclass +class KsponSpeechCharacterTokenizerConfigs(TokenizerConfigs): + unit: str = field( + default="kspon_character", metadata={"help": "Unit of vocabulary."} + ) + vocab_path: str = field( + default="../../../aihub_labels.csv", metadata={"help": "Path of vocabulary file."} + ) + + +@register_tokenizer("kspon_character", dataclass=KsponSpeechCharacterTokenizerConfigs) +class KsponSpeechCharacterTokenizer(Tokenizer): + r""" + Tokenizer class in Character-units for KsponSpeech. + + Args: + configs (DictConfig): configuration set. 
+ """ + def __init__(self, configs): + super(KsponSpeechCharacterTokenizer, self).__init__() + self.vocab_dict, self.id_dict = self.load_vocab( + vocab_path=configs.tokenizer.vocab_path, + encoding=configs.tokenizer.encoding, + ) + self.labels = self.vocab_dict.keys() + self.sos_id = int(self.vocab_dict[configs.tokenizer.sos_token]) + self.eos_id = int(self.vocab_dict[configs.tokenizer.eos_token]) + self.pad_id = int(self.vocab_dict[configs.tokenizer.pad_token]) + self.blank_id = int(self.vocab_dict[configs.tokenizer.blank_token]) + self.vocab_path = configs.tokenizer.vocab_path + + def __len__(self): + return len(self.labels) + + def decode(self, labels): + r""" + Converts label to string (number => Hangeul) + + Args: + labels (numpy.ndarray): number label + + Returns: sentence + - **sentence** (str or list): symbol of labels + """ + if len(labels.shape) == 1: + sentence = str() + for label in labels: + if label.item() == self.eos_id: + break + elif label.item() == self.blank_id: + continue + sentence += self.id_dict[label.item()] + return sentence + + sentences = list() + for batch in labels: + sentence = str() + for label in batch: + if label.item() == self.eos_id: + break + elif label.item() == self.blank_id: + continue + sentence += self.id_dict[label.item()] + sentences.append(sentence) + return sentences + + def encode(self, sentence): + label = str() + + for ch in sentence: + try: + label += (str(self.vocab_dict[ch]) + ' ') + except KeyError: + continue + + return label[:-1] + + def load_vocab(self, vocab_path, encoding='utf-8'): + r""" + Provides char2id, id2char + + Args: + vocab_path (str): csv file with character labels + encoding (str): encoding method + + Returns: unit2id, id2unit + - **unit2id** (dict): unit2id[unit] = id + - **id2unit** (dict): id2unit[id] = unit + """ + unit2id = dict() + id2unit = dict() + + try: + with open(vocab_path, 'r', encoding=encoding) as f: + labels = csv.reader(f, delimiter=',') + next(labels) + + for row in labels: + unit2id[row[1]] = row[0] + id2unit[int(row[0])] = row[1] + + return unit2id, id2unit + except IOError: + raise IOError("Character label file (csv format) doesn`t exist : {0}".format(vocab_path)) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/grapheme.py b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/grapheme.py new file mode 100644 index 000000000..72245e302 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/grapheme.py @@ -0,0 +1,134 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import csv +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import TokenizerConfigs +from openspeech.tokenizers import register_tokenizer +from openspeech.tokenizers.tokenizer import Tokenizer + + +@dataclass +class KsponSpeechGraphemeTokenizerConfigs(TokenizerConfigs): + unit: str = field( + default="kspon_grapheme", metadata={"help": "Unit of vocabulary."} + ) + vocab_path: str = field( + default="../../../aihub_labels.csv", metadata={"help": "Path of vocabulary file."} + ) + + +@register_tokenizer("kspon_grapheme", dataclass=KsponSpeechGraphemeTokenizerConfigs) +class KsponSpeechGraphemeTokenizer(Tokenizer): + """ + Tokenizer class in Grapheme-units for KsponSpeech. + + Args: + configs (DictConfig): configuration set. + """ + def __init__(self, configs): + super(KsponSpeechGraphemeTokenizer, self).__init__() + self.vocab_dict, self.id_dict = self.load_vocab( + vocab_path=configs.tokenizer.vocab_path, + encoding=configs.tokenizer.encoding, + ) + self.labels = self.vocab_dict.keys() + self.sos_id = int(self.vocab_dict[configs.tokenizer.sos_token]) + self.eos_id = int(self.vocab_dict[configs.tokenizer.eos_token]) + self.pad_id = int(self.vocab_dict[configs.tokenizer.pad_token]) + self.blank_id = int(self.vocab_dict[configs.tokenizer.blank_token]) + self.vocab_path = configs.tokenizer.vocab_path + + def __len__(self): + return len(self.vocab_dict) + + def decode(self, labels): + """ + Converts label to string (number => Hangeul) + + Args: + labels (numpy.ndarray): number label + + Returns: sentence + - **sentence** (str or list): symbol of labels + """ + if len(labels.shape) == 1: + sentence = str() + for label in labels: + if label.item() == self.eos_id: + break + elif label.item() == self.blank_id: + continue + sentence += self.id_dict[label.item()] + return sentence + + sentences = list() + for batch in labels: + sentence = str() + for label in batch: + if label.item() == self.eos_id: + break + elif label.item() == self.blank_id: + continue + sentence += self.id_dict[label.item()] + sentences.append(sentence) + return sentences + + def encode(self, sentence): + label = str() + + for ch in sentence: + try: + label += (str(self.vocab_dict[ch]) + ' ') + except KeyError: + continue + + return label[:-1] + + def load_vocab(self, vocab_path, encoding='utf-8'): + """ + Provides char2id, id2char + + Args: + vocab_path (str): csv file with character labels + encoding (str): encoding method + + Returns: unit2id, id2unit + - **unit2id** (dict): unit2id[unit] = id + - **id2unit** (dict): id2unit[id] = unit + """ + unit2id = dict() + id2unit = dict() + + try: + with open(vocab_path, 'r', encoding=encoding) as f: + labels = csv.reader(f, delimiter=',') + next(labels) + + for row in labels: + unit2id[row[1]] = row[0] + id2unit[int(row[0])] = row[1] + + return unit2id, id2unit + except IOError: + raise IOError("Character label file (csv format) doesn`t exist : {0}".format(vocab_path)) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/subword.py b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/subword.py new file mode 100644 index 000000000..8b56dfe6d --- /dev/null +++ 
b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/ksponspeech/subword.py @@ -0,0 +1,100 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import sentencepiece as spm +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import TokenizerConfigs +from openspeech.tokenizers import register_tokenizer +from openspeech.tokenizers.tokenizer import Tokenizer + + +@dataclass +class KsponSpeechSubwordTokenizerConfigs(TokenizerConfigs): + unit: str = field( + default="kspon_subword", metadata={"help": "Unit of vocabulary."} + ) + sp_model_path: str = field( + default="sp.model", metadata={"help": "Path of sentencepiece model."} + ) + sos_token: str = field( + default="", metadata={"help": "Start of sentence token"} + ) + eos_token: str = field( + default="", metadata={"help": "End of sentence token"} + ) + vocab_size: int = field( + default=3200, metadata={"help": "Size of vocabulary."} + ) + + +@register_tokenizer("kspon_subword", dataclass=KsponSpeechSubwordTokenizerConfigs) +class KsponSpeechSubwordTokenizer(Tokenizer): + """ + Tokenizer class in Subword-units for KsponSpeech. + + Args: + configs (DictConfig): configuration set. 
+ """ + def __init__(self, configs): + super(KsponSpeechSubwordTokenizer, self).__init__() + self.sp = spm.SentencePieceProcessor() + self.sp.Load(configs.tokenizer.sp_model_path) + + self.vocab_dict = [[self.sp.id_to_piece(id), id] for id in range(self.sp.get_piece_size())] + self.labels = [item[0] for item in self.vocab_dict] + + self.pad_id = self.sp.PieceToId(configs.tokenizer.pad_token) + self.sos_id = self.sp.PieceToId(configs.tokenizer.sos_token) + self.eos_id = self.sp.PieceToId(configs.tokenizer.eos_token) + self.blank_id = self.sp.PieceToId(configs.tokenizer.blank_token) + self.vocab_size = configs.tokenizer.vocab_size + + def __len__(self): + return self.vocab_size + + def decode(self, labels): + """ + Converts label to string (number => Hangeul) + + Args: + labels (numpy.ndarray): number label + + Returns: sentence + - **sentence** (str or list): symbol of labels + """ + if len(labels.shape) == 1: + return self.sp.DecodeIds([int(l) for l in labels]) + + sentences = list() + for batch in labels: + sentence = str() + for label in batch: + sentence = self.sp.DecodeIds([int(l) for l in label]) + sentences.append(sentence) + return sentences + + def encode(self, sentence): + text = " ".join(self.sp.EncodeAsPieces(sentence)) + label = " ".join([str(self.sp.PieceToId(token)) for token in text]) + return label + diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/librispeech/__init__.py b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/librispeech/__init__.py new file mode 100644 index 000000000..2a79fa44c --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/librispeech/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/librispeech/character.py b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/librispeech/character.py new file mode 100644 index 000000000..1ec98d7b6 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/librispeech/character.py @@ -0,0 +1,135 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import csv +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import TokenizerConfigs +from openspeech.tokenizers import register_tokenizer +from openspeech.tokenizers.tokenizer import Tokenizer + + +@dataclass +class LibriSpeechCharacterTokenizerConfigs(TokenizerConfigs): + unit: str = field( + default="libri_character", metadata={"help": "Unit of vocabulary."} + ) + vocab_path: str = field( + default="../../../LibriSpeech/libri_labels.csv", metadata={"help": "Path of vocabulary file."} + ) + + +@register_tokenizer("libri_character", dataclass=LibriSpeechCharacterTokenizerConfigs) +class LibriSpeechCharacterTokenizer(Tokenizer): + r""" + Tokenizer class in Character-units for LibriSpeech. + + Args: + configs (DictConfig): configuration set. 
+ """ + def __init__(self, configs): + super(LibriSpeechCharacterTokenizer, self).__init__() + self.vocab_dict, self.id_dict = self.load_vocab( + vocab_path=configs.tokenizer.vocab_path, + encoding=configs.tokenizer.encoding, + ) + self.labels = self.vocab_dict.keys() + self.sos_id = int(self.vocab_dict[configs.tokenizer.sos_token]) + self.eos_id = int(self.vocab_dict[configs.tokenizer.eos_token]) + self.pad_id = int(self.vocab_dict[configs.tokenizer.pad_token]) + self.blank_id = int(self.vocab_dict[configs.tokenizer.blank_token]) + self.vocab_path = configs.tokenizer.vocab_path + + def __len__(self): + return len(self.labels) + + def decode(self, labels): + r""" + Converts label to string (number => Hangeul) + + Args: + labels (numpy.ndarray): number label + + Returns: sentence + - **sentence** (str or list): symbol of labels + """ + if len(labels.shape) == 1: + sentence = str() + for label in labels: + if label.item() == self.eos_id: + break + elif label.item() == self.blank_id: + continue + sentence += self.id_dict[label.item()] + return sentence + + sentences = list() + for batch in labels: + sentence = str() + for label in batch: + if label.item() == self.eos_id: + break + elif label.item() == self.blank_id: + continue + sentence += self.id_dict[label.item()] + sentences.append(sentence) + return sentences + + def encode(self, sentence): + label = str() + + for ch in sentence: + try: + label += (str(self.vocab_dict[ch]) + ' ') + except KeyError: + continue + + return label[:-1] + + def load_vocab(self, vocab_path, encoding='utf-8'): + r""" + Provides char2id, id2char + + Args: + vocab_path (str): csv file with character labels + encoding (str): encoding method + + Returns: unit2id, id2unit + - **unit2id** (dict): unit2id[unit] = id + - **id2unit** (dict): id2unit[id] = unit + """ + unit2id = dict() + id2unit = dict() + + try: + with open(vocab_path, 'r', encoding=encoding) as f: + labels = csv.reader(f, delimiter=',') + next(labels) + + for row in labels: + unit2id[row[1]] = row[0] + id2unit[int(row[0])] = row[1] + + return unit2id, id2unit + except IOError: + raise IOError("Character label file (csv format) doesn`t exist : {0}".format(vocab_path)) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/librispeech/subword.py b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/librispeech/subword.py new file mode 100644 index 000000000..be9280886 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/librispeech/subword.py @@ -0,0 +1,95 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +from dataclasses import dataclass, field + +from openspeech.dataclass.configurations import TokenizerConfigs +from openspeech.datasets.librispeech.preprocess.subword import SENTENCEPIECE_MODEL_NAME +from openspeech.utils import SENTENCEPIECE_IMPORT_ERROR +from openspeech.tokenizers import register_tokenizer +from openspeech.tokenizers.tokenizer import Tokenizer + + +@dataclass +class LibriSpeechSubwordTokenizerConfigs(TokenizerConfigs): + unit: str = field( + default="libri_subword", metadata={"help": "Unit of vocabulary."} + ) + sos_token: str = field( + default="", metadata={"help": "Start of sentence token"} + ) + eos_token: str = field( + default="", metadata={"help": "End of sentence token"} + ) + vocab_size: int = field( + default=5000, metadata={"help": "Size of vocabulary."} + ) + vocab_path: str = field( + default="../../../LibriSpeech/", metadata={"help": "Path of vocabulary file."} + ) + + +@register_tokenizer("libri_subword", dataclass=LibriSpeechSubwordTokenizerConfigs) +class LibriSpeechSubwordTokenizer(Tokenizer): + """ + Tokenizer class in Subword-units for LibriSpeech. + + Args: + configs (DictConfig): configuration set. + """ + def __init__(self, configs): + super(LibriSpeechSubwordTokenizer, self).__init__() + try: + import sentencepiece as spm + except ImportError: + raise ImportError(SENTENCEPIECE_IMPORT_ERROR) + + self.sp = spm.SentencePieceProcessor() + self.sp.Load(os.path.join(configs.tokenizer.vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.model")) + self.pad_id = self.sp.PieceToId(configs.tokenizer.pad_token) + self.sos_id = self.sp.PieceToId(configs.tokenizer.sos_token) + self.eos_id = self.sp.PieceToId(configs.tokenizer.eos_token) + self.blank_id = self.sp.PieceToId(configs.tokenizer.blank_token) + self.vocab_size = configs.tokenizer.vocab_size + + def __len__(self): + return self.vocab_size + + def decode(self, labels): + if len(labels.shape) == 1: + return self.sp.DecodeIds([l.item() for l in labels]) + + elif len(labels.shape) == 2: + sentences = list() + + for label in labels: + sentence = self.sp.DecodeIds([l.item() for l in label]) + sentences.append(sentence) + return sentences + else: + raise ValueError("Unsupported label's shape") + + def encode(self, sentence): + text = " ".join(self.sp.EncodeAsPieces(sentence)) + label = " ".join([str(self.sp.PieceToId(token)) for token in text]) + return label diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/tokenizer.py b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/tokenizer.py new file mode 100644 index 000000000..76f5e1307 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/tokenizers/tokenizer.py @@ -0,0 +1,44 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright 
notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +class Tokenizer(object): + r""" + A tokenizer is in charge of preparing the inputs for a model. + + Note: + Do not use this class directly, use one of the sub classes. + """ + def __init__(self, *args, **kwargs): + self.sos_id = None + self.eos_id = None + self.pad_id = None + self.blank_id = None + + def decode(self, labels): + raise NotImplementedError + + def encode(self, labels): + raise NotImplementedError + + def __call__(self, sentence): + return self.encode(sentence) diff --git a/audio/speech_recognition/conformer/pytorch/openspeech/utils.py b/audio/speech_recognition/conformer/pytorch/openspeech/utils.py new file mode 100644 index 000000000..7dd7d713d --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/openspeech/utils.py @@ -0,0 +1,211 @@ +# MIT License +# +# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import logging +import torch +import platform +import importlib +from collections import OrderedDict +from typing import Tuple, Union, Iterable + + +PYTORCH_IMPORT_ERROR = """ +Openspeech requires the PyTorch library but it was not found in your environment. Checkout the instructions on the +installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. +""" + +TORCHAUDIO_IMPORT_ERROR = """ +Openspeech requires the torchaudio library but it was not found in your environment. You can install it with pip: +`pip install torchaudio` +""" + +LIBROSA_IMPORT_ERROR = """ +Openspeech requires the librosa library but it was not found in your environment. You can install it with pip: +`pip install librosa` +""" + +SENTENCEPIECE_IMPORT_ERROR = """ +Openspeech requires the SentencePiece library but it was not found in your environment. 
Checkout the instructions on the +installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones +that match your environment. +""" + +WARPRNNT_IMPORT_ERROR = """ +Openspeech requires the warp-rnnt library but it was not found in your environment. Checkout the instructions on the +installation page of its repo: https://github.com/1ytic/warp-rnnt and follow the ones that match your environment. +""" + +CTCDECODE_IMPORT_ERROR = """ +Openspeech requires the ctcdecode library but it was not found in your environment. Checkout the instructions on the +installation page of its repo: https://github.com/parlance/ctcdecode and follow the ones that match your environment. +""" + +try: + import librosa +except ImportError: + raise ValueError(LIBROSA_IMPORT_ERROR) + +DUMMY_SIGNALS, _ = librosa.load(librosa.ex('choice')) +DUMMY_FEATURES = librosa.feature.melspectrogram(y=DUMMY_SIGNALS, n_mels=80) +DUMMY_INPUTS = torch.FloatTensor(DUMMY_FEATURES).transpose(0, 1).unsqueeze(0).expand(3, -1, -1) +DUMMY_INPUT_LENGTHS = torch.IntTensor([1070, 900, 800]) +DUMMY_TARGETS = torch.LongTensor([ + [2, 3, 3, 3, 3, 3, 2, 2, 1, 0], + [2, 3, 3, 3, 3, 3, 2, 1, 2, 0], + [2, 3, 3, 3, 3, 3, 2, 2, 0, 1], +]) +DUMMY_TARGET_LENGTHS = torch.IntTensor([9, 8, 7]) +DUMMY_TRANSCRIPTS = "OPENSPEECH IS AWESOME" + +DUMMY_LM_INPUTS = torch.LongTensor([ + [2, 3, 3, 3, 3, 3, 2, 2, 0], + [2, 3, 3, 3, 3, 3, 2, 3, 2], + [2, 3, 3, 3, 3, 3, 2, 2, 0], +]) +DYMMY_LM_INPUT_LENGTHS = torch.IntTensor([9, 8, 7]) +DUMMY_LM_TARGETS = torch.LongTensor([ + [3, 3, 3, 3, 3, 2, 2, 1, 0], + [3, 3, 3, 3, 3, 2, 1, 2, 0], + [3, 3, 3, 3, 3, 2, 2, 0, 1], +]) + + +def is_pytorch_available(): + return importlib.util.find_spec("torch") is not None + + +def is_librosa_available(): + return importlib.util.find_spec("librosa") is not None + + +def is_apex_available(): + return importlib.util.find_spec("apex") is not None + + +def is_sentencepiece_available(): + return importlib.util.find_spec("sentencepiece") is not None + + +def is_torchaudio_available(): + return importlib.util.find_spec("torchaudio") is not None + + +BACKENDS_MAPPING = OrderedDict( + [ + ("torch", (is_pytorch_available, PYTORCH_IMPORT_ERROR)), + ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), + ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)), + ("torchaudio", (is_torchaudio_available, TORCHAUDIO_IMPORT_ERROR)), + ] +) + + +def check_backends(): + backends = BACKENDS_MAPPING.keys() + + if not all(BACKENDS_MAPPING[backend][0]() for backend in backends): + raise ImportError("".join([BACKENDS_MAPPING[backend][1] for backend in backends])) + + +def get_class_name(obj): + return obj.__class__.__name__ + + +def _check_environment(use_cuda: bool, logger) -> int: + r""" + Check execution envirionment. + OS, Processor, CUDA version, Pytorch version, ... etc. 
+ """ + check_backends() + + cuda = use_cuda and torch.cuda.is_available() + device = torch.device('cuda' if cuda else 'cpu') + + logger.info(f"Operating System : {platform.system()} {platform.release()}") + logger.info(f"Processor : {platform.processor()}") + + num_devices = torch.cuda.device_count() + + if str(device) == 'cuda': + for idx in range(torch.cuda.device_count()): + logger.info(f"device : {torch.cuda.get_device_name(idx)}") + logger.info(f"CUDA is available : {torch.cuda.is_available()}") + logger.info(f"CUDA version : {torch.version.cuda}") + logger.info(f"PyTorch version : {torch.__version__}") + + else: + logger.info(f"CUDA is available : {torch.cuda.is_available()}") + logger.info(f"PyTorch version : {torch.__version__}") + + return num_devices + + +class DotDict(dict): + """dot.notation access to dictionary attributes""" + __getattr__ = dict.get + __setattr__ = dict.__setitem__ + __delattr__ = dict.__delitem__ + + +def build_dummy_configs( + model_configs=None, + vocab_configs=None, + criterion_configs=None, + scheduler_configs=None, + trainer_configs=None, + audio_configs=None, +): + from openspeech.models import ConformerConfigs + from openspeech.criterion import CrossEntropyLossConfigs + from openspeech.tokenizers.ksponspeech.character import KsponSpeechCharacterTokenizerConfigs + from openspeech.data.audio.melspectrogram.melspectrogram import MelSpectrogramConfigs + from openspeech.dataclass import GPUTrainerConfigs + from openspeech.optim.scheduler.warmup_reduce_lr_on_plateau_scheduler import WarmupReduceLROnPlateauConfigs + + if model_configs is None: + model_configs = ConformerConfigs() + + if vocab_configs is None: + vocab_configs = KsponSpeechCharacterTokenizerConfigs() + vocab_configs.vocab_path = "labels.csv" + + if criterion_configs is None: + criterion_configs = CrossEntropyLossConfigs + + if trainer_configs is None: + trainer_configs = GPUTrainerConfigs() + + if scheduler_configs is None: + scheduler_configs = WarmupReduceLROnPlateauConfigs() + + if audio_configs is None: + audio_configs = MelSpectrogramConfigs() + + return DotDict({ + 'model': model_configs, + 'vocab': vocab_configs, + 'criterion': criterion_configs, + 'trainer': trainer_configs, + 'audio': audio_configs, + 'lr_scheduler': scheduler_configs, + }) + diff --git a/audio/speech_recognition/conformer/pytorch/requirements.txt b/audio/speech_recognition/conformer/pytorch/requirements.txt new file mode 100644 index 000000000..159e7b7fc --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/requirements.txt @@ -0,0 +1,6 @@ +addict +python-Levenshtein +sentencepiece +librosa +tqdm +#torchaudio==0.8.1 diff --git a/audio/speech_recognition/conformer/pytorch/run_training.sh b/audio/speech_recognition/conformer/pytorch/run_training.sh new file mode 100644 index 000000000..b246a032e --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/run_training.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +get_lscpu_value() { + awk -F: "(\$1 == \"${1}\"){gsub(/ /, \"\", \$2); print \$2; found=1} END{exit found!=1}" +} +lscpu_out=$(lscpu) + +n_sockets=$(get_lscpu_value 'Socket(s)' <<< "${lscpu_out}") +n_cores_per_socket=$(get_lscpu_value 'Core(s) per socket' <<< "${lscpu_out}") + +echo "Number of CPU sockets on a node: ${n_sockets}" +echo "Number of CPU cores per socket: ${n_cores_per_socket}" + +EXIT_STATUS=0 +check_status() +{ + if ((${PIPESTATUS[0]} != 0)); then + EXIT_STATUS=1 + fi +} + +python3 -u -m bind_pyt \ + --nsockets_per_node ${n_sockets} \ + --ncores_per_socket ${n_cores_per_socket} \ + 
--no_hyperthreads \ + --no_membind "$@" train.py; check_status + +exit ${EXIT_STATUS} diff --git a/audio/speech_recognition/conformer/pytorch/test/test_build_conformer.py b/audio/speech_recognition/conformer/pytorch/test/test_build_conformer.py new file mode 100644 index 000000000..504b02144 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/test/test_build_conformer.py @@ -0,0 +1,32 @@ +from addict import Dict + +class ConfigDict(Dict): + + def __missing__(self, name): + raise KeyError(name) + + def __getattr__(self, name): + return super(ConfigDict, self).__getattr__(name) + + +from openspeech.tokenizers import TOKENIZER_REGISTRY +from openspeech.models import MODEL_REGISTRY + + +# PYTHONPATH=$PYTHONPATH:./ python test/test_build_conformer.py +if __name__ == '__main__': + config_file = 'configs/conformer_lstm.json' + + import json + + with open(config_file) as f: + configs = json.load(f) + + configs = ConfigDict(configs) + print(configs.model.model_name) + + tokenizer = TOKENIZER_REGISTRY[configs.tokenizer.unit](configs) + + model = MODEL_REGISTRY[configs.model.model_name](configs=configs, tokenizer=tokenizer) + model.build_model() + print("model:", model) diff --git a/audio/speech_recognition/conformer/pytorch/test/test_dataloader.py b/audio/speech_recognition/conformer/pytorch/test/test_dataloader.py new file mode 100644 index 000000000..47dbbddc0 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/test/test_dataloader.py @@ -0,0 +1,43 @@ +from addict import Dict + +class ConfigDict(Dict): + + def __missing__(self, name): + raise KeyError(name) + + def __getattr__(self, name): + return super(ConfigDict, self).__getattr__(name) + + +from openspeech.tokenizers import TOKENIZER_REGISTRY +from openspeech.datasets import DATA_MODULE_REGISTRY + + +# PYTHONPATH=$PYTHONPATH:./ python test/test_dataloader.py +if __name__ == '__main__': + config_file = 'configs/conformer_lstm.json' + + import json + + with open(config_file) as f: + configs = json.load(f) + + configs = ConfigDict(configs) + print(configs.model.model_name) + + data_module = DATA_MODULE_REGISTRY[configs.dataset.dataset](configs) + data_module.prepare_data() + tokenizer = TOKENIZER_REGISTRY[configs.tokenizer.unit](configs) + + data_module.setup(tokenizer=tokenizer) + + train_dataloader = data_module.train_dataloader() + print(f'iters_per_epoch: {len(train_dataloader)}') + steps = 0 + for batch_data in train_dataloader: + inputs, targets, input_lengths, target_lengths = batch_data + print(f'inputs: {inputs.size()} input_lengths: {input_lengths.size()} ' + f'targets: {targets.size()} target_lengths: {target_lengths.size()}') + steps += 1 + if steps > 10: + break diff --git a/audio/speech_recognition/conformer/pytorch/train.py b/audio/speech_recognition/conformer/pytorch/train.py new file mode 100644 index 000000000..e1b0f9fb5 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/train.py @@ -0,0 +1,320 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. 
No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + +from collections import OrderedDict +import numpy as np +import os +import sys +import time +from tqdm import trange + +import torch +from torch.cuda.amp import GradScaler +from torch.cuda.amp import autocast +from torch.nn.parallel import DistributedDataParallel as DDP + +from openspeech.tokenizers import TOKENIZER_REGISTRY +from openspeech.datasets import DATA_MODULE_REGISTRY +from openspeech.models import MODEL_REGISTRY + +from dataloader import create_dataloader, WorkerInitializer +from utils import ConfigDict, dist, manual_seed, TrainingLogger, TrainingState +import math + +def parse_args(): + import argparse + parser = argparse.ArgumentParser( + description="This is an example for using TrainingLogger.") + parser.add_argument("--config_file", type=str, default="configs/conformer_lstm.json") + parser.add_argument("--data_dir", type=str, default="data") + parser.add_argument("--max_steps", type=int, default=10000, + help="Total number of training steps to perform.") + parser.add_argument("--max_epochs", type=int, default=None, + help="Total number of training epochs to perform.") + parser.add_argument("--batch_size", type=int, default=8, + help="Total batch size for training.") + parser.add_argument("--quality_metric", type=str, default="wer") + parser.add_argument("--quality_target", type=float, default=0.99) + parser.add_argument("--quality_judgement", type=str, default='<=') + parser.add_argument("--num_train_samples", type=int, default=None, + help="Number of train samples to run train on.") + parser.add_argument("--num_eval_samples", type=int, default=None, + help="Number of eval samples to run eval on.") + parser.add_argument("--eval_batch_size", type=int, default=None, + help="Total batch size for evaluation.") + parser.add_argument("--eval_freq", type=int, default=1000, + help="Evaluate every eval_freq steps during training.") + parser.add_argument("--log_freq", type=int, default=10, + help="Frequency of logging training state.") + parser.add_argument('--seed', type=int, default=1234, + help="Random seed for initialization") + parser.add_argument("--amp", action="store_true", + help="Use mixed accuracy training.") + parser.add_argument("--ddp", action="store_true", + help="Use distributed training.") + parser.add_argument("--local_rank", "--local-rank", type=int, default=-1, + help="Local rank for distributed training on gpus.") + parser.add_argument("--ddp_type", default='native', type=str) + parser.add_argument("--dist_backend", type=str, default="nccl", + help="Communication backend for distributed training on gpus.") + + args = parser.parse_args() + return args + + +def h2d_tensors(tensors, device): + return [t.to(device) for t in tensors] + + +class StrFormatter(): + fmt_dict = dict( + stage=' [{0: <12}]', + progress=' [{0: <19}]', + metrics=' [{0: <15}]', + perf=' [{0: <12}]', + default=' {0: <10}' + ) + + def __call__(self, key, str_msg): + if key not in self.fmt_dict: + key = 'default' + return self.fmt_dict[key].format(str_msg) + + +def main(args): + import json + + with open(args.config_file) as f: + configs = json.load(f) + + configs = ConfigDict(configs) + configs.dataset.dataset_path = os.path.join( + args.data_dir, configs.dataset.dataset_path) + configs.dataset.train_manifest_file = os.path.join( + args.data_dir, configs.dataset.train_manifest_file) + 
configs.dataset.eval_manifest_file = os.path.join( + args.data_dir, configs.dataset.eval_manifest_file) + configs.tokenizer.vocab_path = os.path.join( + args.data_dir, configs.tokenizer.vocab_path) + + logger = TrainingLogger( + flush_freq=1, + json_flush_freq=10, + filepath='training_log.json', + str_formatter=StrFormatter() + ) + + args.device, args.num_gpus = dist.init_dist_training_env(args) + + worker_seeds, shuffling_seeds = dist.setup_seeds( + args.seed, 1, args.device) + if args.ddp: + worker_seed = worker_seeds[torch.distributed.get_rank()] + else: + worker_seed = worker_seeds[0] + manual_seed(worker_seed) + worker_init = WorkerInitializer.default(worker_seed) + + tokenizer = TOKENIZER_REGISTRY[configs.tokenizer.unit](configs) + + model = MODEL_REGISTRY[configs.model.model_name](configs=configs, tokenizer=tokenizer) + model.build_model() + optimizer, lr_scheduler = model.configure_optimizers() + if args.amp: + scaler = GradScaler() + + data_module = DATA_MODULE_REGISTRY[configs.dataset.dataset](configs) + if dist.is_main_process(): + data_module.prepare_data() + dist.barrier() + data_module_kwargs = dict(tokenizer=tokenizer) + if args.num_train_samples is not None: + data_module_kwargs['num_train_samples'] = args.num_train_samples + if args.num_eval_samples is not None: + data_module_kwargs['num_eval_samples'] = args.num_eval_samples + data_module.setup(**data_module_kwargs) + + dist.barrier() + + if args.num_train_samples is None: + args.num_train_samples = len(data_module.dataset['train']) + if args.num_eval_samples is None: + args.num_eval_samples = len(data_module.dataset['val']) + if args.eval_batch_size is None: + args.eval_batch_size = args.batch_size + + train_dataloader = create_dataloader( + data_module.dataset['train'], + batch_size=args.batch_size, + sampler_type='Distributed' if args.ddp else 'Random', + worker_init_fn=worker_init) + + val_dataloader = create_dataloader( + data_module.dataset['val'], + batch_size=args.eval_batch_size, + sampler_type='Distributed' if args.ddp else 'Sequential', + worker_init_fn=worker_init) + + if args.max_steps is not None and args.max_steps > 0: + max_epochs = args.max_steps // len(train_dataloader) + args.max_epochs = max_epochs + 1 if (args.max_steps % len(train_dataloader)) \ + else max_epochs + else: + assert(args.max_epochs > 0) + args.max_steps = args.max_epochs * len(train_dataloader) + + args.num_eval_steps = len(val_dataloader) + + training_state = TrainingState( + max_steps=args.max_steps, + quality_target=args.quality_target, + quality_judgement=args.quality_judgement) + + model.to(args.device) + + if args.ddp: + model = DDP(model, device_ids=[args.local_rank]) + training_step = model.module.training_step + validation_step = model.module.validation_step + else: + training_step = model.training_step + validation_step = model.validation_step + + model.train() + + dist.barrier() + train_start_time = time.perf_counter() + train_time = 0 + start_time = train_start_time + for epoch in range(1, args.max_epochs+1): + + if args.ddp: + train_dataloader.sampler.set_epoch(epoch) + + train_data_iterator = iter(train_dataloader) + for step in range(1, len(train_dataloader)+1): + if training_state.end_training(): + break + + batch_data = next(train_data_iterator) + training_state.global_step += 1 + + if args.amp: + with autocast(): + batch_outputs = training_step( + batch=h2d_tensors(batch_data, args.device), batch_idx=step) + else: + batch_outputs = training_step( + batch=h2d_tensors(batch_data, args.device), batch_idx=step) + + 
optimizer.zero_grad() + if args.amp: + scaler.scale(batch_outputs['loss']).backward() + scaler.step(optimizer) + scaler.update() + else: + batch_outputs['loss'].backward() + optimizer.step() + loss_value = batch_outputs['loss'] + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + sys.exit(1) + lr_scheduler.step() + + training_state.num_trained_samples += args.batch_size * args.num_gpus + + if training_state.global_step % args.log_freq == 0: + dist.barrier() + elapse = time.perf_counter() - start_time + train_time += elapse + if dist.is_main_process(): + logger.log( + OrderedDict( + stage='train', + progress=OrderedDict( + epoch=epoch, + step=step), + metrics=OrderedDict( + loss=float(batch_outputs['loss'].detach().cpu()), + wer=batch_outputs['wer'], + cer=batch_outputs['cer']), + perf=OrderedDict( + tps=training_state.num_trained_samples / train_time) + ) + ) + start_time = time.perf_counter() + + + if training_state.global_step % args.eval_freq == 0: + with torch.no_grad(): + model.eval() + val_data_iterator = iter(val_dataloader) + metric_list = [] + dist.barrier() + eval_start_time = time.perf_counter() + for eval_step in trange(1, args.num_eval_steps+1): + batch_data = next(val_data_iterator) + batch_outputs = validation_step( + batch=h2d_tensors(batch_data, args.device), batch_idx=eval_step) + metric_list.append(batch_outputs[args.quality_metric]) + + dist.barrier() + eval_duration = time.perf_counter() - eval_start_time + metric_value = np.mean(metric_list) + if args.ddp: + metric_value = np.mean(dist.all_gather(metric_value)) + dist.barrier() + if dist.is_main_process(): + logger.log( + OrderedDict( + stage='val', + progress=OrderedDict( + epoch=epoch, + step=args.num_eval_steps), + metrics={ + args.quality_metric: metric_value}, + perf=OrderedDict( + tps=args.num_eval_samples / eval_duration) + ) + ) + if training_state.meet_quality_target(metric_value): + training_state.status = training_state.Status.success + break + + model.train() + start_time = time.perf_counter() + + if training_state.end_training(): + break + + dist.barrier() + raw_train_time = time.perf_counter() - train_start_time + final_state = OrderedDict( + global_step=training_state.global_step, + raw_train_time=raw_train_time, + tps=training_state.num_trained_samples / raw_train_time, + status={ + training_state.Status.success: 'success', + training_state.Status.aborted: 'aborted' + }[training_state.status]) + final_state[args.quality_metric] = metric_value + + if dist.is_main_process(): + logger.log(final_state) + sys.exit(training_state.status) + + +if __name__ == '__main__': + args = parse_args() + try: + from dltest import show_training_arguments + show_training_arguments(args) + except: + pass + main(args) diff --git a/audio/speech_recognition/conformer/pytorch/utils/__init__.py b/audio/speech_recognition/conformer/pytorch/utils/__init__.py new file mode 100644 index 000000000..b198bd010 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/utils/__init__.py @@ -0,0 +1,24 @@ +import random + +import numpy as np + +from .config import * +from .dist import * +from .logger import * +from .misc import * + + +def manual_seed(seed, deterministic=False): + random.seed(seed) + np.random.seed(seed) + os.environ['PYTHONHASHSEED'] = str(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + if deterministic: + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + else: + 
torch.backends.cudnn.deterministic = False + torch.backends.cudnn.benchmark = True diff --git a/audio/speech_recognition/conformer/pytorch/utils/config.py b/audio/speech_recognition/conformer/pytorch/utils/config.py new file mode 100644 index 000000000..94ecabc74 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/utils/config.py @@ -0,0 +1,19 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + +from addict import Dict + + +class ConfigDict(Dict): + + def __missing__(self, name): + raise KeyError(name) + + def __getattr__(self, name): + return super(ConfigDict, self).__getattr__(name) diff --git a/audio/speech_recognition/conformer/pytorch/utils/dist.py b/audio/speech_recognition/conformer/pytorch/utils/dist.py new file mode 100644 index 000000000..b7ae91e11 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/utils/dist.py @@ -0,0 +1,203 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from contextlib import contextmanager +import logging.config +import os +import random + +import torch + + +def generate_seeds(rng, size): + """ + Generate list of random seeds + + :param rng: random number generator + :param size: length of the returned list + """ + seeds = [rng.randint(0, 2**32 - 1) for _ in range(size)] + return seeds + + +def broadcast_seeds(seeds, device): + """ + Broadcasts random seeds to all distributed workers. + Returns list of random seeds (broadcasted from workers with rank 0). + + :param seeds: list of seeds (integers) + :param device: torch.device + """ + if torch.distributed.is_available() and torch.distributed.is_initialized(): + seeds_tensor = torch.LongTensor(seeds).to(device) + torch.distributed.broadcast(seeds_tensor, 0) + seeds = seeds_tensor.tolist() + return seeds + + +def setup_seeds(master_seed, epochs, device): + """ + Generates seeds from one master_seed. + Function returns (worker_seeds, shuffling_seeds), worker_seeds are later + used to initialize per-worker random number generators (mostly for + dropouts), shuffling_seeds are for RNGs resposible for reshuffling the + dataset before each epoch. + Seeds are generated on worker with rank 0 and broadcasted to all other + workers. 
+ + :param master_seed: master RNG seed used to initialize other generators + :param epochs: number of epochs + :param device: torch.device (used for distributed.broadcast) + """ + if master_seed is None: + # random master seed, random.SystemRandom() uses /dev/urandom on Unix + master_seed = random.SystemRandom().randint(0, 2**32 - 1) + if get_rank() == 0: + # master seed is reported only from rank=0 worker, it's to avoid + # confusion, seeds from rank=0 are later broadcasted to other + # workers + logging.info(f'Using random master seed: {master_seed}') + else: + # master seed was specified from command line + logging.info(f'Using master seed from command line: {master_seed}') + + # initialize seeding RNG + seeding_rng = random.Random(master_seed) + + # generate worker seeds, one seed for every distributed worker + worker_seeds = generate_seeds(seeding_rng, get_world_size()) + + # generate seeds for data shuffling, one seed for every epoch + shuffling_seeds = generate_seeds(seeding_rng, epochs) + + # broadcast seeds from rank=0 to other workers + worker_seeds = broadcast_seeds(worker_seeds, device) + shuffling_seeds = broadcast_seeds(shuffling_seeds, device) + return worker_seeds, shuffling_seeds + + +def barrier(): + """ + Works as a temporary distributed barrier, currently pytorch + doesn't implement barrier for NCCL backend. + Calls all_reduce on dummy tensor and synchronizes with GPU. + """ + if torch.distributed.is_available() and torch.distributed.is_initialized(): + torch.distributed.all_reduce(torch.cuda.FloatTensor(1)) + torch.cuda.synchronize() + + +def get_rank(default=0): + """ + Gets distributed rank or returns zero if distributed is not initialized. + """ + if torch.distributed.is_available() and torch.distributed.is_initialized(): + rank = torch.distributed.get_rank() + else: + rank = default + return rank + + +def get_world_size(): + """ + Gets total number of distributed workers or returns one if distributed is + not initialized. + """ + if torch.distributed.is_available() and torch.distributed.is_initialized(): + world_size = torch.distributed.get_world_size() + else: + world_size = 1 + return world_size + + +def main_proc_print(*args, **kwargs): + if is_main_process(): + print(*args, **kwargs) + + +def set_device(cuda, local_rank): + """ + Sets device based on local_rank and returns instance of torch.device. 
+ + :param cuda: if True: use cuda + :param local_rank: local rank of the worker + """ + if cuda: + torch.cuda.set_device(local_rank) + device = torch.device('cuda') + else: + device = torch.device('cpu') + return device + + +def init_dist_training_env(config): + if config.local_rank == -1: + device = torch.device("cuda") + num_gpus = torch.cuda.device_count() + else: + torch.cuda.set_device(config.local_rank) + device = torch.device("cuda", config.local_rank) + host_addr_full = 'tcp://' + os.environ["MASTER_ADDR"] + ':' + os.environ["MASTER_PORT"] + rank = int(os.environ["RANK"]) + world_size = int(os.environ["WORLD_SIZE"]) + + dist_backend = config.dist_backend + DIST_BACKEND_ENV = "PT_DIST_BACKEND" + if DIST_BACKEND_ENV in os.environ: + print("WARN: Use the distributed backend of the environment.") + dist_backend = os.environ[DIST_BACKEND_ENV] + + torch.distributed.init_process_group(backend=dist_backend, init_method=host_addr_full, rank=rank, world_size=world_size) + num_gpus = torch.distributed.get_world_size() + + return device, num_gpus + + +def global_batch_size(config): + return config.train_batch_size * config.n_gpu + + +@contextmanager +def sync_workers(): + """ + Yields distributed rank and synchronizes all workers on exit. + """ + rank = get_rank() + yield rank + barrier() + + +def is_main_process(): + if torch.distributed.is_initialized(): + if "LOCAL_RANK" in os.environ: + return int(os.environ["LOCAL_RANK"]) == 0 + else: + return get_rank() == 0 + + return True + + +def all_gather(data): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + world_size = get_world_size() + if world_size == 1: + return [data] + data_list = [None] * world_size + torch.distributed.all_gather_object(data_list, data) + return data_list diff --git a/audio/speech_recognition/conformer/pytorch/utils/logger.py b/audio/speech_recognition/conformer/pytorch/utils/logger.py new file mode 100644 index 000000000..70b07aae5 --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/utils/logger.py @@ -0,0 +1 @@ +from ixpylogger import TrainingLogger, TrainingState diff --git a/audio/speech_recognition/conformer/pytorch/utils/misc.py b/audio/speech_recognition/conformer/pytorch/utils/misc.py new file mode 100644 index 000000000..dcfa4894a --- /dev/null +++ b/audio/speech_recognition/conformer/pytorch/utils/misc.py @@ -0,0 +1,20 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
+ +import os +import sys +import errno + + +def mkdir(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise diff --git a/tests/executables/conformer/init_torch.sh b/tests/executables/conformer/init_torch.sh new file mode 100644 index 000000000..627d953a3 --- /dev/null +++ b/tests/executables/conformer/init_torch.sh @@ -0,0 +1,50 @@ +#!/bin/bash +ROOT_DIR="$(cd "$(dirname "$0")/../.."; pwd)" +SRC_DIR=$ROOT_DIR/audio/speech_recognition/conformer/pytorch +DATA_DIR=$ROOT_DIR/data + +# determine whether the user is root mode to execute this script +prefix_sudo="" +current_user=$(whoami) +if [ "$current_user" != "root" ]; then + echo "User $current_user need to add sudo permission keywords" + prefix_sudo="sudo" +fi + +echo "prefix_sudo= $prefix_sudo" + +ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') +if [[ ${ID} == "ubuntu" ]]; then + $prefix_sudo apt install -y numactl + $prefix_sudo apt install -y libsndfile1 +elif [[ ${ID} == "centos" ]]; then + $prefix_sudo yum install -y numactl + $prefix_sudo yum install -y libsndfile-devel +else + echo "Unable to determine OS, assumed to be similar to CentOS" + $prefix_sudo yum install -y numactl + $prefix_sudo yum install -y libsndfile-devel +fi + +pip3 list | grep "torchaudio" || \ + pip3 install torchaudio==0.8.1 + +pip3 install --no-index --find-links=$DATA_DIR/packages IXPyLogger==1.0.0 +pip3 install -r $SRC_DIR/requirements.txt --cache-dir=$DATA_DIR/packages +pip3 install numpy==1.26.4 +if [ ! -f "${HOME}/.cache/librosa/admiralbob77_-_Choice_-_Drum-bass.ogg" ]; then + wget https://librosa.org/data/audio/admiralbob77_-_Choice_-_Drum-bass.ogg + mkdir -p ~/.cache/librosa/ + mv admiralbob77_-_Choice_-_Drum-bass.ogg ~/.cache/librosa/ +fi + +DATASET_DIR=$DATA_DIR/datasets/LibriSpeech + +cd $DATASET_DIR +GZS=`find . 
-type f -name '*.gz'` +for path in $GZS; do + cd ${path%/*} + tar zxf ${path##*/} +done + +mv $DATASET_DIR/LibriSpeech/* $DATASET_DIR/ diff --git a/tests/executables/conformer/train_conformer_librispeech_dist_1x8_torch.sh b/tests/executables/conformer/train_conformer_librispeech_dist_1x8_torch.sh new file mode 100644 index 000000000..d9500486f --- /dev/null +++ b/tests/executables/conformer/train_conformer_librispeech_dist_1x8_torch.sh @@ -0,0 +1,28 @@ +source ../_utils/global_environment_variables.sh + +: ${BATCH_SIZE:=8} + +ROOT_DIR="$(cd "$(dirname "$0")/../.."; pwd)" +SRC_DIR=$ROOT_DIR/audio/speech_recognition/conformer/pytorch +DATA_DIR=$ROOT_DIR/data +export DRT_MEMCPYUSEKERNEL=20000000000 + +EXIT_STATUS=0 +check_status() +{ + if ((${PIPESTATUS[0]} != 0)); then + EXIT_STATUS=1 + fi +} + +source ../_utils/fix_import_sklearn_error_libgomp_d22c30c5.sh + +cd $SRC_DIR +bash run_training.sh --data_dir=$DATA_DIR \ + --max_steps=800 \ + --quality_target=1.6 \ + --batch_size=${BATCH_SIZE} \ + --eval_freq=400 \ + --ddp; check_status + +exit ${EXIT_STATUS} \ No newline at end of file diff --git a/tests/executables/ssd/init_torch.sh b/tests/executables/ssd/init_torch.sh new file mode 100644 index 000000000..5065ae4b7 --- /dev/null +++ b/tests/executables/ssd/init_torch.sh @@ -0,0 +1,63 @@ +#!/bin/bash +EXIT_STATUS=0 +check_status() +{ + if ((${PIPESTATUS[0]} != 0)); then + EXIT_STATUS=1 + fi +} + + +: ${CXX:="g++"} +export CXX + +source $(cd `dirname $0`; pwd)/../_utils/which_install_tool.sh + +# determine whether the user is root mode to execute this script +prefix_sudo="" +current_user=$(whoami) +if [ "$current_user" != "root" ]; then + echo "User $current_user need to add sudo permission keywords" + prefix_sudo="sudo" +fi + +echo "prefix_sudo= $prefix_sudo" + +if command_exists apt; then + $prefix_sudo apt install -y git numactl +elif command_exists dnf; then + $prefix_sudo dnf install -y git numactl +else + $prefix_sudo yum install -y git numactl +fi +sys_name_str=`uname -a` +if [[ "${sys_name_str}" =~ "aarch64" ]]; then + pip3 install "git+https://github.com/mlperf/logging.git@1.0-branch" pybind11 ujson +else + pip3 install "git+https://github.com/mlperf/logging.git@1.0-branch" pybind11==2.9.2 ujson==1.35 +fi + +pip3 list | grep -w "wheel" || pip3 install wheel +# pip3 list | grep -w "numpy" | grep -w "1.23.5" || pip3 install numpy==1.23.5 +pip3 install "numpy>=1.26.4" +pip3 install cython +# pip3 install "git+https://github.com/NVIDIA/cocoapi.git@v0.6.0#subdirectory=PythonAPI" +pip3 install pycocotools==2.0.8 + +CUR_PATH=$(cd `dirname $0`; pwd) +DATA_PATH=$CUR_PATH/../../data/datasets/coco2017/ + +if [[ "$(uname -m)" == "aarch64" ]]; then + source /opt/rh/gcc-toolset-11/enable +fi + +cd ../../research/cv/detection/ssd && bash ./clean_ssd.sh && bash ./build_ssd.sh && bash ./install_ssd.sh "$@"; check_status +DATA_PATH_BBOX=../../../.. + +python3 prepare-json.py --keep-keys ${DATA_PATH}/annotations/instances_val2017.json ${DATA_PATH_BBOX}/bbox_only_instances_val2017.json "$@"; check_status +python3 prepare-json.py ${DATA_PATH}/annotations/instances_train2017.json ${DATA_PATH_BBOX}/bbox_only_instances_train2017.json "$@"; check_status + + +cd - +#echo "init finished!" 
+exit ${EXIT_STATUS} diff --git a/tests/executables/ssd/train_ssd_amp_torch.sh b/tests/executables/ssd/train_ssd_amp_torch.sh new file mode 100644 index 000000000..6d348a988 --- /dev/null +++ b/tests/executables/ssd/train_ssd_amp_torch.sh @@ -0,0 +1,25 @@ + +COCO_PATH="`pwd`/../../data/datasets/coco2017" + +: ${BATCH_SIZE:=160} + +EXIT_STATUS=0 +check_status() +{ + if ((${PIPESTATUS[0]} != 0)); then + EXIT_STATUS=1 + fi +} + +cd ../../research/cv/detection/ssd + +echo "python3 train.py --no-dali --dali-cache 0 --data=${COCO_PATH} \ +--batch-size=${BATCH_SIZE} --warmup-factor=0 --warmup=650 --lr=2.92e-3 --threshold=0.08 --epochs 5 --eval-batch-size=160 \ +--wd=1.6e-4 --use-fp16 --delay-allreduce --lr-decay-factor=0.2 --lr-decay-epochs 34 45 --opt-level O2 --seed 1769250163" + +python3 train.py --dali --dali-cache 0 --data=${COCO_PATH} \ +--batch-size=${BATCH_SIZE} --warmup-factor=0 --warmup=650 --lr=2.92e-3 --threshold=0.08 --epochs 5 --eval-batch-size=160 \ +--wd=1.6e-4 --use-fp16 --jit --nhwc --pad-input --delay-allreduce --lr-decay-factor=0.2 --lr-decay-epochs 34 45 --opt-level O2 --seed 1769250163 "$@"; check_status + +cd - +exit ${EXIT_STATUS} -- Gitee From 13249707f0bd25b6ed4ffd210a4ac9282029ee6d Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 14:01:31 +0800 Subject: [PATCH 11/18] sync resnet dali --- tests/executables/dali/init_torch.sh | 1 + .../dali/train_resnet50_dali_torch.sh | 20 +++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 tests/executables/dali/init_torch.sh create mode 100644 tests/executables/dali/train_resnet50_dali_torch.sh diff --git a/tests/executables/dali/init_torch.sh b/tests/executables/dali/init_torch.sh new file mode 100644 index 000000000..01a0f2d5e --- /dev/null +++ b/tests/executables/dali/init_torch.sh @@ -0,0 +1 @@ +bash ../_utils/init_classification_torch.sh ../_utils \ No newline at end of file diff --git a/tests/executables/dali/train_resnet50_dali_torch.sh b/tests/executables/dali/train_resnet50_dali_torch.sh new file mode 100644 index 000000000..6c814901c --- /dev/null +++ b/tests/executables/dali/train_resnet50_dali_torch.sh @@ -0,0 +1,20 @@ +source ../_utils/global_environment_variables.sh +source ../_utils/get_num_devices.sh + +: ${BATCH_SIZE:=256} + +OUTPUT_DIR=${PROJECT_DIR}/output/resnet/$0 +if [[ ! -d ${OUTPUT_DIR} ]]; then + mkdir -p ${OUTPUT_DIR} +fi + + +ixdltest-check --nonstrict_mode_args="--epoch ${NONSTRICT_EPOCH}" -b 8 --run_script \ +python3 ${PROJECT_DIR}/cv/classification/resnet50/pytorch/train.py \ +--data-path ${PROJECT_DIR}/data/datasets/imagenette \ +--batch-size ${BATCH_SIZE} \ +--output-dir ${OUTPUT_DIR} \ +"$@" ;check_status + +rm -fr ${OUTPUT_DIR} +exit ${EXIT_STATUS} \ No newline at end of file -- Gitee From d59d84fa89f7b26583e43793561f60dc956c314e Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 14:11:03 +0800 Subject: [PATCH 12/18] sync bert tf --- tests/executables/bert/init_tf.sh | 29 +++++++++++ .../train_bert_pretraining_amp_dist_1x8_tf.sh | 48 +++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 tests/executables/bert/init_tf.sh create mode 100644 tests/executables/bert/train_bert_pretraining_amp_dist_1x8_tf.sh diff --git a/tests/executables/bert/init_tf.sh b/tests/executables/bert/init_tf.sh new file mode 100644 index 000000000..14d072411 --- /dev/null +++ b/tests/executables/bert/init_tf.sh @@ -0,0 +1,29 @@ +#!/bin/bash +source $(cd `dirname $0`; pwd)/../_utils/which_install_tool.sh + +# determine whether the user is root mode to 
execute this script +prefix_sudo="" +current_user=$(whoami) +if [ "$current_user" != "root" ]; then + echo "User $current_user need to add sudo permission keywords" + prefix_sudo="sudo" +fi + +echo "prefix_sudo= $prefix_sudo" + +if command_exists apt; then + $prefix_sudo apt install -y git numactl +elif command_exists dnf; then + $prefix_sudo dnf install -y git numactl +else + $prefix_sudo yum install -y git numactl +fi +if [ "$(ulimit -n)" -lt "1048576" ]; then + ulimit -n 1048576 +fi +pip3 uninstall -y protobuf +pip3 install "protobuf<4.0.0" +pip3 install git+https://github.com/mlperf/logging.git +pip3 install git+https://github.com/NVIDIA/dllogger.git +pip3 install pandas==1.3.5 +pip3 install numpy==1.26.4 diff --git a/tests/executables/bert/train_bert_pretraining_amp_dist_1x8_tf.sh b/tests/executables/bert/train_bert_pretraining_amp_dist_1x8_tf.sh new file mode 100644 index 000000000..f909c31d7 --- /dev/null +++ b/tests/executables/bert/train_bert_pretraining_amp_dist_1x8_tf.sh @@ -0,0 +1,48 @@ +source ../_utils/global_environment_variables.sh + +export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +source ../_utils/get_num_devices.sh + +# export LD_PRELOAD=/usr/local/lib/libmpi.so:${LD_PRELOAD} + +: ${BATCH_SIZE:=6} + +sys_name_str=`uname -a` +if [[ "${sys_name_str}" =~ "aarch64" ]]; then + export TF_FORCE_SINGLE_THREAD=1 +fi + +# if [ "${CI}" == "true" ]; then +# if [ ! -d "/usr/local/lib/openmpi" ]; then +# echo "Not found /usr/local/lib/openmpi, Installing mpi ......" +# install-mpi +# fi +# export HOROVOD_RUN_ARGS=" " +# export LD_LIBRARY_PATH=/usr/local/lib:/usr/local/lib/openmpi:${LD_LIBRARY_PATH} +# fi + +set -euox pipefail + +current_path=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) +ROOT_DIR=${current_path}"/../../" +SRC_DIR=${ROOT_DIR}nlp/language_model/bert/tensorflow/base +DATA_DIR=${ROOT_DIR}data/datasets +MODEL_DIR=${ROOT_DIR}data/model_zoo + +nonstrict_mode_args="" +if [ "${RUN_MODE}" != "strict" ]; then + nonstrict_mode_args="--stop_threshold 0.6" +fi + +cd $SRC_DIR +bash init.sh +bash run_multi_card_FPS.sh \ + --input_files_dir=${DATA_DIR}/bert_pretrain_tf_records/train_data \ + --init_checkpoint=${MODEL_DIR}/bert_pretrain_tf_ckpt/model.ckpt-28252 \ + --eval_files_dir=${DATA_DIR}/bert_pretrain_tf_records/eval_data \ + --train_batch_size=${BATCH_SIZE} \ + --bert_config_file=${MODEL_DIR}/bert_pretrain_tf_ckpt/bert_config.json \ + --display_loss_steps=10 ${nonstrict_mode_args} \ + "$@" + +exit $? 
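Several of the launchers above share the same strict/non-strict convention: RUN_MODE and NONSTRICT_EPOCH come from tests/executables/_utils/global_environment_variables.sh, and a non-strict run swaps in relaxed stopping criteria (here --stop_threshold 0.6; the DALI launcher instead passes --epoch ${NONSTRICT_EPOCH} through ixdltest-check). A minimal stand-alone sketch of that pattern follows; run_training.sh is a placeholder entry point for illustration, not a file added by these patches.

#!/bin/bash
# Sketch of the shared RUN_MODE convention used by the test launchers above.
# run_training.sh below is a placeholder entry point, not part of these patches.
source ../_utils/global_environment_variables.sh   # provides RUN_MODE, NONSTRICT_EPOCH, EXIT_STATUS, check_status

nonstrict_mode_args=""
if [ "${RUN_MODE}" != "strict" ]; then
    # Non-strict CI runs trade full convergence for speed: fewer epochs or a looser target.
    nonstrict_mode_args="--epoch ${NONSTRICT_EPOCH}"
fi

bash run_training.sh ${nonstrict_mode_args} "$@"; check_status
exit ${EXIT_STATUS}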
-- Gitee From 1bc0de80681d7ce1a6d502f0d2e02f44a1062d02 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 14:41:21 +0800 Subject: [PATCH 13/18] sync stable-diffusion all --- .../pytorch/.gitignore | 178 + .../pytorch/LICENSE | 201 + .../pytorch/MANIFEST.in | 3 + .../pytorch/README.md | 45 + .../pytorch/build_diffusers.sh | 22 + .../pytorch/clean_diffusers.sh | 12 + .../pytorch/examples/text_to_image/README.md | 326 ++ .../examples/text_to_image/README_sdxl.md | 286 ++ .../text_to_image/default_config.yaml | 20 + .../examples/text_to_image/requirements.txt | 6 + .../text_to_image/requirements_flax.txt | 9 + .../text_to_image/requirements_sdxl.txt | 8 + .../examples/text_to_image/single_config.yaml | 20 + .../text_to_image/test_text_to_image.py | 365 ++ .../text_to_image/test_text_to_image_lora.py | 300 ++ .../text_to_image/train_text_to_image.py | 1137 +++++ .../text_to_image/train_text_to_image_flax.py | 620 +++ .../text_to_image/train_text_to_image_lora.py | 976 +++++ .../train_text_to_image_lora_sdxl.py | 1317 ++++++ .../text_to_image/train_text_to_image_sdxl.py | 1374 ++++++ .../examples/text_to_image/zero2_config.yaml | 23 + .../pytorch/install_diffusers.sh | 33 + .../pytorch/run_sd_1.5.sh | 32 + .../pytorch/run_sd_1.5_single.sh | 32 + .../pytorch/run_sd_2.1.sh | 32 + .../pytorch/run_sd_2.1_single.sh | 32 + .../pytorch/run_sd_xl.sh | 37 + .../pytorch/setup.py | 304 ++ .../pytorch/src/diffusers/__init__.py | 787 ++++ .../src/diffusers/commands/__init__.py | 27 + .../src/diffusers/commands/diffusers_cli.py | 43 + .../pytorch/src/diffusers/commands/env.py | 84 + .../diffusers/commands/fp16_safetensors.py | 132 + .../src/diffusers/configuration_utils.py | 703 ++++ .../diffusers/dependency_versions_check.py | 34 + .../diffusers/dependency_versions_table.py | 45 + .../src/diffusers/experimental/README.md | 5 + .../src/diffusers/experimental/__init__.py | 1 + .../src/diffusers/experimental/rl/__init__.py | 1 + .../experimental/rl/value_guided_sampling.py | 153 + .../pytorch/src/diffusers/image_processor.py | 990 +++++ .../pytorch/src/diffusers/loaders/__init__.py | 88 + .../src/diffusers/loaders/autoencoder.py | 146 + .../src/diffusers/loaders/controlnet.py | 136 + .../src/diffusers/loaders/ip_adapter.py | 281 ++ .../pytorch/src/diffusers/loaders/lora.py | 1349 ++++++ .../loaders/lora_conversion_utils.py | 284 ++ .../pytorch/src/diffusers/loaders/peft.py | 186 + .../src/diffusers/loaders/single_file.py | 318 ++ .../diffusers/loaders/single_file_utils.py | 1617 +++++++ .../diffusers/loaders/textual_inversion.py | 562 +++ .../pytorch/src/diffusers/loaders/unet.py | 1003 +++++ .../pytorch/src/diffusers/loaders/utils.py | 59 + .../pytorch/src/diffusers/models/README.md | 3 + .../pytorch/src/diffusers/models/__init__.py | 103 + .../src/diffusers/models/activations.py | 145 + .../pytorch/src/diffusers/models/adapter.py | 584 +++ .../pytorch/src/diffusers/models/attention.py | 681 +++ .../src/diffusers/models/attention_flax.py | 494 +++ .../diffusers/models/attention_processor.py | 2507 +++++++++++ .../diffusers/models/autoencoders/__init__.py | 5 + .../autoencoders/autoencoder_asym_kl.py | 186 + .../models/autoencoders/autoencoder_kl.py | 489 +++ .../autoencoder_kl_temporal_decoder.py | 399 ++ .../models/autoencoders/autoencoder_tiny.py | 347 ++ .../autoencoders/consistency_decoder_vae.py | 435 ++ .../src/diffusers/models/autoencoders/vae.py | 992 +++++ .../src/diffusers/models/controlnet.py | 868 ++++ .../src/diffusers/models/controlnet_flax.py | 395 ++ 
.../src/diffusers/models/downsampling.py | 334 ++ .../diffusers/models/dual_transformer_2d.py | 20 + .../src/diffusers/models/embeddings.py | 914 ++++ .../src/diffusers/models/embeddings_flax.py | 97 + .../pytorch/src/diffusers/models/lora.py | 457 ++ .../models/modeling_flax_pytorch_utils.py | 134 + .../diffusers/models/modeling_flax_utils.py | 566 +++ .../src/diffusers/models/modeling_outputs.py | 17 + .../models/modeling_pytorch_flax_utils.py | 161 + .../src/diffusers/models/modeling_utils.py | 1021 +++++ .../diffusers/models/nhwc_groupnorm/Welford.h | 94 + .../models/nhwc_groupnorm/__init__.py | 0 .../models/nhwc_groupnorm/custom_gn.cpp | 90 + .../models/nhwc_groupnorm/custom_gn.py | 367 ++ .../models/nhwc_groupnorm/gn_kernel.cu | 1051 +++++ .../models/nhwc_groupnorm/gn_kernel.h | 39 + .../models/nhwc_groupnorm/nchw_kernel.cu | 994 +++++ .../diffusers/models/nhwc_groupnorm/vecs.h | 35 + .../src/diffusers/models/normalization.py | 254 ++ .../src/diffusers/models/prior_transformer.py | 12 + .../pytorch/src/diffusers/models/resnet.py | 814 ++++ .../src/diffusers/models/resnet_flax.py | 124 + .../diffusers/models/t5_film_transformer.py | 70 + .../src/diffusers/models/transformer_2d.py | 25 + .../diffusers/models/transformer_temporal.py | 34 + .../diffusers/models/transformers/__init__.py | 9 + .../transformers/dual_transformer_2d.py | 155 + .../models/transformers/prior_transformer.py | 380 ++ .../transformers/t5_film_transformer.py | 438 ++ .../models/transformers/transformer_2d.py | 460 ++ .../transformers/transformer_temporal.py | 379 ++ .../pytorch/src/diffusers/models/unet_1d.py | 26 + .../src/diffusers/models/unet_1d_blocks.py | 203 + .../pytorch/src/diffusers/models/unet_2d.py | 27 + .../src/diffusers/models/unet_2d_blocks.py | 375 ++ .../src/diffusers/models/unet_2d_condition.py | 25 + .../src/diffusers/models/unets/__init__.py | 18 + .../src/diffusers/models/unets/unet_1d.py | 255 ++ .../diffusers/models/unets/unet_1d_blocks.py | 702 ++++ .../src/diffusers/models/unets/unet_2d.py | 346 ++ .../diffusers/models/unets/unet_2d_blocks.py | 3731 +++++++++++++++++ .../models/unets/unet_2d_blocks_flax.py | 400 ++ .../models/unets/unet_2d_condition.py | 1319 ++++++ .../models/unets/unet_2d_condition_flax.py | 453 ++ .../diffusers/models/unets/unet_3d_blocks.py | 2405 +++++++++++ .../models/unets/unet_3d_condition.py | 753 ++++ .../diffusers/models/unets/unet_i2vgen_xl.py | 724 ++++ .../diffusers/models/unets/unet_kandinsky3.py | 535 +++ .../models/unets/unet_motion_model.py | 948 +++++ .../unets/unet_spatio_temporal_condition.py | 489 +++ .../models/unets/unet_stable_cascade.py | 610 +++ .../src/diffusers/models/unets/uvit_2d.py | 470 +++ .../src/diffusers/models/upsampling.py | 448 ++ .../pytorch/src/diffusers/models/vae_flax.py | 876 ++++ .../pytorch/src/diffusers/models/vq_model.py | 181 + .../pytorch/src/diffusers/optimization.py | 361 ++ .../pytorch/src/diffusers/pipelines/README.md | 171 + .../src/diffusers/pipelines/__init__.py | 581 +++ .../diffusers/pipelines/amused/__init__.py | 62 + .../pipelines/amused/pipeline_amused.py | 328 ++ .../amused/pipeline_amused_img2img.py | 347 ++ .../amused/pipeline_amused_inpaint.py | 378 ++ .../pipelines/animatediff/__init__.py | 49 + .../animatediff/pipeline_animatediff.py | 847 ++++ .../pipeline_animatediff_video2video.py | 997 +++++ .../pipelines/animatediff/pipeline_output.py | 23 + .../diffusers/pipelines/audioldm/__init__.py | 51 + .../pipelines/audioldm/pipeline_audioldm.py | 546 +++ .../diffusers/pipelines/audioldm2/__init__.py | 50 + 
.../pipelines/audioldm2/modeling_audioldm2.py | 1511 +++++++ .../pipelines/audioldm2/pipeline_audioldm2.py | 980 +++++ .../src/diffusers/pipelines/auto_pipeline.py | 987 +++++ .../pipelines/blip_diffusion/__init__.py | 20 + .../blip_diffusion/blip_image_processing.py | 318 ++ .../blip_diffusion/modeling_blip2.py | 642 +++ .../blip_diffusion/modeling_ctx_clip.py | 223 + .../blip_diffusion/pipeline_blip_diffusion.py | 348 ++ .../pipelines/consistency_models/__init__.py | 24 + .../pipeline_consistency_models.py | 275 ++ .../pipelines/controlnet/__init__.py | 80 + .../pipelines/controlnet/multicontrolnet.py | 187 + .../controlnet/pipeline_controlnet.py | 1318 ++++++ .../pipeline_controlnet_blip_diffusion.py | 413 ++ .../controlnet/pipeline_controlnet_img2img.py | 1310 ++++++ .../controlnet/pipeline_controlnet_inpaint.py | 1620 +++++++ .../pipeline_controlnet_inpaint_sd_xl.py | 1818 ++++++++ .../controlnet/pipeline_controlnet_sd_xl.py | 1499 +++++++ .../pipeline_controlnet_sd_xl_img2img.py | 1626 +++++++ .../controlnet/pipeline_flax_controlnet.py | 532 +++ .../pipelines/dance_diffusion/__init__.py | 18 + .../pipeline_dance_diffusion.py | 156 + .../src/diffusers/pipelines/ddim/__init__.py | 18 + .../diffusers/pipelines/ddim/pipeline_ddim.py | 154 + .../src/diffusers/pipelines/ddpm/__init__.py | 22 + .../diffusers/pipelines/ddpm/pipeline_ddpm.py | 127 + .../pipelines/deepfloyd_if/__init__.py | 85 + .../pipelines/deepfloyd_if/pipeline_if.py | 788 ++++ .../deepfloyd_if/pipeline_if_img2img.py | 910 ++++ .../pipeline_if_img2img_superresolution.py | 1029 +++++ .../deepfloyd_if/pipeline_if_inpainting.py | 1030 +++++ .../pipeline_if_inpainting_superresolution.py | 1137 +++++ .../pipeline_if_superresolution.py | 885 ++++ .../pipelines/deepfloyd_if/pipeline_output.py | 28 + .../pipelines/deepfloyd_if/safety_checker.py | 59 + .../pipelines/deepfloyd_if/timesteps.py | 579 +++ .../pipelines/deepfloyd_if/watermark.py | 46 + .../diffusers/pipelines/deprecated/README.md | 3 + .../pipelines/deprecated/__init__.py | 153 + .../deprecated/alt_diffusion/__init__.py | 53 + .../alt_diffusion/modeling_roberta_series.py | 124 + .../alt_diffusion/pipeline_alt_diffusion.py | 946 +++++ .../pipeline_alt_diffusion_img2img.py | 1018 +++++ .../alt_diffusion/pipeline_output.py | 28 + .../deprecated/audio_diffusion/__init__.py | 23 + .../deprecated/audio_diffusion/mel.py | 179 + .../pipeline_audio_diffusion.py | 329 ++ .../latent_diffusion_uncond/__init__.py | 18 + .../pipeline_latent_diffusion_uncond.py | 130 + .../pipelines/deprecated/pndm/__init__.py | 18 + .../deprecated/pndm/pipeline_pndm.py | 121 + .../pipelines/deprecated/repaint/__init__.py | 19 + .../deprecated/repaint/pipeline_repaint.py | 230 + .../deprecated/score_sde_ve/__init__.py | 19 + .../score_sde_ve/pipeline_score_sde_ve.py | 109 + .../spectrogram_diffusion/__init__.py | 75 + .../continuous_encoder.py | 92 + .../spectrogram_diffusion/midi_utils.py | 667 +++ .../spectrogram_diffusion/notes_encoder.py | 86 + .../pipeline_spectrogram_diffusion.py | 269 ++ .../stable_diffusion_variants/__init__.py | 55 + .../pipeline_cycle_diffusion.py | 948 +++++ ...ne_onnx_stable_diffusion_inpaint_legacy.py | 542 +++ ...ipeline_stable_diffusion_inpaint_legacy.py | 786 ++++ ...pipeline_stable_diffusion_model_editing.py | 824 ++++ .../pipeline_stable_diffusion_paradigms.py | 786 ++++ .../pipeline_stable_diffusion_pix2pix_zero.py | 1304 ++++++ .../stochastic_karras_ve/__init__.py | 19 + .../pipeline_stochastic_karras_ve.py | 128 + .../versatile_diffusion/__init__.py | 71 + 
.../versatile_diffusion/modeling_text_unet.py | 2508 +++++++++++ .../pipeline_versatile_diffusion.py | 421 ++ ...ipeline_versatile_diffusion_dual_guided.py | 556 +++ ...ine_versatile_diffusion_image_variation.py | 397 ++ ...eline_versatile_diffusion_text_to_image.py | 475 +++ .../deprecated/vq_diffusion/__init__.py | 57 + .../vq_diffusion/pipeline_vq_diffusion.py | 325 ++ .../src/diffusers/pipelines/dit/__init__.py | 19 + .../diffusers/pipelines/dit/pipeline_dit.py | 233 + .../diffusers/pipelines/free_init_utils.py | 184 + .../diffusers/pipelines/i2vgen_xl/__init__.py | 46 + .../pipelines/i2vgen_xl/pipeline_i2vgen_xl.py | 798 ++++ .../diffusers/pipelines/kandinsky/__init__.py | 66 + .../pipelines/kandinsky/pipeline_kandinsky.py | 407 ++ .../kandinsky/pipeline_kandinsky_combined.py | 814 ++++ .../kandinsky/pipeline_kandinsky_img2img.py | 500 +++ .../kandinsky/pipeline_kandinsky_inpaint.py | 635 +++ .../kandinsky/pipeline_kandinsky_prior.py | 547 +++ .../pipelines/kandinsky/text_encoder.py | 27 + .../pipelines/kandinsky2_2/__init__.py | 70 + .../kandinsky2_2/pipeline_kandinsky2_2.py | 320 ++ .../pipeline_kandinsky2_2_combined.py | 851 ++++ .../pipeline_kandinsky2_2_controlnet.py | 320 ++ ...ipeline_kandinsky2_2_controlnet_img2img.py | 381 ++ .../pipeline_kandinsky2_2_img2img.py | 399 ++ .../pipeline_kandinsky2_2_inpainting.py | 556 +++ .../pipeline_kandinsky2_2_prior.py | 549 +++ .../pipeline_kandinsky2_2_prior_emb2emb.py | 563 +++ .../pipelines/kandinsky3/__init__.py | 49 + .../kandinsky3/convert_kandinsky3_unet.py | 98 + .../kandinsky3/pipeline_kandinsky3.py | 589 +++ .../kandinsky3/pipeline_kandinsky3_img2img.py | 654 +++ .../latent_consistency_models/__init__.py | 50 + .../pipeline_latent_consistency_img2img.py | 956 +++++ .../pipeline_latent_consistency_text2img.py | 888 ++++ .../pipelines/latent_diffusion/__init__.py | 50 + .../pipeline_latent_diffusion.py | 746 ++++ ...peline_latent_diffusion_superresolution.py | 189 + .../diffusers/pipelines/ledits_pp/__init__.py | 55 + .../pipeline_leditspp_stable_diffusion.py | 1505 +++++++ .../pipeline_leditspp_stable_diffusion_xl.py | 1797 ++++++++ .../pipelines/ledits_pp/pipeline_output.py | 43 + .../diffusers/pipelines/musicldm/__init__.py | 49 + .../pipelines/musicldm/pipeline_musicldm.py | 635 +++ .../src/diffusers/pipelines/onnx_utils.py | 215 + .../pipelines/paint_by_example/__init__.py | 55 + .../paint_by_example/image_encoder.py | 67 + .../pipeline_paint_by_example.py | 621 +++ .../src/diffusers/pipelines/pia/__init__.py | 46 + .../diffusers/pipelines/pia/pipeline_pia.py | 1034 +++++ .../pipelines/pipeline_flax_utils.py | 616 +++ .../pipelines/pipeline_loading_utils.py | 508 +++ .../src/diffusers/pipelines/pipeline_utils.py | 1771 ++++++++ .../pipelines/pixart_alpha/__init__.py | 48 + .../pixart_alpha/pipeline_pixart_alpha.py | 979 +++++ .../semantic_stable_diffusion/__init__.py | 49 + .../pipeline_output.py | 25 + .../pipeline_semantic_stable_diffusion.py | 718 ++++ .../diffusers/pipelines/shap_e/__init__.py | 71 + .../src/diffusers/pipelines/shap_e/camera.py | 147 + .../pipelines/shap_e/pipeline_shap_e.py | 334 ++ .../shap_e/pipeline_shap_e_img2img.py | 321 ++ .../diffusers/pipelines/shap_e/renderer.py | 1050 +++++ .../pipelines/stable_cascade/__init__.py | 50 + .../stable_cascade/pipeline_stable_cascade.py | 482 +++ .../pipeline_stable_cascade_combined.py | 311 ++ .../pipeline_stable_cascade_prior.py | 638 +++ .../pipelines/stable_diffusion/README.md | 176 + .../pipelines/stable_diffusion/__init__.py | 203 + 
.../clip_image_project_model.py | 29 + .../stable_diffusion/convert_from_ckpt.py | 1860 ++++++++ .../pipeline_flax_stable_diffusion.py | 473 +++ .../pipeline_flax_stable_diffusion_img2img.py | 532 +++ .../pipeline_flax_stable_diffusion_inpaint.py | 589 +++ .../pipeline_onnx_stable_diffusion.py | 487 +++ .../pipeline_onnx_stable_diffusion_img2img.py | 549 +++ .../pipeline_onnx_stable_diffusion_inpaint.py | 563 +++ .../pipeline_onnx_stable_diffusion_upscale.py | 586 +++ .../stable_diffusion/pipeline_output.py | 45 + .../pipeline_stable_diffusion.py | 1032 +++++ .../pipeline_stable_diffusion_depth2img.py | 860 ++++ ...peline_stable_diffusion_image_variation.py | 420 ++ .../pipeline_stable_diffusion_img2img.py | 1113 +++++ .../pipeline_stable_diffusion_inpaint.py | 1430 +++++++ ...eline_stable_diffusion_instruct_pix2pix.py | 807 ++++ ...ipeline_stable_diffusion_latent_upscale.py | 495 +++ .../pipeline_stable_diffusion_upscale.py | 808 ++++ .../pipeline_stable_unclip.py | 932 ++++ .../pipeline_stable_unclip_img2img.py | 839 ++++ .../stable_diffusion/safety_checker.py | 125 + .../stable_diffusion/safety_checker_flax.py | 112 + .../stable_unclip_image_normalizer.py | 57 + .../__init__.py | 48 + ...line_stable_diffusion_attend_and_excite.py | 1088 +++++ .../stable_diffusion_diffedit/__init__.py | 48 + .../pipeline_stable_diffusion_diffedit.py | 1530 +++++++ .../stable_diffusion_gligen/__init__.py | 50 + .../pipeline_stable_diffusion_gligen.py | 845 ++++ ...line_stable_diffusion_gligen_text_image.py | 1017 +++++ .../stable_diffusion_k_diffusion/__init__.py | 62 + .../pipeline_stable_diffusion_k_diffusion.py | 664 +++ ...ipeline_stable_diffusion_xl_k_diffusion.py | 891 ++++ .../stable_diffusion_ldm3d/__init__.py | 48 + .../pipeline_stable_diffusion_ldm3d.py | 985 +++++ .../stable_diffusion_panorama/__init__.py | 48 + .../pipeline_stable_diffusion_panorama.py | 933 +++++ .../stable_diffusion_safe/__init__.py | 99 + .../stable_diffusion_safe/pipeline_output.py | 34 + .../pipeline_stable_diffusion_safe.py | 764 ++++ .../stable_diffusion_safe/safety_checker.py | 109 + .../stable_diffusion_sag/__init__.py | 48 + .../pipeline_stable_diffusion_sag.py | 886 ++++ .../pipelines/stable_diffusion_xl/__init__.py | 76 + .../pipeline_flax_stable_diffusion_xl.py | 308 ++ .../stable_diffusion_xl/pipeline_output.py | 37 + .../pipeline_stable_diffusion_xl.py | 1266 ++++++ .../pipeline_stable_diffusion_xl_img2img.py | 1442 +++++++ .../pipeline_stable_diffusion_xl_inpaint.py | 1812 ++++++++ ...ne_stable_diffusion_xl_instruct_pix2pix.py | 976 +++++ .../stable_diffusion_xl/watermark.py | 36 + .../stable_video_diffusion/__init__.py | 58 + .../pipeline_stable_video_diffusion.py | 673 +++ .../pipelines/t2i_adapter/__init__.py | 47 + .../pipeline_stable_diffusion_adapter.py | 912 ++++ .../pipeline_stable_diffusion_xl_adapter.py | 1258 ++++++ .../text_to_video_synthesis/__init__.py | 54 + .../pipeline_output.py | 25 + .../pipeline_text_to_video_synth.py | 663 +++ .../pipeline_text_to_video_synth_img2img.py | 760 ++++ .../pipeline_text_to_video_zero.py | 969 +++++ .../pipeline_text_to_video_zero_sdxl.py | 1315 ++++++ .../diffusers/pipelines/unclip/__init__.py | 52 + .../pipelines/unclip/pipeline_unclip.py | 493 +++ .../unclip/pipeline_unclip_image_variation.py | 420 ++ .../diffusers/pipelines/unclip/text_proj.py | 86 + .../pipelines/unidiffuser/__init__.py | 58 + .../unidiffuser/modeling_text_decoder.py | 296 ++ .../pipelines/unidiffuser/modeling_uvit.py | 1197 ++++++ .../unidiffuser/pipeline_unidiffuser.py | 1419 +++++++ 
.../pipelines/wuerstchen/__init__.py | 56 + .../wuerstchen/modeling_paella_vq_model.py | 172 + .../wuerstchen/modeling_wuerstchen_common.py | 81 + .../modeling_wuerstchen_diffnext.py | 254 ++ .../wuerstchen/modeling_wuerstchen_prior.py | 200 + .../wuerstchen/pipeline_wuerstchen.py | 438 ++ .../pipeline_wuerstchen_combined.py | 306 ++ .../wuerstchen/pipeline_wuerstchen_prior.py | 516 +++ .../pytorch/src/diffusers/py.typed | 0 .../src/diffusers/schedulers/README.md | 3 + .../src/diffusers/schedulers/__init__.py | 211 + .../schedulers/deprecated/__init__.py | 50 + .../deprecated/scheduling_karras_ve.py | 243 ++ .../deprecated/scheduling_sde_vp.py | 109 + .../diffusers/schedulers/scheduling_amused.py | 162 + .../scheduling_consistency_decoder.py | 180 + .../scheduling_consistency_models.py | 448 ++ .../diffusers/schedulers/scheduling_ddim.py | 520 +++ .../schedulers/scheduling_ddim_flax.py | 313 ++ .../schedulers/scheduling_ddim_inverse.py | 374 ++ .../schedulers/scheduling_ddim_parallel.py | 645 +++ .../diffusers/schedulers/scheduling_ddpm.py | 562 +++ .../schedulers/scheduling_ddpm_flax.py | 299 ++ .../schedulers/scheduling_ddpm_parallel.py | 653 +++ .../schedulers/scheduling_ddpm_wuerstchen.py | 230 + .../schedulers/scheduling_deis_multistep.py | 786 ++++ .../scheduling_dpmsolver_multistep.py | 1029 +++++ .../scheduling_dpmsolver_multistep_flax.py | 643 +++ .../scheduling_dpmsolver_multistep_inverse.py | 921 ++++ .../schedulers/scheduling_dpmsolver_sde.py | 557 +++ .../scheduling_dpmsolver_singlestep.py | 979 +++++ .../scheduling_edm_dpmsolver_multistep.py | 683 +++ .../schedulers/scheduling_edm_euler.py | 381 ++ .../scheduling_euler_ancestral_discrete.py | 481 +++ .../schedulers/scheduling_euler_discrete.py | 576 +++ .../scheduling_euler_discrete_flax.py | 265 ++ .../schedulers/scheduling_heun_discrete.py | 482 +++ .../diffusers/schedulers/scheduling_ipndm.py | 224 + .../scheduling_k_dpm_2_ancestral_discrete.py | 508 +++ .../schedulers/scheduling_k_dpm_2_discrete.py | 483 +++ .../schedulers/scheduling_karras_ve_flax.py | 238 ++ .../diffusers/schedulers/scheduling_lcm.py | 660 +++ .../schedulers/scheduling_lms_discrete.py | 475 +++ .../scheduling_lms_discrete_flax.py | 283 ++ .../diffusers/schedulers/scheduling_pndm.py | 476 +++ .../schedulers/scheduling_pndm_flax.py | 509 +++ .../schedulers/scheduling_repaint.py | 361 ++ .../schedulers/scheduling_sasolver.py | 1124 +++++ .../diffusers/schedulers/scheduling_sde_ve.py | 301 ++ .../schedulers/scheduling_sde_ve_flax.py | 280 ++ .../diffusers/schedulers/scheduling_tcd.py | 686 +++ .../diffusers/schedulers/scheduling_unclip.py | 352 ++ .../schedulers/scheduling_unipc_multistep.py | 880 ++++ .../diffusers/schedulers/scheduling_utils.py | 186 + .../schedulers/scheduling_utils_flax.py | 293 ++ .../schedulers/scheduling_vq_diffusion.py | 467 +++ .../pytorch/src/diffusers/training_utils.py | 453 ++ .../pytorch/src/diffusers/utils/__init__.py | 124 + .../src/diffusers/utils/accelerate_utils.py | 48 + .../pytorch/src/diffusers/utils/constants.py | 55 + .../src/diffusers/utils/deprecation_utils.py | 49 + .../pytorch/src/diffusers/utils/doc_utils.py | 38 + .../dummy_flax_and_transformers_objects.py | 77 + .../src/diffusers/utils/dummy_flax_objects.py | 212 + .../diffusers/utils/dummy_note_seq_objects.py | 17 + .../src/diffusers/utils/dummy_onnx_objects.py | 17 + .../src/diffusers/utils/dummy_pt_objects.py | 1170 ++++++ .../utils/dummy_torch_and_librosa_objects.py | 32 + .../utils/dummy_torch_and_scipy_objects.py | 17 + 
.../utils/dummy_torch_and_torchsde_objects.py | 17 + ...nd_transformers_and_k_diffusion_objects.py | 32 + ...torch_and_transformers_and_onnx_objects.py | 92 + .../dummy_torch_and_transformers_objects.py | 1607 +++++++ ...sformers_and_torch_and_note_seq_objects.py | 17 + .../diffusers/utils/dynamic_modules_utils.py | 452 ++ .../src/diffusers/utils/export_utils.py | 140 + .../pytorch/src/diffusers/utils/hub_utils.py | 493 +++ .../src/diffusers/utils/import_utils.py | 726 ++++ .../src/diffusers/utils/loading_utils.py | 49 + .../pytorch/src/diffusers/utils/logging.py | 339 ++ .../diffusers/utils/model_card_template.md | 24 + .../pytorch/src/diffusers/utils/outputs.py | 137 + .../pytorch/src/diffusers/utils/peft_utils.py | 268 ++ .../pytorch/src/diffusers/utils/pil_utils.py | 67 + .../src/diffusers/utils/state_dict_utils.py | 324 ++ .../src/diffusers/utils/testing_utils.py | 967 +++++ .../src/diffusers/utils/torch_utils.py | 147 + .../pytorch/src/diffusers/utils/versions.py | 117 + .../stable-diffusion/init_torch.sh | 26 + .../train_sd2.1_pokemon_dist_1x8_torch.sh | 63 + 437 files changed, 194613 insertions(+) create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/.gitignore create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/LICENSE create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/MANIFEST.in create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/README.md create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/build_diffusers.sh create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/clean_diffusers.sh create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/README.md create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/README_sdxl.md create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/default_config.yaml create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/requirements.txt create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/requirements_flax.txt create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/requirements_sdxl.txt create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/single_config.yaml create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/test_text_to_image.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/test_text_to_image_lora.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_lora.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_lora_sdxl.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_sdxl.py create mode 100644 
multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/zero2_config.yaml create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/install_diffusers.sh create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_1.5.sh create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_1.5_single.sh create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_2.1.sh create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_2.1_single.sh create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_xl.sh create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/setup.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/diffusers_cli.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/env.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/fp16_safetensors.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/configuration_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/dependency_versions_check.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/dependency_versions_table.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/README.md create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/rl/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/rl/value_guided_sampling.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/image_processor.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/autoencoder.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/controlnet.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/ip_adapter.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/lora.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/lora_conversion_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/peft.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/single_file.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/single_file_utils.py create mode 100644 
multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/textual_inversion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/unet.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/README.md create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/activations.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/adapter.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/attention.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/attention_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/attention_processor.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_asym_kl.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_kl.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_tiny.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/consistency_decoder_vae.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/vae.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/controlnet.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/controlnet_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/downsampling.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/dual_transformer_2d.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/embeddings.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/embeddings_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/lora.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_flax_pytorch_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_flax_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_outputs.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_pytorch_flax_utils.py create mode 100644 
multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/Welford.h create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/custom_gn.cpp create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/custom_gn.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/gn_kernel.cu create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/gn_kernel.h create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/nchw_kernel.cu create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/vecs.h create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/normalization.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/prior_transformer.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/resnet.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/resnet_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/t5_film_transformer.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformer_2d.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformer_temporal.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/dual_transformer_2d.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/prior_transformer.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/t5_film_transformer.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/transformer_2d.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/transformer_temporal.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_1d.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_1d_blocks.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_2d.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_2d_blocks.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_2d_condition.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/__init__.py 
create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_1d.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_1d_blocks.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_blocks.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_blocks_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_condition.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_condition_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_3d_blocks.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_3d_condition.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_i2vgen_xl.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_kandinsky3.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_motion_model.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_spatio_temporal_condition.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_stable_cascade.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/uvit_2d.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/upsampling.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/vae_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/vq_model.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/optimization.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/README.md create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/pipeline_amused.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/pipeline_amused_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/pipeline_amused_inpaint.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/pipeline_animatediff.py create mode 100644 
multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/pipeline_output.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm/pipeline_audioldm.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm2/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm2/modeling_audioldm2.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/auto_pipeline.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/blip_image_processing.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/modeling_blip2.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/consistency_models/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/multicontrolnet.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py create mode 
100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dance_diffusion/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddim/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddim/pipeline_ddim.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddpm/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddpm/pipeline_ddpm.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_output.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/safety_checker.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/timesteps.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/watermark.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/README.md create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_output.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py create mode 100644 
multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/audio_diffusion/mel.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/pndm/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/repaint/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py create mode 
100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stochastic_karras_ve/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/vq_diffusion/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dit/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dit/pipeline_dit.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/free_init_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/i2vgen_xl/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/text_encoder.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py 
create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_consistency_models/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_diffusion/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/pipeline_output.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/musicldm/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/musicldm/pipeline_musicldm.py create mode 100644 
multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/onnx_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/paint_by_example/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/paint_by_example/image_encoder.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pia/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pia/pipeline_pia.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pipeline_flax_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pipeline_loading_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pipeline_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pixart_alpha/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/camera.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/pipeline_shap_e.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/renderer.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/README.md create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/__init__.py create mode 100644 
multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/clip_image_project_model.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_output.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/safety_checker.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/safety_checker_flax.py create mode 100644 
multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_diffedit/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_gligen/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_panorama/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_sag/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/__init__.py create mode 100644 
multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_output.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/watermark.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_video_diffusion/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/t2i_adapter/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/pipeline_unclip.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/text_proj.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/__init__.py create mode 100644 
multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/modeling_uvit.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/py.typed create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/README.md create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/deprecated/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/deprecated/scheduling_sde_vp.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_amused.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_consistency_decoder.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_consistency_models.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim_inverse.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim_parallel.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm_flax.py create mode 100644 
multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm_parallel.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm_wuerstchen.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_deis_multistep.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_sde.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_edm_euler.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_euler_discrete.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_euler_discrete_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_heun_discrete.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ipndm.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_karras_ve_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_lcm.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_lms_discrete.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_lms_discrete_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_pndm.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_pndm_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_repaint.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_sasolver.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_sde_ve.py 
create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_sde_ve_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_tcd.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_unclip.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_unipc_multistep.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_utils_flax.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_vq_diffusion.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/training_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/__init__.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/accelerate_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/constants.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/deprecation_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/doc_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_flax_and_transformers_objects.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_flax_objects.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_note_seq_objects.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_onnx_objects.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_pt_objects.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_librosa_objects.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_scipy_objects.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_torchsde_objects.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_transformers_objects.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dynamic_modules_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/export_utils.py create 
mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/hub_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/import_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/loading_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/logging.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/model_card_template.md create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/outputs.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/peft_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/pil_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/state_dict_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/testing_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/torch_utils.py create mode 100644 multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/versions.py create mode 100644 tests/executables/stable-diffusion/init_torch.sh create mode 100644 tests/executables/stable-diffusion/train_sd2.1_pokemon_dist_1x8_torch.sh diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/.gitignore b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/.gitignore new file mode 100644 index 000000000..9d74fe840 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/.gitignore @@ -0,0 +1,178 @@ +# Initially taken from GitHub's Python gitignore file + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# tests and logs +tests/fixtures/cached_*_text.txt +logs/ +lightning_logs/ +lang_code_data/ + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a Python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# vscode +.vs +.vscode + +# Pycharm +.idea + +# TF code +tensorflow_code + +# Models +proc_data + +# examples +runs +/runs_old +/wandb +/examples/runs +/examples/**/*.args +/examples/rag/sweep + +# data +/data +serialization_dir + +# emacs +*.*~ +debug.env + +# vim +.*.swp + +# ctags +tags + +# pre-commit +.pre-commit* + +# .lock +*.lock + +# DS_Store (MacOS) +.DS_Store + +# RL pipelines may produce mp4 outputs +*.mp4 + +# dependencies +/transformers + +# ruff +.ruff_cache + +# wandb +wandb diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/LICENSE b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/MANIFEST.in b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/MANIFEST.in new file mode 100644 index 000000000..bc6260161 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/MANIFEST.in @@ -0,0 +1,3 @@ +include LICENSE +include src/diffusers/models/nhwc_groupnorm/* +include src/diffusers/utils/model_card_template.md diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/README.md b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/README.md new file mode 100644 index 000000000..3bfe91c9f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/README.md @@ -0,0 +1,45 @@ +## Environment setup +### Install transformers +```bash +git clone ssh://git@bitbucket.iluvatar.ai:7999/apptp/transformers.git +cd transformers +python3 setup.py install +``` +### Install diffusers +```bash +cd diffusers +pip3 install -r examples/text_to_image/requirements.txt +bash build_diffusers.sh && bash install_diffusers.sh +``` +_The following packages are assumed to be installed already: torchvision, ixformer, flash-attn, deepspeed, apex_ +_Use fairly recent daily builds of the packages above; otherwise some features may not be supported_ + +## Download the data +```bash +mkdir -p pokemon-blip-captions +# download from: http://10.150.9.95/swapp/datasets/multimodal/stable_diffusion/pokemon-blip-captions +wget http://10.150.9.95/swapp/datasets/multimodal/stable_diffusion/stabilityai.tar # sd2.1 weights +tar -xvf stabilityai.tar +``` + +*sdxl weights: http://sw.iluvatar.ai/download/apps/datasets/aigc/xl/stable-diffusion-xl-base-1.0.tar.gz* + +*sd1.5 weights: http://10.150.9.95/swapp/pretrained/multimodal/stable-diffusion/stable-diffusion-v1-5.zip* + + +## Training +*The dataset and pretrained-weight paths used in the scripts below must be adjusted to where they are actually stored* +### sd2.1 training +```bash +$ bash run_sd_2.1.sh # multi-GPU +$ bash run_sd_2.1_single.sh # single GPU +``` +### sd1.5 training +```bash +$ bash run_sd_1.5.sh # multi-GPU +$ bash run_sd_1.5_single.sh # single GPU +``` +### sdxl training +```bash +$ bash run_sd_xl.sh # multi-GPU +``` diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/build_diffusers.sh b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/build_diffusers.sh new file mode 100644 index 000000000..74e8d129e --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/build_diffusers.sh @@ -0,0 +1,22 @@ +SCRIPTPATH=$(dirname $(realpath "$0")) +cd $(dirname $(realpath "$0")) +COREX_VERSION=${COREX_VERSION:-latest} +MAX_JOBS=${MAX_JOBS:-$(nproc --all)} +PYTHON_PATH=$(which python3) + +export MAX_JOBS=${MAX_JOBS} + +echo "Python cmd1: ${PYTHON_PATH} setup.py build" +${PYTHON_PATH} setup.py build 2>&1 | tee compile.log; [[ ${PIPESTATUS[0]} == 0 ]] || exit + +if [[ "${COREX_VERSION}" == "latest" ]]; then + COREX_VERSION=`date --utc +%Y%m%d%H%M%S` +fi + +export LOCAL_VERSION_IDENTIFIER="corex.${COREX_VERSION}" + +echo "Python cmd2: ${PYTHON_PATH} setup.py bdist_wheel -d build_pip" +${PYTHON_PATH} setup.py bdist_wheel -d build_pip || exit + +# Return 0 status if all finished +exit 0 \ No newline at end of file diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/clean_diffusers.sh b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/clean_diffusers.sh new file mode 100644 index 000000000..a04335f67 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/clean_diffusers.sh @@ -0,0 +1,12 @@ +#!/bin/bash +PROJPATH=$(dirname $(realpath "$0")) +cd $(dirname $(realpath "$0")) +PYTHON_PATH=$(which python3) + +rm -rf build +${PYTHON_PATH} setup.py clean || true +rm -rf build_pip +rm -rf
${PROJPATH}/dist + +# Return 0 status if all finished +exit 0 \ No newline at end of file diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/README.md b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/README.md new file mode 100644 index 000000000..f2931d3f3 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/README.md @@ -0,0 +1,326 @@ +# Stable Diffusion text-to-image fine-tuning + +The `train_text_to_image.py` script shows how to fine-tune the Stable Diffusion model on your own dataset. + +___Note___: + +___This script is experimental. The script fine-tunes the whole model, and oftentimes the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset.___ + + +## Running locally with PyTorch +### Installing the dependencies + +Before running the scripts, make sure to install the library's training dependencies: + +**Important** + +To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: +```bash +git clone https://github.com/huggingface/diffusers +cd diffusers +pip install . +``` + +Then cd into the example folder and run +```bash +pip install -r requirements.txt +``` + +And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: + +```bash +accelerate config +``` + +Note also that we use the PEFT library as the backend for LoRA training, so make sure to have `peft>=0.6.0` installed in your environment. + +### Pokemon example + +You need to accept the model license before downloading or using the weights. In this example we'll use model version `v1-4`, so you'll need to visit [its card](https://huggingface.co/CompVis/stable-diffusion-v1-4), read the license and tick the checkbox if you agree. + +You have to be a registered user on the 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to [this section of the documentation](https://huggingface.co/docs/hub/security-tokens). + +Run the following command to authenticate your token: + +```bash +huggingface-cli login +``` + +If you have already cloned the repo, then you won't need to go through these steps. +
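As a side note (an addition to the example above, not part of the original instructions), the same authentication can also be done from Python via the `huggingface_hub` package that diffusers already depends on:

```python
# Minimal sketch: log in to the Hugging Face Hub from Python instead of the CLI.
from huggingface_hub import login

# Prompts for your access token and caches it, just like `huggingface-cli login`.
login()
```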
+ +#### Hardware +With `gradient_checkpointing` and `mixed_precision` it should be possible to fine-tune the model on a single 24GB GPU. For higher `batch_size` and faster training, it's better to use GPUs with >30GB memory. + +**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___** + +```bash +export MODEL_NAME="CompVis/stable-diffusion-v1-4" +export DATASET_NAME="lambdalabs/pokemon-blip-captions" + +accelerate launch --mixed_precision="fp16" train_text_to_image.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --dataset_name=$DATASET_NAME \ + --use_ema \ + --resolution=512 --center_crop --random_flip \ + --train_batch_size=1 \ + --gradient_accumulation_steps=4 \ + --gradient_checkpointing \ + --max_train_steps=15000 \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --lr_scheduler="constant" --lr_warmup_steps=0 \ + --output_dir="sd-pokemon-model" +``` + + + +To run on your own training files, prepare the dataset according to the format required by `datasets`. You can find the instructions for how to do that in this [document](https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder-with-metadata). +If you wish to use custom loading logic, you should modify the script; we have left pointers for that in the training script. + +```bash +export MODEL_NAME="CompVis/stable-diffusion-v1-4" +export TRAIN_DIR="path_to_your_dataset" + +accelerate launch --mixed_precision="fp16" train_text_to_image.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --train_data_dir=$TRAIN_DIR \ + --use_ema \ + --resolution=512 --center_crop --random_flip \ + --train_batch_size=1 \ + --gradient_accumulation_steps=4 \ + --gradient_checkpointing \ + --max_train_steps=15000 \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --lr_scheduler="constant" --lr_warmup_steps=0 \ + --output_dir="sd-pokemon-model" +``` + + +Once the training is finished, the model will be saved in the `output_dir` specified in the command. In this example it's `sd-pokemon-model`. To load the fine-tuned model for inference, just pass that path to `StableDiffusionPipeline`: + +```python +import torch +from diffusers import StableDiffusionPipeline + +model_path = "path_to_saved_model" +pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16) +pipe.to("cuda") + +image = pipe(prompt="yoda").images[0] +image.save("yoda-pokemon.png") +``` + +Checkpoints only save the unet, so to run inference from a checkpoint, just load the unet: + +```python +import torch +from diffusers import StableDiffusionPipeline, UNet2DConditionModel + +model_path = "path_to_saved_model" +unet = UNet2DConditionModel.from_pretrained(model_path + "/checkpoint-/unet", torch_dtype=torch.float16) + +pipe = StableDiffusionPipeline.from_pretrained("", unet=unet, torch_dtype=torch.float16) +pipe.to("cuda") + +image = pipe(prompt="yoda").images[0] +image.save("yoda-pokemon.png") +``` + +#### Training with multiple GPUs + +`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) +for running distributed training with `accelerate`.
Here is an example command: + +```bash +export MODEL_NAME="CompVis/stable-diffusion-v1-4" +export DATASET_NAME="lambdalabs/pokemon-blip-captions" + +accelerate launch --mixed_precision="fp16" --multi_gpu train_text_to_image.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --dataset_name=$DATASET_NAME \ + --use_ema \ + --resolution=512 --center_crop --random_flip \ + --train_batch_size=1 \ + --gradient_accumulation_steps=4 \ + --gradient_checkpointing \ + --max_train_steps=15000 \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --lr_scheduler="constant" --lr_warmup_steps=0 \ + --output_dir="sd-pokemon-model" +``` + + +#### Training with Min-SNR weighting + +We support training with the Min-SNR weighting strategy proposed in [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://arxiv.org/abs/2303.09556), which helps to achieve faster convergence +by rebalancing the loss. In order to use it, one needs to set the `--snr_gamma` argument. The recommended +value when using it is 5.0. + +You can find [this project on Weights and Biases](https://wandb.ai/sayakpaul/text2image-finetune-minsnr) that compares the loss surfaces of the following setups: + +* Training without the Min-SNR weighting strategy +* Training with the Min-SNR weighting strategy (`snr_gamma` set to 5.0) +* Training with the Min-SNR weighting strategy (`snr_gamma` set to 1.0) + +For our small Pokemons dataset, the effect of the Min-SNR weighting strategy might not appear to be pronounced, but for larger datasets, we believe it will be more pronounced. + +Also, note that in this example, we either predict `epsilon` (i.e., the noise) or the `v_prediction`. For both of these cases, the formulation of the Min-SNR weighting strategy that we have used holds. + +## Training with LoRA + +Low-Rank Adaptation of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*. + +In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights (an illustrative sketch of this idea is shown a little further below). This has a couple of advantages: + +- Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114). +- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable. +- LoRA attention layers allow controlling the extent to which the model is adapted toward new training images via a `scale` parameter. + +[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository. + +With LoRA, it's possible to fine-tune Stable Diffusion on a custom image-caption pair dataset +on consumer GPUs like the Tesla T4 or Tesla V100. + +### Training + +First, you need to set up your development environment as explained in the [installation section](#installing-the-dependencies). Make sure to set the `MODEL_NAME` and `DATASET_NAME` environment variables. Here, we will use [Stable Diffusion v1-4](https://hf.co/CompVis/stable-diffusion-v1-4) and the [Pokemons dataset](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions).
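As a brief aside before launching: the rank-decomposition idea described above can be illustrated with a minimal PyTorch sketch. This is illustrative only; the training script relies on the `peft` backend rather than code like this, and all names here are made up for the example:

```python
# Toy sketch of LoRA's rank-decomposition idea (illustrative only):
# a frozen base layer is augmented with a trainable low-rank update B(A(x)).
import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    def __init__(self, base: nn.Linear, r: int = 4, alpha: float = 4.0):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad_(False)          # pretrained weights stay frozen
        self.lora_a = nn.Linear(base.in_features, r, bias=False)
        self.lora_b = nn.Linear(r, base.out_features, bias=False)
        nn.init.zeros_(self.lora_b.weight)   # start as a no-op on top of the base layer
        self.scale = alpha / r

    def forward(self, x):
        return self.base(x) + self.scale * self.lora_b(self.lora_a(x))

layer = LoRALinear(nn.Linear(768, 768))
out = layer(torch.randn(2, 768))             # only lora_a / lora_b receive gradients
```

Because only the small `lora_a`/`lora_b` matrices are trained and saved, the resulting weight files stay tiny, which is why the final LoRA weights mentioned further below are only a few megabytes.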
+
+**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**
+
+**___Note: It is quite useful to monitor the training progress by regularly generating sample images during training. [Weights and Biases](https://docs.wandb.ai/quickstart) is a nice solution to easily see the generated images during training. All you need to do is to run `pip install wandb` before training to automatically log images.___**
+
+```bash
+export MODEL_NAME="CompVis/stable-diffusion-v1-4"
+export DATASET_NAME="lambdalabs/pokemon-blip-captions"
+```
+
+For this example we want to directly store the trained LoRA embeddings on the Hub, so
+we need to be logged in and add the `--push_to_hub` flag.
+
+```bash
+huggingface-cli login
+```
+
+Now we can start training!
+
+```bash
+accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \
+  --pretrained_model_name_or_path=$MODEL_NAME \
+  --dataset_name=$DATASET_NAME --caption_column="text" \
+  --resolution=512 --random_flip \
+  --train_batch_size=1 \
+  --num_train_epochs=100 --checkpointing_steps=5000 \
+  --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
+  --seed=42 \
+  --output_dir="sd-pokemon-model-lora" \
+  --validation_prompt="cute dragon creature" --report_to="wandb" \
+  --push_to_hub
+```
+
+The above command will also run inference as fine-tuning progresses and log the results to Weights and Biases.
+
+**___Note: When using LoRA we can use a much higher learning rate compared to non-LoRA fine-tuning. Here we use *1e-4* instead of the usual *1e-5*. Also, by using LoRA, it's possible to run `train_text_to_image_lora.py` on consumer GPUs like a T4 or a V100.___**
+
+The final LoRA embedding weights have been uploaded to [sayakpaul/sd-model-finetuned-lora-t4](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4). **___Note: [The final weights](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4/blob/main/pytorch_lora_weights.bin) are only 3 MB in size, which is orders of magnitude smaller than the original model.___**
+
+You can check some inference samples that were logged during the course of the fine-tuning process [here](https://wandb.ai/sayakpaul/text2image-fine-tune/runs/q4lc0xsw).
+
+### Inference
+
+Once you have trained a model using the above command, inference can be done simply with the `StableDiffusionPipeline` after loading the trained LoRA weights. You
+need to pass the `output_dir` used for the LoRA weights which, in this case, is `sd-pokemon-model-lora`.
+
+```python
+from diffusers import StableDiffusionPipeline
+import torch
+
+model_path = "sayakpaul/sd-model-finetuned-lora-t4"
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
+pipe.unet.load_attn_procs(model_path)
+pipe.to("cuda")
+
+prompt = "A pokemon with green eyes and red legs."
+image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
+image.save("pokemon.png")
+```
+
+If you are loading the LoRA parameters from the Hub and if the Hub repository has
+a `base_model` tag (such as [this](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4/blob/main/README.md?code=true#L4)), then
+you can do:
+
+```py
+from huggingface_hub.repocard import RepoCard
+
+lora_model_id = "sayakpaul/sd-model-finetuned-lora-t4"
+card = RepoCard.load(lora_model_id)
+base_model_id = card.data.to_dict()["base_model"]
+
+pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
+...
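+
+# One possible continuation of the elided steps above (a sketch, not the only way):
+# attach the LoRA weights exactly as in the previous example and generate an image.
+pipe.unet.load_attn_procs(lora_model_id)
+pipe.to("cuda")
+
+image = pipe("A pokemon with green eyes and red legs.", num_inference_steps=30, guidance_scale=7.5).images[0]
+image.save("pokemon.png")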
+``` + +## Training with Flax/JAX + +For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script. + +**___Note: The flax example doesn't yet support features like gradient checkpoint, gradient accumulation etc, so to use flax for faster training we will need >30GB cards or TPU v3.___** + + +Before running the scripts, make sure to install the library's training dependencies: + +```bash +pip install -U -r requirements_flax.txt +``` + +```bash +export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" +export DATASET_NAME="lambdalabs/pokemon-blip-captions" + +python train_text_to_image_flax.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --dataset_name=$DATASET_NAME \ + --resolution=512 --center_crop --random_flip \ + --train_batch_size=1 \ + --mixed_precision="fp16" \ + --max_train_steps=15000 \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --output_dir="sd-pokemon-model" +``` + +To run on your own training files prepare the dataset according to the format required by `datasets`, you can find the instructions for how to do that in this [document](https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder-with-metadata). +If you wish to use custom loading logic, you should modify the script, we have left pointers for that in the training script. + +```bash +export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" +export TRAIN_DIR="path_to_your_dataset" + +python train_text_to_image_flax.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --train_data_dir=$TRAIN_DIR \ + --resolution=512 --center_crop --random_flip \ + --train_batch_size=1 \ + --mixed_precision="fp16" \ + --max_train_steps=15000 \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --output_dir="sd-pokemon-model" +``` + +### Training with xFormers: + +You can enable memory efficient attention by [installing xFormers](https://huggingface.co/docs/diffusers/main/en/optimization/xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. + +xFormers training is not available for Flax/JAX. + +**Note**: + +According to [this issue](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212), xFormers `v0.0.16` cannot be used for training in some GPUs. If you observe that problem, please install a development version as indicated in that comment. + +## Stable Diffusion XL + +* We support fine-tuning the UNet shipped in [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) via the `train_text_to_image_sdxl.py` script. Please refer to the docs [here](./README_sdxl.md). +* We also support fine-tuning of the UNet and Text Encoder shipped in [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) with LoRA via the `train_text_to_image_lora_sdxl.py` script. Please refer to the docs [here](./README_sdxl.md). diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/README_sdxl.md b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/README_sdxl.md new file mode 100644 index 000000000..349feef50 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/README_sdxl.md @@ -0,0 +1,286 @@ +# Stable Diffusion XL text-to-image fine-tuning + +The `train_text_to_image_sdxl.py` script shows how to fine-tune Stable Diffusion XL (SDXL) on your own dataset. + +🚨 This script is experimental. 
The script fine-tunes the whole model, and often the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset. 🚨
+
+## Running locally with PyTorch
+
+### Installing the dependencies
+
+Before running the scripts, make sure to install the library's training dependencies:
+
+**Important**
+
+To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
+
+```bash
+git clone https://github.com/huggingface/diffusers
+cd diffusers
+pip install -e .
+```
+
+Then cd into the `examples/text_to_image` folder and run
+```bash
+pip install -r requirements_sdxl.txt
+```
+
+And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
+
+```bash
+accelerate config
+```
+
+Or for a default accelerate configuration without answering questions about your environment:
+
+```bash
+accelerate config default
+```
+
+Or if your environment doesn't support an interactive shell (e.g., a notebook):
+
+```python
+from accelerate.utils import write_basic_config
+write_basic_config()
+```
+
+When running `accelerate config`, specifying the torch compile mode as True can give dramatic speedups.
+Note also that we use the PEFT library as the backend for LoRA training, so make sure to have `peft>=0.6.0` installed in your environment.
+
+### Training
+
+```bash
+export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
+export VAE_NAME="madebyollin/sdxl-vae-fp16-fix"
+export DATASET_NAME="lambdalabs/pokemon-blip-captions"
+
+accelerate launch train_text_to_image_sdxl.py \
+  --pretrained_model_name_or_path=$MODEL_NAME \
+  --pretrained_vae_model_name_or_path=$VAE_NAME \
+  --dataset_name=$DATASET_NAME \
+  --enable_xformers_memory_efficient_attention \
+  --resolution=512 --center_crop --random_flip \
+  --proportion_empty_prompts=0.2 \
+  --train_batch_size=1 \
+  --gradient_accumulation_steps=4 --gradient_checkpointing \
+  --max_train_steps=10000 \
+  --use_8bit_adam \
+  --learning_rate=1e-06 --lr_scheduler="constant" --lr_warmup_steps=0 \
+  --mixed_precision="fp16" \
+  --report_to="wandb" \
+  --validation_prompt="a cute Sundar Pichai creature" --validation_epochs 5 \
+  --checkpointing_steps=5000 \
+  --output_dir="sdxl-pokemon-model" \
+  --push_to_hub
+```
+
+**Notes**:
+
+* The `train_text_to_image_sdxl.py` script pre-computes text embeddings and the VAE encodings and keeps them in memory. While for smaller datasets like [`lambdalabs/pokemon-blip-captions`](https://hf.co/datasets/lambdalabs/pokemon-blip-captions) this might not be a problem, it can definitely lead to memory problems when the script is used on a larger dataset. In that case, you would want to serialize these pre-computed representations to disk separately and load them during the fine-tuning process. Refer to [this PR](https://github.com/huggingface/diffusers/pull/4505) for a more in-depth discussion.
+* The training script is compute-intensive and may not run on a consumer GPU like a Tesla T4.
+* The training command shown above performs intermediate quality validation in between the training epochs and logs the results to Weights and Biases. `--report_to`, `--validation_prompt`, and `--validation_epochs` are the relevant CLI arguments here.
+* SDXL's VAE is known to suffer from numerical instability issues. This is why we also expose a CLI argument, `--pretrained_vae_model_name_or_path`, that lets you specify the location of a better VAE (such as [this one](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).
+
+### Inference
+
+```python
+from diffusers import DiffusionPipeline
+import torch
+
+model_path = "your-model-id-goes-here" # <-- change this
+pipe = DiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
+pipe.to("cuda")
+
+prompt = "A pokemon with green eyes and red legs."
+image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
+image.save("pokemon.png")
+```
+
+### Inference in PyTorch XLA
+```python
+from diffusers import DiffusionPipeline
+import torch
+import torch_xla.core.xla_model as xm
+from time import time
+
+model_id = "stabilityai/stable-diffusion-xl-base-1.0"
+pipe = DiffusionPipeline.from_pretrained(model_id)
+
+device = xm.xla_device()
+pipe.to(device)
+
+prompt = "A pokemon with green eyes and red legs."
+inference_steps = 30  # number of denoising steps
+
+start = time()
+image = pipe(prompt, num_inference_steps=inference_steps).images[0]
+print(f'Compilation time is {time()-start} sec')
+image.save("pokemon.png")
+
+start = time()
+image = pipe(prompt, num_inference_steps=inference_steps).images[0]
+print(f'Inference time is {time()-start} sec after compilation')
+```
+
+Note: The first call includes a warmup step for PyTorch XLA, which takes longer because of
+compilation and optimization. To see the real benefit of PyTorch XLA, call the pipe again
+on an input with the same length as the original prompt so that the optimized graph is
+reused and you get the performance boost.
+
+## LoRA training example for Stable Diffusion XL (SDXL)
+
+Low-Rank Adaptation of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.
+
+In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:
+
+- Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
+- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
+- LoRA attention layers make it possible to control the extent to which the model is adapted to the new training images via a `scale` parameter.
+
+[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository.
+
+With LoRA, it's possible to fine-tune Stable Diffusion on a custom image-caption pair dataset on consumer GPUs such as a Tesla T4 or a Tesla V100.
+
+### Training
+
+First, you need to set up your development environment as explained in the [installation section](#installing-the-dependencies). Make sure to set the `MODEL_NAME` and `DATASET_NAME` environment variables and, optionally, the `VAE_NAME` variable. Here, we will use [Stable Diffusion XL 1.0-base](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and the [Pokemons dataset](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions).
+ +**___Note: It is quite useful to monitor the training progress by regularly generating sample images during training. [Weights and Biases](https://docs.wandb.ai/quickstart) is a nice solution to easily see generating images during training. All you need to do is to run `pip install wandb` before training to automatically log images.___** + +```bash +export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0" +export VAE_NAME="madebyollin/sdxl-vae-fp16-fix" +export DATASET_NAME="lambdalabs/pokemon-blip-captions" +``` + +For this example we want to directly store the trained LoRA embeddings on the Hub, so +we need to be logged in and add the `--push_to_hub` flag. + +```bash +huggingface-cli login +``` + +Now we can start training! + +```bash +accelerate launch train_text_to_image_lora_sdxl.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --pretrained_vae_model_name_or_path=$VAE_NAME \ + --dataset_name=$DATASET_NAME --caption_column="text" \ + --resolution=1024 --random_flip \ + --train_batch_size=1 \ + --num_train_epochs=2 --checkpointing_steps=500 \ + --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \ + --mixed_precision="fp16" \ + --seed=42 \ + --output_dir="sd-pokemon-model-lora-sdxl" \ + --validation_prompt="cute dragon creature" --report_to="wandb" \ + --push_to_hub +``` + +The above command will also run inference as fine-tuning progresses and log the results to Weights and Biases. + +**Notes**: + +* SDXL's VAE is known to suffer from numerical instability issues. This is why we also expose a CLI argument namely `--pretrained_vae_model_name_or_path` that lets you specify the location of a better VAE (such as [this one](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)). + + +### Using DeepSpeed +Using DeepSpeed one can reduce the consumption of GPU memory, enabling the training of models on GPUs with smaller memory sizes. DeepSpeed is capable of offloading model parameters to the machine's memory, or it can distribute parameters, gradients, and optimizer states across multiple GPUs. This allows for the training of larger models under the same hardware configuration. + +First, you need to use the `accelerate config` command to choose to use DeepSpeed, or manually use the accelerate config file to set up DeepSpeed. + +Here is an example of a config file for using DeepSpeed. For more detailed explanations of the configuration, you can refer to this [link](https://huggingface.co/docs/accelerate/usage_guides/deepspeed). +```yaml +compute_environment: LOCAL_MACHINE +debug: true +deepspeed_config: + gradient_accumulation_steps: 1 + gradient_clipping: 1.0 + offload_optimizer_device: none + offload_param_device: none + zero3_init_flag: false + zero_stage: 2 +distributed_type: DEEPSPEED +downcast_bf16: 'no' +machine_rank: 0 +main_training_function: main +mixed_precision: fp16 +num_machines: 1 +num_processes: 1 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false +``` +You need to save the mentioned configuration as an `accelerate_config.yaml` file. Then, you need to input the path of your `accelerate_config.yaml` file into the `ACCELERATE_CONFIG_FILE` parameter. This way you can use DeepSpeed to train your SDXL model in LoRA. Additionally, you can use DeepSpeed to train other SD models in this way. 
+ +```shell +export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0" +export VAE_NAME="madebyollin/sdxl-vae-fp16-fix" +export DATASET_NAME="lambdalabs/pokemon-blip-captions" +export ACCELERATE_CONFIG_FILE="your accelerate_config.yaml" + +accelerate launch --config_file $ACCELERATE_CONFIG_FILE train_text_to_image_lora_sdxl.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --pretrained_vae_model_name_or_path=$VAE_NAME \ + --dataset_name=$DATASET_NAME --caption_column="text" \ + --resolution=1024 \ + --train_batch_size=1 \ + --num_train_epochs=2 \ + --checkpointing_steps=2 \ + --learning_rate=1e-04 \ + --lr_scheduler="constant" \ + --lr_warmup_steps=0 \ + --mixed_precision="fp16" \ + --max_train_steps=20 \ + --validation_epochs=20 \ + --seed=1234 \ + --output_dir="sd-pokemon-model-lora-sdxl" \ + --validation_prompt="cute dragon creature" + +``` + + +### Finetuning the text encoder and UNet + +The script also allows you to finetune the `text_encoder` along with the `unet`. + +🚨 Training the text encoder requires additional memory. + +Pass the `--train_text_encoder` argument to the training script to enable finetuning the `text_encoder` and `unet`: + +```bash +accelerate launch train_text_to_image_lora_sdxl.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --dataset_name=$DATASET_NAME --caption_column="text" \ + --resolution=1024 --random_flip \ + --train_batch_size=1 \ + --num_train_epochs=2 --checkpointing_steps=500 \ + --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \ + --seed=42 \ + --output_dir="sd-pokemon-model-lora-sdxl-txt" \ + --train_text_encoder \ + --validation_prompt="cute dragon creature" --report_to="wandb" \ + --push_to_hub +``` + +### Inference + +Once you have trained a model using above command, the inference can be done simply using the `DiffusionPipeline` after loading the trained LoRA weights. You +need to pass the `output_dir` for loading the LoRA weights which, in this case, is `sd-pokemon-model-lora-sdxl`. + +```python +from diffusers import DiffusionPipeline +import torch + +model_path = "takuoko/sd-pokemon-model-lora-sdxl" +pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16) +pipe.to("cuda") +pipe.load_lora_weights(model_path) + +prompt = "A pokemon with green eyes and red legs." 
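+# num_inference_steps controls how many denoising steps are run; a higher guidance_scale
+# pushes the image to follow the prompt more closely at the cost of diversity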
+image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0] +image.save("pokemon.png") +``` diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/default_config.yaml b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/default_config.yaml new file mode 100644 index 000000000..829e0b662 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/default_config.yaml @@ -0,0 +1,20 @@ +compute_environment: LOCAL_MACHINE +debug: false +deepspeed_config: + gradient_accumulation_steps: 1 + steps_per_print: 1 + zero3_init_flag: true + zero_stage: 0 +distributed_type: DEEPSPEED +downcast_bf16: 'no' +machine_rank: 0 +main_training_function: main +mixed_precision: fp16 +num_machines: 1 +num_processes: 16 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/requirements.txt b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/requirements.txt new file mode 100644 index 000000000..4e079f18c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/requirements.txt @@ -0,0 +1,6 @@ +accelerate>=0.16.0 +datasets +ftfy +tensorboard +Jinja2 +peft==0.7.0 diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/requirements_flax.txt b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/requirements_flax.txt new file mode 100644 index 000000000..b6eb64e25 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/requirements_flax.txt @@ -0,0 +1,9 @@ +transformers>=4.25.1 +datasets +flax +optax +torch +torchvision +ftfy +tensorboard +Jinja2 diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/requirements_sdxl.txt b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/requirements_sdxl.txt new file mode 100644 index 000000000..64cbc9205 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/requirements_sdxl.txt @@ -0,0 +1,8 @@ +accelerate>=0.22.0 +torchvision +transformers>=4.25.1 +ftfy +tensorboard +Jinja2 +datasets +peft==0.7.0 \ No newline at end of file diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/single_config.yaml b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/single_config.yaml new file mode 100644 index 000000000..a6d6d2e40 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/single_config.yaml @@ -0,0 +1,20 @@ +compute_environment: LOCAL_MACHINE +debug: false +deepspeed_config: + gradient_accumulation_steps: 1 + steps_per_print: 1 + zero3_init_flag: true + zero_stage: 0 +distributed_type: DEEPSPEED +downcast_bf16: 'no' +machine_rank: 0 +main_training_function: main +mixed_precision: fp16 +num_machines: 1 +num_processes: 1 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false \ No newline at end of file diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/test_text_to_image.py 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/test_text_to_image.py new file mode 100644 index 000000000..6231a89b1 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/test_text_to_image.py @@ -0,0 +1,365 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +import shutil +import sys +import tempfile + +from diffusers import DiffusionPipeline, UNet2DConditionModel # noqa: E402 + + +sys.path.append("..") +from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 + + +logging.basicConfig(level=logging.DEBUG) + +logger = logging.getLogger() +stream_handler = logging.StreamHandler(sys.stdout) +logger.addHandler(stream_handler) + + +class TextToImage(ExamplesTestsAccelerate): + def test_text_to_image(self): + with tempfile.TemporaryDirectory() as tmpdir: + test_args = f""" + examples/text_to_image/train_text_to_image.py + --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --center_crop + --random_flip + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 2 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + """.split() + + run_command(self._launch_args + test_args) + # save_pretrained smoke test + self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.safetensors"))) + self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json"))) + + def test_text_to_image_checkpointing(self): + pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" + prompt = "a prompt" + + with tempfile.TemporaryDirectory() as tmpdir: + # Run training script with checkpointing + # max_train_steps == 4, checkpointing_steps == 2 + # Should create checkpoints at steps 2, 4 + + initial_run_args = f""" + examples/text_to_image/train_text_to_image.py + --pretrained_model_name_or_path {pretrained_model_name_or_path} + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --center_crop + --random_flip + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 4 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + --checkpointing_steps=2 + --seed=0 + """.split() + + run_command(self._launch_args + initial_run_args) + + pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) + pipe(prompt, num_inference_steps=1) + + # check checkpoint directories exist + self.assertEqual( + {x for x in os.listdir(tmpdir) if "checkpoint" in x}, + {"checkpoint-2", "checkpoint-4"}, + ) + + # check can run an intermediate checkpoint + unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet") + pipe = 
DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None) + pipe(prompt, num_inference_steps=1) + + # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming + shutil.rmtree(os.path.join(tmpdir, "checkpoint-2")) + + # Run training script for 2 total steps resuming from checkpoint 4 + + resume_run_args = f""" + examples/text_to_image/train_text_to_image.py + --pretrained_model_name_or_path {pretrained_model_name_or_path} + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --center_crop + --random_flip + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 2 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + --checkpointing_steps=1 + --resume_from_checkpoint=checkpoint-4 + --seed=0 + """.split() + + run_command(self._launch_args + resume_run_args) + + # check can run new fully trained pipeline + pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) + pipe(prompt, num_inference_steps=1) + + # no checkpoint-2 -> check old checkpoints do not exist + # check new checkpoints exist + self.assertEqual( + {x for x in os.listdir(tmpdir) if "checkpoint" in x}, + {"checkpoint-4", "checkpoint-5"}, + ) + + def test_text_to_image_checkpointing_use_ema(self): + pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" + prompt = "a prompt" + + with tempfile.TemporaryDirectory() as tmpdir: + # Run training script with checkpointing + # max_train_steps == 4, checkpointing_steps == 2 + # Should create checkpoints at steps 2, 4 + + initial_run_args = f""" + examples/text_to_image/train_text_to_image.py + --pretrained_model_name_or_path {pretrained_model_name_or_path} + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --center_crop + --random_flip + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 4 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + --checkpointing_steps=2 + --use_ema + --seed=0 + """.split() + + run_command(self._launch_args + initial_run_args) + + pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) + pipe(prompt, num_inference_steps=2) + + # check checkpoint directories exist + self.assertEqual( + {x for x in os.listdir(tmpdir) if "checkpoint" in x}, + {"checkpoint-2", "checkpoint-4"}, + ) + + # check can run an intermediate checkpoint + unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet") + pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None) + pipe(prompt, num_inference_steps=1) + + # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming + shutil.rmtree(os.path.join(tmpdir, "checkpoint-2")) + + # Run training script for 2 total steps resuming from checkpoint 4 + + resume_run_args = f""" + examples/text_to_image/train_text_to_image.py + --pretrained_model_name_or_path {pretrained_model_name_or_path} + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --center_crop + --random_flip + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 2 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + --checkpointing_steps=1 + --resume_from_checkpoint=checkpoint-4 + --use_ema + --seed=0 + """.split() + + run_command(self._launch_args + 
resume_run_args) + + # check can run new fully trained pipeline + pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) + pipe(prompt, num_inference_steps=1) + + # no checkpoint-2 -> check old checkpoints do not exist + # check new checkpoints exist + self.assertEqual( + {x for x in os.listdir(tmpdir) if "checkpoint" in x}, + {"checkpoint-4", "checkpoint-5"}, + ) + + def test_text_to_image_checkpointing_checkpoints_total_limit(self): + pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" + prompt = "a prompt" + + with tempfile.TemporaryDirectory() as tmpdir: + # Run training script with checkpointing + # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 + # Should create checkpoints at steps 2, 4, 6 + # with checkpoint at step 2 deleted + + initial_run_args = f""" + examples/text_to_image/train_text_to_image.py + --pretrained_model_name_or_path {pretrained_model_name_or_path} + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --center_crop + --random_flip + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 6 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + --checkpointing_steps=2 + --checkpoints_total_limit=2 + --seed=0 + """.split() + + run_command(self._launch_args + initial_run_args) + + pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) + pipe(prompt, num_inference_steps=1) + + # check checkpoint directories exist + # checkpoint-2 should have been deleted + self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) + + def test_text_to_image_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): + pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" + prompt = "a prompt" + + with tempfile.TemporaryDirectory() as tmpdir: + # Run training script with checkpointing + # max_train_steps == 4, checkpointing_steps == 2 + # Should create checkpoints at steps 2, 4 + + initial_run_args = f""" + examples/text_to_image/train_text_to_image.py + --pretrained_model_name_or_path {pretrained_model_name_or_path} + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --center_crop + --random_flip + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 4 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + --checkpointing_steps=2 + --seed=0 + """.split() + + run_command(self._launch_args + initial_run_args) + + pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) + pipe(prompt, num_inference_steps=1) + + # check checkpoint directories exist + self.assertEqual( + {x for x in os.listdir(tmpdir) if "checkpoint" in x}, + {"checkpoint-2", "checkpoint-4"}, + ) + + # resume and we should try to checkpoint at 6, where we'll have to remove + # checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint + + resume_run_args = f""" + examples/text_to_image/train_text_to_image.py + --pretrained_model_name_or_path {pretrained_model_name_or_path} + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --center_crop + --random_flip + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 8 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + --checkpointing_steps=2 + 
--resume_from_checkpoint=checkpoint-4 + --checkpoints_total_limit=2 + --seed=0 + """.split() + + run_command(self._launch_args + resume_run_args) + + pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) + pipe(prompt, num_inference_steps=1) + + # check checkpoint directories exist + self.assertEqual( + {x for x in os.listdir(tmpdir) if "checkpoint" in x}, + {"checkpoint-6", "checkpoint-8"}, + ) + + +class TextToImageSDXL(ExamplesTestsAccelerate): + def test_text_to_image_sdxl(self): + with tempfile.TemporaryDirectory() as tmpdir: + test_args = f""" + examples/text_to_image/train_text_to_image_sdxl.py + --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --center_crop + --random_flip + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 2 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + """.split() + + run_command(self._launch_args + test_args) + # save_pretrained smoke test + self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.safetensors"))) + self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json"))) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/test_text_to_image_lora.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/test_text_to_image_lora.py new file mode 100644 index 000000000..4604b9f52 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/test_text_to_image_lora.py @@ -0,0 +1,300 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import os +import sys +import tempfile + +import safetensors + +from diffusers import DiffusionPipeline # noqa: E402 + + +sys.path.append("..") +from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 + + +logging.basicConfig(level=logging.DEBUG) + +logger = logging.getLogger() +stream_handler = logging.StreamHandler(sys.stdout) +logger.addHandler(stream_handler) + + +class TextToImageLoRA(ExamplesTestsAccelerate): + def test_text_to_image_lora_sdxl_checkpointing_checkpoints_total_limit(self): + prompt = "a prompt" + pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" + + with tempfile.TemporaryDirectory() as tmpdir: + # Run training script with checkpointing + # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 + # Should create checkpoints at steps 2, 4, 6 + # with checkpoint at step 2 deleted + + initial_run_args = f""" + examples/text_to_image/train_text_to_image_lora_sdxl.py + --pretrained_model_name_or_path {pipeline_path} + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 6 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + --checkpointing_steps=2 + --checkpoints_total_limit=2 + """.split() + + run_command(self._launch_args + initial_run_args) + + pipe = DiffusionPipeline.from_pretrained(pipeline_path) + pipe.load_lora_weights(tmpdir) + pipe(prompt, num_inference_steps=1) + + # check checkpoint directories exist + # checkpoint-2 should have been deleted + self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) + + def test_text_to_image_lora_checkpointing_checkpoints_total_limit(self): + pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" + prompt = "a prompt" + + with tempfile.TemporaryDirectory() as tmpdir: + # Run training script with checkpointing + # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 + # Should create checkpoints at steps 2, 4, 6 + # with checkpoint at step 2 deleted + + initial_run_args = f""" + examples/text_to_image/train_text_to_image_lora.py + --pretrained_model_name_or_path {pretrained_model_name_or_path} + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --center_crop + --random_flip + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 6 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + --checkpointing_steps=2 + --checkpoints_total_limit=2 + --seed=0 + --num_validation_images=0 + """.split() + + run_command(self._launch_args + initial_run_args) + + pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None + ) + pipe.load_lora_weights(tmpdir) + pipe(prompt, num_inference_steps=1) + + # check checkpoint directories exist + # checkpoint-2 should have been deleted + self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) + + def test_text_to_image_lora_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): + pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" + prompt = "a prompt" + + with tempfile.TemporaryDirectory() as tmpdir: + # Run training script with checkpointing + # max_train_steps == 4, checkpointing_steps == 2 + # Should create 
checkpoints at steps 2, 4 + + initial_run_args = f""" + examples/text_to_image/train_text_to_image_lora.py + --pretrained_model_name_or_path {pretrained_model_name_or_path} + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --center_crop + --random_flip + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 4 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + --checkpointing_steps=2 + --seed=0 + --num_validation_images=0 + """.split() + + run_command(self._launch_args + initial_run_args) + + pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None + ) + pipe.load_lora_weights(tmpdir) + pipe(prompt, num_inference_steps=1) + + # check checkpoint directories exist + self.assertEqual( + {x for x in os.listdir(tmpdir) if "checkpoint" in x}, + {"checkpoint-2", "checkpoint-4"}, + ) + + # resume and we should try to checkpoint at 6, where we'll have to remove + # checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint + + resume_run_args = f""" + examples/text_to_image/train_text_to_image_lora.py + --pretrained_model_name_or_path {pretrained_model_name_or_path} + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --center_crop + --random_flip + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 8 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + --checkpointing_steps=2 + --resume_from_checkpoint=checkpoint-4 + --checkpoints_total_limit=2 + --seed=0 + --num_validation_images=0 + """.split() + + run_command(self._launch_args + resume_run_args) + + pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None + ) + pipe.load_lora_weights(tmpdir) + pipe(prompt, num_inference_steps=1) + + # check checkpoint directories exist + self.assertEqual( + {x for x in os.listdir(tmpdir) if "checkpoint" in x}, + {"checkpoint-6", "checkpoint-8"}, + ) + + +class TextToImageLoRASDXL(ExamplesTestsAccelerate): + def test_text_to_image_lora_sdxl(self): + with tempfile.TemporaryDirectory() as tmpdir: + test_args = f""" + examples/text_to_image/train_text_to_image_lora_sdxl.py + --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 2 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + """.split() + + run_command(self._launch_args + test_args) + # save_pretrained smoke test + self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) + + # make sure the state_dict has the correct naming in the parameters. 
+ lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) + is_lora = all("lora" in k for k in lora_state_dict.keys()) + self.assertTrue(is_lora) + + def test_text_to_image_lora_sdxl_with_text_encoder(self): + with tempfile.TemporaryDirectory() as tmpdir: + test_args = f""" + examples/text_to_image/train_text_to_image_lora_sdxl.py + --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 2 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --lr_warmup_steps 0 + --output_dir {tmpdir} + --train_text_encoder + """.split() + + run_command(self._launch_args + test_args) + # save_pretrained smoke test + self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) + + # make sure the state_dict has the correct naming in the parameters. + lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) + is_lora = all("lora" in k for k in lora_state_dict.keys()) + self.assertTrue(is_lora) + + # when not training the text encoder, all the parameters in the state dict should start + # with `"unet"` or `"text_encoder"` or `"text_encoder_2"` in their names. + keys = lora_state_dict.keys() + starts_with_unet = all( + k.startswith("unet") or k.startswith("text_encoder") or k.startswith("text_encoder_2") for k in keys + ) + self.assertTrue(starts_with_unet) + + def test_text_to_image_lora_sdxl_text_encoder_checkpointing_checkpoints_total_limit(self): + prompt = "a prompt" + pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" + + with tempfile.TemporaryDirectory() as tmpdir: + # Run training script with checkpointing + # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 + # Should create checkpoints at steps 2, 4, 6 + # with checkpoint at step 2 deleted + + initial_run_args = f""" + examples/text_to_image/train_text_to_image_lora_sdxl.py + --pretrained_model_name_or_path {pipeline_path} + --dataset_name hf-internal-testing/dummy_image_text_data + --resolution 64 + --train_batch_size 1 + --gradient_accumulation_steps 1 + --max_train_steps 6 + --learning_rate 5.0e-04 + --scale_lr + --lr_scheduler constant + --train_text_encoder + --lr_warmup_steps 0 + --output_dir {tmpdir} + --checkpointing_steps=2 + --checkpoints_total_limit=2 + """.split() + + run_command(self._launch_args + initial_run_args) + + pipe = DiffusionPipeline.from_pretrained(pipeline_path) + pipe.load_lora_weights(tmpdir) + pipe(prompt, num_inference_steps=1) + + # check checkpoint directories exist + # checkpoint-2 should have been deleted + self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image.py new file mode 100644 index 000000000..2052fb6d6 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image.py @@ -0,0 +1,1137 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import logging +import math +import os +import random +import shutil +from pathlib import Path + +import accelerate +import datasets +import numpy as np +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +import transformers +from accelerate import Accelerator +from accelerate.logging import get_logger +from accelerate.state import AcceleratorState +from accelerate.utils import ProjectConfiguration, set_seed +from datasets import load_dataset +from huggingface_hub import create_repo, upload_folder +from packaging import version +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import CLIPTextModel, CLIPTokenizer +from transformers.utils import ContextManagers + +import diffusers +from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel +from diffusers.optimization import get_scheduler +from diffusers.training_utils import EMAModel, compute_snr +from diffusers.utils import check_min_version, deprecate, is_wandb_available, make_image_grid +from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card +from diffusers.utils.import_utils import is_xformers_available +from diffusers.utils.torch_utils import is_compiled_module + + +if is_wandb_available(): + import wandb + + +# Will error if the minimal version of diffusers is not installed. Remove at your own risks. +check_min_version("0.27.0") + +logger = get_logger(__name__, log_level="INFO") + +DATASET_NAME_MAPPING = { + "lambdalabs/pokemon-blip-captions": ("image", "text"), +} + + +def save_model_card( + args, + repo_id: str, + images: list = None, + repo_folder: str = None, +): + img_str = "" + if len(images) > 0: + image_grid = make_image_grid(images, 1, len(args.validation_prompts)) + image_grid.save(os.path.join(repo_folder, "val_imgs_grid.png")) + img_str += "![val_imgs_grid](./val_imgs_grid.png)\n" + + model_description = f""" +# Text-to-image finetuning - {repo_id} + +This pipeline was finetuned from **{args.pretrained_model_name_or_path}** on the **{args.dataset_name}** dataset. 
Below are some example images generated with the finetuned pipeline using the following prompts: {args.validation_prompts}: \n +{img_str} + +## Pipeline usage + +You can use the pipeline like so: + +```python +from diffusers import DiffusionPipeline +import torch + +pipeline = DiffusionPipeline.from_pretrained("{repo_id}", torch_dtype=torch.float16) +prompt = "{args.validation_prompts[0]}" +image = pipeline(prompt).images[0] +image.save("my_image.png") +``` + +## Training info + +These are the key hyperparameters used during training: + +* Epochs: {args.num_train_epochs} +* Learning rate: {args.learning_rate} +* Batch size: {args.train_batch_size} +* Gradient accumulation steps: {args.gradient_accumulation_steps} +* Image resolution: {args.resolution} +* Mixed-precision: {args.mixed_precision} + +""" + wandb_info = "" + if is_wandb_available(): + wandb_run_url = None + if wandb.run is not None: + wandb_run_url = wandb.run.url + + if wandb_run_url is not None: + wandb_info = f""" +More information on all the CLI arguments and the environment are available on your [`wandb` run page]({wandb_run_url}). +""" + + model_description += wandb_info + + model_card = load_or_create_model_card( + repo_id_or_path=repo_id, + from_training=True, + license="creativeml-openrail-m", + base_model=args.pretrained_model_name_or_path, + model_description=model_description, + inference=True, + ) + + tags = ["stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "diffusers", "diffusers-training"] + model_card = populate_model_card(model_card, tags=tags) + + model_card.save(os.path.join(repo_folder, "README.md")) + + +def log_validation(vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, epoch): + logger.info("Running validation... ") + + pipeline = StableDiffusionPipeline.from_pretrained( + args.pretrained_model_name_or_path, + vae=accelerator.unwrap_model(vae), + text_encoder=accelerator.unwrap_model(text_encoder), + tokenizer=tokenizer, + unet=accelerator.unwrap_model(unet), + safety_checker=None, + revision=args.revision, + variant=args.variant, + torch_dtype=weight_dtype, + ) + pipeline = pipeline.to(accelerator.device) + pipeline.set_progress_bar_config(disable=True) + + if args.enable_xformers_memory_efficient_attention: + pipeline.enable_xformers_memory_efficient_attention() + + if args.seed is None: + generator = None + else: + generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) + + images = [] + for i in range(len(args.validation_prompts)): + with torch.autocast("cuda"): + image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] + + images.append(image) + + for tracker in accelerator.trackers: + if tracker.name == "tensorboard": + np_images = np.stack([np.asarray(img) for img in images]) + tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") + elif tracker.name == "wandb": + tracker.log( + { + "validation": [ + wandb.Image(image, caption=f"{i}: {args.validation_prompts[i]}") + for i, image in enumerate(images) + ] + } + ) + else: + logger.warning(f"image logging not implemented for {tracker.name}") + + del pipeline + torch.cuda.empty_cache() + + return images + + +def parse_args(): + parser = argparse.ArgumentParser(description="Simple example of a training script.") + parser.add_argument( + "--input_perturbation", type=float, default=0, help="The scale of input perturbation. Recommended 0.1." 
+ ) + parser.add_argument( + "--pretrained_model_name_or_path", + type=str, + default=None, + required=True, + help="Path to pretrained model or model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--revision", + type=str, + default=None, + required=False, + help="Revision of pretrained model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--variant", + type=str, + default=None, + help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", + ) + parser.add_argument( + "--dataset_name", + type=str, + default=None, + help=( + "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," + " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," + " or to a folder containing files that 🤗 Datasets can understand." + ), + ) + parser.add_argument( + "--dataset_config_name", + type=str, + default=None, + help="The config of the Dataset, leave as None if there's only one config.", + ) + parser.add_argument( + "--train_data_dir", + type=str, + default=None, + help=( + "A folder containing the training data. Folder contents must follow the structure described in" + " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" + " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." + ), + ) + parser.add_argument( + "--image_column", type=str, default="image", help="The column of the dataset containing an image." + ) + parser.add_argument( + "--caption_column", + type=str, + default="text", + help="The column of the dataset containing a caption or a list of captions.", + ) + parser.add_argument( + "--max_train_samples", + type=int, + default=None, + help=( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + ), + ) + parser.add_argument( + "--validation_prompts", + type=str, + default=None, + nargs="+", + help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."), + ) + parser.add_argument( + "--output_dir", + type=str, + default="sd-model-finetuned", + help="The output directory where the model predictions and checkpoints will be written.", + ) + parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="The directory where the downloaded models and datasets will be stored.", + ) + parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") + parser.add_argument( + "--resolution", + type=int, + default=512, + help=( + "The resolution for input images, all the images in the train/validation dataset will be resized to this" + " resolution" + ), + ) + parser.add_argument( + "--center_crop", + default=False, + action="store_true", + help=( + "Whether to center crop the input images to the resolution. If not set, the images will be randomly" + " cropped. The images will be resized to the resolution first before cropping." + ), + ) + parser.add_argument( + "--random_flip", + action="store_true", + help="whether to randomly flip images horizontally", + ) + parser.add_argument( + "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." + ) + parser.add_argument("--num_train_epochs", type=int, default=100) + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--gradient_checkpointing", + action="store_true", + help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=1e-4, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--scale_lr", + action="store_true", + default=False, + help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", + ) + parser.add_argument( + "--lr_scheduler", + type=str, + default="constant", + help=( + 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' + ' "constant", "constant_with_warmup"]' + ), + ) + parser.add_argument( + "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." + ) + parser.add_argument( + "--snr_gamma", + type=float, + default=None, + help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " + "More details here: https://arxiv.org/abs/2303.09556.", + ) + parser.add_argument( + "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." + ) + parser.add_argument( + "--allow_tf32", + action="store_true", + help=( + "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" + " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" + ), + ) + parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") + parser.add_argument( + "--non_ema_revision", + type=str, + default=None, + required=False, + help=( + "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" + " remote repository specified with --pretrained_model_name_or_path." + ), + ) + parser.add_argument( + "--dataloader_num_workers", + type=int, + default=16, + help=( + "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." + ), + ) + parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") + parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") + parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") + parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") + parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") + parser.add_argument( + "--prediction_type", + type=str, + default=None, + help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. 
If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.", + ) + parser.add_argument( + "--hub_model_id", + type=str, + default=None, + help="The name of the repository to keep in sync with the local `output_dir`.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help=( + "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" + " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." + ), + ) + parser.add_argument( + "--mixed_precision", + type=str, + default=None, + choices=["no", "fp16", "bf16"], + help=( + "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" + " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" + " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." + ), + ) + parser.add_argument( + "--report_to", + type=str, + default="tensorboard", + help=( + 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' + ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' + ), + ) + parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument( + "--checkpointing_steps", + type=int, + default=500, + help=( + "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" + " training using `--resume_from_checkpoint`." + ), + ) + parser.add_argument( + "--checkpoints_total_limit", + type=int, + default=None, + help=("Max number of checkpoints to store."), + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help=( + "Whether training should be resumed from a previous checkpoint. Use a path saved by" + ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' + ), + ) + parser.add_argument( + "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
+ ) + parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") + parser.add_argument( + "--validation_epochs", + type=int, + default=5, + help="Run validation every X epochs.", + ) + parser.add_argument( + "--tracker_project_name", + type=str, + default="text2image-fine-tune", + help=( + "The `project_name` argument passed to Accelerator.init_trackers for" + " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" + ), + ) + parser.add_argument( + "--NHWC", + action="store_true", + help="Whether or not using NHWC for training", + ) + parser.add_argument( + "--apex_fused_adam", + action="store_true", + help="Whether or not using fused_adam optimizer", + ) + + args = parser.parse_args() + env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) + if env_local_rank != -1 and env_local_rank != args.local_rank: + args.local_rank = env_local_rank + + # Sanity checks + if args.dataset_name is None and args.train_data_dir is None: + raise ValueError("Need either a dataset name or a training folder.") + + # default to using the same revision for the non-ema model if not specified + if args.non_ema_revision is None: + args.non_ema_revision = args.revision + + return args + + +def main(): + args = parse_args() + + if args.report_to == "wandb" and args.hub_token is not None: + raise ValueError( + "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." + " Please use `huggingface-cli login` to authenticate with the Hub." + ) + + if args.non_ema_revision is not None: + deprecate( + "non_ema_revision!=None", + "0.15.0", + message=( + "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" + " use `--variant=non_ema` instead." + ), + ) + logging_dir = os.path.join(args.output_dir, args.logging_dir) + + accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) + + accelerator = Accelerator( + gradient_accumulation_steps=args.gradient_accumulation_steps, + mixed_precision=args.mixed_precision, + log_with=args.report_to, + project_config=accelerator_project_config, + ) + + # Make one log on every process with the configuration for debugging. + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + logger.info(accelerator.state, main_process_only=False) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_warning() + diffusers.utils.logging.set_verbosity_info() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + diffusers.utils.logging.set_verbosity_error() + + # If passed along, set the training seed now. + if args.seed is not None: + set_seed(args.seed) + + # Handle the repository creation + if accelerator.is_main_process: + if args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + + if args.push_to_hub: + repo_id = create_repo( + repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token + ).repo_id + + # Load scheduler, tokenizer and models. 
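# --- Illustrative aside (not part of the patch) -------------------------------
# A minimal sketch of how the flags parsed above combine into the effective
# batch size and the optionally rescaled learning rate used further down in this
# script. The concrete numbers (2 processes, per-device batch 16, accumulation 4)
# are assumptions chosen purely for illustration.
num_processes = 2                      # assumed world size
train_batch_size = 16                  # --train_batch_size (per device)
gradient_accumulation_steps = 4        # --gradient_accumulation_steps
base_learning_rate = 1e-4              # --learning_rate

total_batch_size = train_batch_size * num_processes * gradient_accumulation_steps
scaled_lr = base_learning_rate * gradient_accumulation_steps * train_batch_size * num_processes

print(total_batch_size)  # 128 samples contribute to each optimizer step
print(scaled_lr)         # 1.28e-2 when --scale_lr is passed; otherwise 1e-4 is kept
# ------------------------------------------------------------------------------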
+ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") + tokenizer = CLIPTokenizer.from_pretrained( + args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision + ) + + def deepspeed_zero_init_disabled_context_manager(): + """ + returns either a context list that includes one that will disable zero.Init or an empty context list + """ + deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None + if deepspeed_plugin is None: + return [] + + return [deepspeed_plugin.zero3_init_context_manager(enable=False)] + + # Currently Accelerate doesn't know how to handle multiple models under Deepspeed ZeRO stage 3. + # For this to work properly all models must be run through `accelerate.prepare`. But accelerate + # will try to assign the same optimizer with the same weights to all models during + # `deepspeed.initialize`, which of course doesn't work. + # + # For now the following workaround will partially support Deepspeed ZeRO-3, by excluding the 2 + # frozen models from being partitioned during `zero.Init` which gets called during + # `from_pretrained` So CLIPTextModel and AutoencoderKL will not enjoy the parameter sharding + # across multiple gpus and only UNet2DConditionModel will get ZeRO sharded. + with ContextManagers(deepspeed_zero_init_disabled_context_manager()): + text_encoder = CLIPTextModel.from_pretrained( + args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant + ) + vae = AutoencoderKL.from_pretrained( + args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant + ) + + unet = UNet2DConditionModel.from_pretrained( + args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision + ) + # Freeze vae and text_encoder and set unet to trainable + vae.requires_grad_(False) + text_encoder.requires_grad_(False) + unet.train() + + # Create EMA for the unet. + if args.use_ema: + ema_unet = UNet2DConditionModel.from_pretrained( + args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant + ) + ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) + + if args.enable_xformers_memory_efficient_attention: + if is_xformers_available(): + import xformers + + xformers_version = version.parse(xformers.__version__) + if xformers_version == version.parse("0.0.16"): + logger.warning( + "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." + ) + unet.enable_xformers_memory_efficient_attention() + else: + raise ValueError("xformers is not available. 
Make sure it is installed correctly") + + # `accelerate` 0.16.0 will have better support for customized saving + if version.parse(accelerate.__version__) >= version.parse("0.16.0"): + # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format + def save_model_hook(models, weights, output_dir): + if accelerator.is_main_process: + if args.use_ema: + ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) + + for i, model in enumerate(models): + model.save_pretrained(os.path.join(output_dir, "unet")) + + # make sure to pop weight so that corresponding model is not saved again + weights.pop() + + def load_model_hook(models, input_dir): + if args.use_ema: + load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) + ema_unet.load_state_dict(load_model.state_dict()) + ema_unet.to(accelerator.device) + del load_model + + for _ in range(len(models)): + # pop models so that they are not loaded again + model = models.pop() + + # load diffusers style into model + load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") + model.register_to_config(**load_model.config) + + model.load_state_dict(load_model.state_dict()) + del load_model + + accelerator.register_save_state_pre_hook(save_model_hook) + accelerator.register_load_state_pre_hook(load_model_hook) + + if args.gradient_checkpointing: + unet.enable_gradient_checkpointing() + + # Enable TF32 for faster training on Ampere GPUs, + # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices + if args.allow_tf32: + torch.backends.cuda.matmul.allow_tf32 = True + + if args.scale_lr: + args.learning_rate = ( + args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes + ) + + # Initialize the optimizer + if args.use_8bit_adam: + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError( + "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" + ) + + optimizer_cls = bnb.optim.AdamW8bit + elif args.apex_fused_adam: + import apex + optimizer_cls = apex.optimizers.FusedAdam + + else: + optimizer_cls = torch.optim.AdamW + + optimizer = optimizer_cls( + unet.parameters(), + lr=args.learning_rate, + betas=(args.adam_beta1, args.adam_beta2), + weight_decay=args.adam_weight_decay, + eps=args.adam_epsilon, + ) + + # Get the datasets: you can either provide your own training and evaluation files (see below) + # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). + + # In distributed training, the load_dataset function guarantees that only one local process can concurrently + # download the dataset. + if args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + dataset = load_dataset( + args.dataset_name, + args.dataset_config_name, + cache_dir=args.cache_dir, + data_dir=args.train_data_dir, + ) + else: + data_files = {} + if args.train_data_dir is not None: + data_files["train"] = os.path.join(args.train_data_dir, "**") + dataset = load_dataset( + "imagefolder", + data_files=data_files, + cache_dir=args.cache_dir, + ) + # See more about loading custom images at + # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder + + # Preprocessing the datasets. + # We need to tokenize inputs and targets. + column_names = dataset["train"].column_names + + # 6. Get the column names for input/target. 
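# --- Illustrative aside (not part of the patch) -------------------------------
# A minimal sketch of the local "imagefolder" layout that the load_dataset call
# above expects when --dataset_name is not given. The directory name
# "./pokemon_data" and the file names are assumptions for illustration only; the
# captions come from metadata.jsonl, as described in the --train_data_dir help:
#
#   pokemon_data/
#   |-- metadata.jsonl     # {"file_name": "0001.png", "text": "a cartoon creature"}
#   |-- 0001.png
#   `-- 0002.png
import json
import os

def write_toy_imagefolder(root="./pokemon_data"):
    # Writes only the metadata file of the layout sketched above; the image
    # files themselves would live next to it.
    os.makedirs(root, exist_ok=True)
    with open(os.path.join(root, "metadata.jsonl"), "w") as f:
        f.write(json.dumps({"file_name": "0001.png", "text": "a cartoon creature"}) + "\n")

# load_dataset("imagefolder", data_files={"train": os.path.join(root, "**")})
# then yields an "image" column and a "text" column for this folder, matching
# the --image_column / --caption_column defaults.
# ------------------------------------------------------------------------------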
+ dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) + if args.image_column is None: + image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] + else: + image_column = args.image_column + if image_column not in column_names: + raise ValueError( + f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" + ) + if args.caption_column is None: + caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] + else: + caption_column = args.caption_column + if caption_column not in column_names: + raise ValueError( + f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" + ) + + # Preprocessing the datasets. + # We need to tokenize input captions and transform the images. + def tokenize_captions(examples, is_train=True): + captions = [] + for caption in examples[caption_column]: + if isinstance(caption, str): + captions.append(caption) + elif isinstance(caption, (list, np.ndarray)): + # take a random caption if there are multiple + captions.append(random.choice(caption) if is_train else caption[0]) + else: + raise ValueError( + f"Caption column `{caption_column}` should contain either strings or lists of strings." + ) + inputs = tokenizer( + captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" + ) + return inputs.input_ids + + # Preprocessing the datasets. + train_transforms = transforms.Compose( + [ + transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), + transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), + transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), + transforms.ToTensor(), + transforms.Normalize([0.5], [0.5]), + ] + ) + + def preprocess_train(examples): + images = [image.convert("RGB") for image in examples[image_column]] + examples["pixel_values"] = [train_transforms(image) for image in images] + examples["input_ids"] = tokenize_captions(examples) + return examples + + with accelerator.main_process_first(): + if args.max_train_samples is not None: + dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) + # Set the training transforms + train_dataset = dataset["train"].with_transform(preprocess_train) + + # for testing ips + from datasets import concatenate_datasets + train_dataset = concatenate_datasets([train_dataset for i in range(10)]) + + + def collate_fn(examples): + pixel_values = torch.stack([example["pixel_values"] for example in examples]) + pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() + input_ids = torch.stack([example["input_ids"] for example in examples]) + return {"pixel_values": pixel_values, "input_ids": input_ids} + + # DataLoaders creation: + train_dataloader = torch.utils.data.DataLoader( + train_dataset, + shuffle=True, + collate_fn=collate_fn, + batch_size=args.train_batch_size, + num_workers=args.dataloader_num_workers, + pin_memory=True, + prefetch_factor = 2 + ) + + # Scheduler and math around the number of training steps. 
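# --- Illustrative aside (not part of the patch) -------------------------------
# A small worked example of the step/epoch bookkeeping that follows. The
# dataloader length (833 batches) and the other numbers are assumptions chosen
# purely for illustration.
import math

len_train_dataloader = 833            # assumed number of batches per epoch
gradient_accumulation_steps = 1       # --gradient_accumulation_steps
num_train_epochs = 100                # --num_train_epochs
max_train_steps = None                # --max_train_steps left unset

num_update_steps_per_epoch = math.ceil(len_train_dataloader / gradient_accumulation_steps)
if max_train_steps is None:
    # mirrors the overrode_max_train_steps branch below
    max_train_steps = num_train_epochs * num_update_steps_per_epoch   # 83300

# If --max_train_steps had been given instead, the epoch count is derived from it:
num_train_epochs = math.ceil(max_train_steps / num_update_steps_per_epoch)  # 100
# ------------------------------------------------------------------------------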
+ overrode_max_train_steps = False + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + overrode_max_train_steps = True + + lr_scheduler = get_scheduler( + args.lr_scheduler, + optimizer=optimizer, + num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, + num_training_steps=args.max_train_steps * accelerator.num_processes, + ) + + # Prepare everything with our `accelerator`. + unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( + unet, optimizer, train_dataloader, lr_scheduler + ) + + if args.use_ema: + ema_unet.to(accelerator.device) + + # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision + # as these weights are only used for inference, keeping weights in full precision is not required. + weight_dtype = torch.float32 + if accelerator.mixed_precision == "fp16": + weight_dtype = torch.float16 + args.mixed_precision = accelerator.mixed_precision + elif accelerator.mixed_precision == "bf16": + weight_dtype = torch.bfloat16 + args.mixed_precision = accelerator.mixed_precision + + # Move text_encode and vae to gpu and cast to weight_dtype + text_encoder.to(accelerator.device, dtype=weight_dtype) + vae.to(accelerator.device, dtype=weight_dtype) + + # We need to recalculate our total training steps as the size of the training dataloader may have changed. + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if overrode_max_train_steps: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + # Afterwards we recalculate our number of training epochs + args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + + # We need to initialize the trackers we use, and also store our configuration. + # The trackers initializes automatically on the main process. + if accelerator.is_main_process: + tracker_config = dict(vars(args)) + tracker_config.pop("validation_prompts") + accelerator.init_trackers(args.tracker_project_name, tracker_config) + + # Function for unwrapping if model was compiled with `torch.compile`. + def unwrap_model(model): + model = accelerator.unwrap_model(model) + model = model._orig_mod if is_compiled_module(model) else model + return model + + # Train! + total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps + + logger.info("***** Running training *****") + logger.info(f" Num examples = {len(train_dataset)}") + logger.info(f" Num Epochs = {args.num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") + logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {args.max_train_steps}") + global_step = 0 + first_epoch = 0 + + # Potentially load in the weights and states from a previous save + if args.resume_from_checkpoint: + if args.resume_from_checkpoint != "latest": + path = os.path.basename(args.resume_from_checkpoint) + else: + # Get the most recent checkpoint + dirs = os.listdir(args.output_dir) + dirs = [d for d in dirs if d.startswith("checkpoint")] + dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) + path = dirs[-1] if len(dirs) > 0 else None + + if path is None: + accelerator.print( + f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." + ) + args.resume_from_checkpoint = None + initial_global_step = 0 + else: + accelerator.print(f"Resuming from checkpoint {path}") + accelerator.load_state(os.path.join(args.output_dir, path)) + global_step = int(path.split("-")[1]) + + initial_global_step = global_step + first_epoch = global_step // num_update_steps_per_epoch + + else: + initial_global_step = 0 + + progress_bar = tqdm( + range(0, args.max_train_steps), + initial=initial_global_step, + desc="Steps", + # Only show the progress bar once on each machine. + disable=not accelerator.is_local_main_process, + ) + + if args.NHWC: + unet = unet.to(memory_format=torch.channels_last) + vae = vae.to(memory_format=torch.channels_last) + + import time + for epoch in range(first_epoch, args.num_train_epochs): + train_loss = 0.0 + iter_start = time.time() + for step, batch in enumerate(train_dataloader): + with accelerator.accumulate(unet): + # Convert images to latent space + if args.NHWC: + batch["pixel_values"] = batch["pixel_values"].to(memory_format=torch.channels_last) + latents = vae.encode(batch["pixel_values"].to(weight_dtype)).latent_dist.sample() + latents = latents * vae.config.scaling_factor + + # Sample noise that we'll add to the latents + noise = torch.randn_like(latents) + if args.noise_offset: + # https://www.crosslabs.org//blog/diffusion-with-offset-noise + noise += args.noise_offset * torch.randn( + (latents.shape[0], latents.shape[1], 1, 1), device=latents.device + ) + if args.input_perturbation: + new_noise = noise + args.input_perturbation * torch.randn_like(noise) + bsz = latents.shape[0] + # Sample a random timestep for each image + timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) + timesteps = timesteps.long() + + # Add noise to the latents according to the noise magnitude at each timestep + # (this is the forward diffusion process) + if args.input_perturbation: + noisy_latents = noise_scheduler.add_noise(latents, new_noise, timesteps) + else: + noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) + + # Get the text embedding for conditioning + encoder_hidden_states = text_encoder(batch["input_ids"], return_dict=False)[0] + + # Get the target for loss depending on the prediction type + if args.prediction_type is not None: + # set prediction_type of scheduler if defined + noise_scheduler.register_to_config(prediction_type=args.prediction_type) + + if noise_scheduler.config.prediction_type == "epsilon": + target = noise + elif noise_scheduler.config.prediction_type == "v_prediction": + target = noise_scheduler.get_velocity(latents, noise, timesteps) + else: + raise ValueError(f"Unknown prediction type 
{noise_scheduler.config.prediction_type}") + + # Predict the noise residual and compute loss + if args.NHWC: + noisy_latents = noisy_latents.to(memory_format=torch.channels_last) + # timesteps = timesteps.to(memory_format=torch.channels_last) + # encoder_hidden_states = encoder_hidden_states.to(memory_format=torch.channels_last) + model_pred = unet(noisy_latents, timesteps, encoder_hidden_states, return_dict=False)[0] + + if args.snr_gamma is None: + loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") + else: + # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. + # Since we predict the noise instead of x_0, the original formulation is slightly changed. + # This is discussed in Section 4.2 of the same paper. + snr = compute_snr(noise_scheduler, timesteps) + mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( + dim=1 + )[0] + if noise_scheduler.config.prediction_type == "epsilon": + mse_loss_weights = mse_loss_weights / snr + elif noise_scheduler.config.prediction_type == "v_prediction": + mse_loss_weights = mse_loss_weights / (snr + 1) + + loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") + loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights + loss = loss.mean() + + # 检查 loss 是否正常,不正常的话退出并返回 -1 + if loss.isnan().any() or loss.isinf().any(): + import sys + sys.exit(1) + + # Gather the losses across all processes for logging (if we use distributed training). + avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() + train_loss += avg_loss.item() / args.gradient_accumulation_steps + + # Backpropagate + accelerator.backward(loss) + if accelerator.sync_gradients: + accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + if args.use_ema: + ema_unet.step(unet.parameters()) + progress_bar.update(1) + global_step += 1 + accelerator.log({"train_loss": train_loss}, step=global_step) + train_loss = 0.0 + iter_elapse = time.time() - iter_start + iter_start = time.time() + ips_per_device = total_batch_size / iter_elapse / accelerator.num_processes + ips_per_gpu = ips_per_device * 2 + + if global_step % args.checkpointing_steps == 0: + if accelerator.is_main_process: + # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` + if args.checkpoints_total_limit is not None: + checkpoints = os.listdir(args.output_dir) + checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] + checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) + + # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints + if len(checkpoints) >= args.checkpoints_total_limit: + num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 + removing_checkpoints = checkpoints[0:num_to_remove] + + logger.info( + f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" + ) + logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") + + for removing_checkpoint in removing_checkpoints: + removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) + shutil.rmtree(removing_checkpoint) + + save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") + + if args.NHWC: + origin_model = accelerator._models[0] + 
accelerator._models[0] = origin_model.to(memory_format=torch.contiguous_format) + accelerator.save_state(save_path) + accelerator._models[0] = origin_model.to(memory_format=torch.channels_last) + else: + accelerator.save_state(save_path) + + logger.info(f"Saved state to {save_path}") + + logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], + "ips_per_device": ips_per_device, "ips_per_gpu": ips_per_gpu} + progress_bar.set_postfix(**logs) + + if global_step >= args.max_train_steps: + break + + if accelerator.is_main_process: + if args.validation_prompts is not None and epoch % args.validation_epochs == 0: + if args.use_ema: + # Store the UNet parameters temporarily and load the EMA parameters to perform inference. + ema_unet.store(unet.parameters()) + ema_unet.copy_to(unet.parameters()) + log_validation( + vae, + text_encoder, + tokenizer, + unet, + args, + accelerator, + weight_dtype, + global_step, + ) + if args.use_ema: + # Switch back to the original UNet parameters. + ema_unet.restore(unet.parameters()) + + # Create the pipeline using the trained modules and save it. + accelerator.wait_for_everyone() + if accelerator.is_main_process: + unet = unwrap_model(unet) + if args.use_ema: + ema_unet.copy_to(unet.parameters()) + + pipeline = StableDiffusionPipeline.from_pretrained( + args.pretrained_model_name_or_path, + text_encoder=text_encoder, + vae=vae.to(memory_format=torch.contiguous_format) if args.NHWC else vae, + unet=unet.to(memory_format=torch.contiguous_format) if args.NHWC else unet, + revision=args.revision, + variant=args.variant, + ) + pipeline.save_pretrained(args.output_dir) + + # Run a final round of inference. + images = [] + if args.validation_prompts is not None: + logger.info("Running inference for collecting generated images...") + pipeline = pipeline.to(accelerator.device) + pipeline.torch_dtype = weight_dtype + pipeline.set_progress_bar_config(disable=True) + + if args.enable_xformers_memory_efficient_attention: + pipeline.enable_xformers_memory_efficient_attention() + + if args.seed is None: + generator = None + else: + generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) + + for i in range(len(args.validation_prompts)): + with torch.autocast("cuda"): + image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] + images.append(image) + + if args.push_to_hub: + save_model_card(args, repo_id, images, repo_folder=args.output_dir) + upload_folder( + repo_id=repo_id, + folder_path=args.output_dir, + commit_message="End of training", + ignore_patterns=["step_*", "epoch_*"], + ) + + accelerator.end_training() + + +if __name__ == "__main__": + main() diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_flax.py new file mode 100644 index 000000000..1386ccb04 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_flax.py @@ -0,0 +1,620 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import logging +import math +import os +import random +from pathlib import Path + +import jax +import jax.numpy as jnp +import numpy as np +import optax +import torch +import torch.utils.checkpoint +import transformers +from datasets import load_dataset +from flax import jax_utils +from flax.training import train_state +from flax.training.common_utils import shard +from huggingface_hub import create_repo, upload_folder +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed + +from diffusers import ( + FlaxAutoencoderKL, + FlaxDDPMScheduler, + FlaxPNDMScheduler, + FlaxStableDiffusionPipeline, + FlaxUNet2DConditionModel, +) +from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker +from diffusers.utils import check_min_version + + +# Will error if the minimal version of diffusers is not installed. Remove at your own risks. +check_min_version("0.27.0") + +logger = logging.getLogger(__name__) + + +def parse_args(): + parser = argparse.ArgumentParser(description="Simple example of a training script.") + parser.add_argument( + "--pretrained_model_name_or_path", + type=str, + default=None, + required=True, + help="Path to pretrained model or model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--revision", + type=str, + default=None, + required=False, + help="Revision of pretrained model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--variant", + type=str, + default=None, + help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", + ) + parser.add_argument( + "--dataset_name", + type=str, + default=None, + help=( + "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," + " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," + " or to a folder containing files that 🤗 Datasets can understand." + ), + ) + parser.add_argument( + "--dataset_config_name", + type=str, + default=None, + help="The config of the Dataset, leave as None if there's only one config.", + ) + parser.add_argument( + "--train_data_dir", + type=str, + default=None, + help=( + "A folder containing the training data. Folder contents must follow the structure described in" + " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" + " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." + ), + ) + parser.add_argument( + "--image_column", type=str, default="image", help="The column of the dataset containing an image." + ) + parser.add_argument( + "--caption_column", + type=str, + default="text", + help="The column of the dataset containing a caption or a list of captions.", + ) + parser.add_argument( + "--max_train_samples", + type=int, + default=None, + help=( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." 
+ ), + ) + parser.add_argument( + "--output_dir", + type=str, + default="sd-model-finetuned", + help="The output directory where the model predictions and checkpoints will be written.", + ) + parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="The directory where the downloaded models and datasets will be stored.", + ) + parser.add_argument("--seed", type=int, default=0, help="A seed for reproducible training.") + parser.add_argument( + "--resolution", + type=int, + default=512, + help=( + "The resolution for input images, all the images in the train/validation dataset will be resized to this" + " resolution" + ), + ) + parser.add_argument( + "--center_crop", + default=False, + action="store_true", + help=( + "Whether to center crop the input images to the resolution. If not set, the images will be randomly" + " cropped. The images will be resized to the resolution first before cropping." + ), + ) + parser.add_argument( + "--random_flip", + action="store_true", + help="whether to randomly flip images horizontally", + ) + parser.add_argument( + "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." + ) + parser.add_argument("--num_train_epochs", type=int, default=100) + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=1e-4, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--scale_lr", + action="store_true", + default=False, + help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", + ) + parser.add_argument( + "--lr_scheduler", + type=str, + default="constant", + help=( + 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' + ' "constant", "constant_with_warmup"]' + ), + ) + parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") + parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") + parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") + parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") + parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") + parser.add_argument( + "--hub_model_id", + type=str, + default=None, + help="The name of the repository to keep in sync with the local `output_dir`.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help=( + "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" + " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." + ), + ) + parser.add_argument( + "--report_to", + type=str, + default="tensorboard", + help=( + 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' + ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' 
+ ), + ) + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help=( + "Whether to use mixed precision. Choose" + "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." + "and an Nvidia Ampere GPU." + ), + ) + parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument( + "--from_pt", + action="store_true", + default=False, + help="Flag to indicate whether to convert models from PyTorch.", + ) + + args = parser.parse_args() + env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) + if env_local_rank != -1 and env_local_rank != args.local_rank: + args.local_rank = env_local_rank + + # Sanity checks + if args.dataset_name is None and args.train_data_dir is None: + raise ValueError("Need either a dataset name or a training folder.") + + return args + + +dataset_name_mapping = { + "lambdalabs/pokemon-blip-captions": ("image", "text"), +} + + +def get_params_to_save(params): + return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params)) + + +def main(): + args = parse_args() + + if args.report_to == "wandb" and args.hub_token is not None: + raise ValueError( + "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." + " Please use `huggingface-cli login` to authenticate with the Hub." + ) + + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + # Setup logging, we only want one process per machine to log things on the screen. + logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) + if jax.process_index() == 0: + transformers.utils.logging.set_verbosity_info() + else: + transformers.utils.logging.set_verbosity_error() + + if args.seed is not None: + set_seed(args.seed) + + # Handle the repository creation + if jax.process_index() == 0: + if args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + + if args.push_to_hub: + repo_id = create_repo( + repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token + ).repo_id + + # Get the datasets: you can either provide your own training and evaluation files (see below) + # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). + + # In distributed training, the load_dataset function guarantees that only one local process can concurrently + # download the dataset. + if args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + dataset = load_dataset( + args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, data_dir=args.train_data_dir + ) + else: + data_files = {} + if args.train_data_dir is not None: + data_files["train"] = os.path.join(args.train_data_dir, "**") + dataset = load_dataset( + "imagefolder", + data_files=data_files, + cache_dir=args.cache_dir, + ) + # See more about loading custom images at + # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder + + # Preprocessing the datasets. + # We need to tokenize inputs and targets. + column_names = dataset["train"].column_names + + # 6. Get the column names for input/target. 
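# --- Illustrative aside (not part of the patch) -------------------------------
# A minimal sketch of what get_params_to_save above does: after
# flax.jax_utils.replicate, every leaf of the parameter pytree gains a leading
# device axis, and tree_map(lambda x: x[0], ...) strips that axis again so a
# single copy can be written to disk. The toy pytree below is an assumption for
# illustration only.
import jax
import jax.numpy as jnp
from flax import jax_utils

toy_params = {"dense": {"kernel": jnp.ones((4, 4)), "bias": jnp.zeros((4,))}}

replicated = jax_utils.replicate(toy_params)            # leaves gain shape (n_devices, ...)
single_copy = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], replicated))

print(replicated["dense"]["kernel"].shape)   # (n_devices, 4, 4)
print(single_copy["dense"]["kernel"].shape)  # (4, 4)
# ------------------------------------------------------------------------------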
+ dataset_columns = dataset_name_mapping.get(args.dataset_name, None) + if args.image_column is None: + image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] + else: + image_column = args.image_column + if image_column not in column_names: + raise ValueError( + f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" + ) + if args.caption_column is None: + caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] + else: + caption_column = args.caption_column + if caption_column not in column_names: + raise ValueError( + f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" + ) + + # Preprocessing the datasets. + # We need to tokenize input captions and transform the images. + def tokenize_captions(examples, is_train=True): + captions = [] + for caption in examples[caption_column]: + if isinstance(caption, str): + captions.append(caption) + elif isinstance(caption, (list, np.ndarray)): + # take a random caption if there are multiple + captions.append(random.choice(caption) if is_train else caption[0]) + else: + raise ValueError( + f"Caption column `{caption_column}` should contain either strings or lists of strings." + ) + inputs = tokenizer(captions, max_length=tokenizer.model_max_length, padding="do_not_pad", truncation=True) + input_ids = inputs.input_ids + return input_ids + + train_transforms = transforms.Compose( + [ + transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), + transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), + transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), + transforms.ToTensor(), + transforms.Normalize([0.5], [0.5]), + ] + ) + + def preprocess_train(examples): + images = [image.convert("RGB") for image in examples[image_column]] + examples["pixel_values"] = [train_transforms(image) for image in images] + examples["input_ids"] = tokenize_captions(examples) + + return examples + + if args.max_train_samples is not None: + dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) + # Set the training transforms + train_dataset = dataset["train"].with_transform(preprocess_train) + + def collate_fn(examples): + pixel_values = torch.stack([example["pixel_values"] for example in examples]) + pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() + input_ids = [example["input_ids"] for example in examples] + + padded_tokens = tokenizer.pad( + {"input_ids": input_ids}, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt" + ) + batch = { + "pixel_values": pixel_values, + "input_ids": padded_tokens.input_ids, + } + batch = {k: v.numpy() for k, v in batch.items()} + + return batch + + total_train_batch_size = args.train_batch_size * jax.local_device_count() + train_dataloader = torch.utils.data.DataLoader( + train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=total_train_batch_size, drop_last=True + ) + + weight_dtype = jnp.float32 + if args.mixed_precision == "fp16": + weight_dtype = jnp.float16 + elif args.mixed_precision == "bf16": + weight_dtype = jnp.bfloat16 + + # Load models and create wrapper for stable diffusion + tokenizer = CLIPTokenizer.from_pretrained( + args.pretrained_model_name_or_path, + from_pt=args.from_pt, + revision=args.revision, + subfolder="tokenizer", + ) + text_encoder = 
FlaxCLIPTextModel.from_pretrained( + args.pretrained_model_name_or_path, + from_pt=args.from_pt, + revision=args.revision, + subfolder="text_encoder", + dtype=weight_dtype, + ) + vae, vae_params = FlaxAutoencoderKL.from_pretrained( + args.pretrained_model_name_or_path, + from_pt=args.from_pt, + revision=args.revision, + subfolder="vae", + dtype=weight_dtype, + ) + unet, unet_params = FlaxUNet2DConditionModel.from_pretrained( + args.pretrained_model_name_or_path, + from_pt=args.from_pt, + revision=args.revision, + subfolder="unet", + dtype=weight_dtype, + ) + + # Optimization + if args.scale_lr: + args.learning_rate = args.learning_rate * total_train_batch_size + + constant_scheduler = optax.constant_schedule(args.learning_rate) + + adamw = optax.adamw( + learning_rate=constant_scheduler, + b1=args.adam_beta1, + b2=args.adam_beta2, + eps=args.adam_epsilon, + weight_decay=args.adam_weight_decay, + ) + + optimizer = optax.chain( + optax.clip_by_global_norm(args.max_grad_norm), + adamw, + ) + + state = train_state.TrainState.create(apply_fn=unet.__call__, params=unet_params, tx=optimizer) + + noise_scheduler = FlaxDDPMScheduler( + beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000 + ) + noise_scheduler_state = noise_scheduler.create_state() + + # Initialize our training + rng = jax.random.PRNGKey(args.seed) + train_rngs = jax.random.split(rng, jax.local_device_count()) + + def train_step(state, text_encoder_params, vae_params, batch, train_rng): + dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3) + + def compute_loss(params): + # Convert images to latent space + vae_outputs = vae.apply( + {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode + ) + latents = vae_outputs.latent_dist.sample(sample_rng) + # (NHWC) -> (NCHW) + latents = jnp.transpose(latents, (0, 3, 1, 2)) + latents = latents * vae.config.scaling_factor + + # Sample noise that we'll add to the latents + noise_rng, timestep_rng = jax.random.split(sample_rng) + noise = jax.random.normal(noise_rng, latents.shape) + # Sample a random timestep for each image + bsz = latents.shape[0] + timesteps = jax.random.randint( + timestep_rng, + (bsz,), + 0, + noise_scheduler.config.num_train_timesteps, + ) + + # Add noise to the latents according to the noise magnitude at each timestep + # (this is the forward diffusion process) + noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps) + + # Get the text embedding for conditioning + encoder_hidden_states = text_encoder( + batch["input_ids"], + params=text_encoder_params, + train=False, + )[0] + + # Predict the noise residual and compute loss + model_pred = unet.apply( + {"params": params}, noisy_latents, timesteps, encoder_hidden_states, train=True + ).sample + + # Get the target for loss depending on the prediction type + if noise_scheduler.config.prediction_type == "epsilon": + target = noise + elif noise_scheduler.config.prediction_type == "v_prediction": + target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps) + else: + raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") + + loss = (target - model_pred) ** 2 + loss = loss.mean() + + return loss + + grad_fn = jax.value_and_grad(compute_loss) + loss, grad = grad_fn(state.params) + grad = jax.lax.pmean(grad, "batch") + + new_state = state.apply_gradients(grads=grad) + + metrics = {"loss": loss} + metrics = jax.lax.pmean(metrics, axis_name="batch") + + 
return new_state, metrics, new_train_rng + + # Create parallel version of the train step + p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) + + # Replicate the train state on each device + state = jax_utils.replicate(state) + text_encoder_params = jax_utils.replicate(text_encoder.params) + vae_params = jax_utils.replicate(vae_params) + + # Train! + num_update_steps_per_epoch = math.ceil(len(train_dataloader)) + + # Scheduler and math around the number of training steps. + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + + args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + + logger.info("***** Running training *****") + logger.info(f" Num examples = {len(train_dataset)}") + logger.info(f" Num Epochs = {args.num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") + logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}") + logger.info(f" Total optimization steps = {args.max_train_steps}") + + global_step = 0 + + epochs = tqdm(range(args.num_train_epochs), desc="Epoch ... ", position=0) + for epoch in epochs: + # ======================== Training ================================ + + train_metrics = [] + + steps_per_epoch = len(train_dataset) // total_train_batch_size + train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False) + # train + for batch in train_dataloader: + batch = shard(batch) + state, train_metric, train_rngs = p_train_step(state, text_encoder_params, vae_params, batch, train_rngs) + train_metrics.append(train_metric) + + train_step_progress_bar.update(1) + + global_step += 1 + if global_step >= args.max_train_steps: + break + + train_metric = jax_utils.unreplicate(train_metric) + + train_step_progress_bar.close() + epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})") + + # Create the pipeline using using the trained modules and save it. 
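# --- Illustrative aside (not part of the patch) -------------------------------
# A minimal sketch of the shard + pmap pattern used in the loop above: shard()
# reshapes a host batch from (batch, ...) to (n_devices, batch // n_devices, ...),
# and jax.pmap maps the function over that leading device axis (in the real
# train_step, jax.lax.pmean then averages gradients and metrics over the
# "batch" axis). The toy batch of 8 is an assumption and is expected to divide
# evenly across the local devices.
import jax
import jax.numpy as jnp
from flax.training.common_utils import shard

toy_batch = {"pixel_values": jnp.ones((8, 3, 4, 4))}     # total host-side batch of 8

sharded = shard(toy_batch)                               # (n_devices, 8 // n_devices, 3, 4, 4)

@jax.pmap
def per_device_mean(batch):
    # each device sees only its own slice of the sharded batch
    return jnp.mean(batch["pixel_values"])

print(sharded["pixel_values"].shape)
print(per_device_mean(sharded))                          # one value per local device
# ------------------------------------------------------------------------------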
+ if jax.process_index() == 0: + scheduler = FlaxPNDMScheduler( + beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True + ) + safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained( + "CompVis/stable-diffusion-safety-checker", from_pt=True + ) + pipeline = FlaxStableDiffusionPipeline( + text_encoder=text_encoder, + vae=vae, + unet=unet, + tokenizer=tokenizer, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"), + ) + + pipeline.save_pretrained( + args.output_dir, + params={ + "text_encoder": get_params_to_save(text_encoder_params), + "vae": get_params_to_save(vae_params), + "unet": get_params_to_save(state.params), + "safety_checker": safety_checker.params, + }, + ) + + if args.push_to_hub: + upload_folder( + repo_id=repo_id, + folder_path=args.output_dir, + commit_message="End of training", + ignore_patterns=["step_*", "epoch_*"], + ) + + +if __name__ == "__main__": + main() diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_lora.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_lora.py new file mode 100644 index 000000000..97f258ba7 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_lora.py @@ -0,0 +1,976 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Fine-tuning script for Stable Diffusion for text2image with support for LoRA.""" + +import argparse +import logging +import math +import os +import random +import shutil +from pathlib import Path + +import datasets +import numpy as np +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +import transformers +from accelerate import Accelerator +from accelerate.logging import get_logger +from accelerate.utils import ProjectConfiguration, set_seed +from datasets import load_dataset +from huggingface_hub import create_repo, upload_folder +from packaging import version +from peft import LoraConfig +from peft.utils import get_peft_model_state_dict +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import CLIPTextModel, CLIPTokenizer + +import diffusers +from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, StableDiffusionPipeline, UNet2DConditionModel +from diffusers.optimization import get_scheduler +from diffusers.training_utils import cast_training_params, compute_snr +from diffusers.utils import check_min_version, convert_state_dict_to_diffusers, is_wandb_available +from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card +from diffusers.utils.import_utils import is_xformers_available +from diffusers.utils.torch_utils import is_compiled_module + + +# Will error if the minimal version of diffusers is not installed. Remove at your own risks. +check_min_version("0.27.0") + +logger = get_logger(__name__, log_level="INFO") + + +def save_model_card( + repo_id: str, + images: list = None, + base_model: str = None, + dataset_name: str = None, + repo_folder: str = None, +): + img_str = "" + if images is not None: + for i, image in enumerate(images): + image.save(os.path.join(repo_folder, f"image_{i}.png")) + img_str += f"![img_{i}](./image_{i}.png)\n" + + model_description = f""" +# LoRA text2image fine-tuning - {repo_id} +These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. \n +{img_str} +""" + + model_card = load_or_create_model_card( + repo_id_or_path=repo_id, + from_training=True, + license="creativeml-openrail-m", + base_model=base_model, + model_description=model_description, + inference=True, + ) + + tags = [ + "stable-diffusion", + "stable-diffusion-diffusers", + "text-to-image", + "diffusers", + "diffusers-training", + "lora", + ] + model_card = populate_model_card(model_card, tags=tags) + + model_card.save(os.path.join(repo_folder, "README.md")) + + +def parse_args(): + parser = argparse.ArgumentParser(description="Simple example of a training script.") + parser.add_argument( + "--pretrained_model_name_or_path", + type=str, + default=None, + required=True, + help="Path to pretrained model or model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--revision", + type=str, + default=None, + required=False, + help="Revision of pretrained model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--variant", + type=str, + default=None, + help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", + ) + parser.add_argument( + "--dataset_name", + type=str, + default=None, + help=( + "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," + " dataset). 
It can also be a path pointing to a local copy of a dataset in your filesystem," + " or to a folder containing files that 🤗 Datasets can understand." + ), + ) + parser.add_argument( + "--dataset_config_name", + type=str, + default=None, + help="The config of the Dataset, leave as None if there's only one config.", + ) + parser.add_argument( + "--train_data_dir", + type=str, + default=None, + help=( + "A folder containing the training data. Folder contents must follow the structure described in" + " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" + " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." + ), + ) + parser.add_argument( + "--image_column", type=str, default="image", help="The column of the dataset containing an image." + ) + parser.add_argument( + "--caption_column", + type=str, + default="text", + help="The column of the dataset containing a caption or a list of captions.", + ) + parser.add_argument( + "--validation_prompt", type=str, default=None, help="A prompt that is sampled during training for inference." + ) + parser.add_argument( + "--num_validation_images", + type=int, + default=4, + help="Number of images that should be generated during validation with `validation_prompt`.", + ) + parser.add_argument( + "--validation_epochs", + type=int, + default=1, + help=( + "Run fine-tuning validation every X epochs. The validation process consists of running the prompt" + " `args.validation_prompt` multiple times: `args.num_validation_images`." + ), + ) + parser.add_argument( + "--max_train_samples", + type=int, + default=None, + help=( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + ), + ) + parser.add_argument( + "--output_dir", + type=str, + default="sd-model-finetuned-lora", + help="The output directory where the model predictions and checkpoints will be written.", + ) + parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="The directory where the downloaded models and datasets will be stored.", + ) + parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") + parser.add_argument( + "--resolution", + type=int, + default=512, + help=( + "The resolution for input images, all the images in the train/validation dataset will be resized to this" + " resolution" + ), + ) + parser.add_argument( + "--center_crop", + default=False, + action="store_true", + help=( + "Whether to center crop the input images to the resolution. If not set, the images will be randomly" + " cropped. The images will be resized to the resolution first before cropping." + ), + ) + parser.add_argument( + "--random_flip", + action="store_true", + help="whether to randomly flip images horizontally", + ) + parser.add_argument( + "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." + ) + parser.add_argument("--num_train_epochs", type=int, default=100) + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--gradient_checkpointing", + action="store_true", + help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=1e-4, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--scale_lr", + action="store_true", + default=False, + help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", + ) + parser.add_argument( + "--lr_scheduler", + type=str, + default="constant", + help=( + 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' + ' "constant", "constant_with_warmup"]' + ), + ) + parser.add_argument( + "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." + ) + parser.add_argument( + "--snr_gamma", + type=float, + default=None, + help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " + "More details here: https://arxiv.org/abs/2303.09556.", + ) + parser.add_argument( + "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." + ) + parser.add_argument( + "--allow_tf32", + action="store_true", + help=( + "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" + " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" + ), + ) + parser.add_argument( + "--dataloader_num_workers", + type=int, + default=0, + help=( + "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." + ), + ) + parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") + parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") + parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") + parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") + parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") + parser.add_argument( + "--prediction_type", + type=str, + default=None, + help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.", + ) + parser.add_argument( + "--hub_model_id", + type=str, + default=None, + help="The name of the repository to keep in sync with the local `output_dir`.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help=( + "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" + " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." 
+ ), + ) + parser.add_argument( + "--mixed_precision", + type=str, + default=None, + choices=["no", "fp16", "bf16"], + help=( + "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" + " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" + " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." + ), + ) + parser.add_argument( + "--report_to", + type=str, + default="tensorboard", + help=( + 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' + ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' + ), + ) + parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument( + "--checkpointing_steps", + type=int, + default=500, + help=( + "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" + " training using `--resume_from_checkpoint`." + ), + ) + parser.add_argument( + "--checkpoints_total_limit", + type=int, + default=None, + help=("Max number of checkpoints to store."), + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help=( + "Whether training should be resumed from a previous checkpoint. Use a path saved by" + ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' + ), + ) + parser.add_argument( + "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." + ) + parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") + parser.add_argument( + "--rank", + type=int, + default=4, + help=("The dimension of the LoRA update matrices."), + ) + + args = parser.parse_args() + env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) + if env_local_rank != -1 and env_local_rank != args.local_rank: + args.local_rank = env_local_rank + + # Sanity checks + if args.dataset_name is None and args.train_data_dir is None: + raise ValueError("Need either a dataset name or a training folder.") + + return args + + +DATASET_NAME_MAPPING = { + "lambdalabs/pokemon-blip-captions": ("image", "text"), +} + + +def main(): + args = parse_args() + if args.report_to == "wandb" and args.hub_token is not None: + raise ValueError( + "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." + " Please use `huggingface-cli login` to authenticate with the Hub." + ) + + logging_dir = Path(args.output_dir, args.logging_dir) + + accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) + + accelerator = Accelerator( + gradient_accumulation_steps=args.gradient_accumulation_steps, + mixed_precision=args.mixed_precision, + log_with=args.report_to, + project_config=accelerator_project_config, + ) + if args.report_to == "wandb": + if not is_wandb_available(): + raise ImportError("Make sure to install wandb if you want to use it for logging during training.") + import wandb + + # Make one log on every process with the configuration for debugging. 
+ logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + logger.info(accelerator.state, main_process_only=False) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_warning() + diffusers.utils.logging.set_verbosity_info() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + diffusers.utils.logging.set_verbosity_error() + + # If passed along, set the training seed now. + if args.seed is not None: + set_seed(args.seed) + + # Handle the repository creation + if accelerator.is_main_process: + if args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + + if args.push_to_hub: + repo_id = create_repo( + repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token + ).repo_id + # Load scheduler, tokenizer and models. + noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") + tokenizer = CLIPTokenizer.from_pretrained( + args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision + ) + text_encoder = CLIPTextModel.from_pretrained( + args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision + ) + vae = AutoencoderKL.from_pretrained( + args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant + ) + unet = UNet2DConditionModel.from_pretrained( + args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant + ) + # freeze parameters of models to save more memory + unet.requires_grad_(False) + vae.requires_grad_(False) + text_encoder.requires_grad_(False) + + # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision + # as these weights are only used for inference, keeping weights in full precision is not required. + weight_dtype = torch.float32 + if accelerator.mixed_precision == "fp16": + weight_dtype = torch.float16 + elif accelerator.mixed_precision == "bf16": + weight_dtype = torch.bfloat16 + + # Freeze the unet parameters before adding adapters + for param in unet.parameters(): + param.requires_grad_(False) + + unet_lora_config = LoraConfig( + r=args.rank, + lora_alpha=args.rank, + init_lora_weights="gaussian", + target_modules=["to_k", "to_q", "to_v", "to_out.0"], + ) + + # Move unet, vae and text_encoder to device and cast to weight_dtype + unet.to(accelerator.device, dtype=weight_dtype) + vae.to(accelerator.device, dtype=weight_dtype) + text_encoder.to(accelerator.device, dtype=weight_dtype) + + # Add adapter and make sure the trainable params are in float32. + unet.add_adapter(unet_lora_config) + if args.mixed_precision == "fp16": + # only upcast trainable parameters (LoRA) into fp32 + cast_training_params(unet, dtype=torch.float32) + + if args.enable_xformers_memory_efficient_attention: + if is_xformers_available(): + import xformers + + xformers_version = version.parse(xformers.__version__) + if xformers_version == version.parse("0.0.16"): + logger.warning( + "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 
+ ) + unet.enable_xformers_memory_efficient_attention() + else: + raise ValueError("xformers is not available. Make sure it is installed correctly") + + lora_layers = filter(lambda p: p.requires_grad, unet.parameters()) + + if args.gradient_checkpointing: + unet.enable_gradient_checkpointing() + + # Enable TF32 for faster training on Ampere GPUs, + # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices + if args.allow_tf32: + torch.backends.cuda.matmul.allow_tf32 = True + + if args.scale_lr: + args.learning_rate = ( + args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes + ) + + # Initialize the optimizer + if args.use_8bit_adam: + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError( + "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" + ) + + optimizer_cls = bnb.optim.AdamW8bit + else: + optimizer_cls = torch.optim.AdamW + + optimizer = optimizer_cls( + lora_layers, + lr=args.learning_rate, + betas=(args.adam_beta1, args.adam_beta2), + weight_decay=args.adam_weight_decay, + eps=args.adam_epsilon, + ) + + # Get the datasets: you can either provide your own training and evaluation files (see below) + # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). + + # In distributed training, the load_dataset function guarantees that only one local process can concurrently + # download the dataset. + if args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + dataset = load_dataset( + args.dataset_name, + args.dataset_config_name, + cache_dir=args.cache_dir, + data_dir=args.train_data_dir, + ) + else: + data_files = {} + if args.train_data_dir is not None: + data_files["train"] = os.path.join(args.train_data_dir, "**") + dataset = load_dataset( + "imagefolder", + data_files=data_files, + cache_dir=args.cache_dir, + ) + # See more about loading custom images at + # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder + + # Preprocessing the datasets. + # We need to tokenize inputs and targets. + column_names = dataset["train"].column_names + + # 6. Get the column names for input/target. + dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) + if args.image_column is None: + image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] + else: + image_column = args.image_column + if image_column not in column_names: + raise ValueError( + f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" + ) + if args.caption_column is None: + caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] + else: + caption_column = args.caption_column + if caption_column not in column_names: + raise ValueError( + f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" + ) + + # Preprocessing the datasets. + # We need to tokenize input captions and transform the images. + def tokenize_captions(examples, is_train=True): + captions = [] + for caption in examples[caption_column]: + if isinstance(caption, str): + captions.append(caption) + elif isinstance(caption, (list, np.ndarray)): + # take a random caption if there are multiple + captions.append(random.choice(caption) if is_train else caption[0]) + else: + raise ValueError( + f"Caption column `{caption_column}` should contain either strings or lists of strings." 
+ ) + inputs = tokenizer( + captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" + ) + return inputs.input_ids + + # Preprocessing the datasets. + train_transforms = transforms.Compose( + [ + transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), + transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), + transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), + transforms.ToTensor(), + transforms.Normalize([0.5], [0.5]), + ] + ) + + def unwrap_model(model): + model = accelerator.unwrap_model(model) + model = model._orig_mod if is_compiled_module(model) else model + return model + + def preprocess_train(examples): + images = [image.convert("RGB") for image in examples[image_column]] + examples["pixel_values"] = [train_transforms(image) for image in images] + examples["input_ids"] = tokenize_captions(examples) + return examples + + with accelerator.main_process_first(): + if args.max_train_samples is not None: + dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) + # Set the training transforms + train_dataset = dataset["train"].with_transform(preprocess_train) + + def collate_fn(examples): + pixel_values = torch.stack([example["pixel_values"] for example in examples]) + pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() + input_ids = torch.stack([example["input_ids"] for example in examples]) + return {"pixel_values": pixel_values, "input_ids": input_ids} + + # DataLoaders creation: + train_dataloader = torch.utils.data.DataLoader( + train_dataset, + shuffle=True, + collate_fn=collate_fn, + batch_size=args.train_batch_size, + num_workers=args.dataloader_num_workers, + ) + + # Scheduler and math around the number of training steps. + overrode_max_train_steps = False + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + overrode_max_train_steps = True + + lr_scheduler = get_scheduler( + args.lr_scheduler, + optimizer=optimizer, + num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, + num_training_steps=args.max_train_steps * accelerator.num_processes, + ) + + # Prepare everything with our `accelerator`. + unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( + unet, optimizer, train_dataloader, lr_scheduler + ) + + # We need to recalculate our total training steps as the size of the training dataloader may have changed. + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if overrode_max_train_steps: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + # Afterwards we recalculate our number of training epochs + args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + + # We need to initialize the trackers we use, and also store our configuration. + # The trackers initializes automatically on the main process. + if accelerator.is_main_process: + accelerator.init_trackers("text2image-fine-tune", config=vars(args)) + + # Train! 
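# Illustration only (not part of the patch): a worked example of the step/epoch bookkeeping the
# script performs above and logs below, using hypothetical numbers. After `accelerator.prepare`,
# the dataloader is sharded across processes, so each process sees roughly
# ceil(num_examples / (batch_size * num_processes)) batches per epoch.
import math

num_examples = 1000                 # hypothetical dataset size
per_device_batch_size = 16          # --train_batch_size
num_processes = 2                   # e.g. two GPUs
gradient_accumulation_steps = 4     # --gradient_accumulation_steps
num_train_epochs = 100              # --num_train_epochs

batches_per_process = math.ceil(num_examples / (per_device_batch_size * num_processes))    # 32
num_update_steps_per_epoch = math.ceil(batches_per_process / gradient_accumulation_steps)  # 8
max_train_steps = num_train_epochs * num_update_steps_per_epoch                            # 800
total_batch_size = per_device_batch_size * num_processes * gradient_accumulation_steps     # 128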
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps + + logger.info("***** Running training *****") + logger.info(f" Num examples = {len(train_dataset)}") + logger.info(f" Num Epochs = {args.num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") + logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {args.max_train_steps}") + global_step = 0 + first_epoch = 0 + + # Potentially load in the weights and states from a previous save + if args.resume_from_checkpoint: + if args.resume_from_checkpoint != "latest": + path = os.path.basename(args.resume_from_checkpoint) + else: + # Get the most recent checkpoint + dirs = os.listdir(args.output_dir) + dirs = [d for d in dirs if d.startswith("checkpoint")] + dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) + path = dirs[-1] if len(dirs) > 0 else None + + if path is None: + accelerator.print( + f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." + ) + args.resume_from_checkpoint = None + initial_global_step = 0 + else: + accelerator.print(f"Resuming from checkpoint {path}") + accelerator.load_state(os.path.join(args.output_dir, path)) + global_step = int(path.split("-")[1]) + + initial_global_step = global_step + first_epoch = global_step // num_update_steps_per_epoch + else: + initial_global_step = 0 + + progress_bar = tqdm( + range(0, args.max_train_steps), + initial=initial_global_step, + desc="Steps", + # Only show the progress bar once on each machine. + disable=not accelerator.is_local_main_process, + ) + + for epoch in range(first_epoch, args.num_train_epochs): + unet.train() + train_loss = 0.0 + for step, batch in enumerate(train_dataloader): + with accelerator.accumulate(unet): + # Convert images to latent space + latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() + latents = latents * vae.config.scaling_factor + + # Sample noise that we'll add to the latents + noise = torch.randn_like(latents) + if args.noise_offset: + # https://www.crosslabs.org//blog/diffusion-with-offset-noise + noise += args.noise_offset * torch.randn( + (latents.shape[0], latents.shape[1], 1, 1), device=latents.device + ) + + bsz = latents.shape[0] + # Sample a random timestep for each image + timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) + timesteps = timesteps.long() + + # Add noise to the latents according to the noise magnitude at each timestep + # (this is the forward diffusion process) + noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) + + # Get the text embedding for conditioning + encoder_hidden_states = text_encoder(batch["input_ids"], return_dict=False)[0] + + # Get the target for loss depending on the prediction type + if args.prediction_type is not None: + # set prediction_type of scheduler if defined + noise_scheduler.register_to_config(prediction_type=args.prediction_type) + + if noise_scheduler.config.prediction_type == "epsilon": + target = noise + elif noise_scheduler.config.prediction_type == "v_prediction": + target = noise_scheduler.get_velocity(latents, noise, timesteps) + else: + raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") + + # Predict the noise residual and compute loss + 
model_pred = unet(noisy_latents, timesteps, encoder_hidden_states, return_dict=False)[0] + + if args.snr_gamma is None: + loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") + else: + # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. + # Since we predict the noise instead of x_0, the original formulation is slightly changed. + # This is discussed in Section 4.2 of the same paper. + snr = compute_snr(noise_scheduler, timesteps) + mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( + dim=1 + )[0] + if noise_scheduler.config.prediction_type == "epsilon": + mse_loss_weights = mse_loss_weights / snr + elif noise_scheduler.config.prediction_type == "v_prediction": + mse_loss_weights = mse_loss_weights / (snr + 1) + + loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") + loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights + loss = loss.mean() + + # Gather the losses across all processes for logging (if we use distributed training). + avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() + train_loss += avg_loss.item() / args.gradient_accumulation_steps + + # Backpropagate + accelerator.backward(loss) + if accelerator.sync_gradients: + params_to_clip = lora_layers + accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + progress_bar.update(1) + global_step += 1 + accelerator.log({"train_loss": train_loss}, step=global_step) + train_loss = 0.0 + + if global_step % args.checkpointing_steps == 0: + if accelerator.is_main_process: + # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` + if args.checkpoints_total_limit is not None: + checkpoints = os.listdir(args.output_dir) + checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] + checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) + + # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints + if len(checkpoints) >= args.checkpoints_total_limit: + num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 + removing_checkpoints = checkpoints[0:num_to_remove] + + logger.info( + f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" + ) + logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") + + for removing_checkpoint in removing_checkpoints: + removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) + shutil.rmtree(removing_checkpoint) + + save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") + accelerator.save_state(save_path) + + unwrapped_unet = unwrap_model(unet) + unet_lora_state_dict = convert_state_dict_to_diffusers( + get_peft_model_state_dict(unwrapped_unet) + ) + + StableDiffusionPipeline.save_lora_weights( + save_directory=save_path, + unet_lora_layers=unet_lora_state_dict, + safe_serialization=True, + ) + + logger.info(f"Saved state to {save_path}") + + logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} + progress_bar.set_postfix(**logs) + + if global_step >= args.max_train_steps: + break + + if accelerator.is_main_process: + if args.validation_prompt is not None and epoch % args.validation_epochs == 0: + logger.info( + f"Running validation... 
\n Generating {args.num_validation_images} images with prompt:" + f" {args.validation_prompt}." + ) + # create pipeline + pipeline = DiffusionPipeline.from_pretrained( + args.pretrained_model_name_or_path, + unet=unwrap_model(unet), + revision=args.revision, + variant=args.variant, + torch_dtype=weight_dtype, + ) + pipeline = pipeline.to(accelerator.device) + pipeline.set_progress_bar_config(disable=True) + + # run inference + generator = torch.Generator(device=accelerator.device) + if args.seed is not None: + generator = generator.manual_seed(args.seed) + images = [] + with torch.cuda.amp.autocast(): + for _ in range(args.num_validation_images): + images.append( + pipeline(args.validation_prompt, num_inference_steps=30, generator=generator).images[0] + ) + + for tracker in accelerator.trackers: + if tracker.name == "tensorboard": + np_images = np.stack([np.asarray(img) for img in images]) + tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") + if tracker.name == "wandb": + tracker.log( + { + "validation": [ + wandb.Image(image, caption=f"{i}: {args.validation_prompt}") + for i, image in enumerate(images) + ] + } + ) + + del pipeline + torch.cuda.empty_cache() + + # Save the lora layers + accelerator.wait_for_everyone() + if accelerator.is_main_process: + unet = unet.to(torch.float32) + + unwrapped_unet = unwrap_model(unet) + unet_lora_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unwrapped_unet)) + StableDiffusionPipeline.save_lora_weights( + save_directory=args.output_dir, + unet_lora_layers=unet_lora_state_dict, + safe_serialization=True, + ) + + if args.push_to_hub: + save_model_card( + repo_id, + images=images, + base_model=args.pretrained_model_name_or_path, + dataset_name=args.dataset_name, + repo_folder=args.output_dir, + ) + upload_folder( + repo_id=repo_id, + folder_path=args.output_dir, + commit_message="End of training", + ignore_patterns=["step_*", "epoch_*"], + ) + + # Final inference + # Load previous pipeline + if args.validation_prompt is not None: + pipeline = DiffusionPipeline.from_pretrained( + args.pretrained_model_name_or_path, + revision=args.revision, + variant=args.variant, + torch_dtype=weight_dtype, + ) + pipeline = pipeline.to(accelerator.device) + + # load attention processors + pipeline.load_lora_weights(args.output_dir) + + # run inference + generator = torch.Generator(device=accelerator.device) + if args.seed is not None: + generator = generator.manual_seed(args.seed) + images = [] + with torch.cuda.amp.autocast(): + for _ in range(args.num_validation_images): + images.append( + pipeline(args.validation_prompt, num_inference_steps=30, generator=generator).images[0] + ) + + for tracker in accelerator.trackers: + if len(images) != 0: + if tracker.name == "tensorboard": + np_images = np.stack([np.asarray(img) for img in images]) + tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC") + if tracker.name == "wandb": + tracker.log( + { + "test": [ + wandb.Image(image, caption=f"{i}: {args.validation_prompt}") + for i, image in enumerate(images) + ] + } + ) + + accelerator.end_training() + + +if __name__ == "__main__": + main() diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_lora_sdxl.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_lora_sdxl.py new file mode 100644 index 000000000..43e090758 --- /dev/null +++ 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_lora_sdxl.py @@ -0,0 +1,1317 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Fine-tuning script for Stable Diffusion XL for text2image with support for LoRA.""" + +import argparse +import logging +import math +import os +import random +import shutil +from pathlib import Path + +import datasets +import numpy as np +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +import transformers +from accelerate import Accelerator +from accelerate.logging import get_logger +from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed +from datasets import load_dataset +from huggingface_hub import create_repo, upload_folder +from packaging import version +from peft import LoraConfig, set_peft_model_state_dict +from peft.utils import get_peft_model_state_dict +from torchvision import transforms +from torchvision.transforms.functional import crop +from tqdm.auto import tqdm +from transformers import AutoTokenizer, PretrainedConfig + +import diffusers +from diffusers import ( + AutoencoderKL, + DDPMScheduler, + StableDiffusionXLPipeline, + UNet2DConditionModel, +) +from diffusers.loaders import LoraLoaderMixin +from diffusers.optimization import get_scheduler +from diffusers.training_utils import _set_state_dict_into_text_encoder, cast_training_params, compute_snr +from diffusers.utils import ( + check_min_version, + convert_state_dict_to_diffusers, + convert_unet_state_dict_to_peft, + is_wandb_available, +) +from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card +from diffusers.utils.import_utils import is_xformers_available +from diffusers.utils.torch_utils import is_compiled_module + + +# Will error if the minimal version of diffusers is not installed. Remove at your own risks. +check_min_version("0.27.0") + +logger = get_logger(__name__) + + +def save_model_card( + repo_id: str, + images: list = None, + base_model: str = None, + dataset_name: str = None, + train_text_encoder: bool = False, + repo_folder: str = None, + vae_path: str = None, +): + img_str = "" + if images is not None: + for i, image in enumerate(images): + image.save(os.path.join(repo_folder, f"image_{i}.png")) + img_str += f"![img_{i}](./image_{i}.png)\n" + + model_description = f""" +# LoRA text2image fine-tuning - {repo_id} + +These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. \n +{img_str} + +LoRA for the text encoder was enabled: {train_text_encoder}. + +Special VAE used for training: {vae_path}. 
+""" + model_card = load_or_create_model_card( + repo_id_or_path=repo_id, + from_training=True, + license="creativeml-openrail-m", + base_model=base_model, + model_description=model_description, + inference=True, + ) + + tags = [ + "stable-diffusion-xl", + "stable-diffusion-xl-diffusers", + "text-to-image", + "diffusers", + "diffusers-training", + "lora", + ] + model_card = populate_model_card(model_card, tags=tags) + + model_card.save(os.path.join(repo_folder, "README.md")) + + +def import_model_class_from_model_name_or_path( + pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" +): + text_encoder_config = PretrainedConfig.from_pretrained( + pretrained_model_name_or_path, subfolder=subfolder, revision=revision + ) + model_class = text_encoder_config.architectures[0] + + if model_class == "CLIPTextModel": + from transformers import CLIPTextModel + + return CLIPTextModel + elif model_class == "CLIPTextModelWithProjection": + from transformers import CLIPTextModelWithProjection + + return CLIPTextModelWithProjection + else: + raise ValueError(f"{model_class} is not supported.") + + +def parse_args(input_args=None): + parser = argparse.ArgumentParser(description="Simple example of a training script.") + parser.add_argument( + "--pretrained_model_name_or_path", + type=str, + default=None, + required=True, + help="Path to pretrained model or model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--pretrained_vae_model_name_or_path", + type=str, + default=None, + help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.", + ) + parser.add_argument( + "--revision", + type=str, + default=None, + required=False, + help="Revision of pretrained model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--variant", + type=str, + default=None, + help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", + ) + parser.add_argument( + "--dataset_name", + type=str, + default=None, + help=( + "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," + " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," + " or to a folder containing files that 🤗 Datasets can understand." + ), + ) + parser.add_argument( + "--dataset_config_name", + type=str, + default=None, + help="The config of the Dataset, leave as None if there's only one config.", + ) + parser.add_argument( + "--train_data_dir", + type=str, + default=None, + help=( + "A folder containing the training data. Folder contents must follow the structure described in" + " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" + " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." + ), + ) + parser.add_argument( + "--image_column", type=str, default="image", help="The column of the dataset containing an image." 
+ ) + parser.add_argument( + "--caption_column", + type=str, + default="text", + help="The column of the dataset containing a caption or a list of captions.", + ) + parser.add_argument( + "--validation_prompt", + type=str, + default=None, + help="A prompt that is used during validation to verify that the model is learning.", + ) + parser.add_argument( + "--num_validation_images", + type=int, + default=4, + help="Number of images that should be generated during validation with `validation_prompt`.", + ) + parser.add_argument( + "--validation_epochs", + type=int, + default=1, + help=( + "Run fine-tuning validation every X epochs. The validation process consists of running the prompt" + " `args.validation_prompt` multiple times: `args.num_validation_images`." + ), + ) + parser.add_argument( + "--max_train_samples", + type=int, + default=None, + help=( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + ), + ) + parser.add_argument( + "--output_dir", + type=str, + default="sd-model-finetuned-lora", + help="The output directory where the model predictions and checkpoints will be written.", + ) + parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="The directory where the downloaded models and datasets will be stored.", + ) + parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") + parser.add_argument( + "--resolution", + type=int, + default=1024, + help=( + "The resolution for input images, all the images in the train/validation dataset will be resized to this" + " resolution" + ), + ) + parser.add_argument( + "--center_crop", + default=False, + action="store_true", + help=( + "Whether to center crop the input images to the resolution. If not set, the images will be randomly" + " cropped. The images will be resized to the resolution first before cropping." + ), + ) + parser.add_argument( + "--random_flip", + action="store_true", + help="whether to randomly flip images horizontally", + ) + parser.add_argument( + "--train_text_encoder", + action="store_true", + help="Whether to train the text encoder. If set, the text encoder should be float32 precision.", + ) + parser.add_argument( + "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." + ) + parser.add_argument("--num_train_epochs", type=int, default=100) + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--checkpointing_steps", + type=int, + default=500, + help=( + "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" + " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" + " training using `--resume_from_checkpoint`." + ), + ) + parser.add_argument( + "--checkpoints_total_limit", + type=int, + default=None, + help=("Max number of checkpoints to store."), + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help=( + "Whether training should be resumed from a previous checkpoint. Use a path saved by" + ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' 
+ ), + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--gradient_checkpointing", + action="store_true", + help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=1e-4, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--scale_lr", + action="store_true", + default=False, + help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", + ) + parser.add_argument( + "--lr_scheduler", + type=str, + default="constant", + help=( + 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' + ' "constant", "constant_with_warmup"]' + ), + ) + parser.add_argument( + "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." + ) + parser.add_argument( + "--snr_gamma", + type=float, + default=None, + help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " + "More details here: https://arxiv.org/abs/2303.09556.", + ) + parser.add_argument( + "--allow_tf32", + action="store_true", + help=( + "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" + " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" + ), + ) + parser.add_argument( + "--dataloader_num_workers", + type=int, + default=0, + help=( + "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." + ), + ) + parser.add_argument( + "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." + ) + parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") + parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") + parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") + parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") + parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") + parser.add_argument( + "--prediction_type", + type=str, + default=None, + help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.", + ) + parser.add_argument( + "--hub_model_id", + type=str, + default=None, + help="The name of the repository to keep in sync with the local `output_dir`.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help=( + "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" + " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." + ), + ) + parser.add_argument( + "--report_to", + type=str, + default="tensorboard", + help=( + 'The integration to report the results and logs to. 
Supported platforms are `"tensorboard"`' + ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' + ), + ) + parser.add_argument( + "--mixed_precision", + type=str, + default=None, + choices=["no", "fp16", "bf16"], + help=( + "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" + " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" + " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." + ), + ) + parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument( + "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." + ) + parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") + parser.add_argument( + "--rank", + type=int, + default=4, + help=("The dimension of the LoRA update matrices."), + ) + parser.add_argument( + "--debug_loss", + action="store_true", + help="debug loss for each image, if filenames are awailable in the dataset", + ) + + if input_args is not None: + args = parser.parse_args(input_args) + else: + args = parser.parse_args() + + env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) + if env_local_rank != -1 and env_local_rank != args.local_rank: + args.local_rank = env_local_rank + + # Sanity checks + if args.dataset_name is None and args.train_data_dir is None: + raise ValueError("Need either a dataset name or a training folder.") + + return args + + +DATASET_NAME_MAPPING = { + "lambdalabs/pokemon-blip-captions": ("image", "text"), +} + + +def tokenize_prompt(tokenizer, prompt): + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + return text_input_ids + + +# Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt +def encode_prompt(text_encoders, tokenizers, prompt, text_input_ids_list=None): + prompt_embeds_list = [] + + for i, text_encoder in enumerate(text_encoders): + if tokenizers is not None: + tokenizer = tokenizers[i] + text_input_ids = tokenize_prompt(tokenizer, prompt) + else: + assert text_input_ids_list is not None + text_input_ids = text_input_ids_list[i] + + prompt_embeds = text_encoder( + text_input_ids.to(text_encoder.device), output_hidden_states=True, return_dict=False + ) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + prompt_embeds = prompt_embeds[-1][-2] + bs_embed, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1) + return prompt_embeds, pooled_prompt_embeds + + +def main(args): + if args.report_to == "wandb" and args.hub_token is not None: + raise ValueError( + "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." + " Please use `huggingface-cli login` to authenticate with the Hub." 
+ ) + + logging_dir = Path(args.output_dir, args.logging_dir) + + accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) + kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) + accelerator = Accelerator( + gradient_accumulation_steps=args.gradient_accumulation_steps, + mixed_precision=args.mixed_precision, + log_with=args.report_to, + project_config=accelerator_project_config, + kwargs_handlers=[kwargs], + ) + + if args.report_to == "wandb": + if not is_wandb_available(): + raise ImportError("Make sure to install wandb if you want to use it for logging during training.") + import wandb + + # Make one log on every process with the configuration for debugging. + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + logger.info(accelerator.state, main_process_only=False) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_warning() + diffusers.utils.logging.set_verbosity_info() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + diffusers.utils.logging.set_verbosity_error() + + # If passed along, set the training seed now. + if args.seed is not None: + set_seed(args.seed) + + # Handle the repository creation + if accelerator.is_main_process: + if args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + + if args.push_to_hub: + repo_id = create_repo( + repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token + ).repo_id + + # Load the tokenizers + tokenizer_one = AutoTokenizer.from_pretrained( + args.pretrained_model_name_or_path, + subfolder="tokenizer", + revision=args.revision, + use_fast=False, + ) + tokenizer_two = AutoTokenizer.from_pretrained( + args.pretrained_model_name_or_path, + subfolder="tokenizer_2", + revision=args.revision, + use_fast=False, + ) + + # import correct text encoder classes + text_encoder_cls_one = import_model_class_from_model_name_or_path( + args.pretrained_model_name_or_path, args.revision + ) + text_encoder_cls_two = import_model_class_from_model_name_or_path( + args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" + ) + + # Load scheduler and models + noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") + text_encoder_one = text_encoder_cls_one.from_pretrained( + args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant + ) + text_encoder_two = text_encoder_cls_two.from_pretrained( + args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant + ) + vae_path = ( + args.pretrained_model_name_or_path + if args.pretrained_vae_model_name_or_path is None + else args.pretrained_vae_model_name_or_path + ) + vae = AutoencoderKL.from_pretrained( + vae_path, + subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, + revision=args.revision, + variant=args.variant, + ) + unet = UNet2DConditionModel.from_pretrained( + args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant + ) + + # We only train the additional adapter LoRA layers + vae.requires_grad_(False) + text_encoder_one.requires_grad_(False) + text_encoder_two.requires_grad_(False) + unet.requires_grad_(False) + + # For mixed precision 
training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision + # as these weights are only used for inference, keeping weights in full precision is not required. + weight_dtype = torch.float32 + if accelerator.mixed_precision == "fp16": + weight_dtype = torch.float16 + elif accelerator.mixed_precision == "bf16": + weight_dtype = torch.bfloat16 + + # Move unet, vae and text_encoder to device and cast to weight_dtype + # The VAE is in float32 to avoid NaN losses. + unet.to(accelerator.device, dtype=weight_dtype) + + if args.pretrained_vae_model_name_or_path is None: + vae.to(accelerator.device, dtype=torch.float32) + else: + vae.to(accelerator.device, dtype=weight_dtype) + text_encoder_one.to(accelerator.device, dtype=weight_dtype) + text_encoder_two.to(accelerator.device, dtype=weight_dtype) + + if args.enable_xformers_memory_efficient_attention: + if is_xformers_available(): + import xformers + + xformers_version = version.parse(xformers.__version__) + if xformers_version == version.parse("0.0.16"): + logger.warning( + "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." + ) + unet.enable_xformers_memory_efficient_attention() + else: + raise ValueError("xformers is not available. Make sure it is installed correctly") + + # now we will add new LoRA weights to the attention layers + # Set correct lora layers + unet_lora_config = LoraConfig( + r=args.rank, + lora_alpha=args.rank, + init_lora_weights="gaussian", + target_modules=["to_k", "to_q", "to_v", "to_out.0"], + ) + + unet.add_adapter(unet_lora_config) + + # The text encoder comes from 🤗 transformers, we will also attach adapters to it. + if args.train_text_encoder: + # ensure that dtype is float32, even if rest of the model that isn't trained is loaded in fp16 + text_lora_config = LoraConfig( + r=args.rank, + lora_alpha=args.rank, + init_lora_weights="gaussian", + target_modules=["q_proj", "k_proj", "v_proj", "out_proj"], + ) + text_encoder_one.add_adapter(text_lora_config) + text_encoder_two.add_adapter(text_lora_config) + + def unwrap_model(model): + model = accelerator.unwrap_model(model) + model = model._orig_mod if is_compiled_module(model) else model + return model + + # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format + def save_model_hook(models, weights, output_dir): + if accelerator.is_main_process: + # there are only two options here. 
Either are just the unet attn processor layers + # or there are the unet and text encoder attn layers + unet_lora_layers_to_save = None + text_encoder_one_lora_layers_to_save = None + text_encoder_two_lora_layers_to_save = None + + for model in models: + if isinstance(unwrap_model(model), type(unwrap_model(unet))): + unet_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model)) + elif isinstance(unwrap_model(model), type(unwrap_model(text_encoder_one))): + text_encoder_one_lora_layers_to_save = convert_state_dict_to_diffusers( + get_peft_model_state_dict(model) + ) + elif isinstance(unwrap_model(model), type(unwrap_model(text_encoder_two))): + text_encoder_two_lora_layers_to_save = convert_state_dict_to_diffusers( + get_peft_model_state_dict(model) + ) + else: + raise ValueError(f"unexpected save model: {model.__class__}") + + # make sure to pop weight so that corresponding model is not saved again + if weights: + weights.pop() + + StableDiffusionXLPipeline.save_lora_weights( + output_dir, + unet_lora_layers=unet_lora_layers_to_save, + text_encoder_lora_layers=text_encoder_one_lora_layers_to_save, + text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save, + ) + + def load_model_hook(models, input_dir): + unet_ = None + text_encoder_one_ = None + text_encoder_two_ = None + + while len(models) > 0: + model = models.pop() + + if isinstance(model, type(unwrap_model(unet))): + unet_ = model + elif isinstance(model, type(unwrap_model(text_encoder_one))): + text_encoder_one_ = model + elif isinstance(model, type(unwrap_model(text_encoder_two))): + text_encoder_two_ = model + else: + raise ValueError(f"unexpected save model: {model.__class__}") + + lora_state_dict, _ = LoraLoaderMixin.lora_state_dict(input_dir) + unet_state_dict = {f'{k.replace("unet.", "")}': v for k, v in lora_state_dict.items() if k.startswith("unet.")} + unet_state_dict = convert_unet_state_dict_to_peft(unet_state_dict) + incompatible_keys = set_peft_model_state_dict(unet_, unet_state_dict, adapter_name="default") + if incompatible_keys is not None: + # check only for unexpected keys + unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) + if unexpected_keys: + logger.warning( + f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " + f" {unexpected_keys}. " + ) + + if args.train_text_encoder: + _set_state_dict_into_text_encoder(lora_state_dict, prefix="text_encoder.", text_encoder=text_encoder_one_) + + _set_state_dict_into_text_encoder( + lora_state_dict, prefix="text_encoder_2.", text_encoder=text_encoder_two_ + ) + + # Make sure the trainable params are in float32. This is again needed since the base models + # are in `weight_dtype`. 
More details: + # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804 + if args.mixed_precision == "fp16": + models = [unet_] + if args.train_text_encoder: + models.extend([text_encoder_one_, text_encoder_two_]) + cast_training_params(models, dtype=torch.float32) + + accelerator.register_save_state_pre_hook(save_model_hook) + accelerator.register_load_state_pre_hook(load_model_hook) + + if args.gradient_checkpointing: + unet.enable_gradient_checkpointing() + if args.train_text_encoder: + text_encoder_one.gradient_checkpointing_enable() + text_encoder_two.gradient_checkpointing_enable() + + # Enable TF32 for faster training on Ampere GPUs, + # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices + if args.allow_tf32: + torch.backends.cuda.matmul.allow_tf32 = True + + if args.scale_lr: + args.learning_rate = ( + args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes + ) + + # Make sure the trainable params are in float32. + if args.mixed_precision == "fp16": + models = [unet] + if args.train_text_encoder: + models.extend([text_encoder_one, text_encoder_two]) + cast_training_params(models, dtype=torch.float32) + + # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs + if args.use_8bit_adam: + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError( + "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." + ) + + optimizer_class = bnb.optim.AdamW8bit + else: + optimizer_class = torch.optim.AdamW + + # Optimizer creation + params_to_optimize = list(filter(lambda p: p.requires_grad, unet.parameters())) + if args.train_text_encoder: + params_to_optimize = ( + params_to_optimize + + list(filter(lambda p: p.requires_grad, text_encoder_one.parameters())) + + list(filter(lambda p: p.requires_grad, text_encoder_two.parameters())) + ) + optimizer = optimizer_class( + params_to_optimize, + lr=args.learning_rate, + betas=(args.adam_beta1, args.adam_beta2), + weight_decay=args.adam_weight_decay, + eps=args.adam_epsilon, + ) + + # Get the datasets: you can either provide your own training and evaluation files (see below) + # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). + + # In distributed training, the load_dataset function guarantees that only one local process can concurrently + # download the dataset. + if args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + dataset = load_dataset( + args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, data_dir=args.train_data_dir + ) + else: + data_files = {} + if args.train_data_dir is not None: + data_files["train"] = os.path.join(args.train_data_dir, "**") + dataset = load_dataset( + "imagefolder", + data_files=data_files, + cache_dir=args.cache_dir, + ) + # See more about loading custom images at + # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder + + # Preprocessing the datasets. + # We need to tokenize inputs and targets. + column_names = dataset["train"].column_names + + # 6. Get the column names for input/target. 
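# Illustration only (not part of the patch): a minimal sketch of the "imagefolder" loading path used
# above and of the column names that step 6 below resolves. It assumes a hypothetical local folder
# ./my_dataset/ containing images plus a metadata.jsonl that provides a "text" caption column
# (see https://huggingface.co/docs/datasets/image_dataset#imagefolder).
import os
from datasets import load_dataset

data_files = {"train": os.path.join("./my_dataset", "**")}
dataset_example = load_dataset("imagefolder", data_files=data_files)
print(dataset_example["train"].column_names)  # expected to include "image" and "text"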
+ dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) + if args.image_column is None: + image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] + else: + image_column = args.image_column + if image_column not in column_names: + raise ValueError( + f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" + ) + if args.caption_column is None: + caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] + else: + caption_column = args.caption_column + if caption_column not in column_names: + raise ValueError( + f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" + ) + + # Preprocessing the datasets. + # We need to tokenize input captions and transform the images. + def tokenize_captions(examples, is_train=True): + captions = [] + for caption in examples[caption_column]: + if isinstance(caption, str): + captions.append(caption) + elif isinstance(caption, (list, np.ndarray)): + # take a random caption if there are multiple + captions.append(random.choice(caption) if is_train else caption[0]) + else: + raise ValueError( + f"Caption column `{caption_column}` should contain either strings or lists of strings." + ) + tokens_one = tokenize_prompt(tokenizer_one, captions) + tokens_two = tokenize_prompt(tokenizer_two, captions) + return tokens_one, tokens_two + + # Preprocessing the datasets. + train_resize = transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR) + train_crop = transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution) + train_flip = transforms.RandomHorizontalFlip(p=1.0) + train_transforms = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Normalize([0.5], [0.5]), + ] + ) + + def preprocess_train(examples): + images = [image.convert("RGB") for image in examples[image_column]] + # image aug + original_sizes = [] + all_images = [] + crop_top_lefts = [] + for image in images: + original_sizes.append((image.height, image.width)) + image = train_resize(image) + if args.random_flip and random.random() < 0.5: + # flip + image = train_flip(image) + if args.center_crop: + y1 = max(0, int(round((image.height - args.resolution) / 2.0))) + x1 = max(0, int(round((image.width - args.resolution) / 2.0))) + image = train_crop(image) + else: + y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution)) + image = crop(image, y1, x1, h, w) + crop_top_left = (y1, x1) + crop_top_lefts.append(crop_top_left) + image = train_transforms(image) + all_images.append(image) + + examples["original_sizes"] = original_sizes + examples["crop_top_lefts"] = crop_top_lefts + examples["pixel_values"] = all_images + tokens_one, tokens_two = tokenize_captions(examples) + examples["input_ids_one"] = tokens_one + examples["input_ids_two"] = tokens_two + if args.debug_loss: + fnames = [os.path.basename(image.filename) for image in examples[image_column] if image.filename] + if fnames: + examples["filenames"] = fnames + return examples + + with accelerator.main_process_first(): + if args.max_train_samples is not None: + dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) + # Set the training transforms + train_dataset = dataset["train"].with_transform(preprocess_train, output_all_columns=True) + + def collate_fn(examples): + pixel_values = torch.stack([example["pixel_values"] for example in examples]) + pixel_values = 
pixel_values.to(memory_format=torch.contiguous_format).float() + original_sizes = [example["original_sizes"] for example in examples] + crop_top_lefts = [example["crop_top_lefts"] for example in examples] + input_ids_one = torch.stack([example["input_ids_one"] for example in examples]) + input_ids_two = torch.stack([example["input_ids_two"] for example in examples]) + result = { + "pixel_values": pixel_values, + "input_ids_one": input_ids_one, + "input_ids_two": input_ids_two, + "original_sizes": original_sizes, + "crop_top_lefts": crop_top_lefts, + } + + filenames = [example["filenames"] for example in examples if "filenames" in example] + if filenames: + result["filenames"] = filenames + return result + + # DataLoaders creation: + train_dataloader = torch.utils.data.DataLoader( + train_dataset, + shuffle=True, + collate_fn=collate_fn, + batch_size=args.train_batch_size, + num_workers=args.dataloader_num_workers, + ) + + # Scheduler and math around the number of training steps. + overrode_max_train_steps = False + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + overrode_max_train_steps = True + + lr_scheduler = get_scheduler( + args.lr_scheduler, + optimizer=optimizer, + num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + ) + + # Prepare everything with our `accelerator`. + if args.train_text_encoder: + unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( + unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler + ) + else: + unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( + unet, optimizer, train_dataloader, lr_scheduler + ) + + # We need to recalculate our total training steps as the size of the training dataloader may have changed. + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if overrode_max_train_steps: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + # Afterwards we recalculate our number of training epochs + args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + + # We need to initialize the trackers we use, and also store our configuration. + # The trackers initializes automatically on the main process. + if accelerator.is_main_process: + accelerator.init_trackers("text2image-fine-tune", config=vars(args)) + + # Train! + total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps + + logger.info("***** Running training *****") + logger.info(f" Num examples = {len(train_dataset)}") + logger.info(f" Num Epochs = {args.num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") + logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {args.max_train_steps}") + global_step = 0 + first_epoch = 0 + + # Potentially load in the weights and states from a previous save + if args.resume_from_checkpoint: + if args.resume_from_checkpoint != "latest": + path = os.path.basename(args.resume_from_checkpoint) + else: + # Get the most recent checkpoint + dirs = os.listdir(args.output_dir) + dirs = [d for d in dirs if d.startswith("checkpoint")] + dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) + path = dirs[-1] if len(dirs) > 0 else None + + if path is None: + accelerator.print( + f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." + ) + args.resume_from_checkpoint = None + initial_global_step = 0 + else: + accelerator.print(f"Resuming from checkpoint {path}") + accelerator.load_state(os.path.join(args.output_dir, path)) + global_step = int(path.split("-")[1]) + + initial_global_step = global_step + first_epoch = global_step // num_update_steps_per_epoch + + else: + initial_global_step = 0 + + progress_bar = tqdm( + range(0, args.max_train_steps), + initial=initial_global_step, + desc="Steps", + # Only show the progress bar once on each machine. + disable=not accelerator.is_local_main_process, + ) + + for epoch in range(first_epoch, args.num_train_epochs): + unet.train() + if args.train_text_encoder: + text_encoder_one.train() + text_encoder_two.train() + train_loss = 0.0 + for step, batch in enumerate(train_dataloader): + with accelerator.accumulate(unet): + # Convert images to latent space + if args.pretrained_vae_model_name_or_path is not None: + pixel_values = batch["pixel_values"].to(dtype=weight_dtype) + else: + pixel_values = batch["pixel_values"] + + model_input = vae.encode(pixel_values).latent_dist.sample() + model_input = model_input * vae.config.scaling_factor + if args.pretrained_vae_model_name_or_path is None: + model_input = model_input.to(weight_dtype) + + # Sample noise that we'll add to the latents + noise = torch.randn_like(model_input) + if args.noise_offset: + # https://www.crosslabs.org//blog/diffusion-with-offset-noise + noise += args.noise_offset * torch.randn( + (model_input.shape[0], model_input.shape[1], 1, 1), device=model_input.device + ) + + bsz = model_input.shape[0] + # Sample a random timestep for each image + timesteps = torch.randint( + 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device + ) + timesteps = timesteps.long() + + # Add noise to the model input according to the noise magnitude at each timestep + # (this is the forward diffusion process) + noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps) + + # time ids + def compute_time_ids(original_size, crops_coords_top_left): + # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids + target_size = (args.resolution, args.resolution) + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_time_ids = torch.tensor([add_time_ids]) + add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype) + return add_time_ids + + add_time_ids = torch.cat( + [compute_time_ids(s, c) for s, c in zip(batch["original_sizes"], batch["crop_top_lefts"])] + ) + + # Predict the noise residual + unet_added_conditions = {"time_ids": add_time_ids} + prompt_embeds, pooled_prompt_embeds = encode_prompt( + text_encoders=[text_encoder_one, 
text_encoder_two], + tokenizers=None, + prompt=None, + text_input_ids_list=[batch["input_ids_one"], batch["input_ids_two"]], + ) + unet_added_conditions.update({"text_embeds": pooled_prompt_embeds}) + model_pred = unet( + noisy_model_input, + timesteps, + prompt_embeds, + added_cond_kwargs=unet_added_conditions, + return_dict=False, + )[0] + + # Get the target for loss depending on the prediction type + if args.prediction_type is not None: + # set prediction_type of scheduler if defined + noise_scheduler.register_to_config(prediction_type=args.prediction_type) + + if noise_scheduler.config.prediction_type == "epsilon": + target = noise + elif noise_scheduler.config.prediction_type == "v_prediction": + target = noise_scheduler.get_velocity(model_input, noise, timesteps) + else: + raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") + + if args.snr_gamma is None: + loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") + else: + # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. + # Since we predict the noise instead of x_0, the original formulation is slightly changed. + # This is discussed in Section 4.2 of the same paper. + snr = compute_snr(noise_scheduler, timesteps) + mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( + dim=1 + )[0] + if noise_scheduler.config.prediction_type == "epsilon": + mse_loss_weights = mse_loss_weights / snr + elif noise_scheduler.config.prediction_type == "v_prediction": + mse_loss_weights = mse_loss_weights / (snr + 1) + + loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") + loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights + loss = loss.mean() + if args.debug_loss and "filenames" in batch: + for fname in batch["filenames"]: + accelerator.log({"loss_for_" + fname: loss}, step=global_step) + # Gather the losses across all processes for logging (if we use distributed training). 
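
For reference, a minimal sketch (separate from the diff, plain PyTorch with illustrative SNR values) of the Min-SNR weighting used just above: for epsilon prediction it reduces to scaling each sample's MSE by min(SNR_t, snr_gamma) / SNR_t, so low-noise timesteps are down-weighted while high-noise ones keep weight 1.

import torch

def min_snr_mse_weights(snr: torch.Tensor, snr_gamma: float, prediction_type: str = "epsilon") -> torch.Tensor:
    # Clamp the per-timestep SNR at snr_gamma (Section 3.4 of https://arxiv.org/abs/2303.09556),
    # then rescale depending on the prediction target, mirroring the branch above.
    clamped = torch.stack([snr, snr_gamma * torch.ones_like(snr)], dim=1).min(dim=1)[0]
    if prediction_type == "epsilon":
        return clamped / snr
    if prediction_type == "v_prediction":
        return clamped / (snr + 1)
    raise ValueError(f"Unknown prediction type {prediction_type}")

# Illustrative values: SNR 25 (low noise) is down-weighted to 0.2; SNR <= gamma keeps weight 1.0.
print(min_snr_mse_weights(torch.tensor([25.0, 5.0, 0.5]), snr_gamma=5.0))
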
+ avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() + train_loss += avg_loss.item() / args.gradient_accumulation_steps + + # Backpropagate + accelerator.backward(loss) + if accelerator.sync_gradients: + accelerator.clip_grad_norm_(params_to_optimize, args.max_grad_norm) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + progress_bar.update(1) + global_step += 1 + accelerator.log({"train_loss": train_loss}, step=global_step) + train_loss = 0.0 + + if accelerator.is_main_process: + if global_step % args.checkpointing_steps == 0: + # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` + if args.checkpoints_total_limit is not None: + checkpoints = os.listdir(args.output_dir) + checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] + checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) + + # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints + if len(checkpoints) >= args.checkpoints_total_limit: + num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 + removing_checkpoints = checkpoints[0:num_to_remove] + + logger.info( + f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" + ) + logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") + + for removing_checkpoint in removing_checkpoints: + removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) + shutil.rmtree(removing_checkpoint) + + save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") + accelerator.save_state(save_path) + logger.info(f"Saved state to {save_path}") + + logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} + progress_bar.set_postfix(**logs) + + if global_step >= args.max_train_steps: + break + + if accelerator.is_main_process: + if args.validation_prompt is not None and epoch % args.validation_epochs == 0: + logger.info( + f"Running validation... \n Generating {args.num_validation_images} images with prompt:" + f" {args.validation_prompt}." 
+ ) + # create pipeline + pipeline = StableDiffusionXLPipeline.from_pretrained( + args.pretrained_model_name_or_path, + vae=vae, + text_encoder=unwrap_model(text_encoder_one), + text_encoder_2=unwrap_model(text_encoder_two), + unet=unwrap_model(unet), + revision=args.revision, + variant=args.variant, + torch_dtype=weight_dtype, + ) + + pipeline = pipeline.to(accelerator.device) + pipeline.set_progress_bar_config(disable=True) + + # run inference + generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None + pipeline_args = {"prompt": args.validation_prompt} + + with torch.cuda.amp.autocast(): + images = [ + pipeline(**pipeline_args, generator=generator).images[0] + for _ in range(args.num_validation_images) + ] + + for tracker in accelerator.trackers: + if tracker.name == "tensorboard": + np_images = np.stack([np.asarray(img) for img in images]) + tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") + if tracker.name == "wandb": + tracker.log( + { + "validation": [ + wandb.Image(image, caption=f"{i}: {args.validation_prompt}") + for i, image in enumerate(images) + ] + } + ) + + del pipeline + torch.cuda.empty_cache() + + # Save the lora layers + accelerator.wait_for_everyone() + if accelerator.is_main_process: + unet = unwrap_model(unet) + unet_lora_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet)) + + if args.train_text_encoder: + text_encoder_one = unwrap_model(text_encoder_one) + text_encoder_two = unwrap_model(text_encoder_two) + + text_encoder_lora_layers = convert_state_dict_to_diffusers(get_peft_model_state_dict(text_encoder_one)) + text_encoder_2_lora_layers = convert_state_dict_to_diffusers(get_peft_model_state_dict(text_encoder_two)) + else: + text_encoder_lora_layers = None + text_encoder_2_lora_layers = None + + StableDiffusionXLPipeline.save_lora_weights( + save_directory=args.output_dir, + unet_lora_layers=unet_lora_state_dict, + text_encoder_lora_layers=text_encoder_lora_layers, + text_encoder_2_lora_layers=text_encoder_2_lora_layers, + ) + + del unet + del text_encoder_one + del text_encoder_two + del text_encoder_lora_layers + del text_encoder_2_lora_layers + torch.cuda.empty_cache() + + # Final inference + # Make sure vae.dtype is consistent with the unet.dtype + if args.mixed_precision == "fp16": + vae.to(weight_dtype) + # Load previous pipeline + pipeline = StableDiffusionXLPipeline.from_pretrained( + args.pretrained_model_name_or_path, + vae=vae, + revision=args.revision, + variant=args.variant, + torch_dtype=weight_dtype, + ) + pipeline = pipeline.to(accelerator.device) + + # load attention processors + pipeline.load_lora_weights(args.output_dir) + + # run inference + images = [] + if args.validation_prompt and args.num_validation_images > 0: + generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None + images = [ + pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] + for _ in range(args.num_validation_images) + ] + + for tracker in accelerator.trackers: + if tracker.name == "tensorboard": + np_images = np.stack([np.asarray(img) for img in images]) + tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC") + if tracker.name == "wandb": + tracker.log( + { + "test": [ + wandb.Image(image, caption=f"{i}: {args.validation_prompt}") + for i, image in enumerate(images) + ] + } + ) + + if args.push_to_hub: + save_model_card( + repo_id, + images=images, + 
base_model=args.pretrained_model_name_or_path, + dataset_name=args.dataset_name, + train_text_encoder=args.train_text_encoder, + repo_folder=args.output_dir, + vae_path=args.pretrained_vae_model_name_or_path, + ) + upload_folder( + repo_id=repo_id, + folder_path=args.output_dir, + commit_message="End of training", + ignore_patterns=["step_*", "epoch_*"], + ) + + accelerator.end_training() + + +if __name__ == "__main__": + args = parse_args() + main(args) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_sdxl.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_sdxl.py new file mode 100644 index 000000000..a360c5fab --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/train_text_to_image_sdxl.py @@ -0,0 +1,1374 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Fine-tuning script for Stable Diffusion XL for text2image.""" + +import argparse +import functools +import gc +import logging +import math +import os +import random +import shutil +from pathlib import Path +import time + +import accelerate +import datasets +import numpy as np +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +import transformers +from accelerate import Accelerator +from accelerate.state import AcceleratorState +from accelerate.logging import get_logger +from accelerate.utils import ProjectConfiguration, set_seed +from datasets import concatenate_datasets, load_dataset +from huggingface_hub import create_repo, upload_folder +from packaging import version +from torchvision import transforms +from torchvision.transforms.functional import crop +from tqdm.auto import tqdm +from transformers import AutoTokenizer, PretrainedConfig +from transformers.utils import ContextManagers + +import diffusers +from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel +from diffusers.optimization import get_scheduler +from diffusers.training_utils import EMAModel, compute_snr +from diffusers.utils import check_min_version, is_wandb_available +from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card +from diffusers.utils.import_utils import is_xformers_available +from diffusers.utils.torch_utils import is_compiled_module + + +# Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
+check_min_version("0.27.0") + +logger = get_logger(__name__) + + +DATASET_NAME_MAPPING = { + "lambdalabs/pokemon-blip-captions": ("image", "text"), +} + + +def save_model_card( + repo_id: str, + images: list = None, + validation_prompt: str = None, + base_model: str = None, + dataset_name: str = None, + repo_folder: str = None, + vae_path: str = None, +): + img_str = "" + if images is not None: + for i, image in enumerate(images): + image.save(os.path.join(repo_folder, f"image_{i}.png")) + img_str += f"![img_{i}](./image_{i}.png)\n" + + model_description = f""" +# Text-to-image finetuning - {repo_id} + +This pipeline was finetuned from **{base_model}** on the **{dataset_name}** dataset. Below are some example images generated with the finetuned pipeline using the following prompt: {validation_prompt}: \n +{img_str} + +Special VAE used for training: {vae_path}. +""" + + model_card = load_or_create_model_card( + repo_id_or_path=repo_id, + from_training=True, + license="creativeml-openrail-m", + base_model=base_model, + model_description=model_description, + inference=True, + ) + + tags = [ + "stable-diffusion-xl", + "stable-diffusion-xl-diffusers", + "text-to-image", + "diffusers-training", + "diffusers", + ] + model_card = populate_model_card(model_card, tags=tags) + + model_card.save(os.path.join(repo_folder, "README.md")) + + +def import_model_class_from_model_name_or_path( + pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" +): + text_encoder_config = PretrainedConfig.from_pretrained( + pretrained_model_name_or_path, subfolder=subfolder, revision=revision + ) + model_class = text_encoder_config.architectures[0] + + if model_class == "CLIPTextModel": + from transformers import CLIPTextModel + + return CLIPTextModel + elif model_class == "CLIPTextModelWithProjection": + from transformers import CLIPTextModelWithProjection + + return CLIPTextModelWithProjection + else: + raise ValueError(f"{model_class} is not supported.") + + +def parse_args(input_args=None): + parser = argparse.ArgumentParser(description="Simple example of a training script.") + parser.add_argument( + "--pretrained_model_name_or_path", + type=str, + default=None, + required=True, + help="Path to pretrained model or model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--pretrained_vae_model_name_or_path", + type=str, + default=None, + help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.", + ) + parser.add_argument( + "--revision", + type=str, + default=None, + required=False, + help="Revision of pretrained model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--variant", + type=str, + default=None, + help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", + ) + parser.add_argument( + "--dataset_name", + type=str, + default=None, + help=( + "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," + " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," + " or to a folder containing files that 🤗 Datasets can understand." + ), + ) + parser.add_argument( + "--dataset_config_name", + type=str, + default=None, + help="The config of the Dataset, leave as None if there's only one config.", + ) + parser.add_argument( + "--train_data_dir", + type=str, + default=None, + help=( + "A folder containing the training data. 
Folder contents must follow the structure described in" + " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" + " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." + ), + ) + parser.add_argument( + "--image_column", type=str, default="image", help="The column of the dataset containing an image." + ) + parser.add_argument( + "--caption_column", + type=str, + default="text", + help="The column of the dataset containing a caption or a list of captions.", + ) + parser.add_argument( + "--validation_prompt", + type=str, + default=None, + help="A prompt that is used during validation to verify that the model is learning.", + ) + parser.add_argument( + "--num_validation_images", + type=int, + default=4, + help="Number of images that should be generated during validation with `validation_prompt`.", + ) + parser.add_argument( + "--validation_epochs", + type=int, + default=1, + help=( + "Run fine-tuning validation every X epochs. The validation process consists of running the prompt" + " `args.validation_prompt` multiple times: `args.num_validation_images`." + ), + ) + parser.add_argument( + "--max_train_samples", + type=int, + default=None, + help=( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + ), + ) + parser.add_argument( + "--proportion_empty_prompts", + type=float, + default=0, + help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).", + ) + parser.add_argument( + "--output_dir", + type=str, + default="sdxl-model-finetuned", + help="The output directory where the model predictions and checkpoints will be written.", + ) + parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="The directory where the downloaded models and datasets will be stored.", + ) + parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") + parser.add_argument( + "--resolution", + type=int, + default=1024, + help=( + "The resolution for input images, all the images in the train/validation dataset will be resized to this" + " resolution" + ), + ) + parser.add_argument( + "--center_crop", + default=False, + action="store_true", + help=( + "Whether to center crop the input images to the resolution. If not set, the images will be randomly" + " cropped. The images will be resized to the resolution first before cropping." + ), + ) + parser.add_argument( + "--random_flip", + action="store_true", + help="whether to randomly flip images horizontally", + ) + parser.add_argument( + "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." + ) + parser.add_argument("--num_train_epochs", type=int, default=100) + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--checkpointing_steps", + type=int, + default=500, + help=( + "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" + " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" + " training using `--resume_from_checkpoint`." 
+ ), + ) + parser.add_argument( + "--checkpoints_total_limit", + type=int, + default=None, + help=("Max number of checkpoints to store."), + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help=( + "Whether training should be resumed from a previous checkpoint. Use a path saved by" + ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' + ), + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--gradient_checkpointing", + action="store_true", + help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=1e-4, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--scale_lr", + action="store_true", + default=False, + help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", + ) + parser.add_argument( + "--lr_scheduler", + type=str, + default="constant", + help=( + 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' + ' "constant", "constant_with_warmup"]' + ), + ) + parser.add_argument( + "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." + ) + parser.add_argument( + "--timestep_bias_strategy", + type=str, + default="none", + choices=["earlier", "later", "range", "none"], + help=( + "The timestep bias strategy, which may help direct the model toward learning low or high frequency details." + " Choices: ['earlier', 'later', 'range', 'none']." + " The default is 'none', which means no bias is applied, and training proceeds normally." + " The value of 'later' will increase the frequency of the model's final training timesteps." + ), + ) + parser.add_argument( + "--timestep_bias_multiplier", + type=float, + default=1.0, + help=( + "The multiplier for the bias. Defaults to 1.0, which means no bias is applied." + " A value of 2.0 will double the weight of the bias, and a value of 0.5 will halve it." + ), + ) + parser.add_argument( + "--timestep_bias_begin", + type=int, + default=0, + help=( + "When using `--timestep_bias_strategy=range`, the beginning (inclusive) timestep to bias." + " Defaults to zero, which equates to having no specific bias." + ), + ) + parser.add_argument( + "--timestep_bias_end", + type=int, + default=1000, + help=( + "When using `--timestep_bias_strategy=range`, the final timestep (inclusive) to bias." + " Defaults to 1000, which is the number of timesteps that Stable Diffusion is trained on." + ), + ) + parser.add_argument( + "--timestep_bias_portion", + type=float, + default=0.25, + help=( + "The portion of timesteps to bias. Defaults to 0.25, which 25% of timesteps will be biased." + " A value of 0.5 will bias one half of the timesteps. The value provided for `--timestep_bias_strategy` determines" + " whether the biased portions are in the earlier or later timesteps." + ), + ) + parser.add_argument( + "--snr_gamma", + type=float, + default=None, + help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. 
" + "More details here: https://arxiv.org/abs/2303.09556.", + ) + parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") + parser.add_argument( + "--allow_tf32", + action="store_true", + help=( + "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" + " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" + ), + ) + parser.add_argument( + "--dataloader_num_workers", + type=int, + default=0, + help=( + "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." + ), + ) + parser.add_argument( + "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." + ) + parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") + parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") + parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") + parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") + parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") + parser.add_argument( + "--prediction_type", + type=str, + default=None, + help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.", + ) + parser.add_argument( + "--hub_model_id", + type=str, + default=None, + help="The name of the repository to keep in sync with the local `output_dir`.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help=( + "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" + " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." + ), + ) + parser.add_argument( + "--report_to", + type=str, + default="tensorboard", + help=( + 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' + ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' + ), + ) + parser.add_argument( + "--mixed_precision", + type=str, + default=None, + choices=["no", "fp16", "bf16"], + help=( + "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" + " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" + " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." + ), + ) + parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument( + "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
+ ) + parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") + + parser.add_argument( + "--NHWC", + action="store_true", + help="Whether or not using NHWC for training", + ) + parser.add_argument( + "--apex_fused_adam", + action="store_true", + help="Whether or not using fused_adam optimizer", + ) + + if input_args is not None: + args = parser.parse_args(input_args) + else: + args = parser.parse_args() + + env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) + if env_local_rank != -1 and env_local_rank != args.local_rank: + args.local_rank = env_local_rank + + # Sanity checks + if args.dataset_name is None and args.train_data_dir is None: + raise ValueError("Need either a dataset name or a training folder.") + + if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: + raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") + + return args + + +# Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt +def encode_prompt(batch, text_encoders, tokenizers, proportion_empty_prompts, caption_column, is_train=True): + prompt_embeds_list = [] + prompt_batch = batch[caption_column] + + captions = [] + for caption in prompt_batch: + if random.random() < proportion_empty_prompts: + captions.append("") + elif isinstance(caption, str): + captions.append(caption) + elif isinstance(caption, (list, np.ndarray)): + # take a random caption if there are multiple + captions.append(random.choice(caption) if is_train else caption[0]) + + with torch.no_grad(): + for tokenizer, text_encoder in zip(tokenizers, text_encoders): + text_inputs = tokenizer( + captions, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + prompt_embeds = text_encoder( + text_input_ids.to(text_encoder.device), + output_hidden_states=True, + return_dict=False, + ) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + prompt_embeds = prompt_embeds[-1][-2] + bs_embed, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1) + return {"prompt_embeds": prompt_embeds.cpu(), "pooled_prompt_embeds": pooled_prompt_embeds.cpu()} + + +def compute_vae_encodings(batch, vae): + memory_format = torch.channels_last if int(os.environ["USE_NHWC_GN"]) else torch.contiguous_format + images = batch.pop("pixel_values") + pixel_values = torch.stack(list(images)) + pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() + pixel_values = pixel_values.to(vae.device, dtype=vae.dtype, memory_format=memory_format) + + with torch.no_grad(): + model_input = vae.encode(pixel_values).latent_dist.sample() + model_input = model_input.to(memory_format=torch.contiguous_format) * vae.config.scaling_factor + return {"model_input": model_input.cpu()} + + +def generate_timestep_weights(args, num_timesteps): + weights = torch.ones(num_timesteps) + + # Determine the indices to bias + num_to_bias = int(args.timestep_bias_portion * num_timesteps) + + if args.timestep_bias_strategy == "later": + bias_indices = slice(-num_to_bias, None) + elif args.timestep_bias_strategy == "earlier": + bias_indices = slice(0, num_to_bias) + elif args.timestep_bias_strategy == "range": + # Out of the possible 1000 
timesteps, we might want to focus on eg. 200-500. + range_begin = args.timestep_bias_begin + range_end = args.timestep_bias_end + if range_begin < 0: + raise ValueError( + "When using the range strategy for timestep bias, you must provide a beginning timestep greater or equal to zero." + ) + if range_end > num_timesteps: + raise ValueError( + "When using the range strategy for timestep bias, you must provide an ending timestep smaller than the number of timesteps." + ) + bias_indices = slice(range_begin, range_end) + else: # 'none' or any other string + return weights + if args.timestep_bias_multiplier <= 0: + return ValueError( + "The parameter --timestep_bias_multiplier is not intended to be used to disable the training of specific timesteps." + " If it was intended to disable timestep bias, use `--timestep_bias_strategy none` instead." + " A timestep bias multiplier less than or equal to 0 is not allowed." + ) + + # Apply the bias + weights[bias_indices] *= args.timestep_bias_multiplier + + # Normalize + weights /= weights.sum() + + return weights + + +def main(args): + if int(os.environ.get("USE_NHWC_GN", 0)): + assert args.NHWC, "USE_NHWC_GN requires NHWC to be true" + assert int(os.supports_bytes_environ) + if args.report_to == "wandb" and args.hub_token is not None: + raise ValueError( + "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." + " Please use `huggingface-cli login` to authenticate with the Hub." + ) + + logging_dir = Path(args.output_dir, args.logging_dir) + + accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) + + accelerator = Accelerator( + gradient_accumulation_steps=args.gradient_accumulation_steps, + mixed_precision=args.mixed_precision, + log_with=args.report_to, + project_config=accelerator_project_config, + ) + + if args.report_to == "wandb": + if not is_wandb_available(): + raise ImportError("Make sure to install wandb if you want to use it for logging during training.") + import wandb + + # Make one log on every process with the configuration for debugging. + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + logger.info(accelerator.state, main_process_only=False) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_warning() + diffusers.utils.logging.set_verbosity_info() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + diffusers.utils.logging.set_verbosity_error() + + # If passed along, set the training seed now. 
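
As background for the `with accelerator.accumulate(unet):` blocks in the training loops below, this is a minimal single-process sketch of Accelerate's gradient accumulation (toy model, assumed values; not part of the diff). Per Accelerate's accumulation handling, the prepared optimizer only applies an update on micro-batches where `sync_gradients` is True.

import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)  # update every 4 micro-batches
model = torch.nn.Linear(8, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

for step in range(8):
    batch = torch.randn(2, 8)
    with accelerator.accumulate(model):
        loss = model(batch).pow(2).mean()
        accelerator.backward(loss)            # gradients accumulate across micro-batches
        if accelerator.sync_gradients:        # True only on the micro-batch that triggers an update
            accelerator.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        optimizer.zero_grad()
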
+ if args.seed is not None: + set_seed(args.seed) + + # Handle the repository creation + if accelerator.is_main_process: + if args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + + if args.push_to_hub: + repo_id = create_repo( + repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token + ).repo_id + + # Load the tokenizers + tokenizer_one = AutoTokenizer.from_pretrained( + args.pretrained_model_name_or_path, + subfolder="tokenizer", + revision=args.revision, + use_fast=False, + ) + tokenizer_two = AutoTokenizer.from_pretrained( + args.pretrained_model_name_or_path, + subfolder="tokenizer_2", + revision=args.revision, + use_fast=False, + ) + + # import correct text encoder classes + text_encoder_cls_one = import_model_class_from_model_name_or_path( + args.pretrained_model_name_or_path, args.revision + ) + text_encoder_cls_two = import_model_class_from_model_name_or_path( + args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" + ) + + # Load scheduler and models + noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") + + def deepspeed_zero_init_disabled_context_manager(): + """ + returns either a context list that includes one that will disable zero.Init or an empty context list + """ + deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None + if deepspeed_plugin is None: + return [] + + return [deepspeed_plugin.zero3_init_context_manager(enable=False)] + + vae_path = ( + args.pretrained_model_name_or_path + if args.pretrained_vae_model_name_or_path is None + else args.pretrained_vae_model_name_or_path + ) + + with ContextManagers(deepspeed_zero_init_disabled_context_manager()): + # Check for terminal SNR in combination with SNR Gamma + text_encoder_one = text_encoder_cls_one.from_pretrained( + args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant + ) + text_encoder_two = text_encoder_cls_two.from_pretrained( + args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant + ) + # vae 因为需要用float32的缘故,其中的attn部分需要用native实现,因为flash-attn 不支持float32 + origin_attn = os.environ.get("USE_NATIVE_ATTN", 0) + os.environ["USE_NATIVE_ATTN"] = "1" + vae = AutoencoderKL.from_pretrained( + vae_path, + subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, + revision=args.revision, + variant=args.variant, + ) + os.environ["USE_NATIVE_ATTN"] = origin_attn + unet = UNet2DConditionModel.from_pretrained( + args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant + ) + + # Freeze vae and text encoders. + vae.requires_grad_(False) + text_encoder_one.requires_grad_(False) + text_encoder_two.requires_grad_(False) + # Set unet as trainable. + unet.train() + + # For mixed precision training we cast all non-trainable weights to half-precision + # as these weights are only used for inference, keeping weights in full precision is not required. + weight_dtype = torch.float32 + if accelerator.mixed_precision == "fp16": + weight_dtype = torch.float16 + elif accelerator.mixed_precision == "bf16": + weight_dtype = torch.bfloat16 + + # Move unet, vae and text_encoder to device and cast to weight_dtype + # The VAE is in float32 to avoid NaN losses. 
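
The VAE load above temporarily sets USE_NATIVE_ATTN and then restores the previous value; since os.environ only stores strings, an unset default of 0 cannot be written back directly. A small illustrative helper for this pattern (names assumed, not part of the diff):

import os
from contextlib import contextmanager

@contextmanager
def temp_env(name: str, value: str):
    # Set an environment variable for the duration of the block, then restore
    # the previous value (or remove the variable if it was unset before).
    previous = os.environ.get(name)
    os.environ[name] = value
    try:
        yield
    finally:
        if previous is None:
            os.environ.pop(name, None)
        else:
            os.environ[name] = previous

# Usage sketch: force the native attention path only while loading the fp32 VAE.
# with temp_env("USE_NATIVE_ATTN", "1"):
#     vae = AutoencoderKL.from_pretrained(vae_path, subfolder="vae")
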
+ vae.to(accelerator.device, dtype=torch.float32) + text_encoder_one.to(accelerator.device, dtype=weight_dtype) + text_encoder_two.to(accelerator.device, dtype=weight_dtype) + + # Create EMA for the unet. + if args.use_ema: + ema_unet = UNet2DConditionModel.from_pretrained( + args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant + ) + ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) + + if args.enable_xformers_memory_efficient_attention: + if is_xformers_available(): + import xformers + + xformers_version = version.parse(xformers.__version__) + if xformers_version == version.parse("0.0.16"): + logger.warning( + "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." + ) + unet.enable_xformers_memory_efficient_attention() + else: + raise ValueError("xformers is not available. Make sure it is installed correctly") + + # `accelerate` 0.16.0 will have better support for customized saving + if version.parse(accelerate.__version__) >= version.parse("0.16.0"): + # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format + def save_model_hook(models, weights, output_dir): + if accelerator.is_main_process: + if args.use_ema: + ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) + + for i, model in enumerate(models): + model.save_pretrained(os.path.join(output_dir, "unet")) + + # make sure to pop weight so that corresponding model is not saved again + weights.pop() + + def load_model_hook(models, input_dir): + if args.use_ema: + load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) + ema_unet.load_state_dict(load_model.state_dict()) + ema_unet.to(accelerator.device) + del load_model + + for _ in range(len(models)): + # pop models so that they are not loaded again + model = models.pop() + + # load diffusers style into model + load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") + model.register_to_config(**load_model.config) + + model.load_state_dict(load_model.state_dict()) + del load_model + + accelerator.register_save_state_pre_hook(save_model_hook) + accelerator.register_load_state_pre_hook(load_model_hook) + + if args.gradient_checkpointing: + unet.enable_gradient_checkpointing() + + # Enable TF32 for faster training on Ampere GPUs, + # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices + if args.allow_tf32: + torch.backends.cuda.matmul.allow_tf32 = True + + if args.scale_lr: + args.learning_rate = ( + args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes + ) + + # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs + if args.use_8bit_adam: + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError( + "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." 
+ ) + + optimizer_class = bnb.optim.AdamW8bit + elif args.apex_fused_adam: + import apex + optimizer_class = apex.optimizers.FusedAdam + else: + optimizer_class = torch.optim.AdamW + + # Optimizer creation + params_to_optimize = unet.parameters() + optimizer = optimizer_class( + params_to_optimize, + lr=args.learning_rate, + betas=(args.adam_beta1, args.adam_beta2), + weight_decay=args.adam_weight_decay, + eps=args.adam_epsilon, + ) + + # Get the datasets: you can either provide your own training and evaluation files (see below) + # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). + + # In distributed training, the load_dataset function guarantees that only one local process can concurrently + # download the dataset. + if args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + dataset = load_dataset( + args.dataset_name, + args.dataset_config_name, + cache_dir=args.cache_dir, + ) + else: + data_files = {} + if args.train_data_dir is not None: + data_files["train"] = os.path.join(args.train_data_dir, "**") + dataset = load_dataset( + "imagefolder", + data_files=data_files, + cache_dir=args.cache_dir, + ) + # See more about loading custom images at + # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder + + # Preprocessing the datasets. + # We need to tokenize inputs and targets. + column_names = dataset["train"].column_names + + # 6. Get the column names for input/target. + dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) + if args.image_column is None: + image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] + else: + image_column = args.image_column + if image_column not in column_names: + raise ValueError( + f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" + ) + if args.caption_column is None: + caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] + else: + caption_column = args.caption_column + if caption_column not in column_names: + raise ValueError( + f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" + ) + + # Preprocessing the datasets. 
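
The transforms defined below record each image's original size and crop offset; SDXL later consumes them, together with the target resolution, as a flat six-value "add_time_ids" micro-conditioning vector. An illustrative sketch with assumed values (not part of the diff):

import torch

def build_add_time_ids(original_size, crop_top_left, target_size=(1024, 1024)):
    # SDXL micro-conditioning layout: (orig_h, orig_w, crop_top, crop_left, target_h, target_w).
    return torch.tensor([list(original_size + crop_top_left + target_size)], dtype=torch.float32)

# A 768x512 source image, cropped starting at (top=17, left=0), trained at 1024x1024:
print(build_add_time_ids((768, 512), (17, 0)))  # tensor([[ 768., 512., 17., 0., 1024., 1024.]])
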
+ train_resize = transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR) + train_crop = transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution) + train_flip = transforms.RandomHorizontalFlip(p=1.0) + train_transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]) + + def preprocess_train(examples): + images = [image.convert("RGB") for image in examples[image_column]] + # image aug + original_sizes = [] + all_images = [] + crop_top_lefts = [] + for image in images: + original_sizes.append((image.height, image.width)) + image = train_resize(image) + if args.random_flip and random.random() < 0.5: + # flip + image = train_flip(image) + if args.center_crop: + y1 = max(0, int(round((image.height - args.resolution) / 2.0))) + x1 = max(0, int(round((image.width - args.resolution) / 2.0))) + image = train_crop(image) + else: + y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution)) + image = crop(image, y1, x1, h, w) + crop_top_left = (y1, x1) + crop_top_lefts.append(crop_top_left) + image = train_transforms(image) + all_images.append(image) + + examples["original_sizes"] = original_sizes + examples["crop_top_lefts"] = crop_top_lefts + examples["pixel_values"] = all_images + return examples + + with accelerator.main_process_first(): + if args.max_train_samples is not None: + dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) + # Set the training transforms + train_dataset = dataset["train"].with_transform(preprocess_train) + + # Let's first compute all the embeddings so that we can free up the text encoders + # from memory. We will pre-compute the VAE encodings too. + text_encoders = [text_encoder_one, text_encoder_two] + tokenizers = [tokenizer_one, tokenizer_two] + compute_embeddings_fn = functools.partial( + encode_prompt, + text_encoders=text_encoders, + tokenizers=tokenizers, + proportion_empty_prompts=args.proportion_empty_prompts, + caption_column=args.caption_column, + ) + compute_vae_encodings_fn = functools.partial(compute_vae_encodings, vae=vae) + with accelerator.main_process_first(): + from datasets.fingerprint import Hasher + + # fingerprint used by the cache for the other processes to load the result + # details: https://github.com/huggingface/diffusers/pull/4038#discussion_r1266078401 + new_fingerprint = Hasher.hash(args) + new_fingerprint_for_vae = Hasher.hash(vae_path) + train_dataset_with_embeddings = train_dataset.map( + compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint + ) + train_dataset_with_vae = train_dataset.map( + compute_vae_encodings_fn, + batched=True, + # batch_size=args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps,\ + batch_size=args.train_batch_size, + new_fingerprint=new_fingerprint_for_vae, + ) + precomputed_dataset = concatenate_datasets( + [train_dataset_with_embeddings, train_dataset_with_vae.remove_columns(["image", "text"])], axis=1 + ) + precomputed_dataset = precomputed_dataset.with_transform(preprocess_train) + + del text_encoders, tokenizers, vae + gc.collect() + torch.cuda.empty_cache() + + def collate_fn(examples): + model_input = torch.stack([torch.tensor(example["model_input"]) for example in examples]) + original_sizes = [example["original_sizes"] for example in examples] + crop_top_lefts = [example["crop_top_lefts"] for example in examples] + prompt_embeds = torch.stack([torch.tensor(example["prompt_embeds"]) for 
example in examples]) + pooled_prompt_embeds = torch.stack([torch.tensor(example["pooled_prompt_embeds"]) for example in examples]) + + return { + "model_input": model_input, + "prompt_embeds": prompt_embeds, + "pooled_prompt_embeds": pooled_prompt_embeds, + "original_sizes": original_sizes, + "crop_top_lefts": crop_top_lefts, + } + + # DataLoaders creation: + # for testing ips + precomputed_dataset = concatenate_datasets([precomputed_dataset for i in range(10)]) + + train_dataloader = torch.utils.data.DataLoader( + precomputed_dataset, + shuffle=True, + collate_fn=collate_fn, + batch_size=args.train_batch_size, + num_workers=args.dataloader_num_workers, + ) + + # Scheduler and math around the number of training steps. + overrode_max_train_steps = False + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + overrode_max_train_steps = True + + lr_scheduler = get_scheduler( + args.lr_scheduler, + optimizer=optimizer, + num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + ) + + # Prepare everything with our `accelerator`. + unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( + unet, optimizer, train_dataloader, lr_scheduler + ) + + if args.use_ema: + ema_unet.to(accelerator.device) + + # We need to recalculate our total training steps as the size of the training dataloader may have changed. + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if overrode_max_train_steps: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + # Afterwards we recalculate our number of training epochs + args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + + # We need to initialize the trackers we use, and also store our configuration. + # The trackers initializes automatically on the main process. + if accelerator.is_main_process: + accelerator.init_trackers("text2image-fine-tune-sdxl", config=vars(args)) + + # Function for unwrapping if torch.compile() was used in accelerate. + def unwrap_model(model): + model = accelerator.unwrap_model(model) + model = model._orig_mod if is_compiled_module(model) else model + return model + + # Train! + total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps + + logger.info("***** Running training *****") + logger.info(f" Num examples = {len(precomputed_dataset)}") + logger.info(f" Num Epochs = {args.num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") + logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {args.max_train_steps}") + global_step = 0 + first_epoch = 0 + + # Potentially load in the weights and states from a previous save + if args.resume_from_checkpoint: + if args.resume_from_checkpoint != "latest": + path = os.path.basename(args.resume_from_checkpoint) + else: + # Get the most recent checkpoint + dirs = os.listdir(args.output_dir) + dirs = [d for d in dirs if d.startswith("checkpoint")] + dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) + path = dirs[-1] if len(dirs) > 0 else None + + if path is None: + accelerator.print( + f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." + ) + args.resume_from_checkpoint = None + initial_global_step = 0 + else: + accelerator.print(f"Resuming from checkpoint {path}") + accelerator.load_state(os.path.join(args.output_dir, path)) + global_step = int(path.split("-")[1]) + + initial_global_step = global_step + first_epoch = global_step // num_update_steps_per_epoch + + else: + initial_global_step = 0 + + progress_bar = tqdm( + range(0, args.max_train_steps), + initial=initial_global_step, + desc="Steps", + # Only show the progress bar once on each machine. + disable=not accelerator.is_local_main_process, + ) + + if args.NHWC: + unet = unet.to(memory_format=torch.channels_last) + + for epoch in range(first_epoch, args.num_train_epochs): + train_loss = 0.0 + iter_start = time.time() + for step, batch in enumerate(train_dataloader): + with accelerator.accumulate(unet): + # Sample noise that we'll add to the latents + model_input = batch["model_input"].to(accelerator.device) + noise = torch.randn_like(model_input) + if args.noise_offset: + # https://www.crosslabs.org//blog/diffusion-with-offset-noise + noise += args.noise_offset * torch.randn( + (model_input.shape[0], model_input.shape[1], 1, 1), device=model_input.device + ) + + bsz = model_input.shape[0] + if args.timestep_bias_strategy == "none": + # Sample a random timestep for each image without bias. + timesteps = torch.randint( + 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device + ) + else: + # Sample a random timestep for each image, potentially biased by the timestep weights. + # Biasing the timestep weights allows us to spend less time training irrelevant timesteps. 
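
To make the biased branch below concrete, here is a self-contained stand-in for generate_timestep_weights with the "later" strategy and assumed portion/multiplier values, followed by the same multinomial draw the loop performs. (Note that the multiplier guard in generate_timestep_weights returns a ValueError instead of raising it, so an invalid multiplier only surfaces later when the returned object is used.)

import torch

# Minimal stand-in for generate_timestep_weights(args, T) with strategy="later",
# portion=0.25, multiplier=2.0 (illustrative values; the real function also
# handles "earlier" and "range").
T = 1000
weights = torch.ones(T)
num_to_bias = int(0.25 * T)       # bias the last 25% of timesteps
weights[-num_to_bias:] *= 2.0     # make late (high-noise) timesteps twice as likely
weights /= weights.sum()          # normalize to a probability distribution

# Draw a batch of timesteps the same way the training loop does.
timesteps = torch.multinomial(weights, num_samples=4, replacement=True).long()
print(timesteps)
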
+ weights = generate_timestep_weights(args, noise_scheduler.config.num_train_timesteps).to( + model_input.device + ) + timesteps = torch.multinomial(weights, bsz, replacement=True).long() + + # Add noise to the model input according to the noise magnitude at each timestep + # (this is the forward diffusion process) + noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps) + + # time ids + def compute_time_ids(original_size, crops_coords_top_left): + # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids + target_size = (args.resolution, args.resolution) + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_time_ids = torch.tensor([add_time_ids]) + add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype) + return add_time_ids + + add_time_ids = torch.cat( + [compute_time_ids(s, c) for s, c in zip(batch["original_sizes"], batch["crop_top_lefts"])] + ) + + # Predict the noise residual + unet_added_conditions = {"time_ids": add_time_ids} + prompt_embeds = batch["prompt_embeds"].to(accelerator.device) + pooled_prompt_embeds = batch["pooled_prompt_embeds"].to(accelerator.device) + unet_added_conditions.update({"text_embeds": pooled_prompt_embeds}) + + if args.NHWC: + noisy_model_input = noisy_model_input.to(memory_format=torch.channels_last) + model_pred = unet( + noisy_model_input, + timesteps, + prompt_embeds, + added_cond_kwargs=unet_added_conditions, + return_dict=False, + )[0] + + # Get the target for loss depending on the prediction type + if args.prediction_type is not None: + # set prediction_type of scheduler if defined + noise_scheduler.register_to_config(prediction_type=args.prediction_type) + + if noise_scheduler.config.prediction_type == "epsilon": + target = noise + elif noise_scheduler.config.prediction_type == "v_prediction": + target = noise_scheduler.get_velocity(model_input, noise, timesteps) + elif noise_scheduler.config.prediction_type == "sample": + # We set the target to latents here, but the model_pred will return the noise sample prediction. + target = model_input + # We will have to subtract the noise residual from the prediction to get the target sample. + model_pred = model_pred - noise + else: + raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") + + if args.snr_gamma is None: + loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") + else: + # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. + # Since we predict the noise instead of x_0, the original formulation is slightly changed. + # This is discussed in Section 4.2 of the same paper. + snr = compute_snr(noise_scheduler, timesteps) + mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( + dim=1 + )[0] + if noise_scheduler.config.prediction_type == "epsilon": + mse_loss_weights = mse_loss_weights / snr + elif noise_scheduler.config.prediction_type == "v_prediction": + mse_loss_weights = mse_loss_weights / (snr + 1) + + loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") + loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights + loss = loss.mean() + + # Gather the losses across all processes for logging (if we use distributed training). 
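
The add_noise call above is the forward diffusion step q(x_t | x_0): the clean latent is mixed with Gaussian noise according to the cumulative noise schedule at timestep t. A minimal standalone illustration with a default DDPMScheduler and dummy latents (shapes assumed, not part of the diff):

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)   # default beta schedule
latents = torch.randn(2, 4, 64, 64)                   # dummy "model_input" latents
noise = torch.randn_like(latents)
timesteps = torch.tensor([10, 900])                   # one low-noise, one high-noise sample

# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
noisy = scheduler.add_noise(latents, noise, timesteps)
print(noisy.shape)  # torch.Size([2, 4, 64, 64])
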
+ avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() + train_loss += avg_loss.item() / args.gradient_accumulation_steps + + # Backpropagate + accelerator.backward(loss) + if accelerator.sync_gradients: + params_to_clip = unet.parameters() + accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + if args.use_ema: + ema_unet.step(unet.parameters()) + progress_bar.update(1) + global_step += 1 + accelerator.log({"train_loss": train_loss}, step=global_step) + train_loss = 0.0 + iter_elapse = time.time() - iter_start + iter_start = time.time() + ips_per_device = total_batch_size / iter_elapse / accelerator.num_processes + ips_per_gpu = ips_per_device * 2 + + if accelerator.is_main_process: + if global_step % args.checkpointing_steps == 0: + # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` + if args.checkpoints_total_limit is not None: + checkpoints = os.listdir(args.output_dir) + checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] + checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) + + # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints + if len(checkpoints) >= args.checkpoints_total_limit: + num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 + removing_checkpoints = checkpoints[0:num_to_remove] + + logger.info( + f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" + ) + logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") + + for removing_checkpoint in removing_checkpoints: + removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) + shutil.rmtree(removing_checkpoint) + + save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") + + if args.NHWC: + origin_model = accelerator._models[0] + accelerator._models[0] = origin_model.to(memory_format=torch.contiguous_format) + accelerator.save_state(save_path) + accelerator._models[0] = origin_model.to(memory_format=torch.channels_last) + else: + accelerator.save_state(save_path) + + logger.info(f"Saved state to {save_path}") + + logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], + "ips_per_device": ips_per_device, "ips_per_gpu": ips_per_gpu} + progress_bar.set_postfix(**logs) + + if global_step >= args.max_train_steps: + break + + if accelerator.is_main_process: + if args.validation_prompt is not None and epoch % args.validation_epochs == 0: + logger.info( + f"Running validation... \n Generating {args.num_validation_images} images with prompt:" + f" {args.validation_prompt}." + ) + if args.use_ema: + # Store the UNet parameters temporarily and load the EMA parameters to perform inference. 
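
The EMA comment above refers to diffusers' EMAModel API: store() snapshots the live training weights, copy_to() swaps in the averaged weights for evaluation, and restore() is the usual counterpart that puts the training weights back afterwards. A minimal sketch on a toy module, assuming diffusers is installed (illustrative only):

import torch
from diffusers.training_utils import EMAModel

model = torch.nn.Linear(4, 4)
ema = EMAModel(model.parameters(), decay=0.999)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

# One training step, then fold the new weights into the running average.
loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()
ema.step(model.parameters())

# Evaluate with the averaged weights, then switch the live weights back.
ema.store(model.parameters())      # snapshot current (training) weights
ema.copy_to(model.parameters())    # load EMA weights into the model for inference
# ... run validation here ...
ema.restore(model.parameters())    # return to the training weights
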
+ ema_unet.store(unet.parameters()) + ema_unet.copy_to(unet.parameters()) + + # create pipeline + vae = AutoencoderKL.from_pretrained( + vae_path, + subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, + revision=args.revision, + variant=args.variant, + ) + pipeline = StableDiffusionXLPipeline.from_pretrained( + args.pretrained_model_name_or_path, + vae=vae, + unet=accelerator.unwrap_model(unet), + revision=args.revision, + variant=args.variant, + torch_dtype=weight_dtype, + ) + if args.prediction_type is not None: + scheduler_args = {"prediction_type": args.prediction_type} + pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args) + + pipeline = pipeline.to(accelerator.device) + pipeline.set_progress_bar_config(disable=True) + + # run inference + generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None + pipeline_args = {"prompt": args.validation_prompt} + + with torch.cuda.amp.autocast(): + images = [ + pipeline(**pipeline_args, generator=generator, num_inference_steps=25).images[0] + for _ in range(args.num_validation_images) + ] + + for tracker in accelerator.trackers: + if tracker.name == "tensorboard": + np_images = np.stack([np.asarray(img) for img in images]) + tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") + if tracker.name == "wandb": + tracker.log( + { + "validation": [ + wandb.Image(image, caption=f"{i}: {args.validation_prompt}") + for i, image in enumerate(images) + ] + } + ) + + del pipeline + torch.cuda.empty_cache() + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + unet = unwrap_model(unet) + if args.use_ema: + ema_unet.copy_to(unet.parameters()) + + # Serialize pipeline. + vae = AutoencoderKL.from_pretrained( + vae_path, + subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, + revision=args.revision, + variant=args.variant, + torch_dtype=weight_dtype, + ) + pipeline = StableDiffusionXLPipeline.from_pretrained( + args.pretrained_model_name_or_path, + unet=unet, + vae=vae, + revision=args.revision, + variant=args.variant, + torch_dtype=weight_dtype, + ) + if args.prediction_type is not None: + scheduler_args = {"prediction_type": args.prediction_type} + pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args) + pipeline.save_pretrained(args.output_dir) + + # run inference + images = [] + if args.validation_prompt and args.num_validation_images > 0: + pipeline = pipeline.to(accelerator.device) + generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None + with torch.cuda.amp.autocast(): + images = [ + pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] + for _ in range(args.num_validation_images) + ] + + for tracker in accelerator.trackers: + if tracker.name == "tensorboard": + np_images = np.stack([np.asarray(img) for img in images]) + tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC") + if tracker.name == "wandb": + tracker.log( + { + "test": [ + wandb.Image(image, caption=f"{i}: {args.validation_prompt}") + for i, image in enumerate(images) + ] + } + ) + + if args.push_to_hub: + save_model_card( + repo_id=repo_id, + images=images, + validation_prompt=args.validation_prompt, + base_model=args.pretrained_model_name_or_path, + dataset_name=args.dataset_name, + repo_folder=args.output_dir, + vae_path=args.pretrained_vae_model_name_or_path, + ) + 
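# An aside on the --NHWC path used in this script: the noisy input is moved to
# torch.channels_last, and the model tracked by the accelerator is flipped back to
# contiguous_format around accelerator.save_state, then returned to channels_last.
# A minimal round-trip, with a toy conv module standing in for the UNet:
import torch

toy_model = torch.nn.Conv2d(3, 8, kernel_size=3).to(memory_format=torch.channels_last)
x = torch.randn(1, 3, 64, 64).to(memory_format=torch.channels_last)
y = toy_model(x)  # conv kernels can exploit the NHWC layout; the output usually keeps it
toy_model = toy_model.to(memory_format=torch.contiguous_format)  # back to NCHW before saving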
upload_folder(
+ repo_id=repo_id,
+ folder_path=args.output_dir,
+ commit_message="End of training",
+ ignore_patterns=["step_*", "epoch_*"],
+ )
+
+ accelerator.end_training()
+
+
+if __name__ == "__main__":
+ args = parse_args()
+ main(args)
diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/zero2_config.yaml b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/zero2_config.yaml
new file mode 100644
index 000000000..8ffdbbd1e
--- /dev/null
+++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image/zero2_config.yaml
@@ -0,0 +1,23 @@
+compute_environment: LOCAL_MACHINE
+debug: false
+deepspeed_config:
+ gradient_accumulation_steps: 1
+ gradient_clipping: 1.0
+ offload_optimizer_device: none
+ offload_param_device: none
+ zero3_init_flag: false
+ zero_stage: 2
+distributed_type: DEEPSPEED
+downcast_bf16: 'no'
+enable_cpu_affinity: false
+machine_rank: 0
+main_training_function: main
+mixed_precision: fp16
+num_machines: 1
+num_processes: 16
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/install_diffusers.sh b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/install_diffusers.sh
new file mode 100644
index 000000000..9dab7ddf9
--- /dev/null
+++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/install_diffusers.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+cd $(dirname $(realpath "$0"))
+TARGET_DIR=${TARGET_DIR:-}
+
+PYTHON_PATH=$(which python3)
+PYTHON_DIST_PATH=${TARGET_DIR}/lib/python3/dist-packages
+
+PKG_DIR="build_pip"
+PKG_NAME="diffusers"
+
+if [[ ! -d ${PKG_DIR} ]]; then
+ echo "ERROR: Package directory ${PKG_DIR} doesn't exist"
+ exit 1
+fi
+
+latest_pkg="$(ls -t ${PKG_DIR} | grep ${PKG_NAME} | head -1)"
+if [[ "${latest_pkg}" == "" ]]; then
+ echo "ERROR: Cannot find latest ${PKG_NAME} package"
+ exit 1
+else
+ echo "INFO: Found latest package ${latest_pkg} in directory ${PKG_DIR}"
+fi
+
+if [[ "${TARGET_DIR}" != "" ]]; then
+ ${PYTHON_PATH} -m pip install --upgrade --no-deps -t ${PYTHON_DIST_PATH} ${PKG_DIR}/${latest_pkg} || exit
+ echo "${PKG_NAME} installed in ${PYTHON_DIST_PATH}; please add it to your PYTHONPATH."
+else + ${PYTHON_PATH} -m pip uninstall ${PKG_NAME} -y + ${PYTHON_PATH} -m pip install --no-deps ${PKG_DIR}/${latest_pkg} || exit +fi + +# Return 0 status if all finished +exit 0 \ No newline at end of file diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_1.5.sh b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_1.5.sh new file mode 100644 index 000000000..248f79c63 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_1.5.sh @@ -0,0 +1,32 @@ +export CLIP_FLASH_ATTN=1 +export USE_NHWC_GN=1 +export USE_IXFORMER_GEGLU=1 +export USE_APEX_LN=1 +export USE_NATIVE_ATTN=0 +export ENABLE_FLASH_ATTENTION_WITH_IXDNN=1 +echo $ENABLE_FLASH_ATTENTION_WITH_IXDNN + +export MODEL_NAME=/data/yili.li/jira_1040/stable-diffusion-v1-5 +export DATASET_NAME=/data/yili.li/jira/pokemon-blip-captions/ + +cd /data/yili.li/jira_1068/diffusers/examples/text_to_image + +accelerate launch --config_file default_config.yaml --mixed_precision="fp16" train_text_to_image.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --dataset_name=$DATASET_NAME \ + --resolution=512 \ + --seed 42 \ + --gradient_checkpointing \ + --center_crop \ + --random_flip \ + --train_batch_size=24 \ + --gradient_accumulation_steps=1 \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --lr_scheduler="constant" \ + --lr_warmup_steps=0 \ + --output_dir="sd-pokemon-model-3" \ + --max_train_steps=100 \ + --NHWC \ + --dataloader_num_workers=32 \ + --apex_fused_adam diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_1.5_single.sh b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_1.5_single.sh new file mode 100644 index 000000000..2c2efcd4e --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_1.5_single.sh @@ -0,0 +1,32 @@ +export CLIP_FLASH_ATTN=1 +export USE_NHWC_GN=1 +export USE_IXFORMER_GEGLU=1 +export USE_APEX_LN=1 +export USE_NATIVE_ATTN=0 +export ENABLE_FLASH_ATTENTION_WITH_IXDNN=1 +echo $ENABLE_FLASH_ATTENTION_WITH_IXDNN + +export MODEL_NAME=/data/yili.li/jira_1040/stable-diffusion-v1-5 +export DATASET_NAME=/data/yili.li/jira/pokemon-blip-captions/ + +cd /data/yili.li/jira_1068/diffusers/examples/text_to_image + +accelerate launch --config_file single_config.yaml --mixed_precision="fp16" train_text_to_image.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --dataset_name=$DATASET_NAME \ + --resolution=512 \ + --seed 42 \ + --gradient_checkpointing \ + --center_crop \ + --random_flip \ + --train_batch_size=32 \ + --gradient_accumulation_steps=1 \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --lr_scheduler="constant" \ + --lr_warmup_steps=0 \ + --output_dir="sd-pokemon-model-3" \ + --max_train_steps=100 \ + --dataloader_num_workers=32 \ + --NHWC \ + --apex_fused_adam \ No newline at end of file diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_2.1.sh b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_2.1.sh new file mode 100644 index 000000000..2ad523bbf --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_2.1.sh @@ -0,0 +1,32 @@ +export CLIP_FLASH_ATTN=1 +export USE_NHWC_GN=1 +export USE_IXFORMER_GEGLU=1 +export USE_APEX_LN=1 +export ENABLE_FLASH_ATTENTION_WITH_IXDNN=1 +echo $ENABLE_FLASH_ATTENTION_WITH_IXDNN + +export MODEL_NAME=/data/yili.li/jira/stabilityai/stable-diffusion-2-1 +export DATASET_NAME=/data/yili.li/jira/pokemon-blip-captions/ + +cd 
/data/yili.li/jira_1068/diffusers/examples/text_to_image + +accelerate launch --config_file default_config.yaml --mixed_precision="fp16" train_text_to_image.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --dataset_name=$DATASET_NAME \ + --resolution=512 \ + --seed 42 \ + --center_crop \ + --random_flip \ + --train_batch_size=32 \ + --gradient_accumulation_steps=1 \ + --gradient_checkpointing \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --lr_scheduler="constant" \ + --lr_warmup_steps=0 \ + --output_dir="sd-pokemon-model-3" \ + --max_train_steps=100 \ + --NHWC \ + --dataloader_num_workers=32 \ + --apex_fused_adam + # --use_ema diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_2.1_single.sh b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_2.1_single.sh new file mode 100644 index 000000000..b6fda519d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_2.1_single.sh @@ -0,0 +1,32 @@ +export CLIP_FLASH_ATTN=1 +export USE_NHWC_GN=1 +export USE_IXFORMER_GEGLU=1 +export USE_APEX_LN=1 +export ENABLE_FLASH_ATTENTION_WITH_IXDNN=1 +echo $ENABLE_FLASH_ATTENTION_WITH_IXDNN + +# export MODEL_NAME=/data/yili.li/jira/stabilityai/stable-diffusion-2-1 +# export DATASET_NAME=/data/yili.li/jira/pokemon-blip-captions/ + +cd /data/yili.li/jira_1068/diffusers/examples/text_to_image + +accelerate launch --config_file single_config.yaml --mixed_precision="fp16" train_text_to_image.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --dataset_name=$DATASET_NAME \ + --resolution=512 \ + --seed 42 \ + --center_crop \ + --random_flip \ + --train_batch_size=32 \ + --gradient_accumulation_steps=1 \ + --gradient_checkpointing \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --lr_scheduler="constant" \ + --lr_warmup_steps=0 \ + --output_dir="sd-pokemon-model-3" \ + --max_train_steps=100 \ + --NHWC \ + --dataloader_num_workers=32 \ + --apex_fused_adam + # --use_ema diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_xl.sh b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_xl.sh new file mode 100644 index 000000000..4ae95dad9 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/run_sd_xl.sh @@ -0,0 +1,37 @@ +export CLIP_FLASH_ATTN=1 +export USE_NHWC_GN=1 +export USE_IXFORMER_GEGLU=1 +export USE_APEX_LN=1 +export USE_NATIVE_ATTN=0 +export ENABLE_FLASH_ATTENTION_WITH_IXDNN=1 +echo $ENABLE_FLASH_ATTENTION_WITH_IXDNN + +export MODEL_NAME=/data/yili.li/jira_1040/stable-diffusion-xl-base-1.0 +export DATASET_NAME=/data/yili.li/jira/pokemon-blip-captions/ +export VAE_NAME=/data/yili.li/jira_1040/sdxl-vae-fp16-fix + +cd /data/yili.li/jira_1068/diffusers/examples/text_to_image + +accelerate launch --config_file zero2_config.yaml --mixed_precision="fp16" train_text_to_image_sdxl.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --pretrained_vae_model_name_or_path=$VAE_NAME \ + --dataset_name=$DATASET_NAME \ + --resolution=512 \ + --seed 42 \ + --gradient_checkpointing \ + --center_crop \ + --random_flip \ + --train_batch_size=32 \ + --gradient_accumulation_steps=1 \ + --gradient_checkpointing \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --lr_scheduler="constant" \ + --lr_warmup_steps=0 \ + --output_dir="sd-pokemon-model-3" \ + --max_train_steps=100 \ + --dataloader_num_workers=32 \ + --NHWC \ + --apex_fused_adam + # --use_ema + diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/setup.py 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/setup.py new file mode 100644 index 000000000..5f33982ae --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/setup.py @@ -0,0 +1,304 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/main/setup.py + +To create the package for pypi. + +1. Run `make pre-release` (or `make pre-patch` for a patch release) then run `make fix-copies` to fix the index of the + documentation. + + If releasing on a special branch, copy the updated README.md on the main branch for your the commit you will make + for the post-release and run `make fix-copies` on the main branch as well. + +2. Run Tests for Amazon Sagemaker. The documentation is located in `./tests/sagemaker/README.md`, otherwise @philschmid. + +3. Unpin specific versions from setup.py that use a git install. + +4. Checkout the release branch (v-release, for example v4.19-release), and commit these changes with the + message: "Release: " and push. + +5. Wait for the tests on main to be completed and be green (otherwise revert and fix bugs) + +6. Add a tag in git to mark the release: "git tag v -m 'Adds tag v for pypi' " + Push the tag to git: git push --tags origin v-release + +7. Build both the sources and the wheel. Do not change anything in setup.py between + creating the wheel and the source distribution (obviously). + + For the wheel, run: "python setup.py bdist_wheel" in the top level directory. + (this will build a wheel for the python version you use to build it). + + For the sources, run: "python setup.py sdist" + You should now have a /dist directory with both .whl and .tar.gz source versions. + + Long story cut short, you need to run both before you can upload the distribution to the + test pypi and the actual pypi servers: + + python setup.py bdist_wheel && python setup.py sdist + +8. Check that everything looks correct by uploading the package to the pypi test server: + + twine upload dist/* -r pypitest + (pypi suggest using twine as other methods upload files via plaintext.) 
+ You may have to specify the repository url, use the following command then: + twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/ + + Check that you can install it in a virtualenv by running: + pip install -i https://testpypi.python.org/pypi diffusers + + If you are testing from a Colab Notebook, for instance, then do: + pip install diffusers && pip uninstall diffusers + pip install -i https://testpypi.python.org/pypi diffusers + + Check you can run the following commands: + python -c "python -c "from diffusers import __version__; print(__version__)" + python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('fusing/unet-ldm-dummy-update'); pipe()" + python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None); pipe('ah suh du')" + python -c "from diffusers import *" + +9. Upload the final version to actual pypi: + twine upload dist/* -r pypi + +10. Prepare the release notes and publish them on github once everything is looking hunky-dory. + +11. Run `make post-release` (or, for a patch release, `make post-patch`). If you were on a branch for the release, + you need to go back to main before executing this. +""" + +import os +import re +from distutils.core import Command + +from setuptools import find_packages, setup + + +# IMPORTANT: +# 1. all dependencies should be listed here with their version requirements if any +# 2. once modified, run: `make deps_table_update` to update src/diffusers/dependency_versions_table.py +_deps = [ + "Pillow", # keep the PIL.Image.Resampling deprecation away + "accelerate>=0.11.0", + "compel==0.1.8", + "black~=23.1", + "datasets", + "filelock", + "flax>=0.4.1", + "hf-doc-builder>=0.3.0", + "huggingface-hub>=0.13.2", + "requests-mock==1.10.0", + "importlib_metadata", + "invisible-watermark>=0.2.0", + "isort>=5.5.4", + "jax>=0.4.1", + "jaxlib>=0.4.1", + "Jinja2", + "k-diffusion>=0.0.12", + "torchsde", + "note_seq", + "librosa", + "numpy", + "omegaconf", + "parameterized", + "protobuf>=3.20.3,<4", + "pytest", + "pytest-timeout", + "pytest-xdist", + "ruff==0.0.280", + "safetensors>=0.3.1", + "sentencepiece>=0.1.91,!=0.1.92", + "scipy", + "onnx", + "regex!=2019.12.17", + "requests", + "tensorboard", + "torch>=1.4", + "torchvision", + "transformers>=4.25.1", + "urllib3<=2.0.0", +] + +# this is a lookup table with items like: +# +# tokenizers: "huggingface-hub==0.8.0" +# packaging: "packaging" +# +# some of the values are versioned whereas others aren't. +deps = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)} + +# since we save this data in src/diffusers/dependency_versions_table.py it can be easily accessed from +# anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with: +# +# python -c 'import sys; from diffusers.dependency_versions_table import deps; \ +# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets +# +# Just pass the desired package names to that script as it's shown with 2 packages above. 
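# An illustrative check of what the lookup-table comprehension above produces for two
# sample requirement strings (the expected result is shown in the trailing comment;
# `_sample` and `sample_deps` are throwaway names, not part of this file):
import re

_sample = ["accelerate>=0.11.0", "Jinja2"]
sample_deps = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _sample)}
# sample_deps == {"accelerate": "accelerate>=0.11.0", "Jinja2": "Jinja2"}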
+# +# If diffusers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above +# +# You can then feed this for example to `pip`: +# +# pip install -U $(python -c 'import sys; from diffusers.dependency_versions_table import deps; \ +# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets) +# + + +def deps_list(*pkgs): + return [deps[pkg] for pkg in pkgs] + + +class DepsTableUpdateCommand(Command): + """ + A custom distutils command that updates the dependency table. + usage: python setup.py deps_table_update + """ + + description = "build runtime dependency table" + user_options = [ + # format: (long option, short option, description). + ("dep-table-update", None, "updates src/diffusers/dependency_versions_table.py"), + ] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + + def run(self): + entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()]) + content = [ + "# THIS FILE HAS BEEN AUTOGENERATED. To update:", + "# 1. modify the `_deps` dict in setup.py", + "# 2. run `make deps_table_update``", + "deps = {", + entries, + "}", + "", + ] + target = "src/diffusers/dependency_versions_table.py" + print(f"updating {target}") + with open(target, "w", encoding="utf-8", newline="\n") as f: + f.write("\n".join(content)) + + +extras = {} + + +extras = {} +extras["quality"] = deps_list("urllib3", "black", "isort", "ruff", "hf-doc-builder") +extras["docs"] = deps_list("hf-doc-builder") +extras["training"] = deps_list("accelerate", "datasets", "protobuf", "tensorboard", "Jinja2") +extras["test"] = deps_list( + "compel", + "datasets", + "Jinja2", + "invisible-watermark", + "k-diffusion", + "librosa", + "omegaconf", + "parameterized", + "pytest", + "pytest-timeout", + "pytest-xdist", + "requests-mock", + "safetensors", + "sentencepiece", + "scipy", + "torchvision", + "transformers", +) +extras["torch"] = deps_list("torch", "accelerate") + +if os.name == "nt": # windows + extras["flax"] = [] # jax is not supported on windows +else: + extras["flax"] = deps_list("jax", "jaxlib", "flax") + +extras["dev"] = ( + extras["quality"] + extras["test"] + extras["training"] + extras["docs"] + extras["torch"] + extras["flax"] +) + +install_requires = [ + deps["importlib_metadata"], + deps["filelock"], + deps["huggingface-hub"], + deps["numpy"], + deps["regex"], + deps["requests"], + deps["safetensors"], + deps["Pillow"], +] +LOCAL_VERSION_IDENTIFIER = os.getenv("LOCAL_VERSION_IDENTIFIER", "") +if LOCAL_VERSION_IDENTIFIER: + LOCAL_VERSION_IDENTIFIER = "+" + LOCAL_VERSION_IDENTIFIER + +version="0.22.0" + +version = version + LOCAL_VERSION_IDENTIFIER + +setup( + name="diffusers", + version=version, # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) + description="State-of-the-art diffusion in PyTorch and JAX.", + long_description=open("README.md", "r", encoding="utf-8").read(), + long_description_content_type="text/markdown", + keywords="deep learning diffusion jax pytorch stable diffusion audioldm", + license="Apache", + author="The HuggingFace team", + author_email="patrick@huggingface.co", + url="https://github.com/huggingface/diffusers", + package_dir={"": "src"}, + packages=find_packages("src"), + package_data={"diffusers": ["py.typed"]}, + include_package_data=True, + python_requires=">=3.8.0", + install_requires=list(install_requires), + extras_require=extras, + entry_points={"console_scripts": ["diffusers-cli=diffusers.commands.diffusers_cli:main"]}, + 
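# Note on the console_scripts entry point above: installing this package creates a
# `diffusers-cli` executable that dispatches to diffusers.commands.diffusers_cli:main
# (the CLI module added later in this patch), so e.g. `diffusers-cli env` prints the
# environment report requested in bug reports.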
classifiers=[ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + ], + cmdclass={"deps_table_update": DepsTableUpdateCommand}, +) + +# Release checklist +# 1. Change the version in __init__.py and setup.py. +# 2. Commit these changes with the message: "Release: Release" +# 3. Add a tag in git to mark the release: "git tag RELEASE -m 'Adds tag RELEASE for pypi' " +# Push the tag to git: git push --tags origin main +# 4. Run the following commands in the top-level directory: +# python setup.py bdist_wheel +# python setup.py sdist +# 5. Upload the package to the pypi test server first: +# twine upload dist/* -r pypitest +# twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/ +# 6. Check that you can install it in a virtualenv by running: +# pip install -i https://testpypi.python.org/pypi diffusers +# diffusers env +# diffusers test +# 7. Upload the final version to actual pypi: +# twine upload dist/* -r pypi +# 8. Add release notes to the tag in github once everything is looking hunky-dory. +# 9. Update the version in __init__.py, setup.py to the new version "-dev" and push to master \ No newline at end of file diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/__init__.py new file mode 100644 index 000000000..2f258e9fb --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/__init__.py @@ -0,0 +1,787 @@ +__version__ = "0.27.0" + +from typing import TYPE_CHECKING + +from .utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_k_diffusion_available, + is_librosa_available, + is_note_seq_available, + is_onnx_available, + is_scipy_available, + is_torch_available, + is_torchsde_available, + is_transformers_available, +) + + +# Lazy Import based on +# https://github.com/huggingface/transformers/blob/main/src/transformers/__init__.py + +# When adding a new object to this init, please add it to `_import_structure`. The `_import_structure` is a dictionary submodule to list of object names, +# and is used to defer the actual importing for when the objects are requested. +# This way `import diffusers` provides the names in the namespace without actually importing anything (and especially none of the backends). 
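# A minimal sketch of the lazy-import pattern described above, using a PEP 562
# module-level __getattr__ instead of the _LazyModule helper installed at the bottom of
# this file; the module and symbol names here are placeholders:
import importlib

_lazy_structure = {"models": ["AutoencoderKL"], "schedulers": ["DDPMScheduler"]}
_symbol_to_module = {name: mod for mod, names in _lazy_structure.items() for name in names}

def __getattr__(name):
    # Import the owning submodule only when one of its symbols is first requested.
    if name in _symbol_to_module:
        module = importlib.import_module(f".{_symbol_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")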
+ +_import_structure = { + "configuration_utils": ["ConfigMixin"], + "models": [], + "pipelines": [], + "schedulers": [], + "utils": [ + "OptionalDependencyNotAvailable", + "is_flax_available", + "is_inflect_available", + "is_invisible_watermark_available", + "is_k_diffusion_available", + "is_k_diffusion_version", + "is_librosa_available", + "is_note_seq_available", + "is_onnx_available", + "is_scipy_available", + "is_torch_available", + "is_torchsde_available", + "is_transformers_available", + "is_transformers_version", + "is_unidecode_available", + "logging", + ], +} + +try: + if not is_onnx_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_onnx_objects # noqa F403 + + _import_structure["utils.dummy_onnx_objects"] = [ + name for name in dir(dummy_onnx_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend(["OnnxRuntimeModel"]) + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_pt_objects # noqa F403 + + _import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")] + +else: + _import_structure["models"].extend( + [ + "AsymmetricAutoencoderKL", + "AutoencoderKL", + "AutoencoderKLTemporalDecoder", + "AutoencoderTiny", + "ConsistencyDecoderVAE", + "ControlNetModel", + "I2VGenXLUNet", + "Kandinsky3UNet", + "ModelMixin", + "MotionAdapter", + "MultiAdapter", + "PriorTransformer", + "StableCascadeUNet", + "T2IAdapter", + "T5FilmDecoder", + "Transformer2DModel", + "UNet1DModel", + "UNet2DConditionModel", + "UNet2DModel", + "UNet3DConditionModel", + "UNetMotionModel", + "UNetSpatioTemporalConditionModel", + "UVit2DModel", + "VQModel", + ] + ) + + _import_structure["optimization"] = [ + "get_constant_schedule", + "get_constant_schedule_with_warmup", + "get_cosine_schedule_with_warmup", + "get_cosine_with_hard_restarts_schedule_with_warmup", + "get_linear_schedule_with_warmup", + "get_polynomial_decay_schedule_with_warmup", + "get_scheduler", + ] + _import_structure["pipelines"].extend( + [ + "AudioPipelineOutput", + "AutoPipelineForImage2Image", + "AutoPipelineForInpainting", + "AutoPipelineForText2Image", + "ConsistencyModelPipeline", + "DanceDiffusionPipeline", + "DDIMPipeline", + "DDPMPipeline", + "DiffusionPipeline", + "DiTPipeline", + "ImagePipelineOutput", + "KarrasVePipeline", + "LDMPipeline", + "LDMSuperResolutionPipeline", + "PNDMPipeline", + "RePaintPipeline", + "ScoreSdeVePipeline", + "StableDiffusionMixin", + ] + ) + _import_structure["schedulers"].extend( + [ + "AmusedScheduler", + "CMStochasticIterativeScheduler", + "DDIMInverseScheduler", + "DDIMParallelScheduler", + "DDIMScheduler", + "DDPMParallelScheduler", + "DDPMScheduler", + "DDPMWuerstchenScheduler", + "DEISMultistepScheduler", + "DPMSolverMultistepInverseScheduler", + "DPMSolverMultistepScheduler", + "DPMSolverSinglestepScheduler", + "EDMDPMSolverMultistepScheduler", + "EDMEulerScheduler", + "EulerAncestralDiscreteScheduler", + "EulerDiscreteScheduler", + "HeunDiscreteScheduler", + "IPNDMScheduler", + "KarrasVeScheduler", + "KDPM2AncestralDiscreteScheduler", + "KDPM2DiscreteScheduler", + "LCMScheduler", + "PNDMScheduler", + "RePaintScheduler", + "SASolverScheduler", + "SchedulerMixin", + "ScoreSdeVeScheduler", + "TCDScheduler", + "UnCLIPScheduler", + "UniPCMultistepScheduler", + "VQDiffusionScheduler", + ] + ) + _import_structure["training_utils"] = ["EMAModel"] + +try: + if 
not (is_torch_available() and is_scipy_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_scipy_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_scipy_objects"] = [ + name for name in dir(dummy_torch_and_scipy_objects) if not name.startswith("_") + ] + +else: + _import_structure["schedulers"].extend(["LMSDiscreteScheduler"]) + +try: + if not (is_torch_available() and is_torchsde_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_torchsde_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_torchsde_objects"] = [ + name for name in dir(dummy_torch_and_torchsde_objects) if not name.startswith("_") + ] + +else: + _import_structure["schedulers"].extend(["DPMSolverSDEScheduler"]) + +try: + if not (is_torch_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_transformers_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_transformers_objects"] = [ + name for name in dir(dummy_torch_and_transformers_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend( + [ + "AltDiffusionImg2ImgPipeline", + "AltDiffusionPipeline", + "AmusedImg2ImgPipeline", + "AmusedInpaintPipeline", + "AmusedPipeline", + "AnimateDiffPipeline", + "AnimateDiffVideoToVideoPipeline", + "AudioLDM2Pipeline", + "AudioLDM2ProjectionModel", + "AudioLDM2UNet2DConditionModel", + "AudioLDMPipeline", + "BlipDiffusionControlNetPipeline", + "BlipDiffusionPipeline", + "CLIPImageProjection", + "CycleDiffusionPipeline", + "I2VGenXLPipeline", + "IFImg2ImgPipeline", + "IFImg2ImgSuperResolutionPipeline", + "IFInpaintingPipeline", + "IFInpaintingSuperResolutionPipeline", + "IFPipeline", + "IFSuperResolutionPipeline", + "ImageTextPipelineOutput", + "Kandinsky3Img2ImgPipeline", + "Kandinsky3Pipeline", + "KandinskyCombinedPipeline", + "KandinskyImg2ImgCombinedPipeline", + "KandinskyImg2ImgPipeline", + "KandinskyInpaintCombinedPipeline", + "KandinskyInpaintPipeline", + "KandinskyPipeline", + "KandinskyPriorPipeline", + "KandinskyV22CombinedPipeline", + "KandinskyV22ControlnetImg2ImgPipeline", + "KandinskyV22ControlnetPipeline", + "KandinskyV22Img2ImgCombinedPipeline", + "KandinskyV22Img2ImgPipeline", + "KandinskyV22InpaintCombinedPipeline", + "KandinskyV22InpaintPipeline", + "KandinskyV22Pipeline", + "KandinskyV22PriorEmb2EmbPipeline", + "KandinskyV22PriorPipeline", + "LatentConsistencyModelImg2ImgPipeline", + "LatentConsistencyModelPipeline", + "LDMTextToImagePipeline", + "LEditsPPPipelineStableDiffusion", + "LEditsPPPipelineStableDiffusionXL", + "MusicLDMPipeline", + "PaintByExamplePipeline", + "PIAPipeline", + "PixArtAlphaPipeline", + "SemanticStableDiffusionPipeline", + "ShapEImg2ImgPipeline", + "ShapEPipeline", + "StableCascadeCombinedPipeline", + "StableCascadeDecoderPipeline", + "StableCascadePriorPipeline", + "StableDiffusionAdapterPipeline", + "StableDiffusionAttendAndExcitePipeline", + "StableDiffusionControlNetImg2ImgPipeline", + "StableDiffusionControlNetInpaintPipeline", + "StableDiffusionControlNetPipeline", + "StableDiffusionDepth2ImgPipeline", + "StableDiffusionDiffEditPipeline", + "StableDiffusionGLIGENPipeline", + "StableDiffusionGLIGENTextImagePipeline", + "StableDiffusionImageVariationPipeline", + "StableDiffusionImg2ImgPipeline", + "StableDiffusionInpaintPipeline", + 
"StableDiffusionInpaintPipelineLegacy", + "StableDiffusionInstructPix2PixPipeline", + "StableDiffusionLatentUpscalePipeline", + "StableDiffusionLDM3DPipeline", + "StableDiffusionModelEditingPipeline", + "StableDiffusionPanoramaPipeline", + "StableDiffusionParadigmsPipeline", + "StableDiffusionPipeline", + "StableDiffusionPipelineSafe", + "StableDiffusionPix2PixZeroPipeline", + "StableDiffusionSAGPipeline", + "StableDiffusionUpscalePipeline", + "StableDiffusionXLAdapterPipeline", + "StableDiffusionXLControlNetImg2ImgPipeline", + "StableDiffusionXLControlNetInpaintPipeline", + "StableDiffusionXLControlNetPipeline", + "StableDiffusionXLImg2ImgPipeline", + "StableDiffusionXLInpaintPipeline", + "StableDiffusionXLInstructPix2PixPipeline", + "StableDiffusionXLPipeline", + "StableUnCLIPImg2ImgPipeline", + "StableUnCLIPPipeline", + "StableVideoDiffusionPipeline", + "TextToVideoSDPipeline", + "TextToVideoZeroPipeline", + "TextToVideoZeroSDXLPipeline", + "UnCLIPImageVariationPipeline", + "UnCLIPPipeline", + "UniDiffuserModel", + "UniDiffuserPipeline", + "UniDiffuserTextDecoder", + "VersatileDiffusionDualGuidedPipeline", + "VersatileDiffusionImageVariationPipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionTextToImagePipeline", + "VideoToVideoSDPipeline", + "VQDiffusionPipeline", + "WuerstchenCombinedPipeline", + "WuerstchenDecoderPipeline", + "WuerstchenPriorPipeline", + ] + ) + +try: + if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_transformers_and_k_diffusion_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_transformers_and_k_diffusion_objects"] = [ + name for name in dir(dummy_torch_and_transformers_and_k_diffusion_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend(["StableDiffusionKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline"]) + +try: + if not (is_torch_available() and is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_transformers_and_onnx_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_transformers_and_onnx_objects"] = [ + name for name in dir(dummy_torch_and_transformers_and_onnx_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend( + [ + "OnnxStableDiffusionImg2ImgPipeline", + "OnnxStableDiffusionInpaintPipeline", + "OnnxStableDiffusionInpaintPipelineLegacy", + "OnnxStableDiffusionPipeline", + "OnnxStableDiffusionUpscalePipeline", + "StableDiffusionOnnxPipeline", + ] + ) + +try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_librosa_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_librosa_objects"] = [ + name for name in dir(dummy_torch_and_librosa_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend(["AudioDiffusionPipeline", "Mel"]) + +try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 + + _import_structure["utils.dummy_transformers_and_torch_and_note_seq_objects"] = [ + name for name in 
dir(dummy_transformers_and_torch_and_note_seq_objects) if not name.startswith("_") + ] + + +else: + _import_structure["pipelines"].extend(["SpectrogramDiffusionPipeline"]) + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_flax_objects # noqa F403 + + _import_structure["utils.dummy_flax_objects"] = [ + name for name in dir(dummy_flax_objects) if not name.startswith("_") + ] + + +else: + _import_structure["models.controlnet_flax"] = ["FlaxControlNetModel"] + _import_structure["models.modeling_flax_utils"] = ["FlaxModelMixin"] + _import_structure["models.unets.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"] + _import_structure["models.vae_flax"] = ["FlaxAutoencoderKL"] + _import_structure["pipelines"].extend(["FlaxDiffusionPipeline"]) + _import_structure["schedulers"].extend( + [ + "FlaxDDIMScheduler", + "FlaxDDPMScheduler", + "FlaxDPMSolverMultistepScheduler", + "FlaxEulerDiscreteScheduler", + "FlaxKarrasVeScheduler", + "FlaxLMSDiscreteScheduler", + "FlaxPNDMScheduler", + "FlaxSchedulerMixin", + "FlaxScoreSdeVeScheduler", + ] + ) + + +try: + if not (is_flax_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_flax_and_transformers_objects # noqa F403 + + _import_structure["utils.dummy_flax_and_transformers_objects"] = [ + name for name in dir(dummy_flax_and_transformers_objects) if not name.startswith("_") + ] + + +else: + _import_structure["pipelines"].extend( + [ + "FlaxStableDiffusionControlNetPipeline", + "FlaxStableDiffusionImg2ImgPipeline", + "FlaxStableDiffusionInpaintPipeline", + "FlaxStableDiffusionPipeline", + "FlaxStableDiffusionXLPipeline", + ] + ) + +try: + if not (is_note_seq_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_note_seq_objects # noqa F403 + + _import_structure["utils.dummy_note_seq_objects"] = [ + name for name in dir(dummy_note_seq_objects) if not name.startswith("_") + ] + + +else: + _import_structure["pipelines"].extend(["MidiProcessor"]) + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .configuration_utils import ConfigMixin + + try: + if not is_onnx_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_onnx_objects import * # noqa F403 + else: + from .pipelines import OnnxRuntimeModel + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_pt_objects import * # noqa F403 + else: + from .models import ( + AsymmetricAutoencoderKL, + AutoencoderKL, + AutoencoderKLTemporalDecoder, + AutoencoderTiny, + ConsistencyDecoderVAE, + ControlNetModel, + I2VGenXLUNet, + Kandinsky3UNet, + ModelMixin, + MotionAdapter, + MultiAdapter, + PriorTransformer, + T2IAdapter, + T5FilmDecoder, + Transformer2DModel, + UNet1DModel, + UNet2DConditionModel, + UNet2DModel, + UNet3DConditionModel, + UNetMotionModel, + UNetSpatioTemporalConditionModel, + UVit2DModel, + VQModel, + ) + from .optimization import ( + get_constant_schedule, + get_constant_schedule_with_warmup, + get_cosine_schedule_with_warmup, + get_cosine_with_hard_restarts_schedule_with_warmup, + get_linear_schedule_with_warmup, + get_polynomial_decay_schedule_with_warmup, + get_scheduler, + ) + from .pipelines import ( + AudioPipelineOutput, + AutoPipelineForImage2Image, + AutoPipelineForInpainting, + 
AutoPipelineForText2Image, + BlipDiffusionControlNetPipeline, + BlipDiffusionPipeline, + CLIPImageProjection, + ConsistencyModelPipeline, + DanceDiffusionPipeline, + DDIMPipeline, + DDPMPipeline, + DiffusionPipeline, + DiTPipeline, + ImagePipelineOutput, + KarrasVePipeline, + LDMPipeline, + LDMSuperResolutionPipeline, + PNDMPipeline, + RePaintPipeline, + ScoreSdeVePipeline, + StableDiffusionMixin, + ) + from .schedulers import ( + AmusedScheduler, + CMStochasticIterativeScheduler, + DDIMInverseScheduler, + DDIMParallelScheduler, + DDIMScheduler, + DDPMParallelScheduler, + DDPMScheduler, + DDPMWuerstchenScheduler, + DEISMultistepScheduler, + DPMSolverMultistepInverseScheduler, + DPMSolverMultistepScheduler, + DPMSolverSinglestepScheduler, + EDMDPMSolverMultistepScheduler, + EDMEulerScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + HeunDiscreteScheduler, + IPNDMScheduler, + KarrasVeScheduler, + KDPM2AncestralDiscreteScheduler, + KDPM2DiscreteScheduler, + LCMScheduler, + PNDMScheduler, + RePaintScheduler, + SASolverScheduler, + SchedulerMixin, + ScoreSdeVeScheduler, + TCDScheduler, + UnCLIPScheduler, + UniPCMultistepScheduler, + VQDiffusionScheduler, + ) + from .training_utils import EMAModel + + try: + if not (is_torch_available() and is_scipy_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_scipy_objects import * # noqa F403 + else: + from .schedulers import LMSDiscreteScheduler + + try: + if not (is_torch_available() and is_torchsde_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 + else: + from .schedulers import DPMSolverSDEScheduler + + try: + if not (is_torch_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipelines import ( + AltDiffusionImg2ImgPipeline, + AltDiffusionPipeline, + AmusedImg2ImgPipeline, + AmusedInpaintPipeline, + AmusedPipeline, + AnimateDiffPipeline, + AnimateDiffVideoToVideoPipeline, + AudioLDM2Pipeline, + AudioLDM2ProjectionModel, + AudioLDM2UNet2DConditionModel, + AudioLDMPipeline, + CLIPImageProjection, + CycleDiffusionPipeline, + I2VGenXLPipeline, + IFImg2ImgPipeline, + IFImg2ImgSuperResolutionPipeline, + IFInpaintingPipeline, + IFInpaintingSuperResolutionPipeline, + IFPipeline, + IFSuperResolutionPipeline, + ImageTextPipelineOutput, + Kandinsky3Img2ImgPipeline, + Kandinsky3Pipeline, + KandinskyCombinedPipeline, + KandinskyImg2ImgCombinedPipeline, + KandinskyImg2ImgPipeline, + KandinskyInpaintCombinedPipeline, + KandinskyInpaintPipeline, + KandinskyPipeline, + KandinskyPriorPipeline, + KandinskyV22CombinedPipeline, + KandinskyV22ControlnetImg2ImgPipeline, + KandinskyV22ControlnetPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22Img2ImgPipeline, + KandinskyV22InpaintCombinedPipeline, + KandinskyV22InpaintPipeline, + KandinskyV22Pipeline, + KandinskyV22PriorEmb2EmbPipeline, + KandinskyV22PriorPipeline, + LatentConsistencyModelImg2ImgPipeline, + LatentConsistencyModelPipeline, + LDMTextToImagePipeline, + LEditsPPPipelineStableDiffusion, + LEditsPPPipelineStableDiffusionXL, + MusicLDMPipeline, + PaintByExamplePipeline, + PIAPipeline, + PixArtAlphaPipeline, + SemanticStableDiffusionPipeline, + ShapEImg2ImgPipeline, + ShapEPipeline, + StableCascadeCombinedPipeline, + 
StableCascadeDecoderPipeline, + StableCascadePriorPipeline, + StableDiffusionAdapterPipeline, + StableDiffusionAttendAndExcitePipeline, + StableDiffusionControlNetImg2ImgPipeline, + StableDiffusionControlNetInpaintPipeline, + StableDiffusionControlNetPipeline, + StableDiffusionDepth2ImgPipeline, + StableDiffusionDiffEditPipeline, + StableDiffusionGLIGENPipeline, + StableDiffusionGLIGENTextImagePipeline, + StableDiffusionImageVariationPipeline, + StableDiffusionImg2ImgPipeline, + StableDiffusionInpaintPipeline, + StableDiffusionInpaintPipelineLegacy, + StableDiffusionInstructPix2PixPipeline, + StableDiffusionLatentUpscalePipeline, + StableDiffusionLDM3DPipeline, + StableDiffusionModelEditingPipeline, + StableDiffusionPanoramaPipeline, + StableDiffusionParadigmsPipeline, + StableDiffusionPipeline, + StableDiffusionPipelineSafe, + StableDiffusionPix2PixZeroPipeline, + StableDiffusionSAGPipeline, + StableDiffusionUpscalePipeline, + StableDiffusionXLAdapterPipeline, + StableDiffusionXLControlNetImg2ImgPipeline, + StableDiffusionXLControlNetInpaintPipeline, + StableDiffusionXLControlNetPipeline, + StableDiffusionXLImg2ImgPipeline, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLInstructPix2PixPipeline, + StableDiffusionXLPipeline, + StableUnCLIPImg2ImgPipeline, + StableUnCLIPPipeline, + StableVideoDiffusionPipeline, + TextToVideoSDPipeline, + TextToVideoZeroPipeline, + TextToVideoZeroSDXLPipeline, + UnCLIPImageVariationPipeline, + UnCLIPPipeline, + UniDiffuserModel, + UniDiffuserPipeline, + UniDiffuserTextDecoder, + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + VideoToVideoSDPipeline, + VQDiffusionPipeline, + WuerstchenCombinedPipeline, + WuerstchenDecoderPipeline, + WuerstchenPriorPipeline, + ) + + try: + if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 + else: + from .pipelines import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline + + try: + if not (is_torch_available() and is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 + else: + from .pipelines import ( + OnnxStableDiffusionImg2ImgPipeline, + OnnxStableDiffusionInpaintPipeline, + OnnxStableDiffusionInpaintPipelineLegacy, + OnnxStableDiffusionPipeline, + OnnxStableDiffusionUpscalePipeline, + StableDiffusionOnnxPipeline, + ) + + try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_librosa_objects import * # noqa F403 + else: + from .pipelines import AudioDiffusionPipeline, Mel + + try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 + else: + from .pipelines import SpectrogramDiffusionPipeline + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_flax_objects import * # noqa F403 + else: + from 
.models.controlnet_flax import FlaxControlNetModel + from .models.modeling_flax_utils import FlaxModelMixin + from .models.unets.unet_2d_condition_flax import FlaxUNet2DConditionModel + from .models.vae_flax import FlaxAutoencoderKL + from .pipelines import FlaxDiffusionPipeline + from .schedulers import ( + FlaxDDIMScheduler, + FlaxDDPMScheduler, + FlaxDPMSolverMultistepScheduler, + FlaxEulerDiscreteScheduler, + FlaxKarrasVeScheduler, + FlaxLMSDiscreteScheduler, + FlaxPNDMScheduler, + FlaxSchedulerMixin, + FlaxScoreSdeVeScheduler, + ) + + try: + if not (is_flax_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_flax_and_transformers_objects import * # noqa F403 + else: + from .pipelines import ( + FlaxStableDiffusionControlNetPipeline, + FlaxStableDiffusionImg2ImgPipeline, + FlaxStableDiffusionInpaintPipeline, + FlaxStableDiffusionPipeline, + FlaxStableDiffusionXLPipeline, + ) + + try: + if not (is_note_seq_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_note_seq_objects import * # noqa F403 + else: + from .pipelines import MidiProcessor + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + extra_objects={"__version__": __version__}, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/__init__.py new file mode 100644 index 000000000..8208283f6 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/__init__.py @@ -0,0 +1,27 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from abc import ABC, abstractmethod +from argparse import ArgumentParser + + +class BaseDiffusersCLICommand(ABC): + @staticmethod + @abstractmethod + def register_subcommand(parser: ArgumentParser): + raise NotImplementedError() + + @abstractmethod + def run(self): + raise NotImplementedError() diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/diffusers_cli.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/diffusers_cli.py new file mode 100644 index 000000000..f582c3bcd --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/diffusers_cli.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from argparse import ArgumentParser + +from .env import EnvironmentCommand +from .fp16_safetensors import FP16SafetensorsCommand + + +def main(): + parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli []") + commands_parser = parser.add_subparsers(help="diffusers-cli command helpers") + + # Register commands + EnvironmentCommand.register_subcommand(commands_parser) + FP16SafetensorsCommand.register_subcommand(commands_parser) + + # Let's go + args = parser.parse_args() + + if not hasattr(args, "func"): + parser.print_help() + exit(1) + + # Run + service = args.func(args) + service.run() + + +if __name__ == "__main__": + main() diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/env.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/env.py new file mode 100644 index 000000000..baa69b361 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/env.py @@ -0,0 +1,84 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import platform +from argparse import ArgumentParser + +import huggingface_hub + +from .. import __version__ as version +from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available +from . 
import BaseDiffusersCLICommand + + +def info_command_factory(_): + return EnvironmentCommand() + + +class EnvironmentCommand(BaseDiffusersCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + download_parser = parser.add_parser("env") + download_parser.set_defaults(func=info_command_factory) + + def run(self): + hub_version = huggingface_hub.__version__ + + pt_version = "not installed" + pt_cuda_available = "NA" + if is_torch_available(): + import torch + + pt_version = torch.__version__ + pt_cuda_available = torch.cuda.is_available() + + transformers_version = "not installed" + if is_transformers_available(): + import transformers + + transformers_version = transformers.__version__ + + accelerate_version = "not installed" + if is_accelerate_available(): + import accelerate + + accelerate_version = accelerate.__version__ + + xformers_version = "not installed" + if is_xformers_available(): + import xformers + + xformers_version = xformers.__version__ + + info = { + "`diffusers` version": version, + "Platform": platform.platform(), + "Python version": platform.python_version(), + "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})", + "Huggingface_hub version": hub_version, + "Transformers version": transformers_version, + "Accelerate version": accelerate_version, + "xFormers version": xformers_version, + "Using GPU in script?": "", + "Using distributed or parallel set-up in script?": "", + } + + print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n") + print(self.format_dict(info)) + + return info + + @staticmethod + def format_dict(d): + return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n" diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/fp16_safetensors.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/fp16_safetensors.py new file mode 100644 index 000000000..b26b8816b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/commands/fp16_safetensors.py @@ -0,0 +1,132 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Usage example: + diffusers-cli fp16_safetensors --ckpt_id=openai/shap-e --fp16 --use_safetensors +""" + +import glob +import json +import warnings +from argparse import ArgumentParser, Namespace +from importlib import import_module + +import huggingface_hub +import torch +from huggingface_hub import hf_hub_download +from packaging import version + +from ..utils import logging +from . import BaseDiffusersCLICommand + + +def conversion_command_factory(args: Namespace): + if args.use_auth_token: + warnings.warn( + "The `--use_auth_token` flag is deprecated and will be removed in a future version. Authentication is now" + " handled automatically if user is logged in." 
+ ) + return FP16SafetensorsCommand(args.ckpt_id, args.fp16, args.use_safetensors) + + +class FP16SafetensorsCommand(BaseDiffusersCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + conversion_parser = parser.add_parser("fp16_safetensors") + conversion_parser.add_argument( + "--ckpt_id", + type=str, + help="Repo id of the checkpoints on which to run the conversion. Example: 'openai/shap-e'.", + ) + conversion_parser.add_argument( + "--fp16", action="store_true", help="If serializing the variables in FP16 precision." + ) + conversion_parser.add_argument( + "--use_safetensors", action="store_true", help="If serializing in the safetensors format." + ) + conversion_parser.add_argument( + "--use_auth_token", + action="store_true", + help="When working with checkpoints having private visibility. When used `huggingface-cli login` needs to be run beforehand.", + ) + conversion_parser.set_defaults(func=conversion_command_factory) + + def __init__(self, ckpt_id: str, fp16: bool, use_safetensors: bool): + self.logger = logging.get_logger("diffusers-cli/fp16_safetensors") + self.ckpt_id = ckpt_id + self.local_ckpt_dir = f"/tmp/{ckpt_id}" + self.fp16 = fp16 + + self.use_safetensors = use_safetensors + + if not self.use_safetensors and not self.fp16: + raise NotImplementedError( + "When `use_safetensors` and `fp16` both are False, then this command is of no use." + ) + + def run(self): + if version.parse(huggingface_hub.__version__) < version.parse("0.9.0"): + raise ImportError( + "The huggingface_hub version must be >= 0.9.0 to use this command. Please update your huggingface_hub" + " installation." + ) + else: + from huggingface_hub import create_commit + from huggingface_hub._commit_api import CommitOperationAdd + + model_index = hf_hub_download(repo_id=self.ckpt_id, filename="model_index.json") + with open(model_index, "r") as f: + pipeline_class_name = json.load(f)["_class_name"] + pipeline_class = getattr(import_module("diffusers"), pipeline_class_name) + self.logger.info(f"Pipeline class imported: {pipeline_class_name}.") + + # Load the appropriate pipeline. We could have use `DiffusionPipeline` + # here, but just to avoid any rough edge cases. + pipeline = pipeline_class.from_pretrained( + self.ckpt_id, torch_dtype=torch.float16 if self.fp16 else torch.float32 + ) + pipeline.save_pretrained( + self.local_ckpt_dir, + safe_serialization=True if self.use_safetensors else False, + variant="fp16" if self.fp16 else None, + ) + self.logger.info(f"Pipeline locally saved to {self.local_ckpt_dir}.") + + # Fetch all the paths. + if self.fp16: + modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.fp16.*") + elif self.use_safetensors: + modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.safetensors") + + # Prepare for the PR. + commit_message = f"Serialize variables with FP16: {self.fp16} and safetensors: {self.use_safetensors}." + operations = [] + for path in modified_paths: + operations.append(CommitOperationAdd(path_in_repo="/".join(path.split("/")[4:]), path_or_fileobj=path)) + + # Open the PR. + commit_description = ( + "Variables converted by the [`diffusers`' `fp16_safetensors`" + " CLI](https://github.com/huggingface/diffusers/blob/main/src/diffusers/commands/fp16_safetensors.py)." 
+ ) + hub_pr_url = create_commit( + repo_id=self.ckpt_id, + operations=operations, + commit_message=commit_message, + commit_description=commit_description, + repo_type="model", + create_pr=True, + ).pr_url + self.logger.info(f"PR created here: {hub_pr_url}.") diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/configuration_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/configuration_utils.py new file mode 100644 index 000000000..189ef4380 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/configuration_utils.py @@ -0,0 +1,703 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" ConfigMixin base class and utilities.""" +import dataclasses +import functools +import importlib +import inspect +import json +import os +import re +from collections import OrderedDict +from pathlib import PosixPath +from typing import Any, Dict, Tuple, Union + +import numpy as np +from huggingface_hub import create_repo, hf_hub_download +from huggingface_hub.utils import ( + EntryNotFoundError, + RepositoryNotFoundError, + RevisionNotFoundError, + validate_hf_hub_args, +) +from requests import HTTPError + +from . import __version__ +from .utils import ( + HUGGINGFACE_CO_RESOLVE_ENDPOINT, + DummyObject, + deprecate, + extract_commit_hash, + http_user_agent, + logging, +) + + +logger = logging.get_logger(__name__) + +_re_configuration_file = re.compile(r"config\.(.*)\.json") + + +class FrozenDict(OrderedDict): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + for key, value in self.items(): + setattr(self, key, value) + + self.__frozen = True + + def __delitem__(self, *args, **kwargs): + raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") + + def setdefault(self, *args, **kwargs): + raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") + + def pop(self, *args, **kwargs): + raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") + + def update(self, *args, **kwargs): + raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") + + def __setattr__(self, name, value): + if hasattr(self, "__frozen") and self.__frozen: + raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.") + super().__setattr__(name, value) + + def __setitem__(self, name, value): + if hasattr(self, "__frozen") and self.__frozen: + raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.") + super().__setitem__(name, value) + + +class ConfigMixin: + r""" + Base class for all configuration classes. All configuration parameters are stored under `self.config`. 
Also + provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and + saving classes that inherit from [`ConfigMixin`]. + + Class attributes: + - **config_name** (`str`) -- A filename under which the config should stored when calling + [`~ConfigMixin.save_config`] (should be overridden by parent class). + - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be + overridden by subclass). + - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass). + - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function + should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by + subclass). + """ + + config_name = None + ignore_for_config = [] + has_compatibles = False + + _deprecated_kwargs = [] + + def register_to_config(self, **kwargs): + if self.config_name is None: + raise NotImplementedError(f"Make sure that {self.__class__} has defined a class name `config_name`") + # Special case for `kwargs` used in deprecation warning added to schedulers + # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument, + # or solve in a more general way. + kwargs.pop("kwargs", None) + + if not hasattr(self, "_internal_dict"): + internal_dict = kwargs + else: + previous_dict = dict(self._internal_dict) + internal_dict = {**self._internal_dict, **kwargs} + logger.debug(f"Updating config from {previous_dict} to {internal_dict}") + + self._internal_dict = FrozenDict(internal_dict) + + def __getattr__(self, name: str) -> Any: + """The only reason we overwrite `getattr` here is to gracefully deprecate accessing + config attributes directly. See https://github.com/huggingface/diffusers/pull/3129 + + This function is mostly copied from PyTorch's __getattr__ overwrite: + https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module + """ + + is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) + is_attribute = name in self.__dict__ + + if is_in_config and not is_attribute: + deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'." + deprecate("direct config name access", "1.0.0", deprecation_message, standard_warn=False) + return self._internal_dict[name] + + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") + + def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): + """ + Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the + [`~ConfigMixin.from_config`] class method. + + Args: + save_directory (`str` or `os.PathLike`): + Directory where the configuration JSON file is saved (will be created if it does not exist). + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
+ """ + if os.path.isfile(save_directory): + raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") + + os.makedirs(save_directory, exist_ok=True) + + # If we save using the predefined names, we can load using `from_config` + output_config_file = os.path.join(save_directory, self.config_name) + + self.to_json_file(output_config_file) + logger.info(f"Configuration saved in {output_config_file}") + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + private = kwargs.pop("private", False) + create_pr = kwargs.pop("create_pr", False) + token = kwargs.pop("token", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id + + self._upload_folder( + save_directory, + repo_id, + token=token, + commit_message=commit_message, + create_pr=create_pr, + ) + + @classmethod + def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs): + r""" + Instantiate a Python class from a config dictionary. + + Parameters: + config (`Dict[str, Any]`): + A config dictionary from which the Python class is instantiated. Make sure to only load configuration + files of compatible classes. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + Whether kwargs that are not consumed by the Python class should be returned or not. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to update the configuration object (after it is loaded) and initiate the Python class. + `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually + overwrite the same named arguments in `config`. + + Returns: + [`ModelMixin`] or [`SchedulerMixin`]: + A model or scheduler object instantiated from a config dictionary. + + Examples: + + ```python + >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler + + >>> # Download scheduler from huggingface.co and cache. + >>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cifar10-32") + + >>> # Instantiate DDIM scheduler class with same config as DDPM + >>> scheduler = DDIMScheduler.from_config(scheduler.config) + + >>> # Instantiate PNDM scheduler class with same config as DDPM + >>> scheduler = PNDMScheduler.from_config(scheduler.config) + ``` + """ + # <===== TO BE REMOVED WITH DEPRECATION + # TODO(Patrick) - make sure to remove the following lines when config=="model_path" is deprecated + if "pretrained_model_name_or_path" in kwargs: + config = kwargs.pop("pretrained_model_name_or_path") + + if config is None: + raise ValueError("Please make sure to provide a config as the first positional argument.") + # ======> + + if not isinstance(config, dict): + deprecation_message = "It is deprecated to pass a pretrained model name or path to `from_config`." + if "Scheduler" in cls.__name__: + deprecation_message += ( + f"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead." + " Otherwise, please make sure to pass a configuration dictionary instead. This functionality will" + " be removed in v1.0.0." + ) + elif "Model" in cls.__name__: + deprecation_message += ( + f"If you were trying to load a model, please use {cls}.load_config(...) followed by" + f" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary" + " instead. This functionality will be removed in v1.0.0." 
+ ) + deprecate("config-passed-as-path", "1.0.0", deprecation_message, standard_warn=False) + config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs) + + init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs) + + # Allow dtype to be specified on initialization + if "dtype" in unused_kwargs: + init_dict["dtype"] = unused_kwargs.pop("dtype") + + # add possible deprecated kwargs + for deprecated_kwarg in cls._deprecated_kwargs: + if deprecated_kwarg in unused_kwargs: + init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg) + + # Return model and optionally state and/or unused_kwargs + model = cls(**init_dict) + + # make sure to also save config parameters that might be used for compatible classes + # update _class_name + if "_class_name" in hidden_dict: + hidden_dict["_class_name"] = cls.__name__ + + model.register_to_config(**hidden_dict) + + # add hidden kwargs of compatible classes to unused_kwargs + unused_kwargs = {**unused_kwargs, **hidden_dict} + + if return_unused_kwargs: + return (model, unused_kwargs) + else: + return model + + @classmethod + def get_config_dict(cls, *args, **kwargs): + deprecation_message = ( + f" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be" + " removed in version v1.0.0" + ) + deprecate("get_config_dict", "1.0.0", deprecation_message, standard_warn=False) + return cls.load_config(*args, **kwargs) + + @classmethod + @validate_hf_hub_args + def load_config( + cls, + pretrained_model_name_or_path: Union[str, os.PathLike], + return_unused_kwargs=False, + return_commit_hash=False, + **kwargs, + ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + r""" + Load a model or scheduler configuration. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with + [`~ConfigMixin.save_config`]. + + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. 
+ revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + return_unused_kwargs (`bool`, *optional*, defaults to `False): + Whether unused keyword arguments of the config are returned. + return_commit_hash (`bool`, *optional*, defaults to `False): + Whether the `commit_hash` of the loaded configuration are returned. + + Returns: + `dict`: + A dictionary of all the parameters stored in a JSON configuration file. + + """ + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) + _ = kwargs.pop("mirror", None) + subfolder = kwargs.pop("subfolder", None) + user_agent = kwargs.pop("user_agent", {}) + + user_agent = {**user_agent, "file_type": "config"} + user_agent = http_user_agent(user_agent) + + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + + if cls.config_name is None: + raise ValueError( + "`self.config_name` is not defined. Note that one should not load a config from " + "`ConfigMixin`. Please make sure to define `config_name` in a class inheriting from `ConfigMixin`" + ) + + if os.path.isfile(pretrained_model_name_or_path): + config_file = pretrained_model_name_or_path + elif os.path.isdir(pretrained_model_name_or_path): + if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)): + # Load from a PyTorch checkpoint + config_file = os.path.join(pretrained_model_name_or_path, cls.config_name) + elif subfolder is not None and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name) + ): + config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name) + else: + raise EnvironmentError( + f"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}." + ) + else: + try: + # Load from URL or cache if already cached + config_file = hf_hub_download( + pretrained_model_name_or_path, + filename=cls.config_name, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + subfolder=subfolder, + revision=revision, + ) + except RepositoryNotFoundError: + raise EnvironmentError( + f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier" + " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a" + " token having permission to this repo with `token` or log in with `huggingface-cli login`." + ) + except RevisionNotFoundError: + raise EnvironmentError( + f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for" + " this model name. Check the model page at" + f" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." + ) + except EntryNotFoundError: + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}." 
+ ) + except HTTPError as err: + raise EnvironmentError( + "There was a specific connection error when trying to load" + f" {pretrained_model_name_or_path}:\n{err}" + ) + except ValueError: + raise EnvironmentError( + f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" + f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" + f" directory containing a {cls.config_name} file.\nCheckout your internet connection or see how to" + " run the library in offline mode at" + " 'https://huggingface.co/docs/diffusers/installation#offline-mode'." + ) + except EnvironmentError: + raise EnvironmentError( + f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from " + "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " + f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " + f"containing a {cls.config_name} file" + ) + + try: + # Load config dict + config_dict = cls._dict_from_json_file(config_file) + + commit_hash = extract_commit_hash(config_file) + except (json.JSONDecodeError, UnicodeDecodeError): + raise EnvironmentError(f"It looks like the config file at '{config_file}' is not a valid JSON file.") + + if not (return_unused_kwargs or return_commit_hash): + return config_dict + + outputs = (config_dict,) + + if return_unused_kwargs: + outputs += (kwargs,) + + if return_commit_hash: + outputs += (commit_hash,) + + return outputs + + @staticmethod + def _get_init_keys(cls): + return set(dict(inspect.signature(cls.__init__).parameters).keys()) + + @classmethod + def extract_init_dict(cls, config_dict, **kwargs): + # Skip keys that were not present in the original config, so default __init__ values were used + used_defaults = config_dict.get("_use_default_values", []) + config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != "_use_default_values"} + + # 0. Copy origin config dict + original_dict = dict(config_dict.items()) + + # 1. Retrieve expected config attributes from __init__ signature + expected_keys = cls._get_init_keys(cls) + expected_keys.remove("self") + # remove general kwargs if present in dict + if "kwargs" in expected_keys: + expected_keys.remove("kwargs") + # remove flax internal keys + if hasattr(cls, "_flax_internal_args"): + for arg in cls._flax_internal_args: + expected_keys.remove(arg) + + # 2. 
Remove attributes that cannot be expected from expected config attributes + # remove keys to be ignored + if len(cls.ignore_for_config) > 0: + expected_keys = expected_keys - set(cls.ignore_for_config) + + # load diffusers library to import compatible and original scheduler + diffusers_library = importlib.import_module(__name__.split(".")[0]) + + if cls.has_compatibles: + compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)] + else: + compatible_classes = [] + + expected_keys_comp_cls = set() + for c in compatible_classes: + expected_keys_c = cls._get_init_keys(c) + expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c) + expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls) + config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls} + + # remove attributes from orig class that cannot be expected + orig_cls_name = config_dict.pop("_class_name", cls.__name__) + if ( + isinstance(orig_cls_name, str) + and orig_cls_name != cls.__name__ + and hasattr(diffusers_library, orig_cls_name) + ): + orig_cls = getattr(diffusers_library, orig_cls_name) + unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys + config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig} + elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)): + raise ValueError( + "Make sure that the `_class_name` is of type string or list of string (for custom pipelines)." + ) + + # remove private attributes + config_dict = {k: v for k, v in config_dict.items() if not k.startswith("_")} + + # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments + init_dict = {} + for key in expected_keys: + # if config param is passed to kwarg and is present in config dict + # it should overwrite existing config dict key + if key in kwargs and key in config_dict: + config_dict[key] = kwargs.pop(key) + + if key in kwargs: + # overwrite key + init_dict[key] = kwargs.pop(key) + elif key in config_dict: + # use value from config dict + init_dict[key] = config_dict.pop(key) + + # 4. Give nice warning if unexpected values have been passed + if len(config_dict) > 0: + logger.warning( + f"The config attributes {config_dict} were passed to {cls.__name__}, " + "but are not expected and will be ignored. Please verify your " + f"{cls.config_name} configuration file." + ) + + # 5. Give nice info if config attributes are initialized to default because they have not been passed + passed_keys = set(init_dict.keys()) + if len(expected_keys - passed_keys) > 0: + logger.info( + f"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values." + ) + + # 6. Define unused keyword arguments + unused_kwargs = {**config_dict, **kwargs} + + # 7. Define "hidden" config parameters that were saved for compatible classes + hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict} + + return init_dict, unused_kwargs, hidden_config_dict + + @classmethod + def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]): + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + return json.loads(text) + + def __repr__(self): + return f"{self.__class__.__name__} {self.to_json_string()}" + + @property + def config(self) -> Dict[str, Any]: + """ + Returns the config of the class as a frozen dictionary + + Returns: + `Dict[str, Any]`: Config of the class. 
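# A minimal round-trip sketch of the ConfigMixin API defined above:
# `save_config` writes `<config_name>` (for schedulers, `scheduler_config.json`)
# into a directory, `load_config` reads it back as a plain dict, and
# `from_config` re-instantiates the class from that dict. The directory name
# is arbitrary.
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.save_config("/tmp/my_scheduler")
config = DDPMScheduler.load_config("/tmp/my_scheduler")
restored = DDPMScheduler.from_config(config)
assert restored.config.num_train_timesteps == 1000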
+ """ + return self._internal_dict + + def to_json_string(self) -> str: + """ + Serializes the configuration instance to a JSON string. + + Returns: + `str`: + String containing all the attributes that make up the configuration instance in JSON format. + """ + config_dict = self._internal_dict if hasattr(self, "_internal_dict") else {} + config_dict["_class_name"] = self.__class__.__name__ + config_dict["_diffusers_version"] = __version__ + + def to_json_saveable(value): + if isinstance(value, np.ndarray): + value = value.tolist() + elif isinstance(value, PosixPath): + value = str(value) + return value + + config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()} + # Don't save "_ignore_files" or "_use_default_values" + config_dict.pop("_ignore_files", None) + config_dict.pop("_use_default_values", None) + + return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" + + def to_json_file(self, json_file_path: Union[str, os.PathLike]): + """ + Save the configuration instance's parameters to a JSON file. + + Args: + json_file_path (`str` or `os.PathLike`): + Path to the JSON file to save a configuration instance's parameters. + """ + with open(json_file_path, "w", encoding="utf-8") as writer: + writer.write(self.to_json_string()) + + +def register_to_config(init): + r""" + Decorator to apply on the init of classes inheriting from [`ConfigMixin`] so that all the arguments are + automatically sent to `self.register_for_config`. To ignore a specific argument accepted by the init but that + shouldn't be registered in the config, use the `ignore_for_config` class variable + + Warning: Once decorated, all private arguments (beginning with an underscore) are trashed and not sent to the init! + """ + + @functools.wraps(init) + def inner_init(self, *args, **kwargs): + # Ignore private kwargs in the init. + init_kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")} + config_init_kwargs = {k: v for k, v in kwargs.items() if k.startswith("_")} + if not isinstance(self, ConfigMixin): + raise RuntimeError( + f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does " + "not inherit from `ConfigMixin`." + ) + + ignore = getattr(self, "ignore_for_config", []) + # Get positional arguments aligned with kwargs + new_kwargs = {} + signature = inspect.signature(init) + parameters = { + name: p.default for i, (name, p) in enumerate(signature.parameters.items()) if i > 0 and name not in ignore + } + for arg, name in zip(args, parameters.keys()): + new_kwargs[name] = arg + + # Then add all kwargs + new_kwargs.update( + { + k: init_kwargs.get(k, default) + for k, default in parameters.items() + if k not in ignore and k not in new_kwargs + } + ) + + # Take note of the parameters that were not present in the loaded config + if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0: + new_kwargs["_use_default_values"] = list(set(new_kwargs.keys()) - set(init_kwargs)) + + new_kwargs = {**config_init_kwargs, **new_kwargs} + getattr(self, "register_to_config")(**new_kwargs) + init(self, *args, **init_kwargs) + + return inner_init + + +def flax_register_to_config(cls): + original_init = cls.__init__ + + @functools.wraps(original_init) + def init(self, *args, **kwargs): + if not isinstance(self, ConfigMixin): + raise RuntimeError( + f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does " + "not inherit from `ConfigMixin`." + ) + + # Ignore private kwargs in the init. 
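# A tiny illustration of how the `register_to_config` decorator above is meant
# to be used on a ConfigMixin subclass (the class here is hypothetical, not
# part of the patch): every __init__ argument except those listed in
# `ignore_for_config` is recorded in `self.config`.
from diffusers.configuration_utils import ConfigMixin, register_to_config


class MyFakeScheduler(ConfigMixin):
    config_name = "scheduler_config.json"
    ignore_for_config = ["verbose"]

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, verbose: bool = False):
        self.verbose = verbose


sched = MyFakeScheduler(num_train_timesteps=500)
print(sched.config.num_train_timesteps)  # 500
print("verbose" in sched.config)         # False -- excluded via ignore_for_config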
Retrieve all passed attributes + init_kwargs = dict(kwargs.items()) + + # Retrieve default values + fields = dataclasses.fields(self) + default_kwargs = {} + for field in fields: + # ignore flax specific attributes + if field.name in self._flax_internal_args: + continue + if type(field.default) == dataclasses._MISSING_TYPE: + default_kwargs[field.name] = None + else: + default_kwargs[field.name] = getattr(self, field.name) + + # Make sure init_kwargs override default kwargs + new_kwargs = {**default_kwargs, **init_kwargs} + # dtype should be part of `init_kwargs`, but not `new_kwargs` + if "dtype" in new_kwargs: + new_kwargs.pop("dtype") + + # Get positional arguments aligned with kwargs + for i, arg in enumerate(args): + name = fields[i].name + new_kwargs[name] = arg + + # Take note of the parameters that were not present in the loaded config + if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0: + new_kwargs["_use_default_values"] = list(set(new_kwargs.keys()) - set(init_kwargs)) + + getattr(self, "register_to_config")(**new_kwargs) + original_init(self, *args, **kwargs) + + cls.__init__ = init + return cls diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/dependency_versions_check.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/dependency_versions_check.py new file mode 100644 index 000000000..0728b3a7c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/dependency_versions_check.py @@ -0,0 +1,34 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .dependency_versions_table import deps +from .utils.versions import require_version, require_version_core + + +# define which module versions we always want to check at run time +# (usually the ones defined in `install_requires` in setup.py) +# +# order specific notes: +# - tqdm must be checked before tokenizers + +pkgs_to_check_at_runtime = "python requests filelock numpy".split() +for pkg in pkgs_to_check_at_runtime: + if pkg in deps: + require_version_core(deps[pkg]) + else: + raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") + + +def dep_version_check(pkg, hint=None): + require_version(deps[pkg], hint) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/dependency_versions_table.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/dependency_versions_table.py new file mode 100644 index 000000000..e92a486bf --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/dependency_versions_table.py @@ -0,0 +1,45 @@ +# THIS FILE HAS BEEN AUTOGENERATED. To update: +# 1. modify the `_deps` dict in setup.py +# 2. 
run `make deps_table_update` +deps = { + "Pillow": "Pillow", + "accelerate": "accelerate>=0.11.0", + "compel": "compel==0.1.8", + "datasets": "datasets", + "filelock": "filelock", + "flax": "flax>=0.4.1", + "hf-doc-builder": "hf-doc-builder>=0.3.0", + "huggingface-hub": "huggingface-hub>=0.20.2", + "requests-mock": "requests-mock==1.10.0", + "importlib_metadata": "importlib_metadata", + "invisible-watermark": "invisible-watermark>=0.2.0", + "isort": "isort>=5.5.4", + "jax": "jax>=0.4.1", + "jaxlib": "jaxlib>=0.4.1", + "Jinja2": "Jinja2", + "k-diffusion": "k-diffusion>=0.0.12", + "torchsde": "torchsde", + "note_seq": "note_seq", + "librosa": "librosa", + "numpy": "numpy", + "parameterized": "parameterized", + "peft": "peft>=0.6.0", + "protobuf": "protobuf>=3.20.3,<4", + "pytest": "pytest", + "pytest-timeout": "pytest-timeout", + "pytest-xdist": "pytest-xdist", + "python": "python>=3.8.0", + "ruff": "ruff==0.1.5", + "safetensors": "safetensors>=0.3.1", + "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", + "GitPython": "GitPython<3.1.19", + "scipy": "scipy", + "onnx": "onnx", + "regex": "regex!=2019.12.17", + "requests": "requests", + "tensorboard": "tensorboard", + "torch": "torch>=1.4", + "torchvision": "torchvision", + "transformers": "transformers>=4.25.1", + "urllib3": "urllib3<=2.0.0", +} diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/README.md b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/README.md new file mode 100644 index 000000000..81a9de81c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/README.md @@ -0,0 +1,5 @@ +# 🧨 Diffusers Experimental + +We are adding experimental code to support novel applications and usages of the Diffusers library. +Currently, the following experiments are supported: +* Reinforcement learning via an implementation of the [Diffuser](https://arxiv.org/abs/2205.09991) model. \ No newline at end of file diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/__init__.py new file mode 100644 index 000000000..ebc815540 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/__init__.py @@ -0,0 +1 @@ +from .rl import ValueGuidedRLPipeline diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/rl/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/rl/__init__.py new file mode 100644 index 000000000..7b338d317 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/rl/__init__.py @@ -0,0 +1 @@ +from .value_guided_sampling import ValueGuidedRLPipeline diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/rl/value_guided_sampling.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/rl/value_guided_sampling.py new file mode 100644 index 000000000..2f9de8574 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/experimental/rl/value_guided_sampling.py @@ -0,0 +1,153 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
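# How the `deps` table above is consumed at run time (a small sketch): the
# checker simply forwards the pinned requirement string to `require_version`,
# which raises if the installed package does not satisfy the pin.
from diffusers.dependency_versions_check import dep_version_check

dep_version_check("torch")  # checks the installed torch against "torch>=1.4"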
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import torch +import tqdm + +from ...models.unets.unet_1d import UNet1DModel +from ...pipelines import DiffusionPipeline +from ...utils.dummy_pt_objects import DDPMScheduler +from ...utils.torch_utils import randn_tensor + + +class ValueGuidedRLPipeline(DiffusionPipeline): + r""" + Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + value_function ([`UNet1DModel`]): + A specialized UNet for fine-tuning trajectories base on reward. + unet ([`UNet1DModel`]): + UNet architecture to denoise the encoded trajectories. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this + application is [`DDPMScheduler`]. + env (): + An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models. + """ + + def __init__( + self, + value_function: UNet1DModel, + unet: UNet1DModel, + scheduler: DDPMScheduler, + env, + ): + super().__init__() + + self.register_modules(value_function=value_function, unet=unet, scheduler=scheduler, env=env) + + self.data = env.get_dataset() + self.means = {} + for key in self.data.keys(): + try: + self.means[key] = self.data[key].mean() + except: # noqa: E722 + pass + self.stds = {} + for key in self.data.keys(): + try: + self.stds[key] = self.data[key].std() + except: # noqa: E722 + pass + self.state_dim = env.observation_space.shape[0] + self.action_dim = env.action_space.shape[0] + + def normalize(self, x_in, key): + return (x_in - self.means[key]) / self.stds[key] + + def de_normalize(self, x_in, key): + return x_in * self.stds[key] + self.means[key] + + def to_torch(self, x_in): + if isinstance(x_in, dict): + return {k: self.to_torch(v) for k, v in x_in.items()} + elif torch.is_tensor(x_in): + return x_in.to(self.unet.device) + return torch.tensor(x_in, device=self.unet.device) + + def reset_x0(self, x_in, cond, act_dim): + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + + def run_diffusion(self, x, conditions, n_guide_steps, scale): + batch_size = x.shape[0] + y = None + for i in tqdm.tqdm(self.scheduler.timesteps): + # create batch of timesteps to pass into model + timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) + for _ in range(n_guide_steps): + with torch.enable_grad(): + x.requires_grad_() + + # permute to match dimension for pre-trained models + y = self.value_function(x.permute(0, 2, 1), timesteps).sample + grad = torch.autograd.grad([y.sum()], [x])[0] + + posterior_variance = self.scheduler._get_variance(i) + model_std = torch.exp(0.5 * posterior_variance) + grad = model_std * grad + + grad[timesteps < 2] = 0 + x = x.detach() + x = x + scale * grad + x 
= self.reset_x0(x, conditions, self.action_dim) + + prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) + + # TODO: verify deprecation of this kwarg + x = self.scheduler.step(prev_x, i, x)["prev_sample"] + + # apply conditions to the trajectory (set the initial state) + x = self.reset_x0(x, conditions, self.action_dim) + x = self.to_torch(x) + return x, y + + def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): + # normalize the observations and create batch dimension + obs = self.normalize(obs, "observations") + obs = obs[None].repeat(batch_size, axis=0) + + conditions = {0: self.to_torch(obs)} + shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) + + # generate initial noise and apply our conditions (to make the trajectories start at current state) + x1 = randn_tensor(shape, device=self.unet.device) + x = self.reset_x0(x1, conditions, self.action_dim) + x = self.to_torch(x) + + # run the diffusion process + x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) + + # sort output trajectories by value + sorted_idx = y.argsort(0, descending=True).squeeze() + sorted_values = x[sorted_idx] + actions = sorted_values[:, :, : self.action_dim] + actions = actions.detach().cpu().numpy() + denorm_actions = self.de_normalize(actions, key="actions") + + # select the action with the highest value + if y is not None: + selected_index = 0 + else: + # if we didn't run value guiding, select a random action + selected_index = np.random.randint(0, batch_size) + + denorm_actions = denorm_actions[selected_index, 0] + return denorm_actions diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/image_processor.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/image_processor.py new file mode 100644 index 000000000..daeb8fd6f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/image_processor.py @@ -0,0 +1,990 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +import warnings +from typing import List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from PIL import Image, ImageFilter, ImageOps + +from .configuration_utils import ConfigMixin, register_to_config +from .utils import CONFIG_NAME, PIL_INTERPOLATION, deprecate + + +PipelineImageInput = Union[ + PIL.Image.Image, + np.ndarray, + torch.FloatTensor, + List[PIL.Image.Image], + List[np.ndarray], + List[torch.FloatTensor], +] + +PipelineDepthInput = PipelineImageInput + + +class VaeImageProcessor(ConfigMixin): + """ + Image processor for VAE. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. Can accept + `height` and `width` arguments from [`image_processor.VaeImageProcessor.preprocess`] method. 
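# A usage sketch for the `ValueGuidedRLPipeline` above. It assumes a D4RL
# Hopper environment (`gym` plus `d4rl` installed) and a pretrained
# value-function checkpoint on the Hub; the repo id below is the one used in
# the diffusers RL examples and should be treated as an assumption.
import gym
import d4rl  # noqa: F401  (registers the hopper-medium-v2 environment)

from diffusers.experimental import ValueGuidedRLPipeline

env = gym.make("hopper-medium-v2")
pipeline = ValueGuidedRLPipeline.from_pretrained(
    "bglick13/hopper-medium-v2-value-function-hor32", env=env
)
obs = env.reset()
action = pipeline(obs, planning_horizon=32)  # numpy action for the environment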
+ vae_scale_factor (`int`, *optional*, defaults to `8`): + VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor. + resample (`str`, *optional*, defaults to `lanczos`): + Resampling filter to use when resizing the image. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image to [-1,1]. + do_binarize (`bool`, *optional*, defaults to `False`): + Whether to binarize the image to 0/1. + do_convert_rgb (`bool`, *optional*, defaults to be `False`): + Whether to convert the images to RGB format. + do_convert_grayscale (`bool`, *optional*, defaults to be `False`): + Whether to convert the images to grayscale format. + """ + + config_name = CONFIG_NAME + + @register_to_config + def __init__( + self, + do_resize: bool = True, + vae_scale_factor: int = 8, + resample: str = "lanczos", + do_normalize: bool = True, + do_binarize: bool = False, + do_convert_rgb: bool = False, + do_convert_grayscale: bool = False, + ): + super().__init__() + if do_convert_rgb and do_convert_grayscale: + raise ValueError( + "`do_convert_rgb` and `do_convert_grayscale` can not both be set to `True`," + " if you intended to convert the image into RGB format, please set `do_convert_grayscale = False`.", + " if you intended to convert the image into grayscale format, please set `do_convert_rgb = False`", + ) + self.config.do_convert_rgb = False + + @staticmethod + def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]: + """ + Convert a numpy image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] + images = (images * 255).round().astype("uint8") + if images.shape[-1] == 1: + # special case for grayscale (single channel) images + pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] + else: + pil_images = [Image.fromarray(image) for image in images] + + return pil_images + + @staticmethod + def pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray: + """ + Convert a PIL image or a list of PIL images to NumPy arrays. + """ + if not isinstance(images, list): + images = [images] + images = [np.array(image).astype(np.float32) / 255.0 for image in images] + images = np.stack(images, axis=0) + + return images + + @staticmethod + def numpy_to_pt(images: np.ndarray) -> torch.FloatTensor: + """ + Convert a NumPy image to a PyTorch tensor. + """ + if images.ndim == 3: + images = images[..., None] + + images = torch.from_numpy(images.transpose(0, 3, 1, 2)) + return images + + @staticmethod + def pt_to_numpy(images: torch.FloatTensor) -> np.ndarray: + """ + Convert a PyTorch tensor to a NumPy image. + """ + images = images.cpu().permute(0, 2, 3, 1).float().numpy() + return images + + @staticmethod + def normalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]: + """ + Normalize an image array to [-1,1]. + """ + return 2.0 * images - 1.0 + + @staticmethod + def denormalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]: + """ + Denormalize an image array to [0,1]. + """ + return (images / 2 + 0.5).clamp(0, 1) + + @staticmethod + def convert_to_rgb(image: PIL.Image.Image) -> PIL.Image.Image: + """ + Converts a PIL image to RGB format. + """ + image = image.convert("RGB") + + return image + + @staticmethod + def convert_to_grayscale(image: PIL.Image.Image) -> PIL.Image.Image: + """ + Converts a PIL image to grayscale format. 
+ """ + image = image.convert("L") + + return image + + @staticmethod + def blur(image: PIL.Image.Image, blur_factor: int = 4) -> PIL.Image.Image: + """ + Applies Gaussian blur to an image. + """ + image = image.filter(ImageFilter.GaussianBlur(blur_factor)) + + return image + + @staticmethod + def get_crop_region(mask_image: PIL.Image.Image, width: int, height: int, pad=0): + """ + Finds a rectangular region that contains all masked ares in an image, and expands region to match the aspect ratio of the original image; + for example, if user drew mask in a 128x32 region, and the dimensions for processing are 512x512, the region will be expanded to 128x128. + + Args: + mask_image (PIL.Image.Image): Mask image. + width (int): Width of the image to be processed. + height (int): Height of the image to be processed. + pad (int, optional): Padding to be added to the crop region. Defaults to 0. + + Returns: + tuple: (x1, y1, x2, y2) represent a rectangular region that contains all masked ares in an image and matches the original aspect ratio. + """ + + mask_image = mask_image.convert("L") + mask = np.array(mask_image) + + # 1. find a rectangular region that contains all masked ares in an image + h, w = mask.shape + crop_left = 0 + for i in range(w): + if not (mask[:, i] == 0).all(): + break + crop_left += 1 + + crop_right = 0 + for i in reversed(range(w)): + if not (mask[:, i] == 0).all(): + break + crop_right += 1 + + crop_top = 0 + for i in range(h): + if not (mask[i] == 0).all(): + break + crop_top += 1 + + crop_bottom = 0 + for i in reversed(range(h)): + if not (mask[i] == 0).all(): + break + crop_bottom += 1 + + # 2. add padding to the crop region + x1, y1, x2, y2 = ( + int(max(crop_left - pad, 0)), + int(max(crop_top - pad, 0)), + int(min(w - crop_right + pad, w)), + int(min(h - crop_bottom + pad, h)), + ) + + # 3. expands crop region to match the aspect ratio of the image to be processed + ratio_crop_region = (x2 - x1) / (y2 - y1) + ratio_processing = width / height + + if ratio_crop_region > ratio_processing: + desired_height = (x2 - x1) / ratio_processing + desired_height_diff = int(desired_height - (y2 - y1)) + y1 -= desired_height_diff // 2 + y2 += desired_height_diff - desired_height_diff // 2 + if y2 >= mask_image.height: + diff = y2 - mask_image.height + y2 -= diff + y1 -= diff + if y1 < 0: + y2 -= y1 + y1 -= y1 + if y2 >= mask_image.height: + y2 = mask_image.height + else: + desired_width = (y2 - y1) * ratio_processing + desired_width_diff = int(desired_width - (x2 - x1)) + x1 -= desired_width_diff // 2 + x2 += desired_width_diff - desired_width_diff // 2 + if x2 >= mask_image.width: + diff = x2 - mask_image.width + x2 -= diff + x1 -= diff + if x1 < 0: + x2 -= x1 + x1 -= x1 + if x2 >= mask_image.width: + x2 = mask_image.width + + return x1, y1, x2, y2 + + def _resize_and_fill( + self, + image: PIL.Image.Image, + width: int, + height: int, + ) -> PIL.Image.Image: + """ + Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling empty with data from image. + + Args: + image: The image to resize. + width: The width to resize the image to. + height: The height to resize the image to. 
+ """ + + ratio = width / height + src_ratio = image.width / image.height + + src_w = width if ratio < src_ratio else image.width * height // image.height + src_h = height if ratio >= src_ratio else image.height * width // image.width + + resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION["lanczos"]) + res = Image.new("RGB", (width, height)) + res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2)) + + if ratio < src_ratio: + fill_height = height // 2 - src_h // 2 + if fill_height > 0: + res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0)) + res.paste( + resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), + box=(0, fill_height + src_h), + ) + elif ratio > src_ratio: + fill_width = width // 2 - src_w // 2 + if fill_width > 0: + res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0)) + res.paste( + resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), + box=(fill_width + src_w, 0), + ) + + return res + + def _resize_and_crop( + self, + image: PIL.Image.Image, + width: int, + height: int, + ) -> PIL.Image.Image: + """ + Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess. + + Args: + image: The image to resize. + width: The width to resize the image to. + height: The height to resize the image to. + """ + ratio = width / height + src_ratio = image.width / image.height + + src_w = width if ratio > src_ratio else image.width * height // image.height + src_h = height if ratio <= src_ratio else image.height * width // image.width + + resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION["lanczos"]) + res = Image.new("RGB", (width, height)) + res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2)) + return res + + def resize( + self, + image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], + height: int, + width: int, + resize_mode: str = "default", # "default", "fill", "crop" + ) -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]: + """ + Resize image. + + Args: + image (`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`): + The image input, can be a PIL image, numpy array or pytorch tensor. + height (`int`): + The height to resize to. + width (`int`): + The width to resize to. + resize_mode (`str`, *optional*, defaults to `default`): + The resize mode to use, can be one of `default` or `fill`. If `default`, will resize the image to fit + within the specified width and height, and it may not maintaining the original aspect ratio. + If `fill`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image + within the dimensions, filling empty with data from image. + If `crop`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image + within the dimensions, cropping the excess. + Note that resize_mode `fill` and `crop` are only supported for PIL image input. + + Returns: + `PIL.Image.Image`, `np.ndarray` or `torch.Tensor`: + The resized image. 
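# Sketch of the three `resize_mode` options documented above, for PIL input
# only (tensor/ndarray input always goes through plain interpolation). The
# image and target sizes are arbitrary.
import PIL.Image

from diffusers.image_processor import VaeImageProcessor

proc = VaeImageProcessor()
img = PIL.Image.new("RGB", (640, 360))

squashed = proc.resize(img, height=512, width=512)                     # "default": may change aspect ratio
filled = proc.resize(img, height=512, width=512, resize_mode="fill")   # keep ratio, fill borders with image data
cropped = proc.resize(img, height=512, width=512, resize_mode="crop")  # keep ratio, crop the excess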
+ """ + if resize_mode != "default" and not isinstance(image, PIL.Image.Image): + raise ValueError(f"Only PIL image input is supported for resize_mode {resize_mode}") + if isinstance(image, PIL.Image.Image): + if resize_mode == "default": + image = image.resize((width, height), resample=PIL_INTERPOLATION[self.config.resample]) + elif resize_mode == "fill": + image = self._resize_and_fill(image, width, height) + elif resize_mode == "crop": + image = self._resize_and_crop(image, width, height) + else: + raise ValueError(f"resize_mode {resize_mode} is not supported") + + elif isinstance(image, torch.Tensor): + image = torch.nn.functional.interpolate( + image, + size=(height, width), + ) + elif isinstance(image, np.ndarray): + image = self.numpy_to_pt(image) + image = torch.nn.functional.interpolate( + image, + size=(height, width), + ) + image = self.pt_to_numpy(image) + return image + + def binarize(self, image: PIL.Image.Image) -> PIL.Image.Image: + """ + Create a mask. + + Args: + image (`PIL.Image.Image`): + The image input, should be a PIL image. + + Returns: + `PIL.Image.Image`: + The binarized image. Values less than 0.5 are set to 0, values greater than 0.5 are set to 1. + """ + image[image < 0.5] = 0 + image[image >= 0.5] = 1 + + return image + + def get_default_height_width( + self, + image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], + height: Optional[int] = None, + width: Optional[int] = None, + ) -> Tuple[int, int]: + """ + This function return the height and width that are downscaled to the next integer multiple of + `vae_scale_factor`. + + Args: + image(`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`): + The image input, can be a PIL image, numpy array or pytorch tensor. if it is a numpy array, should have + shape `[batch, height, width]` or `[batch, height, width, channel]` if it is a pytorch tensor, should + have shape `[batch, channel, height, width]`. + height (`int`, *optional*, defaults to `None`): + The height in preprocessed image. If `None`, will use the height of `image` input. + width (`int`, *optional*`, defaults to `None`): + The width in preprocessed. If `None`, will use the width of the `image` input. + """ + + if height is None: + if isinstance(image, PIL.Image.Image): + height = image.height + elif isinstance(image, torch.Tensor): + height = image.shape[2] + else: + height = image.shape[1] + + if width is None: + if isinstance(image, PIL.Image.Image): + width = image.width + elif isinstance(image, torch.Tensor): + width = image.shape[3] + else: + width = image.shape[2] + + width, height = ( + x - x % self.config.vae_scale_factor for x in (width, height) + ) # resize to integer multiple of vae_scale_factor + + return height, width + + def preprocess( + self, + image: PipelineImageInput, + height: Optional[int] = None, + width: Optional[int] = None, + resize_mode: str = "default", # "default", "fill", "crop" + crops_coords: Optional[Tuple[int, int, int, int]] = None, + ) -> torch.Tensor: + """ + Preprocess the image input. + + Args: + image (`pipeline_image_input`): + The image input, accepted formats are PIL images, NumPy arrays, PyTorch tensors; Also accept list of supported formats. + height (`int`, *optional*, defaults to `None`): + The height in preprocessed image. If `None`, will use the `get_default_height_width()` to get default height. + width (`int`, *optional*`, defaults to `None`): + The width in preprocessed. If `None`, will use get_default_height_width()` to get the default width. 
+ resize_mode (`str`, *optional*, defaults to `default`): + The resize mode, can be one of `default` or `fill`. If `default`, will resize the image to fit + within the specified width and height, and it may not maintaining the original aspect ratio. + If `fill`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image + within the dimensions, filling empty with data from image. + If `crop`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image + within the dimensions, cropping the excess. + Note that resize_mode `fill` and `crop` are only supported for PIL image input. + crops_coords (`List[Tuple[int, int, int, int]]`, *optional*, defaults to `None`): + The crop coordinates for each image in the batch. If `None`, will not crop the image. + """ + supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor) + + # Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image + if self.config.do_convert_grayscale and isinstance(image, (torch.Tensor, np.ndarray)) and image.ndim == 3: + if isinstance(image, torch.Tensor): + # if image is a pytorch tensor could have 2 possible shapes: + # 1. batch x height x width: we should insert the channel dimension at position 1 + # 2. channel x height x width: we should insert batch dimension at position 0, + # however, since both channel and batch dimension has same size 1, it is same to insert at position 1 + # for simplicity, we insert a dimension of size 1 at position 1 for both cases + image = image.unsqueeze(1) + else: + # if it is a numpy array, it could have 2 possible shapes: + # 1. batch x height x width: insert channel dimension on last position + # 2. height x width x channel: insert batch dimension on first position + if image.shape[-1] == 1: + image = np.expand_dims(image, axis=0) + else: + image = np.expand_dims(image, axis=-1) + + if isinstance(image, supported_formats): + image = [image] + elif not (isinstance(image, list) and all(isinstance(i, supported_formats) for i in image)): + raise ValueError( + f"Input is in incorrect format: {[type(i) for i in image]}. 
Currently, we only support {', '.join(supported_formats)}" + ) + + if isinstance(image[0], PIL.Image.Image): + if crops_coords is not None: + image = [i.crop(crops_coords) for i in image] + if self.config.do_resize: + height, width = self.get_default_height_width(image[0], height, width) + image = [self.resize(i, height, width, resize_mode=resize_mode) for i in image] + if self.config.do_convert_rgb: + image = [self.convert_to_rgb(i) for i in image] + elif self.config.do_convert_grayscale: + image = [self.convert_to_grayscale(i) for i in image] + image = self.pil_to_numpy(image) # to np + image = self.numpy_to_pt(image) # to pt + + elif isinstance(image[0], np.ndarray): + image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) + + image = self.numpy_to_pt(image) + + height, width = self.get_default_height_width(image, height, width) + if self.config.do_resize: + image = self.resize(image, height, width) + + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) + + if self.config.do_convert_grayscale and image.ndim == 3: + image = image.unsqueeze(1) + + channel = image.shape[1] + # don't need any preprocess if the image is latents + if channel == 4: + return image + + height, width = self.get_default_height_width(image, height, width) + if self.config.do_resize: + image = self.resize(image, height, width) + + # expected range [0,1], normalize to [-1,1] + do_normalize = self.config.do_normalize + if do_normalize and image.min() < 0: + warnings.warn( + "Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] " + f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{image.min()},{image.max()}]", + FutureWarning, + ) + do_normalize = False + + if do_normalize: + image = self.normalize(image) + + if self.config.do_binarize: + image = self.binarize(image) + + return image + + def postprocess( + self, + image: torch.FloatTensor, + output_type: str = "pil", + do_denormalize: Optional[List[bool]] = None, + ) -> Union[PIL.Image.Image, np.ndarray, torch.FloatTensor]: + """ + Postprocess the image output from tensor to `output_type`. + + Args: + image (`torch.FloatTensor`): + The image input, should be a pytorch tensor with shape `B x C x H x W`. + output_type (`str`, *optional*, defaults to `pil`): + The output type of the image, can be one of `pil`, `np`, `pt`, `latent`. + do_denormalize (`List[bool]`, *optional*, defaults to `None`): + Whether to denormalize the image to [0,1]. If `None`, will use the value of `do_normalize` in the + `VaeImageProcessor` config. + + Returns: + `PIL.Image.Image`, `np.ndarray` or `torch.FloatTensor`: + The postprocessed image. + """ + if not isinstance(image, torch.Tensor): + raise ValueError( + f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor" + ) + if output_type not in ["latent", "pt", "np", "pil"]: + deprecation_message = ( + f"the output_type {output_type} is outdated and has been set to `np`. 
Please make sure to set it to one of these instead: " + "`pil`, `np`, `pt`, `latent`" + ) + deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False) + output_type = "np" + + if output_type == "latent": + return image + + if do_denormalize is None: + do_denormalize = [self.config.do_normalize] * image.shape[0] + + image = torch.stack( + [self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])] + ) + + if output_type == "pt": + return image + + image = self.pt_to_numpy(image) + + if output_type == "np": + return image + + if output_type == "pil": + return self.numpy_to_pil(image) + + def apply_overlay( + self, + mask: PIL.Image.Image, + init_image: PIL.Image.Image, + image: PIL.Image.Image, + crop_coords: Optional[Tuple[int, int, int, int]] = None, + ) -> PIL.Image.Image: + """ + overlay the inpaint output to the original image + """ + + width, height = image.width, image.height + + init_image = self.resize(init_image, width=width, height=height) + mask = self.resize(mask, width=width, height=height) + + init_image_masked = PIL.Image.new("RGBa", (width, height)) + init_image_masked.paste(init_image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(mask.convert("L"))) + init_image_masked = init_image_masked.convert("RGBA") + + if crop_coords is not None: + x, y, x2, y2 = crop_coords + w = x2 - x + h = y2 - y + base_image = PIL.Image.new("RGBA", (width, height)) + image = self.resize(image, height=h, width=w, resize_mode="crop") + base_image.paste(image, (x, y)) + image = base_image.convert("RGB") + + image = image.convert("RGBA") + image.alpha_composite(init_image_masked) + image = image.convert("RGB") + + return image + + +class VaeImageProcessorLDM3D(VaeImageProcessor): + """ + Image processor for VAE LDM3D. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. + vae_scale_factor (`int`, *optional*, defaults to `8`): + VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor. + resample (`str`, *optional*, defaults to `lanczos`): + Resampling filter to use when resizing the image. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image to [-1,1]. + """ + + config_name = CONFIG_NAME + + @register_to_config + def __init__( + self, + do_resize: bool = True, + vae_scale_factor: int = 8, + resample: str = "lanczos", + do_normalize: bool = True, + ): + super().__init__() + + @staticmethod + def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]: + """ + Convert a NumPy image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] + images = (images * 255).round().astype("uint8") + if images.shape[-1] == 1: + # special case for grayscale (single channel) images + pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] + else: + pil_images = [Image.fromarray(image[:, :, :3]) for image in images] + + return pil_images + + @staticmethod + def depth_pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray: + """ + Convert a PIL image or a list of PIL images to NumPy arrays. 
+ """ + if not isinstance(images, list): + images = [images] + + images = [np.array(image).astype(np.float32) / (2**16 - 1) for image in images] + images = np.stack(images, axis=0) + return images + + @staticmethod + def rgblike_to_depthmap(image: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]: + """ + Args: + image: RGB-like depth image + + Returns: depth map + + """ + return image[:, :, 1] * 2**8 + image[:, :, 2] + + def numpy_to_depth(self, images: np.ndarray) -> List[PIL.Image.Image]: + """ + Convert a NumPy depth image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] + images_depth = images[:, :, :, 3:] + if images.shape[-1] == 6: + images_depth = (images_depth * 255).round().astype("uint8") + pil_images = [ + Image.fromarray(self.rgblike_to_depthmap(image_depth), mode="I;16") for image_depth in images_depth + ] + elif images.shape[-1] == 4: + images_depth = (images_depth * 65535.0).astype(np.uint16) + pil_images = [Image.fromarray(image_depth, mode="I;16") for image_depth in images_depth] + else: + raise Exception("Not supported") + + return pil_images + + def postprocess( + self, + image: torch.FloatTensor, + output_type: str = "pil", + do_denormalize: Optional[List[bool]] = None, + ) -> Union[PIL.Image.Image, np.ndarray, torch.FloatTensor]: + """ + Postprocess the image output from tensor to `output_type`. + + Args: + image (`torch.FloatTensor`): + The image input, should be a pytorch tensor with shape `B x C x H x W`. + output_type (`str`, *optional*, defaults to `pil`): + The output type of the image, can be one of `pil`, `np`, `pt`, `latent`. + do_denormalize (`List[bool]`, *optional*, defaults to `None`): + Whether to denormalize the image to [0,1]. If `None`, will use the value of `do_normalize` in the + `VaeImageProcessor` config. + + Returns: + `PIL.Image.Image`, `np.ndarray` or `torch.FloatTensor`: + The postprocessed image. + """ + if not isinstance(image, torch.Tensor): + raise ValueError( + f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor" + ) + if output_type not in ["latent", "pt", "np", "pil"]: + deprecation_message = ( + f"the output_type {output_type} is outdated and has been set to `np`. Please make sure to set it to one of these instead: " + "`pil`, `np`, `pt`, `latent`" + ) + deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False) + output_type = "np" + + if do_denormalize is None: + do_denormalize = [self.config.do_normalize] * image.shape[0] + + image = torch.stack( + [self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])] + ) + + image = self.pt_to_numpy(image) + + if output_type == "np": + if image.shape[-1] == 6: + image_depth = np.stack([self.rgblike_to_depthmap(im[:, :, 3:]) for im in image], axis=0) + else: + image_depth = image[:, :, :, 3:] + return image[:, :, :, :3], image_depth + + if output_type == "pil": + return self.numpy_to_pil(image), self.numpy_to_depth(image) + else: + raise Exception(f"This type {output_type} is not supported") + + def preprocess( + self, + rgb: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray], + depth: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray], + height: Optional[int] = None, + width: Optional[int] = None, + target_res: Optional[int] = None, + ) -> torch.Tensor: + """ + Preprocess the image input. Accepted formats are PIL images, NumPy arrays or PyTorch tensors. 
+ """ + supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor) + + # Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image + if self.config.do_convert_grayscale and isinstance(rgb, (torch.Tensor, np.ndarray)) and rgb.ndim == 3: + raise Exception("This is not yet supported") + + if isinstance(rgb, supported_formats): + rgb = [rgb] + depth = [depth] + elif not (isinstance(rgb, list) and all(isinstance(i, supported_formats) for i in rgb)): + raise ValueError( + f"Input is in incorrect format: {[type(i) for i in rgb]}. Currently, we only support {', '.join(supported_formats)}" + ) + + if isinstance(rgb[0], PIL.Image.Image): + if self.config.do_convert_rgb: + raise Exception("This is not yet supported") + # rgb = [self.convert_to_rgb(i) for i in rgb] + # depth = [self.convert_to_depth(i) for i in depth] #TODO define convert_to_depth + if self.config.do_resize or target_res: + height, width = self.get_default_height_width(rgb[0], height, width) if not target_res else target_res + rgb = [self.resize(i, height, width) for i in rgb] + depth = [self.resize(i, height, width) for i in depth] + rgb = self.pil_to_numpy(rgb) # to np + rgb = self.numpy_to_pt(rgb) # to pt + + depth = self.depth_pil_to_numpy(depth) # to np + depth = self.numpy_to_pt(depth) # to pt + + elif isinstance(rgb[0], np.ndarray): + rgb = np.concatenate(rgb, axis=0) if rgb[0].ndim == 4 else np.stack(rgb, axis=0) + rgb = self.numpy_to_pt(rgb) + height, width = self.get_default_height_width(rgb, height, width) + if self.config.do_resize: + rgb = self.resize(rgb, height, width) + + depth = np.concatenate(depth, axis=0) if rgb[0].ndim == 4 else np.stack(depth, axis=0) + depth = self.numpy_to_pt(depth) + height, width = self.get_default_height_width(depth, height, width) + if self.config.do_resize: + depth = self.resize(depth, height, width) + + elif isinstance(rgb[0], torch.Tensor): + raise Exception("This is not yet supported") + # rgb = torch.cat(rgb, axis=0) if rgb[0].ndim == 4 else torch.stack(rgb, axis=0) + + # if self.config.do_convert_grayscale and rgb.ndim == 3: + # rgb = rgb.unsqueeze(1) + + # channel = rgb.shape[1] + + # height, width = self.get_default_height_width(rgb, height, width) + # if self.config.do_resize: + # rgb = self.resize(rgb, height, width) + + # depth = torch.cat(depth, axis=0) if depth[0].ndim == 4 else torch.stack(depth, axis=0) + + # if self.config.do_convert_grayscale and depth.ndim == 3: + # depth = depth.unsqueeze(1) + + # channel = depth.shape[1] + # # don't need any preprocess if the image is latents + # if depth == 4: + # return rgb, depth + + # height, width = self.get_default_height_width(depth, height, width) + # if self.config.do_resize: + # depth = self.resize(depth, height, width) + # expected range [0,1], normalize to [-1,1] + do_normalize = self.config.do_normalize + if rgb.min() < 0 and do_normalize: + warnings.warn( + "Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] " + f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{rgb.min()},{rgb.max()}]", + FutureWarning, + ) + do_normalize = False + + if do_normalize: + rgb = self.normalize(rgb) + depth = self.normalize(depth) + + if self.config.do_binarize: + rgb = self.binarize(rgb) + depth = self.binarize(depth) + + return rgb, depth + + +class IPAdapterMaskProcessor(VaeImageProcessor): + """ + Image processor for IP Adapter image masks. 
+ + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. + vae_scale_factor (`int`, *optional*, defaults to `8`): + VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor. + resample (`str`, *optional*, defaults to `lanczos`): + Resampling filter to use when resizing the image. + do_normalize (`bool`, *optional*, defaults to `False`): + Whether to normalize the image to [-1,1]. + do_binarize (`bool`, *optional*, defaults to `True`): + Whether to binarize the image to 0/1. + do_convert_grayscale (`bool`, *optional*, defaults to be `True`): + Whether to convert the images to grayscale format. + + """ + + config_name = CONFIG_NAME + + @register_to_config + def __init__( + self, + do_resize: bool = True, + vae_scale_factor: int = 8, + resample: str = "lanczos", + do_normalize: bool = False, + do_binarize: bool = True, + do_convert_grayscale: bool = True, + ): + super().__init__( + do_resize=do_resize, + vae_scale_factor=vae_scale_factor, + resample=resample, + do_normalize=do_normalize, + do_binarize=do_binarize, + do_convert_grayscale=do_convert_grayscale, + ) + + @staticmethod + def downsample(mask: torch.FloatTensor, batch_size: int, num_queries: int, value_embed_dim: int): + """ + Downsamples the provided mask tensor to match the expected dimensions for scaled dot-product attention. + If the aspect ratio of the mask does not match the aspect ratio of the output image, a warning is issued. + + Args: + mask (`torch.FloatTensor`): + The input mask tensor generated with `IPAdapterMaskProcessor.preprocess()`. + batch_size (`int`): + The batch size. + num_queries (`int`): + The number of queries. + value_embed_dim (`int`): + The dimensionality of the value embeddings. + + Returns: + `torch.FloatTensor`: + The downsampled mask tensor. + + """ + o_h = mask.shape[1] + o_w = mask.shape[2] + ratio = o_w / o_h + mask_h = int(math.sqrt(num_queries / ratio)) + mask_h = int(mask_h) + int((num_queries % int(mask_h)) != 0) + mask_w = num_queries // mask_h + + mask_downsample = F.interpolate(mask.unsqueeze(0), size=(mask_h, mask_w), mode="bicubic").squeeze(0) + + # Repeat batch_size times + if mask_downsample.shape[0] < batch_size: + mask_downsample = mask_downsample.repeat(batch_size, 1, 1) + + mask_downsample = mask_downsample.view(mask_downsample.shape[0], -1) + + downsampled_area = mask_h * mask_w + # If the output image and the mask do not have the same aspect ratio, tensor shapes will not match + # Pad tensor if downsampled_mask.shape[1] is smaller than num_queries + if downsampled_area < num_queries: + warnings.warn( + "The aspect ratio of the mask does not match the aspect ratio of the output image. " + "Please update your masks or adjust the output size for optimal performance.", + UserWarning, + ) + mask_downsample = F.pad(mask_downsample, (0, num_queries - mask_downsample.shape[1]), value=0.0) + # Discard last embeddings if downsampled_mask.shape[1] is bigger than num_queries + if downsampled_area > num_queries: + warnings.warn( + "The aspect ratio of the mask does not match the aspect ratio of the output image. 
" + "Please update your masks or adjust the output size for optimal performance.", + UserWarning, + ) + mask_downsample = mask_downsample[:, :num_queries] + + # Repeat last dimension to match SDPA output shape + mask_downsample = mask_downsample.view(mask_downsample.shape[0], mask_downsample.shape[1], 1).repeat( + 1, 1, value_embed_dim + ) + + return mask_downsample diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/__init__.py new file mode 100644 index 000000000..4da047435 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/__init__.py @@ -0,0 +1,88 @@ +from typing import TYPE_CHECKING + +from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate +from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available + + +def text_encoder_lora_state_dict(text_encoder): + deprecate( + "text_encoder_load_state_dict in `models`", + "0.27.0", + "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.", + ) + state_dict = {} + + for name, module in text_encoder_attn_modules(text_encoder): + for k, v in module.q_proj.lora_linear_layer.state_dict().items(): + state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v + + for k, v in module.k_proj.lora_linear_layer.state_dict().items(): + state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v + + for k, v in module.v_proj.lora_linear_layer.state_dict().items(): + state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v + + for k, v in module.out_proj.lora_linear_layer.state_dict().items(): + state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v + + return state_dict + + +if is_transformers_available(): + + def text_encoder_attn_modules(text_encoder): + deprecate( + "text_encoder_attn_modules in `models`", + "0.27.0", + "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. 
See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.", + ) + from transformers import CLIPTextModel, CLIPTextModelWithProjection + + attn_modules = [] + + if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)): + for i, layer in enumerate(text_encoder.text_model.encoder.layers): + name = f"text_model.encoder.layers.{i}.self_attn" + mod = layer.self_attn + attn_modules.append((name, mod)) + else: + raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}") + + return attn_modules + + +_import_structure = {} + +if is_torch_available(): + _import_structure["autoencoder"] = ["FromOriginalVAEMixin"] + + _import_structure["controlnet"] = ["FromOriginalControlNetMixin"] + _import_structure["unet"] = ["UNet2DConditionLoadersMixin"] + _import_structure["utils"] = ["AttnProcsLayers"] + if is_transformers_available(): + _import_structure["single_file"] = ["FromSingleFileMixin"] + _import_structure["lora"] = ["LoraLoaderMixin", "StableDiffusionXLLoraLoaderMixin"] + _import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"] + _import_structure["ip_adapter"] = ["IPAdapterMixin"] + +_import_structure["peft"] = ["PeftAdapterMixin"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + if is_torch_available(): + from .autoencoder import FromOriginalVAEMixin + from .controlnet import FromOriginalControlNetMixin + from .unet import UNet2DConditionLoadersMixin + from .utils import AttnProcsLayers + + if is_transformers_available(): + from .ip_adapter import IPAdapterMixin + from .lora import LoraLoaderMixin, StableDiffusionXLLoraLoaderMixin + from .single_file import FromSingleFileMixin + from .textual_inversion import TextualInversionLoaderMixin + + from .peft import PeftAdapterMixin +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/autoencoder.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/autoencoder.py new file mode 100644 index 000000000..b91d27f7d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/autoencoder.py @@ -0,0 +1,146 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from huggingface_hub.utils import validate_hf_hub_args + +from .single_file_utils import ( + create_diffusers_vae_model_from_ldm, + fetch_ldm_config_and_checkpoint, +) + + +class FromOriginalVAEMixin: + """ + Load pretrained AutoencoderKL weights saved in the `.ckpt` or `.safetensors` format into a [`AutoencoderKL`]. + """ + + @classmethod + @validate_hf_hub_args + def from_single_file(cls, pretrained_model_link_or_path, **kwargs): + r""" + Instantiate a [`AutoencoderKL`] from pretrained ControlNet weights saved in the original `.ckpt` or + `.safetensors` format. 
The pipeline is set in evaluation mode (`model.eval()`) by default. + + Parameters: + pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + - A link to the `.ckpt` file (for example + `"https://huggingface.co//blob/main/.ckpt"`) on the Hub. + - A path to a *file* containing all pipeline weights. + config_file (`str`, *optional*): + Filepath to the configuration YAML file associated with the model. If not provided it will default to: + https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the + dtype is automatically derived from the model's weights. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to True, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + image_size (`int`, *optional*, defaults to 512): + The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable + Diffusion v2 base model. Use 768 for Stable Diffusion v2. + scaling_factor (`float`, *optional*, defaults to 0.18215): + The component-wise standard deviation of the trained latent space computed using the first batch of the + training set. This is used to scale the latent space to have unit variance when training the diffusion + model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the + diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z + = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution + Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (for example the pipeline components of the + specific pipeline class). The overwritten components are directly passed to the pipelines `__init__` + method. See example below for more information. + + + + Make sure to pass both `image_size` and `scaling_factor` to `from_single_file()` if you're loading + a VAE from SDXL or a Stable Diffusion v2 model or higher. 
+ + + + Examples: + + ```py + from diffusers import AutoencoderKL + + url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors" # can also be local file + model = AutoencoderKL.from_single_file(url) + ``` + """ + + original_config_file = kwargs.pop("original_config_file", None) + config_file = kwargs.pop("config_file", None) + resume_download = kwargs.pop("resume_download", False) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + cache_dir = kwargs.pop("cache_dir", None) + local_files_only = kwargs.pop("local_files_only", None) + revision = kwargs.pop("revision", None) + torch_dtype = kwargs.pop("torch_dtype", None) + + class_name = cls.__name__ + + if (config_file is not None) and (original_config_file is not None): + raise ValueError( + "You cannot pass both `config_file` and `original_config_file` to `from_single_file`. Please use only one of these arguments." + ) + + original_config_file = original_config_file or config_file + original_config, checkpoint = fetch_ldm_config_and_checkpoint( + pretrained_model_link_or_path=pretrained_model_link_or_path, + class_name=class_name, + original_config_file=original_config_file, + resume_download=resume_download, + force_download=force_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + cache_dir=cache_dir, + ) + + image_size = kwargs.pop("image_size", None) + scaling_factor = kwargs.pop("scaling_factor", None) + component = create_diffusers_vae_model_from_ldm( + class_name, + original_config, + checkpoint, + image_size=image_size, + scaling_factor=scaling_factor, + torch_dtype=torch_dtype, + ) + vae = component["vae"] + if torch_dtype is not None: + vae = vae.to(torch_dtype) + + return vae diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/controlnet.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/controlnet.py new file mode 100644 index 000000000..d323f60aa --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/controlnet.py @@ -0,0 +1,136 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from huggingface_hub.utils import validate_hf_hub_args + +from .single_file_utils import ( + create_diffusers_controlnet_model_from_ldm, + fetch_ldm_config_and_checkpoint, +) + + +class FromOriginalControlNetMixin: + """ + Load pretrained ControlNet weights saved in the `.ckpt` or `.safetensors` format into a [`ControlNetModel`]. + """ + + @classmethod + @validate_hf_hub_args + def from_single_file(cls, pretrained_model_link_or_path, **kwargs): + r""" + Instantiate a [`ControlNetModel`] from pretrained ControlNet weights saved in the original `.ckpt` or + `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default. 
+ + Parameters: + pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + - A link to the `.ckpt` file (for example + `"https://huggingface.co//blob/main/.ckpt"`) on the Hub. + - A path to a *file* containing all pipeline weights. + config_file (`str`, *optional*): + Filepath to the configuration YAML file associated with the model. If not provided it will default to: + https://raw.githubusercontent.com/lllyasviel/ControlNet/main/models/cldm_v15.yaml + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the + dtype is automatically derived from the model's weights. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to True, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + image_size (`int`, *optional*, defaults to 512): + The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable + Diffusion v2 base model. Use 768 for Stable Diffusion v2. + upcast_attention (`bool`, *optional*, defaults to `None`): + Whether the attention computation should always be upcasted. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (for example the pipeline components of the + specific pipeline class). The overwritten components are directly passed to the pipelines `__init__` + method. See example below for more information. 
+ + Examples: + + ```py + from diffusers import StableDiffusionControlNetPipeline, ControlNetModel + + url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" # can also be a local path + model = ControlNetModel.from_single_file(url) + + url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors" # can also be a local path + pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=controlnet) + ``` + """ + original_config_file = kwargs.pop("original_config_file", None) + config_file = kwargs.pop("config_file", None) + resume_download = kwargs.pop("resume_download", False) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + cache_dir = kwargs.pop("cache_dir", None) + local_files_only = kwargs.pop("local_files_only", None) + revision = kwargs.pop("revision", None) + torch_dtype = kwargs.pop("torch_dtype", None) + + class_name = cls.__name__ + if (config_file is not None) and (original_config_file is not None): + raise ValueError( + "You cannot pass both `config_file` and `original_config_file` to `from_single_file`. Please use only one of these arguments." + ) + + original_config_file = config_file or original_config_file + original_config, checkpoint = fetch_ldm_config_and_checkpoint( + pretrained_model_link_or_path=pretrained_model_link_or_path, + class_name=class_name, + original_config_file=original_config_file, + resume_download=resume_download, + force_download=force_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + cache_dir=cache_dir, + ) + + upcast_attention = kwargs.pop("upcast_attention", False) + image_size = kwargs.pop("image_size", None) + + component = create_diffusers_controlnet_model_from_ldm( + class_name, + original_config, + checkpoint, + upcast_attention=upcast_attention, + image_size=image_size, + torch_dtype=torch_dtype, + ) + controlnet = component["controlnet"] + if torch_dtype is not None: + controlnet = controlnet.to(torch_dtype) + + return controlnet diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/ip_adapter.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/ip_adapter.py new file mode 100644 index 000000000..93959b9f0 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/ip_adapter.py @@ -0,0 +1,281 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
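+# Vendored copy of the diffusers IP-Adapter loader. `IPAdapterMixin.load_ip_adapter()` attaches one or
+# more IP-Adapter weight files (and, unless disabled, a CLIP image encoder) to an existing pipeline so
+# that reference images can condition generation alongside the text prompt.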
+ +from pathlib import Path +from typing import Dict, List, Optional, Union + +import torch +from huggingface_hub.utils import validate_hf_hub_args +from safetensors import safe_open + +from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT +from ..utils import ( + _get_model_file, + is_accelerate_available, + is_torch_version, + is_transformers_available, + logging, +) + + +if is_transformers_available(): + from transformers import ( + CLIPImageProcessor, + CLIPVisionModelWithProjection, + ) + + from ..models.attention_processor import ( + IPAdapterAttnProcessor, + IPAdapterAttnProcessor2_0, + ) + +logger = logging.get_logger(__name__) + + +class IPAdapterMixin: + """Mixin for handling IP Adapters.""" + + @validate_hf_hub_args + def load_ip_adapter( + self, + pretrained_model_name_or_path_or_dict: Union[str, List[str], Dict[str, torch.Tensor]], + subfolder: Union[str, List[str]], + weight_name: Union[str, List[str]], + image_encoder_folder: Optional[str] = "image_encoder", + **kwargs, + ): + """ + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `List[str]` or `os.PathLike` or `List[os.PathLike]` or `dict` or `List[dict]`): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved + with [`ModelMixin.save_pretrained`]. + - A [torch state + dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). + subfolder (`str` or `List[str]`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + If a list is passed, it should have the same length as `weight_name`. + weight_name (`str` or `List[str]`): + The name of the weight file to load. If a list is passed, it should have the same length as + `weight_name`. + image_encoder_folder (`str`, *optional*, defaults to `image_encoder`): + The subfolder location of the image encoder within a larger model repository on the Hub or locally. + Pass `None` to not load the image encoder. If the image encoder is located in a folder inside `subfolder`, + you only need to pass the name of the folder that contains image encoder weights, e.g. `image_encoder_folder="image_encoder"`. + If the image encoder is located in a folder other than `subfolder`, you should pass the path to the folder that contains image encoder weights, + for example, `image_encoder_folder="different_subfolder/image_encoder"`. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. 
+ token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + """ + + # handle the list inputs for multiple IP Adapters + if not isinstance(weight_name, list): + weight_name = [weight_name] + + if not isinstance(pretrained_model_name_or_path_or_dict, list): + pretrained_model_name_or_path_or_dict = [pretrained_model_name_or_path_or_dict] + if len(pretrained_model_name_or_path_or_dict) == 1: + pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict * len(weight_name) + + if not isinstance(subfolder, list): + subfolder = [subfolder] + if len(subfolder) == 1: + subfolder = subfolder * len(weight_name) + + if len(weight_name) != len(pretrained_model_name_or_path_or_dict): + raise ValueError("`weight_name` and `pretrained_model_name_or_path_or_dict` must have the same length.") + + if len(weight_name) != len(subfolder): + raise ValueError("`weight_name` and `subfolder` must have the same length.") + + # Load the main state dict first. + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) + + if low_cpu_mem_usage and not is_accelerate_available(): + low_cpu_mem_usage = False + logger.warning( + "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" + " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" + " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" + " install accelerate\n```\n." + ) + + if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): + raise NotImplementedError( + "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" + " `low_cpu_mem_usage=False`." 
+ ) + + user_agent = { + "file_type": "attn_procs_weights", + "framework": "pytorch", + } + state_dicts = [] + for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip( + pretrained_model_name_or_path_or_dict, weight_name, subfolder + ): + if not isinstance(pretrained_model_name_or_path_or_dict, dict): + model_file = _get_model_file( + pretrained_model_name_or_path_or_dict, + weights_name=weight_name, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + if weight_name.endswith(".safetensors"): + state_dict = {"image_proj": {}, "ip_adapter": {}} + with safe_open(model_file, framework="pt", device="cpu") as f: + for key in f.keys(): + if key.startswith("image_proj."): + state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key) + elif key.startswith("ip_adapter."): + state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key) + else: + state_dict = torch.load(model_file, map_location="cpu") + else: + state_dict = pretrained_model_name_or_path_or_dict + + keys = list(state_dict.keys()) + if keys != ["image_proj", "ip_adapter"]: + raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.") + + state_dicts.append(state_dict) + + # load CLIP image encoder here if it has not been registered to the pipeline yet + if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None: + if image_encoder_folder is not None: + if not isinstance(pretrained_model_name_or_path_or_dict, dict): + logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}") + if image_encoder_folder.count("/") == 0: + image_encoder_subfolder = Path(subfolder, image_encoder_folder).as_posix() + else: + image_encoder_subfolder = Path(image_encoder_folder).as_posix() + + image_encoder = CLIPVisionModelWithProjection.from_pretrained( + pretrained_model_name_or_path_or_dict, + subfolder=image_encoder_subfolder, + low_cpu_mem_usage=low_cpu_mem_usage, + ).to(self.device, dtype=self.dtype) + self.register_modules(image_encoder=image_encoder) + else: + raise ValueError( + "`image_encoder` cannot be loaded because `pretrained_model_name_or_path_or_dict` is a state dict." + ) + else: + logger.warning( + "image_encoder is not loaded since `image_encoder_folder=None` passed. You will not be able to use `ip_adapter_image` when calling the pipeline with IP-Adapter." + "Use `ip_adapter_image_embeds` to pass pre-generated image embedding instead." + ) + + # create feature extractor if it has not been registered to the pipeline yet + if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None: + feature_extractor = CLIPImageProcessor() + self.register_modules(feature_extractor=feature_extractor) + + # load ip-adapter into unet + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + unet._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage) + + def set_ip_adapter_scale(self, scale): + """ + Sets the conditioning scale between text and image. 
+ + Example: + + ```py + pipeline.set_ip_adapter_scale(0.5) + ``` + """ + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + for attn_processor in unet.attn_processors.values(): + if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)): + if not isinstance(scale, list): + scale = [scale] * len(attn_processor.scale) + if len(attn_processor.scale) != len(scale): + raise ValueError( + f"`scale` should be a list of same length as the number if ip-adapters " + f"Expected {len(attn_processor.scale)} but got {len(scale)}." + ) + attn_processor.scale = scale + + def unload_ip_adapter(self): + """ + Unloads the IP Adapter weights + + Examples: + + ```python + >>> # Assuming `pipeline` is already loaded with the IP Adapter weights. + >>> pipeline.unload_ip_adapter() + >>> ... + ``` + """ + # remove CLIP image encoder + if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None: + self.image_encoder = None + self.register_to_config(image_encoder=[None, None]) + + # remove feature extractor only when safety_checker is None as safety_checker uses + # the feature_extractor later + if not hasattr(self, "safety_checker"): + if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None: + self.feature_extractor = None + self.register_to_config(feature_extractor=[None, None]) + + # remove hidden encoder + self.unet.encoder_hid_proj = None + self.config.encoder_hid_dim_type = None + + # restore original Unet attention processors layers + self.unet.set_default_attn_processor() diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/lora.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/lora.py new file mode 100644 index 000000000..c6077f3a8 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/lora.py @@ -0,0 +1,1349 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import inspect +import os +from pathlib import Path +from typing import Callable, Dict, List, Optional, Union + +import safetensors +import torch +from huggingface_hub import model_info +from huggingface_hub.constants import HF_HUB_OFFLINE +from huggingface_hub.utils import validate_hf_hub_args +from packaging import version +from torch import nn + +from .. 
import __version__ +from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT +from ..utils import ( + USE_PEFT_BACKEND, + _get_model_file, + convert_state_dict_to_diffusers, + convert_state_dict_to_peft, + convert_unet_state_dict_to_peft, + delete_adapter_layers, + get_adapter_name, + get_peft_kwargs, + is_accelerate_available, + is_transformers_available, + logging, + recurse_remove_peft_layers, + scale_lora_layers, + set_adapter_layers, + set_weights_and_activate_adapters, +) +from .lora_conversion_utils import _convert_kohya_lora_to_diffusers, _maybe_map_sgm_blocks_to_diffusers + + +if is_transformers_available(): + from transformers import PreTrainedModel + + from ..models.lora import text_encoder_attn_modules, text_encoder_mlp_modules + +if is_accelerate_available(): + from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module + +logger = logging.get_logger(__name__) + +TEXT_ENCODER_NAME = "text_encoder" +UNET_NAME = "unet" +TRANSFORMER_NAME = "transformer" + +LORA_WEIGHT_NAME = "pytorch_lora_weights.bin" +LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors" + +LORA_DEPRECATION_MESSAGE = "You are using an old version of LoRA backend. This will be deprecated in the next releases in favor of PEFT make sure to install the latest PEFT and transformers packages in the future." + + +class LoraLoaderMixin: + r""" + Load LoRA layers into [`UNet2DConditionModel`] and + [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel). + """ + + text_encoder_name = TEXT_ENCODER_NAME + unet_name = UNET_NAME + transformer_name = TRANSFORMER_NAME + num_fused_loras = 0 + + def load_lora_weights( + self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs + ): + """ + Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and + `self.text_encoder`. + + All kwargs are forwarded to `self.lora_state_dict`. + + See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. + + See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into + `self.unet`. + + See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded + into `self.text_encoder`. + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + See [`~loaders.LoraLoaderMixin.lora_state_dict`]. + kwargs (`dict`, *optional*): + See [`~loaders.LoraLoaderMixin.lora_state_dict`]. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + # if a dict is passed, copy it instead of modifying it inplace + if isinstance(pretrained_model_name_or_path_or_dict, dict): + pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() + + # First, ensure that the checkpoint is a compatible one and can be successfully loaded. 
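+        # Illustrative usage (the repository id and file name here are placeholders, not part of this API):
+        #   pipe.load_lora_weights("some-user/some-lora", weight_name="pytorch_lora_weights.safetensors")
+        # `lora_state_dict` below returns the raw LoRA tensors together with the optional per-module
+        # network alphas parsed from the checkpoint.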
+ state_dict, network_alphas = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) + + is_correct_format = all("lora" in key for key in state_dict.keys()) + if not is_correct_format: + raise ValueError("Invalid LoRA checkpoint.") + + low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) + + self.load_lora_into_unet( + state_dict, + network_alphas=network_alphas, + unet=getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet, + low_cpu_mem_usage=low_cpu_mem_usage, + adapter_name=adapter_name, + _pipeline=self, + ) + self.load_lora_into_text_encoder( + state_dict, + network_alphas=network_alphas, + text_encoder=getattr(self, self.text_encoder_name) + if not hasattr(self, "text_encoder") + else self.text_encoder, + lora_scale=self.lora_scale, + low_cpu_mem_usage=low_cpu_mem_usage, + adapter_name=adapter_name, + _pipeline=self, + ) + + @classmethod + @validate_hf_hub_args + def lora_state_dict( + cls, + pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], + **kwargs, + ): + r""" + Return state dict for lora weights and the network alphas. + + + + We support loading A1111 formatted LoRA checkpoints in a limited capacity. + + This function is experimental and might change in the future. + + + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved + with [`ModelMixin.save_pretrained`]. + - A [torch state + dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). + + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. 
This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you're downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + + """ + # Load the main state dict first which has the LoRA layers for either of + # UNet and text encoder or both. + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + subfolder = kwargs.pop("subfolder", None) + weight_name = kwargs.pop("weight_name", None) + unet_config = kwargs.pop("unet_config", None) + use_safetensors = kwargs.pop("use_safetensors", None) + + allow_pickle = False + if use_safetensors is None: + use_safetensors = True + allow_pickle = True + + user_agent = { + "file_type": "attn_procs_weights", + "framework": "pytorch", + } + + model_file = None + if not isinstance(pretrained_model_name_or_path_or_dict, dict): + # Let's first try to load .safetensors weights + if (use_safetensors and weight_name is None) or ( + weight_name is not None and weight_name.endswith(".safetensors") + ): + try: + # Here we're relaxing the loading check to enable more Inference API + # friendliness where sometimes, it's not at all possible to automatically + # determine `weight_name`. + if weight_name is None: + weight_name = cls._best_guess_weight_name( + pretrained_model_name_or_path_or_dict, + file_extension=".safetensors", + local_files_only=local_files_only, + ) + model_file = _get_model_file( + pretrained_model_name_or_path_or_dict, + weights_name=weight_name or LORA_WEIGHT_NAME_SAFE, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + state_dict = safetensors.torch.load_file(model_file, device="cpu") + except (IOError, safetensors.SafetensorError) as e: + if not allow_pickle: + raise e + # try loading non-safetensors weights + model_file = None + pass + + if model_file is None: + if weight_name is None: + weight_name = cls._best_guess_weight_name( + pretrained_model_name_or_path_or_dict, file_extension=".bin", local_files_only=local_files_only + ) + model_file = _get_model_file( + pretrained_model_name_or_path_or_dict, + weights_name=weight_name or LORA_WEIGHT_NAME, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + state_dict = torch.load(model_file, map_location="cpu") + else: + state_dict = pretrained_model_name_or_path_or_dict + + network_alphas = None + # TODO: replace it with a method from `state_dict_utils` + if all( + ( + k.startswith("lora_te_") + or k.startswith("lora_unet_") + or k.startswith("lora_te1_") + or k.startswith("lora_te2_") + ) + for k in state_dict.keys() + ): + # Map SDXL blocks correctly. 
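+            # Keys prefixed with `lora_unet_*` / `lora_te_*` indicate a Kohya/A1111-style checkpoint;
+            # it is remapped to the diffusers layout (and, for SDXL, to the correct block names) before use.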
+ if unet_config is not None: + # use unet config to remap block numbers + state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config) + state_dict, network_alphas = _convert_kohya_lora_to_diffusers(state_dict) + + return state_dict, network_alphas + + @classmethod + def _best_guess_weight_name( + cls, pretrained_model_name_or_path_or_dict, file_extension=".safetensors", local_files_only=False + ): + if local_files_only or HF_HUB_OFFLINE: + raise ValueError("When using the offline mode, you must specify a `weight_name`.") + + targeted_files = [] + + if os.path.isfile(pretrained_model_name_or_path_or_dict): + return + elif os.path.isdir(pretrained_model_name_or_path_or_dict): + targeted_files = [ + f for f in os.listdir(pretrained_model_name_or_path_or_dict) if f.endswith(file_extension) + ] + else: + files_in_repo = model_info(pretrained_model_name_or_path_or_dict).siblings + targeted_files = [f.rfilename for f in files_in_repo if f.rfilename.endswith(file_extension)] + if len(targeted_files) == 0: + return + + # "scheduler" does not correspond to a LoRA checkpoint. + # "optimizer" does not correspond to a LoRA checkpoint + # only top-level checkpoints are considered and not the other ones, hence "checkpoint". + unallowed_substrings = {"scheduler", "optimizer", "checkpoint"} + targeted_files = list( + filter(lambda x: all(substring not in x for substring in unallowed_substrings), targeted_files) + ) + + if any(f.endswith(LORA_WEIGHT_NAME) for f in targeted_files): + targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME), targeted_files)) + elif any(f.endswith(LORA_WEIGHT_NAME_SAFE) for f in targeted_files): + targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME_SAFE), targeted_files)) + + if len(targeted_files) > 1: + raise ValueError( + f"Provided path contains more than one weights file in the {file_extension} format. Either specify `weight_name` in `load_lora_weights` or make sure there's only one `.safetensors` or `.bin` file in {pretrained_model_name_or_path_or_dict}." + ) + weight_name = targeted_files[0] + return weight_name + + @classmethod + def _optionally_disable_offloading(cls, _pipeline): + """ + Optionally removes offloading in case the pipeline has been already sequentially offloaded to CPU. + + Args: + _pipeline (`DiffusionPipeline`): + The pipeline to disable offloading for. + + Returns: + tuple: + A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` is True. + """ + is_model_cpu_offload = False + is_sequential_cpu_offload = False + + if _pipeline is not None: + for _, component in _pipeline.components.items(): + if isinstance(component, nn.Module) and hasattr(component, "_hf_hook"): + if not is_model_cpu_offload: + is_model_cpu_offload = isinstance(component._hf_hook, CpuOffload) + if not is_sequential_cpu_offload: + is_sequential_cpu_offload = isinstance(component._hf_hook, AlignDevicesHook) + + logger.info( + "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again." + ) + remove_hook_from_module(component, recurse=is_sequential_cpu_offload) + + return (is_model_cpu_offload, is_sequential_cpu_offload) + + @classmethod + def load_lora_into_unet( + cls, state_dict, network_alphas, unet, low_cpu_mem_usage=None, adapter_name=None, _pipeline=None + ): + """ + This will load the LoRA layers specified in `state_dict` into `unet`. 
+ + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The keys can either be indexed directly + into the unet or prefixed with an additional `unet` which can be used to distinguish between text + encoder lora layers. + network_alphas (`Dict[str, float]`): + See `LoRALinearLayer` for more details. + unet (`UNet2DConditionModel`): + The UNet model to load the LoRA layers into. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict + + low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT + # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918), + # then the `state_dict` keys should have `cls.unet_name` and/or `cls.text_encoder_name` as + # their prefixes. + keys = list(state_dict.keys()) + + if all(key.startswith(cls.unet_name) or key.startswith(cls.text_encoder_name) for key in keys): + # Load the layers corresponding to UNet. + logger.info(f"Loading {cls.unet_name}.") + + unet_keys = [k for k in keys if k.startswith(cls.unet_name)] + state_dict = {k.replace(f"{cls.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys} + + if network_alphas is not None: + alpha_keys = [k for k in network_alphas.keys() if k.startswith(cls.unet_name)] + network_alphas = { + k.replace(f"{cls.unet_name}.", ""): v for k, v in network_alphas.items() if k in alpha_keys + } + + else: + # Otherwise, we're dealing with the old format. This means the `state_dict` should only + # contain the module names of the `unet` as its keys WITHOUT any prefix. + if not USE_PEFT_BACKEND: + warn_message = "You have saved the LoRA weights using the old format. To convert the old LoRA weights to the new format, you can first load them in a dictionary and then create a new dictionary like the following: `new_state_dict = {f'unet.{module_name}': params for module_name, params in old_state_dict.items()}`." + logger.warning(warn_message) + + if len(state_dict.keys()) > 0: + if adapter_name in getattr(unet, "peft_config", {}): + raise ValueError( + f"Adapter name {adapter_name} already in use in the Unet - please select a new adapter name." + ) + + state_dict = convert_unet_state_dict_to_peft(state_dict) + + if network_alphas is not None: + # The alphas state dict have the same structure as Unet, thus we convert it to peft format using + # `convert_unet_state_dict_to_peft` method. 
+ network_alphas = convert_unet_state_dict_to_peft(network_alphas) + + rank = {} + for key, val in state_dict.items(): + if "lora_B" in key: + rank[key] = val.shape[1] + + lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict, is_unet=True) + lora_config = LoraConfig(**lora_config_kwargs) + + # adapter_name + if adapter_name is None: + adapter_name = get_adapter_name(unet) + + # In case the pipeline has been already offloaded to CPU - temporarily remove the hooks + # otherwise loading LoRA weights will lead to an error + is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline) + + inject_adapter_in_model(lora_config, unet, adapter_name=adapter_name) + incompatible_keys = set_peft_model_state_dict(unet, state_dict, adapter_name) + + if incompatible_keys is not None: + # check only for unexpected keys + unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) + if unexpected_keys: + logger.warning( + f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " + f" {unexpected_keys}. " + ) + + # Offload back. + if is_model_cpu_offload: + _pipeline.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + _pipeline.enable_sequential_cpu_offload() + # Unsafe code /> + + unet.load_attn_procs( + state_dict, network_alphas=network_alphas, low_cpu_mem_usage=low_cpu_mem_usage, _pipeline=_pipeline + ) + + @classmethod + def load_lora_into_text_encoder( + cls, + state_dict, + network_alphas, + text_encoder, + prefix=None, + lora_scale=1.0, + low_cpu_mem_usage=None, + adapter_name=None, + _pipeline=None, + ): + """ + This will load the LoRA layers specified in `state_dict` into `text_encoder` + + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The key should be prefixed with an + additional `text_encoder` to distinguish between unet lora layers. + network_alphas (`Dict[str, float]`): + See `LoRALinearLayer` for more details. + text_encoder (`CLIPTextModel`): + The text encoder model to load the LoRA layers into. + prefix (`str`): + Expected prefix of the `text_encoder` in the `state_dict`. + lora_scale (`float`): + How much to scale the output of the lora linear layer before it is added with the output of the regular + lora layer. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + from peft import LoraConfig + + low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT + + # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918), + # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as + # their prefixes. + keys = list(state_dict.keys()) + prefix = cls.text_encoder_name if prefix is None else prefix + + # Safe prefix to check with. 
+ if any(cls.text_encoder_name in key for key in keys): + # Load the layers corresponding to text encoder and make necessary adjustments. + text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split(".")[0] == prefix] + text_encoder_lora_state_dict = { + k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys + } + + if len(text_encoder_lora_state_dict) > 0: + logger.info(f"Loading {prefix}.") + rank = {} + text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict) + + # convert state dict + text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict) + + for name, _ in text_encoder_attn_modules(text_encoder): + rank_key = f"{name}.out_proj.lora_B.weight" + rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] + + patch_mlp = any(".mlp." in key for key in text_encoder_lora_state_dict.keys()) + if patch_mlp: + for name, _ in text_encoder_mlp_modules(text_encoder): + rank_key_fc1 = f"{name}.fc1.lora_B.weight" + rank_key_fc2 = f"{name}.fc2.lora_B.weight" + + rank[rank_key_fc1] = text_encoder_lora_state_dict[rank_key_fc1].shape[1] + rank[rank_key_fc2] = text_encoder_lora_state_dict[rank_key_fc2].shape[1] + + if network_alphas is not None: + alpha_keys = [ + k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix + ] + network_alphas = { + k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys + } + + lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict, is_unet=False) + lora_config = LoraConfig(**lora_config_kwargs) + + # adapter_name + if adapter_name is None: + adapter_name = get_adapter_name(text_encoder) + + is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline) + + # inject LoRA layers and load the state dict + # in transformers we automatically check whether the adapter name is already in use or not + text_encoder.load_adapter( + adapter_name=adapter_name, + adapter_state_dict=text_encoder_lora_state_dict, + peft_config=lora_config, + ) + + # scale LoRA layers with `lora_scale` + scale_lora_layers(text_encoder, weight=lora_scale) + + text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype) + + # Offload back. + if is_model_cpu_offload: + _pipeline.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + _pipeline.enable_sequential_cpu_offload() + # Unsafe code /> + + @classmethod + def load_lora_into_transformer( + cls, state_dict, network_alphas, transformer, low_cpu_mem_usage=None, adapter_name=None, _pipeline=None + ): + """ + This will load the LoRA layers specified in `state_dict` into `transformer`. + + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The keys can either be indexed directly + into the unet or prefixed with an additional `unet` which can be used to distinguish between text + encoder lora layers. + network_alphas (`Dict[str, float]`): + See `LoRALinearLayer` for more details. + unet (`UNet2DConditionModel`): + The UNet model to load the LoRA layers into. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. 
If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict + + low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT + + keys = list(state_dict.keys()) + + transformer_keys = [k for k in keys if k.startswith(cls.transformer_name)] + state_dict = { + k.replace(f"{cls.transformer_name}.", ""): v for k, v in state_dict.items() if k in transformer_keys + } + + if network_alphas is not None: + alpha_keys = [k for k in network_alphas.keys() if k.startswith(cls.transformer_name)] + network_alphas = { + k.replace(f"{cls.transformer_name}.", ""): v for k, v in network_alphas.items() if k in alpha_keys + } + + if len(state_dict.keys()) > 0: + if adapter_name in getattr(transformer, "peft_config", {}): + raise ValueError( + f"Adapter name {adapter_name} already in use in the transformer - please select a new adapter name." + ) + + rank = {} + for key, val in state_dict.items(): + if "lora_B" in key: + rank[key] = val.shape[1] + + lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict) + lora_config = LoraConfig(**lora_config_kwargs) + + # adapter_name + if adapter_name is None: + adapter_name = get_adapter_name(transformer) + + # In case the pipeline has been already offloaded to CPU - temporarily remove the hooks + # otherwise loading LoRA weights will lead to an error + is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline) + + inject_adapter_in_model(lora_config, transformer, adapter_name=adapter_name) + incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name) + + if incompatible_keys is not None: + # check only for unexpected keys + unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) + if unexpected_keys: + logger.warning( + f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " + f" {unexpected_keys}. " + ) + + # Offload back. + if is_model_cpu_offload: + _pipeline.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + _pipeline.enable_sequential_cpu_offload() + # Unsafe code /> + + @property + def lora_scale(self) -> float: + # property function that returns the lora scale which can be set at run time by the pipeline. 
+ # if _lora_scale has not been set, return 1 + return self._lora_scale if hasattr(self, "_lora_scale") else 1.0 + + def _remove_text_encoder_monkey_patch(self): + remove_method = recurse_remove_peft_layers + if hasattr(self, "text_encoder"): + remove_method(self.text_encoder) + # In case text encoder have no Lora attached + if getattr(self.text_encoder, "peft_config", None) is not None: + del self.text_encoder.peft_config + self.text_encoder._hf_peft_config_loaded = None + + if hasattr(self, "text_encoder_2"): + remove_method(self.text_encoder_2) + if getattr(self.text_encoder_2, "peft_config", None) is not None: + del self.text_encoder_2.peft_config + self.text_encoder_2._hf_peft_config_loaded = None + + @classmethod + def save_lora_weights( + cls, + save_directory: Union[str, os.PathLike], + unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, + text_encoder_lora_layers: Dict[str, torch.nn.Module] = None, + transformer_lora_layers: Dict[str, torch.nn.Module] = None, + is_main_process: bool = True, + weight_name: str = None, + save_function: Callable = None, + safe_serialization: bool = True, + ): + r""" + Save the LoRA parameters corresponding to the UNet and text encoder. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save LoRA parameters to. Will be created if it doesn't exist. + unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `unet`. + text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text + encoder LoRA state dict because it comes from 🤗 Transformers. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful during distributed training and you + need to call this function on all processes. In this case, set `is_main_process=True` only on the main + process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful during distributed training when you need to + replace `torch.save` with another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. + """ + state_dict = {} + + def pack_weights(layers, prefix): + layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers + layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()} + return layers_state_dict + + if not (unet_lora_layers or text_encoder_lora_layers or transformer_lora_layers): + raise ValueError( + "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers`, or `transformer_lora_layers`." 
+ ) + + if unet_lora_layers: + state_dict.update(pack_weights(unet_lora_layers, cls.unet_name)) + + if text_encoder_lora_layers: + state_dict.update(pack_weights(text_encoder_lora_layers, cls.text_encoder_name)) + + if transformer_lora_layers: + state_dict.update(pack_weights(transformer_lora_layers, "transformer")) + + # Save the model + cls.write_lora_layers( + state_dict=state_dict, + save_directory=save_directory, + is_main_process=is_main_process, + weight_name=weight_name, + save_function=save_function, + safe_serialization=safe_serialization, + ) + + @staticmethod + def write_lora_layers( + state_dict: Dict[str, torch.Tensor], + save_directory: str, + is_main_process: bool, + weight_name: str, + save_function: Callable, + safe_serialization: bool, + ): + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + if save_function is None: + if safe_serialization: + + def save_function(weights, filename): + return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"}) + + else: + save_function = torch.save + + os.makedirs(save_directory, exist_ok=True) + + if weight_name is None: + if safe_serialization: + weight_name = LORA_WEIGHT_NAME_SAFE + else: + weight_name = LORA_WEIGHT_NAME + + save_path = Path(save_directory, weight_name).as_posix() + save_function(state_dict, save_path) + logger.info(f"Model weights saved in {save_path}") + + def unload_lora_weights(self): + """ + Unloads the LoRA parameters. + + Examples: + + ```python + >>> # Assuming `pipeline` is already loaded with the LoRA parameters. + >>> pipeline.unload_lora_weights() + >>> ... + ``` + """ + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + + if not USE_PEFT_BACKEND: + if version.parse(__version__) > version.parse("0.23"): + logger.warning( + "You are using `unload_lora_weights` to disable and unload lora weights. If you want to iteratively enable and disable adapter weights," + "you can use `pipe.enable_lora()` or `pipe.disable_lora()`. After installing the latest version of PEFT." + ) + + for _, module in unet.named_modules(): + if hasattr(module, "set_lora_layer"): + module.set_lora_layer(None) + else: + recurse_remove_peft_layers(unet) + if hasattr(unet, "peft_config"): + del unet.peft_config + + # Safe to call the following regardless of LoRA. + self._remove_text_encoder_monkey_patch() + + def fuse_lora( + self, + fuse_unet: bool = True, + fuse_text_encoder: bool = True, + lora_scale: float = 1.0, + safe_fusing: bool = False, + adapter_names: Optional[List[str]] = None, + ): + r""" + Fuses the LoRA parameters into the original parameters of the corresponding blocks. + + + + This is an experimental API. + + + + Args: + fuse_unet (`bool`, defaults to `True`): Whether to fuse the UNet LoRA parameters. + fuse_text_encoder (`bool`, defaults to `True`): + Whether to fuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the + LoRA parameters then it won't have any effect. + lora_scale (`float`, defaults to 1.0): + Controls how much to influence the outputs with the LoRA parameters. + safe_fusing (`bool`, defaults to `False`): + Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. + adapter_names (`List[str]`, *optional*): + Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. 
+ + Example: + + ```py + from diffusers import DiffusionPipeline + import torch + + pipeline = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") + pipeline.fuse_lora(lora_scale=0.7) + ``` + """ + from peft.tuners.tuners_utils import BaseTunerLayer + + if fuse_unet or fuse_text_encoder: + self.num_fused_loras += 1 + if self.num_fused_loras > 1: + logger.warning( + "The current API is supported for operating with a single LoRA file. You are trying to load and fuse more than one LoRA which is not well-supported.", + ) + + if fuse_unet: + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + unet.fuse_lora(lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names) + + def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False, adapter_names=None): + merge_kwargs = {"safe_merge": safe_fusing} + + for module in text_encoder.modules(): + if isinstance(module, BaseTunerLayer): + if lora_scale != 1.0: + module.scale_layer(lora_scale) + + # For BC with previous PEFT versions, we need to check the signature + # of the `merge` method to see if it supports the `adapter_names` argument. + supported_merge_kwargs = list(inspect.signature(module.merge).parameters) + if "adapter_names" in supported_merge_kwargs: + merge_kwargs["adapter_names"] = adapter_names + elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None: + raise ValueError( + "The `adapter_names` argument is not supported with your PEFT version. " + "Please upgrade to the latest version of PEFT. `pip install -U peft`" + ) + + module.merge(**merge_kwargs) + + if fuse_text_encoder: + if hasattr(self, "text_encoder"): + fuse_text_encoder_lora(self.text_encoder, lora_scale, safe_fusing, adapter_names=adapter_names) + if hasattr(self, "text_encoder_2"): + fuse_text_encoder_lora(self.text_encoder_2, lora_scale, safe_fusing, adapter_names=adapter_names) + + def unfuse_lora(self, unfuse_unet: bool = True, unfuse_text_encoder: bool = True): + r""" + Reverses the effect of + [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.fuse_lora). + + + + This is an experimental API. + + + + Args: + unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. + unfuse_text_encoder (`bool`, defaults to `True`): + Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the + LoRA parameters then it won't have any effect. 
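+
+ Example (a minimal sketch mirroring the `fuse_lora` example above; the checkpoint and adapter names are illustrative):
+
+ ```py
+ from diffusers import DiffusionPipeline
+ import torch
+
+ pipeline = DiffusionPipeline.from_pretrained(
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+ ).to("cuda")
+ pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
+ pipeline.fuse_lora(lora_scale=0.7)
+ # ... run inference with the fused LoRA ...
+ pipeline.unfuse_lora()  # revert to the original, unfused parameters
+ ```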
+ """ + from peft.tuners.tuners_utils import BaseTunerLayer + + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + if unfuse_unet: + for module in unet.modules(): + if isinstance(module, BaseTunerLayer): + module.unmerge() + + def unfuse_text_encoder_lora(text_encoder): + for module in text_encoder.modules(): + if isinstance(module, BaseTunerLayer): + module.unmerge() + + if unfuse_text_encoder: + if hasattr(self, "text_encoder"): + unfuse_text_encoder_lora(self.text_encoder) + if hasattr(self, "text_encoder_2"): + unfuse_text_encoder_lora(self.text_encoder_2) + + self.num_fused_loras -= 1 + + def set_adapters_for_text_encoder( + self, + adapter_names: Union[List[str], str], + text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821 + text_encoder_weights: List[float] = None, + ): + """ + Sets the adapter layers for the text encoder. + + Args: + adapter_names (`List[str]` or `str`): + The names of the adapters to use. + text_encoder (`torch.nn.Module`, *optional*): + The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder` + attribute. + text_encoder_weights (`List[float]`, *optional*): + The weights to use for the text encoder. If `None`, the weights are set to `1.0` for all the adapters. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + def process_weights(adapter_names, weights): + if weights is None: + weights = [1.0] * len(adapter_names) + elif isinstance(weights, float): + weights = [weights] + + if len(adapter_names) != len(weights): + raise ValueError( + f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(weights)}" + ) + return weights + + adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names + text_encoder_weights = process_weights(adapter_names, text_encoder_weights) + text_encoder = text_encoder or getattr(self, "text_encoder", None) + if text_encoder is None: + raise ValueError( + "The pipeline does not have a default `pipe.text_encoder` class. Please make sure to pass a `text_encoder` instead." + ) + set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights) + + def disable_lora_for_text_encoder(self, text_encoder: Optional["PreTrainedModel"] = None): + """ + Disables the LoRA layers for the text encoder. + + Args: + text_encoder (`torch.nn.Module`, *optional*): + The text encoder module to disable the LoRA layers for. If `None`, it will try to get the + `text_encoder` attribute. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + text_encoder = text_encoder or getattr(self, "text_encoder", None) + if text_encoder is None: + raise ValueError("Text Encoder not found.") + set_adapter_layers(text_encoder, enabled=False) + + def enable_lora_for_text_encoder(self, text_encoder: Optional["PreTrainedModel"] = None): + """ + Enables the LoRA layers for the text encoder. + + Args: + text_encoder (`torch.nn.Module`, *optional*): + The text encoder module to enable the LoRA layers for. If `None`, it will try to get the `text_encoder` + attribute. 
+ """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + text_encoder = text_encoder or getattr(self, "text_encoder", None) + if text_encoder is None: + raise ValueError("Text Encoder not found.") + set_adapter_layers(self.text_encoder, enabled=True) + + def set_adapters( + self, + adapter_names: Union[List[str], str], + adapter_weights: Optional[List[float]] = None, + ): + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + # Handle the UNET + unet.set_adapters(adapter_names, adapter_weights) + + # Handle the Text Encoder + if hasattr(self, "text_encoder"): + self.set_adapters_for_text_encoder(adapter_names, self.text_encoder, adapter_weights) + if hasattr(self, "text_encoder_2"): + self.set_adapters_for_text_encoder(adapter_names, self.text_encoder_2, adapter_weights) + + def disable_lora(self): + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + # Disable unet adapters + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + unet.disable_lora() + + # Disable text encoder adapters + if hasattr(self, "text_encoder"): + self.disable_lora_for_text_encoder(self.text_encoder) + if hasattr(self, "text_encoder_2"): + self.disable_lora_for_text_encoder(self.text_encoder_2) + + def enable_lora(self): + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + # Enable unet adapters + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + unet.enable_lora() + + # Enable text encoder adapters + if hasattr(self, "text_encoder"): + self.enable_lora_for_text_encoder(self.text_encoder) + if hasattr(self, "text_encoder_2"): + self.enable_lora_for_text_encoder(self.text_encoder_2) + + def delete_adapters(self, adapter_names: Union[List[str], str]): + """ + Args: + Deletes the LoRA layers of `adapter_name` for the unet and text-encoder(s). + adapter_names (`Union[List[str], str]`): + The names of the adapter to delete. Can be a single string or a list of strings + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + if isinstance(adapter_names, str): + adapter_names = [adapter_names] + + # Delete unet adapters + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + unet.delete_adapters(adapter_names) + + for adapter_name in adapter_names: + # Delete text encoder adapters + if hasattr(self, "text_encoder"): + delete_adapter_layers(self.text_encoder, adapter_name) + if hasattr(self, "text_encoder_2"): + delete_adapter_layers(self.text_encoder_2, adapter_name) + + def get_active_adapters(self) -> List[str]: + """ + Gets the list of the current active adapters. + + Example: + + ```python + from diffusers import DiffusionPipeline + + pipeline = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + ).to("cuda") + pipeline.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy") + pipeline.get_active_adapters() + ``` + """ + if not USE_PEFT_BACKEND: + raise ValueError( + "PEFT backend is required for this method. 
Please install the latest version of PEFT `pip install -U peft`" + ) + + from peft.tuners.tuners_utils import BaseTunerLayer + + active_adapters = [] + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + for module in unet.modules(): + if isinstance(module, BaseTunerLayer): + active_adapters = module.active_adapters + break + + return active_adapters + + def get_list_adapters(self) -> Dict[str, List[str]]: + """ + Gets the current list of all available adapters in the pipeline. + """ + if not USE_PEFT_BACKEND: + raise ValueError( + "PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`" + ) + + set_adapters = {} + + if hasattr(self, "text_encoder") and hasattr(self.text_encoder, "peft_config"): + set_adapters["text_encoder"] = list(self.text_encoder.peft_config.keys()) + + if hasattr(self, "text_encoder_2") and hasattr(self.text_encoder_2, "peft_config"): + set_adapters["text_encoder_2"] = list(self.text_encoder_2.peft_config.keys()) + + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + if hasattr(self, self.unet_name) and hasattr(unet, "peft_config"): + set_adapters[self.unet_name] = list(self.unet.peft_config.keys()) + + return set_adapters + + def set_lora_device(self, adapter_names: List[str], device: Union[torch.device, str, int]) -> None: + """ + Moves the LoRAs listed in `adapter_names` to a target device. Useful for offloading the LoRA to the CPU in case + you want to load multiple adapters and free some GPU memory. + + Args: + adapter_names (`List[str]`): + List of adapters to send device to. + device (`Union[torch.device, str, int]`): + Device to send the adapters to. Can be either a torch device, a str or an integer. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + from peft.tuners.tuners_utils import BaseTunerLayer + + # Handle the UNET + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + for unet_module in unet.modules(): + if isinstance(unet_module, BaseTunerLayer): + for adapter_name in adapter_names: + unet_module.lora_A[adapter_name].to(device) + unet_module.lora_B[adapter_name].to(device) + + # Handle the text encoder + modules_to_process = [] + if hasattr(self, "text_encoder"): + modules_to_process.append(self.text_encoder) + + if hasattr(self, "text_encoder_2"): + modules_to_process.append(self.text_encoder_2) + + for text_encoder in modules_to_process: + # loop over submodules + for text_encoder_module in text_encoder.modules(): + if isinstance(text_encoder_module, BaseTunerLayer): + for adapter_name in adapter_names: + text_encoder_module.lora_A[adapter_name].to(device) + text_encoder_module.lora_B[adapter_name].to(device) + + +class StableDiffusionXLLoraLoaderMixin(LoraLoaderMixin): + """This class overrides `LoraLoaderMixin` with LoRA loading/saving code that's specific to SDXL""" + + # Override to properly handle the loading and unloading of the additional text encoder. + def load_lora_weights( + self, + pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], + adapter_name: Optional[str] = None, + **kwargs, + ): + """ + Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and + `self.text_encoder`. + + All kwargs are forwarded to `self.lora_state_dict`. + + See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. 
+ + See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into + `self.unet`. + + See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded + into `self.text_encoder`. + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + See [`~loaders.LoraLoaderMixin.lora_state_dict`]. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + kwargs (`dict`, *optional*): + See [`~loaders.LoraLoaderMixin.lora_state_dict`]. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + # We could have accessed the unet config from `lora_state_dict()` too. We pass + # it here explicitly to be able to tell that it's coming from an SDXL + # pipeline. + + # if a dict is passed, copy it instead of modifying it inplace + if isinstance(pretrained_model_name_or_path_or_dict, dict): + pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() + + # First, ensure that the checkpoint is a compatible one and can be successfully loaded. + state_dict, network_alphas = self.lora_state_dict( + pretrained_model_name_or_path_or_dict, + unet_config=self.unet.config, + **kwargs, + ) + is_correct_format = all("lora" in key for key in state_dict.keys()) + if not is_correct_format: + raise ValueError("Invalid LoRA checkpoint.") + + self.load_lora_into_unet( + state_dict, network_alphas=network_alphas, unet=self.unet, adapter_name=adapter_name, _pipeline=self + ) + text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k} + if len(text_encoder_state_dict) > 0: + self.load_lora_into_text_encoder( + text_encoder_state_dict, + network_alphas=network_alphas, + text_encoder=self.text_encoder, + prefix="text_encoder", + lora_scale=self.lora_scale, + adapter_name=adapter_name, + _pipeline=self, + ) + + text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k} + if len(text_encoder_2_state_dict) > 0: + self.load_lora_into_text_encoder( + text_encoder_2_state_dict, + network_alphas=network_alphas, + text_encoder=self.text_encoder_2, + prefix="text_encoder_2", + lora_scale=self.lora_scale, + adapter_name=adapter_name, + _pipeline=self, + ) + + @classmethod + def save_lora_weights( + cls, + save_directory: Union[str, os.PathLike], + unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, + text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, + text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, + is_main_process: bool = True, + weight_name: str = None, + save_function: Callable = None, + safe_serialization: bool = True, + ): + r""" + Save the LoRA parameters corresponding to the UNet and text encoder. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save LoRA parameters to. Will be created if it doesn't exist. + unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `unet`. + text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text + encoder LoRA state dict because it comes from 🤗 Transformers. 
+ is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful during distributed training and you + need to call this function on all processes. In this case, set `is_main_process=True` only on the main + process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful during distributed training when you need to + replace `torch.save` with another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. + """ + state_dict = {} + + def pack_weights(layers, prefix): + layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers + layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()} + return layers_state_dict + + if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers): + raise ValueError( + "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`." + ) + + if unet_lora_layers: + state_dict.update(pack_weights(unet_lora_layers, "unet")) + + if text_encoder_lora_layers and text_encoder_2_lora_layers: + state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder")) + state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2")) + + cls.write_lora_layers( + state_dict=state_dict, + save_directory=save_directory, + is_main_process=is_main_process, + weight_name=weight_name, + save_function=save_function, + safe_serialization=safe_serialization, + ) + + def _remove_text_encoder_monkey_patch(self): + recurse_remove_peft_layers(self.text_encoder) + # TODO: @younesbelkada handle this in transformers side + if getattr(self.text_encoder, "peft_config", None) is not None: + del self.text_encoder.peft_config + self.text_encoder._hf_peft_config_loaded = None + + recurse_remove_peft_layers(self.text_encoder_2) + if getattr(self.text_encoder_2, "peft_config", None) is not None: + del self.text_encoder_2.peft_config + self.text_encoder_2._hf_peft_config_loaded = None diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/lora_conversion_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/lora_conversion_utils.py new file mode 100644 index 000000000..e968ef962 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/lora_conversion_utils.py @@ -0,0 +1,284 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re + +from ..utils import logging + + +logger = logging.get_logger(__name__) + + +def _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config, delimiter="_", block_slice_pos=5): + # 1. 
get all state_dict_keys + all_keys = list(state_dict.keys()) + sgm_patterns = ["input_blocks", "middle_block", "output_blocks"] + + # 2. check if needs remapping, if not return original dict + is_in_sgm_format = False + for key in all_keys: + if any(p in key for p in sgm_patterns): + is_in_sgm_format = True + break + + if not is_in_sgm_format: + return state_dict + + # 3. Else remap from SGM patterns + new_state_dict = {} + inner_block_map = ["resnets", "attentions", "upsamplers"] + + # Retrieves # of down, mid and up blocks + input_block_ids, middle_block_ids, output_block_ids = set(), set(), set() + + for layer in all_keys: + if "text" in layer: + new_state_dict[layer] = state_dict.pop(layer) + else: + layer_id = int(layer.split(delimiter)[:block_slice_pos][-1]) + if sgm_patterns[0] in layer: + input_block_ids.add(layer_id) + elif sgm_patterns[1] in layer: + middle_block_ids.add(layer_id) + elif sgm_patterns[2] in layer: + output_block_ids.add(layer_id) + else: + raise ValueError(f"Checkpoint not supported because layer {layer} not supported.") + + input_blocks = { + layer_id: [key for key in state_dict if f"input_blocks{delimiter}{layer_id}" in key] + for layer_id in input_block_ids + } + middle_blocks = { + layer_id: [key for key in state_dict if f"middle_block{delimiter}{layer_id}" in key] + for layer_id in middle_block_ids + } + output_blocks = { + layer_id: [key for key in state_dict if f"output_blocks{delimiter}{layer_id}" in key] + for layer_id in output_block_ids + } + + # Rename keys accordingly + for i in input_block_ids: + block_id = (i - 1) // (unet_config.layers_per_block + 1) + layer_in_block_id = (i - 1) % (unet_config.layers_per_block + 1) + + for key in input_blocks[i]: + inner_block_id = int(key.split(delimiter)[block_slice_pos]) + inner_block_key = inner_block_map[inner_block_id] if "op" not in key else "downsamplers" + inner_layers_in_block = str(layer_in_block_id) if "op" not in key else "0" + new_key = delimiter.join( + key.split(delimiter)[: block_slice_pos - 1] + + [str(block_id), inner_block_key, inner_layers_in_block] + + key.split(delimiter)[block_slice_pos + 1 :] + ) + new_state_dict[new_key] = state_dict.pop(key) + + for i in middle_block_ids: + key_part = None + if i == 0: + key_part = [inner_block_map[0], "0"] + elif i == 1: + key_part = [inner_block_map[1], "0"] + elif i == 2: + key_part = [inner_block_map[0], "1"] + else: + raise ValueError(f"Invalid middle block id {i}.") + + for key in middle_blocks[i]: + new_key = delimiter.join( + key.split(delimiter)[: block_slice_pos - 1] + key_part + key.split(delimiter)[block_slice_pos:] + ) + new_state_dict[new_key] = state_dict.pop(key) + + for i in output_block_ids: + block_id = i // (unet_config.layers_per_block + 1) + layer_in_block_id = i % (unet_config.layers_per_block + 1) + + for key in output_blocks[i]: + inner_block_id = int(key.split(delimiter)[block_slice_pos]) + inner_block_key = inner_block_map[inner_block_id] + inner_layers_in_block = str(layer_in_block_id) if inner_block_id < 2 else "0" + new_key = delimiter.join( + key.split(delimiter)[: block_slice_pos - 1] + + [str(block_id), inner_block_key, inner_layers_in_block] + + key.split(delimiter)[block_slice_pos + 1 :] + ) + new_state_dict[new_key] = state_dict.pop(key) + + if len(state_dict) > 0: + raise ValueError("At this point all state dict entries have to be converted.") + + return new_state_dict + + +def _convert_kohya_lora_to_diffusers(state_dict, unet_name="unet", text_encoder_name="text_encoder"): + unet_state_dict = {} + te_state_dict = {} 
+ te2_state_dict = {} + network_alphas = {} + + # every down weight has a corresponding up weight and potentially an alpha weight + lora_keys = [k for k in state_dict.keys() if k.endswith("lora_down.weight")] + for key in lora_keys: + lora_name = key.split(".")[0] + lora_name_up = lora_name + ".lora_up.weight" + lora_name_alpha = lora_name + ".alpha" + + if lora_name.startswith("lora_unet_"): + diffusers_name = key.replace("lora_unet_", "").replace("_", ".") + + if "input.blocks" in diffusers_name: + diffusers_name = diffusers_name.replace("input.blocks", "down_blocks") + else: + diffusers_name = diffusers_name.replace("down.blocks", "down_blocks") + + if "middle.block" in diffusers_name: + diffusers_name = diffusers_name.replace("middle.block", "mid_block") + else: + diffusers_name = diffusers_name.replace("mid.block", "mid_block") + if "output.blocks" in diffusers_name: + diffusers_name = diffusers_name.replace("output.blocks", "up_blocks") + else: + diffusers_name = diffusers_name.replace("up.blocks", "up_blocks") + + diffusers_name = diffusers_name.replace("transformer.blocks", "transformer_blocks") + diffusers_name = diffusers_name.replace("to.q.lora", "to_q_lora") + diffusers_name = diffusers_name.replace("to.k.lora", "to_k_lora") + diffusers_name = diffusers_name.replace("to.v.lora", "to_v_lora") + diffusers_name = diffusers_name.replace("to.out.0.lora", "to_out_lora") + diffusers_name = diffusers_name.replace("proj.in", "proj_in") + diffusers_name = diffusers_name.replace("proj.out", "proj_out") + diffusers_name = diffusers_name.replace("emb.layers", "time_emb_proj") + + # SDXL specificity. + if "emb" in diffusers_name and "time.emb.proj" not in diffusers_name: + pattern = r"\.\d+(?=\D*$)" + diffusers_name = re.sub(pattern, "", diffusers_name, count=1) + if ".in." in diffusers_name: + diffusers_name = diffusers_name.replace("in.layers.2", "conv1") + if ".out." in diffusers_name: + diffusers_name = diffusers_name.replace("out.layers.3", "conv2") + if "downsamplers" in diffusers_name or "upsamplers" in diffusers_name: + diffusers_name = diffusers_name.replace("op", "conv") + if "skip" in diffusers_name: + diffusers_name = diffusers_name.replace("skip.connection", "conv_shortcut") + + # LyCORIS specificity. + if "time.emb.proj" in diffusers_name: + diffusers_name = diffusers_name.replace("time.emb.proj", "time_emb_proj") + if "conv.shortcut" in diffusers_name: + diffusers_name = diffusers_name.replace("conv.shortcut", "conv_shortcut") + + # General coverage. 
+ if "transformer_blocks" in diffusers_name: + if "attn1" in diffusers_name or "attn2" in diffusers_name: + diffusers_name = diffusers_name.replace("attn1", "attn1.processor") + diffusers_name = diffusers_name.replace("attn2", "attn2.processor") + unet_state_dict[diffusers_name] = state_dict.pop(key) + unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + elif "ff" in diffusers_name: + unet_state_dict[diffusers_name] = state_dict.pop(key) + unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + elif any(key in diffusers_name for key in ("proj_in", "proj_out")): + unet_state_dict[diffusers_name] = state_dict.pop(key) + unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + else: + unet_state_dict[diffusers_name] = state_dict.pop(key) + unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + + elif lora_name.startswith("lora_te_"): + diffusers_name = key.replace("lora_te_", "").replace("_", ".") + diffusers_name = diffusers_name.replace("text.model", "text_model") + diffusers_name = diffusers_name.replace("self.attn", "self_attn") + diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora") + diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora") + diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora") + diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora") + if "self_attn" in diffusers_name: + te_state_dict[diffusers_name] = state_dict.pop(key) + te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + elif "mlp" in diffusers_name: + # Be aware that this is the new diffusers convention and the rest of the code might + # not utilize it yet. + diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.") + te_state_dict[diffusers_name] = state_dict.pop(key) + te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + + # (sayakpaul): Duplicate code. Needs to be cleaned. + elif lora_name.startswith("lora_te1_"): + diffusers_name = key.replace("lora_te1_", "").replace("_", ".") + diffusers_name = diffusers_name.replace("text.model", "text_model") + diffusers_name = diffusers_name.replace("self.attn", "self_attn") + diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora") + diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora") + diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora") + diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora") + if "self_attn" in diffusers_name: + te_state_dict[diffusers_name] = state_dict.pop(key) + te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + elif "mlp" in diffusers_name: + # Be aware that this is the new diffusers convention and the rest of the code might + # not utilize it yet. + diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.") + te_state_dict[diffusers_name] = state_dict.pop(key) + te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + + # (sayakpaul): Duplicate code. Needs to be cleaned. 
+ elif lora_name.startswith("lora_te2_"): + diffusers_name = key.replace("lora_te2_", "").replace("_", ".") + diffusers_name = diffusers_name.replace("text.model", "text_model") + diffusers_name = diffusers_name.replace("self.attn", "self_attn") + diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora") + diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora") + diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora") + diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora") + if "self_attn" in diffusers_name: + te2_state_dict[diffusers_name] = state_dict.pop(key) + te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + elif "mlp" in diffusers_name: + # Be aware that this is the new diffusers convention and the rest of the code might + # not utilize it yet. + diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.") + te2_state_dict[diffusers_name] = state_dict.pop(key) + te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + + # Rename the alphas so that they can be mapped appropriately. + if lora_name_alpha in state_dict: + alpha = state_dict.pop(lora_name_alpha).item() + if lora_name_alpha.startswith("lora_unet_"): + prefix = "unet." + elif lora_name_alpha.startswith(("lora_te_", "lora_te1_")): + prefix = "text_encoder." + else: + prefix = "text_encoder_2." + new_name = prefix + diffusers_name.split(".lora.")[0] + ".alpha" + network_alphas.update({new_name: alpha}) + + if len(state_dict) > 0: + raise ValueError(f"The following keys have not been correctly be renamed: \n\n {', '.join(state_dict.keys())}") + + logger.info("Kohya-style checkpoint detected.") + unet_state_dict = {f"{unet_name}.{module_name}": params for module_name, params in unet_state_dict.items()} + te_state_dict = {f"{text_encoder_name}.{module_name}": params for module_name, params in te_state_dict.items()} + te2_state_dict = ( + {f"text_encoder_2.{module_name}": params for module_name, params in te2_state_dict.items()} + if len(te2_state_dict) > 0 + else None + ) + if te2_state_dict is not None: + te_state_dict.update(te2_state_dict) + + new_state_dict = {**unet_state_dict, **te_state_dict} + return new_state_dict, network_alphas diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/peft.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/peft.py new file mode 100644 index 000000000..01dbd3494 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/peft.py @@ -0,0 +1,186 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import List, Union + +from ..utils import MIN_PEFT_VERSION, check_peft_version, is_peft_available + + +class PeftAdapterMixin: + """ + A class containing all functions for loading and using adapters weights that are supported in PEFT library. 
For + more details about adapters and injecting them in a transformer-based model, check out the PEFT [documentation](https://huggingface.co/docs/peft/index). + + Install the latest version of PEFT, and use this mixin to: + + - Attach new adapters in the model. + - Attach multiple adapters and iteratively activate/deactivate them. + - Activate/deactivate all adapters from the model. + - Get a list of the active adapters. + """ + + _hf_peft_config_loaded = False + + def add_adapter(self, adapter_config, adapter_name: str = "default") -> None: + r""" + Adds a new adapter to the current model for training. If no adapter name is passed, a default name is assigned + to the adapter to follow the convention of the PEFT library. + + If you are not familiar with adapters and PEFT methods, we invite you to read more about them in the PEFT + [documentation](https://huggingface.co/docs/peft). + + Args: + adapter_config (`[~peft.PeftConfig]`): + The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt + methods. + adapter_name (`str`, *optional*, defaults to `"default"`): + The name of the adapter to add. If no name is passed, a default name is assigned to the adapter. + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not is_peft_available(): + raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.") + + from peft import PeftConfig, inject_adapter_in_model + + if not self._hf_peft_config_loaded: + self._hf_peft_config_loaded = True + elif adapter_name in self.peft_config: + raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.") + + if not isinstance(adapter_config, PeftConfig): + raise ValueError( + f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead." + ) + + # Unlike transformers, here we don't need to retrieve the name_or_path of the unet as the loading logic is + # handled by the `load_lora_layers` or `LoraLoaderMixin`. Therefore we set it to `None` here. + adapter_config.base_model_name_or_path = None + inject_adapter_in_model(adapter_config, self, adapter_name) + self.set_adapter(adapter_name) + + def set_adapter(self, adapter_name: Union[str, List[str]]) -> None: + """ + Sets a specific adapter by forcing the model to only use that adapter and disables the other adapters. + + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + [documentation](https://huggingface.co/docs/peft). + + Args: + adapter_name (Union[str, List[str]])): + The list of adapters to set or the adapter name in the case of a single adapter. + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + if isinstance(adapter_name, str): + adapter_name = [adapter_name] + + missing = set(adapter_name) - set(self.peft_config) + if len(missing) > 0: + raise ValueError( + f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)." 
+ f" current loaded adapters are: {list(self.peft_config.keys())}" + ) + + from peft.tuners.tuners_utils import BaseTunerLayer + + _adapters_has_been_set = False + + for _, module in self.named_modules(): + if isinstance(module, BaseTunerLayer): + if hasattr(module, "set_adapter"): + module.set_adapter(adapter_name) + # Previous versions of PEFT does not support multi-adapter inference + elif not hasattr(module, "set_adapter") and len(adapter_name) != 1: + raise ValueError( + "You are trying to set multiple adapters and you have a PEFT version that does not support multi-adapter inference. Please upgrade to the latest version of PEFT." + " `pip install -U peft` or `pip install -U git+https://github.com/huggingface/peft.git`" + ) + else: + module.active_adapter = adapter_name + _adapters_has_been_set = True + + if not _adapters_has_been_set: + raise ValueError( + "Did not succeeded in setting the adapter. Please make sure you are using a model that supports adapters." + ) + + def disable_adapters(self) -> None: + r""" + Disable all adapters attached to the model and fallback to inference with the base model only. + + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + [documentation](https://huggingface.co/docs/peft). + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + from peft.tuners.tuners_utils import BaseTunerLayer + + for _, module in self.named_modules(): + if isinstance(module, BaseTunerLayer): + if hasattr(module, "enable_adapters"): + module.enable_adapters(enabled=False) + else: + # support for older PEFT versions + module.disable_adapters = True + + def enable_adapters(self) -> None: + """ + Enable adapters that are attached to the model. The model uses `self.active_adapters()` to retrieve the + list of adapters to enable. + + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + [documentation](https://huggingface.co/docs/peft). + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + from peft.tuners.tuners_utils import BaseTunerLayer + + for _, module in self.named_modules(): + if isinstance(module, BaseTunerLayer): + if hasattr(module, "enable_adapters"): + module.enable_adapters(enabled=True) + else: + # support for older PEFT versions + module.disable_adapters = False + + def active_adapters(self) -> List[str]: + """ + Gets the current list of active adapters of the model. + + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + [documentation](https://huggingface.co/docs/peft). + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not is_peft_available(): + raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.") + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. 
Please load an adapter first.") + + from peft.tuners.tuners_utils import BaseTunerLayer + + for _, module in self.named_modules(): + if isinstance(module, BaseTunerLayer): + return module.active_adapter diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/single_file.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/single_file.py new file mode 100644 index 000000000..0d384b164 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/single_file.py @@ -0,0 +1,318 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from huggingface_hub.utils import validate_hf_hub_args + +from ..utils import is_transformers_available, logging +from .single_file_utils import ( + create_diffusers_unet_model_from_ldm, + create_diffusers_vae_model_from_ldm, + create_scheduler_from_ldm, + create_text_encoders_and_tokenizers_from_ldm, + fetch_ldm_config_and_checkpoint, + infer_model_type, +) + + +logger = logging.get_logger(__name__) + +# Pipelines that support the SDXL Refiner checkpoint +REFINER_PIPELINES = [ + "StableDiffusionXLImg2ImgPipeline", + "StableDiffusionXLInpaintPipeline", + "StableDiffusionXLControlNetImg2ImgPipeline", +] + +if is_transformers_available(): + from transformers import AutoFeatureExtractor + + +def build_sub_model_components( + pipeline_components, + pipeline_class_name, + component_name, + original_config, + checkpoint, + local_files_only=False, + load_safety_checker=False, + model_type=None, + image_size=None, + torch_dtype=None, + **kwargs, +): + if component_name in pipeline_components: + return {} + + if component_name == "unet": + num_in_channels = kwargs.pop("num_in_channels", None) + upcast_attention = kwargs.pop("upcast_attention", None) + + unet_components = create_diffusers_unet_model_from_ldm( + pipeline_class_name, + original_config, + checkpoint, + num_in_channels=num_in_channels, + image_size=image_size, + torch_dtype=torch_dtype, + model_type=model_type, + upcast_attention=upcast_attention, + ) + return unet_components + + if component_name == "vae": + scaling_factor = kwargs.get("scaling_factor", None) + vae_components = create_diffusers_vae_model_from_ldm( + pipeline_class_name, + original_config, + checkpoint, + image_size, + scaling_factor, + torch_dtype, + model_type=model_type, + ) + return vae_components + + if component_name == "scheduler": + scheduler_type = kwargs.get("scheduler_type", "ddim") + prediction_type = kwargs.get("prediction_type", None) + + scheduler_components = create_scheduler_from_ldm( + pipeline_class_name, + original_config, + checkpoint, + scheduler_type=scheduler_type, + prediction_type=prediction_type, + model_type=model_type, + ) + + return scheduler_components + + if component_name in ["text_encoder", "text_encoder_2", "tokenizer", "tokenizer_2"]: + text_encoder_components = create_text_encoders_and_tokenizers_from_ldm( + original_config, + 
checkpoint, + model_type=model_type, + local_files_only=local_files_only, + torch_dtype=torch_dtype, + ) + return text_encoder_components + + if component_name == "safety_checker": + if load_safety_checker: + from ..pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + safety_checker = StableDiffusionSafetyChecker.from_pretrained( + "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only, torch_dtype=torch_dtype + ) + else: + safety_checker = None + return {"safety_checker": safety_checker} + + if component_name == "feature_extractor": + if load_safety_checker: + feature_extractor = AutoFeatureExtractor.from_pretrained( + "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only + ) + else: + feature_extractor = None + return {"feature_extractor": feature_extractor} + + return + + +def set_additional_components( + pipeline_class_name, + original_config, + checkpoint=None, + model_type=None, +): + components = {} + if pipeline_class_name in REFINER_PIPELINES: + model_type = infer_model_type(original_config, checkpoint=checkpoint, model_type=model_type) + is_refiner = model_type == "SDXL-Refiner" + components.update( + { + "requires_aesthetics_score": is_refiner, + "force_zeros_for_empty_prompt": False if is_refiner else True, + } + ) + + return components + + +class FromSingleFileMixin: + """ + Load model weights saved in the `.ckpt` format into a [`DiffusionPipeline`]. + """ + + @classmethod + @validate_hf_hub_args + def from_single_file(cls, pretrained_model_link_or_path, **kwargs): + r""" + Instantiate a [`DiffusionPipeline`] from pretrained pipeline weights saved in the `.ckpt` or `.safetensors` + format. The pipeline is set in evaluation mode (`model.eval()`) by default. + + Parameters: + pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + - A link to the `.ckpt` file (for example + `"https://huggingface.co//blob/main/.ckpt"`) on the Hub. + - A path to a *file* containing all pipeline weights. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. 
+ original_config_file (`str`, *optional*): + The path to the original config file that was used to train the model. If not provided, the config file + will be inferred from the checkpoint file. + model_type (`str`, *optional*): + The type of model to load. If not provided, the model type will be inferred from the checkpoint file. + image_size (`int`, *optional*): + The size of the image output. It's used to configure the `sample_size` parameter of the UNet and VAE model. + load_safety_checker (`bool`, *optional*, defaults to `False`): + Whether to load the safety checker model or not. By default, the safety checker is not loaded unless a `safety_checker` component is passed to the `kwargs`. + num_in_channels (`int`, *optional*): + Specify the number of input channels for the UNet model. Read more about how to configure UNet model with this parameter + [here](https://huggingface.co/docs/diffusers/training/adapt_a_model#configure-unet2dconditionmodel-parameters). + scaling_factor (`float`, *optional*): + The scaling factor to use for the VAE model. If not provided, it is inferred from the config file first. + If the scaling factor is not found in the config file, the default value 0.18215 is used. + scheduler_type (`str`, *optional*): + The type of scheduler to load. If not provided, the scheduler type will be inferred from the checkpoint file. + prediction_type (`str`, *optional*): + The type of prediction to load. If not provided, the prediction type will be inferred from the checkpoint file. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline + class). The overwritten components are passed directly to the pipelines `__init__` method. See example + below for more information. + + Examples: + + ```py + >>> from diffusers import StableDiffusionPipeline + + >>> # Download pipeline from huggingface.co and cache. + >>> pipeline = StableDiffusionPipeline.from_single_file( + ... "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors" + ... ) + + >>> # Download pipeline from local file + >>> # file is downloaded under ./v1-5-pruned-emaonly.ckpt + >>> pipeline = StableDiffusionPipeline.from_single_file("./v1-5-pruned-emaonly") + + >>> # Enable float16 and move to GPU + >>> pipeline = StableDiffusionPipeline.from_single_file( + ... "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt", + ... torch_dtype=torch.float16, + ... 
) + >>> pipeline.to("cuda") + ``` + """ + original_config_file = kwargs.pop("original_config_file", None) + resume_download = kwargs.pop("resume_download", False) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + cache_dir = kwargs.pop("cache_dir", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) + torch_dtype = kwargs.pop("torch_dtype", None) + + class_name = cls.__name__ + + original_config, checkpoint = fetch_ldm_config_and_checkpoint( + pretrained_model_link_or_path=pretrained_model_link_or_path, + class_name=class_name, + original_config_file=original_config_file, + resume_download=resume_download, + force_download=force_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + cache_dir=cache_dir, + ) + + from ..pipelines.pipeline_utils import _get_pipeline_class + + pipeline_class = _get_pipeline_class( + cls, + config=None, + cache_dir=cache_dir, + ) + + expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class) + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + + model_type = kwargs.pop("model_type", None) + image_size = kwargs.pop("image_size", None) + load_safety_checker = (kwargs.pop("load_safety_checker", False)) or ( + passed_class_obj.get("safety_checker", None) is not None + ) + + init_kwargs = {} + for name in expected_modules: + if name in passed_class_obj: + init_kwargs[name] = passed_class_obj[name] + else: + components = build_sub_model_components( + init_kwargs, + class_name, + name, + original_config, + checkpoint, + model_type=model_type, + image_size=image_size, + load_safety_checker=load_safety_checker, + local_files_only=local_files_only, + torch_dtype=torch_dtype, + **kwargs, + ) + if not components: + continue + init_kwargs.update(components) + + additional_components = set_additional_components( + class_name, original_config, checkpoint=checkpoint, model_type=model_type + ) + if additional_components: + init_kwargs.update(additional_components) + + init_kwargs.update(passed_pipe_kwargs) + pipe = pipeline_class(**init_kwargs) + + if torch_dtype is not None: + pipe.to(dtype=torch_dtype) + + return pipe diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/single_file_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/single_file_utils.py new file mode 100644 index 000000000..cdaa0802a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/single_file_utils.py @@ -0,0 +1,1617 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
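Before the conversion utilities below, a brief usage sketch of the override path handled by `from_single_file` above: any pipeline component passed as a keyword argument lands in `passed_class_obj` and is used as-is, so `build_sub_model_components` is skipped for it. This is a sketch, not part of the patch; the checkpoint URL and dtype mirror the docstring example, and the scheduler settings are illustrative.

```py
import torch

from diffusers import EulerDiscreteScheduler, StableDiffusionPipeline

# An explicitly passed component (here the scheduler) bypasses build_sub_model_components.
scheduler = EulerDiscreteScheduler(beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012)

pipe = StableDiffusionPipeline.from_single_file(
    "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
    scheduler=scheduler,
    torch_dtype=torch.float16,
)
pipe.to("cuda")
```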
+""" Conversion script for the Stable Diffusion checkpoints.""" + +import os +import re +from contextlib import nullcontext +from io import BytesIO +from urllib.parse import urlparse + +import requests +import yaml + +from ..models.modeling_utils import load_state_dict +from ..schedulers import ( + DDIMScheduler, + DDPMScheduler, + DPMSolverMultistepScheduler, + EDMDPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + HeunDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from ..utils import is_accelerate_available, is_transformers_available, logging +from ..utils.hub_utils import _get_model_file + + +if is_transformers_available(): + from transformers import ( + CLIPTextConfig, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + ) + +if is_accelerate_available(): + from accelerate import init_empty_weights + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +CONFIG_URLS = { + "v1": "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml", + "v2": "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml", + "xl": "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml", + "xl_refiner": "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml", + "upscale": "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml", + "controlnet": "https://raw.githubusercontent.com/lllyasviel/ControlNet/main/models/cldm_v15.yaml", +} + +CHECKPOINT_KEY_NAMES = { + "v2": "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight", + "xl_base": "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias", + "xl_refiner": "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias", +} + +SCHEDULER_DEFAULT_CONFIG = { + "beta_schedule": "scaled_linear", + "beta_start": 0.00085, + "beta_end": 0.012, + "interpolation_type": "linear", + "num_train_timesteps": 1000, + "prediction_type": "epsilon", + "sample_max_value": 1.0, + "set_alpha_to_one": False, + "skip_prk_steps": True, + "steps_offset": 1, + "timestep_spacing": "leading", +} + + +STABLE_CASCADE_DEFAULT_CONFIGS = { + "stage_c": {"pretrained_model_name_or_path": "diffusers/stable-cascade-configs", "subfolder": "prior"}, + "stage_c_lite": {"pretrained_model_name_or_path": "diffusers/stable-cascade-configs", "subfolder": "prior_lite"}, + "stage_b": {"pretrained_model_name_or_path": "diffusers/stable-cascade-configs", "subfolder": "decoder"}, + "stage_b_lite": {"pretrained_model_name_or_path": "diffusers/stable-cascade-configs", "subfolder": "decoder_lite"}, +} + + +def convert_stable_cascade_unet_single_file_to_diffusers(original_state_dict): + is_stage_c = "clip_txt_mapper.weight" in original_state_dict + + if is_stage_c: + state_dict = {} + for key in original_state_dict.keys(): + if key.endswith("in_proj_weight"): + weights = original_state_dict[key].chunk(3, 0) + state_dict[key.replace("attn.in_proj_weight", "to_q.weight")] = weights[0] + state_dict[key.replace("attn.in_proj_weight", "to_k.weight")] = weights[1] + state_dict[key.replace("attn.in_proj_weight", "to_v.weight")] = weights[2] + elif key.endswith("in_proj_bias"): + weights = original_state_dict[key].chunk(3, 0) + state_dict[key.replace("attn.in_proj_bias", "to_q.bias")] = weights[0] + 
state_dict[key.replace("attn.in_proj_bias", "to_k.bias")] = weights[1] + state_dict[key.replace("attn.in_proj_bias", "to_v.bias")] = weights[2] + elif key.endswith("out_proj.weight"): + weights = original_state_dict[key] + state_dict[key.replace("attn.out_proj.weight", "to_out.0.weight")] = weights + elif key.endswith("out_proj.bias"): + weights = original_state_dict[key] + state_dict[key.replace("attn.out_proj.bias", "to_out.0.bias")] = weights + else: + state_dict[key] = original_state_dict[key] + else: + state_dict = {} + for key in original_state_dict.keys(): + if key.endswith("in_proj_weight"): + weights = original_state_dict[key].chunk(3, 0) + state_dict[key.replace("attn.in_proj_weight", "to_q.weight")] = weights[0] + state_dict[key.replace("attn.in_proj_weight", "to_k.weight")] = weights[1] + state_dict[key.replace("attn.in_proj_weight", "to_v.weight")] = weights[2] + elif key.endswith("in_proj_bias"): + weights = original_state_dict[key].chunk(3, 0) + state_dict[key.replace("attn.in_proj_bias", "to_q.bias")] = weights[0] + state_dict[key.replace("attn.in_proj_bias", "to_k.bias")] = weights[1] + state_dict[key.replace("attn.in_proj_bias", "to_v.bias")] = weights[2] + elif key.endswith("out_proj.weight"): + weights = original_state_dict[key] + state_dict[key.replace("attn.out_proj.weight", "to_out.0.weight")] = weights + elif key.endswith("out_proj.bias"): + weights = original_state_dict[key] + state_dict[key.replace("attn.out_proj.bias", "to_out.0.bias")] = weights + # rename clip_mapper to clip_txt_pooled_mapper + elif key.endswith("clip_mapper.weight"): + weights = original_state_dict[key] + state_dict[key.replace("clip_mapper.weight", "clip_txt_pooled_mapper.weight")] = weights + elif key.endswith("clip_mapper.bias"): + weights = original_state_dict[key] + state_dict[key.replace("clip_mapper.bias", "clip_txt_pooled_mapper.bias")] = weights + else: + state_dict[key] = original_state_dict[key] + + return state_dict + + +def infer_stable_cascade_single_file_config(checkpoint): + is_stage_c = "clip_txt_mapper.weight" in checkpoint + is_stage_b = "down_blocks.1.0.channelwise.0.weight" in checkpoint + + if is_stage_c and (checkpoint["clip_txt_mapper.weight"].shape[0] == 1536): + config_type = "stage_c_lite" + elif is_stage_c and (checkpoint["clip_txt_mapper.weight"].shape[0] == 2048): + config_type = "stage_c" + elif is_stage_b and checkpoint["down_blocks.1.0.channelwise.0.weight"].shape[-1] == 576: + config_type = "stage_b_lite" + elif is_stage_b and checkpoint["down_blocks.1.0.channelwise.0.weight"].shape[-1] == 640: + config_type = "stage_b" + + return STABLE_CASCADE_DEFAULT_CONFIGS[config_type] + + +DIFFUSERS_TO_LDM_MAPPING = { + "unet": { + "layers": { + "time_embedding.linear_1.weight": "time_embed.0.weight", + "time_embedding.linear_1.bias": "time_embed.0.bias", + "time_embedding.linear_2.weight": "time_embed.2.weight", + "time_embedding.linear_2.bias": "time_embed.2.bias", + "conv_in.weight": "input_blocks.0.0.weight", + "conv_in.bias": "input_blocks.0.0.bias", + "conv_norm_out.weight": "out.0.weight", + "conv_norm_out.bias": "out.0.bias", + "conv_out.weight": "out.2.weight", + "conv_out.bias": "out.2.bias", + }, + "class_embed_type": { + "class_embedding.linear_1.weight": "label_emb.0.0.weight", + "class_embedding.linear_1.bias": "label_emb.0.0.bias", + "class_embedding.linear_2.weight": "label_emb.0.2.weight", + "class_embedding.linear_2.bias": "label_emb.0.2.bias", + }, + "addition_embed_type": { + "add_embedding.linear_1.weight": "label_emb.0.0.weight", + 
"add_embedding.linear_1.bias": "label_emb.0.0.bias", + "add_embedding.linear_2.weight": "label_emb.0.2.weight", + "add_embedding.linear_2.bias": "label_emb.0.2.bias", + }, + }, + "controlnet": { + "layers": { + "time_embedding.linear_1.weight": "time_embed.0.weight", + "time_embedding.linear_1.bias": "time_embed.0.bias", + "time_embedding.linear_2.weight": "time_embed.2.weight", + "time_embedding.linear_2.bias": "time_embed.2.bias", + "conv_in.weight": "input_blocks.0.0.weight", + "conv_in.bias": "input_blocks.0.0.bias", + "controlnet_cond_embedding.conv_in.weight": "input_hint_block.0.weight", + "controlnet_cond_embedding.conv_in.bias": "input_hint_block.0.bias", + "controlnet_cond_embedding.conv_out.weight": "input_hint_block.14.weight", + "controlnet_cond_embedding.conv_out.bias": "input_hint_block.14.bias", + }, + "class_embed_type": { + "class_embedding.linear_1.weight": "label_emb.0.0.weight", + "class_embedding.linear_1.bias": "label_emb.0.0.bias", + "class_embedding.linear_2.weight": "label_emb.0.2.weight", + "class_embedding.linear_2.bias": "label_emb.0.2.bias", + }, + "addition_embed_type": { + "add_embedding.linear_1.weight": "label_emb.0.0.weight", + "add_embedding.linear_1.bias": "label_emb.0.0.bias", + "add_embedding.linear_2.weight": "label_emb.0.2.weight", + "add_embedding.linear_2.bias": "label_emb.0.2.bias", + }, + }, + "vae": { + "encoder.conv_in.weight": "encoder.conv_in.weight", + "encoder.conv_in.bias": "encoder.conv_in.bias", + "encoder.conv_out.weight": "encoder.conv_out.weight", + "encoder.conv_out.bias": "encoder.conv_out.bias", + "encoder.conv_norm_out.weight": "encoder.norm_out.weight", + "encoder.conv_norm_out.bias": "encoder.norm_out.bias", + "decoder.conv_in.weight": "decoder.conv_in.weight", + "decoder.conv_in.bias": "decoder.conv_in.bias", + "decoder.conv_out.weight": "decoder.conv_out.weight", + "decoder.conv_out.bias": "decoder.conv_out.bias", + "decoder.conv_norm_out.weight": "decoder.norm_out.weight", + "decoder.conv_norm_out.bias": "decoder.norm_out.bias", + "quant_conv.weight": "quant_conv.weight", + "quant_conv.bias": "quant_conv.bias", + "post_quant_conv.weight": "post_quant_conv.weight", + "post_quant_conv.bias": "post_quant_conv.bias", + }, + "openclip": { + "layers": { + "text_model.embeddings.position_embedding.weight": "positional_embedding", + "text_model.embeddings.token_embedding.weight": "token_embedding.weight", + "text_model.final_layer_norm.weight": "ln_final.weight", + "text_model.final_layer_norm.bias": "ln_final.bias", + "text_projection.weight": "text_projection", + }, + "transformer": { + "text_model.encoder.layers.": "resblocks.", + "layer_norm1": "ln_1", + "layer_norm2": "ln_2", + ".fc1.": ".c_fc.", + ".fc2.": ".c_proj.", + ".self_attn": ".attn", + "transformer.text_model.final_layer_norm.": "ln_final.", + "transformer.text_model.embeddings.token_embedding.weight": "token_embedding.weight", + "transformer.text_model.embeddings.position_embedding.weight": "positional_embedding", + }, + }, +} + +LDM_VAE_KEY = "first_stage_model." +LDM_VAE_DEFAULT_SCALING_FACTOR = 0.18215 +PLAYGROUND_VAE_SCALING_FACTOR = 0.5 +LDM_UNET_KEY = "model.diffusion_model." +LDM_CONTROLNET_KEY = "control_model." 
+LDM_CLIP_PREFIX_TO_REMOVE = ["cond_stage_model.transformer.", "conditioner.embedders.0.transformer."] +LDM_OPEN_CLIP_TEXT_PROJECTION_DIM = 1024 + +SD_2_TEXT_ENCODER_KEYS_TO_IGNORE = [ + "cond_stage_model.model.transformer.resblocks.23.attn.in_proj_bias", + "cond_stage_model.model.transformer.resblocks.23.attn.in_proj_weight", + "cond_stage_model.model.transformer.resblocks.23.attn.out_proj.bias", + "cond_stage_model.model.transformer.resblocks.23.attn.out_proj.weight", + "cond_stage_model.model.transformer.resblocks.23.ln_1.bias", + "cond_stage_model.model.transformer.resblocks.23.ln_1.weight", + "cond_stage_model.model.transformer.resblocks.23.ln_2.bias", + "cond_stage_model.model.transformer.resblocks.23.ln_2.weight", + "cond_stage_model.model.transformer.resblocks.23.mlp.c_fc.bias", + "cond_stage_model.model.transformer.resblocks.23.mlp.c_fc.weight", + "cond_stage_model.model.transformer.resblocks.23.mlp.c_proj.bias", + "cond_stage_model.model.transformer.resblocks.23.mlp.c_proj.weight", + "cond_stage_model.model.text_projection", +] + + +VALID_URL_PREFIXES = ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"] + + +def _extract_repo_id_and_weights_name(pretrained_model_name_or_path): + pattern = r"([^/]+)/([^/]+)/(?:blob/main/)?(.+)" + weights_name = None + repo_id = (None,) + for prefix in VALID_URL_PREFIXES: + pretrained_model_name_or_path = pretrained_model_name_or_path.replace(prefix, "") + match = re.match(pattern, pretrained_model_name_or_path) + if not match: + return repo_id, weights_name + + repo_id = f"{match.group(1)}/{match.group(2)}" + weights_name = match.group(3) + + return repo_id, weights_name + + +def fetch_ldm_config_and_checkpoint( + pretrained_model_link_or_path, + class_name, + original_config_file=None, + resume_download=False, + force_download=False, + proxies=None, + token=None, + cache_dir=None, + local_files_only=None, + revision=None, +): + checkpoint = load_single_file_model_checkpoint( + pretrained_model_link_or_path, + resume_download=resume_download, + force_download=force_download, + proxies=proxies, + token=token, + cache_dir=cache_dir, + local_files_only=local_files_only, + revision=revision, + ) + original_config = fetch_original_config(class_name, checkpoint, original_config_file) + + return original_config, checkpoint + + +def load_single_file_model_checkpoint( + pretrained_model_link_or_path, + resume_download=False, + force_download=False, + proxies=None, + token=None, + cache_dir=None, + local_files_only=None, + revision=None, +): + if os.path.isfile(pretrained_model_link_or_path): + checkpoint = load_state_dict(pretrained_model_link_or_path) + else: + repo_id, weights_name = _extract_repo_id_and_weights_name(pretrained_model_link_or_path) + checkpoint_path = _get_model_file( + repo_id, + weights_name=weights_name, + force_download=force_download, + cache_dir=cache_dir, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + ) + checkpoint = load_state_dict(checkpoint_path) + + # some checkpoints contain the model state dict under a "state_dict" key + while "state_dict" in checkpoint: + checkpoint = checkpoint["state_dict"] + + return checkpoint + + +def infer_original_config_file(class_name, checkpoint): + if CHECKPOINT_KEY_NAMES["v2"] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES["v2"]].shape[-1] == 1024: + config_url = CONFIG_URLS["v2"] + + elif CHECKPOINT_KEY_NAMES["xl_base"] in checkpoint: + config_url = CONFIG_URLS["xl"] + + elif 
CHECKPOINT_KEY_NAMES["xl_refiner"] in checkpoint: + config_url = CONFIG_URLS["xl_refiner"] + + elif class_name == "StableDiffusionUpscalePipeline": + config_url = CONFIG_URLS["upscale"] + + elif class_name == "ControlNetModel": + config_url = CONFIG_URLS["controlnet"] + + else: + config_url = CONFIG_URLS["v1"] + + original_config_file = BytesIO(requests.get(config_url).content) + + return original_config_file + + +def fetch_original_config(pipeline_class_name, checkpoint, original_config_file=None): + def is_valid_url(url): + result = urlparse(url) + if result.scheme and result.netloc: + return True + + return False + + if original_config_file is None: + original_config_file = infer_original_config_file(pipeline_class_name, checkpoint) + + elif os.path.isfile(original_config_file): + with open(original_config_file, "r") as fp: + original_config_file = fp.read() + + elif is_valid_url(original_config_file): + original_config_file = BytesIO(requests.get(original_config_file).content) + + else: + raise ValueError("Invalid `original_config_file` provided. Please set it to a valid file path or URL.") + + original_config = yaml.safe_load(original_config_file) + + return original_config + + +def infer_model_type(original_config, checkpoint, model_type=None): + if model_type is not None: + return model_type + + has_cond_stage_config = ( + "cond_stage_config" in original_config["model"]["params"] + and original_config["model"]["params"]["cond_stage_config"] is not None + ) + has_network_config = ( + "network_config" in original_config["model"]["params"] + and original_config["model"]["params"]["network_config"] is not None + ) + + if has_cond_stage_config: + model_type = original_config["model"]["params"]["cond_stage_config"]["target"].split(".")[-1] + + elif has_network_config: + context_dim = original_config["model"]["params"]["network_config"]["params"]["context_dim"] + if "edm_mean" in checkpoint and "edm_std" in checkpoint: + model_type = "Playground" + elif context_dim == 2048: + model_type = "SDXL" + else: + model_type = "SDXL-Refiner" + else: + raise ValueError("Unable to infer model type from config") + + logger.debug(f"No `model_type` given, `model_type` inferred as: {model_type}") + + return model_type + + +def get_default_scheduler_config(): + return SCHEDULER_DEFAULT_CONFIG + + +def set_image_size(pipeline_class_name, original_config, checkpoint, image_size=None, model_type=None): + if image_size: + return image_size + + global_step = checkpoint["global_step"] if "global_step" in checkpoint else None + model_type = infer_model_type(original_config, checkpoint, model_type) + + if pipeline_class_name == "StableDiffusionUpscalePipeline": + image_size = original_config["model"]["params"]["unet_config"]["params"]["image_size"] + return image_size + + elif model_type in ["SDXL", "SDXL-Refiner", "Playground"]: + image_size = 1024 + return image_size + + elif ( + "parameterization" in original_config["model"]["params"] + and original_config["model"]["params"]["parameterization"] == "v" + ): + # NOTE: For stable diffusion 2 base one has to pass `image_size==512` + # as it relies on a brittle global step parameter here + image_size = 512 if global_step == 875000 else 768 + return image_size + + else: + image_size = 512 + return image_size + + +# Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.conv_attn_to_linear +def conv_attn_to_linear(checkpoint): + keys = list(checkpoint.keys()) + attn_keys = ["query.weight", "key.weight", "value.weight"] + for key in keys: + if 
".".join(key.split(".")[-2:]) in attn_keys: + if checkpoint[key].ndim > 2: + checkpoint[key] = checkpoint[key][:, :, 0, 0] + elif "proj_attn.weight" in key: + if checkpoint[key].ndim > 2: + checkpoint[key] = checkpoint[key][:, :, 0] + + +def create_unet_diffusers_config(original_config, image_size: int): + """ + Creates a config for the diffusers based on the config of the LDM model. + """ + if ( + "unet_config" in original_config["model"]["params"] + and original_config["model"]["params"]["unet_config"] is not None + ): + unet_params = original_config["model"]["params"]["unet_config"]["params"] + else: + unet_params = original_config["model"]["params"]["network_config"]["params"] + + vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] + block_out_channels = [unet_params["model_channels"] * mult for mult in unet_params["channel_mult"]] + + down_block_types = [] + resolution = 1 + for i in range(len(block_out_channels)): + block_type = "CrossAttnDownBlock2D" if resolution in unet_params["attention_resolutions"] else "DownBlock2D" + down_block_types.append(block_type) + if i != len(block_out_channels) - 1: + resolution *= 2 + + up_block_types = [] + for i in range(len(block_out_channels)): + block_type = "CrossAttnUpBlock2D" if resolution in unet_params["attention_resolutions"] else "UpBlock2D" + up_block_types.append(block_type) + resolution //= 2 + + if unet_params["transformer_depth"] is not None: + transformer_layers_per_block = ( + unet_params["transformer_depth"] + if isinstance(unet_params["transformer_depth"], int) + else list(unet_params["transformer_depth"]) + ) + else: + transformer_layers_per_block = 1 + + vae_scale_factor = 2 ** (len(vae_params["ch_mult"]) - 1) + + head_dim = unet_params["num_heads"] if "num_heads" in unet_params else None + use_linear_projection = ( + unet_params["use_linear_in_transformer"] if "use_linear_in_transformer" in unet_params else False + ) + if use_linear_projection: + # stable diffusion 2-base-512 and 2-768 + if head_dim is None: + head_dim_mult = unet_params["model_channels"] // unet_params["num_head_channels"] + head_dim = [head_dim_mult * c for c in list(unet_params["channel_mult"])] + + class_embed_type = None + addition_embed_type = None + addition_time_embed_dim = None + projection_class_embeddings_input_dim = None + context_dim = None + + if unet_params["context_dim"] is not None: + context_dim = ( + unet_params["context_dim"] + if isinstance(unet_params["context_dim"], int) + else unet_params["context_dim"][0] + ) + + if "num_classes" in unet_params: + if unet_params["num_classes"] == "sequential": + if context_dim in [2048, 1280]: + # SDXL + addition_embed_type = "text_time" + addition_time_embed_dim = 256 + else: + class_embed_type = "projection" + assert "adm_in_channels" in unet_params + projection_class_embeddings_input_dim = unet_params["adm_in_channels"] + + config = { + "sample_size": image_size // vae_scale_factor, + "in_channels": unet_params["in_channels"], + "down_block_types": down_block_types, + "block_out_channels": block_out_channels, + "layers_per_block": unet_params["num_res_blocks"], + "cross_attention_dim": context_dim, + "attention_head_dim": head_dim, + "use_linear_projection": use_linear_projection, + "class_embed_type": class_embed_type, + "addition_embed_type": addition_embed_type, + "addition_time_embed_dim": addition_time_embed_dim, + "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim, + "transformer_layers_per_block": 
transformer_layers_per_block, + } + + if "disable_self_attentions" in unet_params: + config["only_cross_attention"] = unet_params["disable_self_attentions"] + + if "num_classes" in unet_params and isinstance(unet_params["num_classes"], int): + config["num_class_embeds"] = unet_params["num_classes"] + + config["out_channels"] = unet_params["out_channels"] + config["up_block_types"] = up_block_types + + return config + + +def create_controlnet_diffusers_config(original_config, image_size: int): + unet_params = original_config["model"]["params"]["control_stage_config"]["params"] + diffusers_unet_config = create_unet_diffusers_config(original_config, image_size=image_size) + + controlnet_config = { + "conditioning_channels": unet_params["hint_channels"], + "in_channels": diffusers_unet_config["in_channels"], + "down_block_types": diffusers_unet_config["down_block_types"], + "block_out_channels": diffusers_unet_config["block_out_channels"], + "layers_per_block": diffusers_unet_config["layers_per_block"], + "cross_attention_dim": diffusers_unet_config["cross_attention_dim"], + "attention_head_dim": diffusers_unet_config["attention_head_dim"], + "use_linear_projection": diffusers_unet_config["use_linear_projection"], + "class_embed_type": diffusers_unet_config["class_embed_type"], + "addition_embed_type": diffusers_unet_config["addition_embed_type"], + "addition_time_embed_dim": diffusers_unet_config["addition_time_embed_dim"], + "projection_class_embeddings_input_dim": diffusers_unet_config["projection_class_embeddings_input_dim"], + "transformer_layers_per_block": diffusers_unet_config["transformer_layers_per_block"], + } + + return controlnet_config + + +def create_vae_diffusers_config(original_config, image_size, scaling_factor=None, latents_mean=None, latents_std=None): + """ + Creates a config for the diffusers based on the config of the LDM model. 
+ """ + vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] + if (scaling_factor is None) and (latents_mean is not None) and (latents_std is not None): + scaling_factor = PLAYGROUND_VAE_SCALING_FACTOR + elif (scaling_factor is None) and ("scale_factor" in original_config["model"]["params"]): + scaling_factor = original_config["model"]["params"]["scale_factor"] + elif scaling_factor is None: + scaling_factor = LDM_VAE_DEFAULT_SCALING_FACTOR + + block_out_channels = [vae_params["ch"] * mult for mult in vae_params["ch_mult"]] + down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels) + up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels) + + config = { + "sample_size": image_size, + "in_channels": vae_params["in_channels"], + "out_channels": vae_params["out_ch"], + "down_block_types": down_block_types, + "up_block_types": up_block_types, + "block_out_channels": block_out_channels, + "latent_channels": vae_params["z_channels"], + "layers_per_block": vae_params["num_res_blocks"], + "scaling_factor": scaling_factor, + } + if latents_mean is not None and latents_std is not None: + config.update({"latents_mean": latents_mean, "latents_std": latents_std}) + + return config + + +def update_unet_resnet_ldm_to_diffusers(ldm_keys, new_checkpoint, checkpoint, mapping=None): + for ldm_key in ldm_keys: + diffusers_key = ( + ldm_key.replace("in_layers.0", "norm1") + .replace("in_layers.2", "conv1") + .replace("out_layers.0", "norm2") + .replace("out_layers.3", "conv2") + .replace("emb_layers.1", "time_emb_proj") + .replace("skip_connection", "conv_shortcut") + ) + if mapping: + diffusers_key = diffusers_key.replace(mapping["old"], mapping["new"]) + new_checkpoint[diffusers_key] = checkpoint.pop(ldm_key) + + +def update_unet_attention_ldm_to_diffusers(ldm_keys, new_checkpoint, checkpoint, mapping): + for ldm_key in ldm_keys: + diffusers_key = ldm_key.replace(mapping["old"], mapping["new"]) + new_checkpoint[diffusers_key] = checkpoint.pop(ldm_key) + + +def convert_ldm_unet_checkpoint(checkpoint, config, extract_ema=False): + """ + Takes a state dict and a config, and returns a converted checkpoint. + """ + # extract state_dict for UNet + unet_state_dict = {} + keys = list(checkpoint.keys()) + unet_key = LDM_UNET_KEY + + # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA + if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: + logger.warning("Checkpoint has both EMA and non-EMA weights.") + logger.warning( + "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" + " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." + ) + for key in keys: + if key.startswith("model.diffusion_model"): + flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) + unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) + else: + if sum(k.startswith("model_ema") for k in keys) > 100: + logger.warning( + "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" + " weights (usually better for inference), please make sure to add the `--extract_ema` flag." 
+ ) + for key in keys: + if key.startswith(unet_key): + unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) + + new_checkpoint = {} + ldm_unet_keys = DIFFUSERS_TO_LDM_MAPPING["unet"]["layers"] + for diffusers_key, ldm_key in ldm_unet_keys.items(): + if ldm_key not in unet_state_dict: + continue + new_checkpoint[diffusers_key] = unet_state_dict[ldm_key] + + if ("class_embed_type" in config) and (config["class_embed_type"] in ["timestep", "projection"]): + class_embed_keys = DIFFUSERS_TO_LDM_MAPPING["unet"]["class_embed_type"] + for diffusers_key, ldm_key in class_embed_keys.items(): + new_checkpoint[diffusers_key] = unet_state_dict[ldm_key] + + if ("addition_embed_type" in config) and (config["addition_embed_type"] == "text_time"): + addition_embed_keys = DIFFUSERS_TO_LDM_MAPPING["unet"]["addition_embed_type"] + for diffusers_key, ldm_key in addition_embed_keys.items(): + new_checkpoint[diffusers_key] = unet_state_dict[ldm_key] + + # Relevant to StableDiffusionUpscalePipeline + if "num_class_embeds" in config: + if (config["num_class_embeds"] is not None) and ("label_emb.weight" in unet_state_dict): + new_checkpoint["class_embedding.weight"] = unet_state_dict["label_emb.weight"] + + # Retrieves the keys for the input blocks only + num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) + input_blocks = { + layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] + for layer_id in range(num_input_blocks) + } + + # Retrieves the keys for the middle blocks only + num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) + middle_blocks = { + layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] + for layer_id in range(num_middle_blocks) + } + + # Retrieves the keys for the output blocks only + num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) + output_blocks = { + layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] + for layer_id in range(num_output_blocks) + } + + # Down blocks + for i in range(1, num_input_blocks): + block_id = (i - 1) // (config["layers_per_block"] + 1) + layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) + + resnets = [ + key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key + ] + update_unet_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + unet_state_dict, + {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}, + ) + + if f"input_blocks.{i}.0.op.weight" in unet_state_dict: + new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( + f"input_blocks.{i}.0.op.weight" + ) + new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( + f"input_blocks.{i}.0.op.bias" + ) + + attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] + if attentions: + update_unet_attention_ldm_to_diffusers( + attentions, + new_checkpoint, + unet_state_dict, + {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}, + ) + + # Mid blocks + resnet_0 = middle_blocks[0] + attentions = middle_blocks[1] + resnet_1 = middle_blocks[2] + + update_unet_resnet_ldm_to_diffusers( + resnet_0, new_checkpoint, unet_state_dict, mapping={"old": "middle_block.0", "new": "mid_block.resnets.0"} + ) + 
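+    # middle_block is always [resnet, attention, resnet]: the call above remaps
+    # middle_block.0 -> mid_block.resnets.0, and the two calls below remap
+    # middle_block.2 -> mid_block.resnets.1 and middle_block.1 -> mid_block.attentions.0.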
update_unet_resnet_ldm_to_diffusers( + resnet_1, new_checkpoint, unet_state_dict, mapping={"old": "middle_block.2", "new": "mid_block.resnets.1"} + ) + update_unet_attention_ldm_to_diffusers( + attentions, new_checkpoint, unet_state_dict, mapping={"old": "middle_block.1", "new": "mid_block.attentions.0"} + ) + + # Up Blocks + for i in range(num_output_blocks): + block_id = i // (config["layers_per_block"] + 1) + layer_in_block_id = i % (config["layers_per_block"] + 1) + + resnets = [ + key for key in output_blocks[i] if f"output_blocks.{i}.0" in key and f"output_blocks.{i}.0.op" not in key + ] + update_unet_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + unet_state_dict, + {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}, + ) + + attentions = [ + key for key in output_blocks[i] if f"output_blocks.{i}.1" in key and f"output_blocks.{i}.1.conv" not in key + ] + if attentions: + update_unet_attention_ldm_to_diffusers( + attentions, + new_checkpoint, + unet_state_dict, + {"old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}"}, + ) + + if f"output_blocks.{i}.1.conv.weight" in unet_state_dict: + new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ + f"output_blocks.{i}.1.conv.weight" + ] + new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ + f"output_blocks.{i}.1.conv.bias" + ] + if f"output_blocks.{i}.2.conv.weight" in unet_state_dict: + new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ + f"output_blocks.{i}.2.conv.weight" + ] + new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ + f"output_blocks.{i}.2.conv.bias" + ] + + return new_checkpoint + + +def convert_controlnet_checkpoint( + checkpoint, + config, +): + # Some controlnet ckpt files are distributed independently from the rest of the + # model components i.e. 
https://huggingface.co/thibaud/controlnet-sd21/ + if "time_embed.0.weight" in checkpoint: + controlnet_state_dict = checkpoint + + else: + controlnet_state_dict = {} + keys = list(checkpoint.keys()) + controlnet_key = LDM_CONTROLNET_KEY + for key in keys: + if key.startswith(controlnet_key): + controlnet_state_dict[key.replace(controlnet_key, "")] = checkpoint.pop(key) + + new_checkpoint = {} + ldm_controlnet_keys = DIFFUSERS_TO_LDM_MAPPING["controlnet"]["layers"] + for diffusers_key, ldm_key in ldm_controlnet_keys.items(): + if ldm_key not in controlnet_state_dict: + continue + new_checkpoint[diffusers_key] = controlnet_state_dict[ldm_key] + + # Retrieves the keys for the input blocks only + num_input_blocks = len( + {".".join(layer.split(".")[:2]) for layer in controlnet_state_dict if "input_blocks" in layer} + ) + input_blocks = { + layer_id: [key for key in controlnet_state_dict if f"input_blocks.{layer_id}" in key] + for layer_id in range(num_input_blocks) + } + + # Down blocks + for i in range(1, num_input_blocks): + block_id = (i - 1) // (config["layers_per_block"] + 1) + layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) + + resnets = [ + key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key + ] + update_unet_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + controlnet_state_dict, + {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}, + ) + + if f"input_blocks.{i}.0.op.weight" in controlnet_state_dict: + new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = controlnet_state_dict.pop( + f"input_blocks.{i}.0.op.weight" + ) + new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = controlnet_state_dict.pop( + f"input_blocks.{i}.0.op.bias" + ) + + attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] + if attentions: + update_unet_attention_ldm_to_diffusers( + attentions, + new_checkpoint, + controlnet_state_dict, + {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}, + ) + + # controlnet down blocks + for i in range(num_input_blocks): + new_checkpoint[f"controlnet_down_blocks.{i}.weight"] = controlnet_state_dict.pop(f"zero_convs.{i}.0.weight") + new_checkpoint[f"controlnet_down_blocks.{i}.bias"] = controlnet_state_dict.pop(f"zero_convs.{i}.0.bias") + + # Retrieves the keys for the middle blocks only + num_middle_blocks = len( + {".".join(layer.split(".")[:2]) for layer in controlnet_state_dict if "middle_block" in layer} + ) + middle_blocks = { + layer_id: [key for key in controlnet_state_dict if f"middle_block.{layer_id}" in key] + for layer_id in range(num_middle_blocks) + } + if middle_blocks: + resnet_0 = middle_blocks[0] + attentions = middle_blocks[1] + resnet_1 = middle_blocks[2] + + update_unet_resnet_ldm_to_diffusers( + resnet_0, + new_checkpoint, + controlnet_state_dict, + mapping={"old": "middle_block.0", "new": "mid_block.resnets.0"}, + ) + update_unet_resnet_ldm_to_diffusers( + resnet_1, + new_checkpoint, + controlnet_state_dict, + mapping={"old": "middle_block.2", "new": "mid_block.resnets.1"}, + ) + update_unet_attention_ldm_to_diffusers( + attentions, + new_checkpoint, + controlnet_state_dict, + mapping={"old": "middle_block.1", "new": "mid_block.attentions.0"}, + ) + + # mid block + new_checkpoint["controlnet_mid_block.weight"] = controlnet_state_dict.pop("middle_block_out.0.weight") + new_checkpoint["controlnet_mid_block.bias"] = 
controlnet_state_dict.pop("middle_block_out.0.bias") + + # controlnet cond embedding blocks + cond_embedding_blocks = { + ".".join(layer.split(".")[:2]) + for layer in controlnet_state_dict + if "input_hint_block" in layer and ("input_hint_block.0" not in layer) and ("input_hint_block.14" not in layer) + } + num_cond_embedding_blocks = len(cond_embedding_blocks) + + for idx in range(1, num_cond_embedding_blocks + 1): + diffusers_idx = idx - 1 + cond_block_id = 2 * idx + + new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_idx}.weight"] = controlnet_state_dict.pop( + f"input_hint_block.{cond_block_id}.weight" + ) + new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_idx}.bias"] = controlnet_state_dict.pop( + f"input_hint_block.{cond_block_id}.bias" + ) + + return new_checkpoint + + +def create_diffusers_controlnet_model_from_ldm( + pipeline_class_name, original_config, checkpoint, upcast_attention=False, image_size=None, torch_dtype=None +): + # import here to avoid circular imports + from ..models import ControlNetModel + + image_size = set_image_size(pipeline_class_name, original_config, checkpoint, image_size=image_size) + + diffusers_config = create_controlnet_diffusers_config(original_config, image_size=image_size) + diffusers_config["upcast_attention"] = upcast_attention + + diffusers_format_controlnet_checkpoint = convert_controlnet_checkpoint(checkpoint, diffusers_config) + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + controlnet = ControlNetModel(**diffusers_config) + + if is_accelerate_available(): + from ..models.modeling_utils import load_model_dict_into_meta + + unexpected_keys = load_model_dict_into_meta( + controlnet, diffusers_format_controlnet_checkpoint, dtype=torch_dtype + ) + if controlnet._keys_to_ignore_on_load_unexpected is not None: + for pat in controlnet._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint were not used when initializing {controlnet.__name__}: \n {[', '.join(unexpected_keys)]}" + ) + else: + controlnet.load_state_dict(diffusers_format_controlnet_checkpoint) + + if torch_dtype is not None: + controlnet = controlnet.to(torch_dtype) + + return {"controlnet": controlnet} + + +def update_vae_resnet_ldm_to_diffusers(keys, new_checkpoint, checkpoint, mapping): + for ldm_key in keys: + diffusers_key = ldm_key.replace(mapping["old"], mapping["new"]).replace("nin_shortcut", "conv_shortcut") + new_checkpoint[diffusers_key] = checkpoint.pop(ldm_key) + + +def update_vae_attentions_ldm_to_diffusers(keys, new_checkpoint, checkpoint, mapping): + for ldm_key in keys: + diffusers_key = ( + ldm_key.replace(mapping["old"], mapping["new"]) + .replace("norm.weight", "group_norm.weight") + .replace("norm.bias", "group_norm.bias") + .replace("q.weight", "to_q.weight") + .replace("q.bias", "to_q.bias") + .replace("k.weight", "to_k.weight") + .replace("k.bias", "to_k.bias") + .replace("v.weight", "to_v.weight") + .replace("v.bias", "to_v.bias") + .replace("proj_out.weight", "to_out.0.weight") + .replace("proj_out.bias", "to_out.0.bias") + ) + new_checkpoint[diffusers_key] = checkpoint.pop(ldm_key) + + # proj_attn.weight has to be converted from conv 1D to linear + shape = new_checkpoint[diffusers_key].shape + + if len(shape) == 3: + new_checkpoint[diffusers_key] = new_checkpoint[diffusers_key][:, :, 0] + elif len(shape) == 4: + new_checkpoint[diffusers_key] = 
new_checkpoint[diffusers_key][:, :, 0, 0] + + +def convert_ldm_vae_checkpoint(checkpoint, config): + # extract state dict for VAE + # remove the LDM_VAE_KEY prefix from the ldm checkpoint keys so that it is easier to map them to diffusers keys + vae_state_dict = {} + keys = list(checkpoint.keys()) + vae_key = LDM_VAE_KEY if any(k.startswith(LDM_VAE_KEY) for k in keys) else "" + for key in keys: + if key.startswith(vae_key): + vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key) + + new_checkpoint = {} + vae_diffusers_ldm_map = DIFFUSERS_TO_LDM_MAPPING["vae"] + for diffusers_key, ldm_key in vae_diffusers_ldm_map.items(): + if ldm_key not in vae_state_dict: + continue + new_checkpoint[diffusers_key] = vae_state_dict[ldm_key] + + # Retrieves the keys for the encoder down blocks only + num_down_blocks = len(config["down_block_types"]) + down_blocks = { + layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) + } + + for i in range(num_down_blocks): + resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] + update_vae_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + vae_state_dict, + mapping={"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}, + ) + if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: + new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( + f"encoder.down.{i}.downsample.conv.weight" + ) + new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( + f"encoder.down.{i}.downsample.conv.bias" + ) + + mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] + num_mid_res_blocks = 2 + for i in range(1, num_mid_res_blocks + 1): + resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] + update_vae_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + vae_state_dict, + mapping={"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}, + ) + + mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] + update_vae_attentions_ldm_to_diffusers( + mid_attentions, new_checkpoint, vae_state_dict, mapping={"old": "mid.attn_1", "new": "mid_block.attentions.0"} + ) + + # Retrieves the keys for the decoder up blocks only + num_up_blocks = len(config["up_block_types"]) + up_blocks = { + layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) + } + + for i in range(num_up_blocks): + block_id = num_up_blocks - 1 - i + resnets = [ + key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key + ] + update_vae_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + vae_state_dict, + mapping={"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}, + ) + if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: + new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ + f"decoder.up.{block_id}.upsample.conv.weight" + ] + new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ + f"decoder.up.{block_id}.upsample.conv.bias" + ] + + mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] + num_mid_res_blocks = 2 + for i in range(1, num_mid_res_blocks + 1): + resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] + update_vae_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + vae_state_dict, + mapping={"old": f"mid.block_{i}", "new": 
f"mid_block.resnets.{i - 1}"}, + ) + + mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] + update_vae_attentions_ldm_to_diffusers( + mid_attentions, new_checkpoint, vae_state_dict, mapping={"old": "mid.attn_1", "new": "mid_block.attentions.0"} + ) + conv_attn_to_linear(new_checkpoint) + + return new_checkpoint + + +def create_text_encoder_from_ldm_clip_checkpoint(config_name, checkpoint, local_files_only=False, torch_dtype=None): + try: + config = CLIPTextConfig.from_pretrained(config_name, local_files_only=local_files_only) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: 'openai/clip-vit-large-patch14'." + ) + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + text_model = CLIPTextModel(config) + + keys = list(checkpoint.keys()) + text_model_dict = {} + + remove_prefixes = LDM_CLIP_PREFIX_TO_REMOVE + + for key in keys: + for prefix in remove_prefixes: + if key.startswith(prefix): + diffusers_key = key.replace(prefix, "") + text_model_dict[diffusers_key] = checkpoint[key] + + if is_accelerate_available(): + from ..models.modeling_utils import load_model_dict_into_meta + + unexpected_keys = load_model_dict_into_meta(text_model, text_model_dict, dtype=torch_dtype) + if text_model._keys_to_ignore_on_load_unexpected is not None: + for pat in text_model._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint were not used when initializing {text_model.__class__.__name__}: \n {[', '.join(unexpected_keys)]}" + ) + else: + if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings.position_ids)): + text_model_dict.pop("text_model.embeddings.position_ids", None) + + text_model.load_state_dict(text_model_dict) + + if torch_dtype is not None: + text_model = text_model.to(torch_dtype) + + return text_model + + +def create_text_encoder_from_open_clip_checkpoint( + config_name, + checkpoint, + prefix="cond_stage_model.model.", + has_projection=False, + local_files_only=False, + torch_dtype=None, + **config_kwargs, +): + try: + config = CLIPTextConfig.from_pretrained(config_name, **config_kwargs, local_files_only=local_files_only) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: '{config_name}'." 
+ ) + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + text_model = CLIPTextModelWithProjection(config) if has_projection else CLIPTextModel(config) + + text_model_dict = {} + text_proj_key = prefix + "text_projection" + text_proj_dim = ( + int(checkpoint[text_proj_key].shape[0]) if text_proj_key in checkpoint else LDM_OPEN_CLIP_TEXT_PROJECTION_DIM + ) + text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids") + + keys = list(checkpoint.keys()) + keys_to_ignore = SD_2_TEXT_ENCODER_KEYS_TO_IGNORE + + openclip_diffusers_ldm_map = DIFFUSERS_TO_LDM_MAPPING["openclip"]["layers"] + for diffusers_key, ldm_key in openclip_diffusers_ldm_map.items(): + ldm_key = prefix + ldm_key + if ldm_key not in checkpoint: + continue + if ldm_key in keys_to_ignore: + continue + if ldm_key.endswith("text_projection"): + text_model_dict[diffusers_key] = checkpoint[ldm_key].T.contiguous() + else: + text_model_dict[diffusers_key] = checkpoint[ldm_key] + + for key in keys: + if key in keys_to_ignore: + continue + + if not key.startswith(prefix + "transformer."): + continue + + diffusers_key = key.replace(prefix + "transformer.", "") + transformer_diffusers_to_ldm_map = DIFFUSERS_TO_LDM_MAPPING["openclip"]["transformer"] + for new_key, old_key in transformer_diffusers_to_ldm_map.items(): + diffusers_key = ( + diffusers_key.replace(old_key, new_key).replace(".in_proj_weight", "").replace(".in_proj_bias", "") + ) + + if key.endswith(".in_proj_weight"): + weight_value = checkpoint[key] + + text_model_dict[diffusers_key + ".q_proj.weight"] = weight_value[:text_proj_dim, :] + text_model_dict[diffusers_key + ".k_proj.weight"] = weight_value[text_proj_dim : text_proj_dim * 2, :] + text_model_dict[diffusers_key + ".v_proj.weight"] = weight_value[text_proj_dim * 2 :, :] + + elif key.endswith(".in_proj_bias"): + weight_value = checkpoint[key] + text_model_dict[diffusers_key + ".q_proj.bias"] = weight_value[:text_proj_dim] + text_model_dict[diffusers_key + ".k_proj.bias"] = weight_value[text_proj_dim : text_proj_dim * 2] + text_model_dict[diffusers_key + ".v_proj.bias"] = weight_value[text_proj_dim * 2 :] + else: + text_model_dict[diffusers_key] = checkpoint[key] + + if is_accelerate_available(): + from ..models.modeling_utils import load_model_dict_into_meta + + unexpected_keys = load_model_dict_into_meta(text_model, text_model_dict, dtype=torch_dtype) + if text_model._keys_to_ignore_on_load_unexpected is not None: + for pat in text_model._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint were not used when initializing {text_model.__class__.__name__}: \n {[', '.join(unexpected_keys)]}" + ) + + else: + if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings.position_ids)): + text_model_dict.pop("text_model.embeddings.position_ids", None) + + text_model.load_state_dict(text_model_dict) + + if torch_dtype is not None: + text_model = text_model.to(torch_dtype) + + return text_model + + +def create_diffusers_unet_model_from_ldm( + pipeline_class_name, + original_config, + checkpoint, + num_in_channels=None, + upcast_attention=None, + extract_ema=False, + image_size=None, + torch_dtype=None, + model_type=None, +): + from ..models import UNet2DConditionModel + + if num_in_channels is None: + if pipeline_class_name in [ + "StableDiffusionInpaintPipeline", + 
"StableDiffusionControlNetInpaintPipeline", + "StableDiffusionXLInpaintPipeline", + "StableDiffusionXLControlNetInpaintPipeline", + ]: + num_in_channels = 9 + + elif pipeline_class_name == "StableDiffusionUpscalePipeline": + num_in_channels = 7 + + else: + num_in_channels = 4 + + image_size = set_image_size( + pipeline_class_name, original_config, checkpoint, image_size=image_size, model_type=model_type + ) + unet_config = create_unet_diffusers_config(original_config, image_size=image_size) + unet_config["in_channels"] = num_in_channels + if upcast_attention is not None: + unet_config["upcast_attention"] = upcast_attention + + diffusers_format_unet_checkpoint = convert_ldm_unet_checkpoint(checkpoint, unet_config, extract_ema=extract_ema) + ctx = init_empty_weights if is_accelerate_available() else nullcontext + + with ctx(): + unet = UNet2DConditionModel(**unet_config) + + if is_accelerate_available(): + from ..models.modeling_utils import load_model_dict_into_meta + + unexpected_keys = load_model_dict_into_meta(unet, diffusers_format_unet_checkpoint, dtype=torch_dtype) + if unet._keys_to_ignore_on_load_unexpected is not None: + for pat in unet._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint were not used when initializing {unet.__name__}: \n {[', '.join(unexpected_keys)]}" + ) + else: + unet.load_state_dict(diffusers_format_unet_checkpoint) + + if torch_dtype is not None: + unet = unet.to(torch_dtype) + + return {"unet": unet} + + +def create_diffusers_vae_model_from_ldm( + pipeline_class_name, + original_config, + checkpoint, + image_size=None, + scaling_factor=None, + torch_dtype=None, + model_type=None, +): + # import here to avoid circular imports + from ..models import AutoencoderKL + + image_size = set_image_size( + pipeline_class_name, original_config, checkpoint, image_size=image_size, model_type=model_type + ) + model_type = infer_model_type(original_config, checkpoint, model_type) + + if model_type == "Playground": + edm_mean = ( + checkpoint["edm_mean"].to(dtype=torch_dtype).tolist() if torch_dtype else checkpoint["edm_mean"].tolist() + ) + edm_std = ( + checkpoint["edm_std"].to(dtype=torch_dtype).tolist() if torch_dtype else checkpoint["edm_std"].tolist() + ) + else: + edm_mean = None + edm_std = None + + vae_config = create_vae_diffusers_config( + original_config, + image_size=image_size, + scaling_factor=scaling_factor, + latents_mean=edm_mean, + latents_std=edm_std, + ) + diffusers_format_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config) + ctx = init_empty_weights if is_accelerate_available() else nullcontext + + with ctx(): + vae = AutoencoderKL(**vae_config) + + if is_accelerate_available(): + from ..models.modeling_utils import load_model_dict_into_meta + + unexpected_keys = load_model_dict_into_meta(vae, diffusers_format_vae_checkpoint, dtype=torch_dtype) + if vae._keys_to_ignore_on_load_unexpected is not None: + for pat in vae._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint were not used when initializing {vae.__name__}: \n {[', '.join(unexpected_keys)]}" + ) + else: + vae.load_state_dict(diffusers_format_vae_checkpoint) + + if torch_dtype is not None: + vae = vae.to(torch_dtype) + + return {"vae": vae} + + +def 
create_text_encoders_and_tokenizers_from_ldm( + original_config, + checkpoint, + model_type=None, + local_files_only=False, + torch_dtype=None, +): + model_type = infer_model_type(original_config, checkpoint=checkpoint, model_type=model_type) + + if model_type == "FrozenOpenCLIPEmbedder": + config_name = "stabilityai/stable-diffusion-2" + config_kwargs = {"subfolder": "text_encoder"} + + try: + text_encoder = create_text_encoder_from_open_clip_checkpoint( + config_name, checkpoint, local_files_only=local_files_only, torch_dtype=torch_dtype, **config_kwargs + ) + tokenizer = CLIPTokenizer.from_pretrained( + config_name, subfolder="tokenizer", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the text_encoder in the following path: '{config_name}'." + ) + else: + return {"text_encoder": text_encoder, "tokenizer": tokenizer} + + elif model_type == "FrozenCLIPEmbedder": + try: + config_name = "openai/clip-vit-large-patch14" + text_encoder = create_text_encoder_from_ldm_clip_checkpoint( + config_name, + checkpoint, + local_files_only=local_files_only, + torch_dtype=torch_dtype, + ) + tokenizer = CLIPTokenizer.from_pretrained(config_name, local_files_only=local_files_only) + + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: '{config_name}'." + ) + else: + return {"text_encoder": text_encoder, "tokenizer": tokenizer} + + elif model_type == "SDXL-Refiner": + config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" + config_kwargs = {"projection_dim": 1280} + prefix = "conditioner.embedders.0.model." + + try: + tokenizer_2 = CLIPTokenizer.from_pretrained(config_name, pad_token="!", local_files_only=local_files_only) + text_encoder_2 = create_text_encoder_from_open_clip_checkpoint( + config_name, + checkpoint, + prefix=prefix, + has_projection=True, + local_files_only=local_files_only, + torch_dtype=torch_dtype, + **config_kwargs, + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the text_encoder_2 and tokenizer_2 in the following path: {config_name} with `pad_token` set to '!'." + ) + + else: + return { + "text_encoder": None, + "tokenizer": None, + "tokenizer_2": tokenizer_2, + "text_encoder_2": text_encoder_2, + } + + elif model_type in ["SDXL", "Playground"]: + try: + config_name = "openai/clip-vit-large-patch14" + tokenizer = CLIPTokenizer.from_pretrained(config_name, local_files_only=local_files_only) + text_encoder = create_text_encoder_from_ldm_clip_checkpoint( + config_name, checkpoint, local_files_only=local_files_only, torch_dtype=torch_dtype + ) + + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the text_encoder and tokenizer in the following path: 'openai/clip-vit-large-patch14'." + ) + + try: + config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" + config_kwargs = {"projection_dim": 1280} + prefix = "conditioner.embedders.1.model." 
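+            # The second SDXL text encoder is an OpenCLIP ViT-bigG model whose weights live under the
+            # `conditioner.embedders.1.model.` prefix of the single-file checkpoint; the tokenizer is
+            # created with `pad_token="!"`, the token OpenCLIP uses for padding.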
+ tokenizer_2 = CLIPTokenizer.from_pretrained(config_name, pad_token="!", local_files_only=local_files_only) + text_encoder_2 = create_text_encoder_from_open_clip_checkpoint( + config_name, + checkpoint, + prefix=prefix, + has_projection=True, + local_files_only=local_files_only, + torch_dtype=torch_dtype, + **config_kwargs, + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the text_encoder_2 and tokenizer_2 in the following path: {config_name} with `pad_token` set to '!'." + ) + + return { + "tokenizer": tokenizer, + "text_encoder": text_encoder, + "tokenizer_2": tokenizer_2, + "text_encoder_2": text_encoder_2, + } + + return + + +def create_scheduler_from_ldm( + pipeline_class_name, + original_config, + checkpoint, + prediction_type=None, + scheduler_type="ddim", + model_type=None, +): + scheduler_config = get_default_scheduler_config() + model_type = infer_model_type(original_config, checkpoint=checkpoint, model_type=model_type) + + global_step = checkpoint["global_step"] if "global_step" in checkpoint else None + + num_train_timesteps = getattr(original_config["model"]["params"], "timesteps", None) or 1000 + scheduler_config["num_train_timesteps"] = num_train_timesteps + + if ( + "parameterization" in original_config["model"]["params"] + and original_config["model"]["params"]["parameterization"] == "v" + ): + if prediction_type is None: + # NOTE: For stable diffusion 2 base it is recommended to pass `prediction_type=="epsilon"` + # as it relies on a brittle global step parameter here + prediction_type = "epsilon" if global_step == 875000 else "v_prediction" + + else: + prediction_type = prediction_type or "epsilon" + + scheduler_config["prediction_type"] = prediction_type + + if model_type in ["SDXL", "SDXL-Refiner"]: + scheduler_type = "euler" + elif model_type == "Playground": + scheduler_type = "edm_dpm_solver_multistep" + else: + beta_start = original_config["model"]["params"].get("linear_start", 0.02) + beta_end = original_config["model"]["params"].get("linear_end", 0.085) + scheduler_config["beta_start"] = beta_start + scheduler_config["beta_end"] = beta_end + scheduler_config["beta_schedule"] = "scaled_linear" + scheduler_config["clip_sample"] = False + scheduler_config["set_alpha_to_one"] = False + + if scheduler_type == "pndm": + scheduler_config["skip_prk_steps"] = True + scheduler = PNDMScheduler.from_config(scheduler_config) + + elif scheduler_type == "lms": + scheduler = LMSDiscreteScheduler.from_config(scheduler_config) + + elif scheduler_type == "heun": + scheduler = HeunDiscreteScheduler.from_config(scheduler_config) + + elif scheduler_type == "euler": + scheduler = EulerDiscreteScheduler.from_config(scheduler_config) + + elif scheduler_type == "euler-ancestral": + scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler_config) + + elif scheduler_type == "dpm": + scheduler = DPMSolverMultistepScheduler.from_config(scheduler_config) + + elif scheduler_type == "ddim": + scheduler = DDIMScheduler.from_config(scheduler_config) + + elif scheduler_type == "edm_dpm_solver_multistep": + scheduler_config = { + "algorithm_type": "dpmsolver++", + "dynamic_thresholding_ratio": 0.995, + "euler_at_final": False, + "final_sigmas_type": "zero", + "lower_order_final": True, + "num_train_timesteps": 1000, + "prediction_type": "epsilon", + "rho": 7.0, + "sample_max_value": 1.0, + "sigma_data": 0.5, + "sigma_max": 80.0, + "sigma_min": 0.002, + "solver_order": 2, + "solver_type": "midpoint", + 
"thresholding": False, + } + scheduler = EDMDPMSolverMultistepScheduler(**scheduler_config) + + else: + raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!") + + if pipeline_class_name == "StableDiffusionUpscalePipeline": + scheduler = DDIMScheduler.from_pretrained("stabilityai/stable-diffusion-x4-upscaler", subfolder="scheduler") + low_res_scheduler = DDPMScheduler.from_pretrained( + "stabilityai/stable-diffusion-x4-upscaler", subfolder="low_res_scheduler" + ) + + return { + "scheduler": scheduler, + "low_res_scheduler": low_res_scheduler, + } + + return {"scheduler": scheduler} diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/textual_inversion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/textual_inversion.py new file mode 100644 index 000000000..aaaf4b68b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/textual_inversion.py @@ -0,0 +1,562 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict, List, Optional, Union + +import safetensors +import torch +from huggingface_hub.utils import validate_hf_hub_args +from torch import nn + +from ..utils import _get_model_file, is_accelerate_available, is_transformers_available, logging + + +if is_transformers_available(): + from transformers import PreTrainedModel, PreTrainedTokenizer + +if is_accelerate_available(): + from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module + +logger = logging.get_logger(__name__) + +TEXT_INVERSION_NAME = "learned_embeds.bin" +TEXT_INVERSION_NAME_SAFE = "learned_embeds.safetensors" + + +@validate_hf_hub_args +def load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs): + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + subfolder = kwargs.pop("subfolder", None) + weight_name = kwargs.pop("weight_name", None) + use_safetensors = kwargs.pop("use_safetensors", None) + + allow_pickle = False + if use_safetensors is None: + use_safetensors = True + allow_pickle = True + + user_agent = { + "file_type": "text_inversion", + "framework": "pytorch", + } + state_dicts = [] + for pretrained_model_name_or_path in pretrained_model_name_or_paths: + if not isinstance(pretrained_model_name_or_path, (dict, torch.Tensor)): + # 3.1. 
Load textual inversion file + model_file = None + + # Let's first try to load .safetensors weights + if (use_safetensors and weight_name is None) or ( + weight_name is not None and weight_name.endswith(".safetensors") + ): + try: + model_file = _get_model_file( + pretrained_model_name_or_path, + weights_name=weight_name or TEXT_INVERSION_NAME_SAFE, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + state_dict = safetensors.torch.load_file(model_file, device="cpu") + except Exception as e: + if not allow_pickle: + raise e + + model_file = None + + if model_file is None: + model_file = _get_model_file( + pretrained_model_name_or_path, + weights_name=weight_name or TEXT_INVERSION_NAME, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + state_dict = torch.load(model_file, map_location="cpu") + else: + state_dict = pretrained_model_name_or_path + + state_dicts.append(state_dict) + + return state_dicts + + +class TextualInversionLoaderMixin: + r""" + Load Textual Inversion tokens and embeddings to the tokenizer and text encoder. + """ + + def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"): # noqa: F821 + r""" + Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to + be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual + inversion token or if the textual inversion token is a single vector, the input prompt is returned. + + Parameters: + prompt (`str` or list of `str`): + The prompt or prompts to guide the image generation. + tokenizer (`PreTrainedTokenizer`): + The tokenizer responsible for encoding the prompt into input tokens. + + Returns: + `str` or list of `str`: The converted prompt + """ + if not isinstance(prompt, List): + prompts = [prompt] + else: + prompts = prompt + + prompts = [self._maybe_convert_prompt(p, tokenizer) for p in prompts] + + if not isinstance(prompt, List): + return prompts[0] + + return prompts + + def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"): # noqa: F821 + r""" + Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds + to a multi-vector textual inversion embedding, this function will process the prompt so that the special token + is replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual + inversion token or a textual inversion token that is a single vector, the input prompt is simply returned. + + Parameters: + prompt (`str`): + The prompt to guide the image generation. + tokenizer (`PreTrainedTokenizer`): + The tokenizer responsible for encoding the prompt into input tokens. 
+ + Returns: + `str`: The converted prompt + """ + tokens = tokenizer.tokenize(prompt) + unique_tokens = set(tokens) + for token in unique_tokens: + if token in tokenizer.added_tokens_encoder: + replacement = token + i = 1 + while f"{token}_{i}" in tokenizer.added_tokens_encoder: + replacement += f" {token}_{i}" + i += 1 + + prompt = prompt.replace(token, replacement) + + return prompt + + def _check_text_inv_inputs(self, tokenizer, text_encoder, pretrained_model_name_or_paths, tokens): + if tokenizer is None: + raise ValueError( + f"{self.__class__.__name__} requires `self.tokenizer` or passing a `tokenizer` of type `PreTrainedTokenizer` for calling" + f" `{self.load_textual_inversion.__name__}`" + ) + + if text_encoder is None: + raise ValueError( + f"{self.__class__.__name__} requires `self.text_encoder` or passing a `text_encoder` of type `PreTrainedModel` for calling" + f" `{self.load_textual_inversion.__name__}`" + ) + + if len(pretrained_model_name_or_paths) > 1 and len(pretrained_model_name_or_paths) != len(tokens): + raise ValueError( + f"You have passed a list of models of length {len(pretrained_model_name_or_paths)}, and list of tokens of length {len(tokens)} " + f"Make sure both lists have the same length." + ) + + valid_tokens = [t for t in tokens if t is not None] + if len(set(valid_tokens)) < len(valid_tokens): + raise ValueError(f"You have passed a list of tokens that contains duplicates: {tokens}") + + @staticmethod + def _retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer): + all_tokens = [] + all_embeddings = [] + for state_dict, token in zip(state_dicts, tokens): + if isinstance(state_dict, torch.Tensor): + if token is None: + raise ValueError( + "You are trying to load a textual inversion embedding that has been saved as a PyTorch tensor. Make sure to pass the name of the corresponding token in this case: `token=...`." + ) + loaded_token = token + embedding = state_dict + elif len(state_dict) == 1: + # diffusers + loaded_token, embedding = next(iter(state_dict.items())) + elif "string_to_param" in state_dict: + # A1111 + loaded_token = state_dict["name"] + embedding = state_dict["string_to_param"]["*"] + else: + raise ValueError( + f"Loaded state dictionary is incorrect: {state_dict}. \n\n" + "Please verify that the loaded state dictionary of the textual embedding either only has a single key or includes the `string_to_param`" + " input key." + ) + + if token is not None and loaded_token != token: + logger.info(f"The loaded token: {loaded_token} is overwritten by the passed token {token}.") + else: + token = loaded_token + + if token in tokenizer.get_vocab(): + raise ValueError( + f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder." + ) + + all_tokens.append(token) + all_embeddings.append(embedding) + + return all_tokens, all_embeddings + + @staticmethod + def _extend_tokens_and_embeddings(tokens, embeddings, tokenizer): + all_tokens = [] + all_embeddings = [] + + for embedding, token in zip(embeddings, tokens): + if f"{token}_1" in tokenizer.get_vocab(): + multi_vector_tokens = [token] + i = 1 + while f"{token}_{i}" in tokenizer.added_tokens_encoder: + multi_vector_tokens.append(f"{token}_{i}") + i += 1 + + raise ValueError( + f"Multi-vector Token {multi_vector_tokens} already in tokenizer vocabulary. Please choose a different token name or remove the {multi_vector_tokens} and embedding from the tokenizer and text encoder." 
+ ) + + is_multi_vector = len(embedding.shape) > 1 and embedding.shape[0] > 1 + if is_multi_vector: + all_tokens += [token] + [f"{token}_{i}" for i in range(1, embedding.shape[0])] + all_embeddings += [e for e in embedding] # noqa: C416 + else: + all_tokens += [token] + all_embeddings += [embedding[0]] if len(embedding.shape) > 1 else [embedding] + + return all_tokens, all_embeddings + + @validate_hf_hub_args + def load_textual_inversion( + self, + pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]], + token: Optional[Union[str, List[str]]] = None, + tokenizer: Optional["PreTrainedTokenizer"] = None, # noqa: F821 + text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821 + **kwargs, + ): + r""" + Load Textual Inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and + Automatic1111 formats are supported). + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`): + Can be either one of the following or a list of them: + + - A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a + pretrained model hosted on the Hub. + - A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual + inversion weights. + - A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights. + - A [torch state + dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). + + token (`str` or `List[str]`, *optional*): + Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a + list, then `token` must also be a list of equal length. + text_encoder ([`~transformers.CLIPTextModel`], *optional*): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + If not specified, function will take self.tokenizer. + tokenizer ([`~transformers.CLIPTokenizer`], *optional*): + A `CLIPTokenizer` to tokenize text. If not specified, function will take self.tokenizer. + weight_name (`str`, *optional*): + Name of a custom weight file. This should be used when: + + - The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight + name such as `text_inv.bin`. + - The saved textual inversion file is in the Automatic1111 format. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. 
+ token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you're downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + + Example: + + To load a Textual Inversion embedding vector in 🤗 Diffusers format: + + ```py + from diffusers import StableDiffusionPipeline + import torch + + model_id = "runwayml/stable-diffusion-v1-5" + pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") + + pipe.load_textual_inversion("sd-concepts-library/cat-toy") + + prompt = "A backpack" + + image = pipe(prompt, num_inference_steps=50).images[0] + image.save("cat-backpack.png") + ``` + + To load a Textual Inversion embedding vector in Automatic1111 format, make sure to download the vector first + (for example from [civitAI](https://civitai.com/models/3036?modelVersionId=9857)) and then load the vector + locally: + + ```py + from diffusers import StableDiffusionPipeline + import torch + + model_id = "runwayml/stable-diffusion-v1-5" + pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") + + pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2") + + prompt = "charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details." + + image = pipe(prompt, num_inference_steps=50).images[0] + image.save("character.png") + ``` + + """ + # 1. Set correct tokenizer and text encoder + tokenizer = tokenizer or getattr(self, "tokenizer", None) + text_encoder = text_encoder or getattr(self, "text_encoder", None) + + # 2. Normalize inputs + pretrained_model_name_or_paths = ( + [pretrained_model_name_or_path] + if not isinstance(pretrained_model_name_or_path, list) + else pretrained_model_name_or_path + ) + tokens = [token] if not isinstance(token, list) else token + if tokens[0] is None: + tokens = tokens * len(pretrained_model_name_or_paths) + + # 3. Check inputs + self._check_text_inv_inputs(tokenizer, text_encoder, pretrained_model_name_or_paths, tokens) + + # 4. Load state dicts of textual embeddings + state_dicts = load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs) + + # 4.1 Handle the special case when state_dict is a tensor that contains n embeddings for n tokens + if len(tokens) > 1 and len(state_dicts) == 1: + if isinstance(state_dicts[0], torch.Tensor): + state_dicts = list(state_dicts[0]) + if len(tokens) != len(state_dicts): + raise ValueError( + f"You have passed a state_dict contains {len(state_dicts)} embeddings, and list of tokens of length {len(tokens)} " + f"Make sure both have the same length." + ) + + # 4. Retrieve tokens and embeddings + tokens, embeddings = self._retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer) + + # 5. 
Extend tokens and embeddings for multi vector + tokens, embeddings = self._extend_tokens_and_embeddings(tokens, embeddings, tokenizer) + + # 6. Make sure all embeddings have the correct size + expected_emb_dim = text_encoder.get_input_embeddings().weight.shape[-1] + if any(expected_emb_dim != emb.shape[-1] for emb in embeddings): + raise ValueError( + "Loaded embeddings are of incorrect shape. Expected each textual inversion embedding " + "to be of shape {input_embeddings.shape[-1]}, but are {embeddings.shape[-1]} " + ) + + # 7. Now we can be sure that loading the embedding matrix works + # < Unsafe code: + + # 7.1 Offload all hooks in case the pipeline was cpu offloaded before make sure, we offload and onload again + is_model_cpu_offload = False + is_sequential_cpu_offload = False + for _, component in self.components.items(): + if isinstance(component, nn.Module): + if hasattr(component, "_hf_hook"): + is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload) + is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook) + logger.info( + "Accelerate hooks detected. Since you have called `load_textual_inversion()`, the previous hooks will be first removed. Then the textual inversion parameters will be loaded and the hooks will be applied again." + ) + remove_hook_from_module(component, recurse=is_sequential_cpu_offload) + + # 7.2 save expected device and dtype + device = text_encoder.device + dtype = text_encoder.dtype + + # 7.3 Increase token embedding matrix + text_encoder.resize_token_embeddings(len(tokenizer) + len(tokens)) + input_embeddings = text_encoder.get_input_embeddings().weight + + # 7.4 Load token and embedding + for token, embedding in zip(tokens, embeddings): + # add tokens and get ids + tokenizer.add_tokens(token) + token_id = tokenizer.convert_tokens_to_ids(token) + input_embeddings.data[token_id] = embedding + logger.info(f"Loaded textual inversion embedding for {token}.") + + input_embeddings.to(dtype=dtype, device=device) + + # 7.5 Offload the model again + if is_model_cpu_offload: + self.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + self.enable_sequential_cpu_offload() + + # / Unsafe Code > + + def unload_textual_inversion( + self, + tokens: Optional[Union[str, List[str]]] = None, + tokenizer: Optional["PreTrainedTokenizer"] = None, + text_encoder: Optional["PreTrainedModel"] = None, + ): + r""" + Unload Textual Inversion embeddings from the text encoder of [`StableDiffusionPipeline`] + + Example: + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5") + + # Example 1 + pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork") + pipeline.load_textual_inversion("sd-concepts-library/moeb-style") + + # Remove all token embeddings + pipeline.unload_textual_inversion() + + # Example 2 + pipeline.load_textual_inversion("sd-concepts-library/moeb-style") + pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork") + + # Remove just one token + pipeline.unload_textual_inversion("") + + # Example 3: unload from SDXL + pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") + embedding_path = hf_hub_download(repo_id="linoyts/web_y2k", filename="web_y2k_emb.safetensors", repo_type="model") + + # load embeddings to the text encoders + state_dict = load_file(embedding_path) + + # load embeddings of text_encoder 1 (CLIP ViT-L/14) + 
pipeline.load_textual_inversion(state_dict["clip_l"], token=["", ""], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer) + # load embeddings of text_encoder 2 (CLIP ViT-G/14) + pipeline.load_textual_inversion(state_dict["clip_g"], token=["", ""], text_encoder=pipeline.text_encoder_2, tokenizer=pipeline.tokenizer_2) + + # Unload explicitly from both text encoders abd tokenizers + pipeline.unload_textual_inversion(tokens=["", ""], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer) + pipeline.unload_textual_inversion(tokens=["", ""], text_encoder=pipeline.text_encoder_2, tokenizer=pipeline.tokenizer_2) + + ``` + """ + + tokenizer = tokenizer or getattr(self, "tokenizer", None) + text_encoder = text_encoder or getattr(self, "text_encoder", None) + + # Get textual inversion tokens and ids + token_ids = [] + last_special_token_id = None + + if tokens: + if isinstance(tokens, str): + tokens = [tokens] + for added_token_id, added_token in tokenizer.added_tokens_decoder.items(): + if not added_token.special: + if added_token.content in tokens: + token_ids.append(added_token_id) + else: + last_special_token_id = added_token_id + if len(token_ids) == 0: + raise ValueError("No tokens to remove found") + else: + tokens = [] + for added_token_id, added_token in tokenizer.added_tokens_decoder.items(): + if not added_token.special: + token_ids.append(added_token_id) + tokens.append(added_token.content) + else: + last_special_token_id = added_token_id + + # Delete from tokenizer + for token_id, token_to_remove in zip(token_ids, tokens): + del tokenizer._added_tokens_decoder[token_id] + del tokenizer._added_tokens_encoder[token_to_remove] + + # Make all token ids sequential in tokenizer + key_id = 1 + for token_id in tokenizer.added_tokens_decoder: + if token_id > last_special_token_id and token_id > last_special_token_id + key_id: + token = tokenizer._added_tokens_decoder[token_id] + tokenizer._added_tokens_decoder[last_special_token_id + key_id] = token + del tokenizer._added_tokens_decoder[token_id] + tokenizer._added_tokens_encoder[token.content] = last_special_token_id + key_id + key_id += 1 + tokenizer._update_trie() + + # Delete from text encoder + text_embedding_dim = text_encoder.get_input_embeddings().embedding_dim + temp_text_embedding_weights = text_encoder.get_input_embeddings().weight + text_embedding_weights = temp_text_embedding_weights[: last_special_token_id + 1] + to_append = [] + for i in range(last_special_token_id + 1, temp_text_embedding_weights.shape[0]): + if i not in token_ids: + to_append.append(temp_text_embedding_weights[i].unsqueeze(0)) + if len(to_append) > 0: + to_append = torch.cat(to_append, dim=0) + text_embedding_weights = torch.cat([text_embedding_weights, to_append], dim=0) + text_embeddings_filtered = nn.Embedding(text_embedding_weights.shape[0], text_embedding_dim) + text_embeddings_filtered.weight.data = text_embedding_weights + text_encoder.set_input_embeddings(text_embeddings_filtered) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/unet.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/unet.py new file mode 100644 index 000000000..0a9544d0d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/unet.py @@ -0,0 +1,1003 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import inspect +import os +from collections import defaultdict +from contextlib import nullcontext +from functools import partial +from pathlib import Path +from typing import Callable, Dict, List, Optional, Union + +import safetensors +import torch +import torch.nn.functional as F +from huggingface_hub.utils import validate_hf_hub_args +from torch import nn + +from ..models.embeddings import ( + ImageProjection, + IPAdapterFullImageProjection, + IPAdapterPlusImageProjection, + MultiIPAdapterImageProjection, +) +from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta +from ..utils import ( + USE_PEFT_BACKEND, + _get_model_file, + delete_adapter_layers, + is_accelerate_available, + is_torch_version, + logging, + set_adapter_layers, + set_weights_and_activate_adapters, +) +from .single_file_utils import ( + convert_stable_cascade_unet_single_file_to_diffusers, + infer_stable_cascade_single_file_config, + load_single_file_model_checkpoint, +) +from .utils import AttnProcsLayers + + +if is_accelerate_available(): + from accelerate import init_empty_weights + from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module + +logger = logging.get_logger(__name__) + + +TEXT_ENCODER_NAME = "text_encoder" +UNET_NAME = "unet" + +LORA_WEIGHT_NAME = "pytorch_lora_weights.bin" +LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors" + +CUSTOM_DIFFUSION_WEIGHT_NAME = "pytorch_custom_diffusion_weights.bin" +CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE = "pytorch_custom_diffusion_weights.safetensors" + + +class UNet2DConditionLoadersMixin: + """ + Load LoRA layers into a [`UNet2DCondtionModel`]. + """ + + text_encoder_name = TEXT_ENCODER_NAME + unet_name = UNET_NAME + + @validate_hf_hub_args + def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs): + r""" + Load pretrained attention processor layers into [`UNet2DConditionModel`]. Attention processor layers have to be + defined in + [`attention_processor.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py) + and be a `torch.nn.Module` class. + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + Can be either: + + - A string, the model id (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a directory (for example `./my_model_directory`) containing the model weights saved + with [`ModelMixin.save_pretrained`]. + - A [torch state + dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). + + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. 
+ force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + + Example: + + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipeline = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.unet.load_attn_procs( + "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" + ) + ``` + """ + from ..models.attention_processor import CustomDiffusionAttnProcessor + from ..models.lora import LoRACompatibleConv, LoRACompatibleLinear, LoRAConv2dLayer, LoRALinearLayer + + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + subfolder = kwargs.pop("subfolder", None) + weight_name = kwargs.pop("weight_name", None) + use_safetensors = kwargs.pop("use_safetensors", None) + low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) + # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. 
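+        # (kohya-ss scales each LoRA update by `network_alpha / rank`, so smaller alpha values damp the learned weights.)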
+ # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning + network_alphas = kwargs.pop("network_alphas", None) + + _pipeline = kwargs.pop("_pipeline", None) + + is_network_alphas_none = network_alphas is None + + allow_pickle = False + + if use_safetensors is None: + use_safetensors = True + allow_pickle = True + + user_agent = { + "file_type": "attn_procs_weights", + "framework": "pytorch", + } + + model_file = None + if not isinstance(pretrained_model_name_or_path_or_dict, dict): + # Let's first try to load .safetensors weights + if (use_safetensors and weight_name is None) or ( + weight_name is not None and weight_name.endswith(".safetensors") + ): + try: + model_file = _get_model_file( + pretrained_model_name_or_path_or_dict, + weights_name=weight_name or LORA_WEIGHT_NAME_SAFE, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + state_dict = safetensors.torch.load_file(model_file, device="cpu") + except IOError as e: + if not allow_pickle: + raise e + # try loading non-safetensors weights + pass + if model_file is None: + model_file = _get_model_file( + pretrained_model_name_or_path_or_dict, + weights_name=weight_name or LORA_WEIGHT_NAME, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + state_dict = torch.load(model_file, map_location="cpu") + else: + state_dict = pretrained_model_name_or_path_or_dict + + # fill attn processors + lora_layers_list = [] + + is_lora = all(("lora" in k or k.endswith(".alpha")) for k in state_dict.keys()) and not USE_PEFT_BACKEND + is_custom_diffusion = any("custom_diffusion" in k for k in state_dict.keys()) + + if is_lora: + # correct keys + state_dict, network_alphas = self.convert_state_dict_legacy_attn_format(state_dict, network_alphas) + + if network_alphas is not None: + network_alphas_keys = list(network_alphas.keys()) + used_network_alphas_keys = set() + + lora_grouped_dict = defaultdict(dict) + mapped_network_alphas = {} + + all_keys = list(state_dict.keys()) + for key in all_keys: + value = state_dict.pop(key) + attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:]) + lora_grouped_dict[attn_processor_key][sub_key] = value + + # Create another `mapped_network_alphas` dictionary so that we can properly map them. 
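+                    # Each `network_alphas` entry is keyed by a module path ending in `.alpha`; stripping that suffix
+                    # and matching it against the weight key ties the alpha value to the LoRA layer group it scales.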
+ if network_alphas is not None: + for k in network_alphas_keys: + if k.replace(".alpha", "") in key: + mapped_network_alphas.update({attn_processor_key: network_alphas.get(k)}) + used_network_alphas_keys.add(k) + + if not is_network_alphas_none: + if len(set(network_alphas_keys) - used_network_alphas_keys) > 0: + raise ValueError( + f"The `network_alphas` has to be empty at this point but has the following keys \n\n {', '.join(network_alphas.keys())}" + ) + + if len(state_dict) > 0: + raise ValueError( + f"The `state_dict` has to be empty at this point but has the following keys \n\n {', '.join(state_dict.keys())}" + ) + + for key, value_dict in lora_grouped_dict.items(): + attn_processor = self + for sub_key in key.split("."): + attn_processor = getattr(attn_processor, sub_key) + + # Process non-attention layers, which don't have to_{k,v,q,out_proj}_lora layers + # or add_{k,v,q,out_proj}_proj_lora layers. + rank = value_dict["lora.down.weight"].shape[0] + + if isinstance(attn_processor, LoRACompatibleConv): + in_features = attn_processor.in_channels + out_features = attn_processor.out_channels + kernel_size = attn_processor.kernel_size + + ctx = init_empty_weights if low_cpu_mem_usage else nullcontext + with ctx(): + lora = LoRAConv2dLayer( + in_features=in_features, + out_features=out_features, + rank=rank, + kernel_size=kernel_size, + stride=attn_processor.stride, + padding=attn_processor.padding, + network_alpha=mapped_network_alphas.get(key), + ) + elif isinstance(attn_processor, LoRACompatibleLinear): + ctx = init_empty_weights if low_cpu_mem_usage else nullcontext + with ctx(): + lora = LoRALinearLayer( + attn_processor.in_features, + attn_processor.out_features, + rank, + mapped_network_alphas.get(key), + ) + else: + raise ValueError(f"Module {key} is not a LoRACompatibleConv or LoRACompatibleLinear module.") + + value_dict = {k.replace("lora.", ""): v for k, v in value_dict.items()} + lora_layers_list.append((attn_processor, lora)) + + if low_cpu_mem_usage: + device = next(iter(value_dict.values())).device + dtype = next(iter(value_dict.values())).dtype + load_model_dict_into_meta(lora, value_dict, device=device, dtype=dtype) + else: + lora.load_state_dict(value_dict) + + elif is_custom_diffusion: + attn_processors = {} + custom_diffusion_grouped_dict = defaultdict(dict) + for key, value in state_dict.items(): + if len(value) == 0: + custom_diffusion_grouped_dict[key] = {} + else: + if "to_out" in key: + attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:]) + else: + attn_processor_key, sub_key = ".".join(key.split(".")[:-2]), ".".join(key.split(".")[-2:]) + custom_diffusion_grouped_dict[attn_processor_key][sub_key] = value + + for key, value_dict in custom_diffusion_grouped_dict.items(): + if len(value_dict) == 0: + attn_processors[key] = CustomDiffusionAttnProcessor( + train_kv=False, train_q_out=False, hidden_size=None, cross_attention_dim=None + ) + else: + cross_attention_dim = value_dict["to_k_custom_diffusion.weight"].shape[1] + hidden_size = value_dict["to_k_custom_diffusion.weight"].shape[0] + train_q_out = True if "to_q_custom_diffusion.weight" in value_dict else False + attn_processors[key] = CustomDiffusionAttnProcessor( + train_kv=True, + train_q_out=train_q_out, + hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim, + ) + attn_processors[key].load_state_dict(value_dict) + elif USE_PEFT_BACKEND: + # In that case we have nothing to do as loading the adapter weights is already handled above by 
`set_peft_model_state_dict` + # on the Unet + pass + else: + raise ValueError( + f"{model_file} does not seem to be in the correct format expected by LoRA or Custom Diffusion training." + ) + + # + + def convert_state_dict_legacy_attn_format(self, state_dict, network_alphas): + is_new_lora_format = all( + key.startswith(self.unet_name) or key.startswith(self.text_encoder_name) for key in state_dict.keys() + ) + if is_new_lora_format: + # Strip the `"unet"` prefix. + is_text_encoder_present = any(key.startswith(self.text_encoder_name) for key in state_dict.keys()) + if is_text_encoder_present: + warn_message = "The state_dict contains LoRA params corresponding to the text encoder which are not being used here. To use both UNet and text encoder related LoRA params, use [`pipe.load_lora_weights()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.load_lora_weights)." + logger.warning(warn_message) + unet_keys = [k for k in state_dict.keys() if k.startswith(self.unet_name)] + state_dict = {k.replace(f"{self.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys} + + # change processor format to 'pure' LoRACompatibleLinear format + if any("processor" in k.split(".") for k in state_dict.keys()): + + def format_to_lora_compatible(key): + if "processor" not in key.split("."): + return key + return key.replace(".processor", "").replace("to_out_lora", "to_out.0.lora").replace("_lora", ".lora") + + state_dict = {format_to_lora_compatible(k): v for k, v in state_dict.items()} + + if network_alphas is not None: + network_alphas = {format_to_lora_compatible(k): v for k, v in network_alphas.items()} + return state_dict, network_alphas + + def save_attn_procs( + self, + save_directory: Union[str, os.PathLike], + is_main_process: bool = True, + weight_name: str = None, + save_function: Callable = None, + safe_serialization: bool = True, + **kwargs, + ): + r""" + Save attention processor layers to a directory so that it can be reloaded with the + [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save an attention processor to (will be created if it doesn't exist). + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful during distributed training and you + need to call this function on all processes. In this case, set `is_main_process=True` only on the main + process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful during distributed training when you need to + replace `torch.save` with another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or with `pickle`. 
+ + Example: + + ```py + import torch + from diffusers import DiffusionPipeline + + pipeline = DiffusionPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", + torch_dtype=torch.float16, + ).to("cuda") + pipeline.unet.load_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin") + pipeline.unet.save_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin") + ``` + """ + from ..models.attention_processor import ( + CustomDiffusionAttnProcessor, + CustomDiffusionAttnProcessor2_0, + CustomDiffusionXFormersAttnProcessor, + ) + + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + if save_function is None: + if safe_serialization: + + def save_function(weights, filename): + return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"}) + + else: + save_function = torch.save + + os.makedirs(save_directory, exist_ok=True) + + is_custom_diffusion = any( + isinstance( + x, + (CustomDiffusionAttnProcessor, CustomDiffusionAttnProcessor2_0, CustomDiffusionXFormersAttnProcessor), + ) + for (_, x) in self.attn_processors.items() + ) + if is_custom_diffusion: + model_to_save = AttnProcsLayers( + { + y: x + for (y, x) in self.attn_processors.items() + if isinstance( + x, + ( + CustomDiffusionAttnProcessor, + CustomDiffusionAttnProcessor2_0, + CustomDiffusionXFormersAttnProcessor, + ), + ) + } + ) + state_dict = model_to_save.state_dict() + for name, attn in self.attn_processors.items(): + if len(attn.state_dict()) == 0: + state_dict[name] = {} + else: + model_to_save = AttnProcsLayers(self.attn_processors) + state_dict = model_to_save.state_dict() + + if weight_name is None: + if safe_serialization: + weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE if is_custom_diffusion else LORA_WEIGHT_NAME_SAFE + else: + weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME if is_custom_diffusion else LORA_WEIGHT_NAME + + # Save the model + save_path = Path(save_directory, weight_name).as_posix() + save_function(state_dict, save_path) + logger.info(f"Model weights saved in {save_path}") + + def fuse_lora(self, lora_scale=1.0, safe_fusing=False, adapter_names=None): + self.lora_scale = lora_scale + self._safe_fusing = safe_fusing + self.apply(partial(self._fuse_lora_apply, adapter_names=adapter_names)) + + def _fuse_lora_apply(self, module, adapter_names=None): + if not USE_PEFT_BACKEND: + if hasattr(module, "_fuse_lora"): + module._fuse_lora(self.lora_scale, self._safe_fusing) + + if adapter_names is not None: + raise ValueError( + "The `adapter_names` argument is not supported in your environment. Please switch" + " to PEFT backend to use this argument by installing latest PEFT and transformers." + " `pip install -U peft transformers`" + ) + else: + from peft.tuners.tuners_utils import BaseTunerLayer + + merge_kwargs = {"safe_merge": self._safe_fusing} + + if isinstance(module, BaseTunerLayer): + if self.lora_scale != 1.0: + module.scale_layer(self.lora_scale) + + # For BC with prevous PEFT versions, we need to check the signature + # of the `merge` method to see if it supports the `adapter_names` argument. + supported_merge_kwargs = list(inspect.signature(module.merge).parameters) + if "adapter_names" in supported_merge_kwargs: + merge_kwargs["adapter_names"] = adapter_names + elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None: + raise ValueError( + "The `adapter_names` argument is not supported with your PEFT version. 
Please upgrade" + " to the latest version of PEFT. `pip install -U peft`" + ) + + module.merge(**merge_kwargs) + + def unfuse_lora(self): + self.apply(self._unfuse_lora_apply) + + def _unfuse_lora_apply(self, module): + if not USE_PEFT_BACKEND: + if hasattr(module, "_unfuse_lora"): + module._unfuse_lora() + else: + from peft.tuners.tuners_utils import BaseTunerLayer + + if isinstance(module, BaseTunerLayer): + module.unmerge() + + def set_adapters( + self, + adapter_names: Union[List[str], str], + weights: Optional[Union[List[float], float]] = None, + ): + """ + Set the currently active adapters for use in the UNet. + + Args: + adapter_names (`List[str]` or `str`): + The names of the adapters to use. + adapter_weights (`Union[List[float], float]`, *optional*): + The adapter(s) weights to use with the UNet. If `None`, the weights are set to `1.0` for all the + adapters. + + Example: + + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipeline = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights( + "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" + ) + pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") + pipeline.set_adapters(["cinematic", "pixel"], adapter_weights=[0.5, 0.5]) + ``` + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for `set_adapters()`.") + + adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names + + if weights is None: + weights = [1.0] * len(adapter_names) + elif isinstance(weights, float): + weights = [weights] * len(adapter_names) + + if len(adapter_names) != len(weights): + raise ValueError( + f"Length of adapter names {len(adapter_names)} is not equal to the length of their weights {len(weights)}." + ) + + set_weights_and_activate_adapters(self, adapter_names, weights) + + def disable_lora(self): + """ + Disable the UNet's active LoRA layers. + + Example: + + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipeline = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights( + "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" + ) + pipeline.disable_lora() + ``` + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + set_adapter_layers(self, enabled=False) + + def enable_lora(self): + """ + Enable the UNet's active LoRA layers. + + Example: + + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipeline = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights( + "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" + ) + pipeline.enable_lora() + ``` + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + set_adapter_layers(self, enabled=True) + + def delete_adapters(self, adapter_names: Union[List[str], str]): + """ + Delete an adapter's LoRA layers from the UNet. + + Args: + adapter_names (`Union[List[str], str]`): + The names (single string or list of strings) of the adapter to delete. 
+ + Example: + + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipeline = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights( + "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_names="cinematic" + ) + pipeline.delete_adapters("cinematic") + ``` + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + if isinstance(adapter_names, str): + adapter_names = [adapter_names] + + for adapter_name in adapter_names: + delete_adapter_layers(self, adapter_name) + + # Pop also the corresponding adapter from the config + if hasattr(self, "peft_config"): + self.peft_config.pop(adapter_name, None) + + def _convert_ip_adapter_image_proj_to_diffusers(self, state_dict, low_cpu_mem_usage=False): + if low_cpu_mem_usage: + if is_accelerate_available(): + from accelerate import init_empty_weights + + else: + low_cpu_mem_usage = False + logger.warning( + "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" + " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" + " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" + " install accelerate\n```\n." + ) + + if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): + raise NotImplementedError( + "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" + " `low_cpu_mem_usage=False`." + ) + + updated_state_dict = {} + image_projection = None + init_context = init_empty_weights if low_cpu_mem_usage else nullcontext + + if "proj.weight" in state_dict: + # IP-Adapter + num_image_text_embeds = 4 + clip_embeddings_dim = state_dict["proj.weight"].shape[-1] + cross_attention_dim = state_dict["proj.weight"].shape[0] // 4 + + with init_context(): + image_projection = ImageProjection( + cross_attention_dim=cross_attention_dim, + image_embed_dim=clip_embeddings_dim, + num_image_text_embeds=num_image_text_embeds, + ) + + for key, value in state_dict.items(): + diffusers_name = key.replace("proj", "image_embeds") + updated_state_dict[diffusers_name] = value + + elif "proj.3.weight" in state_dict: + # IP-Adapter Full + clip_embeddings_dim = state_dict["proj.0.weight"].shape[0] + cross_attention_dim = state_dict["proj.3.weight"].shape[0] + + with init_context(): + image_projection = IPAdapterFullImageProjection( + cross_attention_dim=cross_attention_dim, image_embed_dim=clip_embeddings_dim + ) + + for key, value in state_dict.items(): + diffusers_name = key.replace("proj.0", "ff.net.0.proj") + diffusers_name = diffusers_name.replace("proj.2", "ff.net.2") + diffusers_name = diffusers_name.replace("proj.3", "norm") + updated_state_dict[diffusers_name] = value + + else: + # IP-Adapter Plus + num_image_text_embeds = state_dict["latents"].shape[1] + embed_dims = state_dict["proj_in.weight"].shape[1] + output_dims = state_dict["proj_out.weight"].shape[0] + hidden_dims = state_dict["latents"].shape[2] + heads = state_dict["layers.0.0.to_q.weight"].shape[0] // 64 + + with init_context(): + image_projection = IPAdapterPlusImageProjection( + embed_dims=embed_dims, + output_dims=output_dims, + hidden_dims=hidden_dims, + heads=heads, + num_queries=num_image_text_embeds, + ) + + for key, value in state_dict.items(): + diffusers_name = key.replace("0.to", "2.to") + diffusers_name = 
diffusers_name.replace("1.0.weight", "3.0.weight") + diffusers_name = diffusers_name.replace("1.0.bias", "3.0.bias") + diffusers_name = diffusers_name.replace("1.1.weight", "3.1.net.0.proj.weight") + diffusers_name = diffusers_name.replace("1.3.weight", "3.1.net.2.weight") + + if "norm1" in diffusers_name: + updated_state_dict[diffusers_name.replace("0.norm1", "0")] = value + elif "norm2" in diffusers_name: + updated_state_dict[diffusers_name.replace("0.norm2", "1")] = value + elif "to_kv" in diffusers_name: + v_chunk = value.chunk(2, dim=0) + updated_state_dict[diffusers_name.replace("to_kv", "to_k")] = v_chunk[0] + updated_state_dict[diffusers_name.replace("to_kv", "to_v")] = v_chunk[1] + elif "to_out" in diffusers_name: + updated_state_dict[diffusers_name.replace("to_out", "to_out.0")] = value + else: + updated_state_dict[diffusers_name] = value + + if not low_cpu_mem_usage: + image_projection.load_state_dict(updated_state_dict) + else: + load_model_dict_into_meta(image_projection, updated_state_dict, device=self.device, dtype=self.dtype) + + return image_projection + + def _convert_ip_adapter_attn_to_diffusers(self, state_dicts, low_cpu_mem_usage=False): + from ..models.attention_processor import ( + AttnProcessor, + AttnProcessor2_0, + IPAdapterAttnProcessor, + IPAdapterAttnProcessor2_0, + ) + + if low_cpu_mem_usage: + if is_accelerate_available(): + from accelerate import init_empty_weights + + else: + low_cpu_mem_usage = False + logger.warning( + "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" + " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" + " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" + " install accelerate\n```\n." + ) + + if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): + raise NotImplementedError( + "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" + " `low_cpu_mem_usage=False`." 
+ ) + + # set ip-adapter cross-attention processors & load state_dict + attn_procs = {} + key_id = 1 + init_context = init_empty_weights if low_cpu_mem_usage else nullcontext + for name in self.attn_processors.keys(): + cross_attention_dim = None if name.endswith("attn1.processor") else self.config.cross_attention_dim + if name.startswith("mid_block"): + hidden_size = self.config.block_out_channels[-1] + elif name.startswith("up_blocks"): + block_id = int(name[len("up_blocks.")]) + hidden_size = list(reversed(self.config.block_out_channels))[block_id] + elif name.startswith("down_blocks"): + block_id = int(name[len("down_blocks.")]) + hidden_size = self.config.block_out_channels[block_id] + + if cross_attention_dim is None or "motion_modules" in name: + attn_processor_class = ( + AttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else AttnProcessor + ) + attn_procs[name] = attn_processor_class() + else: + attn_processor_class = ( + IPAdapterAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else IPAdapterAttnProcessor + ) + num_image_text_embeds = [] + for state_dict in state_dicts: + if "proj.weight" in state_dict["image_proj"]: + # IP-Adapter + num_image_text_embeds += [4] + elif "proj.3.weight" in state_dict["image_proj"]: + # IP-Adapter Full Face + num_image_text_embeds += [257] # 256 CLIP tokens + 1 CLS token + else: + # IP-Adapter Plus + num_image_text_embeds += [state_dict["image_proj"]["latents"].shape[1]] + + with init_context(): + attn_procs[name] = attn_processor_class( + hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim, + scale=1.0, + num_tokens=num_image_text_embeds, + ) + + value_dict = {} + for i, state_dict in enumerate(state_dicts): + value_dict.update({f"to_k_ip.{i}.weight": state_dict["ip_adapter"][f"{key_id}.to_k_ip.weight"]}) + value_dict.update({f"to_v_ip.{i}.weight": state_dict["ip_adapter"][f"{key_id}.to_v_ip.weight"]}) + + if not low_cpu_mem_usage: + attn_procs[name].load_state_dict(value_dict) + else: + device = next(iter(value_dict.values())).device + dtype = next(iter(value_dict.values())).dtype + load_model_dict_into_meta(attn_procs[name], value_dict, device=device, dtype=dtype) + + key_id += 2 + + return attn_procs + + def _load_ip_adapter_weights(self, state_dicts, low_cpu_mem_usage=False): + if not isinstance(state_dicts, list): + state_dicts = [state_dicts] + # Set encoder_hid_proj after loading ip_adapter weights, + # because `IPAdapterPlusImageProjection` also has `attn_processors`. + self.encoder_hid_proj = None + + attn_procs = self._convert_ip_adapter_attn_to_diffusers(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage) + self.set_attn_processor(attn_procs) + + # convert IP-Adapter Image Projection layers to diffusers + image_projection_layers = [] + for state_dict in state_dicts: + image_projection_layer = self._convert_ip_adapter_image_proj_to_diffusers( + state_dict["image_proj"], low_cpu_mem_usage=low_cpu_mem_usage + ) + image_projection_layers.append(image_projection_layer) + + self.encoder_hid_proj = MultiIPAdapterImageProjection(image_projection_layers) + self.config.encoder_hid_dim_type = "ip_image_proj" + + self.to(dtype=self.dtype, device=self.device) + + +class FromOriginalUNetMixin: + """ + Load pretrained UNet model weights saved in the `.ckpt` or `.safetensors` format into a [`StableCascadeUNet`]. 
+ """ + + @classmethod + @validate_hf_hub_args + def from_single_file(cls, pretrained_model_link_or_path, **kwargs): + r""" + Instantiate a [`StableCascadeUNet`] from pretrained StableCascadeUNet weights saved in the original `.ckpt` or + `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default. + + Parameters: + pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + - A link to the `.ckpt` file (for example + `"https://huggingface.co//blob/main/.ckpt"`) on the Hub. + - A path to a *file* containing all pipeline weights. + config: (`dict`, *optional*): + Dictionary containing the configuration of the model: + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the + dtype is automatically derived from the model's weights. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to True, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables of the model. 
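        Example (an illustrative sketch added here for clarity; the checkpoint URL below is an assumption and should be
        replaced with whichever Stable Cascade single-file checkpoint you actually use):

        ```py
        import torch
        from diffusers import StableCascadeUNet

        # Assumed checkpoint location; a local .ckpt/.safetensors path works as well.
        unet = StableCascadeUNet.from_single_file(
            "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_lite.safetensors",
            torch_dtype=torch.bfloat16,
        )
        ```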
+ + """ + class_name = cls.__name__ + if class_name != "StableCascadeUNet": + raise ValueError("FromOriginalUNetMixin is currently only compatible with StableCascadeUNet") + + config = kwargs.pop("config", None) + resume_download = kwargs.pop("resume_download", False) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + cache_dir = kwargs.pop("cache_dir", None) + local_files_only = kwargs.pop("local_files_only", None) + revision = kwargs.pop("revision", None) + torch_dtype = kwargs.pop("torch_dtype", None) + + checkpoint = load_single_file_model_checkpoint( + pretrained_model_link_or_path, + resume_download=resume_download, + force_download=force_download, + proxies=proxies, + token=token, + cache_dir=cache_dir, + local_files_only=local_files_only, + revision=revision, + ) + + if config is None: + config = infer_stable_cascade_single_file_config(checkpoint) + model_config = cls.load_config(**config, **kwargs) + else: + model_config = config + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + model = cls.from_config(model_config, **kwargs) + + diffusers_format_checkpoint = convert_stable_cascade_unet_single_file_to_diffusers(checkpoint) + if is_accelerate_available(): + unexpected_keys = load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype) + if len(unexpected_keys) > 0: + logger.warn( + f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}" + ) + + else: + model.load_state_dict(diffusers_format_checkpoint) + + if torch_dtype is not None: + model.to(torch_dtype) + + return model diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/utils.py new file mode 100644 index 000000000..142d72bf6 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/loaders/utils.py @@ -0,0 +1,59 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
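The single-file loader above instantiates the model under `init_empty_weights` when `accelerate` is available, so no memory is allocated until the checkpoint tensors are copied in (via `load_model_dict_into_meta`). A minimal sketch of the same meta-device pattern, using plain PyTorch's `assign=True` (torch >= 2.1) in place of the diffusers helper and a toy module:

```py
from contextlib import nullcontext

import torch

try:
    from accelerate import init_empty_weights
    ctx = init_empty_weights
except ImportError:
    ctx = nullcontext

# Parameters are created on the meta device, so nothing is materialized yet.
with ctx():
    model = torch.nn.Linear(1024, 1024, bias=False)

# Stand-in for the tensors read from the .ckpt/.safetensors file.
state_dict = {"weight": torch.randn(1024, 1024)}

# assign=True swaps the meta parameters for the loaded tensors in place.
model.load_state_dict(state_dict, assign=True)
print(model.weight.device)  # cpu
```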
+ +from typing import Dict + +import torch + + +class AttnProcsLayers(torch.nn.Module): + def __init__(self, state_dict: Dict[str, torch.Tensor]): + super().__init__() + self.layers = torch.nn.ModuleList(state_dict.values()) + self.mapping = dict(enumerate(state_dict.keys())) + self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())} + + # .processor for unet, .self_attn for text encoder + self.split_keys = [".processor", ".self_attn"] + + # we add a hook to state_dict() and load_state_dict() so that the + # naming fits with `unet.attn_processors` + def map_to(module, state_dict, *args, **kwargs): + new_state_dict = {} + for key, value in state_dict.items(): + num = int(key.split(".")[1]) # 0 is always "layers" + new_key = key.replace(f"layers.{num}", module.mapping[num]) + new_state_dict[new_key] = value + + return new_state_dict + + def remap_key(key, state_dict): + for k in self.split_keys: + if k in key: + return key.split(k)[0] + k + + raise ValueError( + f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}." + ) + + def map_from(module, state_dict, *args, **kwargs): + all_keys = list(state_dict.keys()) + for key in all_keys: + replace_key = remap_key(key, state_dict) + new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}") + state_dict[new_key] = state_dict[key] + del state_dict[key] + + self._register_state_dict_hook(map_to) + self._register_load_state_dict_pre_hook(map_from, with_module=True) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/README.md b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/README.md new file mode 100644 index 000000000..fb91f5941 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/README.md @@ -0,0 +1,3 @@ +# Models + +For more detail on the models, please refer to the [docs](https://huggingface.co/docs/diffusers/api/models/overview). \ No newline at end of file diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/__init__.py new file mode 100644 index 000000000..da77e4450 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/__init__.py @@ -0,0 +1,103 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
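The `AttnProcsLayers` wrapper above stores processors in a flat `layers.N` namespace but re-exports and re-imports their weights under the original `unet.attn_processors` keys through the registered hooks. A toy round-trip check (hypothetical processor names, plain `nn.Linear` stand-ins for real attention processors):

```py
import torch

# Keys follow the ".processor" convention expected by the remapping hooks.
procs = {
    "down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor": torch.nn.Linear(4, 4),
    "mid_block.attentions.0.transformer_blocks.0.attn1.processor": torch.nn.Linear(4, 4),
}
wrapper = AttnProcsLayers(procs)

# state_dict() exposes the original processor names, not "layers.0"/"layers.1".
print(sorted(wrapper.state_dict().keys())[0])
# down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.bias

# load_state_dict() accepts the same remapped keys and routes them back to layers.N.
wrapper.load_state_dict(wrapper.state_dict())
```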
+ +from typing import TYPE_CHECKING + +from ..utils import ( + DIFFUSERS_SLOW_IMPORT, + _LazyModule, + is_flax_available, + is_torch_available, +) + + +_import_structure = {} + +if is_torch_available(): + _import_structure["adapter"] = ["MultiAdapter", "T2IAdapter"] + _import_structure["autoencoders.autoencoder_asym_kl"] = ["AsymmetricAutoencoderKL"] + _import_structure["autoencoders.autoencoder_kl"] = ["AutoencoderKL"] + _import_structure["autoencoders.autoencoder_kl_temporal_decoder"] = ["AutoencoderKLTemporalDecoder"] + _import_structure["autoencoders.autoencoder_tiny"] = ["AutoencoderTiny"] + _import_structure["autoencoders.consistency_decoder_vae"] = ["ConsistencyDecoderVAE"] + _import_structure["controlnet"] = ["ControlNetModel"] + _import_structure["dual_transformer_2d"] = ["DualTransformer2DModel"] + _import_structure["embeddings"] = ["ImageProjection"] + _import_structure["modeling_utils"] = ["ModelMixin"] + _import_structure["transformers.prior_transformer"] = ["PriorTransformer"] + _import_structure["transformers.t5_film_transformer"] = ["T5FilmDecoder"] + _import_structure["transformers.transformer_2d"] = ["Transformer2DModel"] + _import_structure["transformers.transformer_temporal"] = ["TransformerTemporalModel"] + _import_structure["unets.unet_1d"] = ["UNet1DModel"] + _import_structure["unets.unet_2d"] = ["UNet2DModel"] + _import_structure["unets.unet_2d_condition"] = ["UNet2DConditionModel"] + _import_structure["unets.unet_3d_condition"] = ["UNet3DConditionModel"] + _import_structure["unets.unet_i2vgen_xl"] = ["I2VGenXLUNet"] + _import_structure["unets.unet_kandinsky3"] = ["Kandinsky3UNet"] + _import_structure["unets.unet_motion_model"] = ["MotionAdapter", "UNetMotionModel"] + _import_structure["unets.unet_spatio_temporal_condition"] = ["UNetSpatioTemporalConditionModel"] + _import_structure["unets.unet_stable_cascade"] = ["StableCascadeUNet"] + _import_structure["unets.uvit_2d"] = ["UVit2DModel"] + _import_structure["vq_model"] = ["VQModel"] + +if is_flax_available(): + _import_structure["controlnet_flax"] = ["FlaxControlNetModel"] + _import_structure["unets.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"] + _import_structure["vae_flax"] = ["FlaxAutoencoderKL"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + if is_torch_available(): + from .adapter import MultiAdapter, T2IAdapter + from .autoencoders import ( + AsymmetricAutoencoderKL, + AutoencoderKL, + AutoencoderKLTemporalDecoder, + AutoencoderTiny, + ConsistencyDecoderVAE, + ) + from .controlnet import ControlNetModel + from .embeddings import ImageProjection + from .modeling_utils import ModelMixin + from .transformers import ( + DualTransformer2DModel, + PriorTransformer, + T5FilmDecoder, + Transformer2DModel, + TransformerTemporalModel, + ) + from .unets import ( + I2VGenXLUNet, + Kandinsky3UNet, + MotionAdapter, + StableCascadeUNet, + UNet1DModel, + UNet2DConditionModel, + UNet2DModel, + UNet3DConditionModel, + UNetMotionModel, + UNetSpatioTemporalConditionModel, + UVit2DModel, + ) + from .vq_model import VQModel + + if is_flax_available(): + from .controlnet_flax import FlaxControlNetModel + from .unets import FlaxUNet2DConditionModel + from .vae_flax import FlaxAutoencoderKL + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/activations.py 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/activations.py new file mode 100644 index 000000000..4b6914307 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/activations.py @@ -0,0 +1,145 @@ +# coding=utf-8 +# Copyright 2024 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn.functional as F +from torch import nn +import ixformer.functions as IXF + +from ..utils import deprecate + + +ACTIVATION_FUNCTIONS = { + "swish": nn.SiLU(), + "silu": nn.SiLU(), + "mish": nn.Mish(), + "gelu": nn.GELU(), + "relu": nn.ReLU(), +} + + +def get_activation(act_fn: str) -> nn.Module: + """Helper function to get activation function from string. + + Args: + act_fn (str): Name of activation function. + + Returns: + nn.Module: Activation function. + """ + + act_fn = act_fn.lower() + if act_fn in ACTIVATION_FUNCTIONS: + return ACTIVATION_FUNCTIONS[act_fn] + else: + raise ValueError(f"Unsupported activation function: {act_fn}") + + +class GELU(nn.Module): + r""" + GELU activation function with tanh approximation support with `approximate="tanh"`. + + Parameters: + dim_in (`int`): The number of channels in the input. + dim_out (`int`): The number of channels in the output. + approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation. + bias (`bool`, defaults to True): Whether to use a bias in the linear layer. + """ + + def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out, bias=bias) + self.approximate = approximate + + def gelu(self, gate: torch.Tensor) -> torch.Tensor: + if gate.device.type != "mps": + return F.gelu(gate, approximate=self.approximate) + # mps: gelu is not implemented for float16 + return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype) + + def forward(self, hidden_states): + hidden_states = self.proj(hidden_states) + hidden_states = self.gelu(hidden_states) + return hidden_states + + +class GEGLU(nn.Module): + r""" + A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function. + + Parameters: + dim_in (`int`): The number of channels in the input. + dim_out (`int`): The number of channels in the output. + bias (`bool`, defaults to True): Whether to use a bias in the linear layer. + """ + + def __init__(self, dim_in: int, dim_out: int, bias: bool = True): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out * 2, bias=bias) + + def gelu(self, gate: torch.Tensor) -> torch.Tensor: + if gate.device.type != "mps": + return F.gelu(gate) + # mps: gelu is not implemented for float16 + return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype) + + def forward(self, hidden_states, *args, **kwargs): + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. 
Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + + hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1) + return hidden_states * self.gelu(gate) + + +class IXF_GEGLU(GEGLU): + r""" + A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function. + + Parameters: + dim_in (`int`): The number of channels in the input. + dim_out (`int`): The number of channels in the output. + bias (`bool`, defaults to True): Whether to use a bias in the linear layer. + """ + + def forward(self, hidden_states, *args, **kwargs): + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + + lout = self.proj(hidden_states) + res = IXF.geglu(lout) + + return res + + +class ApproximateGELU(nn.Module): + r""" + The approximate form of the Gaussian Error Linear Unit (GELU). For more details, see section 2 of this + [paper](https://arxiv.org/abs/1606.08415). + + Parameters: + dim_in (`int`): The number of channels in the input. + dim_out (`int`): The number of channels in the output. + bias (`bool`, defaults to True): Whether to use a bias in the linear layer. + """ + + def __init__(self, dim_in: int, dim_out: int, bias: bool = True): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out, bias=bias) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj(x) + return x * torch.sigmoid(1.702 * x) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/adapter.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/adapter.py new file mode 100644 index 000000000..0f4b2ec03 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/adapter.py @@ -0,0 +1,584 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +from typing import Callable, List, Optional, Union + +import torch +import torch.nn as nn + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import logging +from .modeling_utils import ModelMixin + + +logger = logging.get_logger(__name__) + + +class MultiAdapter(ModelMixin): + r""" + MultiAdapter is a wrapper model that contains multiple adapter models and merges their outputs according to + user-assigned weighting. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library + implements for all the model (such as downloading or saving, etc.) 
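    For example, two adapters with the same downscaling behavior can be wrapped together like this (an illustrative
    sketch; the checkpoint ids are assumptions):

    ```py
    import torch
    from diffusers import MultiAdapter, T2IAdapter

    # Assumed SD 1.5 adapter checkpoints; any adapters with matching downscaling work.
    canny = T2IAdapter.from_pretrained("TencentARC/t2iadapter_canny_sd15v2", torch_dtype=torch.float16)
    depth = T2IAdapter.from_pretrained("TencentARC/t2iadapter_depth_sd15v2", torch_dtype=torch.float16)

    adapter = MultiAdapter([canny, depth])
    ```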
+ + Parameters: + adapters (`List[T2IAdapter]`, *optional*, defaults to None): + A list of `T2IAdapter` model instances. + """ + + def __init__(self, adapters: List["T2IAdapter"]): + super(MultiAdapter, self).__init__() + + self.num_adapter = len(adapters) + self.adapters = nn.ModuleList(adapters) + + if len(adapters) == 0: + raise ValueError("Expecting at least one adapter") + + if len(adapters) == 1: + raise ValueError("For a single adapter, please use the `T2IAdapter` class instead of `MultiAdapter`") + + # The outputs from each adapter are added together with a weight. + # This means that the change in dimensions from downsampling must + # be the same for all adapters. Inductively, it also means the + # downscale_factor and total_downscale_factor must be the same for all + # adapters. + first_adapter_total_downscale_factor = adapters[0].total_downscale_factor + first_adapter_downscale_factor = adapters[0].downscale_factor + for idx in range(1, len(adapters)): + if ( + adapters[idx].total_downscale_factor != first_adapter_total_downscale_factor + or adapters[idx].downscale_factor != first_adapter_downscale_factor + ): + raise ValueError( + f"Expecting all adapters to have the same downscaling behavior, but got:\n" + f"adapters[0].total_downscale_factor={first_adapter_total_downscale_factor}\n" + f"adapters[0].downscale_factor={first_adapter_downscale_factor}\n" + f"adapter[`{idx}`].total_downscale_factor={adapters[idx].total_downscale_factor}\n" + f"adapter[`{idx}`].downscale_factor={adapters[idx].downscale_factor}" + ) + + self.total_downscale_factor = first_adapter_total_downscale_factor + self.downscale_factor = first_adapter_downscale_factor + + def forward(self, xs: torch.Tensor, adapter_weights: Optional[List[float]] = None) -> List[torch.Tensor]: + r""" + Args: + xs (`torch.Tensor`): + (batch, channel, height, width) input images for multiple adapter models concated along dimension 1, + `channel` should equal to `num_adapter` * "number of channel of image". + adapter_weights (`List[float]`, *optional*, defaults to None): + List of floats representing the weight which will be multiply to each adapter's output before adding + them together. + """ + if adapter_weights is None: + adapter_weights = torch.tensor([1 / self.num_adapter] * self.num_adapter) + else: + adapter_weights = torch.tensor(adapter_weights) + + accume_state = None + for x, w, adapter in zip(xs, adapter_weights, self.adapters): + features = adapter(x) + if accume_state is None: + accume_state = features + for i in range(len(accume_state)): + accume_state[i] = w * accume_state[i] + else: + for i in range(len(features)): + accume_state[i] += w * features[i] + return accume_state + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + is_main_process: bool = True, + save_function: Callable = None, + safe_serialization: bool = True, + variant: Optional[str] = None, + ): + """ + Save a model and its configuration file to a directory, so that it can be re-loaded using the + `[`~models.adapter.MultiAdapter.from_pretrained`]` class method. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to which to save. Will be created if it doesn't exist. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful when in distributed training like + TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on + the main process to avoid race conditions. 
+ save_function (`Callable`): + The function to use to save the state dictionary. Useful on distributed training like TPUs when one + need to replace `torch.save` by another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + variant (`str`, *optional*): + If specified, weights are saved in the format pytorch_model..bin. + """ + idx = 0 + model_path_to_save = save_directory + for adapter in self.adapters: + adapter.save_pretrained( + model_path_to_save, + is_main_process=is_main_process, + save_function=save_function, + safe_serialization=safe_serialization, + variant=variant, + ) + + idx += 1 + model_path_to_save = model_path_to_save + f"_{idx}" + + @classmethod + def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs): + r""" + Instantiate a pretrained MultiAdapter model from multiple pre-trained adapter models. + + The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train + the model, you should first set it back in training mode with `model.train()`. + + The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come + pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning + task. + + The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those + weights are discarded. + + Parameters: + pretrained_model_path (`os.PathLike`): + A path to a *directory* containing model weights saved using + [`~diffusers.models.adapter.MultiAdapter.save_pretrained`], e.g., `./my_model_directory/adapter`. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype + will be automatically derived from the model's weights. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each + parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the + same device. + + To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available for each + GPU and the available CPU RAM if unset. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading by not initializing the weights and only loading the pre-trained weights. This + also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the + model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch, + setting this argument to `True` will raise an error. + variant (`str`, *optional*): + If specified load weights from `variant` filename, *e.g.* pytorch_model..bin. 
`variant` is + ignored when using `from_flax`. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the + `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from + `safetensors` weights. If set to `False`, loading will *not* use `safetensors`. + """ + idx = 0 + adapters = [] + + # load adapter and append to list until no adapter directory exists anymore + # first adapter has to be saved under `./mydirectory/adapter` to be compliant with `DiffusionPipeline.from_pretrained` + # second, third, ... adapters have to be saved under `./mydirectory/adapter_1`, `./mydirectory/adapter_2`, ... + model_path_to_load = pretrained_model_path + while os.path.isdir(model_path_to_load): + adapter = T2IAdapter.from_pretrained(model_path_to_load, **kwargs) + adapters.append(adapter) + + idx += 1 + model_path_to_load = pretrained_model_path + f"_{idx}" + + logger.info(f"{len(adapters)} adapters loaded from {pretrained_model_path}.") + + if len(adapters) == 0: + raise ValueError( + f"No T2IAdapters found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}." + ) + + return cls(adapters) + + +class T2IAdapter(ModelMixin, ConfigMixin): + r""" + A simple ResNet-like model that accepts images containing control signals such as keyposes and depth. The model + generates multiple feature maps that are used as additional conditioning in [`UNet2DConditionModel`]. The model's + architecture follows the original implementation of + [Adapter](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L97) + and + [AdapterLight](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L235). + + This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library + implements for all the model (such as downloading or saving, etc.) + + Parameters: + in_channels (`int`, *optional*, defaults to 3): + Number of channels of Aapter's input(*control image*). Set this parameter to 1 if you're using gray scale + image as *control image*. + channels (`List[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): + The number of channel of each downsample block's output hidden state. The `len(block_out_channels)` will + also determine the number of downsample blocks in the Adapter. + num_res_blocks (`int`, *optional*, defaults to 2): + Number of ResNet blocks in each downsample block. + downscale_factor (`int`, *optional*, defaults to 8): + A factor that determines the total downscale factor of the Adapter. + adapter_type (`str`, *optional*, defaults to `full_adapter`): + The type of Adapter to use. Choose either `full_adapter` or `full_adapter_xl` or `light_adapter`. 
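    Example (an illustrative sketch; the checkpoint ids are assumptions, and the pipeline class is only usable if it is
    included in this vendored copy):

    ```py
    import torch
    from diffusers import StableDiffusionAdapterPipeline, T2IAdapter

    adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_canny_sd15v2", torch_dtype=torch.float16)
    pipe = StableDiffusionAdapterPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", adapter=adapter, torch_dtype=torch.float16
    ).to("cuda")
    ```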
+ """ + + @register_to_config + def __init__( + self, + in_channels: int = 3, + channels: List[int] = [320, 640, 1280, 1280], + num_res_blocks: int = 2, + downscale_factor: int = 8, + adapter_type: str = "full_adapter", + ): + super().__init__() + + if adapter_type == "full_adapter": + self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor) + elif adapter_type == "full_adapter_xl": + self.adapter = FullAdapterXL(in_channels, channels, num_res_blocks, downscale_factor) + elif adapter_type == "light_adapter": + self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor) + else: + raise ValueError( + f"Unsupported adapter_type: '{adapter_type}'. Choose either 'full_adapter' or " + "'full_adapter_xl' or 'light_adapter'." + ) + + def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + r""" + This function processes the input tensor `x` through the adapter model and returns a list of feature tensors, + each representing information extracted at a different scale from the input. The length of the list is + determined by the number of downsample blocks in the Adapter, as specified by the `channels` and + `num_res_blocks` parameters during initialization. + """ + return self.adapter(x) + + @property + def total_downscale_factor(self): + return self.adapter.total_downscale_factor + + @property + def downscale_factor(self): + """The downscale factor applied in the T2I-Adapter's initial pixel unshuffle operation. If an input image's dimensions are + not evenly divisible by the downscale_factor then an exception will be raised. + """ + return self.adapter.unshuffle.downscale_factor + + +# full adapter + + +class FullAdapter(nn.Module): + r""" + See [`T2IAdapter`] for more information. + """ + + def __init__( + self, + in_channels: int = 3, + channels: List[int] = [320, 640, 1280, 1280], + num_res_blocks: int = 2, + downscale_factor: int = 8, + ): + super().__init__() + + in_channels = in_channels * downscale_factor**2 + + self.unshuffle = nn.PixelUnshuffle(downscale_factor) + self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1) + + self.body = nn.ModuleList( + [ + AdapterBlock(channels[0], channels[0], num_res_blocks), + *[ + AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True) + for i in range(1, len(channels)) + ], + ] + ) + + self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1) + + def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + r""" + This method processes the input tensor `x` through the FullAdapter model and performs operations including + pixel unshuffling, convolution, and a stack of AdapterBlocks. It returns a list of feature tensors, each + capturing information at a different stage of processing within the FullAdapter model. The number of feature + tensors in the list is determined by the number of downsample blocks specified during initialization. + """ + x = self.unshuffle(x) + x = self.conv_in(x) + + features = [] + + for block in self.body: + x = block(x) + features.append(x) + + return features + + +class FullAdapterXL(nn.Module): + r""" + See [`T2IAdapter`] for more information. 
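    A quick shape check of the XL variant's feature pyramid (toy 1024x1024 input with the default configuration; the
    expected shapes match the comment in `__init__` below):

    ```py
    import torch

    adapter = FullAdapterXL()  # channels=[320, 640, 1280, 1280], downscale_factor=16
    x = torch.randn(1, 3, 1024, 1024)
    features = adapter(x)
    print([tuple(f.shape) for f in features])
    # [(1, 320, 64, 64), (1, 640, 64, 64), (1, 1280, 32, 32), (1, 1280, 32, 32)]
    ```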
+ """ + + def __init__( + self, + in_channels: int = 3, + channels: List[int] = [320, 640, 1280, 1280], + num_res_blocks: int = 2, + downscale_factor: int = 16, + ): + super().__init__() + + in_channels = in_channels * downscale_factor**2 + + self.unshuffle = nn.PixelUnshuffle(downscale_factor) + self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1) + + self.body = [] + # blocks to extract XL features with dimensions of [320, 64, 64], [640, 64, 64], [1280, 32, 32], [1280, 32, 32] + for i in range(len(channels)): + if i == 1: + self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks)) + elif i == 2: + self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True)) + else: + self.body.append(AdapterBlock(channels[i], channels[i], num_res_blocks)) + + self.body = nn.ModuleList(self.body) + # XL has only one downsampling AdapterBlock. + self.total_downscale_factor = downscale_factor * 2 + + def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + r""" + This method takes the tensor x as input and processes it through FullAdapterXL model. It consists of operations + including unshuffling pixels, applying convolution layer and appending each block into list of feature tensors. + """ + x = self.unshuffle(x) + x = self.conv_in(x) + + features = [] + + for block in self.body: + x = block(x) + features.append(x) + + return features + + +class AdapterBlock(nn.Module): + r""" + An AdapterBlock is a helper model that contains multiple ResNet-like blocks. It is used in the `FullAdapter` and + `FullAdapterXL` models. + + Parameters: + in_channels (`int`): + Number of channels of AdapterBlock's input. + out_channels (`int`): + Number of channels of AdapterBlock's output. + num_res_blocks (`int`): + Number of ResNet blocks in the AdapterBlock. + down (`bool`, *optional*, defaults to `False`): + Whether to perform downsampling on AdapterBlock's input. + """ + + def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False): + super().__init__() + + self.downsample = None + if down: + self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True) + + self.in_conv = None + if in_channels != out_channels: + self.in_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) + + self.resnets = nn.Sequential( + *[AdapterResnetBlock(out_channels) for _ in range(num_res_blocks)], + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + r""" + This method takes tensor x as input and performs operations downsampling and convolutional layers if the + self.downsample and self.in_conv properties of AdapterBlock model are specified. Then it applies a series of + residual blocks to the input tensor. + """ + if self.downsample is not None: + x = self.downsample(x) + + if self.in_conv is not None: + x = self.in_conv(x) + + x = self.resnets(x) + + return x + + +class AdapterResnetBlock(nn.Module): + r""" + An `AdapterResnetBlock` is a helper model that implements a ResNet-like block. + + Parameters: + channels (`int`): + Number of channels of AdapterResnetBlock's input and output. + """ + + def __init__(self, channels: int): + super().__init__() + self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) + self.act = nn.ReLU() + self.block2 = nn.Conv2d(channels, channels, kernel_size=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + r""" + This method takes input tensor x and applies a convolutional layer, ReLU activation, and another convolutional + layer on the input tensor. 
It returns addition with the input tensor. + """ + + h = self.act(self.block1(x)) + h = self.block2(h) + + return h + x + + +# light adapter + + +class LightAdapter(nn.Module): + r""" + See [`T2IAdapter`] for more information. + """ + + def __init__( + self, + in_channels: int = 3, + channels: List[int] = [320, 640, 1280], + num_res_blocks: int = 4, + downscale_factor: int = 8, + ): + super().__init__() + + in_channels = in_channels * downscale_factor**2 + + self.unshuffle = nn.PixelUnshuffle(downscale_factor) + + self.body = nn.ModuleList( + [ + LightAdapterBlock(in_channels, channels[0], num_res_blocks), + *[ + LightAdapterBlock(channels[i], channels[i + 1], num_res_blocks, down=True) + for i in range(len(channels) - 1) + ], + LightAdapterBlock(channels[-1], channels[-1], num_res_blocks, down=True), + ] + ) + + self.total_downscale_factor = downscale_factor * (2 ** len(channels)) + + def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + r""" + This method takes the input tensor x and performs downscaling and appends it in list of feature tensors. Each + feature tensor corresponds to a different level of processing within the LightAdapter. + """ + x = self.unshuffle(x) + + features = [] + + for block in self.body: + x = block(x) + features.append(x) + + return features + + +class LightAdapterBlock(nn.Module): + r""" + A `LightAdapterBlock` is a helper model that contains multiple `LightAdapterResnetBlocks`. It is used in the + `LightAdapter` model. + + Parameters: + in_channels (`int`): + Number of channels of LightAdapterBlock's input. + out_channels (`int`): + Number of channels of LightAdapterBlock's output. + num_res_blocks (`int`): + Number of LightAdapterResnetBlocks in the LightAdapterBlock. + down (`bool`, *optional*, defaults to `False`): + Whether to perform downsampling on LightAdapterBlock's input. + """ + + def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False): + super().__init__() + mid_channels = out_channels // 4 + + self.downsample = None + if down: + self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True) + + self.in_conv = nn.Conv2d(in_channels, mid_channels, kernel_size=1) + self.resnets = nn.Sequential(*[LightAdapterResnetBlock(mid_channels) for _ in range(num_res_blocks)]) + self.out_conv = nn.Conv2d(mid_channels, out_channels, kernel_size=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + r""" + This method takes tensor x as input and performs downsampling if required. Then it applies in convolution + layer, a sequence of residual blocks, and out convolutional layer. + """ + if self.downsample is not None: + x = self.downsample(x) + + x = self.in_conv(x) + x = self.resnets(x) + x = self.out_conv(x) + + return x + + +class LightAdapterResnetBlock(nn.Module): + """ + A `LightAdapterResnetBlock` is a helper model that implements a ResNet-like block with a slightly different + architecture than `AdapterResnetBlock`. + + Parameters: + channels (`int`): + Number of channels of LightAdapterResnetBlock's input and output. + """ + + def __init__(self, channels: int): + super().__init__() + self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) + self.act = nn.ReLU() + self.block2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + r""" + This function takes input tensor x and processes it through one convolutional layer, ReLU activation, and + another convolutional layer and adds it to input tensor. 
+ """ + + h = self.act(self.block1(x)) + h = self.block2(h) + + return h + x diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/attention.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/attention.py new file mode 100644 index 000000000..7cdd4ce6b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/attention.py @@ -0,0 +1,681 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Dict, Optional +import os + +import torch +import torch.nn.functional as F +from torch import nn + +from ..utils import deprecate, logging +from ..utils.torch_utils import maybe_allow_in_graph +from .activations import GEGLU, GELU, ApproximateGELU, IXF_GEGLU +from .attention_processor import Attention +from .embeddings import SinusoidalPositionalEmbedding +from .normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm + + +logger = logging.get_logger(__name__) + + +def _chunked_feed_forward(ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int): + # "feed_forward_chunk_size" can be used to save memory + if hidden_states.shape[chunk_dim] % chunk_size != 0: + raise ValueError( + f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." + ) + + num_chunks = hidden_states.shape[chunk_dim] // chunk_size + ff_output = torch.cat( + [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)], + dim=chunk_dim, + ) + return ff_output + + +@maybe_allow_in_graph +class GatedSelfAttentionDense(nn.Module): + r""" + A gated self-attention dense layer that combines visual features and object features. + + Parameters: + query_dim (`int`): The number of channels in the query. + context_dim (`int`): The number of channels in the context. + n_heads (`int`): The number of heads to use for attention. + d_head (`int`): The number of channels in each head. 
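    A minimal shape check (toy sizes; this assumes the block behaves as in upstream diffusers, and note that this
    vendored copy also pulls in `ixformer` through `activations.py` at import time):

    ```py
    import torch

    block = GatedSelfAttentionDense(query_dim=64, context_dim=32, n_heads=4, d_head=16)
    x = torch.randn(1, 77, 64)    # visual tokens (query_dim channels)
    objs = torch.randn(1, 5, 32)  # object/grounding features (context_dim channels)
    out = block(x, objs)
    print(out.shape)  # torch.Size([1, 77, 64]), same shape as x
    ```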
+ """ + + def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int): + super().__init__() + + # we need a linear projection since we need cat visual feature and obj feature + self.linear = nn.Linear(context_dim, query_dim) + + self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head) + self.ff = FeedForward(query_dim, activation_fn="geglu") + + self.norm1 = nn.LayerNorm(query_dim) + self.norm2 = nn.LayerNorm(query_dim) + + self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0))) + self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0))) + + self.enabled = True + + def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor: + if not self.enabled: + return x + + n_visual = x.shape[1] + objs = self.linear(objs) + + x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :] + x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x)) + + return x + + +@maybe_allow_in_graph +class BasicTransformerBlock(nn.Module): + r""" + A basic Transformer block. + + Parameters: + dim (`int`): The number of channels in the input and output. + num_attention_heads (`int`): The number of heads to use for multi-head attention. + attention_head_dim (`int`): The number of channels in each head. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. + activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. + num_embeds_ada_norm (: + obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`. + attention_bias (: + obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. + only_cross_attention (`bool`, *optional*): + Whether to use only cross-attention layers. In this case two cross attention layers are used. + double_self_attention (`bool`, *optional*): + Whether to use two self-attention layers. In this case no cross attention layers are used. + upcast_attention (`bool`, *optional*): + Whether to upcast the attention computation to float32. This is useful for mixed precision training. + norm_elementwise_affine (`bool`, *optional*, defaults to `True`): + Whether to use learnable elementwise affine parameters for normalization. + norm_type (`str`, *optional*, defaults to `"layer_norm"`): + The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`. + final_dropout (`bool` *optional*, defaults to False): + Whether to apply a final dropout after the last feed-forward layer. + attention_type (`str`, *optional*, defaults to `"default"`): + The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`. + positional_embeddings (`str`, *optional*, defaults to `None`): + The type of positional embeddings to apply to. + num_positional_embeddings (`int`, *optional*, defaults to `None`): + The maximum number of positional embeddings to apply. 
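    Example (a minimal self-attention-only sketch with toy sizes; output shapes follow the upstream implementation, and
    this vendored copy additionally requires the `ixformer` package imported by `activations.py`):

    ```py
    import torch

    block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
    hidden_states = torch.randn(2, 77, 64)
    out = block(hidden_states)
    print(out.shape)  # torch.Size([2, 77, 64])
    ```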
+ """ + + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + dropout=0.0, + cross_attention_dim: Optional[int] = None, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, + attention_bias: bool = False, + only_cross_attention: bool = False, + double_self_attention: bool = False, + upcast_attention: bool = False, + norm_elementwise_affine: bool = True, + norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen' + norm_eps: float = 1e-5, + final_dropout: bool = False, + attention_type: str = "default", + positional_embeddings: Optional[str] = None, + num_positional_embeddings: Optional[int] = None, + ada_norm_continous_conditioning_embedding_dim: Optional[int] = None, + ada_norm_bias: Optional[int] = None, + ff_inner_dim: Optional[int] = None, + ff_bias: bool = True, + attention_out_bias: bool = True, + ): + super().__init__() + self.only_cross_attention = only_cross_attention + + # We keep these boolean flags for backward-compatibility. + self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" + self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" + self.use_ada_layer_norm_single = norm_type == "ada_norm_single" + self.use_layer_norm = norm_type == "layer_norm" + self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous" + + if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: + raise ValueError( + f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" + f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." + ) + + self.norm_type = norm_type + self.num_embeds_ada_norm = num_embeds_ada_norm + + if positional_embeddings and (num_positional_embeddings is None): + raise ValueError( + "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined." + ) + + if positional_embeddings == "sinusoidal": + self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings) + else: + self.pos_embed = None + + # Define 3 blocks. Each block has its own normalization layer. + # 1. Self-Attn + if norm_type == "ada_norm": + self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) + elif norm_type == "ada_norm_zero": + self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm) + elif norm_type == "ada_norm_continuous": + self.norm1 = AdaLayerNormContinuous( + dim, + ada_norm_continous_conditioning_embedding_dim, + norm_elementwise_affine, + norm_eps, + ada_norm_bias, + "rms_norm", + ) + else: + if int(os.environ.get("USE_APEX_LN", 0)): + from apex.normalization import FusedLayerNorm + self.norm1 = FusedLayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) + else: + self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) + + self.attn1 = Attention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + cross_attention_dim=cross_attention_dim if only_cross_attention else None, + upcast_attention=upcast_attention, + out_bias=attention_out_bias, + ) + + # 2. Cross-Attn + if cross_attention_dim is not None or double_self_attention: + # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. + # I.e. 
the number of returned modulation chunks from AdaLayerZero would not make sense if returned during + # the second cross attention block. + if norm_type == "ada_norm": + self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm) + elif norm_type == "ada_norm_continuous": + self.norm2 = AdaLayerNormContinuous( + dim, + ada_norm_continous_conditioning_embedding_dim, + norm_elementwise_affine, + norm_eps, + ada_norm_bias, + "rms_norm", + ) + else: + if int(os.environ.get("USE_APEX_LN", 0)): + from apex.normalization import FusedLayerNorm + self.norm2 = FusedLayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) + else: + self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) + + self.attn2 = Attention( + query_dim=dim, + cross_attention_dim=cross_attention_dim if not double_self_attention else None, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + upcast_attention=upcast_attention, + out_bias=attention_out_bias, + ) # is self-attn if encoder_hidden_states is none + else: + self.norm2 = None + self.attn2 = None + + # 3. Feed-forward + if norm_type == "ada_norm_continuous": + self.norm3 = AdaLayerNormContinuous( + dim, + ada_norm_continous_conditioning_embedding_dim, + norm_elementwise_affine, + norm_eps, + ada_norm_bias, + "layer_norm", + ) + + elif norm_type in ["ada_norm_zero", "ada_norm", "layer_norm", "ada_norm_continuous"]: + if int(os.environ.get("USE_APEX_LN", 0)): + from apex.normalization import FusedLayerNorm + self.norm3 = FusedLayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) + else: + self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) + elif norm_type == "layer_norm_i2vgen": + self.norm3 = None + + self.ff = FeedForward( + dim, + dropout=dropout, + activation_fn=activation_fn, + final_dropout=final_dropout, + inner_dim=ff_inner_dim, + bias=ff_bias, + ) + + # 4. Fuser + if attention_type == "gated" or attention_type == "gated-text-image": + self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim) + + # 5. Scale-shift for PixArt-Alpha. + if norm_type == "ada_norm_single": + self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5) + + # let chunk size default to None + self._chunk_size = None + self._chunk_dim = 0 + + def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0): + # Sets chunk feed-forward + self._chunk_size = chunk_size + self._chunk_dim = dim + + def forward( + self, + hidden_states: torch.FloatTensor, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + timestep: Optional[torch.LongTensor] = None, + cross_attention_kwargs: Dict[str, Any] = None, + class_labels: Optional[torch.LongTensor] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + ) -> torch.FloatTensor: + if cross_attention_kwargs is not None: + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. `scale` will be ignored.") + + # Notice that normalization is always applied before the real computation in the following blocks. + # 0. 
Self-Attention + batch_size = hidden_states.shape[0] + if self.norm_type == "ada_norm": + norm_hidden_states = self.norm1(hidden_states, timestep) + elif self.norm_type == "ada_norm_zero": + norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1( + hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype + ) + elif self.norm_type in ["layer_norm", "layer_norm_i2vgen"]: + norm_hidden_states = self.norm1(hidden_states) + elif self.norm_type == "ada_norm_continuous": + norm_hidden_states = self.norm1(hidden_states, added_cond_kwargs["pooled_text_emb"]) + elif self.norm_type == "ada_norm_single": + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ( + self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1) + ).chunk(6, dim=1) + norm_hidden_states = self.norm1(hidden_states) + norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa + norm_hidden_states = norm_hidden_states.squeeze(1) + else: + raise ValueError("Incorrect norm used") + + if self.pos_embed is not None: + norm_hidden_states = self.pos_embed(norm_hidden_states) + + # 1. Prepare GLIGEN inputs + cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} + gligen_kwargs = cross_attention_kwargs.pop("gligen", None) + + attn_output = self.attn1( + norm_hidden_states, + encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, + attention_mask=attention_mask, + **cross_attention_kwargs, + ) + if self.norm_type == "ada_norm_zero": + attn_output = gate_msa.unsqueeze(1) * attn_output + elif self.norm_type == "ada_norm_single": + attn_output = gate_msa * attn_output + + hidden_states = attn_output + hidden_states + if hidden_states.ndim == 4: + hidden_states = hidden_states.squeeze(1) + + # 1.2 GLIGEN Control + if gligen_kwargs is not None: + hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"]) + + # 3. Cross-Attention + if self.attn2 is not None: + if self.norm_type == "ada_norm": + norm_hidden_states = self.norm2(hidden_states, timestep) + elif self.norm_type in ["ada_norm_zero", "layer_norm", "layer_norm_i2vgen"]: + norm_hidden_states = self.norm2(hidden_states) + elif self.norm_type == "ada_norm_single": + # For PixArt norm2 isn't applied here: + # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103 + norm_hidden_states = hidden_states + elif self.norm_type == "ada_norm_continuous": + norm_hidden_states = self.norm2(hidden_states, added_cond_kwargs["pooled_text_emb"]) + else: + raise ValueError("Incorrect norm") + + if self.pos_embed is not None and self.norm_type != "ada_norm_single": + norm_hidden_states = self.pos_embed(norm_hidden_states) + + attn_output = self.attn2( + norm_hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + **cross_attention_kwargs, + ) + hidden_states = attn_output + hidden_states + + # 4. 
Feed-forward + # i2vgen doesn't have this norm 🤷‍♂️ + if self.norm_type == "ada_norm_continuous": + norm_hidden_states = self.norm3(hidden_states, added_cond_kwargs["pooled_text_emb"]) + elif not self.norm_type == "ada_norm_single": + norm_hidden_states = self.norm3(hidden_states) + + if self.norm_type == "ada_norm_zero": + norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] + + if self.norm_type == "ada_norm_single": + norm_hidden_states = self.norm2(hidden_states) + norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp + if self._chunk_size is not None: + # "feed_forward_chunk_size" can be used to save memory + ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) + else: + ff_output = self.ff(norm_hidden_states) + + if self.norm_type == "ada_norm_zero": + ff_output = gate_mlp.unsqueeze(1) * ff_output + elif self.norm_type == "ada_norm_single": + ff_output = gate_mlp * ff_output + + hidden_states = ff_output + hidden_states + if hidden_states.ndim == 4: + hidden_states = hidden_states.squeeze(1) + + return hidden_states + + +@maybe_allow_in_graph +class TemporalBasicTransformerBlock(nn.Module): + r""" + A basic Transformer block for video like data. + + Parameters: + dim (`int`): The number of channels in the input and output. + time_mix_inner_dim (`int`): The number of channels for temporal attention. + num_attention_heads (`int`): The number of heads to use for multi-head attention. + attention_head_dim (`int`): The number of channels in each head. + cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. + """ + + def __init__( + self, + dim: int, + time_mix_inner_dim: int, + num_attention_heads: int, + attention_head_dim: int, + cross_attention_dim: Optional[int] = None, + ): + super().__init__() + self.is_res = dim == time_mix_inner_dim + + self.norm_in = nn.LayerNorm(dim) + + # Define 3 blocks. Each block has its own normalization layer. + # 1. Self-Attn + self.ff_in = FeedForward( + dim, + dim_out=time_mix_inner_dim, + activation_fn="geglu", + ) + + self.norm1 = nn.LayerNorm(time_mix_inner_dim) + self.attn1 = Attention( + query_dim=time_mix_inner_dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + cross_attention_dim=None, + ) + + # 2. Cross-Attn + if cross_attention_dim is not None: + # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. + # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during + # the second cross attention block. + self.norm2 = nn.LayerNorm(time_mix_inner_dim) + self.attn2 = Attention( + query_dim=time_mix_inner_dim, + cross_attention_dim=cross_attention_dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + ) # is self-attn if encoder_hidden_states is none + else: + self.norm2 = None + self.attn2 = None + + # 3. Feed-forward + self.norm3 = nn.LayerNorm(time_mix_inner_dim) + self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu") + + # let chunk size default to None + self._chunk_size = None + self._chunk_dim = None + + def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs): + # Sets chunk feed-forward + self._chunk_size = chunk_size + # chunk dim should be hardcoded to 1 to have better speed vs. 
memory trade-off + self._chunk_dim = 1 + + def forward( + self, + hidden_states: torch.FloatTensor, + num_frames: int, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + # Notice that normalization is always applied before the real computation in the following blocks. + # 0. Self-Attention + batch_size = hidden_states.shape[0] + + batch_frames, seq_length, channels = hidden_states.shape + batch_size = batch_frames // num_frames + + hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels) + hidden_states = hidden_states.permute(0, 2, 1, 3) + hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels) + + residual = hidden_states + hidden_states = self.norm_in(hidden_states) + + if self._chunk_size is not None: + hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size) + else: + hidden_states = self.ff_in(hidden_states) + + if self.is_res: + hidden_states = hidden_states + residual + + norm_hidden_states = self.norm1(hidden_states) + attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) + hidden_states = attn_output + hidden_states + + # 3. Cross-Attention + if self.attn2 is not None: + norm_hidden_states = self.norm2(hidden_states) + attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states) + hidden_states = attn_output + hidden_states + + # 4. Feed-forward + norm_hidden_states = self.norm3(hidden_states) + + if self._chunk_size is not None: + ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) + else: + ff_output = self.ff(norm_hidden_states) + + if self.is_res: + hidden_states = ff_output + hidden_states + else: + hidden_states = ff_output + + hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels) + hidden_states = hidden_states.permute(0, 2, 1, 3) + hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels) + + return hidden_states + + +class SkipFFTransformerBlock(nn.Module): + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + kv_input_dim: int, + kv_input_dim_proj_use_bias: bool, + dropout=0.0, + cross_attention_dim: Optional[int] = None, + attention_bias: bool = False, + attention_out_bias: bool = True, + ): + super().__init__() + if kv_input_dim != dim: + self.kv_mapper = nn.Linear(kv_input_dim, dim, kv_input_dim_proj_use_bias) + else: + self.kv_mapper = None + + self.norm1 = RMSNorm(dim, 1e-06) + + self.attn1 = Attention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + cross_attention_dim=cross_attention_dim, + out_bias=attention_out_bias, + ) + + self.norm2 = RMSNorm(dim, 1e-06) + + self.attn2 = Attention( + query_dim=dim, + cross_attention_dim=cross_attention_dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + out_bias=attention_out_bias, + ) + + def forward(self, hidden_states, encoder_hidden_states, cross_attention_kwargs): + cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} + + if self.kv_mapper is not None: + encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states)) + + norm_hidden_states = self.norm1(hidden_states) + + attn_output = self.attn1( + norm_hidden_states, + encoder_hidden_states=encoder_hidden_states, + **cross_attention_kwargs, + ) + + 
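# Residual connection around the first attention block; note that, unlike
+ # `BasicTransformerBlock`, this block defines no feed-forward sub-layer.
+ 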
hidden_states = attn_output + hidden_states + + norm_hidden_states = self.norm2(hidden_states) + + attn_output = self.attn2( + norm_hidden_states, + encoder_hidden_states=encoder_hidden_states, + **cross_attention_kwargs, + ) + + hidden_states = attn_output + hidden_states + + return hidden_states + + +class FeedForward(nn.Module): + r""" + A feed-forward layer. + + Parameters: + dim (`int`): The number of channels in the input. + dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`. + mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. + final_dropout (`bool` *optional*, defaults to False): Apply a final dropout. + bias (`bool`, defaults to True): Whether to use a bias in the linear layer. + """ + + def __init__( + self, + dim: int, + dim_out: Optional[int] = None, + mult: int = 4, + dropout: float = 0.0, + activation_fn: str = "geglu", + final_dropout: bool = False, + inner_dim=None, + bias: bool = True, + ): + super().__init__() + if inner_dim is None: + inner_dim = int(dim * mult) + dim_out = dim_out if dim_out is not None else dim + linear_cls = nn.Linear + + if activation_fn == "gelu": + act_fn = GELU(dim, inner_dim, bias=bias) + if activation_fn == "gelu-approximate": + act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias) + elif activation_fn == "geglu": + import os + if int(os.environ.get("USE_IXFORMER_GEGLU", 0)): + print("==> use IXF_GEGLU") + act_fn = IXF_GEGLU(dim, inner_dim, bias=bias) + else: + act_fn = GEGLU(dim, inner_dim, bias=bias) + elif activation_fn == "geglu-approximate": + act_fn = ApproximateGELU(dim, inner_dim, bias=bias) + + self.net = nn.ModuleList([]) + # project in + self.net.append(act_fn) + # project dropout + self.net.append(nn.Dropout(dropout)) + # project out + self.net.append(linear_cls(inner_dim, dim_out, bias=bias)) + # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout + if final_dropout: + self.net.append(nn.Dropout(dropout)) + + def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + for module in self.net: + hidden_states = module(hidden_states) + return hidden_states diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/attention_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/attention_flax.py new file mode 100644 index 000000000..25ae5d0a5 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/attention_flax.py @@ -0,0 +1,494 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools +import math + +import flax.linen as nn +import jax +import jax.numpy as jnp + + +def _query_chunk_attention(query, key, value, precision, key_chunk_size: int = 4096): + """Multi-head dot product attention with a limited number of queries.""" + num_kv, num_heads, k_features = key.shape[-3:] + v_features = value.shape[-1] + key_chunk_size = min(key_chunk_size, num_kv) + query = query / jnp.sqrt(k_features) + + @functools.partial(jax.checkpoint, prevent_cse=False) + def summarize_chunk(query, key, value): + attn_weights = jnp.einsum("...qhd,...khd->...qhk", query, key, precision=precision) + + max_score = jnp.max(attn_weights, axis=-1, keepdims=True) + max_score = jax.lax.stop_gradient(max_score) + exp_weights = jnp.exp(attn_weights - max_score) + + exp_values = jnp.einsum("...vhf,...qhv->...qhf", value, exp_weights, precision=precision) + max_score = jnp.einsum("...qhk->...qh", max_score) + + return (exp_values, exp_weights.sum(axis=-1), max_score) + + def chunk_scanner(chunk_idx): + # julienne key array + key_chunk = jax.lax.dynamic_slice( + operand=key, + start_indices=[0] * (key.ndim - 3) + [chunk_idx, 0, 0], # [...,k,h,d] + slice_sizes=list(key.shape[:-3]) + [key_chunk_size, num_heads, k_features], # [...,k,h,d] + ) + + # julienne value array + value_chunk = jax.lax.dynamic_slice( + operand=value, + start_indices=[0] * (value.ndim - 3) + [chunk_idx, 0, 0], # [...,v,h,d] + slice_sizes=list(value.shape[:-3]) + [key_chunk_size, num_heads, v_features], # [...,v,h,d] + ) + + return summarize_chunk(query, key_chunk, value_chunk) + + chunk_values, chunk_weights, chunk_max = jax.lax.map(f=chunk_scanner, xs=jnp.arange(0, num_kv, key_chunk_size)) + + global_max = jnp.max(chunk_max, axis=0, keepdims=True) + max_diffs = jnp.exp(chunk_max - global_max) + + chunk_values *= jnp.expand_dims(max_diffs, axis=-1) + chunk_weights *= max_diffs + + all_values = chunk_values.sum(axis=0) + all_weights = jnp.expand_dims(chunk_weights, -1).sum(axis=0) + + return all_values / all_weights + + +def jax_memory_efficient_attention( + query, key, value, precision=jax.lax.Precision.HIGHEST, query_chunk_size: int = 1024, key_chunk_size: int = 4096 +): + r""" + Flax Memory-efficient multi-head dot product attention. 
https://arxiv.org/abs/2112.05682v2 + https://github.com/AminRezaei0x443/memory-efficient-attention + + Args: + query (`jnp.ndarray`): (batch..., query_length, head, query_key_depth_per_head) + key (`jnp.ndarray`): (batch..., key_value_length, head, query_key_depth_per_head) + value (`jnp.ndarray`): (batch..., key_value_length, head, value_depth_per_head) + precision (`jax.lax.Precision`, *optional*, defaults to `jax.lax.Precision.HIGHEST`): + numerical precision for computation + query_chunk_size (`int`, *optional*, defaults to 1024): + chunk size to divide query array value must divide query_length equally without remainder + key_chunk_size (`int`, *optional*, defaults to 4096): + chunk size to divide key and value array value must divide key_value_length equally without remainder + + Returns: + (`jnp.ndarray`) with shape of (batch..., query_length, head, value_depth_per_head) + """ + num_q, num_heads, q_features = query.shape[-3:] + + def chunk_scanner(chunk_idx, _): + # julienne query array + query_chunk = jax.lax.dynamic_slice( + operand=query, + start_indices=([0] * (query.ndim - 3)) + [chunk_idx, 0, 0], # [...,q,h,d] + slice_sizes=list(query.shape[:-3]) + [min(query_chunk_size, num_q), num_heads, q_features], # [...,q,h,d] + ) + + return ( + chunk_idx + query_chunk_size, # unused ignore it + _query_chunk_attention( + query=query_chunk, key=key, value=value, precision=precision, key_chunk_size=key_chunk_size + ), + ) + + _, res = jax.lax.scan( + f=chunk_scanner, + init=0, + xs=None, + length=math.ceil(num_q / query_chunk_size), # start counter # stop counter + ) + + return jnp.concatenate(res, axis=-3) # fuse the chunked result back + + +class FlaxAttention(nn.Module): + r""" + A Flax multi-head attention module as described in: https://arxiv.org/abs/1706.03762 + + Parameters: + query_dim (:obj:`int`): + Input hidden states dimension + heads (:obj:`int`, *optional*, defaults to 8): + Number of heads + dim_head (:obj:`int`, *optional*, defaults to 64): + Hidden states dimension inside each head + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): + enable memory efficient attention https://arxiv.org/abs/2112.05682 + split_head_dim (`bool`, *optional*, defaults to `False`): + Whether to split the head dimension into a new axis for the self-attention computation. In most cases, + enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. 
+ dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + + """ + + query_dim: int + heads: int = 8 + dim_head: int = 64 + dropout: float = 0.0 + use_memory_efficient_attention: bool = False + split_head_dim: bool = False + dtype: jnp.dtype = jnp.float32 + + def setup(self): + inner_dim = self.dim_head * self.heads + self.scale = self.dim_head**-0.5 + + # Weights were exported with old names {to_q, to_k, to_v, to_out} + self.query = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_q") + self.key = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_k") + self.value = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_v") + + self.proj_attn = nn.Dense(self.query_dim, dtype=self.dtype, name="to_out_0") + self.dropout_layer = nn.Dropout(rate=self.dropout) + + def reshape_heads_to_batch_dim(self, tensor): + batch_size, seq_len, dim = tensor.shape + head_size = self.heads + tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size) + tensor = jnp.transpose(tensor, (0, 2, 1, 3)) + tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size) + return tensor + + def reshape_batch_dim_to_heads(self, tensor): + batch_size, seq_len, dim = tensor.shape + head_size = self.heads + tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) + tensor = jnp.transpose(tensor, (0, 2, 1, 3)) + tensor = tensor.reshape(batch_size // head_size, seq_len, dim * head_size) + return tensor + + def __call__(self, hidden_states, context=None, deterministic=True): + context = hidden_states if context is None else context + + query_proj = self.query(hidden_states) + key_proj = self.key(context) + value_proj = self.value(context) + + if self.split_head_dim: + b = hidden_states.shape[0] + query_states = jnp.reshape(query_proj, (b, -1, self.heads, self.dim_head)) + key_states = jnp.reshape(key_proj, (b, -1, self.heads, self.dim_head)) + value_states = jnp.reshape(value_proj, (b, -1, self.heads, self.dim_head)) + else: + query_states = self.reshape_heads_to_batch_dim(query_proj) + key_states = self.reshape_heads_to_batch_dim(key_proj) + value_states = self.reshape_heads_to_batch_dim(value_proj) + + if self.use_memory_efficient_attention: + query_states = query_states.transpose(1, 0, 2) + key_states = key_states.transpose(1, 0, 2) + value_states = value_states.transpose(1, 0, 2) + + # this if statement create a chunk size for each layer of the unet + # the chunk size is equal to the query_length dimension of the deepest layer of the unet + + flatten_latent_dim = query_states.shape[-3] + if flatten_latent_dim % 64 == 0: + query_chunk_size = int(flatten_latent_dim / 64) + elif flatten_latent_dim % 16 == 0: + query_chunk_size = int(flatten_latent_dim / 16) + elif flatten_latent_dim % 4 == 0: + query_chunk_size = int(flatten_latent_dim / 4) + else: + query_chunk_size = int(flatten_latent_dim) + + hidden_states = jax_memory_efficient_attention( + query_states, key_states, value_states, query_chunk_size=query_chunk_size, key_chunk_size=4096 * 4 + ) + + hidden_states = hidden_states.transpose(1, 0, 2) + else: + # compute attentions + if self.split_head_dim: + attention_scores = jnp.einsum("b t n h, b f n h -> b n f t", key_states, query_states) + else: + attention_scores = jnp.einsum("b i d, b j d->b i j", query_states, key_states) + + attention_scores = attention_scores * self.scale + attention_probs = nn.softmax(attention_scores, axis=-1 if self.split_head_dim else 2) + + # attend to values + if self.split_head_dim: + 
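# attention_probs: (batch, heads, q_len, kv_len); value_states: (batch, kv_len, heads, head_dim);
+ # the einsum below therefore yields (batch, q_len, heads, head_dim) before the final reshape.
+ 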
hidden_states = jnp.einsum("b n f t, b t n h -> b f n h", attention_probs, value_states) + b = hidden_states.shape[0] + hidden_states = jnp.reshape(hidden_states, (b, -1, self.heads * self.dim_head)) + else: + hidden_states = jnp.einsum("b i j, b j d -> b i d", attention_probs, value_states) + hidden_states = self.reshape_batch_dim_to_heads(hidden_states) + + hidden_states = self.proj_attn(hidden_states) + return self.dropout_layer(hidden_states, deterministic=deterministic) + + +class FlaxBasicTransformerBlock(nn.Module): + r""" + A Flax transformer block layer with `GLU` (Gated Linear Unit) activation function as described in: + https://arxiv.org/abs/1706.03762 + + + Parameters: + dim (:obj:`int`): + Inner hidden states dimension + n_heads (:obj:`int`): + Number of heads + d_head (:obj:`int`): + Hidden states dimension inside each head + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + only_cross_attention (`bool`, defaults to `False`): + Whether to only apply cross attention. + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): + enable memory efficient attention https://arxiv.org/abs/2112.05682 + split_head_dim (`bool`, *optional*, defaults to `False`): + Whether to split the head dimension into a new axis for the self-attention computation. In most cases, + enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. + """ + + dim: int + n_heads: int + d_head: int + dropout: float = 0.0 + only_cross_attention: bool = False + dtype: jnp.dtype = jnp.float32 + use_memory_efficient_attention: bool = False + split_head_dim: bool = False + + def setup(self): + # self attention (or cross_attention if only_cross_attention is True) + self.attn1 = FlaxAttention( + self.dim, + self.n_heads, + self.d_head, + self.dropout, + self.use_memory_efficient_attention, + self.split_head_dim, + dtype=self.dtype, + ) + # cross attention + self.attn2 = FlaxAttention( + self.dim, + self.n_heads, + self.d_head, + self.dropout, + self.use_memory_efficient_attention, + self.split_head_dim, + dtype=self.dtype, + ) + self.ff = FlaxFeedForward(dim=self.dim, dropout=self.dropout, dtype=self.dtype) + self.norm1 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype) + self.norm2 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype) + self.norm3 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype) + self.dropout_layer = nn.Dropout(rate=self.dropout) + + def __call__(self, hidden_states, context, deterministic=True): + # self attention + residual = hidden_states + if self.only_cross_attention: + hidden_states = self.attn1(self.norm1(hidden_states), context, deterministic=deterministic) + else: + hidden_states = self.attn1(self.norm1(hidden_states), deterministic=deterministic) + hidden_states = hidden_states + residual + + # cross attention + residual = hidden_states + hidden_states = self.attn2(self.norm2(hidden_states), context, deterministic=deterministic) + hidden_states = hidden_states + residual + + # feed forward + residual = hidden_states + hidden_states = self.ff(self.norm3(hidden_states), deterministic=deterministic) + hidden_states = hidden_states + residual + + return self.dropout_layer(hidden_states, deterministic=deterministic) + + +class FlaxTransformer2DModel(nn.Module): + r""" + A Spatial Transformer layer with Gated Linear Unit (GLU) activation function as described in: + https://arxiv.org/pdf/1506.02025.pdf + + + Parameters: + in_channels (:obj:`int`): + 
Input number of channels + n_heads (:obj:`int`): + Number of heads + d_head (:obj:`int`): + Hidden states dimension inside each head + depth (:obj:`int`, *optional*, defaults to 1): + Number of transformers block + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + use_linear_projection (`bool`, defaults to `False`): tbd + only_cross_attention (`bool`, defaults to `False`): tbd + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): + enable memory efficient attention https://arxiv.org/abs/2112.05682 + split_head_dim (`bool`, *optional*, defaults to `False`): + Whether to split the head dimension into a new axis for the self-attention computation. In most cases, + enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. + """ + + in_channels: int + n_heads: int + d_head: int + depth: int = 1 + dropout: float = 0.0 + use_linear_projection: bool = False + only_cross_attention: bool = False + dtype: jnp.dtype = jnp.float32 + use_memory_efficient_attention: bool = False + split_head_dim: bool = False + + def setup(self): + self.norm = nn.GroupNorm(num_groups=32, epsilon=1e-5) + + inner_dim = self.n_heads * self.d_head + if self.use_linear_projection: + self.proj_in = nn.Dense(inner_dim, dtype=self.dtype) + else: + self.proj_in = nn.Conv( + inner_dim, + kernel_size=(1, 1), + strides=(1, 1), + padding="VALID", + dtype=self.dtype, + ) + + self.transformer_blocks = [ + FlaxBasicTransformerBlock( + inner_dim, + self.n_heads, + self.d_head, + dropout=self.dropout, + only_cross_attention=self.only_cross_attention, + dtype=self.dtype, + use_memory_efficient_attention=self.use_memory_efficient_attention, + split_head_dim=self.split_head_dim, + ) + for _ in range(self.depth) + ] + + if self.use_linear_projection: + self.proj_out = nn.Dense(inner_dim, dtype=self.dtype) + else: + self.proj_out = nn.Conv( + inner_dim, + kernel_size=(1, 1), + strides=(1, 1), + padding="VALID", + dtype=self.dtype, + ) + + self.dropout_layer = nn.Dropout(rate=self.dropout) + + def __call__(self, hidden_states, context, deterministic=True): + batch, height, width, channels = hidden_states.shape + residual = hidden_states + hidden_states = self.norm(hidden_states) + if self.use_linear_projection: + hidden_states = hidden_states.reshape(batch, height * width, channels) + hidden_states = self.proj_in(hidden_states) + else: + hidden_states = self.proj_in(hidden_states) + hidden_states = hidden_states.reshape(batch, height * width, channels) + + for transformer_block in self.transformer_blocks: + hidden_states = transformer_block(hidden_states, context, deterministic=deterministic) + + if self.use_linear_projection: + hidden_states = self.proj_out(hidden_states) + hidden_states = hidden_states.reshape(batch, height, width, channels) + else: + hidden_states = hidden_states.reshape(batch, height, width, channels) + hidden_states = self.proj_out(hidden_states) + + hidden_states = hidden_states + residual + return self.dropout_layer(hidden_states, deterministic=deterministic) + + +class FlaxFeedForward(nn.Module): + r""" + Flax module that encapsulates two Linear layers separated by a non-linearity. It is the counterpart of PyTorch's + [`FeedForward`] class, with the following simplifications: + - The activation function is currently hardcoded to a gated linear unit from: + https://arxiv.org/abs/2002.05202 + - `dim_out` is equal to `dim`. 
+ - The number of hidden dimensions is hardcoded to `dim * 4` in [`FlaxGELU`]. + + Parameters: + dim (:obj:`int`): + Inner hidden states dimension + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + dim: int + dropout: float = 0.0 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + # The second linear layer needs to be called + # net_2 for now to match the index of the Sequential layer + self.net_0 = FlaxGEGLU(self.dim, self.dropout, self.dtype) + self.net_2 = nn.Dense(self.dim, dtype=self.dtype) + + def __call__(self, hidden_states, deterministic=True): + hidden_states = self.net_0(hidden_states, deterministic=deterministic) + hidden_states = self.net_2(hidden_states) + return hidden_states + + +class FlaxGEGLU(nn.Module): + r""" + Flax implementation of a Linear layer followed by the variant of the gated linear unit activation function from + https://arxiv.org/abs/2002.05202. + + Parameters: + dim (:obj:`int`): + Input hidden states dimension + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + dim: int + dropout: float = 0.0 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + inner_dim = self.dim * 4 + self.proj = nn.Dense(inner_dim * 2, dtype=self.dtype) + self.dropout_layer = nn.Dropout(rate=self.dropout) + + def __call__(self, hidden_states, deterministic=True): + hidden_states = self.proj(hidden_states) + hidden_linear, hidden_gelu = jnp.split(hidden_states, 2, axis=2) + return self.dropout_layer(hidden_linear * nn.gelu(hidden_gelu), deterministic=deterministic) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/attention_processor.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/attention_processor.py new file mode 100644 index 000000000..30baef416 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/attention_processor.py @@ -0,0 +1,2507 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from importlib import import_module +from typing import Callable, Optional, Union +import os + +import torch +import torch.nn.functional as F +from torch import nn + +from ..utils import USE_PEFT_BACKEND, deprecate, logging +from ..utils.import_utils import is_xformers_available +from ..utils.torch_utils import maybe_allow_in_graph +from .lora import LoRACompatibleLinear, LoRALinearLayer + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +if is_xformers_available(): + import xformers + import xformers.ops +else: + xformers = None + + +@maybe_allow_in_graph +class Attention(nn.Module): + r""" + A cross attention layer. + + Parameters: + query_dim (`int`): + The number of channels in the query. 
+ cross_attention_dim (`int`, *optional*): + The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`. + heads (`int`, *optional*, defaults to 8): + The number of heads to use for multi-head attention. + dim_head (`int`, *optional*, defaults to 64): + The number of channels in each head. + dropout (`float`, *optional*, defaults to 0.0): + The dropout probability to use. + bias (`bool`, *optional*, defaults to False): + Set to `True` for the query, key, and value linear layers to contain a bias parameter. + upcast_attention (`bool`, *optional*, defaults to False): + Set to `True` to upcast the attention computation to `float32`. + upcast_softmax (`bool`, *optional*, defaults to False): + Set to `True` to upcast the softmax computation to `float32`. + cross_attention_norm (`str`, *optional*, defaults to `None`): + The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`. + cross_attention_norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups to use for the group norm in the cross attention. + added_kv_proj_dim (`int`, *optional*, defaults to `None`): + The number of channels to use for the added key and value projections. If `None`, no projection is used. + norm_num_groups (`int`, *optional*, defaults to `None`): + The number of groups to use for the group norm in the attention. + spatial_norm_dim (`int`, *optional*, defaults to `None`): + The number of channels to use for the spatial normalization. + out_bias (`bool`, *optional*, defaults to `True`): + Set to `True` to use a bias in the output linear layer. + scale_qk (`bool`, *optional*, defaults to `True`): + Set to `True` to scale the query and key by `1 / sqrt(dim_head)`. + only_cross_attention (`bool`, *optional*, defaults to `False`): + Set to `True` to only use cross attention and not added_kv_proj_dim. Can only be set to `True` if + `added_kv_proj_dim` is not `None`. + eps (`float`, *optional*, defaults to 1e-5): + An additional value added to the denominator in group normalization that is used for numerical stability. + rescale_output_factor (`float`, *optional*, defaults to 1.0): + A factor to rescale the output by dividing it with this value. + residual_connection (`bool`, *optional*, defaults to `False`): + Set to `True` to add the residual connection to the output. + _from_deprecated_attn_block (`bool`, *optional*, defaults to `False`): + Set to `True` if the attention block is loaded from a deprecated state dict. + processor (`AttnProcessor`, *optional*, defaults to `None`): + The attention processor to use. If `None`, defaults to `AttnProcessor2_0` if `torch 2.x` is used and + `AttnProcessor` otherwise. 
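+ 
+ Examples:
+ A minimal self-attention sketch (illustrative only; sizes are hypothetical, and the plain
+ `AttnProcessor` defined in this file is passed explicitly so the example does not depend on
+ the environment-driven default processor choice):
+ 
+ ```py
+ import torch
+ from diffusers.models.attention_processor import Attention, AttnProcessor
+ 
+ attn = Attention(query_dim=320, heads=8, dim_head=40, processor=AttnProcessor())
+ hidden_states = torch.randn(2, 64, 320)  # (batch, sequence, query_dim)
+ out = attn(hidden_states)  # self-attention; the output has the same shape as the input
+ ```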
+ """ + + def __init__( + self, + query_dim: int, + cross_attention_dim: Optional[int] = None, + heads: int = 8, + dim_head: int = 64, + dropout: float = 0.0, + bias: bool = False, + upcast_attention: bool = False, + upcast_softmax: bool = False, + cross_attention_norm: Optional[str] = None, + cross_attention_norm_num_groups: int = 32, + added_kv_proj_dim: Optional[int] = None, + norm_num_groups: Optional[int] = None, + spatial_norm_dim: Optional[int] = None, + out_bias: bool = True, + scale_qk: bool = True, + only_cross_attention: bool = False, + eps: float = 1e-5, + rescale_output_factor: float = 1.0, + residual_connection: bool = False, + _from_deprecated_attn_block: bool = False, + processor: Optional["AttnProcessor"] = None, + out_dim: int = None, + ): + super().__init__() + self.inner_dim = out_dim if out_dim is not None else dim_head * heads + self.query_dim = query_dim + self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim + self.upcast_attention = upcast_attention + self.upcast_softmax = upcast_softmax + self.rescale_output_factor = rescale_output_factor + self.residual_connection = residual_connection + self.dropout = dropout + self.fused_projections = False + self.out_dim = out_dim if out_dim is not None else query_dim + + # we make use of this private variable to know whether this class is loaded + # with an deprecated state dict so that we can convert it on the fly + self._from_deprecated_attn_block = _from_deprecated_attn_block + + self.scale_qk = scale_qk + self.scale = dim_head**-0.5 if self.scale_qk else 1.0 + + self.heads = out_dim // dim_head if out_dim is not None else heads + # for slice_size > 0 the attention score computation + # is split across the batch axis to save memory + # You can set slice_size with `set_attention_slice` + self.sliceable_head_dim = heads + + self.added_kv_proj_dim = added_kv_proj_dim + self.only_cross_attention = only_cross_attention + + if self.added_kv_proj_dim is None and self.only_cross_attention: + raise ValueError( + "`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`." + ) + + if norm_num_groups is not None: + self.group_norm = nn.GroupNorm(num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True) + else: + self.group_norm = None + + if spatial_norm_dim is not None: + self.spatial_norm = SpatialNorm(f_channels=query_dim, zq_channels=spatial_norm_dim) + else: + self.spatial_norm = None + + if cross_attention_norm is None: + self.norm_cross = None + elif cross_attention_norm == "layer_norm": + self.norm_cross = nn.LayerNorm(self.cross_attention_dim) + elif cross_attention_norm == "group_norm": + if self.added_kv_proj_dim is not None: + # The given `encoder_hidden_states` are initially of shape + # (batch_size, seq_len, added_kv_proj_dim) before being projected + # to (batch_size, seq_len, cross_attention_dim). The norm is applied + # before the projection, so we need to use `added_kv_proj_dim` as + # the number of channels for the group norm. + norm_cross_num_channels = added_kv_proj_dim + else: + norm_cross_num_channels = self.cross_attention_dim + + self.norm_cross = nn.GroupNorm( + num_channels=norm_cross_num_channels, num_groups=cross_attention_norm_num_groups, eps=1e-5, affine=True + ) + else: + raise ValueError( + f"unknown cross_attention_norm: {cross_attention_norm}. 
Should be None, 'layer_norm' or 'group_norm'" + ) + + if USE_PEFT_BACKEND: + linear_cls = nn.Linear + else: + linear_cls = LoRACompatibleLinear + + self.linear_cls = linear_cls + self.to_q = linear_cls(query_dim, self.inner_dim, bias=bias) + + if not self.only_cross_attention: + # only relevant for the `AddedKVProcessor` classes + self.to_k = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias) + self.to_v = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias) + else: + self.to_k = None + self.to_v = None + + if self.added_kv_proj_dim is not None: + self.add_k_proj = linear_cls(added_kv_proj_dim, self.inner_dim) + self.add_v_proj = linear_cls(added_kv_proj_dim, self.inner_dim) + + self.to_out = nn.ModuleList([]) + self.to_out.append(linear_cls(self.inner_dim, self.out_dim, bias=out_bias)) + self.to_out.append(nn.Dropout(dropout)) + + # set attention processor + # We use the AttnProcessor2_0 by default when torch 2.x is used which uses + # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention + # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 + if processor is None: + # processor = (AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor()) + # processor = (AttnProcessor()) + processor = (AttnProcessor()) if int(os.environ.get("USE_NATIVE_ATTN", 0)) else (AttnProcessor3_0()) + + self.set_processor(processor) + + def set_use_memory_efficient_attention_xformers( + self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None + ) -> None: + r""" + Set whether to use memory efficient attention from `xformers` or not. + + Args: + use_memory_efficient_attention_xformers (`bool`): + Whether to use memory efficient attention from `xformers` or not. + attention_op (`Callable`, *optional*): + The attention operation to use. Defaults to `None` which uses the default attention operation from + `xformers`. + """ + is_lora = hasattr(self, "processor") and isinstance( + self.processor, + LORA_ATTENTION_PROCESSORS, + ) + is_custom_diffusion = hasattr(self, "processor") and isinstance( + self.processor, + (CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor, CustomDiffusionAttnProcessor2_0), + ) + is_added_kv_processor = hasattr(self, "processor") and isinstance( + self.processor, + ( + AttnAddedKVProcessor, + AttnAddedKVProcessor2_0, + SlicedAttnAddedKVProcessor, + XFormersAttnAddedKVProcessor, + LoRAAttnAddedKVProcessor, + ), + ) + + if use_memory_efficient_attention_xformers: + if is_added_kv_processor and (is_lora or is_custom_diffusion): + raise NotImplementedError( + f"Memory efficient attention is currently not supported for LoRA or custom diffusion for attention processor type {self.processor}" + ) + if not is_xformers_available(): + raise ModuleNotFoundError( + ( + "Refer to https://github.com/facebookresearch/xformers for more information on how to install" + " xformers" + ), + name="xformers", + ) + elif not torch.cuda.is_available(): + raise ValueError( + "torch.cuda.is_available() should be True but is False. 
xformers' memory efficient attention is" + " only available for GPU " + ) + else: + try: + # Make sure we can run the memory efficient attention + _ = xformers.ops.memory_efficient_attention( + torch.randn((1, 2, 40), device="cuda"), + torch.randn((1, 2, 40), device="cuda"), + torch.randn((1, 2, 40), device="cuda"), + ) + except Exception as e: + raise e + + if is_lora: + # TODO (sayakpaul): should we throw a warning if someone wants to use the xformers + # variant when using PT 2.0 now that we have LoRAAttnProcessor2_0? + processor = LoRAXFormersAttnProcessor( + hidden_size=self.processor.hidden_size, + cross_attention_dim=self.processor.cross_attention_dim, + rank=self.processor.rank, + attention_op=attention_op, + ) + processor.load_state_dict(self.processor.state_dict()) + processor.to(self.processor.to_q_lora.up.weight.device) + elif is_custom_diffusion: + processor = CustomDiffusionXFormersAttnProcessor( + train_kv=self.processor.train_kv, + train_q_out=self.processor.train_q_out, + hidden_size=self.processor.hidden_size, + cross_attention_dim=self.processor.cross_attention_dim, + attention_op=attention_op, + ) + processor.load_state_dict(self.processor.state_dict()) + if hasattr(self.processor, "to_k_custom_diffusion"): + processor.to(self.processor.to_k_custom_diffusion.weight.device) + elif is_added_kv_processor: + # TODO(Patrick, Suraj, William) - currently xformers doesn't work for UnCLIP + # which uses this type of cross attention ONLY because the attention mask of format + # [0, ..., -10.000, ..., 0, ...,] is not supported + # throw warning + logger.info( + "Memory efficient attention with `xformers` might currently not work correctly if an attention mask is required for the attention operation." + ) + processor = XFormersAttnAddedKVProcessor(attention_op=attention_op) + else: + processor = XFormersAttnProcessor(attention_op=attention_op) + else: + if is_lora: + attn_processor_class = ( + LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor + ) + processor = attn_processor_class( + hidden_size=self.processor.hidden_size, + cross_attention_dim=self.processor.cross_attention_dim, + rank=self.processor.rank, + ) + processor.load_state_dict(self.processor.state_dict()) + processor.to(self.processor.to_q_lora.up.weight.device) + elif is_custom_diffusion: + attn_processor_class = ( + CustomDiffusionAttnProcessor2_0 + if hasattr(F, "scaled_dot_product_attention") + else CustomDiffusionAttnProcessor + ) + processor = attn_processor_class( + train_kv=self.processor.train_kv, + train_q_out=self.processor.train_q_out, + hidden_size=self.processor.hidden_size, + cross_attention_dim=self.processor.cross_attention_dim, + ) + processor.load_state_dict(self.processor.state_dict()) + if hasattr(self.processor, "to_k_custom_diffusion"): + processor.to(self.processor.to_k_custom_diffusion.weight.device) + else: + # set attention processor + # We use the AttnProcessor2_0 by default when torch 2.x is used which uses + # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention + # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 + processor = ( + AttnProcessor2_0() + if hasattr(F, "scaled_dot_product_attention") and self.scale_qk + else AttnProcessor() + ) + + self.set_processor(processor) + + def set_attention_slice(self, slice_size: int) -> None: + r""" + Set the slice size for attention computation. 
+ + Args: + slice_size (`int`): + The slice size for attention computation. + """ + if slice_size is not None and slice_size > self.sliceable_head_dim: + raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.") + + if slice_size is not None and self.added_kv_proj_dim is not None: + processor = SlicedAttnAddedKVProcessor(slice_size) + elif slice_size is not None: + processor = SlicedAttnProcessor(slice_size) + elif self.added_kv_proj_dim is not None: + processor = AttnAddedKVProcessor() + else: + # set attention processor + # We use the AttnProcessor2_0 by default when torch 2.x is used which uses + # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention + # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 + processor = (AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor()) + + self.set_processor(processor) + + def set_processor(self, processor: "AttnProcessor") -> None: + r""" + Set the attention processor to use. + + Args: + processor (`AttnProcessor`): + The attention processor to use. + """ + # if current processor is in `self._modules` and if passed `processor` is not, we need to + # pop `processor` from `self._modules` + if ( + hasattr(self, "processor") + and isinstance(self.processor, torch.nn.Module) + and not isinstance(processor, torch.nn.Module) + ): + logger.info(f"You are removing possibly trained weights of {self.processor} with {processor}") + self._modules.pop("processor") + + self.processor = processor + + def get_processor(self, return_deprecated_lora: bool = False) -> "AttentionProcessor": + r""" + Get the attention processor in use. + + Args: + return_deprecated_lora (`bool`, *optional*, defaults to `False`): + Set to `True` to return the deprecated LoRA attention processor. + + Returns: + "AttentionProcessor": The attention processor in use. + """ + if not return_deprecated_lora: + return self.processor + + # TODO(Sayak, Patrick). The rest of the function is needed to ensure backwards compatible + # serialization format for LoRA Attention Processors. It should be deleted once the integration + # with PEFT is completed. + is_lora_activated = { + name: module.lora_layer is not None + for name, module in self.named_modules() + if hasattr(module, "lora_layer") + } + + # 1. if no layer has a LoRA activated we can return the processor as usual + if not any(is_lora_activated.values()): + return self.processor + + # If doesn't apply LoRA do `add_k_proj` or `add_v_proj` + is_lora_activated.pop("add_k_proj", None) + is_lora_activated.pop("add_v_proj", None) + # 2. else it is not posssible that only some layers have LoRA activated + if not all(is_lora_activated.values()): + raise ValueError( + f"Make sure that either all layers or no layers have LoRA activated, but have {is_lora_activated}" + ) + + # 3. 
And we need to merge the current LoRA layers into the corresponding LoRA attention processor + non_lora_processor_cls_name = self.processor.__class__.__name__ + lora_processor_cls = getattr(import_module(__name__), "LoRA" + non_lora_processor_cls_name) + + hidden_size = self.inner_dim + + # now create a LoRA attention processor from the LoRA layers + if lora_processor_cls in [LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor]: + kwargs = { + "cross_attention_dim": self.cross_attention_dim, + "rank": self.to_q.lora_layer.rank, + "network_alpha": self.to_q.lora_layer.network_alpha, + "q_rank": self.to_q.lora_layer.rank, + "q_hidden_size": self.to_q.lora_layer.out_features, + "k_rank": self.to_k.lora_layer.rank, + "k_hidden_size": self.to_k.lora_layer.out_features, + "v_rank": self.to_v.lora_layer.rank, + "v_hidden_size": self.to_v.lora_layer.out_features, + "out_rank": self.to_out[0].lora_layer.rank, + "out_hidden_size": self.to_out[0].lora_layer.out_features, + } + + if hasattr(self.processor, "attention_op"): + kwargs["attention_op"] = self.processor.attention_op + + lora_processor = lora_processor_cls(hidden_size, **kwargs) + lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict()) + lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict()) + lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict()) + lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict()) + elif lora_processor_cls == LoRAAttnAddedKVProcessor: + lora_processor = lora_processor_cls( + hidden_size, + cross_attention_dim=self.add_k_proj.weight.shape[0], + rank=self.to_q.lora_layer.rank, + network_alpha=self.to_q.lora_layer.network_alpha, + ) + lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict()) + lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict()) + lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict()) + lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict()) + + # only save if used + if self.add_k_proj.lora_layer is not None: + lora_processor.add_k_proj_lora.load_state_dict(self.add_k_proj.lora_layer.state_dict()) + lora_processor.add_v_proj_lora.load_state_dict(self.add_v_proj.lora_layer.state_dict()) + else: + lora_processor.add_k_proj_lora = None + lora_processor.add_v_proj_lora = None + else: + raise ValueError(f"{lora_processor_cls} does not exist.") + + return lora_processor + + def forward( + self, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + **cross_attention_kwargs, + ) -> torch.Tensor: + r""" + The forward method of the `Attention` class. + + Args: + hidden_states (`torch.Tensor`): + The hidden states of the query. + encoder_hidden_states (`torch.Tensor`, *optional*): + The hidden states of the encoder. + attention_mask (`torch.Tensor`, *optional*): + The attention mask to use. If `None`, no mask is applied. + **cross_attention_kwargs: + Additional keyword arguments to pass along to the cross attention. + + Returns: + `torch.Tensor`: The output of the attention layer. 
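+ 
+ Examples:
+ An illustrative cross-attention call (shapes are hypothetical; the layer is assumed to have
+ been built with `cross_attention_dim=768` and the plain `AttnProcessor` from this file):
+ 
+ ```py
+ import torch
+ from diffusers.models.attention_processor import Attention, AttnProcessor
+ 
+ attn = Attention(query_dim=320, cross_attention_dim=768, heads=8, dim_head=40, processor=AttnProcessor())
+ hidden_states = torch.randn(2, 64, 320)  # query features
+ encoder_hidden_states = torch.randn(2, 77, 768)  # e.g. text-encoder features
+ out = attn(hidden_states, encoder_hidden_states=encoder_hidden_states)
+ ```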
+ """ + # The `Attention` class can call different attention processors / attention functions + # here we simply pass along all tensors to the selected processor class + # For standard processors that are defined here, `**cross_attention_kwargs` is empty + + out = self.processor( + self, + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + **cross_attention_kwargs, + ) + + return out + + + + def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor: + r""" + Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads` + is the number of heads initialized while constructing the `Attention` class. + + Args: + tensor (`torch.Tensor`): The tensor to reshape. + + Returns: + `torch.Tensor`: The reshaped tensor. + """ + head_size = self.heads + batch_size, seq_len, dim = tensor.shape + tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) + tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size) + return tensor + + def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor: + r""" + Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]` `heads` is + the number of heads initialized while constructing the `Attention` class. + + Args: + tensor (`torch.Tensor`): The tensor to reshape. + out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is + reshaped to `[batch_size * heads, seq_len, dim // heads]`. + + Returns: + `torch.Tensor`: The reshaped tensor. + """ + head_size = self.heads + batch_size, seq_len, dim = tensor.shape + tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size) + tensor = tensor.permute(0, 2, 1, 3) + + if out_dim == 3: + tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size) + + return tensor + + def get_attention_scores( + self, query: torch.Tensor, key: torch.Tensor, attention_mask: torch.Tensor = None + ) -> torch.Tensor: + r""" + Compute the attention scores. + + Args: + query (`torch.Tensor`): The query tensor. + key (`torch.Tensor`): The key tensor. + attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied. + + Returns: + `torch.Tensor`: The attention probabilities/scores. + """ + dtype = query.dtype + if self.upcast_attention: + query = query.float() + key = key.float() + + if attention_mask is None: + baddbmm_input = torch.empty( + query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device + ) + beta = 0 + else: + baddbmm_input = attention_mask + beta = 1 + + attention_scores = torch.baddbmm( + baddbmm_input, + query, + key.transpose(-1, -2), + beta=beta, + alpha=self.scale, + ) + del baddbmm_input + + if self.upcast_softmax: + attention_scores = attention_scores.float() + + attention_probs = attention_scores.softmax(dim=-1) + del attention_scores + + attention_probs = attention_probs.to(dtype) + + return attention_probs + + def prepare_attention_mask( + self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int = 3 + ) -> torch.Tensor: + r""" + Prepare the attention mask for the attention computation. + + Args: + attention_mask (`torch.Tensor`): + The attention mask to prepare. + target_length (`int`): + The target length of the attention mask. This is the length of the attention mask after padding. 
+ batch_size (`int`): + The batch size, which is used to repeat the attention mask. + out_dim (`int`, *optional*, defaults to `3`): + The output dimension of the attention mask. Can be either `3` or `4`. + + Returns: + `torch.Tensor`: The prepared attention mask. + """ + head_size = self.heads + if attention_mask is None: + return attention_mask + + current_length: int = attention_mask.shape[-1] + if current_length != target_length: + if attention_mask.device.type == "mps": + # HACK: MPS: Does not support padding by greater than dimension of input tensor. + # Instead, we can manually construct the padding tensor. + padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length) + padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device) + attention_mask = torch.cat([attention_mask, padding], dim=2) + else: + # TODO: for pipelines such as stable-diffusion, padding cross-attn mask: + # we want to instead pad by (0, remaining_length), where remaining_length is: + # remaining_length: int = target_length - current_length + # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding + attention_mask = F.pad(attention_mask, (0, target_length), value=0.0) + + if out_dim == 3: + if attention_mask.shape[0] < batch_size * head_size: + attention_mask = attention_mask.repeat_interleave(head_size, dim=0) + elif out_dim == 4: + attention_mask = attention_mask.unsqueeze(1) + attention_mask = attention_mask.repeat_interleave(head_size, dim=1) + + return attention_mask + + def norm_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: + r""" + Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the + `Attention` class. + + Args: + encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder. + + Returns: + `torch.Tensor`: The normalized encoder hidden states. + """ + assert self.norm_cross is not None, "self.norm_cross must be defined to call self.norm_encoder_hidden_states" + + if isinstance(self.norm_cross, nn.LayerNorm): + encoder_hidden_states = self.norm_cross(encoder_hidden_states) + elif isinstance(self.norm_cross, nn.GroupNorm): + # Group norm norms along the channels dimension and expects + # input to be in the shape of (N, C, *). In this case, we want + # to norm along the hidden dimension, so we need to move + # (batch_size, sequence_length, hidden_size) -> + # (batch_size, hidden_size, sequence_length) + encoder_hidden_states = encoder_hidden_states.transpose(1, 2) + encoder_hidden_states = self.norm_cross(encoder_hidden_states) + encoder_hidden_states = encoder_hidden_states.transpose(1, 2) + else: + assert False + + return encoder_hidden_states + + @torch.no_grad() + def fuse_projections(self, fuse=True): + is_cross_attention = self.cross_attention_dim != self.query_dim + device = self.to_q.weight.data.device + dtype = self.to_q.weight.data.dtype + + if not is_cross_attention: + # fetch weight matrices. + concatenated_weights = torch.cat([self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data]) + in_features = concatenated_weights.shape[1] + out_features = concatenated_weights.shape[0] + + # create a new single projection layer and copy over the weights. 
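+            # For self-attention this stacks the three (inner_dim, query_dim) weight matrices into a single
+            # (3 * inner_dim, query_dim) matrix, so one matmul yields Q, K and V together;
+            # FusedAttnProcessor2_0 later splits the result back apart with torch.split.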
+            self.to_qkv = self.linear_cls(in_features, out_features, bias=False, device=device, dtype=dtype)
+            self.to_qkv.weight.copy_(concatenated_weights)
+
+        else:
+            concatenated_weights = torch.cat([self.to_k.weight.data, self.to_v.weight.data])
+            in_features = concatenated_weights.shape[1]
+            out_features = concatenated_weights.shape[0]
+
+            self.to_kv = self.linear_cls(in_features, out_features, bias=False, device=device, dtype=dtype)
+            self.to_kv.weight.copy_(concatenated_weights)
+
+        self.fused_projections = fuse
+
+
+class AttnProcessor:
+    r"""
+    Default processor for performing attention-related computations.
+    """
+
+    def __call__(
+        self,
+        attn: Attention,
+        hidden_states: torch.FloatTensor,
+        encoder_hidden_states: Optional[torch.FloatTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        temb: Optional[torch.FloatTensor] = None,
+        scale: float = 1.0,
+    ) -> torch.Tensor:
+        residual = hidden_states
+
+        args = () if USE_PEFT_BACKEND else (scale,)
+
+        if attn.spatial_norm is not None:
+            hidden_states = attn.spatial_norm(hidden_states, temb)
+
+        input_ndim = hidden_states.ndim
+
+        if input_ndim == 4:
+            batch_size, channel, height, width = hidden_states.shape
+            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+        batch_size, sequence_length, _ = (
+            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+        )
+        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+
+        if attn.group_norm is not None:
+            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+        query = attn.to_q(hidden_states, *args)
+
+        if encoder_hidden_states is None:
+            encoder_hidden_states = hidden_states
+        elif attn.norm_cross:
+            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+        key = attn.to_k(encoder_hidden_states, *args)
+        value = attn.to_v(encoder_hidden_states, *args)
+
+        query = attn.head_to_batch_dim(query)
+        key = attn.head_to_batch_dim(key)
+        value = attn.head_to_batch_dim(value)
+
+        attention_probs = attn.get_attention_scores(query, key, attention_mask)
+        hidden_states = torch.bmm(attention_probs, value)
+        hidden_states = attn.batch_to_head_dim(hidden_states)
+
+        # linear proj
+        hidden_states = attn.to_out[0](hidden_states, *args)
+        # dropout
+        hidden_states = attn.to_out[1](hidden_states)
+
+        if input_ndim == 4:
+            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+        if attn.residual_connection:
+            hidden_states = hidden_states + residual
+
+        hidden_states = hidden_states / attn.rescale_output_factor
+
+        return hidden_states
+
+
+class CustomDiffusionAttnProcessor(nn.Module):
+    r"""
Processor for implementing attention for the Custom Diffusion method. + + Args: + train_kv (`bool`, defaults to `True`): + Whether to newly train the key and value matrices corresponding to the text features. + train_q_out (`bool`, defaults to `True`): + Whether to newly train query matrices corresponding to the latent image features. + hidden_size (`int`, *optional*, defaults to `None`): + The hidden size of the attention layer. + cross_attention_dim (`int`, *optional*, defaults to `None`): + The number of channels in the `encoder_hidden_states`. + out_bias (`bool`, defaults to `True`): + Whether to include the bias parameter in `train_q_out`. + dropout (`float`, *optional*, defaults to 0.0): + The dropout probability to use. + """ + + def __init__( + self, + train_kv: bool = True, + train_q_out: bool = True, + hidden_size: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + out_bias: bool = True, + dropout: float = 0.0, + ): + super().__init__() + self.train_kv = train_kv + self.train_q_out = train_q_out + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + + # `_custom_diffusion` id for easy serialization and loading. + if self.train_kv: + self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + if self.train_q_out: + self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) + self.to_out_custom_diffusion = nn.ModuleList([]) + self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) + self.to_out_custom_diffusion.append(nn.Dropout(dropout)) + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.Tensor: + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + if self.train_q_out: + query = self.to_q_custom_diffusion(hidden_states).to(attn.to_q.weight.dtype) + else: + query = attn.to_q(hidden_states.to(attn.to_q.weight.dtype)) + + if encoder_hidden_states is None: + crossattn = False + encoder_hidden_states = hidden_states + else: + crossattn = True + if attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + if self.train_kv: + key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) + value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) + key = key.to(attn.to_q.weight.dtype) + value = value.to(attn.to_q.weight.dtype) + else: + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + if crossattn: + detach = torch.ones_like(key) + detach[:, :1, :] = detach[:, :1, :] * 0.0 + key = detach * key + (1 - detach) * key.detach() + value = detach * value + (1 - detach) * value.detach() + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + if self.train_q_out: + # linear proj + hidden_states = self.to_out_custom_diffusion[0](hidden_states) + # dropout + hidden_states = 
self.to_out_custom_diffusion[1](hidden_states) + else: + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +class AttnAddedKVProcessor: + r""" + Processor for performing attention-related computations with extra learnable key and value matrices for the text + encoder. + """ + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + scale: float = 1.0, + ) -> torch.Tensor: + residual = hidden_states + + args = () if USE_PEFT_BACKEND else (scale,) + + hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) + batch_size, sequence_length, _ = hidden_states.shape + + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states, *args) + query = attn.head_to_batch_dim(query) + + encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states, *args) + encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states, *args) + encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) + encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) + + if not attn.only_cross_attention: + key = attn.to_k(hidden_states, *args) + value = attn.to_v(hidden_states, *args) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) + value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) + else: + key = encoder_hidden_states_key_proj + value = encoder_hidden_states_value_proj + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states, *args) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) + hidden_states = hidden_states + residual + + return hidden_states + + +class AttnAddedKVProcessor2_0: + r""" + Processor for performing scaled dot-product attention (enabled by default if you're using PyTorch 2.0), with extra + learnable key and value matrices for the text encoder. + """ + + def __init__(self): + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError( + "AttnAddedKVProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." 
+ ) + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + scale: float = 1.0, + ) -> torch.Tensor: + residual = hidden_states + + args = () if USE_PEFT_BACKEND else (scale,) + + hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) + batch_size, sequence_length, _ = hidden_states.shape + + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size, out_dim=4) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states, *args) + query = attn.head_to_batch_dim(query, out_dim=4) + + encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) + encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) + encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj, out_dim=4) + encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj, out_dim=4) + + if not attn.only_cross_attention: + key = attn.to_k(hidden_states, *args) + value = attn.to_v(hidden_states, *args) + key = attn.head_to_batch_dim(key, out_dim=4) + value = attn.head_to_batch_dim(value, out_dim=4) + key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) + value = torch.cat([encoder_hidden_states_value_proj, value], dim=2) + else: + key = encoder_hidden_states_key_proj + value = encoder_hidden_states_value_proj + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, residual.shape[1]) + + # linear proj + hidden_states = attn.to_out[0](hidden_states, *args) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) + hidden_states = hidden_states + residual + + return hidden_states + + +class XFormersAttnAddedKVProcessor: + r""" + Processor for implementing memory efficient attention using xFormers. + + Args: + attention_op (`Callable`, *optional*, defaults to `None`): + The base + [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to + use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best + operator. 
+ """ + + def __init__(self, attention_op: Optional[Callable] = None): + self.attention_op = attention_op + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.Tensor: + residual = hidden_states + hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) + batch_size, sequence_length, _ = hidden_states.shape + + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + query = attn.head_to_batch_dim(query) + + encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) + encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) + encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) + encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) + + if not attn.only_cross_attention: + key = attn.to_k(hidden_states) + value = attn.to_v(hidden_states) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) + value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) + else: + key = encoder_hidden_states_key_proj + value = encoder_hidden_states_value_proj + + hidden_states = xformers.ops.memory_efficient_attention( + query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale + ) + hidden_states = hidden_states.to(query.dtype) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) + hidden_states = hidden_states + residual + + return hidden_states + + +class XFormersAttnProcessor: + r""" + Processor for implementing memory efficient attention using xFormers. + + Args: + attention_op (`Callable`, *optional*, defaults to `None`): + The base + [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to + use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best + operator. 
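+
+    Examples:
+        A sketch of opting in (requires the `xformers` package; `unet` stands for any loaded model that
+        exposes these helpers):
+
+        >>> unet.set_attn_processor(XFormersAttnProcessor())
+        >>> # most models also provide a convenience wrapper that selects this processor
+        >>> unet.enable_xformers_memory_efficient_attention()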
+ """ + + def __init__(self, attention_op: Optional[Callable] = None): + self.attention_op = attention_op + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + temb: Optional[torch.FloatTensor] = None, + scale: float = 1.0, + ) -> torch.FloatTensor: + residual = hidden_states + + args = () if USE_PEFT_BACKEND else (scale,) + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, key_tokens, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + + attention_mask = attn.prepare_attention_mask(attention_mask, key_tokens, batch_size) + if attention_mask is not None: + # expand our mask's singleton query_tokens dimension: + # [batch*heads, 1, key_tokens] -> + # [batch*heads, query_tokens, key_tokens] + # so that it can be added as a bias onto the attention scores that xformers computes: + # [batch*heads, query_tokens, key_tokens] + # we do this explicitly because xformers doesn't broadcast the singleton dimension for us. + _, query_tokens, _ = hidden_states.shape + attention_mask = attention_mask.expand(-1, query_tokens, -1) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states, *args) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states, *args) + value = attn.to_v(encoder_hidden_states, *args) + + query = attn.head_to_batch_dim(query).contiguous() + key = attn.head_to_batch_dim(key).contiguous() + value = attn.head_to_batch_dim(value).contiguous() + + hidden_states = xformers.ops.memory_efficient_attention( + query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale + ) + hidden_states = hidden_states.to(query.dtype) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states, *args) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +class AttnProcessor2_0: + r""" + Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). 
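+
+    Examples:
+        The core computation is `F.scaled_dot_product_attention` over `(batch, heads, seq_len, head_dim)`
+        tensors; the shapes below are purely illustrative:
+
+        >>> q = torch.randn(2, 8, 4096, 64)
+        >>> k = torch.randn(2, 8, 77, 64)
+        >>> v = torch.randn(2, 8, 77, 64)
+        >>> F.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False).shape
+        torch.Size([2, 8, 4096, 64])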
+    """
+
+    def __init__(self):
+        if not hasattr(F, "scaled_dot_product_attention"):
+            raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
+
+    def __call__(
+        self,
+        attn: Attention,
+        hidden_states: torch.FloatTensor,
+        encoder_hidden_states: Optional[torch.FloatTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        temb: Optional[torch.FloatTensor] = None,
+        scale: float = 1.0,
+    ) -> torch.FloatTensor:
+        residual = hidden_states
+        if attn.spatial_norm is not None:
+            hidden_states = attn.spatial_norm(hidden_states, temb)
+
+        input_ndim = hidden_states.ndim
+
+        if input_ndim == 4:
+            batch_size, channel, height, width = hidden_states.shape
+            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+        batch_size, sequence_length, _ = (
+            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+        )
+
+        if attention_mask is not None:
+            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+            # scaled_dot_product_attention expects attention_mask shape to be
+            # (batch, heads, source_length, target_length)
+            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
+
+        if attn.group_norm is not None:
+            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+        args = () if USE_PEFT_BACKEND else (scale,)
+        query = attn.to_q(hidden_states, *args)
+
+        if encoder_hidden_states is None:
+            encoder_hidden_states = hidden_states
+        elif attn.norm_cross:
+            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+        key = attn.to_k(encoder_hidden_states, *args)
+        value = attn.to_v(encoder_hidden_states, *args)
+
+        inner_dim = key.shape[-1]
+        head_dim = inner_dim // attn.heads
+
+        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+        # the output of sdp = (batch, num_heads, seq_len, head_dim)
+        # TODO: add support for attn.scale when we move to Torch 2.1
+        hidden_states = F.scaled_dot_product_attention(
+            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
+        )
+
+        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
+
+        hidden_states = hidden_states.to(query.dtype)
+
+        # linear proj
+        hidden_states = attn.to_out[0](hidden_states, *args)
+        # dropout
+        hidden_states = attn.to_out[1](hidden_states)
+
+        if input_ndim == 4:
+            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+        if attn.residual_connection:
+            hidden_states = hidden_states + residual
+
+        hidden_states = hidden_states / attn.rescale_output_factor
+
+        return hidden_states
+
+
+class AttnProcessor3_0:
+    r"""
+    Processor that runs the attention computation through `flash_attn.flash_attn_func` rather than
+    `F.scaled_dot_product_attention`. It requires PyTorch 2.0 and, at call time, the `flash_attn` package.
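+
+    Examples:
+        A minimal sketch of opting in on a UNet (assumes a loaded `unet` and a `flash_attn` build that
+        matches the call made by this processor):
+
+        >>> unet.set_attn_processor(AttnProcessor3_0())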
+    """
+
+    def __init__(self):
+        if not hasattr(F, "scaled_dot_product_attention"):
+            raise ImportError("AttnProcessor3_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
+
+    def __call__(
+        self,
+        attn: Attention,
+        hidden_states: torch.FloatTensor,
+        encoder_hidden_states: Optional[torch.FloatTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        temb: Optional[torch.FloatTensor] = None,
+        scale: float = 1.0,
+    ) -> torch.FloatTensor:
+        residual = hidden_states
+        if attn.spatial_norm is not None:
+            hidden_states = attn.spatial_norm(hidden_states, temb)
+
+        input_ndim = hidden_states.ndim
+
+        if input_ndim == 4:
+            batch_size, channel, height, width = hidden_states.shape
+            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+        batch_size, sequence_length, _ = (
+            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+        )
+
+        if attention_mask is not None:
+            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+            # scaled_dot_product_attention expects attention_mask shape to be
+            # (batch, heads, source_length, target_length)
+            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
+
+        if attn.group_norm is not None:
+            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+        args = () if USE_PEFT_BACKEND else (scale,)
+        query = attn.to_q(hidden_states, *args)
+
+        if encoder_hidden_states is None:
+            encoder_hidden_states = hidden_states
+        elif attn.norm_cross:
+            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+        key = attn.to_k(encoder_hidden_states, *args)
+        value = attn.to_v(encoder_hidden_states, *args)
+
+        inner_dim = key.shape[-1]
+        head_dim = inner_dim // attn.heads
+
+        # flash_attn_func expects (batch, seq_len, heads, head_dim) inputs, so no transpose to
+        # (batch, heads, seq_len, head_dim) is needed here.
+        query = query.view(batch_size, -1, attn.heads, head_dim).contiguous()
+        key = key.view(batch_size, -1, attn.heads, head_dim).contiguous()
+        value = value.view(batch_size, -1, attn.heads, head_dim).contiguous()
+
+        # NOTE: flash_attn_func takes no attention-mask argument, so `attention_mask` (if any) is not
+        # applied on this path.
+        from flash_attn import flash_attn_func
+
+        hidden_states = flash_attn_func(query, key, value, dropout_p=0.0, causal=False, imp_mode=1)
+
+        hidden_states = hidden_states.reshape(batch_size, -1, attn.heads *
head_dim) + + hidden_states = hidden_states.to(query.dtype) + + # linear proj + hidden_states = attn.to_out[0](hidden_states, *args) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + + return hidden_states + + +class FusedAttnProcessor2_0: + r""" + Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). + It uses fused projection layers. For self-attention modules, all projection matrices (i.e., query, + key, value) are fused. For cross-attention modules, key and value projection matrices are fused. + + + + This API is currently 🧪 experimental in nature and can change in future. + + + """ + + def __init__(self): + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError( + "FusedAttnProcessor2_0 requires at least PyTorch 2.0, to use it. Please upgrade PyTorch to > 2.0." + ) + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + temb: Optional[torch.FloatTensor] = None, + scale: float = 1.0, + ) -> torch.FloatTensor: + residual = hidden_states + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + args = () if USE_PEFT_BACKEND else (scale,) + if encoder_hidden_states is None: + qkv = attn.to_qkv(hidden_states, *args) + split_size = qkv.shape[-1] // 3 + query, key, value = torch.split(qkv, split_size, dim=-1) + else: + if attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + query = attn.to_q(hidden_states, *args) + + kv = attn.to_kv(encoder_hidden_states, *args) + split_size = kv.shape[-1] // 2 + key, value = torch.split(kv, split_size, dim=-1) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + # linear proj + 
hidden_states = attn.to_out[0](hidden_states, *args) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +class CustomDiffusionXFormersAttnProcessor(nn.Module): + r""" + Processor for implementing memory efficient attention using xFormers for the Custom Diffusion method. + + Args: + train_kv (`bool`, defaults to `True`): + Whether to newly train the key and value matrices corresponding to the text features. + train_q_out (`bool`, defaults to `True`): + Whether to newly train query matrices corresponding to the latent image features. + hidden_size (`int`, *optional*, defaults to `None`): + The hidden size of the attention layer. + cross_attention_dim (`int`, *optional*, defaults to `None`): + The number of channels in the `encoder_hidden_states`. + out_bias (`bool`, defaults to `True`): + Whether to include the bias parameter in `train_q_out`. + dropout (`float`, *optional*, defaults to 0.0): + The dropout probability to use. + attention_op (`Callable`, *optional*, defaults to `None`): + The base + [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use + as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator. + """ + + def __init__( + self, + train_kv: bool = True, + train_q_out: bool = False, + hidden_size: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + out_bias: bool = True, + dropout: float = 0.0, + attention_op: Optional[Callable] = None, + ): + super().__init__() + self.train_kv = train_kv + self.train_q_out = train_q_out + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + self.attention_op = attention_op + + # `_custom_diffusion` id for easy serialization and loading. 
+ if self.train_kv: + self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + if self.train_q_out: + self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) + self.to_out_custom_diffusion = nn.ModuleList([]) + self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) + self.to_out_custom_diffusion.append(nn.Dropout(dropout)) + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if self.train_q_out: + query = self.to_q_custom_diffusion(hidden_states).to(attn.to_q.weight.dtype) + else: + query = attn.to_q(hidden_states.to(attn.to_q.weight.dtype)) + + if encoder_hidden_states is None: + crossattn = False + encoder_hidden_states = hidden_states + else: + crossattn = True + if attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + if self.train_kv: + key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) + value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) + key = key.to(attn.to_q.weight.dtype) + value = value.to(attn.to_q.weight.dtype) + else: + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + if crossattn: + detach = torch.ones_like(key) + detach[:, :1, :] = detach[:, :1, :] * 0.0 + key = detach * key + (1 - detach) * key.detach() + value = detach * value + (1 - detach) * value.detach() + + query = attn.head_to_batch_dim(query).contiguous() + key = attn.head_to_batch_dim(key).contiguous() + value = attn.head_to_batch_dim(value).contiguous() + + hidden_states = xformers.ops.memory_efficient_attention( + query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale + ) + hidden_states = hidden_states.to(query.dtype) + hidden_states = attn.batch_to_head_dim(hidden_states) + + if self.train_q_out: + # linear proj + hidden_states = self.to_out_custom_diffusion[0](hidden_states) + # dropout + hidden_states = self.to_out_custom_diffusion[1](hidden_states) + else: + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +class CustomDiffusionAttnProcessor2_0(nn.Module): + r""" + Processor for implementing attention for the Custom Diffusion method using PyTorch 2.0’s memory-efficient scaled + dot-product attention. + + Args: + train_kv (`bool`, defaults to `True`): + Whether to newly train the key and value matrices corresponding to the text features. + train_q_out (`bool`, defaults to `True`): + Whether to newly train query matrices corresponding to the latent image features. + hidden_size (`int`, *optional*, defaults to `None`): + The hidden size of the attention layer. + cross_attention_dim (`int`, *optional*, defaults to `None`): + The number of channels in the `encoder_hidden_states`. + out_bias (`bool`, defaults to `True`): + Whether to include the bias parameter in `train_q_out`. 
+ dropout (`float`, *optional*, defaults to 0.0): + The dropout probability to use. + """ + + def __init__( + self, + train_kv: bool = True, + train_q_out: bool = True, + hidden_size: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + out_bias: bool = True, + dropout: float = 0.0, + ): + super().__init__() + self.train_kv = train_kv + self.train_q_out = train_q_out + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + + # `_custom_diffusion` id for easy serialization and loading. + if self.train_kv: + self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + if self.train_q_out: + self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) + self.to_out_custom_diffusion = nn.ModuleList([]) + self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) + self.to_out_custom_diffusion.append(nn.Dropout(dropout)) + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + if self.train_q_out: + query = self.to_q_custom_diffusion(hidden_states) + else: + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + crossattn = False + encoder_hidden_states = hidden_states + else: + crossattn = True + if attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + if self.train_kv: + key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) + value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) + key = key.to(attn.to_q.weight.dtype) + value = value.to(attn.to_q.weight.dtype) + + else: + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + if crossattn: + detach = torch.ones_like(key) + detach[:, :1, :] = detach[:, :1, :] * 0.0 + key = detach * key + (1 - detach) * key.detach() + value = detach * value + (1 - detach) * value.detach() + + inner_dim = hidden_states.shape[-1] + + head_dim = inner_dim // attn.heads + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + if self.train_q_out: + # linear proj + hidden_states = self.to_out_custom_diffusion[0](hidden_states) + # dropout + hidden_states = self.to_out_custom_diffusion[1](hidden_states) + else: + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +class SlicedAttnProcessor: + r""" + Processor for implementing sliced attention. 
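+
+    Instead of materializing the full `(batch * heads, query_tokens, key_tokens)` attention matrix at once,
+    the batched head dimension is processed in chunks of `slice_size`, trading some speed for a lower peak
+    memory footprint. For example, with `batch * heads = 16` and `slice_size = 4`, four slices of shape
+    `(4, query_tokens, key_tokens)` are computed sequentially.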
+ + Args: + slice_size (`int`, *optional*): + The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and + `attention_head_dim` must be a multiple of the `slice_size`. + """ + + def __init__(self, slice_size: int): + self.slice_size = slice_size + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + residual = hidden_states + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + dim = query.shape[-1] + query = attn.head_to_batch_dim(query) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + batch_size_attention, query_tokens, _ = query.shape + hidden_states = torch.zeros( + (batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype + ) + + for i in range(batch_size_attention // self.slice_size): + start_idx = i * self.slice_size + end_idx = (i + 1) * self.slice_size + + query_slice = query[start_idx:end_idx] + key_slice = key[start_idx:end_idx] + attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None + + attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice) + + attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx]) + + hidden_states[start_idx:end_idx] = attn_slice + + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +class SlicedAttnAddedKVProcessor: + r""" + Processor for implementing sliced attention with extra learnable key and value matrices for the text encoder. + + Args: + slice_size (`int`, *optional*): + The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and + `attention_head_dim` must be a multiple of the `slice_size`. 
+ """ + + def __init__(self, slice_size): + self.slice_size = slice_size + + def __call__( + self, + attn: "Attention", + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + temb: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) + + batch_size, sequence_length, _ = hidden_states.shape + + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + dim = query.shape[-1] + query = attn.head_to_batch_dim(query) + + encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) + encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) + + encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) + encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) + + if not attn.only_cross_attention: + key = attn.to_k(hidden_states) + value = attn.to_v(hidden_states) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) + value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) + else: + key = encoder_hidden_states_key_proj + value = encoder_hidden_states_value_proj + + batch_size_attention, query_tokens, _ = query.shape + hidden_states = torch.zeros( + (batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype + ) + + for i in range(batch_size_attention // self.slice_size): + start_idx = i * self.slice_size + end_idx = (i + 1) * self.slice_size + + query_slice = query[start_idx:end_idx] + key_slice = key[start_idx:end_idx] + attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None + + attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice) + + attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx]) + + hidden_states[start_idx:end_idx] = attn_slice + + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) + hidden_states = hidden_states + residual + + return hidden_states + + +class SpatialNorm(nn.Module): + """ + Spatially conditioned normalization as defined in https://arxiv.org/abs/2209.09002. + + Args: + f_channels (`int`): + The number of channels for input to group normalization layer, and output of the spatial norm layer. + zq_channels (`int`): + The number of channels for the quantized vector as described in the paper. 
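+
+    Examples:
+        A shape sketch (the values are illustrative):
+
+        >>> norm = SpatialNorm(f_channels=64, zq_channels=32)
+        >>> f = torch.randn(1, 64, 32, 32)
+        >>> zq = torch.randn(1, 32, 8, 8)  # resized to f's spatial size internally
+        >>> norm(f, zq).shape
+        torch.Size([1, 64, 32, 32])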
+ """ + + def __init__( + self, + f_channels: int, + zq_channels: int, + ): + super().__init__() + self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=32, eps=1e-6, affine=True) + self.conv_y = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0) + self.conv_b = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0) + + def forward(self, f: torch.FloatTensor, zq: torch.FloatTensor) -> torch.FloatTensor: + f_size = f.shape[-2:] + zq = F.interpolate(zq, size=f_size, mode="nearest") + norm_f = self.norm_layer(f) + new_f = norm_f * self.conv_y(zq) + self.conv_b(zq) + return new_f + + +## Deprecated +class LoRAAttnProcessor(nn.Module): + r""" + Processor for implementing the LoRA attention mechanism. + + Args: + hidden_size (`int`, *optional*): + The hidden size of the attention layer. + cross_attention_dim (`int`, *optional*): + The number of channels in the `encoder_hidden_states`. + rank (`int`, defaults to 4): + The dimension of the LoRA update matrices. + network_alpha (`int`, *optional*): + Equivalent to `alpha` but it's usage is specific to Kohya (A1111) style LoRAs. + kwargs (`dict`): + Additional keyword arguments to pass to the `LoRALinearLayer` layers. + """ + + def __init__( + self, + hidden_size: int, + cross_attention_dim: Optional[int] = None, + rank: int = 4, + network_alpha: Optional[int] = None, + **kwargs, + ): + super().__init__() + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + self.rank = rank + + q_rank = kwargs.pop("q_rank", None) + q_hidden_size = kwargs.pop("q_hidden_size", None) + q_rank = q_rank if q_rank is not None else rank + q_hidden_size = q_hidden_size if q_hidden_size is not None else hidden_size + + v_rank = kwargs.pop("v_rank", None) + v_hidden_size = kwargs.pop("v_hidden_size", None) + v_rank = v_rank if v_rank is not None else rank + v_hidden_size = v_hidden_size if v_hidden_size is not None else hidden_size + + out_rank = kwargs.pop("out_rank", None) + out_hidden_size = kwargs.pop("out_hidden_size", None) + out_rank = out_rank if out_rank is not None else rank + out_hidden_size = out_hidden_size if out_hidden_size is not None else hidden_size + + self.to_q_lora = LoRALinearLayer(q_hidden_size, q_hidden_size, q_rank, network_alpha) + self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) + self.to_v_lora = LoRALinearLayer(cross_attention_dim or v_hidden_size, v_hidden_size, v_rank, network_alpha) + self.to_out_lora = LoRALinearLayer(out_hidden_size, out_hidden_size, out_rank, network_alpha) + + def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + self_cls_name = self.__class__.__name__ + deprecate( + self_cls_name, + "0.26.0", + ( + f"Make sure use {self_cls_name[4:]} instead by setting" + "LoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. 
This will be done automatically when using" + " `LoraLoaderMixin.load_lora_weights`" + ), + ) + attn.to_q.lora_layer = self.to_q_lora.to(hidden_states.device) + attn.to_k.lora_layer = self.to_k_lora.to(hidden_states.device) + attn.to_v.lora_layer = self.to_v_lora.to(hidden_states.device) + attn.to_out[0].lora_layer = self.to_out_lora.to(hidden_states.device) + + attn._modules.pop("processor") + attn.processor = AttnProcessor() + return attn.processor(attn, hidden_states, *args, **kwargs) + + +class LoRAAttnProcessor2_0(nn.Module): + r""" + Processor for implementing the LoRA attention mechanism using PyTorch 2.0's memory-efficient scaled dot-product + attention. + + Args: + hidden_size (`int`): + The hidden size of the attention layer. + cross_attention_dim (`int`, *optional*): + The number of channels in the `encoder_hidden_states`. + rank (`int`, defaults to 4): + The dimension of the LoRA update matrices. + network_alpha (`int`, *optional*): + Equivalent to `alpha` but it's usage is specific to Kohya (A1111) style LoRAs. + kwargs (`dict`): + Additional keyword arguments to pass to the `LoRALinearLayer` layers. + """ + + def __init__( + self, + hidden_size: int, + cross_attention_dim: Optional[int] = None, + rank: int = 4, + network_alpha: Optional[int] = None, + **kwargs, + ): + super().__init__() + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + self.rank = rank + + q_rank = kwargs.pop("q_rank", None) + q_hidden_size = kwargs.pop("q_hidden_size", None) + q_rank = q_rank if q_rank is not None else rank + q_hidden_size = q_hidden_size if q_hidden_size is not None else hidden_size + + v_rank = kwargs.pop("v_rank", None) + v_hidden_size = kwargs.pop("v_hidden_size", None) + v_rank = v_rank if v_rank is not None else rank + v_hidden_size = v_hidden_size if v_hidden_size is not None else hidden_size + + out_rank = kwargs.pop("out_rank", None) + out_hidden_size = kwargs.pop("out_hidden_size", None) + out_rank = out_rank if out_rank is not None else rank + out_hidden_size = out_hidden_size if out_hidden_size is not None else hidden_size + + self.to_q_lora = LoRALinearLayer(q_hidden_size, q_hidden_size, q_rank, network_alpha) + self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) + self.to_v_lora = LoRALinearLayer(cross_attention_dim or v_hidden_size, v_hidden_size, v_rank, network_alpha) + self.to_out_lora = LoRALinearLayer(out_hidden_size, out_hidden_size, out_rank, network_alpha) + + def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + self_cls_name = self.__class__.__name__ + deprecate( + self_cls_name, + "0.26.0", + ( + f"Make sure use {self_cls_name[4:]} instead by setting" + "LoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. 
This will be done automatically when using" + " `LoraLoaderMixin.load_lora_weights`" + ), + ) + attn.to_q.lora_layer = self.to_q_lora.to(hidden_states.device) + attn.to_k.lora_layer = self.to_k_lora.to(hidden_states.device) + attn.to_v.lora_layer = self.to_v_lora.to(hidden_states.device) + attn.to_out[0].lora_layer = self.to_out_lora.to(hidden_states.device) + + attn._modules.pop("processor") + attn.processor = AttnProcessor2_0() + return attn.processor(attn, hidden_states, *args, **kwargs) + + +class LoRAXFormersAttnProcessor(nn.Module): + r""" + Processor for implementing the LoRA attention mechanism with memory efficient attention using xFormers. + + Args: + hidden_size (`int`, *optional*): + The hidden size of the attention layer. + cross_attention_dim (`int`, *optional*): + The number of channels in the `encoder_hidden_states`. + rank (`int`, defaults to 4): + The dimension of the LoRA update matrices. + attention_op (`Callable`, *optional*, defaults to `None`): + The base + [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to + use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best + operator. + network_alpha (`int`, *optional*): + Equivalent to `alpha` but it's usage is specific to Kohya (A1111) style LoRAs. + kwargs (`dict`): + Additional keyword arguments to pass to the `LoRALinearLayer` layers. + """ + + def __init__( + self, + hidden_size: int, + cross_attention_dim: int, + rank: int = 4, + attention_op: Optional[Callable] = None, + network_alpha: Optional[int] = None, + **kwargs, + ): + super().__init__() + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + self.rank = rank + self.attention_op = attention_op + + q_rank = kwargs.pop("q_rank", None) + q_hidden_size = kwargs.pop("q_hidden_size", None) + q_rank = q_rank if q_rank is not None else rank + q_hidden_size = q_hidden_size if q_hidden_size is not None else hidden_size + + v_rank = kwargs.pop("v_rank", None) + v_hidden_size = kwargs.pop("v_hidden_size", None) + v_rank = v_rank if v_rank is not None else rank + v_hidden_size = v_hidden_size if v_hidden_size is not None else hidden_size + + out_rank = kwargs.pop("out_rank", None) + out_hidden_size = kwargs.pop("out_hidden_size", None) + out_rank = out_rank if out_rank is not None else rank + out_hidden_size = out_hidden_size if out_hidden_size is not None else hidden_size + + self.to_q_lora = LoRALinearLayer(q_hidden_size, q_hidden_size, q_rank, network_alpha) + self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) + self.to_v_lora = LoRALinearLayer(cross_attention_dim or v_hidden_size, v_hidden_size, v_rank, network_alpha) + self.to_out_lora = LoRALinearLayer(out_hidden_size, out_hidden_size, out_rank, network_alpha) + + def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + self_cls_name = self.__class__.__name__ + deprecate( + self_cls_name, + "0.26.0", + ( + f"Make sure use {self_cls_name[4:]} instead by setting" + "LoRA layers to `self.{to_q,to_k,to_v,add_k_proj,add_v_proj,to_out[0]}.lora_layer` respectively. 
This will be done automatically when using" + " `LoraLoaderMixin.load_lora_weights`" + ), + ) + attn.to_q.lora_layer = self.to_q_lora.to(hidden_states.device) + attn.to_k.lora_layer = self.to_k_lora.to(hidden_states.device) + attn.to_v.lora_layer = self.to_v_lora.to(hidden_states.device) + attn.to_out[0].lora_layer = self.to_out_lora.to(hidden_states.device) + + attn._modules.pop("processor") + attn.processor = XFormersAttnProcessor() + return attn.processor(attn, hidden_states, *args, **kwargs) + + +class LoRAAttnAddedKVProcessor(nn.Module): + r""" + Processor for implementing the LoRA attention mechanism with extra learnable key and value matrices for the text + encoder. + + Args: + hidden_size (`int`, *optional*): + The hidden size of the attention layer. + cross_attention_dim (`int`, *optional*, defaults to `None`): + The number of channels in the `encoder_hidden_states`. + rank (`int`, defaults to 4): + The dimension of the LoRA update matrices. + network_alpha (`int`, *optional*): + Equivalent to `alpha` but it's usage is specific to Kohya (A1111) style LoRAs. + kwargs (`dict`): + Additional keyword arguments to pass to the `LoRALinearLayer` layers. + """ + + def __init__( + self, + hidden_size: int, + cross_attention_dim: Optional[int] = None, + rank: int = 4, + network_alpha: Optional[int] = None, + ): + super().__init__() + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + self.rank = rank + + self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha) + self.add_k_proj_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) + self.add_v_proj_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) + self.to_k_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha) + self.to_v_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha) + self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha) + + def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + self_cls_name = self.__class__.__name__ + deprecate( + self_cls_name, + "0.26.0", + ( + f"Make sure use {self_cls_name[4:]} instead by setting" + "LoRA layers to `self.{to_q,to_k,to_v,add_k_proj,add_v_proj,to_out[0]}.lora_layer` respectively. This will be done automatically when using" + " `LoraLoaderMixin.load_lora_weights`" + ), + ) + attn.to_q.lora_layer = self.to_q_lora.to(hidden_states.device) + attn.to_k.lora_layer = self.to_k_lora.to(hidden_states.device) + attn.to_v.lora_layer = self.to_v_lora.to(hidden_states.device) + attn.to_out[0].lora_layer = self.to_out_lora.to(hidden_states.device) + + attn._modules.pop("processor") + attn.processor = AttnAddedKVProcessor() + return attn.processor(attn, hidden_states, *args, **kwargs) + + +class IPAdapterAttnProcessor(nn.Module): + r""" + Attention processor for IP-Adapater. + + Args: + hidden_size (`int`): + The hidden size of the attention layer. + cross_attention_dim (`int`): + The number of channels in the `encoder_hidden_states`. + num_tokens (`int`, defaults to 4): + The context length of the image features. + scale (`float`, defaults to 1.0): + the weight scale of image prompt. 
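+
+    Examples:
+        The image tokens are expected to be appended to the text tokens; with `num_tokens=4` and 77 text
+        tokens (both numbers illustrative), the processor splits the conditioning as:
+
+        >>> encoder_hidden_states = torch.randn(2, 77 + 4, 768)
+        >>> text_tokens = encoder_hidden_states[:, :77, :]
+        >>> image_tokens = encoder_hidden_states[:, 77:, :]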
+ """ + + def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=4, scale=1.0): + super().__init__() + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + self.num_tokens = num_tokens + self.scale = scale + + self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + scale=1.0, + ): + if scale != 1.0: + logger.warning("`scale` of IPAttnProcessor should be set with `set_ip_adapter_scale`.") + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + # split hidden states + end_pos = encoder_hidden_states.shape[1] - self.num_tokens + encoder_hidden_states, ip_hidden_states = ( + encoder_hidden_states[:, :end_pos, :], + encoder_hidden_states[:, end_pos:, :], + ) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # for ip-adapter + ip_key = self.to_k_ip(ip_hidden_states) + ip_value = self.to_v_ip(ip_hidden_states) + + ip_key = attn.head_to_batch_dim(ip_key) + ip_value = attn.head_to_batch_dim(ip_value) + + ip_attention_probs = attn.get_attention_scores(query, ip_key, None) + ip_hidden_states = torch.bmm(ip_attention_probs, ip_value) + ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states) + + hidden_states = hidden_states + self.scale * ip_hidden_states + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +class IPAdapterAttnProcessor2_0(torch.nn.Module): + r""" + Attention processor for IP-Adapater for PyTorch 2.0. + + Args: + hidden_size (`int`): + The hidden size of the attention layer. + cross_attention_dim (`int`): + The number of channels in the `encoder_hidden_states`. + num_tokens (`int`, defaults to 4): + The context length of the image features. + scale (`float`, defaults to 1.0): + the weight scale of image prompt. 
+ """ + + def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=4, scale=1.0): + super().__init__() + + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError( + f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." + ) + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + self.num_tokens = num_tokens + self.scale = scale + + self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + scale=1.0, + ): + if scale != 1.0: + logger.warning("`scale` of IPAttnProcessor should be set by `set_ip_adapter_scale`.") + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + # split hidden states + end_pos = encoder_hidden_states.shape[1] - self.num_tokens + encoder_hidden_states, ip_hidden_states = ( + encoder_hidden_states[:, :end_pos, :], + encoder_hidden_states[:, end_pos:, :], + ) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + # for ip-adapter + ip_key = self.to_k_ip(ip_hidden_states) + ip_value = self.to_v_ip(ip_hidden_states) + + ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + ip_hidden_states = F.scaled_dot_product_attention( + query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False + ) + + ip_hidden_states = 
ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + ip_hidden_states = ip_hidden_states.to(query.dtype) + + hidden_states = hidden_states + self.scale * ip_hidden_states + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +LORA_ATTENTION_PROCESSORS = ( + LoRAAttnProcessor, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + LoRAAttnAddedKVProcessor, +) + +ADDED_KV_ATTENTION_PROCESSORS = ( + AttnAddedKVProcessor, + SlicedAttnAddedKVProcessor, + AttnAddedKVProcessor2_0, + XFormersAttnAddedKVProcessor, + LoRAAttnAddedKVProcessor, +) + +CROSS_ATTENTION_PROCESSORS = ( + AttnProcessor, + AttnProcessor2_0, + XFormersAttnProcessor, + SlicedAttnProcessor, + LoRAAttnProcessor, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + IPAdapterAttnProcessor, + IPAdapterAttnProcessor2_0, +) + +AttentionProcessor = Union[ + AttnProcessor, + AttnProcessor2_0, + FusedAttnProcessor2_0, + XFormersAttnProcessor, + SlicedAttnProcessor, + AttnAddedKVProcessor, + SlicedAttnAddedKVProcessor, + AttnAddedKVProcessor2_0, + XFormersAttnAddedKVProcessor, + CustomDiffusionAttnProcessor, + CustomDiffusionXFormersAttnProcessor, + CustomDiffusionAttnProcessor2_0, + # deprecated + LoRAAttnProcessor, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + LoRAAttnAddedKVProcessor, +] diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/__init__.py new file mode 100644 index 000000000..201a40ff1 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/__init__.py @@ -0,0 +1,5 @@ +from .autoencoder_asym_kl import AsymmetricAutoencoderKL +from .autoencoder_kl import AutoencoderKL +from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder +from .autoencoder_tiny import AutoencoderTiny +from .consistency_decoder_vae import ConsistencyDecoderVAE diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_asym_kl.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_asym_kl.py new file mode 100644 index 000000000..fc2041d2e --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_asym_kl.py @@ -0,0 +1,186 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
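+# Note (added descriptive comment, summarizing the class below): asymmetric KL autoencoder — a standard Encoder paired with a MaskConditionDecoder whose decoding is additionally conditioned on the original image and an optional inpainting mask.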
+from typing import Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...utils.accelerate_utils import apply_forward_hook +from ..modeling_outputs import AutoencoderKLOutput +from ..modeling_utils import ModelMixin +from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder, MaskConditionDecoder + + +class AsymmetricAutoencoderKL(ModelMixin, ConfigMixin): + r""" + Designing a Better Asymmetric VQGAN for StableDiffusion https://arxiv.org/abs/2306.04632 . A VAE model with KL loss + for encoding images into latents and decoding latent representations into images. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + in_channels (int, *optional*, defaults to 3): Number of channels in the input image. + out_channels (int, *optional*, defaults to 3): Number of channels in the output. + down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): + Tuple of downsample block types. + down_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): + Tuple of down block output channels. + layers_per_down_block (`int`, *optional*, defaults to `1`): + Number layers for down block. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`): + Tuple of upsample block types. + up_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): + Tuple of up block output channels. + layers_per_up_block (`int`, *optional*, defaults to `1`): + Number layers for up block. + act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. + latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space. + sample_size (`int`, *optional*, defaults to `32`): Sample input size. + norm_num_groups (`int`, *optional*, defaults to `32`): + Number of groups to use for the first normalization layer in ResNet blocks. + scaling_factor (`float`, *optional*, defaults to 0.18215): + The component-wise standard deviation of the trained latent space computed using the first batch of the + training set. This is used to scale the latent space to have unit variance when training the diffusion + model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the + diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 + / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image + Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. + """ + + @register_to_config + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",), + down_block_out_channels: Tuple[int, ...] = (64,), + layers_per_down_block: int = 1, + up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",), + up_block_out_channels: Tuple[int, ...] 
= (64,), + layers_per_up_block: int = 1, + act_fn: str = "silu", + latent_channels: int = 4, + norm_num_groups: int = 32, + sample_size: int = 32, + scaling_factor: float = 0.18215, + ) -> None: + super().__init__() + + # pass init params to Encoder + self.encoder = Encoder( + in_channels=in_channels, + out_channels=latent_channels, + down_block_types=down_block_types, + block_out_channels=down_block_out_channels, + layers_per_block=layers_per_down_block, + act_fn=act_fn, + norm_num_groups=norm_num_groups, + double_z=True, + ) + + # pass init params to Decoder + self.decoder = MaskConditionDecoder( + in_channels=latent_channels, + out_channels=out_channels, + up_block_types=up_block_types, + block_out_channels=up_block_out_channels, + layers_per_block=layers_per_up_block, + act_fn=act_fn, + norm_num_groups=norm_num_groups, + ) + + self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) + self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1) + + self.use_slicing = False + self.use_tiling = False + + self.register_to_config(block_out_channels=up_block_out_channels) + self.register_to_config(force_upcast=False) + + @apply_forward_hook + def encode( + self, x: torch.FloatTensor, return_dict: bool = True + ) -> Union[AutoencoderKLOutput, Tuple[torch.FloatTensor]]: + h = self.encoder(x) + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + + if not return_dict: + return (posterior,) + + return AutoencoderKLOutput(latent_dist=posterior) + + def _decode( + self, + z: torch.FloatTensor, + image: Optional[torch.FloatTensor] = None, + mask: Optional[torch.FloatTensor] = None, + return_dict: bool = True, + ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]: + z = self.post_quant_conv(z) + dec = self.decoder(z, image, mask) + + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) + + @apply_forward_hook + def decode( + self, + z: torch.FloatTensor, + generator: Optional[torch.Generator] = None, + image: Optional[torch.FloatTensor] = None, + mask: Optional[torch.FloatTensor] = None, + return_dict: bool = True, + ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]: + decoded = self._decode(z, image, mask).sample + + if not return_dict: + return (decoded,) + + return DecoderOutput(sample=decoded) + + def forward( + self, + sample: torch.FloatTensor, + mask: Optional[torch.FloatTensor] = None, + sample_posterior: bool = False, + return_dict: bool = True, + generator: Optional[torch.Generator] = None, + ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]: + r""" + Args: + sample (`torch.FloatTensor`): Input sample. + mask (`torch.FloatTensor`, *optional*, defaults to `None`): Optional inpainting mask. + sample_posterior (`bool`, *optional*, defaults to `False`): + Whether to sample from the posterior. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`DecoderOutput`] instead of a plain tuple. 
+ """ + x = sample + posterior = self.encode(x).latent_dist + if sample_posterior: + z = posterior.sample(generator=generator) + else: + z = posterior.mode() + dec = self.decode(z, sample, mask).sample + + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_kl.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_kl.py new file mode 100644 index 000000000..9bbf2023e --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_kl.py @@ -0,0 +1,489 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import FromOriginalVAEMixin +from ...utils.accelerate_utils import apply_forward_hook +from ..attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + Attention, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from ..modeling_outputs import AutoencoderKLOutput +from ..modeling_utils import ModelMixin +from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder + + +class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalVAEMixin): + r""" + A VAE model with KL loss for encoding images into latents and decoding latent representations into images. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + in_channels (int, *optional*, defaults to 3): Number of channels in the input image. + out_channels (int, *optional*, defaults to 3): Number of channels in the output. + down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): + Tuple of downsample block types. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`): + Tuple of upsample block types. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): + Tuple of block output channels. + act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. + latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space. + sample_size (`int`, *optional*, defaults to `32`): Sample input size. + scaling_factor (`float`, *optional*, defaults to 0.18215): + The component-wise standard deviation of the trained latent space computed using the first batch of the + training set. This is used to scale the latent space to have unit variance when training the diffusion + model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the + diffusion model. 
When decoding, the latents are scaled back to the original scale with the formula: `z = 1 + / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image + Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. + force_upcast (`bool`, *optional*, default to `True`): + If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE + can be fine-tuned / trained to a lower range without loosing too much precision in which case + `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix + """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + down_block_types: Tuple[str] = ("DownEncoderBlock2D",), + up_block_types: Tuple[str] = ("UpDecoderBlock2D",), + block_out_channels: Tuple[int] = (64,), + layers_per_block: int = 1, + act_fn: str = "silu", + latent_channels: int = 4, + norm_num_groups: int = 32, + sample_size: int = 32, + scaling_factor: float = 0.18215, + latents_mean: Optional[Tuple[float]] = None, + latents_std: Optional[Tuple[float]] = None, + force_upcast: float = True, + ): + super().__init__() + + # pass init params to Encoder + self.encoder = Encoder( + in_channels=in_channels, + out_channels=latent_channels, + down_block_types=down_block_types, + block_out_channels=block_out_channels, + layers_per_block=layers_per_block, + act_fn=act_fn, + norm_num_groups=norm_num_groups, + double_z=True, + ) + + # pass init params to Decoder + self.decoder = Decoder( + in_channels=latent_channels, + out_channels=out_channels, + up_block_types=up_block_types, + block_out_channels=block_out_channels, + layers_per_block=layers_per_block, + norm_num_groups=norm_num_groups, + act_fn=act_fn, + ) + + self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) + self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1) + + self.use_slicing = False + self.use_tiling = False + + # only relevant if vae tiling is enabled + self.tile_sample_min_size = self.config.sample_size + sample_size = ( + self.config.sample_size[0] + if isinstance(self.config.sample_size, (list, tuple)) + else self.config.sample_size + ) + self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1))) + self.tile_overlap_factor = 0.25 + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (Encoder, Decoder)): + module.gradient_checkpointing = value + + def enable_tiling(self, use_tiling: bool = True): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.use_tiling = use_tiling + + def disable_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing + decoding in one step. + """ + self.enable_tiling(False) + + def enable_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.use_slicing = True + + def disable_slicing(self): + r""" + Disable sliced VAE decoding. 
If `enable_slicing` was previously enabled, this method will go back to computing + decoding in one step. + """ + self.use_slicing = False + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + @apply_forward_hook + def encode( + self, x: torch.FloatTensor, return_dict: bool = True + ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: + """ + Encode a batch of images into latents. + + Args: + x (`torch.FloatTensor`): Input batch of images. 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. + + Returns: + The latent representations of the encoded images. If `return_dict` is True, a + [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. + """ + if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): + return self.tiled_encode(x, return_dict=return_dict) + + if self.use_slicing and x.shape[0] > 1: + encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)] + h = torch.cat(encoded_slices) + else: + h = self.encoder(x) + + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + + if not return_dict: + return (posterior,) + + return AutoencoderKLOutput(latent_dist=posterior) + + def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]: + if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): + return self.tiled_decode(z, return_dict=return_dict) + + z = self.post_quant_conv(z) + dec = self.decoder(z) + + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) + + @apply_forward_hook + def decode( + self, z: torch.FloatTensor, return_dict: bool = True, generator=None + ) -> Union[DecoderOutput, torch.FloatTensor]: + """ + Decode a batch of images. + + Args: + z (`torch.FloatTensor`): Input batch of latent vectors. + return_dict (`bool`, *optional*, defaults to `True`): + Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. + + Returns: + [`~models.vae.DecoderOutput`] or `tuple`: + If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is + returned. + + """ + if self.use_slicing and z.shape[0] > 1: + decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] + decoded = torch.cat(decoded_slices) + else: + decoded = self._decode(z).sample + + if not return_dict: + return (decoded,) + + return DecoderOutput(sample=decoded) + + def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: + blend_extent = min(a.shape[2], b.shape[2], blend_extent) + for y in range(blend_extent): + b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) + return b + + def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: + blend_extent = min(a.shape[3], b.shape[3], blend_extent) + for x in range(blend_extent): + b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) + return b + + def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput: + r"""Encode a batch of images using a tiled encoder. + + When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several + steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is + different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the + tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the + output, but they should be much less noticeable. + + Args: + x (`torch.FloatTensor`): Input batch of images. 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. + + Returns: + [`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`: + If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain + `tuple` is returned. + """ + overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) + blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) + row_limit = self.tile_latent_min_size - blend_extent + + # Split the image into 512x512 tiles and encode them separately. + rows = [] + for i in range(0, x.shape[2], overlap_size): + row = [] + for j in range(0, x.shape[3], overlap_size): + tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] + tile = self.encoder(tile) + tile = self.quant_conv(tile) + row.append(tile) + rows.append(row) + result_rows = [] + for i, row in enumerate(rows): + result_row = [] + for j, tile in enumerate(row): + # blend the above tile and the left tile + # to the current tile and add the current tile to the result row + if i > 0: + tile = self.blend_v(rows[i - 1][j], tile, blend_extent) + if j > 0: + tile = self.blend_h(row[j - 1], tile, blend_extent) + result_row.append(tile[:, :, :row_limit, :row_limit]) + result_rows.append(torch.cat(result_row, dim=3)) + + moments = torch.cat(result_rows, dim=2) + posterior = DiagonalGaussianDistribution(moments) + + if not return_dict: + return (posterior,) + + return AutoencoderKLOutput(latent_dist=posterior) + + def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]: + r""" + Decode a batch of images using a tiled decoder. + + Args: + z (`torch.FloatTensor`): Input batch of latent vectors. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. + + Returns: + [`~models.vae.DecoderOutput`] or `tuple`: + If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is + returned. + """ + overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor)) + blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor) + row_limit = self.tile_sample_min_size - blend_extent + + # Split z into overlapping 64x64 tiles and decode them separately. + # The tiles have an overlap to avoid seams between tiles. 
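+ # Each latent tile spans tile_latent_min_size positions and tiles start every overlap_size positions, so neighbouring tiles overlap. + # After decoding, blend_v/blend_h fade each tile into its top and left neighbours over blend_extent output pixels, and each tile is cropped to row_limit before rows are concatenated.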
+ rows = [] + for i in range(0, z.shape[2], overlap_size): + row = [] + for j in range(0, z.shape[3], overlap_size): + tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] + tile = self.post_quant_conv(tile) + decoded = self.decoder(tile) + row.append(decoded) + rows.append(row) + result_rows = [] + for i, row in enumerate(rows): + result_row = [] + for j, tile in enumerate(row): + # blend the above tile and the left tile + # to the current tile and add the current tile to the result row + if i > 0: + tile = self.blend_v(rows[i - 1][j], tile, blend_extent) + if j > 0: + tile = self.blend_h(row[j - 1], tile, blend_extent) + result_row.append(tile[:, :, :row_limit, :row_limit]) + result_rows.append(torch.cat(result_row, dim=3)) + + dec = torch.cat(result_rows, dim=2) + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) + + def forward( + self, + sample: torch.FloatTensor, + sample_posterior: bool = False, + return_dict: bool = True, + generator: Optional[torch.Generator] = None, + ) -> Union[DecoderOutput, torch.FloatTensor]: + r""" + Args: + sample (`torch.FloatTensor`): Input sample. + sample_posterior (`bool`, *optional*, defaults to `False`): + Whether to sample from the posterior. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`DecoderOutput`] instead of a plain tuple. + """ + x = sample + posterior = self.encode(x).latent_dist + if sample_posterior: + z = posterior.sample(generator=generator) + else: + z = posterior.mode() + dec = self.decode(z).sample + + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections + def fuse_qkv_projections(self): + """ + Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, + key, value) are fused. For cross-attention modules, key and value projection matrices are fused. + + + + This API is 🧪 experimental. + + + """ + self.original_attn_processors = None + + for _, attn_processor in self.attn_processors.items(): + if "Added" in str(attn_processor.__class__.__name__): + raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") + + self.original_attn_processors = self.attn_processors + + for module in self.modules(): + if isinstance(module, Attention): + module.fuse_projections(fuse=True) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections + def unfuse_qkv_projections(self): + """Disables the fused QKV projection if enabled. + + + + This API is 🧪 experimental. + + + + """ + if self.original_attn_processors is not None: + self.set_attn_processor(self.original_attn_processors) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py new file mode 100644 index 000000000..b12226fa4 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py @@ -0,0 +1,399 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...utils import is_torch_version +from ...utils.accelerate_utils import apply_forward_hook +from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor +from ..modeling_outputs import AutoencoderKLOutput +from ..modeling_utils import ModelMixin +from ..unets.unet_3d_blocks import MidBlockTemporalDecoder, UpBlockTemporalDecoder +from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder + + +class TemporalDecoder(nn.Module): + def __init__( + self, + in_channels: int = 4, + out_channels: int = 3, + block_out_channels: Tuple[int] = (128, 256, 512, 512), + layers_per_block: int = 2, + ): + super().__init__() + self.layers_per_block = layers_per_block + + self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1) + self.mid_block = MidBlockTemporalDecoder( + num_layers=self.layers_per_block, + in_channels=block_out_channels[-1], + out_channels=block_out_channels[-1], + attention_head_dim=block_out_channels[-1], + ) + + # up + self.up_blocks = nn.ModuleList([]) + reversed_block_out_channels = list(reversed(block_out_channels)) + output_channel = reversed_block_out_channels[0] + for i in range(len(block_out_channels)): + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + + is_final_block = i == len(block_out_channels) - 1 + up_block = UpBlockTemporalDecoder( + num_layers=self.layers_per_block + 1, + in_channels=prev_output_channel, + out_channels=output_channel, + add_upsample=not is_final_block, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-6) + + self.conv_act = nn.SiLU() + self.conv_out = torch.nn.Conv2d( + in_channels=block_out_channels[0], + out_channels=out_channels, + kernel_size=3, + padding=1, + ) + + conv_out_kernel_size = (3, 1, 1) + padding = [int(k // 2) for k in conv_out_kernel_size] + self.time_conv_out = torch.nn.Conv3d( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=conv_out_kernel_size, + padding=padding, + ) + + self.gradient_checkpointing = False + + def forward( + self, + sample: torch.FloatTensor, + image_only_indicator: torch.FloatTensor, + num_frames: int = 1, + ) -> torch.FloatTensor: + r"""The forward method of the `Decoder` class.""" + + sample = self.conv_in(sample) + + upscale_dtype = next(iter(self.up_blocks.parameters())).dtype + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + # middle + sample = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.mid_block), + sample, + image_only_indicator, + use_reentrant=False, + ) + sample = sample.to(upscale_dtype) + + # up + for up_block in self.up_blocks: + sample = torch.utils.checkpoint.checkpoint( + 
create_custom_forward(up_block), + sample, + image_only_indicator, + use_reentrant=False, + ) + else: + # middle + sample = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.mid_block), + sample, + image_only_indicator, + ) + sample = sample.to(upscale_dtype) + + # up + for up_block in self.up_blocks: + sample = torch.utils.checkpoint.checkpoint( + create_custom_forward(up_block), + sample, + image_only_indicator, + ) + else: + # middle + sample = self.mid_block(sample, image_only_indicator=image_only_indicator) + sample = sample.to(upscale_dtype) + + # up + for up_block in self.up_blocks: + sample = up_block(sample, image_only_indicator=image_only_indicator) + + # post-process + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + batch_frames, channels, height, width = sample.shape + batch_size = batch_frames // num_frames + sample = sample[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) + sample = self.time_conv_out(sample) + + sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width) + + return sample + + +class AutoencoderKLTemporalDecoder(ModelMixin, ConfigMixin): + r""" + A VAE model with KL loss for encoding images into latents and decoding latent representations into images. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + in_channels (int, *optional*, defaults to 3): Number of channels in the input image. + out_channels (int, *optional*, defaults to 3): Number of channels in the output. + down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): + Tuple of downsample block types. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): + Tuple of block output channels. + layers_per_block: (`int`, *optional*, defaults to 1): Number of layers per block. + latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space. + sample_size (`int`, *optional*, defaults to `32`): Sample input size. + scaling_factor (`float`, *optional*, defaults to 0.18215): + The component-wise standard deviation of the trained latent space computed using the first batch of the + training set. This is used to scale the latent space to have unit variance when training the diffusion + model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the + diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 + / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image + Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. + force_upcast (`bool`, *optional*, default to `True`): + If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. 
VAE + can be fine-tuned / trained to a lower range without loosing too much precision in which case + `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix + """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + down_block_types: Tuple[str] = ("DownEncoderBlock2D",), + block_out_channels: Tuple[int] = (64,), + layers_per_block: int = 1, + latent_channels: int = 4, + sample_size: int = 32, + scaling_factor: float = 0.18215, + force_upcast: float = True, + ): + super().__init__() + + # pass init params to Encoder + self.encoder = Encoder( + in_channels=in_channels, + out_channels=latent_channels, + down_block_types=down_block_types, + block_out_channels=block_out_channels, + layers_per_block=layers_per_block, + double_z=True, + ) + + # pass init params to Decoder + self.decoder = TemporalDecoder( + in_channels=latent_channels, + out_channels=out_channels, + block_out_channels=block_out_channels, + layers_per_block=layers_per_block, + ) + + self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) + + sample_size = ( + self.config.sample_size[0] + if isinstance(self.config.sample_size, (list, tuple)) + else self.config.sample_size + ) + self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1))) + self.tile_overlap_factor = 0.25 + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (Encoder, TemporalDecoder)): + module.gradient_checkpointing = value + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
+ ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + @apply_forward_hook + def encode( + self, x: torch.FloatTensor, return_dict: bool = True + ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: + """ + Encode a batch of images into latents. + + Args: + x (`torch.FloatTensor`): Input batch of images. + return_dict (`bool`, *optional*, defaults to `True`): + Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. + + Returns: + The latent representations of the encoded images. If `return_dict` is True, a + [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. + """ + h = self.encoder(x) + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + + if not return_dict: + return (posterior,) + + return AutoencoderKLOutput(latent_dist=posterior) + + @apply_forward_hook + def decode( + self, + z: torch.FloatTensor, + num_frames: int, + return_dict: bool = True, + ) -> Union[DecoderOutput, torch.FloatTensor]: + """ + Decode a batch of images. + + Args: + z (`torch.FloatTensor`): Input batch of latent vectors. + return_dict (`bool`, *optional*, defaults to `True`): + Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. + + Returns: + [`~models.vae.DecoderOutput`] or `tuple`: + If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is + returned. + + """ + batch_size = z.shape[0] // num_frames + image_only_indicator = torch.zeros(batch_size, num_frames, dtype=z.dtype, device=z.device) + decoded = self.decoder(z, num_frames=num_frames, image_only_indicator=image_only_indicator) + + if not return_dict: + return (decoded,) + + return DecoderOutput(sample=decoded) + + def forward( + self, + sample: torch.FloatTensor, + sample_posterior: bool = False, + return_dict: bool = True, + generator: Optional[torch.Generator] = None, + num_frames: int = 1, + ) -> Union[DecoderOutput, torch.FloatTensor]: + r""" + Args: + sample (`torch.FloatTensor`): Input sample. + sample_posterior (`bool`, *optional*, defaults to `False`): + Whether to sample from the posterior. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`DecoderOutput`] instead of a plain tuple. 
+ """ + x = sample + posterior = self.encode(x).latent_dist + if sample_posterior: + z = posterior.sample(generator=generator) + else: + z = posterior.mode() + + dec = self.decode(z, num_frames=num_frames).sample + + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_tiny.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_tiny.py new file mode 100644 index 000000000..ef43526cf --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/autoencoder_tiny.py @@ -0,0 +1,347 @@ +# Copyright 2024 Ollin Boer Bohan and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch + +from ...configuration_utils import ConfigMixin, register_to_config +from ...utils import BaseOutput +from ...utils.accelerate_utils import apply_forward_hook +from ..modeling_utils import ModelMixin +from .vae import DecoderOutput, DecoderTiny, EncoderTiny + + +@dataclass +class AutoencoderTinyOutput(BaseOutput): + """ + Output of AutoencoderTiny encoding method. + + Args: + latents (`torch.Tensor`): Encoded outputs of the `Encoder`. + + """ + + latents: torch.Tensor + + +class AutoencoderTiny(ModelMixin, ConfigMixin): + r""" + A tiny distilled VAE model for encoding images into latents and decoding latent representations into images. + + [`AutoencoderTiny`] is a wrapper around the original implementation of `TAESD`. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented for + all models (such as downloading or saving). + + Parameters: + in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image. + out_channels (`int`, *optional*, defaults to 3): Number of channels in the output. + encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`): + Tuple of integers representing the number of output channels for each encoder block. The length of the + tuple should be equal to the number of encoder blocks. + decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`): + Tuple of integers representing the number of output channels for each decoder block. The length of the + tuple should be equal to the number of decoder blocks. + act_fn (`str`, *optional*, defaults to `"relu"`): + Activation function to be used throughout the model. + latent_channels (`int`, *optional*, defaults to 4): + Number of channels in the latent representation. The latent space acts as a compressed representation of + the input image. + upsampling_scaling_factor (`int`, *optional*, defaults to 2): + Scaling factor for upsampling in the decoder. It determines the size of the output image during the + upsampling process. 
+ num_encoder_blocks (`Tuple[int]`, *optional*, defaults to `(1, 3, 3, 3)`): + Tuple of integers representing the number of encoder blocks at each stage of the encoding process. The + length of the tuple should be equal to the number of stages in the encoder. Each stage has a different + number of encoder blocks. + num_decoder_blocks (`Tuple[int]`, *optional*, defaults to `(3, 3, 3, 1)`): + Tuple of integers representing the number of decoder blocks at each stage of the decoding process. The + length of the tuple should be equal to the number of stages in the decoder. Each stage has a different + number of decoder blocks. + latent_magnitude (`float`, *optional*, defaults to 3.0): + Magnitude of the latent representation. This parameter scales the latent representation values to control + the extent of information preservation. + latent_shift (float, *optional*, defaults to 0.5): + Shift applied to the latent representation. This parameter controls the center of the latent space. + scaling_factor (`float`, *optional*, defaults to 1.0): + The component-wise standard deviation of the trained latent space computed using the first batch of the + training set. This is used to scale the latent space to have unit variance when training the diffusion + model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the + diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 + / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image + Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. For this Autoencoder, + however, no such scaling factor was used, hence the value of 1.0 as the default. + force_upcast (`bool`, *optional*, default to `False`): + If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE + can be fine-tuned / trained to a lower range without losing too much precision, in which case + `force_upcast` can be set to `False` (see this fp16-friendly + [AutoEncoder](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)). + """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + encoder_block_out_channels: Tuple[int, ...] = (64, 64, 64, 64), + decoder_block_out_channels: Tuple[int, ...] = (64, 64, 64, 64), + act_fn: str = "relu", + latent_channels: int = 4, + upsampling_scaling_factor: int = 2, + num_encoder_blocks: Tuple[int, ...] = (1, 3, 3, 3), + num_decoder_blocks: Tuple[int, ...] 
= (3, 3, 3, 1), + latent_magnitude: int = 3, + latent_shift: float = 0.5, + force_upcast: bool = False, + scaling_factor: float = 1.0, + ): + super().__init__() + + if len(encoder_block_out_channels) != len(num_encoder_blocks): + raise ValueError("`encoder_block_out_channels` should have the same length as `num_encoder_blocks`.") + if len(decoder_block_out_channels) != len(num_decoder_blocks): + raise ValueError("`decoder_block_out_channels` should have the same length as `num_decoder_blocks`.") + + self.encoder = EncoderTiny( + in_channels=in_channels, + out_channels=latent_channels, + num_blocks=num_encoder_blocks, + block_out_channels=encoder_block_out_channels, + act_fn=act_fn, + ) + + self.decoder = DecoderTiny( + in_channels=latent_channels, + out_channels=out_channels, + num_blocks=num_decoder_blocks, + block_out_channels=decoder_block_out_channels, + upsampling_scaling_factor=upsampling_scaling_factor, + act_fn=act_fn, + ) + + self.latent_magnitude = latent_magnitude + self.latent_shift = latent_shift + self.scaling_factor = scaling_factor + + self.use_slicing = False + self.use_tiling = False + + # only relevant if vae tiling is enabled + self.spatial_scale_factor = 2**out_channels + self.tile_overlap_factor = 0.125 + self.tile_sample_min_size = 512 + self.tile_latent_min_size = self.tile_sample_min_size // self.spatial_scale_factor + + self.register_to_config(block_out_channels=decoder_block_out_channels) + self.register_to_config(force_upcast=False) + + def _set_gradient_checkpointing(self, module, value: bool = False) -> None: + if isinstance(module, (EncoderTiny, DecoderTiny)): + module.gradient_checkpointing = value + + def scale_latents(self, x: torch.FloatTensor) -> torch.FloatTensor: + """raw latents -> [0, 1]""" + return x.div(2 * self.latent_magnitude).add(self.latent_shift).clamp(0, 1) + + def unscale_latents(self, x: torch.FloatTensor) -> torch.FloatTensor: + """[0, 1] -> raw latents""" + return x.sub(self.latent_shift).mul(2 * self.latent_magnitude) + + def enable_slicing(self) -> None: + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.use_slicing = True + + def disable_slicing(self) -> None: + r""" + Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing + decoding in one step. + """ + self.use_slicing = False + + def enable_tiling(self, use_tiling: bool = True) -> None: + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.use_tiling = use_tiling + + def disable_tiling(self) -> None: + r""" + Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing + decoding in one step. + """ + self.enable_tiling(False) + + def _tiled_encode(self, x: torch.FloatTensor) -> torch.FloatTensor: + r"""Encode a batch of images using a tiled encoder. + + When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several + steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the + tiles overlap and are blended together to form a smooth output. + + Args: + x (`torch.FloatTensor`): Input batch of images. 
+
+        Returns:
+            `torch.FloatTensor`: Encoded batch of images.
+        """
+        # scale of encoder output relative to input
+        sf = self.spatial_scale_factor
+        tile_size = self.tile_sample_min_size
+
+        # number of pixels to blend and to traverse between tiles
+        blend_size = int(tile_size * self.tile_overlap_factor)
+        traverse_size = tile_size - blend_size
+
+        # tiles index (up/left)
+        ti = range(0, x.shape[-2], traverse_size)
+        tj = range(0, x.shape[-1], traverse_size)
+
+        # mask for blending
+        blend_masks = torch.stack(
+            torch.meshgrid([torch.arange(tile_size / sf) / (blend_size / sf - 1)] * 2, indexing="ij")
+        )
+        blend_masks = blend_masks.clamp(0, 1).to(x.device)
+
+        # output array
+        out = torch.zeros(x.shape[0], 4, x.shape[-2] // sf, x.shape[-1] // sf, device=x.device)
+        for i in ti:
+            for j in tj:
+                tile_in = x[..., i : i + tile_size, j : j + tile_size]
+                # tile result
+                tile_out = out[..., i // sf : (i + tile_size) // sf, j // sf : (j + tile_size) // sf]
+                tile = self.encoder(tile_in)
+                h, w = tile.shape[-2], tile.shape[-1]
+                # blend tile result into output
+                blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
+                blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
+                blend_mask = blend_mask_i * blend_mask_j
+                tile, blend_mask = tile[..., :h, :w], blend_mask[..., :h, :w]
+                tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
+        return out
+
+    def _tiled_decode(self, x: torch.FloatTensor) -> torch.FloatTensor:
+        r"""Decode a batch of latents using a tiled decoder.
+
+        When this option is enabled, the VAE will split the input tensor into tiles to compute decoding in several
+        steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
+        tiles overlap and are blended together to form a smooth output.
+
+        Args:
+            x (`torch.FloatTensor`): Input batch of latents.
+
+        Returns:
+            `torch.FloatTensor`: Decoded batch of images.
+        """
+        # scale of decoder output relative to input
+        sf = self.spatial_scale_factor
+        tile_size = self.tile_latent_min_size
+
+        # number of pixels to blend and to traverse between tiles
+        blend_size = int(tile_size * self.tile_overlap_factor)
+        traverse_size = tile_size - blend_size
+
+        # tiles index (up/left)
+        ti = range(0, x.shape[-2], traverse_size)
+        tj = range(0, x.shape[-1], traverse_size)
+
+        # mask for blending
+        blend_masks = torch.stack(
+            torch.meshgrid([torch.arange(tile_size * sf) / (blend_size * sf - 1)] * 2, indexing="ij")
+        )
+        blend_masks = blend_masks.clamp(0, 1).to(x.device)
+
+        # output array
+        out = torch.zeros(x.shape[0], 3, x.shape[-2] * sf, x.shape[-1] * sf, device=x.device)
+        for i in ti:
+            for j in tj:
+                tile_in = x[..., i : i + tile_size, j : j + tile_size]
+                # tile result
+                tile_out = out[..., i * sf : (i + tile_size) * sf, j * sf : (j + tile_size) * sf]
+                tile = self.decoder(tile_in)
+                h, w = tile.shape[-2], tile.shape[-1]
+                # blend tile result into output
+                blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
+                blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
+                blend_mask = (blend_mask_i * blend_mask_j)[..., :h, :w]
+                tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
+        return out
+
+    @apply_forward_hook
+    def encode(
+        self, x: torch.FloatTensor, return_dict: bool = True
+    ) -> Union[AutoencoderTinyOutput, Tuple[torch.FloatTensor]]:
+        if self.use_slicing and x.shape[0] > 1:
+            output = [
+                self._tiled_encode(x_slice) if self.use_tiling else self.encoder(x_slice) for x_slice in x.split(1)
+            ]
+            output = torch.cat(output)
+        else:
+            output = self._tiled_encode(x) if self.use_tiling else self.encoder(x)
+
+        if not return_dict:
+            return (output,)
+
+        return AutoencoderTinyOutput(latents=output)
+
+    @apply_forward_hook
+    def decode(
+        self, x: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True
+    ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
+        if self.use_slicing and x.shape[0] > 1:
+            output = [
+                self._tiled_decode(x_slice) if self.use_tiling else self.decoder(x_slice) for x_slice in x.split(1)
+            ]
+            output = torch.cat(output)
+        else:
+            output = self._tiled_decode(x) if self.use_tiling else self.decoder(x)
+
+        if not return_dict:
+            return (output,)
+
+        return DecoderOutput(sample=output)
+
+    def forward(
+        self,
+        sample: torch.FloatTensor,
+        return_dict: bool = True,
+    ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
+        r"""
+        Args:
+            sample (`torch.FloatTensor`): Input sample.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
+        """
+        enc = self.encode(sample).latents
+
+        # scale latents to be in [0, 1], then quantize latents to a byte tensor,
+        # as if we were storing the latents in an RGBA uint8 image.
+        scaled_enc = self.scale_latents(enc).mul_(255).round_().byte()
+
+        # unquantize latents back into [0, 1], then unscale latents back to their original range,
+        # as if we were loading the latents from an RGBA uint8 image.
+ unscaled_enc = self.unscale_latents(scaled_enc / 255.0) + + dec = self.decode(unscaled_enc) + + if not return_dict: + return (dec,) + return DecoderOutput(sample=dec) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/consistency_decoder_vae.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/consistency_decoder_vae.py new file mode 100644 index 000000000..72c512da9 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/consistency_decoder_vae.py @@ -0,0 +1,435 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from dataclasses import dataclass +from typing import Dict, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...schedulers import ConsistencyDecoderScheduler +from ...utils import BaseOutput +from ...utils.accelerate_utils import apply_forward_hook +from ...utils.torch_utils import randn_tensor +from ..attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from ..modeling_utils import ModelMixin +from ..unets.unet_2d import UNet2DModel +from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder + + +@dataclass +class ConsistencyDecoderVAEOutput(BaseOutput): + """ + Output of encoding method. + + Args: + latent_dist (`DiagonalGaussianDistribution`): + Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`. + `DiagonalGaussianDistribution` allows for sampling latents from the distribution. + """ + + latent_dist: "DiagonalGaussianDistribution" + + +class ConsistencyDecoderVAE(ModelMixin, ConfigMixin): + r""" + The consistency decoder used with DALL-E 3. + + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionPipeline, ConsistencyDecoderVAE + + >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16) + >>> pipe = StableDiffusionPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16 + ... ).to("cuda") + + >>> pipe("horse", generator=torch.manual_seed(0)).images + ``` + """ + + @register_to_config + def __init__( + self, + scaling_factor: float = 0.18215, + latent_channels: int = 4, + encoder_act_fn: str = "silu", + encoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512), + encoder_double_z: bool = True, + encoder_down_block_types: Tuple[str, ...] 
= ( + "DownEncoderBlock2D", + "DownEncoderBlock2D", + "DownEncoderBlock2D", + "DownEncoderBlock2D", + ), + encoder_in_channels: int = 3, + encoder_layers_per_block: int = 2, + encoder_norm_num_groups: int = 32, + encoder_out_channels: int = 4, + decoder_add_attention: bool = False, + decoder_block_out_channels: Tuple[int, ...] = (320, 640, 1024, 1024), + decoder_down_block_types: Tuple[str, ...] = ( + "ResnetDownsampleBlock2D", + "ResnetDownsampleBlock2D", + "ResnetDownsampleBlock2D", + "ResnetDownsampleBlock2D", + ), + decoder_downsample_padding: int = 1, + decoder_in_channels: int = 7, + decoder_layers_per_block: int = 3, + decoder_norm_eps: float = 1e-05, + decoder_norm_num_groups: int = 32, + decoder_num_train_timesteps: int = 1024, + decoder_out_channels: int = 6, + decoder_resnet_time_scale_shift: str = "scale_shift", + decoder_time_embedding_type: str = "learned", + decoder_up_block_types: Tuple[str, ...] = ( + "ResnetUpsampleBlock2D", + "ResnetUpsampleBlock2D", + "ResnetUpsampleBlock2D", + "ResnetUpsampleBlock2D", + ), + ): + super().__init__() + self.encoder = Encoder( + act_fn=encoder_act_fn, + block_out_channels=encoder_block_out_channels, + double_z=encoder_double_z, + down_block_types=encoder_down_block_types, + in_channels=encoder_in_channels, + layers_per_block=encoder_layers_per_block, + norm_num_groups=encoder_norm_num_groups, + out_channels=encoder_out_channels, + ) + + self.decoder_unet = UNet2DModel( + add_attention=decoder_add_attention, + block_out_channels=decoder_block_out_channels, + down_block_types=decoder_down_block_types, + downsample_padding=decoder_downsample_padding, + in_channels=decoder_in_channels, + layers_per_block=decoder_layers_per_block, + norm_eps=decoder_norm_eps, + norm_num_groups=decoder_norm_num_groups, + num_train_timesteps=decoder_num_train_timesteps, + out_channels=decoder_out_channels, + resnet_time_scale_shift=decoder_resnet_time_scale_shift, + time_embedding_type=decoder_time_embedding_type, + up_block_types=decoder_up_block_types, + ) + self.decoder_scheduler = ConsistencyDecoderScheduler() + self.register_to_config(block_out_channels=encoder_block_out_channels) + self.register_to_config(force_upcast=False) + self.register_buffer( + "means", + torch.tensor([0.38862467, 0.02253063, 0.07381133, -0.0171294])[None, :, None, None], + persistent=False, + ) + self.register_buffer( + "stds", torch.tensor([0.9654121, 1.0440036, 0.76147926, 0.77022034])[None, :, None, None], persistent=False + ) + + self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) + + self.use_slicing = False + self.use_tiling = False + + # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_tiling + def enable_tiling(self, use_tiling: bool = True): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.use_tiling = use_tiling + + # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_tiling + def disable_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing + decoding in one step. + """ + self.enable_tiling(False) + + # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_slicing + def enable_slicing(self): + r""" + Enable sliced VAE decoding. 
When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.use_slicing = True + + # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_slicing + def disable_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing + decoding in one step. + """ + self.use_slicing = False + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. 
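+
+        Example (illustrative sketch; `vae` is assumed to be an already-constructed `ConsistencyDecoderVAE`):
+
+        ```py
+        >>> vae.set_default_attn_processor()  # reverts any custom processors to the library defaults
+        ```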
+ """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + @apply_forward_hook + def encode( + self, x: torch.FloatTensor, return_dict: bool = True + ) -> Union[ConsistencyDecoderVAEOutput, Tuple[DiagonalGaussianDistribution]]: + """ + Encode a batch of images into latents. + + Args: + x (`torch.FloatTensor`): Input batch of images. + return_dict (`bool`, *optional*, defaults to `True`): + Whether to return a [`~models.consistecy_decoder_vae.ConsistencyDecoderOoutput`] instead of a plain + tuple. + + Returns: + The latent representations of the encoded images. If `return_dict` is True, a + [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned, otherwise a plain `tuple` + is returned. + """ + if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): + return self.tiled_encode(x, return_dict=return_dict) + + if self.use_slicing and x.shape[0] > 1: + encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)] + h = torch.cat(encoded_slices) + else: + h = self.encoder(x) + + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + + if not return_dict: + return (posterior,) + + return ConsistencyDecoderVAEOutput(latent_dist=posterior) + + @apply_forward_hook + def decode( + self, + z: torch.FloatTensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + num_inference_steps: int = 2, + ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]: + z = (z * self.config.scaling_factor - self.means) / self.stds + + scale_factor = 2 ** (len(self.config.block_out_channels) - 1) + z = F.interpolate(z, mode="nearest", scale_factor=scale_factor) + + batch_size, _, height, width = z.shape + + self.decoder_scheduler.set_timesteps(num_inference_steps, device=self.device) + + x_t = self.decoder_scheduler.init_noise_sigma * randn_tensor( + (batch_size, 3, height, width), generator=generator, dtype=z.dtype, device=z.device + ) + + for t in self.decoder_scheduler.timesteps: + model_input = torch.concat([self.decoder_scheduler.scale_model_input(x_t, t), z], dim=1) + model_output = self.decoder_unet(model_input, t).sample[:, :3, :, :] + prev_sample = self.decoder_scheduler.step(model_output, t, x_t, generator).prev_sample + x_t = prev_sample + + x_0 = x_t + + if not return_dict: + return (x_0,) + + return DecoderOutput(sample=x_0) + + # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_v + def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: + blend_extent = min(a.shape[2], b.shape[2], blend_extent) + for y in range(blend_extent): + b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) + return b + + # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_h + def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: + blend_extent = min(a.shape[3], b.shape[3], blend_extent) + for x in range(blend_extent): + b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) 
+ return b + + def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> ConsistencyDecoderVAEOutput: + r"""Encode a batch of images using a tiled encoder. + + When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several + steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is + different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the + tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the + output, but they should be much less noticeable. + + Args: + x (`torch.FloatTensor`): Input batch of images. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] instead of a + plain tuple. + + Returns: + [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] or `tuple`: + If return_dict is True, a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned, + otherwise a plain `tuple` is returned. + """ + overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) + blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) + row_limit = self.tile_latent_min_size - blend_extent + + # Split the image into 512x512 tiles and encode them separately. + rows = [] + for i in range(0, x.shape[2], overlap_size): + row = [] + for j in range(0, x.shape[3], overlap_size): + tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] + tile = self.encoder(tile) + tile = self.quant_conv(tile) + row.append(tile) + rows.append(row) + result_rows = [] + for i, row in enumerate(rows): + result_row = [] + for j, tile in enumerate(row): + # blend the above tile and the left tile + # to the current tile and add the current tile to the result row + if i > 0: + tile = self.blend_v(rows[i - 1][j], tile, blend_extent) + if j > 0: + tile = self.blend_h(row[j - 1], tile, blend_extent) + result_row.append(tile[:, :, :row_limit, :row_limit]) + result_rows.append(torch.cat(result_row, dim=3)) + + moments = torch.cat(result_rows, dim=2) + posterior = DiagonalGaussianDistribution(moments) + + if not return_dict: + return (posterior,) + + return ConsistencyDecoderVAEOutput(latent_dist=posterior) + + def forward( + self, + sample: torch.FloatTensor, + sample_posterior: bool = False, + return_dict: bool = True, + generator: Optional[torch.Generator] = None, + ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]: + r""" + Args: + sample (`torch.FloatTensor`): Input sample. + sample_posterior (`bool`, *optional*, defaults to `False`): + Whether to sample from the posterior. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`DecoderOutput`] instead of a plain tuple. + generator (`torch.Generator`, *optional*, defaults to `None`): + Generator to use for sampling. + + Returns: + [`DecoderOutput`] or `tuple`: + If return_dict is True, a [`DecoderOutput`] is returned, otherwise a plain `tuple` is returned. 
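+
+        Example (a minimal sketch; the checkpoint name comes from the class docstring, the input shape is assumed):
+
+        ```py
+        >>> import torch
+        >>> from diffusers import ConsistencyDecoderVAE
+
+        >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder")
+        >>> image = torch.randn(1, 3, 256, 256)
+        >>> # encode + consistency-decode round trip; `sample` has the same shape as `image`
+        >>> reconstruction = vae(image, generator=torch.manual_seed(0)).sample
+        ```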
+ """ + x = sample + posterior = self.encode(x).latent_dist + if sample_posterior: + z = posterior.sample(generator=generator) + else: + z = posterior.mode() + dec = self.decode(z, generator=generator).sample + + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/vae.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/vae.py new file mode 100644 index 000000000..885a0a10c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/autoencoders/vae.py @@ -0,0 +1,992 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from dataclasses import dataclass +from typing import Optional, Tuple +import os + +import numpy as np +import torch +import torch.nn as nn + +from ...utils import BaseOutput, is_torch_version +from ...utils.torch_utils import randn_tensor +from ..activations import get_activation +from ..attention_processor import SpatialNorm +from ..unets.unet_2d_blocks import ( + AutoencoderTinyBlock, + UNetMidBlock2D, + get_down_block, + get_up_block, +) +from ..nhwc_groupnorm.custom_gn import GN_NHWC + + +@dataclass +class DecoderOutput(BaseOutput): + r""" + Output of decoding method. + + Args: + sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + The decoded output sample from the last layer of the model. + """ + + sample: torch.FloatTensor + + +class Encoder(nn.Module): + r""" + The `Encoder` layer of a variational autoencoder that encodes its input into a latent representation. + + Args: + in_channels (`int`, *optional*, defaults to 3): + The number of input channels. + out_channels (`int`, *optional*, defaults to 3): + The number of output channels. + down_block_types (`Tuple[str, ...]`, *optional*, defaults to `("DownEncoderBlock2D",)`): + The types of down blocks to use. See `~diffusers.models.unet_2d_blocks.get_down_block` for available + options. + block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`): + The number of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): + The number of layers per block. + norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups for normalization. + act_fn (`str`, *optional*, defaults to `"silu"`): + The activation function to use. See `~diffusers.models.activations.get_activation` for available options. + double_z (`bool`, *optional*, defaults to `True`): + Whether to double the number of output channels for the last block. + """ + + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",), + block_out_channels: Tuple[int, ...] 
= (64,), + layers_per_block: int = 2, + norm_num_groups: int = 32, + act_fn: str = "silu", + double_z: bool = True, + mid_block_add_attention=True, + ): + super().__init__() + self.layers_per_block = layers_per_block + + self.conv_in = nn.Conv2d( + in_channels, + block_out_channels[0], + kernel_size=3, + stride=1, + padding=1, + ) + + self.mid_block = None + self.down_blocks = nn.ModuleList([]) + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=self.layers_per_block, + in_channels=input_channel, + out_channels=output_channel, + add_downsample=not is_final_block, + resnet_eps=1e-6, + downsample_padding=0, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + attention_head_dim=output_channel, + temb_channels=None, + ) + self.down_blocks.append(down_block) + + # mid + self.mid_block = UNetMidBlock2D( + in_channels=block_out_channels[-1], + resnet_eps=1e-6, + resnet_act_fn=act_fn, + output_scale_factor=1, + resnet_time_scale_shift="default", + attention_head_dim=block_out_channels[-1], + resnet_groups=norm_num_groups, + temb_channels=None, + add_attention=mid_block_add_attention, + ) + # out + self.fuse_gn_silu = True if int(os.environ.get("USE_NHWC_GN", 0)) else False + if self.fuse_gn_silu: + self.conv_norm_out = GN_NHWC(norm_num_groups, block_out_channels[-1], activation="silu") + else: + self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6) + self.conv_act = nn.SiLU() + + conv_out_channels = 2 * out_channels if double_z else out_channels + self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1) + + self.gradient_checkpointing = False + + def forward(self, sample: torch.FloatTensor) -> torch.FloatTensor: + r"""The forward method of the `Encoder` class.""" + + sample = self.conv_in(sample) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + # down + if is_torch_version(">=", "1.11.0"): + for down_block in self.down_blocks: + sample = torch.utils.checkpoint.checkpoint( + create_custom_forward(down_block), sample, use_reentrant=False + ) + # middle + sample = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.mid_block), sample, use_reentrant=False + ) + else: + for down_block in self.down_blocks: + sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample) + # middle + sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample) + + else: + # down + for down_block in self.down_blocks: + sample = down_block(sample) + + # middle + sample = self.mid_block(sample) + + # post-process + sample = self.conv_norm_out(sample) + if not self.fuse_gn_silu: + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + return sample + + +class Decoder(nn.Module): + r""" + The `Decoder` layer of a variational autoencoder that decodes its latent representation into an output sample. + + Args: + in_channels (`int`, *optional*, defaults to 3): + The number of input channels. + out_channels (`int`, *optional*, defaults to 3): + The number of output channels. + up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`): + The types of up blocks to use. 
See `~diffusers.models.unet_2d_blocks.get_up_block` for available options. + block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`): + The number of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): + The number of layers per block. + norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups for normalization. + act_fn (`str`, *optional*, defaults to `"silu"`): + The activation function to use. See `~diffusers.models.activations.get_activation` for available options. + norm_type (`str`, *optional*, defaults to `"group"`): + The normalization type to use. Can be either `"group"` or `"spatial"`. + """ + + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",), + block_out_channels: Tuple[int, ...] = (64,), + layers_per_block: int = 2, + norm_num_groups: int = 32, + act_fn: str = "silu", + norm_type: str = "group", # group, spatial + mid_block_add_attention=True, + ): + super().__init__() + self.layers_per_block = layers_per_block + + self.conv_in = nn.Conv2d( + in_channels, + block_out_channels[-1], + kernel_size=3, + stride=1, + padding=1, + ) + + self.mid_block = None + self.up_blocks = nn.ModuleList([]) + + temb_channels = in_channels if norm_type == "spatial" else None + + # mid + self.mid_block = UNetMidBlock2D( + in_channels=block_out_channels[-1], + resnet_eps=1e-6, + resnet_act_fn=act_fn, + output_scale_factor=1, + resnet_time_scale_shift="default" if norm_type == "group" else norm_type, + attention_head_dim=block_out_channels[-1], + resnet_groups=norm_num_groups, + temb_channels=temb_channels, + add_attention=mid_block_add_attention, + ) + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + + is_final_block = i == len(block_out_channels) - 1 + + up_block = get_up_block( + up_block_type, + num_layers=self.layers_per_block + 1, + in_channels=prev_output_channel, + out_channels=output_channel, + prev_output_channel=None, + add_upsample=not is_final_block, + resnet_eps=1e-6, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + attention_head_dim=output_channel, + temb_channels=temb_channels, + resnet_time_scale_shift=norm_type, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + if norm_type == "spatial": + self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels) + else: + if int(os.environ.get("USE_NHWC_GN", 0)): + self.conv_norm_out = GN_NHWC(norm_num_groups, block_out_channels[0], activation="identity") + else: + self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6) + self.conv_act = nn.SiLU() + self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1) + + self.gradient_checkpointing = False + + def forward( + self, + sample: torch.FloatTensor, + latent_embeds: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + r"""The forward method of the `Decoder` class.""" + + sample = self.conv_in(sample) + + upscale_dtype = next(iter(self.up_blocks.parameters())).dtype + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + # middle + sample 
= torch.utils.checkpoint.checkpoint( + create_custom_forward(self.mid_block), + sample, + latent_embeds, + use_reentrant=False, + ) + sample = sample.to(upscale_dtype) + + # up + for up_block in self.up_blocks: + sample = torch.utils.checkpoint.checkpoint( + create_custom_forward(up_block), + sample, + latent_embeds, + use_reentrant=False, + ) + else: + # middle + sample = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.mid_block), sample, latent_embeds + ) + sample = sample.to(upscale_dtype) + + # up + for up_block in self.up_blocks: + sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds) + else: + # middle + sample = self.mid_block(sample, latent_embeds) + sample = sample.to(upscale_dtype) + + # up + for up_block in self.up_blocks: + sample = up_block(sample, latent_embeds) + + # post-process + if latent_embeds is None: + sample = self.conv_norm_out(sample) + else: + sample = self.conv_norm_out(sample, latent_embeds) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + return sample + + +class UpSample(nn.Module): + r""" + The `UpSample` layer of a variational autoencoder that upsamples its input. + + Args: + in_channels (`int`, *optional*, defaults to 3): + The number of input channels. + out_channels (`int`, *optional*, defaults to 3): + The number of output channels. + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + ) -> None: + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1) + + def forward(self, x: torch.FloatTensor) -> torch.FloatTensor: + r"""The forward method of the `UpSample` class.""" + x = torch.relu(x) + x = self.deconv(x) + return x + + +class MaskConditionEncoder(nn.Module): + """ + used in AsymmetricAutoencoderKL + """ + + def __init__( + self, + in_ch: int, + out_ch: int = 192, + res_ch: int = 768, + stride: int = 16, + ) -> None: + super().__init__() + + channels = [] + while stride > 1: + stride = stride // 2 + in_ch_ = out_ch * 2 + if out_ch > res_ch: + out_ch = res_ch + if stride == 1: + in_ch_ = res_ch + channels.append((in_ch_, out_ch)) + out_ch *= 2 + + out_channels = [] + for _in_ch, _out_ch in channels: + out_channels.append(_out_ch) + out_channels.append(channels[-1][0]) + + layers = [] + in_ch_ = in_ch + for l in range(len(out_channels)): + out_ch_ = out_channels[l] + if l == 0 or l == 1: + layers.append(nn.Conv2d(in_ch_, out_ch_, kernel_size=3, stride=1, padding=1)) + else: + layers.append(nn.Conv2d(in_ch_, out_ch_, kernel_size=4, stride=2, padding=1)) + in_ch_ = out_ch_ + + self.layers = nn.Sequential(*layers) + + def forward(self, x: torch.FloatTensor, mask=None) -> torch.FloatTensor: + r"""The forward method of the `MaskConditionEncoder` class.""" + out = {} + for l in range(len(self.layers)): + layer = self.layers[l] + x = layer(x) + out[str(tuple(x.shape))] = x + x = torch.relu(x) + return out + + +class MaskConditionDecoder(nn.Module): + r"""The `MaskConditionDecoder` should be used in combination with [`AsymmetricAutoencoderKL`] to enhance the model's + decoder with a conditioner on the mask and masked image. + + Args: + in_channels (`int`, *optional*, defaults to 3): + The number of input channels. + out_channels (`int`, *optional*, defaults to 3): + The number of output channels. + up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`): + The types of up blocks to use. 
See `~diffusers.models.unet_2d_blocks.get_up_block` for available options. + block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`): + The number of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): + The number of layers per block. + norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups for normalization. + act_fn (`str`, *optional*, defaults to `"silu"`): + The activation function to use. See `~diffusers.models.activations.get_activation` for available options. + norm_type (`str`, *optional*, defaults to `"group"`): + The normalization type to use. Can be either `"group"` or `"spatial"`. + """ + + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",), + block_out_channels: Tuple[int, ...] = (64,), + layers_per_block: int = 2, + norm_num_groups: int = 32, + act_fn: str = "silu", + norm_type: str = "group", # group, spatial + ): + super().__init__() + self.layers_per_block = layers_per_block + + self.conv_in = nn.Conv2d( + in_channels, + block_out_channels[-1], + kernel_size=3, + stride=1, + padding=1, + ) + + self.mid_block = None + self.up_blocks = nn.ModuleList([]) + + temb_channels = in_channels if norm_type == "spatial" else None + + # mid + self.mid_block = UNetMidBlock2D( + in_channels=block_out_channels[-1], + resnet_eps=1e-6, + resnet_act_fn=act_fn, + output_scale_factor=1, + resnet_time_scale_shift="default" if norm_type == "group" else norm_type, + attention_head_dim=block_out_channels[-1], + resnet_groups=norm_num_groups, + temb_channels=temb_channels, + ) + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + + is_final_block = i == len(block_out_channels) - 1 + + up_block = get_up_block( + up_block_type, + num_layers=self.layers_per_block + 1, + in_channels=prev_output_channel, + out_channels=output_channel, + prev_output_channel=None, + add_upsample=not is_final_block, + resnet_eps=1e-6, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + attention_head_dim=output_channel, + temb_channels=temb_channels, + resnet_time_scale_shift=norm_type, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # condition encoder + self.condition_encoder = MaskConditionEncoder( + in_ch=out_channels, + out_ch=block_out_channels[0], + res_ch=block_out_channels[-1], + ) + + # out + if norm_type == "spatial": + self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels) + else: + self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6) + self.conv_act = nn.SiLU() + self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1) + + self.gradient_checkpointing = False + + def forward( + self, + z: torch.FloatTensor, + image: Optional[torch.FloatTensor] = None, + mask: Optional[torch.FloatTensor] = None, + latent_embeds: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + r"""The forward method of the `MaskConditionDecoder` class.""" + sample = z + sample = self.conv_in(sample) + + upscale_dtype = next(iter(self.up_blocks.parameters())).dtype + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if 
is_torch_version(">=", "1.11.0"): + # middle + sample = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.mid_block), + sample, + latent_embeds, + use_reentrant=False, + ) + sample = sample.to(upscale_dtype) + + # condition encoder + if image is not None and mask is not None: + masked_image = (1 - mask) * image + im_x = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.condition_encoder), + masked_image, + mask, + use_reentrant=False, + ) + + # up + for up_block in self.up_blocks: + if image is not None and mask is not None: + sample_ = im_x[str(tuple(sample.shape))] + mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest") + sample = sample * mask_ + sample_ * (1 - mask_) + sample = torch.utils.checkpoint.checkpoint( + create_custom_forward(up_block), + sample, + latent_embeds, + use_reentrant=False, + ) + if image is not None and mask is not None: + sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask) + else: + # middle + sample = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.mid_block), sample, latent_embeds + ) + sample = sample.to(upscale_dtype) + + # condition encoder + if image is not None and mask is not None: + masked_image = (1 - mask) * image + im_x = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.condition_encoder), + masked_image, + mask, + ) + + # up + for up_block in self.up_blocks: + if image is not None and mask is not None: + sample_ = im_x[str(tuple(sample.shape))] + mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest") + sample = sample * mask_ + sample_ * (1 - mask_) + sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds) + if image is not None and mask is not None: + sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask) + else: + # middle + sample = self.mid_block(sample, latent_embeds) + sample = sample.to(upscale_dtype) + + # condition encoder + if image is not None and mask is not None: + masked_image = (1 - mask) * image + im_x = self.condition_encoder(masked_image, mask) + + # up + for up_block in self.up_blocks: + if image is not None and mask is not None: + sample_ = im_x[str(tuple(sample.shape))] + mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest") + sample = sample * mask_ + sample_ * (1 - mask_) + sample = up_block(sample, latent_embeds) + if image is not None and mask is not None: + sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask) + + # post-process + if latent_embeds is None: + sample = self.conv_norm_out(sample) + else: + sample = self.conv_norm_out(sample, latent_embeds) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + return sample + + +class VectorQuantizer(nn.Module): + """ + Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly avoids costly matrix + multiplications and allows for post-hoc remapping of indices. + """ + + # NOTE: due to a bug the beta term was applied to the wrong term. for + # backwards compatibility we use the buggy version by default, but you can + # specify legacy=False to fix it. 
+ def __init__( + self, + n_e: int, + vq_embed_dim: int, + beta: float, + remap=None, + unknown_index: str = "random", + sane_index_shape: bool = False, + legacy: bool = True, + ): + super().__init__() + self.n_e = n_e + self.vq_embed_dim = vq_embed_dim + self.beta = beta + self.legacy = legacy + + self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim) + self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e) + + self.remap = remap + if self.remap is not None: + self.register_buffer("used", torch.tensor(np.load(self.remap))) + self.used: torch.Tensor + self.re_embed = self.used.shape[0] + self.unknown_index = unknown_index # "random" or "extra" or integer + if self.unknown_index == "extra": + self.unknown_index = self.re_embed + self.re_embed = self.re_embed + 1 + print( + f"Remapping {self.n_e} indices to {self.re_embed} indices. " + f"Using {self.unknown_index} for unknown indices." + ) + else: + self.re_embed = n_e + + self.sane_index_shape = sane_index_shape + + def remap_to_used(self, inds: torch.LongTensor) -> torch.LongTensor: + ishape = inds.shape + assert len(ishape) > 1 + inds = inds.reshape(ishape[0], -1) + used = self.used.to(inds) + match = (inds[:, :, None] == used[None, None, ...]).long() + new = match.argmax(-1) + unknown = match.sum(2) < 1 + if self.unknown_index == "random": + new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device) + else: + new[unknown] = self.unknown_index + return new.reshape(ishape) + + def unmap_to_all(self, inds: torch.LongTensor) -> torch.LongTensor: + ishape = inds.shape + assert len(ishape) > 1 + inds = inds.reshape(ishape[0], -1) + used = self.used.to(inds) + if self.re_embed > self.used.shape[0]: # extra token + inds[inds >= self.used.shape[0]] = 0 # simply set to zero + back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds) + return back.reshape(ishape) + + def forward(self, z: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, Tuple]: + # reshape z -> (batch, height, width, channel) and flatten + z = z.permute(0, 2, 3, 1).contiguous() + z_flattened = z.view(-1, self.vq_embed_dim) + + # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z + min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1) + + z_q = self.embedding(min_encoding_indices).view(z.shape) + perplexity = None + min_encodings = None + + # compute loss for embedding + if not self.legacy: + loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2) + else: + loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2) + + # preserve gradients + z_q: torch.FloatTensor = z + (z_q - z).detach() + + # reshape back to match original input shape + z_q = z_q.permute(0, 3, 1, 2).contiguous() + + if self.remap is not None: + min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis + min_encoding_indices = self.remap_to_used(min_encoding_indices) + min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten + + if self.sane_index_shape: + min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3]) + + return z_q, loss, (perplexity, min_encodings, min_encoding_indices) + + def get_codebook_entry(self, indices: torch.LongTensor, shape: Tuple[int, ...]) -> torch.FloatTensor: + # shape specifying (batch, height, width, channel) + if self.remap is not None: + indices = indices.reshape(shape[0], -1) # add batch axis + indices = 
self.unmap_to_all(indices) + indices = indices.reshape(-1) # flatten again + + # get quantized latent vectors + z_q: torch.FloatTensor = self.embedding(indices) + + if shape is not None: + z_q = z_q.view(shape) + # reshape back to match original input shape + z_q = z_q.permute(0, 3, 1, 2).contiguous() + + return z_q + + +class DiagonalGaussianDistribution(object): + def __init__(self, parameters: torch.Tensor, deterministic: bool = False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like( + self.mean, device=self.parameters.device, dtype=self.parameters.dtype + ) + + def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor: + # make sure sample is on the same device as the parameters and has same dtype + sample = randn_tensor( + self.mean.shape, + generator=generator, + device=self.parameters.device, + dtype=self.parameters.dtype, + ) + x = self.mean + self.std * sample + return x + + def kl(self, other: "DiagonalGaussianDistribution" = None) -> torch.Tensor: + if self.deterministic: + return torch.Tensor([0.0]) + else: + if other is None: + return 0.5 * torch.sum( + torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, + dim=[1, 2, 3], + ) + else: + return 0.5 * torch.sum( + torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var + - 1.0 + - self.logvar + + other.logvar, + dim=[1, 2, 3], + ) + + def nll(self, sample: torch.Tensor, dims: Tuple[int, ...] = [1, 2, 3]) -> torch.Tensor: + if self.deterministic: + return torch.Tensor([0.0]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum( + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims, + ) + + def mode(self) -> torch.Tensor: + return self.mean + + +class EncoderTiny(nn.Module): + r""" + The `EncoderTiny` layer is a simpler version of the `Encoder` layer. + + Args: + in_channels (`int`): + The number of input channels. + out_channels (`int`): + The number of output channels. + num_blocks (`Tuple[int, ...]`): + Each value of the tuple represents a Conv2d layer followed by `value` number of `AutoencoderTinyBlock`'s to + use. + block_out_channels (`Tuple[int, ...]`): + The number of output channels for each block. + act_fn (`str`): + The activation function to use. See `~diffusers.models.activations.get_activation` for available options. 
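+
+    Example (an illustrative sketch using the `AutoencoderTiny` defaults; the shapes shown are assumptions, not part
+    of the original docstring):
+
+    ```py
+    >>> import torch
+
+    >>> enc = EncoderTiny(
+    ...     in_channels=3,
+    ...     out_channels=4,
+    ...     num_blocks=(1, 3, 3, 3),
+    ...     block_out_channels=(64, 64, 64, 64),
+    ...     act_fn="relu",
+    ... )
+    >>> latents = enc(torch.randn(1, 3, 256, 256))  # three stride-2 convs -> torch.Size([1, 4, 32, 32])
+    ```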
+ """ + + def __init__( + self, + in_channels: int, + out_channels: int, + num_blocks: Tuple[int, ...], + block_out_channels: Tuple[int, ...], + act_fn: str, + ): + super().__init__() + + layers = [] + for i, num_block in enumerate(num_blocks): + num_channels = block_out_channels[i] + + if i == 0: + layers.append(nn.Conv2d(in_channels, num_channels, kernel_size=3, padding=1)) + else: + layers.append( + nn.Conv2d( + num_channels, + num_channels, + kernel_size=3, + padding=1, + stride=2, + bias=False, + ) + ) + + for _ in range(num_block): + layers.append(AutoencoderTinyBlock(num_channels, num_channels, act_fn)) + + layers.append(nn.Conv2d(block_out_channels[-1], out_channels, kernel_size=3, padding=1)) + + self.layers = nn.Sequential(*layers) + self.gradient_checkpointing = False + + def forward(self, x: torch.FloatTensor) -> torch.FloatTensor: + r"""The forward method of the `EncoderTiny` class.""" + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x, use_reentrant=False) + else: + x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x) + + else: + # scale image from [-1, 1] to [0, 1] to match TAESD convention + x = self.layers(x.add(1).div(2)) + + return x + + +class DecoderTiny(nn.Module): + r""" + The `DecoderTiny` layer is a simpler version of the `Decoder` layer. + + Args: + in_channels (`int`): + The number of input channels. + out_channels (`int`): + The number of output channels. + num_blocks (`Tuple[int, ...]`): + Each value of the tuple represents a Conv2d layer followed by `value` number of `AutoencoderTinyBlock`'s to + use. + block_out_channels (`Tuple[int, ...]`): + The number of output channels for each block. + upsampling_scaling_factor (`int`): + The scaling factor to use for upsampling. + act_fn (`str`): + The activation function to use. See `~diffusers.models.activations.get_activation` for available options. + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + num_blocks: Tuple[int, ...], + block_out_channels: Tuple[int, ...], + upsampling_scaling_factor: int, + act_fn: str, + ): + super().__init__() + + layers = [ + nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=1), + get_activation(act_fn), + ] + + for i, num_block in enumerate(num_blocks): + is_final_block = i == (len(num_blocks) - 1) + num_channels = block_out_channels[i] + + for _ in range(num_block): + layers.append(AutoencoderTinyBlock(num_channels, num_channels, act_fn)) + + if not is_final_block: + layers.append(nn.Upsample(scale_factor=upsampling_scaling_factor)) + + conv_out_channel = num_channels if not is_final_block else out_channels + layers.append( + nn.Conv2d( + num_channels, + conv_out_channel, + kernel_size=3, + padding=1, + bias=is_final_block, + ) + ) + + self.layers = nn.Sequential(*layers) + self.gradient_checkpointing = False + + def forward(self, x: torch.FloatTensor) -> torch.FloatTensor: + r"""The forward method of the `DecoderTiny` class.""" + # Clamp. 
+ x = torch.tanh(x / 3) * 3 + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x, use_reentrant=False) + else: + x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x) + + else: + x = self.layers(x) + + # scale image from [0, 1] to [-1, 1] to match diffusers convention + return x.mul(2).sub(1) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/controlnet.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/controlnet.py new file mode 100644 index 000000000..130e6430d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/controlnet.py @@ -0,0 +1,868 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +from torch import nn +from torch.nn import functional as F + +from ..configuration_utils import ConfigMixin, register_to_config +from ..loaders import FromOriginalControlNetMixin +from ..utils import BaseOutput, logging +from .attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from .embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps +from .modeling_utils import ModelMixin +from .unets.unet_2d_blocks import ( + CrossAttnDownBlock2D, + DownBlock2D, + UNetMidBlock2D, + UNetMidBlock2DCrossAttn, + get_down_block, +) +from .unets.unet_2d_condition import UNet2DConditionModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class ControlNetOutput(BaseOutput): + """ + The output of [`ControlNetModel`]. + + Args: + down_block_res_samples (`tuple[torch.Tensor]`): + A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should + be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be + used to condition the original UNet's downsampling activations. + mid_down_block_re_sample (`torch.Tensor`): + The activation of the midde block (the lowest sample resolution). Each tensor should be of shape + `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`. + Output can be used to condition the original UNet's middle block activation. 
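+
+    Example (illustrative sketch; `controlnet`, `unet` and the conditioning tensors are assumed to already exist):
+
+    ```py
+    >>> out = controlnet(
+    ...     sample, timestep, encoder_hidden_states=prompt_embeds, controlnet_cond=cond_image, return_dict=True
+    ... )
+    >>> noise_pred = unet(
+    ...     sample,
+    ...     timestep,
+    ...     encoder_hidden_states=prompt_embeds,
+    ...     down_block_additional_residuals=out.down_block_res_samples,
+    ...     mid_block_additional_residual=out.mid_block_res_sample,
+    ... ).sample
+    ```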
+ """ + + down_block_res_samples: Tuple[torch.Tensor] + mid_block_res_sample: torch.Tensor + + +class ControlNetConditioningEmbedding(nn.Module): + """ + Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN + [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized + training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the + convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides + (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full + model) to encode image-space conditions ... into feature maps ..." + """ + + def __init__( + self, + conditioning_embedding_channels: int, + conditioning_channels: int = 3, + block_out_channels: Tuple[int, ...] = (16, 32, 96, 256), + ): + super().__init__() + + self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1) + + self.blocks = nn.ModuleList([]) + + for i in range(len(block_out_channels) - 1): + channel_in = block_out_channels[i] + channel_out = block_out_channels[i + 1] + self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1)) + self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2)) + + self.conv_out = zero_module( + nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1) + ) + + def forward(self, conditioning): + embedding = self.conv_in(conditioning) + embedding = F.silu(embedding) + + for block in self.blocks: + embedding = block(embedding) + embedding = F.silu(embedding) + + embedding = self.conv_out(embedding) + + return embedding + + +class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlNetMixin): + """ + A ControlNet model. + + Args: + in_channels (`int`, defaults to 4): + The number of channels in the input sample. + flip_sin_to_cos (`bool`, defaults to `True`): + Whether to flip the sin to cos in the time embedding. + freq_shift (`int`, defaults to 0): + The frequency shift to apply to the time embedding. + down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): + The tuple of downsample blocks to use. + only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`): + block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. + layers_per_block (`int`, defaults to 2): + The number of layers per block. + downsample_padding (`int`, defaults to 1): + The padding to use for the downsampling convolution. + mid_block_scale_factor (`float`, defaults to 1): + The scale factor to use for the mid block. + act_fn (`str`, defaults to "silu"): + The activation function to use. + norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups to use for the normalization. If None, normalization and activation layers is skipped + in post-processing. + norm_eps (`float`, defaults to 1e-5): + The epsilon to use for the normalization. + cross_attention_dim (`int`, defaults to 1280): + The dimension of the cross attention features. + transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): + The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. 
Only relevant for
+            [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
+            [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
+        encoder_hid_dim (`int`, *optional*, defaults to None):
+            If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
+            dimension to `cross_attention_dim`.
+        encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
+            If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
+            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
+        attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8):
+            The dimension of the attention heads.
+        use_linear_projection (`bool`, defaults to `False`):
+        class_embed_type (`str`, *optional*, defaults to `None`):
+            The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None,
+            `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
+        addition_embed_type (`str`, *optional*, defaults to `None`):
+            Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
+            "text". "text" will use the `TextTimeEmbedding` layer.
+        num_class_embeds (`int`, *optional*, defaults to 0):
+            Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
+            class conditioning with `class_embed_type` equal to `None`.
+        upcast_attention (`bool`, defaults to `False`):
+        resnet_time_scale_shift (`str`, defaults to `"default"`):
+            Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
+        projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`):
+            The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when
+            `class_embed_type="projection"`.
+        controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
+            The channel order of the conditional image. Will convert to `rgb` if it's `bgr`.
+        conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`):
+            The tuple of output channels for each block in the `conditioning_embedding` layer.
+        global_pool_conditions (`bool`, defaults to `False`):
+            TODO(Patrick) - unused parameter.
+        addition_embed_type_num_heads (`int`, defaults to 64):
+            The number of heads to use for the `TextTimeEmbedding` layer.
+    """
+
+    _supports_gradient_checkpointing = True
+
+    @register_to_config
+    def __init__(
+        self,
+        in_channels: int = 4,
+        conditioning_channels: int = 3,
+        flip_sin_to_cos: bool = True,
+        freq_shift: int = 0,
+        down_block_types: Tuple[str, ...] = (
+            "CrossAttnDownBlock2D",
+            "CrossAttnDownBlock2D",
+            "CrossAttnDownBlock2D",
+            "DownBlock2D",
+        ),
+        mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
+        only_cross_attention: Union[bool, Tuple[bool]] = False,
+        block_out_channels: Tuple[int, ...]
= (320, 640, 1280, 1280), + layers_per_block: int = 2, + downsample_padding: int = 1, + mid_block_scale_factor: float = 1, + act_fn: str = "silu", + norm_num_groups: Optional[int] = 32, + norm_eps: float = 1e-5, + cross_attention_dim: int = 1280, + transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, + encoder_hid_dim: Optional[int] = None, + encoder_hid_dim_type: Optional[str] = None, + attention_head_dim: Union[int, Tuple[int, ...]] = 8, + num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None, + use_linear_projection: bool = False, + class_embed_type: Optional[str] = None, + addition_embed_type: Optional[str] = None, + addition_time_embed_dim: Optional[int] = None, + num_class_embeds: Optional[int] = None, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + projection_class_embeddings_input_dim: Optional[int] = None, + controlnet_conditioning_channel_order: str = "rgb", + conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), + global_pool_conditions: bool = False, + addition_embed_type_num_heads: int = 64, + ): + super().__init__() + + # If `num_attention_heads` is not defined (which is the case for most models) + # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. + # The reason for this behavior is to correct for incorrectly named variables that were introduced + # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 + # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking + # which is why we correct for the naming here. + num_attention_heads = num_attention_heads or attention_head_dim + + # Check inputs + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." 
+ ) + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) + + # input + conv_in_kernel = 3 + conv_in_padding = (conv_in_kernel - 1) // 2 + self.conv_in = nn.Conv2d( + in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding + ) + + # time + time_embed_dim = block_out_channels[0] * 4 + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) + timestep_input_dim = block_out_channels[0] + self.time_embedding = TimestepEmbedding( + timestep_input_dim, + time_embed_dim, + act_fn=act_fn, + ) + + if encoder_hid_dim_type is None and encoder_hid_dim is not None: + encoder_hid_dim_type = "text_proj" + self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) + logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") + + if encoder_hid_dim is None and encoder_hid_dim_type is not None: + raise ValueError( + f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." + ) + + if encoder_hid_dim_type == "text_proj": + self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) + elif encoder_hid_dim_type == "text_image_proj": + # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much + # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use + # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` + self.encoder_hid_proj = TextImageProjection( + text_embed_dim=encoder_hid_dim, + image_embed_dim=cross_attention_dim, + cross_attention_dim=cross_attention_dim, + ) + + elif encoder_hid_dim_type is not None: + raise ValueError( + f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." + ) + else: + self.encoder_hid_proj = None + + # class embedding + if class_embed_type is None and num_class_embeds is not None: + self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) + elif class_embed_type == "timestep": + self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) + elif class_embed_type == "identity": + self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) + elif class_embed_type == "projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" + ) + # The projection `class_embed_type` is the same as the timestep `class_embed_type` except + # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings + # 2. it projects from an arbitrary input dimension. + # + # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. + # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. + # As a result, `TimestepEmbedding` can be passed arbitrary vectors. 
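+            # With `class_embed_type="projection"`, `class_labels` are expected to have shape
+            # `(batch_size, projection_class_embeddings_input_dim)`; they are projected to `time_embed_dim`
+            # and added to the timestep embedding in `forward`.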
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + else: + self.class_embedding = None + + if addition_embed_type == "text": + if encoder_hid_dim is not None: + text_time_embedding_from_dim = encoder_hid_dim + else: + text_time_embedding_from_dim = cross_attention_dim + + self.add_embedding = TextTimeEmbedding( + text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads + ) + elif addition_embed_type == "text_image": + # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much + # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use + # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` + self.add_embedding = TextImageTimeEmbedding( + text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim + ) + elif addition_embed_type == "text_time": + self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) + self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + + elif addition_embed_type is not None: + raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") + + # control net conditioning embedding + self.controlnet_cond_embedding = ControlNetConditioningEmbedding( + conditioning_embedding_channels=block_out_channels[0], + block_out_channels=conditioning_embedding_out_channels, + conditioning_channels=conditioning_channels, + ) + + self.down_blocks = nn.ModuleList([]) + self.controlnet_down_blocks = nn.ModuleList([]) + + if isinstance(only_cross_attention, bool): + only_cross_attention = [only_cross_attention] * len(down_block_types) + + if isinstance(attention_head_dim, int): + attention_head_dim = (attention_head_dim,) * len(down_block_types) + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(down_block_types) + + # down + output_channel = block_out_channels[0] + + controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + controlnet_block = zero_module(controlnet_block) + self.controlnet_down_blocks.append(controlnet_block) + + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + transformer_layers_per_block=transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + temb_channels=time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads[i], + attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, + downsample_padding=downsample_padding, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + self.down_blocks.append(down_block) + + for _ in range(layers_per_block): + controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + controlnet_block = zero_module(controlnet_block) + self.controlnet_down_blocks.append(controlnet_block) + + if not is_final_block: + controlnet_block = 
nn.Conv2d(output_channel, output_channel, kernel_size=1) + controlnet_block = zero_module(controlnet_block) + self.controlnet_down_blocks.append(controlnet_block) + + # mid + mid_block_channel = block_out_channels[-1] + + controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1) + controlnet_block = zero_module(controlnet_block) + self.controlnet_mid_block = controlnet_block + + if mid_block_type == "UNetMidBlock2DCrossAttn": + self.mid_block = UNetMidBlock2DCrossAttn( + transformer_layers_per_block=transformer_layers_per_block[-1], + in_channels=mid_block_channel, + temb_channels=time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_time_scale_shift=resnet_time_scale_shift, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads[-1], + resnet_groups=norm_num_groups, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + ) + elif mid_block_type == "UNetMidBlock2D": + self.mid_block = UNetMidBlock2D( + in_channels=block_out_channels[-1], + temb_channels=time_embed_dim, + num_layers=0, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_groups=norm_num_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + add_attention=False, + ) + else: + raise ValueError(f"unknown mid_block_type : {mid_block_type}") + + @classmethod + def from_unet( + cls, + unet: UNet2DConditionModel, + controlnet_conditioning_channel_order: str = "rgb", + conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), + load_weights_from_unet: bool = True, + conditioning_channels: int = 3, + ): + r""" + Instantiate a [`ControlNetModel`] from [`UNet2DConditionModel`]. + + Parameters: + unet (`UNet2DConditionModel`): + The UNet model weights to copy to the [`ControlNetModel`]. All configuration options are also copied + where applicable. 
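+
+        Example (a minimal sketch; the checkpoint name is only illustrative):
+
+        ```py
+        >>> from diffusers import UNet2DConditionModel
+
+        >>> unet = UNet2DConditionModel.from_pretrained("stabilityai/stable-diffusion-2-1", subfolder="unet")
+        >>> controlnet = ControlNetModel.from_unet(unet)
+        ```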
+ """ + transformer_layers_per_block = ( + unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1 + ) + encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None + encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None + addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None + addition_time_embed_dim = ( + unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None + ) + + controlnet = cls( + encoder_hid_dim=encoder_hid_dim, + encoder_hid_dim_type=encoder_hid_dim_type, + addition_embed_type=addition_embed_type, + addition_time_embed_dim=addition_time_embed_dim, + transformer_layers_per_block=transformer_layers_per_block, + in_channels=unet.config.in_channels, + flip_sin_to_cos=unet.config.flip_sin_to_cos, + freq_shift=unet.config.freq_shift, + down_block_types=unet.config.down_block_types, + only_cross_attention=unet.config.only_cross_attention, + block_out_channels=unet.config.block_out_channels, + layers_per_block=unet.config.layers_per_block, + downsample_padding=unet.config.downsample_padding, + mid_block_scale_factor=unet.config.mid_block_scale_factor, + act_fn=unet.config.act_fn, + norm_num_groups=unet.config.norm_num_groups, + norm_eps=unet.config.norm_eps, + cross_attention_dim=unet.config.cross_attention_dim, + attention_head_dim=unet.config.attention_head_dim, + num_attention_heads=unet.config.num_attention_heads, + use_linear_projection=unet.config.use_linear_projection, + class_embed_type=unet.config.class_embed_type, + num_class_embeds=unet.config.num_class_embeds, + upcast_attention=unet.config.upcast_attention, + resnet_time_scale_shift=unet.config.resnet_time_scale_shift, + projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim, + mid_block_type=unet.config.mid_block_type, + controlnet_conditioning_channel_order=controlnet_conditioning_channel_order, + conditioning_embedding_out_channels=conditioning_embedding_out_channels, + conditioning_channels=conditioning_channels, + ) + + if load_weights_from_unet: + controlnet.conv_in.load_state_dict(unet.conv_in.state_dict()) + controlnet.time_proj.load_state_dict(unet.time_proj.state_dict()) + controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict()) + + if controlnet.class_embedding: + controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict()) + + controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict()) + controlnet.mid_block.load_state_dict(unet.mid_block.state_dict()) + + return controlnet + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. 
+ """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice + def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None: + r""" + Enable sliced attention computation. + + When this option is enabled, the attention module splits the input tensor in slices to compute attention in + several steps. This is useful for saving some memory in exchange for a small decrease in speed. + + Args: + slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): + When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If + `"max"`, maximum amount of memory is saved by running only one slice at a time. 
If a number is + provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` + must be a multiple of `slice_size`. + """ + sliceable_head_dims = [] + + def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): + if hasattr(module, "set_attention_slice"): + sliceable_head_dims.append(module.sliceable_head_dim) + + for child in module.children(): + fn_recursive_retrieve_sliceable_dims(child) + + # retrieve number of attention layers + for module in self.children(): + fn_recursive_retrieve_sliceable_dims(module) + + num_sliceable_layers = len(sliceable_head_dims) + + if slice_size == "auto": + # half the attention head size is usually a good trade-off between + # speed and memory + slice_size = [dim // 2 for dim in sliceable_head_dims] + elif slice_size == "max": + # make smallest slice possible + slice_size = num_sliceable_layers * [1] + + slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size + + if len(slice_size) != len(sliceable_head_dims): + raise ValueError( + f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" + f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." + ) + + for i in range(len(slice_size)): + size = slice_size[i] + dim = sliceable_head_dims[i] + if size is not None and size > dim: + raise ValueError(f"size {size} has to be smaller or equal to {dim}.") + + # Recursively walk through all the children. + # Any children which exposes the set_attention_slice method + # gets the message + def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): + if hasattr(module, "set_attention_slice"): + module.set_attention_slice(slice_size.pop()) + + for child in module.children(): + fn_recursive_set_attention_slice(child, slice_size) + + reversed_slice_size = list(reversed(slice_size)) + for module in self.children(): + fn_recursive_set_attention_slice(module, reversed_slice_size) + + def _set_gradient_checkpointing(self, module, value: bool = False) -> None: + if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)): + module.gradient_checkpointing = value + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + controlnet_cond: torch.FloatTensor, + conditioning_scale: float = 1.0, + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guess_mode: bool = False, + return_dict: bool = True, + ) -> Union[ControlNetOutput, Tuple[Tuple[torch.FloatTensor, ...], torch.FloatTensor]]: + """ + The [`ControlNetModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor. + timestep (`Union[torch.Tensor, float, int]`): + The number of timesteps to denoise an input. + encoder_hidden_states (`torch.Tensor`): + The encoder hidden states. + controlnet_cond (`torch.FloatTensor`): + The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. + conditioning_scale (`float`, defaults to `1.0`): + The scale factor for ControlNet outputs. + class_labels (`torch.Tensor`, *optional*, defaults to `None`): + Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. 
+ timestep_cond (`torch.Tensor`, *optional*, defaults to `None`): + Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the + timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep + embeddings. + attention_mask (`torch.Tensor`, *optional*, defaults to `None`): + An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask + is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large + negative values to the attention scores corresponding to "discard" tokens. + added_cond_kwargs (`dict`): + Additional conditions for the Stable Diffusion XL UNet. + cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`): + A kwargs dictionary that if specified is passed along to the `AttnProcessor`. + guess_mode (`bool`, defaults to `False`): + In this mode, the ControlNet encoder tries its best to recognize the input content of the input even if + you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended. + return_dict (`bool`, defaults to `True`): + Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple. + + Returns: + [`~models.controlnet.ControlNetOutput`] **or** `tuple`: + If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is + returned where the first element is the sample tensor. + """ + # check channel order + channel_order = self.config.controlnet_conditioning_channel_order + + if channel_order == "rgb": + # in rgb order by default + ... + elif channel_order == "bgr": + controlnet_cond = torch.flip(controlnet_cond, dims=[1]) + else: + raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}") + + # prepare attention_mask + if attention_mask is not None: + attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1) + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = sample.device.type == "mps" + if isinstance(timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(sample.shape[0]) + + t_emb = self.time_proj(timesteps) + + # timesteps does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. 
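+        # `t_emb` has shape `(batch_size, block_out_channels[0])`; `self.time_embedding` projects it to
+        # `time_embed_dim = 4 * block_out_channels[0]`.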
+ t_emb = t_emb.to(dtype=sample.dtype) + + emb = self.time_embedding(t_emb, timestep_cond) + aug_emb = None + + if self.class_embedding is not None: + if class_labels is None: + raise ValueError("class_labels should be provided when num_class_embeds > 0") + + if self.config.class_embed_type == "timestep": + class_labels = self.time_proj(class_labels) + + class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) + emb = emb + class_emb + + if self.config.addition_embed_type is not None: + if self.config.addition_embed_type == "text": + aug_emb = self.add_embedding(encoder_hidden_states) + + elif self.config.addition_embed_type == "text_time": + if "text_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" + ) + text_embeds = added_cond_kwargs.get("text_embeds") + if "time_ids" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" + ) + time_ids = added_cond_kwargs.get("time_ids") + time_embeds = self.add_time_proj(time_ids.flatten()) + time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) + + add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) + add_embeds = add_embeds.to(emb.dtype) + aug_emb = self.add_embedding(add_embeds) + + emb = emb + aug_emb if aug_emb is not None else emb + + # 2. pre-process + sample = self.conv_in(sample) + + controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) + sample = sample + controlnet_cond + + # 3. down + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb) + + down_block_res_samples += res_samples + + # 4. mid + if self.mid_block is not None: + if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample = self.mid_block(sample, emb) + + # 5. Control net blocks + + controlnet_down_block_res_samples = () + + for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): + down_block_res_sample = controlnet_block(down_block_res_sample) + controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,) + + down_block_res_samples = controlnet_down_block_res_samples + + mid_block_res_sample = self.controlnet_mid_block(sample) + + # 6. 
scaling + if guess_mode and not self.config.global_pool_conditions: + scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0 + scales = scales * conditioning_scale + down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)] + mid_block_res_sample = mid_block_res_sample * scales[-1] # last one + else: + down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] + mid_block_res_sample = mid_block_res_sample * conditioning_scale + + if self.config.global_pool_conditions: + down_block_res_samples = [ + torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples + ] + mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True) + + if not return_dict: + return (down_block_res_samples, mid_block_res_sample) + + return ControlNetOutput( + down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample + ) + + +def zero_module(module): + for p in module.parameters(): + nn.init.zeros_(p) + return module diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/controlnet_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/controlnet_flax.py new file mode 100644 index 000000000..6f9b201aa --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/controlnet_flax.py @@ -0,0 +1,395 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional, Tuple, Union + +import flax +import flax.linen as nn +import jax +import jax.numpy as jnp +from flax.core.frozen_dict import FrozenDict + +from ..configuration_utils import ConfigMixin, flax_register_to_config +from ..utils import BaseOutput +from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps +from .modeling_flax_utils import FlaxModelMixin +from .unets.unet_2d_blocks_flax import ( + FlaxCrossAttnDownBlock2D, + FlaxDownBlock2D, + FlaxUNetMidBlock2DCrossAttn, +) + + +@flax.struct.dataclass +class FlaxControlNetOutput(BaseOutput): + """ + The output of [`FlaxControlNetModel`]. + + Args: + down_block_res_samples (`jnp.ndarray`): + mid_block_res_sample (`jnp.ndarray`): + """ + + down_block_res_samples: jnp.ndarray + mid_block_res_sample: jnp.ndarray + + +class FlaxControlNetConditioningEmbedding(nn.Module): + conditioning_embedding_channels: int + block_out_channels: Tuple[int, ...] 
= (16, 32, 96, 256) + dtype: jnp.dtype = jnp.float32 + + def setup(self) -> None: + self.conv_in = nn.Conv( + self.block_out_channels[0], + kernel_size=(3, 3), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + blocks = [] + for i in range(len(self.block_out_channels) - 1): + channel_in = self.block_out_channels[i] + channel_out = self.block_out_channels[i + 1] + conv1 = nn.Conv( + channel_in, + kernel_size=(3, 3), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + blocks.append(conv1) + conv2 = nn.Conv( + channel_out, + kernel_size=(3, 3), + strides=(2, 2), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + blocks.append(conv2) + self.blocks = blocks + + self.conv_out = nn.Conv( + self.conditioning_embedding_channels, + kernel_size=(3, 3), + padding=((1, 1), (1, 1)), + kernel_init=nn.initializers.zeros_init(), + bias_init=nn.initializers.zeros_init(), + dtype=self.dtype, + ) + + def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray: + embedding = self.conv_in(conditioning) + embedding = nn.silu(embedding) + + for block in self.blocks: + embedding = block(embedding) + embedding = nn.silu(embedding) + + embedding = self.conv_out(embedding) + + return embedding + + +@flax_register_to_config +class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin): + r""" + A ControlNet model. + + This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for it’s generic methods + implemented for all models (such as downloading or saving). + + This model is also a Flax Linen [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module) + subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to its + general usage and behavior. + + Inherent JAX features such as the following are supported: + + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + sample_size (`int`, *optional*): + The size of the input sample. + in_channels (`int`, *optional*, defaults to 4): + The number of channels in the input sample. + down_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D")`): + The tuple of downsample blocks to use. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): + The number of layers per block. + attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8): + The dimension of the attention heads. + num_attention_heads (`int` or `Tuple[int]`, *optional*): + The number of attention heads. + cross_attention_dim (`int`, *optional*, defaults to 768): + The dimension of the cross attention features. + dropout (`float`, *optional*, defaults to 0): + Dropout probability for down, up and bottleneck blocks. + flip_sin_to_cos (`bool`, *optional*, defaults to `True`): + Whether to flip the sin to cos in the time embedding. + freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. 
+ controlnet_conditioning_channel_order (`str`, *optional*, defaults to `rgb`): + The channel order of conditional image. Will convert to `rgb` if it's `bgr`. + conditioning_embedding_out_channels (`tuple`, *optional*, defaults to `(16, 32, 96, 256)`): + The tuple of output channel for each block in the `conditioning_embedding` layer. + """ + + sample_size: int = 32 + in_channels: int = 4 + down_block_types: Tuple[str, ...] = ( + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "DownBlock2D", + ) + only_cross_attention: Union[bool, Tuple[bool, ...]] = False + block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280) + layers_per_block: int = 2 + attention_head_dim: Union[int, Tuple[int, ...]] = 8 + num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None + cross_attention_dim: int = 1280 + dropout: float = 0.0 + use_linear_projection: bool = False + dtype: jnp.dtype = jnp.float32 + flip_sin_to_cos: bool = True + freq_shift: int = 0 + controlnet_conditioning_channel_order: str = "rgb" + conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256) + + def init_weights(self, rng: jax.Array) -> FrozenDict: + # init input tensors + sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) + sample = jnp.zeros(sample_shape, dtype=jnp.float32) + timesteps = jnp.ones((1,), dtype=jnp.int32) + encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) + controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8) + controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32) + + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"] + + def setup(self) -> None: + block_out_channels = self.block_out_channels + time_embed_dim = block_out_channels[0] * 4 + + # If `num_attention_heads` is not defined (which is the case for most models) + # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. + # The reason for this behavior is to correct for incorrectly named variables that were introduced + # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 + # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking + # which is why we correct for the naming here. 
+ num_attention_heads = self.num_attention_heads or self.attention_head_dim + + # input + self.conv_in = nn.Conv( + block_out_channels[0], + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + # time + self.time_proj = FlaxTimesteps( + block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift + ) + self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype) + + self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding( + conditioning_embedding_channels=block_out_channels[0], + block_out_channels=self.conditioning_embedding_out_channels, + ) + + only_cross_attention = self.only_cross_attention + if isinstance(only_cross_attention, bool): + only_cross_attention = (only_cross_attention,) * len(self.down_block_types) + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(self.down_block_types) + + # down + down_blocks = [] + controlnet_down_blocks = [] + + output_channel = block_out_channels[0] + + controlnet_block = nn.Conv( + output_channel, + kernel_size=(1, 1), + padding="VALID", + kernel_init=nn.initializers.zeros_init(), + bias_init=nn.initializers.zeros_init(), + dtype=self.dtype, + ) + controlnet_down_blocks.append(controlnet_block) + + for i, down_block_type in enumerate(self.down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + if down_block_type == "CrossAttnDownBlock2D": + down_block = FlaxCrossAttnDownBlock2D( + in_channels=input_channel, + out_channels=output_channel, + dropout=self.dropout, + num_layers=self.layers_per_block, + num_attention_heads=num_attention_heads[i], + add_downsample=not is_final_block, + use_linear_projection=self.use_linear_projection, + only_cross_attention=only_cross_attention[i], + dtype=self.dtype, + ) + else: + down_block = FlaxDownBlock2D( + in_channels=input_channel, + out_channels=output_channel, + dropout=self.dropout, + num_layers=self.layers_per_block, + add_downsample=not is_final_block, + dtype=self.dtype, + ) + + down_blocks.append(down_block) + + for _ in range(self.layers_per_block): + controlnet_block = nn.Conv( + output_channel, + kernel_size=(1, 1), + padding="VALID", + kernel_init=nn.initializers.zeros_init(), + bias_init=nn.initializers.zeros_init(), + dtype=self.dtype, + ) + controlnet_down_blocks.append(controlnet_block) + + if not is_final_block: + controlnet_block = nn.Conv( + output_channel, + kernel_size=(1, 1), + padding="VALID", + kernel_init=nn.initializers.zeros_init(), + bias_init=nn.initializers.zeros_init(), + dtype=self.dtype, + ) + controlnet_down_blocks.append(controlnet_block) + + self.down_blocks = down_blocks + self.controlnet_down_blocks = controlnet_down_blocks + + # mid + mid_block_channel = block_out_channels[-1] + self.mid_block = FlaxUNetMidBlock2DCrossAttn( + in_channels=mid_block_channel, + dropout=self.dropout, + num_attention_heads=num_attention_heads[-1], + use_linear_projection=self.use_linear_projection, + dtype=self.dtype, + ) + + self.controlnet_mid_block = nn.Conv( + mid_block_channel, + kernel_size=(1, 1), + padding="VALID", + kernel_init=nn.initializers.zeros_init(), + bias_init=nn.initializers.zeros_init(), + dtype=self.dtype, + ) + + def __call__( + self, + sample: jnp.ndarray, + timesteps: Union[jnp.ndarray, float, int], + encoder_hidden_states: jnp.ndarray, + controlnet_cond: jnp.ndarray, + conditioning_scale: float = 1.0, + return_dict: bool = True, + 
train: bool = False, + ) -> Union[FlaxControlNetOutput, Tuple[Tuple[jnp.ndarray, ...], jnp.ndarray]]: + r""" + Args: + sample (`jnp.ndarray`): (batch, channel, height, width) noisy inputs tensor + timestep (`jnp.ndarray` or `float` or `int`): timesteps + encoder_hidden_states (`jnp.ndarray`): (batch_size, sequence_length, hidden_size) encoder hidden states + controlnet_cond (`jnp.ndarray`): (batch, channel, height, width) the conditional input tensor + conditioning_scale (`float`, *optional*, defaults to `1.0`): the scale factor for controlnet outputs + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] instead of a + plain tuple. + train (`bool`, *optional*, defaults to `False`): + Use deterministic functions and disable dropout when not training. + + Returns: + [`~models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] or `tuple`: + [`~models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. + """ + channel_order = self.controlnet_conditioning_channel_order + if channel_order == "bgr": + controlnet_cond = jnp.flip(controlnet_cond, axis=1) + + # 1. time + if not isinstance(timesteps, jnp.ndarray): + timesteps = jnp.array([timesteps], dtype=jnp.int32) + elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0: + timesteps = timesteps.astype(dtype=jnp.float32) + timesteps = jnp.expand_dims(timesteps, 0) + + t_emb = self.time_proj(timesteps) + t_emb = self.time_embedding(t_emb) + + # 2. pre-process + sample = jnp.transpose(sample, (0, 2, 3, 1)) + sample = self.conv_in(sample) + + controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1)) + controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) + sample += controlnet_cond + + # 3. down + down_block_res_samples = (sample,) + for down_block in self.down_blocks: + if isinstance(down_block, FlaxCrossAttnDownBlock2D): + sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train) + else: + sample, res_samples = down_block(sample, t_emb, deterministic=not train) + down_block_res_samples += res_samples + + # 4. mid + sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train) + + # 5. contronet blocks + controlnet_down_block_res_samples = () + for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): + down_block_res_sample = controlnet_block(down_block_res_sample) + controlnet_down_block_res_samples += (down_block_res_sample,) + + down_block_res_samples = controlnet_down_block_res_samples + + mid_block_res_sample = self.controlnet_mid_block(sample) + + # 6. 
scaling + down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] + mid_block_res_sample *= conditioning_scale + + if not return_dict: + return (down_block_res_samples, mid_block_res_sample) + + return FlaxControlNetOutput( + down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/downsampling.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/downsampling.py new file mode 100644 index 000000000..9ae28e950 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/downsampling.py @@ -0,0 +1,334 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..utils import deprecate +from .normalization import RMSNorm +from .upsampling import upfirdn2d_native + + +class Downsample1D(nn.Module): + """A 1D downsampling layer with an optional convolution. + + Parameters: + channels (`int`): + number of channels in the inputs and outputs. + use_conv (`bool`, default `False`): + option to use a convolution. + out_channels (`int`, optional): + number of output channels. Defaults to `channels`. + padding (`int`, default `1`): + padding for the convolution. + name (`str`, default `conv`): + name of the downsampling 1D layer. + """ + + def __init__( + self, + channels: int, + use_conv: bool = False, + out_channels: Optional[int] = None, + padding: int = 1, + name: str = "conv", + ): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.padding = padding + stride = 2 + self.name = name + + if use_conv: + self.conv = nn.Conv1d(self.channels, self.out_channels, 3, stride=stride, padding=padding) + else: + assert self.channels == self.out_channels + self.conv = nn.AvgPool1d(kernel_size=stride, stride=stride) + + def forward(self, inputs: torch.Tensor) -> torch.Tensor: + assert inputs.shape[1] == self.channels + return self.conv(inputs) + + +class Downsample2D(nn.Module): + """A 2D downsampling layer with an optional convolution. + + Parameters: + channels (`int`): + number of channels in the inputs and outputs. + use_conv (`bool`, default `False`): + option to use a convolution. + out_channels (`int`, optional): + number of output channels. Defaults to `channels`. + padding (`int`, default `1`): + padding for the convolution. + name (`str`, default `conv`): + name of the downsampling 2D layer. 
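+
+    Example (a small shape sketch):
+
+    ```py
+    >>> import torch
+
+    >>> down = Downsample2D(channels=64, use_conv=True)
+    >>> down(torch.randn(1, 64, 32, 32)).shape
+    torch.Size([1, 64, 16, 16])
+    ```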
+ """ + + def __init__( + self, + channels: int, + use_conv: bool = False, + out_channels: Optional[int] = None, + padding: int = 1, + name: str = "conv", + kernel_size=3, + norm_type=None, + eps=None, + elementwise_affine=None, + bias=True, + ): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.padding = padding + stride = 2 + self.name = name + conv_cls = nn.Conv2d + + if norm_type == "ln_norm": + self.norm = nn.LayerNorm(channels, eps, elementwise_affine) + elif norm_type == "rms_norm": + self.norm = RMSNorm(channels, eps, elementwise_affine) + elif norm_type is None: + self.norm = None + else: + raise ValueError(f"unknown norm_type: {norm_type}") + + if use_conv: + conv = conv_cls( + self.channels, self.out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias + ) + else: + assert self.channels == self.out_channels + conv = nn.AvgPool2d(kernel_size=stride, stride=stride) + + # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed + if name == "conv": + self.Conv2d_0 = conv + self.conv = conv + elif name == "Conv2d_0": + self.conv = conv + else: + self.conv = conv + + def forward(self, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + assert hidden_states.shape[1] == self.channels + + if self.norm is not None: + hidden_states = self.norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + + if self.use_conv and self.padding == 0: + pad = (0, 1, 0, 1) + hidden_states = F.pad(hidden_states, pad, mode="constant", value=0) + + assert hidden_states.shape[1] == self.channels + + hidden_states = self.conv(hidden_states) + + return hidden_states + + +class FirDownsample2D(nn.Module): + """A 2D FIR downsampling layer with an optional convolution. + + Parameters: + channels (`int`): + number of channels in the inputs and outputs. + use_conv (`bool`, default `False`): + option to use a convolution. + out_channels (`int`, optional): + number of output channels. Defaults to `channels`. + fir_kernel (`tuple`, default `(1, 3, 3, 1)`): + kernel for the FIR filter. + """ + + def __init__( + self, + channels: Optional[int] = None, + out_channels: Optional[int] = None, + use_conv: bool = False, + fir_kernel: Tuple[int, int, int, int] = (1, 3, 3, 1), + ): + super().__init__() + out_channels = out_channels if out_channels else channels + if use_conv: + self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) + self.fir_kernel = fir_kernel + self.use_conv = use_conv + self.out_channels = out_channels + + def _downsample_2d( + self, + hidden_states: torch.FloatTensor, + weight: Optional[torch.FloatTensor] = None, + kernel: Optional[torch.FloatTensor] = None, + factor: int = 2, + gain: float = 1, + ) -> torch.FloatTensor: + """Fused `Conv2d()` followed by `downsample_2d()`. + Padding is performed only once at the beginning, not between the operations. The fused op is considerably more + efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of + arbitrary order. 
+ + Args: + hidden_states (`torch.FloatTensor`): + Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. + weight (`torch.FloatTensor`, *optional*): + Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be + performed by `inChannels = x.shape[0] // numGroups`. + kernel (`torch.FloatTensor`, *optional*): + FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which + corresponds to average pooling. + factor (`int`, *optional*, default to `2`): + Integer downsampling factor. + gain (`float`, *optional*, default to `1.0`): + Scaling factor for signal magnitude. + + Returns: + output (`torch.FloatTensor`): + Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and same + datatype as `x`. + """ + + assert isinstance(factor, int) and factor >= 1 + if kernel is None: + kernel = [1] * factor + + # setup kernel + kernel = torch.tensor(kernel, dtype=torch.float32) + if kernel.ndim == 1: + kernel = torch.outer(kernel, kernel) + kernel /= torch.sum(kernel) + + kernel = kernel * gain + + if self.use_conv: + _, _, convH, convW = weight.shape + pad_value = (kernel.shape[0] - factor) + (convW - 1) + stride_value = [factor, factor] + upfirdn_input = upfirdn2d_native( + hidden_states, + torch.tensor(kernel, device=hidden_states.device), + pad=((pad_value + 1) // 2, pad_value // 2), + ) + output = F.conv2d(upfirdn_input, weight, stride=stride_value, padding=0) + else: + pad_value = kernel.shape[0] - factor + output = upfirdn2d_native( + hidden_states, + torch.tensor(kernel, device=hidden_states.device), + down=factor, + pad=((pad_value + 1) // 2, pad_value // 2), + ) + + return output + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + if self.use_conv: + downsample_input = self._downsample_2d(hidden_states, weight=self.Conv2d_0.weight, kernel=self.fir_kernel) + hidden_states = downsample_input + self.Conv2d_0.bias.reshape(1, -1, 1, 1) + else: + hidden_states = self._downsample_2d(hidden_states, kernel=self.fir_kernel, factor=2) + + return hidden_states + + +# downsample/upsample layer used in k-upscaler, might be able to use FirDownsample2D/DirUpsample2D instead +class KDownsample2D(nn.Module): + r"""A 2D K-downsampling layer. + + Parameters: + pad_mode (`str`, *optional*, default to `"reflect"`): the padding mode to use. + """ + + def __init__(self, pad_mode: str = "reflect"): + super().__init__() + self.pad_mode = pad_mode + kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) + self.pad = kernel_1d.shape[1] // 2 - 1 + self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False) + + def forward(self, inputs: torch.Tensor) -> torch.Tensor: + inputs = F.pad(inputs, (self.pad,) * 4, self.pad_mode) + weight = inputs.new_zeros( + [ + inputs.shape[1], + inputs.shape[1], + self.kernel.shape[0], + self.kernel.shape[1], + ] + ) + indices = torch.arange(inputs.shape[1], device=inputs.device) + kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1) + weight[indices, indices] = kernel + return F.conv2d(inputs, weight, stride=2) + + +def downsample_2d( + hidden_states: torch.FloatTensor, + kernel: Optional[torch.FloatTensor] = None, + factor: int = 2, + gain: float = 1, +) -> torch.FloatTensor: + r"""Downsample2D a batch of 2D images with the given filter. + Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the + given filter. 
The filter is normalized so that if the input pixels are constant, they will be scaled by the + specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its + shape is a multiple of the downsampling factor. + + Args: + hidden_states (`torch.FloatTensor`) + Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. + kernel (`torch.FloatTensor`, *optional*): + FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which + corresponds to average pooling. + factor (`int`, *optional*, default to `2`): + Integer downsampling factor. + gain (`float`, *optional*, default to `1.0`): + Scaling factor for signal magnitude. + + Returns: + output (`torch.FloatTensor`): + Tensor of the shape `[N, C, H // factor, W // factor]` + """ + + assert isinstance(factor, int) and factor >= 1 + if kernel is None: + kernel = [1] * factor + + kernel = torch.tensor(kernel, dtype=torch.float32) + if kernel.ndim == 1: + kernel = torch.outer(kernel, kernel) + kernel /= torch.sum(kernel) + + kernel = kernel * gain + pad_value = kernel.shape[0] - factor + output = upfirdn2d_native( + hidden_states, + kernel.to(device=hidden_states.device), + down=factor, + pad=((pad_value + 1) // 2, pad_value // 2), + ) + return output diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/dual_transformer_2d.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/dual_transformer_2d.py new file mode 100644 index 000000000..b8e40f14d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/dual_transformer_2d.py @@ -0,0 +1,20 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from ..utils import deprecate +from .transformers.dual_transformer_2d import DualTransformer2DModel + + +class DualTransformer2DModel(DualTransformer2DModel): + deprecation_message = "Importing `DualTransformer2DModel` from `diffusers.models.dual_transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.dual_transformer_2d import DualTransformer2DModel`, instead." + deprecate("DualTransformer2DModel", "0.29", deprecation_message) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/embeddings.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/embeddings.py new file mode 100644 index 000000000..c15ff24cb --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/embeddings.py @@ -0,0 +1,914 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +from torch import nn + +from ..utils import deprecate +from .activations import get_activation +from .attention_processor import Attention + + +def get_timestep_embedding( + timesteps: torch.Tensor, + embedding_dim: int, + flip_sin_to_cos: bool = False, + downscale_freq_shift: float = 1, + scale: float = 1, + max_period: int = 10000, +): + """ + This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings. + + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the + embeddings. :return: an [N x dim] Tensor of positional embeddings. + """ + assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" + + half_dim = embedding_dim // 2 + exponent = -math.log(max_period) * torch.arange( + start=0, end=half_dim, dtype=torch.float32, device=timesteps.device + ) + exponent = exponent / (half_dim - downscale_freq_shift) + + emb = torch.exp(exponent) + emb = timesteps[:, None].float() * emb[None, :] + + # scale embeddings + emb = scale * emb + + # concat sine and cosine embeddings + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) + + # flip sine and cosine embeddings + if flip_sin_to_cos: + emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1) + + # zero pad + if embedding_dim % 2 == 1: + emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) + return emb + + +def get_2d_sincos_pos_embed( + embed_dim, grid_size, cls_token=False, extra_tokens=0, interpolation_scale=1.0, base_size=16 +): + """ + grid_size: int of the grid height and width return: pos_embed: [grid_size*grid_size, embed_dim] or + [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + if isinstance(grid_size, int): + grid_size = (grid_size, grid_size) + + grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0] / base_size) / interpolation_scale + grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1] / base_size) / interpolation_scale + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) + + grid = grid.reshape([2, 1, grid_size[1], grid_size[0]]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + if cls_token and extra_tokens > 0: + pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0) + return pos_embed + + +def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): + if embed_dim % 2 != 0: + raise ValueError("embed_dim must be divisible by 2") + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + + emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) + return emb + + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D) + """ 
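# A quick shape sketch (illustrative only), assuming get_timestep_embedding defined above is in
# scope: each timestep index is mapped to concatenated sin/cos features of width embedding_dim.
import torch

t = torch.tensor([0, 10, 999])                      # three example diffusion timesteps
emb = get_timestep_embedding(t, embedding_dim=320)
print(emb.shape)                                    # torch.Size([3, 320])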
+ if embed_dim % 2 != 0: + raise ValueError("embed_dim must be divisible by 2") + + omega = np.arange(embed_dim // 2, dtype=np.float64) + omega /= embed_dim / 2.0 + omega = 1.0 / 10000**omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) + return emb + + +class PatchEmbed(nn.Module): + """2D Image to Patch Embedding""" + + def __init__( + self, + height=224, + width=224, + patch_size=16, + in_channels=3, + embed_dim=768, + layer_norm=False, + flatten=True, + bias=True, + interpolation_scale=1, + ): + super().__init__() + + num_patches = (height // patch_size) * (width // patch_size) + self.flatten = flatten + self.layer_norm = layer_norm + + self.proj = nn.Conv2d( + in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias + ) + if layer_norm: + self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6) + else: + self.norm = None + + self.patch_size = patch_size + # See: + # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L161 + self.height, self.width = height // patch_size, width // patch_size + self.base_size = height // patch_size + self.interpolation_scale = interpolation_scale + pos_embed = get_2d_sincos_pos_embed( + embed_dim, int(num_patches**0.5), base_size=self.base_size, interpolation_scale=self.interpolation_scale + ) + self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False) + + def forward(self, latent): + height, width = latent.shape[-2] // self.patch_size, latent.shape[-1] // self.patch_size + + latent = self.proj(latent) + if self.flatten: + latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC + if self.layer_norm: + latent = self.norm(latent) + + # Interpolate positional embeddings if needed. 
+ # (For PixArt-Alpha: https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L162C151-L162C160) + if self.height != height or self.width != width: + pos_embed = get_2d_sincos_pos_embed( + embed_dim=self.pos_embed.shape[-1], + grid_size=(height, width), + base_size=self.base_size, + interpolation_scale=self.interpolation_scale, + ) + pos_embed = torch.from_numpy(pos_embed) + pos_embed = pos_embed.float().unsqueeze(0).to(latent.device) + else: + pos_embed = self.pos_embed + + return (latent + pos_embed).to(latent.dtype) + + +class TimestepEmbedding(nn.Module): + def __init__( + self, + in_channels: int, + time_embed_dim: int, + act_fn: str = "silu", + out_dim: int = None, + post_act_fn: Optional[str] = None, + cond_proj_dim=None, + sample_proj_bias=True, + ): + super().__init__() + linear_cls = nn.Linear + + self.linear_1 = linear_cls(in_channels, time_embed_dim, sample_proj_bias) + + if cond_proj_dim is not None: + self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False) + else: + self.cond_proj = None + + self.act = get_activation(act_fn) + + if out_dim is not None: + time_embed_dim_out = out_dim + else: + time_embed_dim_out = time_embed_dim + self.linear_2 = linear_cls(time_embed_dim, time_embed_dim_out, sample_proj_bias) + + if post_act_fn is None: + self.post_act = None + else: + self.post_act = get_activation(post_act_fn) + + def forward(self, sample, condition=None): + if condition is not None: + sample = sample + self.cond_proj(condition) + sample = self.linear_1(sample) + + if self.act is not None: + sample = self.act(sample) + + sample = self.linear_2(sample) + + if self.post_act is not None: + sample = self.post_act(sample) + return sample + + +class Timesteps(nn.Module): + def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float): + super().__init__() + self.num_channels = num_channels + self.flip_sin_to_cos = flip_sin_to_cos + self.downscale_freq_shift = downscale_freq_shift + + def forward(self, timesteps): + t_emb = get_timestep_embedding( + timesteps, + self.num_channels, + flip_sin_to_cos=self.flip_sin_to_cos, + downscale_freq_shift=self.downscale_freq_shift, + ) + return t_emb + + +class GaussianFourierProjection(nn.Module): + """Gaussian Fourier embeddings for noise levels.""" + + def __init__( + self, embedding_size: int = 256, scale: float = 1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False + ): + super().__init__() + self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) + self.log = log + self.flip_sin_to_cos = flip_sin_to_cos + + if set_W_to_weight: + # to delete later + self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) + + self.weight = self.W + + def forward(self, x): + if self.log: + x = torch.log(x) + + x_proj = x[:, None] * self.weight[None, :] * 2 * np.pi + + if self.flip_sin_to_cos: + out = torch.cat([torch.cos(x_proj), torch.sin(x_proj)], dim=-1) + else: + out = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1) + return out + + +class SinusoidalPositionalEmbedding(nn.Module): + """Apply positional information to a sequence of embeddings. + + Takes in a sequence of embeddings with shape (batch_size, seq_length, embed_dim) and adds positional embeddings to + them + + Args: + embed_dim: (int): Dimension of the positional embedding. 
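# Illustrative sketch of how the two modules above are typically chained for time conditioning
# (assumes Timesteps and TimestepEmbedding are in scope; the 320/1280 widths are example values):
import torch

time_proj = Timesteps(num_channels=320, flip_sin_to_cos=True, downscale_freq_shift=0)
time_embedding = TimestepEmbedding(in_channels=320, time_embed_dim=1280)

t = torch.randint(0, 1000, (4,))
temb = time_embedding(time_proj(t))  # (4, 320) sinusoidal features -> (4, 1280) learned embedding
print(temb.shape)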
+ max_seq_length: Maximum sequence length to apply positional embeddings + + """ + + def __init__(self, embed_dim: int, max_seq_length: int = 32): + super().__init__() + position = torch.arange(max_seq_length).unsqueeze(1) + div_term = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim)) + pe = torch.zeros(1, max_seq_length, embed_dim) + pe[0, :, 0::2] = torch.sin(position * div_term) + pe[0, :, 1::2] = torch.cos(position * div_term) + self.register_buffer("pe", pe) + + def forward(self, x): + _, seq_length, _ = x.shape + x = x + self.pe[:, :seq_length] + return x + + +class ImagePositionalEmbeddings(nn.Module): + """ + Converts latent image classes into vector embeddings. Sums the vector embeddings with positional embeddings for the + height and width of the latent space. + + For more details, see figure 10 of the dall-e paper: https://arxiv.org/abs/2102.12092 + + For VQ-diffusion: + + Output vector embeddings are used as input for the transformer. + + Note that the vector embeddings for the transformer are different than the vector embeddings from the VQVAE. + + Args: + num_embed (`int`): + Number of embeddings for the latent pixels embeddings. + height (`int`): + Height of the latent image i.e. the number of height embeddings. + width (`int`): + Width of the latent image i.e. the number of width embeddings. + embed_dim (`int`): + Dimension of the produced vector embeddings. Used for the latent pixel, height, and width embeddings. + """ + + def __init__( + self, + num_embed: int, + height: int, + width: int, + embed_dim: int, + ): + super().__init__() + + self.height = height + self.width = width + self.num_embed = num_embed + self.embed_dim = embed_dim + + self.emb = nn.Embedding(self.num_embed, embed_dim) + self.height_emb = nn.Embedding(self.height, embed_dim) + self.width_emb = nn.Embedding(self.width, embed_dim) + + def forward(self, index): + emb = self.emb(index) + + height_emb = self.height_emb(torch.arange(self.height, device=index.device).view(1, self.height)) + + # 1 x H x D -> 1 x H x 1 x D + height_emb = height_emb.unsqueeze(2) + + width_emb = self.width_emb(torch.arange(self.width, device=index.device).view(1, self.width)) + + # 1 x W x D -> 1 x 1 x W x D + width_emb = width_emb.unsqueeze(1) + + pos_emb = height_emb + width_emb + + # 1 x H x W x D -> 1 x L xD + pos_emb = pos_emb.view(1, self.height * self.width, -1) + + emb = emb + pos_emb[:, : emb.shape[1], :] + + return emb + + +class LabelEmbedding(nn.Module): + """ + Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance. + + Args: + num_classes (`int`): The number of classes. + hidden_size (`int`): The size of the vector embeddings. + dropout_prob (`float`): The probability of dropping a label. + """ + + def __init__(self, num_classes, hidden_size, dropout_prob): + super().__init__() + use_cfg_embedding = dropout_prob > 0 + self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size) + self.num_classes = num_classes + self.dropout_prob = dropout_prob + + def token_drop(self, labels, force_drop_ids=None): + """ + Drops labels to enable classifier-free guidance. 
+ """ + if force_drop_ids is None: + drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob + else: + drop_ids = torch.tensor(force_drop_ids == 1) + labels = torch.where(drop_ids, self.num_classes, labels) + return labels + + def forward(self, labels: torch.LongTensor, force_drop_ids=None): + use_dropout = self.dropout_prob > 0 + if (self.training and use_dropout) or (force_drop_ids is not None): + labels = self.token_drop(labels, force_drop_ids) + embeddings = self.embedding_table(labels) + return embeddings + + +class TextImageProjection(nn.Module): + def __init__( + self, + text_embed_dim: int = 1024, + image_embed_dim: int = 768, + cross_attention_dim: int = 768, + num_image_text_embeds: int = 10, + ): + super().__init__() + + self.num_image_text_embeds = num_image_text_embeds + self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim) + self.text_proj = nn.Linear(text_embed_dim, cross_attention_dim) + + def forward(self, text_embeds: torch.FloatTensor, image_embeds: torch.FloatTensor): + batch_size = text_embeds.shape[0] + + # image + image_text_embeds = self.image_embeds(image_embeds) + image_text_embeds = image_text_embeds.reshape(batch_size, self.num_image_text_embeds, -1) + + # text + text_embeds = self.text_proj(text_embeds) + + return torch.cat([image_text_embeds, text_embeds], dim=1) + + +class ImageProjection(nn.Module): + def __init__( + self, + image_embed_dim: int = 768, + cross_attention_dim: int = 768, + num_image_text_embeds: int = 32, + ): + super().__init__() + + self.num_image_text_embeds = num_image_text_embeds + self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim) + self.norm = nn.LayerNorm(cross_attention_dim) + + def forward(self, image_embeds: torch.FloatTensor): + batch_size = image_embeds.shape[0] + + # image + image_embeds = self.image_embeds(image_embeds) + image_embeds = image_embeds.reshape(batch_size, self.num_image_text_embeds, -1) + image_embeds = self.norm(image_embeds) + return image_embeds + + +class IPAdapterFullImageProjection(nn.Module): + def __init__(self, image_embed_dim=1024, cross_attention_dim=1024): + super().__init__() + from .attention import FeedForward + + self.ff = FeedForward(image_embed_dim, cross_attention_dim, mult=1, activation_fn="gelu") + self.norm = nn.LayerNorm(cross_attention_dim) + + def forward(self, image_embeds: torch.FloatTensor): + return self.norm(self.ff(image_embeds)) + + +class CombinedTimestepLabelEmbeddings(nn.Module): + def __init__(self, num_classes, embedding_dim, class_dropout_prob=0.1): + super().__init__() + + self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=1) + self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) + self.class_embedder = LabelEmbedding(num_classes, embedding_dim, class_dropout_prob) + + def forward(self, timestep, class_labels, hidden_dtype=None): + timesteps_proj = self.time_proj(timestep) + timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, D) + + class_labels = self.class_embedder(class_labels) # (N, D) + + conditioning = timesteps_emb + class_labels # (N, D) + + return conditioning + + +class TextTimeEmbedding(nn.Module): + def __init__(self, encoder_dim: int, time_embed_dim: int, num_heads: int = 64): + super().__init__() + self.norm1 = nn.LayerNorm(encoder_dim) + self.pool = AttentionPooling(num_heads, encoder_dim) + self.proj = nn.Linear(encoder_dim, time_embed_dim) + 
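# Illustrative sketch (not upstream code), assuming the ImageProjection class defined above is in
# scope: one pooled image embedding is expanded into a short sequence of cross-attention tokens.
import torch

proj = ImageProjection(image_embed_dim=1024, cross_attention_dim=768, num_image_text_embeds=4)
image_embeds = torch.randn(2, 1024)   # e.g. pooled image-encoder embeddings (example sizes)
tokens = proj(image_embeds)
print(tokens.shape)                   # torch.Size([2, 4, 768])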
self.norm2 = nn.LayerNorm(time_embed_dim) + + def forward(self, hidden_states): + hidden_states = self.norm1(hidden_states) + hidden_states = self.pool(hidden_states) + hidden_states = self.proj(hidden_states) + hidden_states = self.norm2(hidden_states) + return hidden_states + + +class TextImageTimeEmbedding(nn.Module): + def __init__(self, text_embed_dim: int = 768, image_embed_dim: int = 768, time_embed_dim: int = 1536): + super().__init__() + self.text_proj = nn.Linear(text_embed_dim, time_embed_dim) + self.text_norm = nn.LayerNorm(time_embed_dim) + self.image_proj = nn.Linear(image_embed_dim, time_embed_dim) + + def forward(self, text_embeds: torch.FloatTensor, image_embeds: torch.FloatTensor): + # text + time_text_embeds = self.text_proj(text_embeds) + time_text_embeds = self.text_norm(time_text_embeds) + + # image + time_image_embeds = self.image_proj(image_embeds) + + return time_image_embeds + time_text_embeds + + +class ImageTimeEmbedding(nn.Module): + def __init__(self, image_embed_dim: int = 768, time_embed_dim: int = 1536): + super().__init__() + self.image_proj = nn.Linear(image_embed_dim, time_embed_dim) + self.image_norm = nn.LayerNorm(time_embed_dim) + + def forward(self, image_embeds: torch.FloatTensor): + # image + time_image_embeds = self.image_proj(image_embeds) + time_image_embeds = self.image_norm(time_image_embeds) + return time_image_embeds + + +class ImageHintTimeEmbedding(nn.Module): + def __init__(self, image_embed_dim: int = 768, time_embed_dim: int = 1536): + super().__init__() + self.image_proj = nn.Linear(image_embed_dim, time_embed_dim) + self.image_norm = nn.LayerNorm(time_embed_dim) + self.input_hint_block = nn.Sequential( + nn.Conv2d(3, 16, 3, padding=1), + nn.SiLU(), + nn.Conv2d(16, 16, 3, padding=1), + nn.SiLU(), + nn.Conv2d(16, 32, 3, padding=1, stride=2), + nn.SiLU(), + nn.Conv2d(32, 32, 3, padding=1), + nn.SiLU(), + nn.Conv2d(32, 96, 3, padding=1, stride=2), + nn.SiLU(), + nn.Conv2d(96, 96, 3, padding=1), + nn.SiLU(), + nn.Conv2d(96, 256, 3, padding=1, stride=2), + nn.SiLU(), + nn.Conv2d(256, 4, 3, padding=1), + ) + + def forward(self, image_embeds: torch.FloatTensor, hint: torch.FloatTensor): + # image + time_image_embeds = self.image_proj(image_embeds) + time_image_embeds = self.image_norm(time_image_embeds) + hint = self.input_hint_block(hint) + return time_image_embeds, hint + + +class AttentionPooling(nn.Module): + # Copied from https://github.com/deep-floyd/IF/blob/2f91391f27dd3c468bf174be5805b4cc92980c0b/deepfloyd_if/model/nn.py#L54 + + def __init__(self, num_heads, embed_dim, dtype=None): + super().__init__() + self.dtype = dtype + self.positional_embedding = nn.Parameter(torch.randn(1, embed_dim) / embed_dim**0.5) + self.k_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype) + self.q_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype) + self.v_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype) + self.num_heads = num_heads + self.dim_per_head = embed_dim // self.num_heads + + def forward(self, x): + bs, length, width = x.size() + + def shape(x): + # (bs, length, width) --> (bs, length, n_heads, dim_per_head) + x = x.view(bs, -1, self.num_heads, self.dim_per_head) + # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head) + x = x.transpose(1, 2) + # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head) + x = x.reshape(bs * self.num_heads, -1, self.dim_per_head) + # (bs*n_heads, length, dim_per_head) --> (bs*n_heads, dim_per_head, length) + x = x.transpose(1, 2) + return x + + 
class_token = x.mean(dim=1, keepdim=True) + self.positional_embedding.to(x.dtype) + x = torch.cat([class_token, x], dim=1) # (bs, length+1, width) + + # (bs*n_heads, class_token_length, dim_per_head) + q = shape(self.q_proj(class_token)) + # (bs*n_heads, length+class_token_length, dim_per_head) + k = shape(self.k_proj(x)) + v = shape(self.v_proj(x)) + + # (bs*n_heads, class_token_length, length+class_token_length): + scale = 1 / math.sqrt(math.sqrt(self.dim_per_head)) + weight = torch.einsum("bct,bcs->bts", q * scale, k * scale) # More stable with f16 than dividing afterwards + weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) + + # (bs*n_heads, dim_per_head, class_token_length) + a = torch.einsum("bts,bcs->bct", weight, v) + + # (bs, length+1, width) + a = a.reshape(bs, -1, 1).transpose(1, 2) + + return a[:, 0, :] # cls_token + + +def get_fourier_embeds_from_boundingbox(embed_dim, box): + """ + Args: + embed_dim: int + box: a 3-D tensor [B x N x 4] representing the bounding boxes for GLIGEN pipeline + Returns: + [B x N x embed_dim] tensor of positional embeddings + """ + + batch_size, num_boxes = box.shape[:2] + + emb = 100 ** (torch.arange(embed_dim) / embed_dim) + emb = emb[None, None, None].to(device=box.device, dtype=box.dtype) + emb = emb * box.unsqueeze(-1) + + emb = torch.stack((emb.sin(), emb.cos()), dim=-1) + emb = emb.permute(0, 1, 3, 4, 2).reshape(batch_size, num_boxes, embed_dim * 2 * 4) + + return emb + + +class GLIGENTextBoundingboxProjection(nn.Module): + def __init__(self, positive_len, out_dim, feature_type="text-only", fourier_freqs=8): + super().__init__() + self.positive_len = positive_len + self.out_dim = out_dim + + self.fourier_embedder_dim = fourier_freqs + self.position_dim = fourier_freqs * 2 * 4 # 2: sin/cos, 4: xyxy + + if isinstance(out_dim, tuple): + out_dim = out_dim[0] + + if feature_type == "text-only": + self.linears = nn.Sequential( + nn.Linear(self.positive_len + self.position_dim, 512), + nn.SiLU(), + nn.Linear(512, 512), + nn.SiLU(), + nn.Linear(512, out_dim), + ) + self.null_positive_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) + + elif feature_type == "text-image": + self.linears_text = nn.Sequential( + nn.Linear(self.positive_len + self.position_dim, 512), + nn.SiLU(), + nn.Linear(512, 512), + nn.SiLU(), + nn.Linear(512, out_dim), + ) + self.linears_image = nn.Sequential( + nn.Linear(self.positive_len + self.position_dim, 512), + nn.SiLU(), + nn.Linear(512, 512), + nn.SiLU(), + nn.Linear(512, out_dim), + ) + self.null_text_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) + self.null_image_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) + + self.null_position_feature = torch.nn.Parameter(torch.zeros([self.position_dim])) + + def forward( + self, + boxes, + masks, + positive_embeddings=None, + phrases_masks=None, + image_masks=None, + phrases_embeddings=None, + image_embeddings=None, + ): + masks = masks.unsqueeze(-1) + + # embedding position (it may includes padding as placeholder) + xyxy_embedding = get_fourier_embeds_from_boundingbox(self.fourier_embedder_dim, boxes) # B*N*4 -> B*N*C + + # learnable null embedding + xyxy_null = self.null_position_feature.view(1, 1, -1) + + # replace padding with learnable null embedding + xyxy_embedding = xyxy_embedding * masks + (1 - masks) * xyxy_null + + # positionet with text only information + if positive_embeddings is not None: + # learnable null embedding + positive_null = self.null_positive_feature.view(1, 1, -1) + + # replace padding with 
learnable null embedding + positive_embeddings = positive_embeddings * masks + (1 - masks) * positive_null + + objs = self.linears(torch.cat([positive_embeddings, xyxy_embedding], dim=-1)) + + # positionet with text and image infomation + else: + phrases_masks = phrases_masks.unsqueeze(-1) + image_masks = image_masks.unsqueeze(-1) + + # learnable null embedding + text_null = self.null_text_feature.view(1, 1, -1) + image_null = self.null_image_feature.view(1, 1, -1) + + # replace padding with learnable null embedding + phrases_embeddings = phrases_embeddings * phrases_masks + (1 - phrases_masks) * text_null + image_embeddings = image_embeddings * image_masks + (1 - image_masks) * image_null + + objs_text = self.linears_text(torch.cat([phrases_embeddings, xyxy_embedding], dim=-1)) + objs_image = self.linears_image(torch.cat([image_embeddings, xyxy_embedding], dim=-1)) + objs = torch.cat([objs_text, objs_image], dim=1) + + return objs + + +class PixArtAlphaCombinedTimestepSizeEmbeddings(nn.Module): + """ + For PixArt-Alpha. + + Reference: + https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L164C9-L168C29 + """ + + def __init__(self, embedding_dim, size_emb_dim, use_additional_conditions: bool = False): + super().__init__() + + self.outdim = size_emb_dim + self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) + self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) + + self.use_additional_conditions = use_additional_conditions + if use_additional_conditions: + self.additional_condition_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) + self.resolution_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim) + self.aspect_ratio_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim) + + def forward(self, timestep, resolution, aspect_ratio, batch_size, hidden_dtype): + timesteps_proj = self.time_proj(timestep) + timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, D) + + if self.use_additional_conditions: + resolution_emb = self.additional_condition_proj(resolution.flatten()).to(hidden_dtype) + resolution_emb = self.resolution_embedder(resolution_emb).reshape(batch_size, -1) + aspect_ratio_emb = self.additional_condition_proj(aspect_ratio.flatten()).to(hidden_dtype) + aspect_ratio_emb = self.aspect_ratio_embedder(aspect_ratio_emb).reshape(batch_size, -1) + conditioning = timesteps_emb + torch.cat([resolution_emb, aspect_ratio_emb], dim=1) + else: + conditioning = timesteps_emb + + return conditioning + + +class PixArtAlphaTextProjection(nn.Module): + """ + Projects caption embeddings. Also handles dropout for classifier-free guidance. + + Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py + """ + + def __init__(self, in_features, hidden_size, num_tokens=120): + super().__init__() + self.linear_1 = nn.Linear(in_features=in_features, out_features=hidden_size, bias=True) + self.act_1 = nn.GELU(approximate="tanh") + self.linear_2 = nn.Linear(in_features=hidden_size, out_features=hidden_size, bias=True) + + def forward(self, caption): + hidden_states = self.linear_1(caption) + hidden_states = self.act_1(hidden_states) + hidden_states = self.linear_2(hidden_states) + return hidden_states + + +class IPAdapterPlusImageProjection(nn.Module): + """Resampler of IP-Adapter Plus. 
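# Shape sketch for the Fourier box embedding consumed by the GLIGEN projection above
# (illustrative only; assumes get_fourier_embeds_from_boundingbox is in scope):
import torch

boxes = torch.rand(2, 5, 4)                             # [batch, num_boxes, xyxy] in [0, 1]
emb = get_fourier_embeds_from_boundingbox(8, boxes)
print(emb.shape)                                        # torch.Size([2, 5, 64]) = 8 freqs * (sin, cos) * 4 coords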
+ + Args: + ---- + embed_dims (int): The feature dimension. Defaults to 768. + output_dims (int): The number of output channels, that is the same + number of the channels in the + `unet.config.cross_attention_dim`. Defaults to 1024. + hidden_dims (int): The number of hidden channels. Defaults to 1280. + depth (int): The number of blocks. Defaults to 8. + dim_head (int): The number of head channels. Defaults to 64. + heads (int): Parallel attention heads. Defaults to 16. + num_queries (int): The number of queries. Defaults to 8. + ffn_ratio (float): The expansion ratio of feedforward network hidden + layer channels. Defaults to 4. + """ + + def __init__( + self, + embed_dims: int = 768, + output_dims: int = 1024, + hidden_dims: int = 1280, + depth: int = 4, + dim_head: int = 64, + heads: int = 16, + num_queries: int = 8, + ffn_ratio: float = 4, + ) -> None: + super().__init__() + from .attention import FeedForward # Lazy import to avoid circular import + + self.latents = nn.Parameter(torch.randn(1, num_queries, hidden_dims) / hidden_dims**0.5) + + self.proj_in = nn.Linear(embed_dims, hidden_dims) + + self.proj_out = nn.Linear(hidden_dims, output_dims) + self.norm_out = nn.LayerNorm(output_dims) + + self.layers = nn.ModuleList([]) + for _ in range(depth): + self.layers.append( + nn.ModuleList( + [ + nn.LayerNorm(hidden_dims), + nn.LayerNorm(hidden_dims), + Attention( + query_dim=hidden_dims, + dim_head=dim_head, + heads=heads, + out_bias=False, + ), + nn.Sequential( + nn.LayerNorm(hidden_dims), + FeedForward(hidden_dims, hidden_dims, activation_fn="gelu", mult=ffn_ratio, bias=False), + ), + ] + ) + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward pass. + + Args: + ---- + x (torch.Tensor): Input Tensor. + + Returns: + ------- + torch.Tensor: Output Tensor. + """ + latents = self.latents.repeat(x.size(0), 1, 1) + + x = self.proj_in(x) + + for ln0, ln1, attn, ff in self.layers: + residual = latents + + encoder_hidden_states = ln0(x) + latents = ln1(latents) + encoder_hidden_states = torch.cat([encoder_hidden_states, latents], dim=-2) + latents = attn(latents, encoder_hidden_states) + residual + latents = ff(latents) + latents + + latents = self.proj_out(latents) + return self.norm_out(latents) + + +class MultiIPAdapterImageProjection(nn.Module): + def __init__(self, IPAdapterImageProjectionLayers: Union[List[nn.Module], Tuple[nn.Module]]): + super().__init__() + self.image_projection_layers = nn.ModuleList(IPAdapterImageProjectionLayers) + + def forward(self, image_embeds: List[torch.FloatTensor]): + projected_image_embeds = [] + + # currently, we accept `image_embeds` as + # 1. a tensor (deprecated) with shape [batch_size, embed_dim] or [batch_size, sequence_length, embed_dim] + # 2. list of `n` tensors where `n` is number of ip-adapters, each tensor can hae shape [batch_size, num_images, embed_dim] or [batch_size, num_images, sequence_length, embed_dim] + if not isinstance(image_embeds, list): + deprecation_message = ( + "You have passed a tensor as `image_embeds`.This is deprecated and will be removed in a future release." + " Please make sure to update your script to pass `image_embeds` as a list of tensors to supress this warning." 
+ ) + deprecate("image_embeds not a list", "1.0.0", deprecation_message, standard_warn=False) + image_embeds = [image_embeds.unsqueeze(1)] + + if len(image_embeds) != len(self.image_projection_layers): + raise ValueError( + f"image_embeds must have the same length as image_projection_layers, got {len(image_embeds)} and {len(self.image_projection_layers)}" + ) + + for image_embed, image_projection_layer in zip(image_embeds, self.image_projection_layers): + batch_size, num_images = image_embed.shape[0], image_embed.shape[1] + image_embed = image_embed.reshape((batch_size * num_images,) + image_embed.shape[2:]) + image_embed = image_projection_layer(image_embed) + image_embed = image_embed.reshape((batch_size, num_images) + image_embed.shape[1:]) + + projected_image_embeds.append(image_embed) + + return projected_image_embeds diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/embeddings_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/embeddings_flax.py new file mode 100644 index 000000000..8e343be0d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/embeddings_flax.py @@ -0,0 +1,97 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import math + +import flax.linen as nn +import jax.numpy as jnp + + +def get_sinusoidal_embeddings( + timesteps: jnp.ndarray, + embedding_dim: int, + freq_shift: float = 1, + min_timescale: float = 1, + max_timescale: float = 1.0e4, + flip_sin_to_cos: bool = False, + scale: float = 1.0, +) -> jnp.ndarray: + """Returns the positional encoding (same as Tensor2Tensor). + + Args: + timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + embedding_dim: The number of output channels. + min_timescale: The smallest time unit (should probably be 0.0). + max_timescale: The largest time unit. + Returns: + a Tensor of timing signals [N, num_channels] + """ + assert timesteps.ndim == 1, "Timesteps should be a 1d-array" + assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even" + num_timescales = float(embedding_dim // 2) + log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift) + inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment) + emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0) + + # scale embeddings + scaled_time = scale * emb + + if flip_sin_to_cos: + signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1) + else: + signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1) + signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim]) + return signal + + +class FlaxTimestepEmbedding(nn.Module): + r""" + Time step Embedding Module. Learns embeddings for input time steps. 
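# Illustrative sketch (requires jax; assumes get_sinusoidal_embeddings defined above is in scope):
# the Flax helper mirrors the PyTorch get_timestep_embedding and returns an [N, embedding_dim] array.
import jax.numpy as jnp

t = jnp.array([0.0, 10.0, 999.0])
emb = get_sinusoidal_embeddings(t, embedding_dim=320, flip_sin_to_cos=True)
print(emb.shape)  # (3, 320)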
+ + Args: + time_embed_dim (`int`, *optional*, defaults to `32`): + Time step embedding dimension + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + time_embed_dim: int = 32 + dtype: jnp.dtype = jnp.float32 + + @nn.compact + def __call__(self, temb): + temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb) + temb = nn.silu(temb) + temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb) + return temb + + +class FlaxTimesteps(nn.Module): + r""" + Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239 + + Args: + dim (`int`, *optional*, defaults to `32`): + Time step embedding dimension + """ + + dim: int = 32 + flip_sin_to_cos: bool = False + freq_shift: float = 1 + + @nn.compact + def __call__(self, timesteps): + return get_sinusoidal_embeddings( + timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/lora.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/lora.py new file mode 100644 index 000000000..4e9e0c07c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/lora.py @@ -0,0 +1,457 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# IMPORTANT: # +################################################################### +# ----------------------------------------------------------------# +# This file is deprecated and will be removed soon # +# (as soon as PEFT will become a required dependency for LoRA) # +# ----------------------------------------------------------------# +################################################################### + +from typing import Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from torch import nn + +from ..utils import deprecate, logging +from ..utils.import_utils import is_transformers_available + + +if is_transformers_available(): + from transformers import CLIPTextModel, CLIPTextModelWithProjection + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def text_encoder_attn_modules(text_encoder): + attn_modules = [] + + if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)): + for i, layer in enumerate(text_encoder.text_model.encoder.layers): + name = f"text_model.encoder.layers.{i}.self_attn" + mod = layer.self_attn + attn_modules.append((name, mod)) + else: + raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}") + + return attn_modules + + +def text_encoder_mlp_modules(text_encoder): + mlp_modules = [] + + if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)): + for i, layer in enumerate(text_encoder.text_model.encoder.layers): + mlp_mod = layer.mlp + name = f"text_model.encoder.layers.{i}.mlp" + mlp_modules.append((name, mlp_mod)) + else: + raise ValueError(f"do not know how to get mlp modules for: {text_encoder.__class__.__name__}") + + return mlp_modules + + +def adjust_lora_scale_text_encoder(text_encoder, lora_scale: float = 1.0): + for _, attn_module in text_encoder_attn_modules(text_encoder): + if isinstance(attn_module.q_proj, PatchedLoraProjection): + attn_module.q_proj.lora_scale = lora_scale + attn_module.k_proj.lora_scale = lora_scale + attn_module.v_proj.lora_scale = lora_scale + attn_module.out_proj.lora_scale = lora_scale + + for _, mlp_module in text_encoder_mlp_modules(text_encoder): + if isinstance(mlp_module.fc1, PatchedLoraProjection): + mlp_module.fc1.lora_scale = lora_scale + mlp_module.fc2.lora_scale = lora_scale + + +class PatchedLoraProjection(torch.nn.Module): + def __init__(self, regular_linear_layer, lora_scale=1, network_alpha=None, rank=4, dtype=None): + deprecation_message = "Use of `PatchedLoraProjection` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." 
+ deprecate("PatchedLoraProjection", "1.0.0", deprecation_message) + + super().__init__() + from ..models.lora import LoRALinearLayer + + self.regular_linear_layer = regular_linear_layer + + device = self.regular_linear_layer.weight.device + + if dtype is None: + dtype = self.regular_linear_layer.weight.dtype + + self.lora_linear_layer = LoRALinearLayer( + self.regular_linear_layer.in_features, + self.regular_linear_layer.out_features, + network_alpha=network_alpha, + device=device, + dtype=dtype, + rank=rank, + ) + + self.lora_scale = lora_scale + + # overwrite PyTorch's `state_dict` to be sure that only the 'regular_linear_layer' weights are saved + # when saving the whole text encoder model and when LoRA is unloaded or fused + def state_dict(self, *args, destination=None, prefix="", keep_vars=False): + if self.lora_linear_layer is None: + return self.regular_linear_layer.state_dict( + *args, destination=destination, prefix=prefix, keep_vars=keep_vars + ) + + return super().state_dict(*args, destination=destination, prefix=prefix, keep_vars=keep_vars) + + def _fuse_lora(self, lora_scale=1.0, safe_fusing=False): + if self.lora_linear_layer is None: + return + + dtype, device = self.regular_linear_layer.weight.data.dtype, self.regular_linear_layer.weight.data.device + + w_orig = self.regular_linear_layer.weight.data.float() + w_up = self.lora_linear_layer.up.weight.data.float() + w_down = self.lora_linear_layer.down.weight.data.float() + + if self.lora_linear_layer.network_alpha is not None: + w_up = w_up * self.lora_linear_layer.network_alpha / self.lora_linear_layer.rank + + fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0]) + + if safe_fusing and torch.isnan(fused_weight).any().item(): + raise ValueError( + "This LoRA weight seems to be broken. " + f"Encountered NaN values when trying to fuse LoRA weights for {self}." + "LoRA weights will not be fused." + ) + + self.regular_linear_layer.weight.data = fused_weight.to(device=device, dtype=dtype) + + # we can drop the lora layer now + self.lora_linear_layer = None + + # offload the up and down matrices to CPU to not blow the memory + self.w_up = w_up.cpu() + self.w_down = w_down.cpu() + self.lora_scale = lora_scale + + def _unfuse_lora(self): + if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None): + return + + fused_weight = self.regular_linear_layer.weight.data + dtype, device = fused_weight.dtype, fused_weight.device + + w_up = self.w_up.to(device=device).float() + w_down = self.w_down.to(device).float() + + unfused_weight = fused_weight.float() - (self.lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0]) + self.regular_linear_layer.weight.data = unfused_weight.to(device=device, dtype=dtype) + + self.w_up = None + self.w_down = None + + def forward(self, input): + if self.lora_scale is None: + self.lora_scale = 1.0 + if self.lora_linear_layer is None: + return self.regular_linear_layer(input) + return self.regular_linear_layer(input) + (self.lora_scale * self.lora_linear_layer(input)) + + +class LoRALinearLayer(nn.Module): + r""" + A linear layer that is used with LoRA. + + Parameters: + in_features (`int`): + Number of input features. + out_features (`int`): + Number of output features. + rank (`int`, `optional`, defaults to 4): + The rank of the LoRA layer. + network_alpha (`float`, `optional`, defaults to `None`): + The value of the network alpha used for stable learning and preventing underflow. 
This value has the same + meaning as the `--network_alpha` option in the kohya-ss trainer script. See + https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning + device (`torch.device`, `optional`, defaults to `None`): + The device to use for the layer's weights. + dtype (`torch.dtype`, `optional`, defaults to `None`): + The dtype to use for the layer's weights. + """ + + def __init__( + self, + in_features: int, + out_features: int, + rank: int = 4, + network_alpha: Optional[float] = None, + device: Optional[Union[torch.device, str]] = None, + dtype: Optional[torch.dtype] = None, + ): + super().__init__() + + deprecation_message = "Use of `LoRALinearLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." + deprecate("LoRALinearLayer", "1.0.0", deprecation_message) + + self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype) + self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype) + # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. + # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning + self.network_alpha = network_alpha + self.rank = rank + self.out_features = out_features + self.in_features = in_features + + nn.init.normal_(self.down.weight, std=1 / rank) + nn.init.zeros_(self.up.weight) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + orig_dtype = hidden_states.dtype + dtype = self.down.weight.dtype + + down_hidden_states = self.down(hidden_states.to(dtype)) + up_hidden_states = self.up(down_hidden_states) + + if self.network_alpha is not None: + up_hidden_states *= self.network_alpha / self.rank + + return up_hidden_states.to(orig_dtype) + + +class LoRAConv2dLayer(nn.Module): + r""" + A convolutional layer that is used with LoRA. + + Parameters: + in_features (`int`): + Number of input features. + out_features (`int`): + Number of output features. + rank (`int`, `optional`, defaults to 4): + The rank of the LoRA layer. + kernel_size (`int` or `tuple` of two `int`, `optional`, defaults to 1): + The kernel size of the convolution. + stride (`int` or `tuple` of two `int`, `optional`, defaults to 1): + The stride of the convolution. + padding (`int` or `tuple` of two `int` or `str`, `optional`, defaults to 0): + The padding of the convolution. + network_alpha (`float`, `optional`, defaults to `None`): + The value of the network alpha used for stable learning and preventing underflow. This value has the same + meaning as the `--network_alpha` option in the kohya-ss trainer script. See + https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning + """ + + def __init__( + self, + in_features: int, + out_features: int, + rank: int = 4, + kernel_size: Union[int, Tuple[int, int]] = (1, 1), + stride: Union[int, Tuple[int, int]] = (1, 1), + padding: Union[int, Tuple[int, int], str] = 0, + network_alpha: Optional[float] = None, + ): + super().__init__() + + deprecation_message = "Use of `LoRAConv2dLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." 
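# Illustrative sketch (assumes LoRALinearLayer from above is in scope): `up` is zero-initialized,
# so a fresh LoRA layer contributes nothing until it is trained, and callers add its output to the
# base projection scaled by lora_scale, as PatchedLoraProjection.forward does above.
import torch

base = torch.nn.Linear(768, 768)
lora = LoRALinearLayer(in_features=768, out_features=768, rank=4, network_alpha=4)

x = torch.randn(2, 768)
assert torch.allclose(lora(x), torch.zeros(2, 768))  # no-op before training
y = base(x) + 0.8 * lora(x)                          # base path + scaled low-rank path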
+ deprecate("LoRAConv2dLayer", "1.0.0", deprecation_message) + + self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) + # according to the official kohya_ss trainer kernel_size are always fixed for the up layer + # # see: https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L129 + self.up = nn.Conv2d(rank, out_features, kernel_size=(1, 1), stride=(1, 1), bias=False) + + # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. + # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning + self.network_alpha = network_alpha + self.rank = rank + + nn.init.normal_(self.down.weight, std=1 / rank) + nn.init.zeros_(self.up.weight) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + orig_dtype = hidden_states.dtype + dtype = self.down.weight.dtype + + down_hidden_states = self.down(hidden_states.to(dtype)) + up_hidden_states = self.up(down_hidden_states) + + if self.network_alpha is not None: + up_hidden_states *= self.network_alpha / self.rank + + return up_hidden_states.to(orig_dtype) + + +class LoRACompatibleConv(nn.Conv2d): + """ + A convolutional layer that can be used with LoRA. + """ + + def __init__(self, *args, lora_layer: Optional[LoRAConv2dLayer] = None, **kwargs): + deprecation_message = "Use of `LoRACompatibleConv` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." + deprecate("LoRACompatibleConv", "1.0.0", deprecation_message) + + super().__init__(*args, **kwargs) + self.lora_layer = lora_layer + + def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]): + deprecation_message = "Use of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." + deprecate("set_lora_layer", "1.0.0", deprecation_message) + + self.lora_layer = lora_layer + + def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False): + if self.lora_layer is None: + return + + dtype, device = self.weight.data.dtype, self.weight.data.device + + w_orig = self.weight.data.float() + w_up = self.lora_layer.up.weight.data.float() + w_down = self.lora_layer.down.weight.data.float() + + if self.lora_layer.network_alpha is not None: + w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank + + fusion = torch.mm(w_up.flatten(start_dim=1), w_down.flatten(start_dim=1)) + fusion = fusion.reshape((w_orig.shape)) + fused_weight = w_orig + (lora_scale * fusion) + + if safe_fusing and torch.isnan(fused_weight).any().item(): + raise ValueError( + "This LoRA weight seems to be broken. " + f"Encountered NaN values when trying to fuse LoRA weights for {self}." + "LoRA weights will not be fused." 
+ ) + + self.weight.data = fused_weight.to(device=device, dtype=dtype) + + # we can drop the lora layer now + self.lora_layer = None + + # offload the up and down matrices to CPU to not blow the memory + self.w_up = w_up.cpu() + self.w_down = w_down.cpu() + self._lora_scale = lora_scale + + def _unfuse_lora(self): + if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None): + return + + fused_weight = self.weight.data + dtype, device = fused_weight.data.dtype, fused_weight.data.device + + self.w_up = self.w_up.to(device=device).float() + self.w_down = self.w_down.to(device).float() + + fusion = torch.mm(self.w_up.flatten(start_dim=1), self.w_down.flatten(start_dim=1)) + fusion = fusion.reshape((fused_weight.shape)) + unfused_weight = fused_weight.float() - (self._lora_scale * fusion) + self.weight.data = unfused_weight.to(device=device, dtype=dtype) + + self.w_up = None + self.w_down = None + + def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor: + if self.padding_mode != "zeros": + hidden_states = F.pad(hidden_states, self._reversed_padding_repeated_twice, mode=self.padding_mode) + padding = (0, 0) + else: + padding = self.padding + + original_outputs = F.conv2d( + hidden_states, self.weight, self.bias, self.stride, padding, self.dilation, self.groups + ) + + if self.lora_layer is None: + return original_outputs + else: + return original_outputs + (scale * self.lora_layer(hidden_states)) + + +class LoRACompatibleLinear(nn.Linear): + """ + A Linear layer that can be used with LoRA. + """ + + def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs): + deprecation_message = "Use of `LoRACompatibleLinear` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." + deprecate("LoRACompatibleLinear", "1.0.0", deprecation_message) + + super().__init__(*args, **kwargs) + self.lora_layer = lora_layer + + def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]): + deprecation_message = "Use of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`." + deprecate("set_lora_layer", "1.0.0", deprecation_message) + self.lora_layer = lora_layer + + def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False): + if self.lora_layer is None: + return + + dtype, device = self.weight.data.dtype, self.weight.data.device + + w_orig = self.weight.data.float() + w_up = self.lora_layer.up.weight.data.float() + w_down = self.lora_layer.down.weight.data.float() + + if self.lora_layer.network_alpha is not None: + w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank + + fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0]) + + if safe_fusing and torch.isnan(fused_weight).any().item(): + raise ValueError( + "This LoRA weight seems to be broken. " + f"Encountered NaN values when trying to fuse LoRA weights for {self}." + "LoRA weights will not be fused." 
+ ) + + self.weight.data = fused_weight.to(device=device, dtype=dtype) + + # we can drop the lora layer now + self.lora_layer = None + + # offload the up and down matrices to CPU to not blow the memory + self.w_up = w_up.cpu() + self.w_down = w_down.cpu() + self._lora_scale = lora_scale + + def _unfuse_lora(self): + if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None): + return + + fused_weight = self.weight.data + dtype, device = fused_weight.dtype, fused_weight.device + + w_up = self.w_up.to(device=device).float() + w_down = self.w_down.to(device).float() + + unfused_weight = fused_weight.float() - (self._lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0]) + self.weight.data = unfused_weight.to(device=device, dtype=dtype) + + self.w_up = None + self.w_down = None + + def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor: + if self.lora_layer is None: + out = super().forward(hidden_states) + return out + else: + out = super().forward(hidden_states) + (scale * self.lora_layer(hidden_states)) + return out diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_flax_pytorch_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_flax_pytorch_utils.py new file mode 100644 index 000000000..4a487133e --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_flax_pytorch_utils.py @@ -0,0 +1,134 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
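# A sketch of the fusion arithmetic used by _fuse_lora above, written with plain tensors
# (illustrative only): folding the low-rank update into the base weight gives the same result as
# keeping the LoRA branch separate at runtime.
import torch

W = torch.randn(768, 768, dtype=torch.float64)             # base linear weight
W_down = torch.randn(4, 768, dtype=torch.float64) * 0.01   # rank x in_features
W_up = torch.randn(768, 4, dtype=torch.float64) * 0.01     # out_features x rank
scale = 1.0

W_fused = W + scale * (W_up @ W_down)
x = torch.randn(2, 768, dtype=torch.float64)
assert torch.allclose(x @ W_fused.T, x @ W.T + scale * (x @ W_down.T) @ W_up.T)
# _unfuse_lora recovers the original weight by subtracting the cached scale * (W_up @ W_down).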
+""" PyTorch - Flax general utilities.""" +import re + +import jax.numpy as jnp +from flax.traverse_util import flatten_dict, unflatten_dict +from jax.random import PRNGKey + +from ..utils import logging + + +logger = logging.get_logger(__name__) + + +def rename_key(key): + regex = r"\w+[.]\d+" + pats = re.findall(regex, key) + for pat in pats: + key = key.replace(pat, "_".join(pat.split("."))) + return key + + +##################### +# PyTorch => Flax # +##################### + + +# Adapted from https://github.com/huggingface/transformers/blob/c603c80f46881ae18b2ca50770ef65fa4033eacd/src/transformers/modeling_flax_pytorch_utils.py#L69 +# and https://github.com/patil-suraj/stable-diffusion-jax/blob/main/stable_diffusion_jax/convert_diffusers_to_jax.py +def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict): + """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary""" + # conv norm or layer norm + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) + + # rename attention layers + if len(pt_tuple_key) > 1: + for rename_from, rename_to in ( + ("to_out_0", "proj_attn"), + ("to_k", "key"), + ("to_v", "value"), + ("to_q", "query"), + ): + if pt_tuple_key[-2] == rename_from: + weight_name = pt_tuple_key[-1] + weight_name = "kernel" if weight_name == "weight" else weight_name + renamed_pt_tuple_key = pt_tuple_key[:-2] + (rename_to, weight_name) + if renamed_pt_tuple_key in random_flax_state_dict: + assert random_flax_state_dict[renamed_pt_tuple_key].shape == pt_tensor.T.shape + return renamed_pt_tuple_key, pt_tensor.T + + if ( + any("norm" in str_ for str_ in pt_tuple_key) + and (pt_tuple_key[-1] == "bias") + and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) + and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) + ): + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) + return renamed_pt_tuple_key, pt_tensor + elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) + return renamed_pt_tuple_key, pt_tensor + + # embedding + if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: + pt_tuple_key = pt_tuple_key[:-1] + ("embedding",) + return renamed_pt_tuple_key, pt_tensor + + # conv layer + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) + if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: + pt_tensor = pt_tensor.transpose(2, 3, 1, 0) + return renamed_pt_tuple_key, pt_tensor + + # linear layer + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) + if pt_tuple_key[-1] == "weight": + pt_tensor = pt_tensor.T + return renamed_pt_tuple_key, pt_tensor + + # old PyTorch layer norm weight + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",) + if pt_tuple_key[-1] == "gamma": + return renamed_pt_tuple_key, pt_tensor + + # old PyTorch layer norm bias + renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",) + if pt_tuple_key[-1] == "beta": + return renamed_pt_tuple_key, pt_tensor + + return pt_tuple_key, pt_tensor + + +def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42): + # Step 1: Convert pytorch tensor to numpy + pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()} + + # Step 2: Since the model is stateless, get random Flax params + random_flax_params = flax_model.init_weights(PRNGKey(init_key)) + + random_flax_state_dict = flatten_dict(random_flax_params) + flax_state_dict = {} + + # Need to change some 
parameters name to match Flax names + for pt_key, pt_tensor in pt_state_dict.items(): + renamed_pt_key = rename_key(pt_key) + pt_tuple_key = tuple(renamed_pt_key.split(".")) + + # Correctly rename weight parameters + flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict) + + if flax_key in random_flax_state_dict: + if flax_tensor.shape != random_flax_state_dict[flax_key].shape: + raise ValueError( + f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " + f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." + ) + + # also add unexpected weight so that warning is thrown + flax_state_dict[flax_key] = jnp.asarray(flax_tensor) + + return unflatten_dict(flax_state_dict) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_flax_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_flax_utils.py new file mode 100644 index 000000000..1ddcda900 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_flax_utils.py @@ -0,0 +1,566 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from pickle import UnpicklingError +from typing import Any, Dict, Union + +import jax +import jax.numpy as jnp +import msgpack.exceptions +from flax.core.frozen_dict import FrozenDict, unfreeze +from flax.serialization import from_bytes, to_bytes +from flax.traverse_util import flatten_dict, unflatten_dict +from huggingface_hub import create_repo, hf_hub_download +from huggingface_hub.utils import ( + EntryNotFoundError, + RepositoryNotFoundError, + RevisionNotFoundError, + validate_hf_hub_args, +) +from requests import HTTPError + +from .. import __version__, is_torch_available +from ..utils import ( + CONFIG_NAME, + FLAX_WEIGHTS_NAME, + HUGGINGFACE_CO_RESOLVE_ENDPOINT, + WEIGHTS_NAME, + PushToHubMixin, + logging, +) +from .modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax + + +logger = logging.get_logger(__name__) + + +class FlaxModelMixin(PushToHubMixin): + r""" + Base class for all Flax models. + + [`FlaxModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and + saving models. + + - **config_name** ([`str`]) -- Filename to save a model to when calling [`~FlaxModelMixin.save_pretrained`]. + """ + + config_name = CONFIG_NAME + _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"] + _flax_internal_args = ["name", "parent", "dtype"] + + @classmethod + def _from_config(cls, config, **kwargs): + """ + All context managers that the model should be initialized under go here. 
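# Illustrative example (assumes rename_key from above is in scope): module-list indices of the form
# "<name>.<digit>" become "<name>_<digit>"; separately, rename_key_and_reshape_tensor hands a 2D
# Linear "weight" to Flax transposed under the "kernel" key.
print(rename_key("down_blocks.0.attentions.1.proj.weight"))
# -> "down_blocks_0.attentions_1.proj.weight"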
+ """ + return cls(config, **kwargs) + + def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any: + """ + Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`. + """ + + # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27 + def conditional_cast(param): + if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating): + param = param.astype(dtype) + return param + + if mask is None: + return jax.tree_map(conditional_cast, params) + + flat_params = flatten_dict(params) + flat_mask, _ = jax.tree_flatten(mask) + + for masked, key in zip(flat_mask, flat_params.keys()): + if masked: + param = flat_params[key] + flat_params[key] = conditional_cast(param) + + return unflatten_dict(flat_params) + + def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None): + r""" + Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast + the `params` in place. + + This method can be used on a TPU to explicitly convert the model parameters to bfloat16 precision to do full + half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed. + + Arguments: + params (`Union[Dict, FrozenDict]`): + A `PyTree` of model parameters. + mask (`Union[Dict, FrozenDict]`): + A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True` + for params you want to cast, and `False` for those you want to skip. + + Examples: + + ```python + >>> from diffusers import FlaxUNet2DConditionModel + + >>> # load model + >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision + >>> params = model.to_bf16(params) + >>> # If you don't want to cast certain parameters (for example layer norm bias and scale) + >>> # then pass the mask as follows + >>> from flax import traverse_util + + >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> flat_params = traverse_util.flatten_dict(params) + >>> mask = { + ... path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) + ... for path in flat_params + ... } + >>> mask = traverse_util.unflatten_dict(mask) + >>> params = model.to_bf16(params, mask) + ```""" + return self._cast_floating_to(params, jnp.bfloat16, mask) + + def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None): + r""" + Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the + model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place. + + Arguments: + params (`Union[Dict, FrozenDict]`): + A `PyTree` of model parameters. + mask (`Union[Dict, FrozenDict]`): + A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True` + for params you want to cast, and `False` for those you want to skip. 
+ + Examples: + + ```python + >>> from diffusers import FlaxUNet2DConditionModel + + >>> # Download model and configuration from huggingface.co + >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> # By default, the model params will be in fp32, to illustrate the use of this method, + >>> # we'll first cast to fp16 and back to fp32 + >>> params = model.to_f16(params) + >>> # now cast back to fp32 + >>> params = model.to_fp32(params) + ```""" + return self._cast_floating_to(params, jnp.float32, mask) + + def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None): + r""" + Cast the floating-point `params` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the + `params` in place. + + This method can be used on a GPU to explicitly convert the model parameters to float16 precision to do full + half-precision training or to save weights in float16 for inference in order to save memory and improve speed. + + Arguments: + params (`Union[Dict, FrozenDict]`): + A `PyTree` of model parameters. + mask (`Union[Dict, FrozenDict]`): + A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True` + for params you want to cast, and `False` for those you want to skip. + + Examples: + + ```python + >>> from diffusers import FlaxUNet2DConditionModel + + >>> # load model + >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> # By default, the model params will be in fp32, to cast these to float16 + >>> params = model.to_fp16(params) + >>> # If you want don't want to cast certain parameters (for example layer norm bias and scale) + >>> # then pass the mask as follows + >>> from flax import traverse_util + + >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> flat_params = traverse_util.flatten_dict(params) + >>> mask = { + ... path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) + ... for path in flat_params + ... } + >>> mask = traverse_util.unflatten_dict(mask) + >>> params = model.to_fp16(params, mask) + ```""" + return self._cast_floating_to(params, jnp.float16, mask) + + def init_weights(self, rng: jax.Array) -> Dict: + raise NotImplementedError(f"init_weights method has to be implemented for {self}") + + @classmethod + @validate_hf_hub_args + def from_pretrained( + cls, + pretrained_model_name_or_path: Union[str, os.PathLike], + dtype: jnp.dtype = jnp.float32, + *model_args, + **kwargs, + ): + r""" + Instantiate a pretrained Flax model from a pretrained model configuration. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`): + Can be either: + + - A string, the *model id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained model + hosted on the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved + using [`~FlaxModelMixin.save_pretrained`]. + dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): + The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and + `jax.numpy.bfloat16` (on TPUs). + + This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If + specified, all the computation will be performed with the given `dtype`. + + + + This only specifies the dtype of the *computation* and does not influence the dtype of model + parameters. 
+ + If you wish to change the dtype of the model parameters, see [`~FlaxModelMixin.to_fp16`] and + [`~FlaxModelMixin.to_bf16`]. + + + + model_args (sequence of positional arguments, *optional*): + All remaining positional arguments are passed to the underlying model's `__init__` method. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only(`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + from_pt (`bool`, *optional*, defaults to `False`): + Load the model weights from a PyTorch checkpoint save file. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to update the configuration object (after it is loaded) and initiate the model (for + example, `output_attentions=True`). Behaves differently depending on whether a `config` is provided or + automatically loaded: + + - If a configuration is provided with `config`, `kwargs` are directly passed to the underlying + model's `__init__` method (we assume all relevant updates to the configuration have already been + done). + - If a configuration is not provided, `kwargs` are first passed to the configuration class + initialization function [`~ConfigMixin.from_config`]. Each key of the `kwargs` that corresponds + to a configuration attribute is used to override said attribute with the supplied `kwargs` value. + Remaining keys that do not correspond to any configuration attribute are passed to the underlying + model's `__init__` function. + + Examples: + + ```python + >>> from diffusers import FlaxUNet2DConditionModel + + >>> # Download model and configuration from huggingface.co and cache. + >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). + >>> model, params = FlaxUNet2DConditionModel.from_pretrained("./test/saved_model/") + ``` + + If you get the error message below, you need to finetune the weights for your downstream task: + + ```bash + Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: + - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated + You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
+ ``` + """ + config = kwargs.pop("config", None) + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + from_pt = kwargs.pop("from_pt", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", False) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + subfolder = kwargs.pop("subfolder", None) + + user_agent = { + "diffusers": __version__, + "file_type": "model", + "framework": "flax", + } + + # Load config if we don't provide one + if config is None: + config, unused_kwargs = cls.load_config( + pretrained_model_name_or_path, + cache_dir=cache_dir, + return_unused_kwargs=True, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + **kwargs, + ) + + model, model_kwargs = cls.from_config(config, dtype=dtype, return_unused_kwargs=True, **unused_kwargs) + + # Load model + pretrained_path_with_subfolder = ( + pretrained_model_name_or_path + if subfolder is None + else os.path.join(pretrained_model_name_or_path, subfolder) + ) + if os.path.isdir(pretrained_path_with_subfolder): + if from_pt: + if not os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)): + raise EnvironmentError( + f"Error no file named {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder} " + ) + model_file = os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME) + elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)): + # Load from a Flax checkpoint + model_file = os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME) + # Check if pytorch weights exist instead + elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)): + raise EnvironmentError( + f"{WEIGHTS_NAME} file found in directory {pretrained_path_with_subfolder}. Please load the model" + " using `from_pt=True`." + ) + else: + raise EnvironmentError( + f"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory " + f"{pretrained_path_with_subfolder}." + ) + else: + try: + model_file = hf_hub_download( + pretrained_model_name_or_path, + filename=FLAX_WEIGHTS_NAME if not from_pt else WEIGHTS_NAME, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + subfolder=subfolder, + revision=revision, + ) + + except RepositoryNotFoundError: + raise EnvironmentError( + f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " + "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " + "token having permission to this repo with `token` or log in with `huggingface-cli " + "login`." + ) + except RevisionNotFoundError: + raise EnvironmentError( + f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " + "this model name. Check the model page at " + f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." + ) + except EntryNotFoundError: + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME}." 
+ ) + except HTTPError as err: + raise EnvironmentError( + f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n" + f"{err}" + ) + except ValueError: + raise EnvironmentError( + f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" + f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" + f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\nCheckout your" + " internet connection or see how to run the library in offline mode at" + " 'https://huggingface.co/docs/transformers/installation#offline-mode'." + ) + except EnvironmentError: + raise EnvironmentError( + f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " + "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " + f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " + f"containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}." + ) + + if from_pt: + if is_torch_available(): + from .modeling_utils import load_state_dict + else: + raise EnvironmentError( + "Can't load the model in PyTorch format because PyTorch is not installed. " + "Please, install PyTorch or use native Flax weights." + ) + + # Step 1: Get the pytorch file + pytorch_model_file = load_state_dict(model_file) + + # Step 2: Convert the weights + state = convert_pytorch_state_dict_to_flax(pytorch_model_file, model) + else: + try: + with open(model_file, "rb") as state_f: + state = from_bytes(cls, state_f.read()) + except (UnpicklingError, msgpack.exceptions.ExtraData) as e: + try: + with open(model_file) as f: + if f.read().startswith("version"): + raise OSError( + "You seem to have cloned a repository without having git-lfs installed. Please" + " install git-lfs and run `git lfs install` followed by `git lfs pull` in the" + " folder you cloned." + ) + else: + raise ValueError from e + except (UnicodeDecodeError, ValueError): + raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ") + # make sure all arrays are stored as jnp.ndarray + # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4: + # https://github.com/google/flax/issues/1261 + state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend="cpu")[0]), state) + + # flatten dicts + state = flatten_dict(state) + + params_shape_tree = jax.eval_shape(model.init_weights, rng=jax.random.PRNGKey(0)) + required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys()) + + shape_state = flatten_dict(unfreeze(params_shape_tree)) + + missing_keys = required_params - set(state.keys()) + unexpected_keys = set(state.keys()) - required_params + + if missing_keys: + logger.warning( + f"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. " + "Make sure to call model.init_weights to initialize the missing weights." + ) + cls._missing_keys = missing_keys + + for key in state.keys(): + if key in shape_state and state[key].shape != shape_state[key].shape: + raise ValueError( + f"Trying to load the pretrained weight for {key} failed: checkpoint has shape " + f"{state[key].shape} which is incompatible with the model shape {shape_state[key].shape}. 
" + ) + + # remove unexpected keys to not be saved again + for unexpected_key in unexpected_keys: + del state[unexpected_key] + + if len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" + f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" + f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" + " with another architecture." + ) + else: + logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") + + if len(missing_keys) > 0: + logger.warning( + f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" + f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" + " TRAIN this model on a down-stream task to be able to use it for predictions and inference." + ) + else: + logger.info( + f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" + f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" + f" was trained on, you can already use {model.__class__.__name__} for predictions without further" + " training." + ) + + return model, unflatten_dict(state) + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + params: Union[Dict, FrozenDict], + is_main_process: bool = True, + push_to_hub: bool = False, + **kwargs, + ): + """ + Save a model and its configuration file to a directory so that it can be reloaded using the + [`~FlaxModelMixin.from_pretrained`] class method. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save a model and its configuration file to. Will be created if it doesn't exist. + params (`Union[Dict, FrozenDict]`): + A `PyTree` of model parameters. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful during distributed training and you + need to call this function on all processes. In this case, set `is_main_process=True` only on the main + process to avoid race conditions. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
+ """ + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + os.makedirs(save_directory, exist_ok=True) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + private = kwargs.pop("private", False) + create_pr = kwargs.pop("create_pr", False) + token = kwargs.pop("token", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id + + model_to_save = self + + # Attach architecture to the config + # Save the config + if is_main_process: + model_to_save.save_config(save_directory) + + # save model + output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME) + with open(output_model_file, "wb") as f: + model_bytes = to_bytes(params) + f.write(model_bytes) + + logger.info(f"Model weights saved in {output_model_file}") + + if push_to_hub: + self._upload_folder( + save_directory, + repo_id, + token=token, + commit_message=commit_message, + create_pr=create_pr, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_outputs.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_outputs.py new file mode 100644 index 000000000..8dfee5fec --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_outputs.py @@ -0,0 +1,17 @@ +from dataclasses import dataclass + +from ..utils import BaseOutput + + +@dataclass +class AutoencoderKLOutput(BaseOutput): + """ + Output of AutoencoderKL encoding method. + + Args: + latent_dist (`DiagonalGaussianDistribution`): + Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`. + `DiagonalGaussianDistribution` allows for sampling latents from the distribution. + """ + + latent_dist: "DiagonalGaussianDistribution" # noqa: F821 diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_pytorch_flax_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_pytorch_flax_utils.py new file mode 100644 index 000000000..7099daca7 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_pytorch_flax_utils.py @@ -0,0 +1,161 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
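+# The helpers below undo the PyTorch -> Flax renaming performed in modeling_flax_pytorch_utils.py.
+# A rough sketch of the key mapping implemented by load_flax_weights_in_pytorch_model:
+#
+#   Flax key ending in "kernel" with a 4D tensor -> PyTorch "weight", transposed to (3, 2, 0, 1)
+#   Flax key ending in "kernel"                  -> PyTorch "weight", transposed
+#   Flax key ending in "scale"                   -> PyTorch "weight"
+#   "_0" ... "_9" name suffixes                  -> ".0" ... ".9" (skipped inside time_embedding modules)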
+""" PyTorch - Flax general utilities.""" + +from pickle import UnpicklingError + +import jax +import jax.numpy as jnp +import numpy as np +from flax.serialization import from_bytes +from flax.traverse_util import flatten_dict + +from ..utils import logging + + +logger = logging.get_logger(__name__) + + +##################### +# Flax => PyTorch # +##################### + + +# from https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_flax_pytorch_utils.py#L224-L352 +def load_flax_checkpoint_in_pytorch_model(pt_model, model_file): + try: + with open(model_file, "rb") as flax_state_f: + flax_state = from_bytes(None, flax_state_f.read()) + except UnpicklingError as e: + try: + with open(model_file) as f: + if f.read().startswith("version"): + raise OSError( + "You seem to have cloned a repository without having git-lfs installed. Please" + " install git-lfs and run `git lfs install` followed by `git lfs pull` in the" + " folder you cloned." + ) + else: + raise ValueError from e + except (UnicodeDecodeError, ValueError): + raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ") + + return load_flax_weights_in_pytorch_model(pt_model, flax_state) + + +def load_flax_weights_in_pytorch_model(pt_model, flax_state): + """Load flax checkpoints in a PyTorch model""" + + try: + import torch # noqa: F401 + except ImportError: + logger.error( + "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see" + " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" + " instructions." + ) + raise + + # check if we have bf16 weights + is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values() + if any(is_type_bf16): + # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 + + # and bf16 is not fully supported in PT yet. + logger.warning( + "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` " + "before loading those in PyTorch model." 
+ ) + flax_state = jax.tree_util.tree_map( + lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state + ) + + pt_model.base_model_prefix = "" + + flax_state_dict = flatten_dict(flax_state, sep=".") + pt_model_dict = pt_model.state_dict() + + # keep track of unexpected & missing keys + unexpected_keys = [] + missing_keys = set(pt_model_dict.keys()) + + for flax_key_tuple, flax_tensor in flax_state_dict.items(): + flax_key_tuple_array = flax_key_tuple.split(".") + + if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: + flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"] + flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1)) + elif flax_key_tuple_array[-1] == "kernel": + flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"] + flax_tensor = flax_tensor.T + elif flax_key_tuple_array[-1] == "scale": + flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"] + + if "time_embedding" not in flax_key_tuple_array: + for i, flax_key_tuple_string in enumerate(flax_key_tuple_array): + flax_key_tuple_array[i] = ( + flax_key_tuple_string.replace("_0", ".0") + .replace("_1", ".1") + .replace("_2", ".2") + .replace("_3", ".3") + .replace("_4", ".4") + .replace("_5", ".5") + .replace("_6", ".6") + .replace("_7", ".7") + .replace("_8", ".8") + .replace("_9", ".9") + ) + + flax_key = ".".join(flax_key_tuple_array) + + if flax_key in pt_model_dict: + if flax_tensor.shape != pt_model_dict[flax_key].shape: + raise ValueError( + f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " + f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." + ) + else: + # add weight to pytorch dict + flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor + pt_model_dict[flax_key] = torch.from_numpy(flax_tensor) + # remove from missing keys + missing_keys.remove(flax_key) + else: + # weight is not expected by PyTorch model + unexpected_keys.append(flax_key) + + pt_model.load_state_dict(pt_model_dict) + + # re-transform missing_keys to list + missing_keys = list(missing_keys) + + if len(unexpected_keys) > 0: + logger.warning( + "Some weights of the Flax model were not used when initializing the PyTorch model" + f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" + f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" + " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This" + f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" + " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a" + " FlaxBertForSequenceClassification model)." + ) + if len(missing_keys) > 0: + logger.warning( + f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" + f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" + " use it for predictions and inference." 
+ ) + + return pt_model diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_utils.py new file mode 100644 index 000000000..73ea5fb07 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/modeling_utils.py @@ -0,0 +1,1021 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import itertools +import os +import re +from collections import OrderedDict +from functools import partial +from typing import Any, Callable, List, Optional, Tuple, Union + +import safetensors +import torch +from huggingface_hub import create_repo +from huggingface_hub.utils import validate_hf_hub_args +from torch import Tensor, nn + +from .. import __version__ +from ..utils import ( + CONFIG_NAME, + FLAX_WEIGHTS_NAME, + SAFETENSORS_FILE_EXTENSION, + SAFETENSORS_WEIGHTS_NAME, + WEIGHTS_NAME, + _add_variant, + _get_model_file, + deprecate, + is_accelerate_available, + is_torch_version, + logging, +) +from ..utils.hub_utils import PushToHubMixin, load_or_create_model_card, populate_model_card + + +logger = logging.get_logger(__name__) + + +if is_torch_version(">=", "1.9.0"): + _LOW_CPU_MEM_USAGE_DEFAULT = True +else: + _LOW_CPU_MEM_USAGE_DEFAULT = False + + +if is_accelerate_available(): + import accelerate + from accelerate.utils import set_module_tensor_to_device + from accelerate.utils.versions import is_torch_version + + +def get_parameter_device(parameter: torch.nn.Module) -> torch.device: + try: + parameters_and_buffers = itertools.chain(parameter.parameters(), parameter.buffers()) + return next(parameters_and_buffers).device + except StopIteration: + # For torch.nn.DataParallel compatibility in PyTorch 1.5 + + def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = parameter._named_members(get_members_fn=find_tensor_attributes) + first_tuple = next(gen) + return first_tuple[1].device + + +def get_parameter_dtype(parameter: torch.nn.Module) -> torch.dtype: + try: + params = tuple(parameter.parameters()) + if len(params) > 0: + return params[0].dtype + + buffers = tuple(parameter.buffers()) + if len(buffers) > 0: + return buffers[0].dtype + + except StopIteration: + # For torch.nn.DataParallel compatibility in PyTorch 1.5 + + def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = parameter._named_members(get_members_fn=find_tensor_attributes) + first_tuple = next(gen) + return first_tuple[1].dtype + + +def load_state_dict(checkpoint_file: Union[str, os.PathLike], variant: Optional[str] = None): + """ + Reads a checkpoint 
file, returning properly formatted errors if they arise. + """ + try: + file_extension = os.path.basename(checkpoint_file).split(".")[-1] + if file_extension == SAFETENSORS_FILE_EXTENSION: + return safetensors.torch.load_file(checkpoint_file, device="cpu") + else: + return torch.load(checkpoint_file, map_location="cpu") + except Exception as e: + try: + with open(checkpoint_file) as f: + if f.read().startswith("version"): + raise OSError( + "You seem to have cloned a repository without having git-lfs installed. Please install " + "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " + "you cloned." + ) + else: + raise ValueError( + f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained " + "model. Make sure you have saved the model properly." + ) from e + except (UnicodeDecodeError, ValueError): + raise OSError( + f"Unable to load weights from checkpoint file for '{checkpoint_file}' " f"at '{checkpoint_file}'. " + ) + + +def load_model_dict_into_meta( + model, + state_dict: OrderedDict, + device: Optional[Union[str, torch.device]] = None, + dtype: Optional[Union[str, torch.dtype]] = None, + model_name_or_path: Optional[str] = None, +) -> List[str]: + device = device or torch.device("cpu") + dtype = dtype or torch.float32 + + accepts_dtype = "dtype" in set(inspect.signature(set_module_tensor_to_device).parameters.keys()) + + unexpected_keys = [] + empty_state_dict = model.state_dict() + for param_name, param in state_dict.items(): + if param_name not in empty_state_dict: + unexpected_keys.append(param_name) + continue + + if empty_state_dict[param_name].shape != param.shape: + model_name_or_path_str = f"{model_name_or_path} " if model_name_or_path is not None else "" + raise ValueError( + f"Cannot load {model_name_or_path_str}because {param_name} expected shape {empty_state_dict[param_name]}, but got {param.shape}. If you want to instead overwrite randomly initialized weights, please make sure to pass both `low_cpu_mem_usage=False` and `ignore_mismatched_sizes=True`. For more information, see also: https://github.com/huggingface/diffusers/issues/1619#issuecomment-1345604389 as an example." + ) + + if accepts_dtype: + set_module_tensor_to_device(model, param_name, device, value=param, dtype=dtype) + else: + set_module_tensor_to_device(model, param_name, device, value=param) + return unexpected_keys + + +def _load_state_dict_into_model(model_to_load, state_dict: OrderedDict) -> List[str]: + # Convert old format to new format if needed from a PyTorch state_dict + # copy state_dict so _load_from_state_dict can modify it + state_dict = state_dict.copy() + error_msgs = [] + + # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants + # so we need to apply the function recursively. + def load(module: torch.nn.Module, prefix: str = ""): + args = (state_dict, prefix, {}, True, [], [], error_msgs) + module._load_from_state_dict(*args) + + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + ".") + + load(model_to_load) + + return error_msgs + + +class ModelMixin(torch.nn.Module, PushToHubMixin): + r""" + Base class for all models. + + [`ModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and + saving models. + + - **config_name** ([`str`]) -- Filename to save a model to when calling [`~models.ModelMixin.save_pretrained`]. 
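+
+    A minimal usage sketch (subclasses such as `UNet2DConditionModel` inherit these helpers; the
+    checkpoint id below mirrors the examples used elsewhere in this file):
+
+    ```py
+    from diffusers import UNet2DConditionModel
+
+    unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
+    print(unet.device, unet.dtype, unet.num_parameters())
+    ```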
+ """ + + config_name = CONFIG_NAME + _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"] + _supports_gradient_checkpointing = False + _keys_to_ignore_on_load_unexpected = None + + def __init__(self): + super().__init__() + + def __getattr__(self, name: str) -> Any: + """The only reason we overwrite `getattr` here is to gracefully deprecate accessing + config attributes directly. See https://github.com/huggingface/diffusers/pull/3129 We need to overwrite + __getattr__ here in addition so that we don't trigger `torch.nn.Module`'s __getattr__': + https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module + """ + + is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) + is_attribute = name in self.__dict__ + + if is_in_config and not is_attribute: + deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'unet.config.{name}'." + deprecate("direct config name access", "1.0.0", deprecation_message, standard_warn=False, stacklevel=3) + return self._internal_dict[name] + + # call PyTorch's https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module + return super().__getattr__(name) + + @property + def is_gradient_checkpointing(self) -> bool: + """ + Whether gradient checkpointing is activated for this model or not. + """ + return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules()) + + def enable_gradient_checkpointing(self) -> None: + """ + Activates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or + *checkpoint activations* in other frameworks). + """ + if not self._supports_gradient_checkpointing: + raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.") + self.apply(partial(self._set_gradient_checkpointing, value=True)) + + def disable_gradient_checkpointing(self) -> None: + """ + Deactivates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or + *checkpoint activations* in other frameworks). + """ + if self._supports_gradient_checkpointing: + self.apply(partial(self._set_gradient_checkpointing, value=False)) + + def set_use_memory_efficient_attention_xformers( + self, valid: bool, attention_op: Optional[Callable] = None + ) -> None: + # Recursively walk through all the children. + # Any children which exposes the set_use_memory_efficient_attention_xformers method + # gets the message + def fn_recursive_set_mem_eff(module: torch.nn.Module): + if hasattr(module, "set_use_memory_efficient_attention_xformers"): + module.set_use_memory_efficient_attention_xformers(valid, attention_op) + + for child in module.children(): + fn_recursive_set_mem_eff(child) + + for module in self.children(): + if isinstance(module, torch.nn.Module): + fn_recursive_set_mem_eff(module) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None) -> None: + r""" + Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). + + When this option is enabled, you should observe lower GPU memory usage and a potential speed up during + inference. Speed up during training is not guaranteed. + + + + ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes + precedent. 
+ + + + Parameters: + attention_op (`Callable`, *optional*): + Override the default `None` operator for use as `op` argument to the + [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention) + function of xFormers. + + Examples: + + ```py + >>> import torch + >>> from diffusers import UNet2DConditionModel + >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp + + >>> model = UNet2DConditionModel.from_pretrained( + ... "stabilityai/stable-diffusion-2-1", subfolder="unet", torch_dtype=torch.float16 + ... ) + >>> model = model.to("cuda") + >>> model.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) + ``` + """ + self.set_use_memory_efficient_attention_xformers(True, attention_op) + + def disable_xformers_memory_efficient_attention(self) -> None: + r""" + Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). + """ + self.set_use_memory_efficient_attention_xformers(False) + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + is_main_process: bool = True, + save_function: Optional[Callable] = None, + safe_serialization: bool = True, + variant: Optional[str] = None, + push_to_hub: bool = False, + **kwargs, + ): + """ + Save a model and its configuration file to a directory so that it can be reloaded using the + [`~models.ModelMixin.from_pretrained`] class method. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save a model and its configuration file to. Will be created if it doesn't exist. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful during distributed training and you + need to call this function on all processes. In this case, set `is_main_process=True` only on the main + process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful during distributed training when you need to + replace `torch.save` with another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. + variant (`str`, *optional*): + If specified, weights are saved in the format `pytorch_model..bin`. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
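+
+        Example (a minimal sketch; the output directory below is illustrative):
+
+        ```py
+        from diffusers import UNet2DConditionModel
+
+        unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
+        # writes the model config plus the weights (safetensors by default) into the directory
+        unet.save_pretrained("./sd-unet")
+        ```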
+ """ + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + os.makedirs(save_directory, exist_ok=True) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + private = kwargs.pop("private", False) + create_pr = kwargs.pop("create_pr", False) + token = kwargs.pop("token", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id + + # Only save the model itself if we are using distributed training + model_to_save = self + + # Attach architecture to the config + # Save the config + if is_main_process: + model_to_save.save_config(save_directory) + + # Save the model + state_dict = model_to_save.state_dict() + + weights_name = SAFETENSORS_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME + weights_name = _add_variant(weights_name, variant) + + # Save the model + if safe_serialization: + safetensors.torch.save_file( + state_dict, os.path.join(save_directory, weights_name), metadata={"format": "pt"} + ) + else: + torch.save(state_dict, os.path.join(save_directory, weights_name)) + + logger.info(f"Model weights saved in {os.path.join(save_directory, weights_name)}") + + if push_to_hub: + # Create a new empty model card and eventually tag it + model_card = load_or_create_model_card(repo_id, token=token) + model_card = populate_model_card(model_card) + model_card.save(os.path.join(save_directory, "README.md")) + + self._upload_folder( + save_directory, + repo_id, + token=token, + commit_message=commit_message, + create_pr=create_pr, + ) + + @classmethod + @validate_hf_hub_args + def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): + r""" + Instantiate a pretrained PyTorch model from a pretrained model configuration. + + The model is set in evaluation mode - `model.eval()` - by default, and dropout modules are deactivated. To + train the model, set it back in training mode with `model.train()`. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved + with [`~ModelMixin.save_pretrained`]. + + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the + dtype is automatically derived from the model's weights. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
+ output_loading_info (`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only(`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + from_flax (`bool`, *optional*, defaults to `False`): + Load the model weights from a Flax checkpoint save file. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you're downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be defined for each + parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the + same device. + + Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier for the maximum memory. Will default to the maximum memory available for + each GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + The path to offload weights if `device_map` contains the value `"disk"`. + offload_state_dict (`bool`, *optional*): + If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if + the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` + when there is some disk offload. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + variant (`str`, *optional*): + Load weights from a specified `variant` filename such as `"fp16"` or `"ema"`. This is ignored when + loading `from_flax`. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the `safetensors` weights are downloaded if they're available **and** if the + `safetensors` library is installed. If set to `True`, the model is forcibly loaded from `safetensors` + weights. If set to `False`, `safetensors` weights are not loaded. + + + + To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with + `huggingface-cli login`. 
You can also activate the special + ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a + firewalled environment. + + + + Example: + + ```py + from diffusers import UNet2DConditionModel + + unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet") + ``` + + If you get the error message below, you need to finetune the weights for your downstream task: + + ```bash + Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: + - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated + You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. + ``` + """ + cache_dir = kwargs.pop("cache_dir", None) + ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False) + force_download = kwargs.pop("force_download", False) + from_flax = kwargs.pop("from_flax", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + output_loading_info = kwargs.pop("output_loading_info", False) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + torch_dtype = kwargs.pop("torch_dtype", None) + subfolder = kwargs.pop("subfolder", None) + device_map = kwargs.pop("device_map", None) + max_memory = kwargs.pop("max_memory", None) + offload_folder = kwargs.pop("offload_folder", None) + offload_state_dict = kwargs.pop("offload_state_dict", False) + low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) + variant = kwargs.pop("variant", None) + use_safetensors = kwargs.pop("use_safetensors", None) + + allow_pickle = False + if use_safetensors is None: + use_safetensors = True + allow_pickle = True + + if low_cpu_mem_usage and not is_accelerate_available(): + low_cpu_mem_usage = False + logger.warning( + "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" + " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" + " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" + " install accelerate\n```\n." + ) + + if device_map is not None and not is_accelerate_available(): + raise NotImplementedError( + "Loading and dispatching requires `accelerate`. Please make sure to install accelerate or set" + " `device_map=None`. You can install accelerate with `pip install accelerate`." + ) + + # Check if we can handle device_map and dispatching the weights + if device_map is not None and not is_torch_version(">=", "1.9.0"): + raise NotImplementedError( + "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set" + " `device_map=None`." + ) + + if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): + raise NotImplementedError( + "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" + " `low_cpu_mem_usage=False`." + ) + + if low_cpu_mem_usage is False and device_map is not None: + raise ValueError( + f"You cannot set `low_cpu_mem_usage` to `False` while using device_map={device_map} for loading and" + " dispatching. Please make sure to set `low_cpu_mem_usage=True`." 
+ ) + + # Load config if we don't provide a configuration + config_path = pretrained_model_name_or_path + + user_agent = { + "diffusers": __version__, + "file_type": "model", + "framework": "pytorch", + } + + # load config + config, unused_kwargs, commit_hash = cls.load_config( + config_path, + cache_dir=cache_dir, + return_unused_kwargs=True, + return_commit_hash=True, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + device_map=device_map, + max_memory=max_memory, + offload_folder=offload_folder, + offload_state_dict=offload_state_dict, + user_agent=user_agent, + **kwargs, + ) + + # load model + model_file = None + if from_flax: + model_file = _get_model_file( + pretrained_model_name_or_path, + weights_name=FLAX_WEIGHTS_NAME, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + commit_hash=commit_hash, + ) + model = cls.from_config(config, **unused_kwargs) + + # Convert the weights + from .modeling_pytorch_flax_utils import load_flax_checkpoint_in_pytorch_model + + model = load_flax_checkpoint_in_pytorch_model(model, model_file) + else: + if use_safetensors: + try: + model_file = _get_model_file( + pretrained_model_name_or_path, + weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant), + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + commit_hash=commit_hash, + ) + except IOError as e: + if not allow_pickle: + raise e + pass + if model_file is None: + model_file = _get_model_file( + pretrained_model_name_or_path, + weights_name=_add_variant(WEIGHTS_NAME, variant), + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + commit_hash=commit_hash, + ) + + if low_cpu_mem_usage: + # Instantiate model with empty weights + with accelerate.init_empty_weights(): + model = cls.from_config(config, **unused_kwargs) + + # if device_map is None, load the state dict and move the params from meta device to the cpu + if device_map is None: + param_device = "cpu" + state_dict = load_state_dict(model_file, variant=variant) + model._convert_deprecated_attention_blocks(state_dict) + # move the params from meta device to cpu + missing_keys = set(model.state_dict().keys()) - set(state_dict.keys()) + if len(missing_keys) > 0: + raise ValueError( + f"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are" + f" missing: \n {', '.join(missing_keys)}. \n Please make sure to pass" + " `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize" + " those weights or else make sure your checkpoint file is correct." 
+ ) + + unexpected_keys = load_model_dict_into_meta( + model, + state_dict, + device=param_device, + dtype=torch_dtype, + model_name_or_path=pretrained_model_name_or_path, + ) + + if cls._keys_to_ignore_on_load_unexpected is not None: + for pat in cls._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}" + ) + + else: # else let accelerate handle loading and dispatching. + # Load weights and dispatch according to the device_map + # by default the device_map is None and the weights are loaded on the CPU + try: + accelerate.load_checkpoint_and_dispatch( + model, + model_file, + device_map, + max_memory=max_memory, + offload_folder=offload_folder, + offload_state_dict=offload_state_dict, + dtype=torch_dtype, + ) + except AttributeError as e: + # When using accelerate loading, we do not have the ability to load the state + # dict and rename the weight names manually. Additionally, accelerate skips + # torch loading conventions and directly writes into `module.{_buffers, _parameters}` + # (which look like they should be private variables?), so we can't use the standard hooks + # to rename parameters on load. We need to mimic the original weight names so the correct + # attributes are available. After we have loaded the weights, we convert the deprecated + # names to the new non-deprecated names. Then we _greatly encourage_ the user to convert + # the weights so we don't have to do this again. + + if "'Attention' object has no attribute" in str(e): + logger.warning( + f"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path}" + " was saved with deprecated attention block weight names. We will load it with the deprecated attention block" + " names and convert them on the fly to the new attention block format. Please re-save the model after this conversion," + " so we don't have to do the on the fly renaming in the future. If the model is from a hub checkpoint," + " please also re-upload it or open a PR on the original repository." + ) + model._temp_convert_self_to_deprecated_attention_blocks() + accelerate.load_checkpoint_and_dispatch( + model, + model_file, + device_map, + max_memory=max_memory, + offload_folder=offload_folder, + offload_state_dict=offload_state_dict, + dtype=torch_dtype, + ) + model._undo_temp_convert_self_to_deprecated_attention_blocks() + else: + raise e + + loading_info = { + "missing_keys": [], + "unexpected_keys": [], + "mismatched_keys": [], + "error_msgs": [], + } + else: + model = cls.from_config(config, **unused_kwargs) + + state_dict = load_state_dict(model_file, variant=variant) + model._convert_deprecated_attention_blocks(state_dict) + + model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model( + model, + state_dict, + model_file, + pretrained_model_name_or_path, + ignore_mismatched_sizes=ignore_mismatched_sizes, + ) + + loading_info = { + "missing_keys": missing_keys, + "unexpected_keys": unexpected_keys, + "mismatched_keys": mismatched_keys, + "error_msgs": error_msgs, + } + + if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype): + raise ValueError( + f"{torch_dtype} needs to be of type `torch.dtype`, e.g. `torch.float16`, but is {type(torch_dtype)}." 
+ ) + elif torch_dtype is not None: + model = model.to(torch_dtype) + + model.register_to_config(_name_or_path=pretrained_model_name_or_path) + + # Set model in evaluation mode to deactivate DropOut modules by default + model.eval() + if output_loading_info: + return model, loading_info + + return model + + @classmethod + def _load_pretrained_model( + cls, + model, + state_dict: OrderedDict, + resolved_archive_file, + pretrained_model_name_or_path: Union[str, os.PathLike], + ignore_mismatched_sizes: bool = False, + ): + # Retrieve missing & unexpected_keys + model_state_dict = model.state_dict() + loaded_keys = list(state_dict.keys()) + + expected_keys = list(model_state_dict.keys()) + + original_loaded_keys = loaded_keys + + missing_keys = list(set(expected_keys) - set(loaded_keys)) + unexpected_keys = list(set(loaded_keys) - set(expected_keys)) + + # Make sure we are able to load base models as well as derived models (with heads) + model_to_load = model + + def _find_mismatched_keys( + state_dict, + model_state_dict, + loaded_keys, + ignore_mismatched_sizes, + ): + mismatched_keys = [] + if ignore_mismatched_sizes: + for checkpoint_key in loaded_keys: + model_key = checkpoint_key + + if ( + model_key in model_state_dict + and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape + ): + mismatched_keys.append( + (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) + ) + del state_dict[checkpoint_key] + return mismatched_keys + + if state_dict is not None: + # Whole checkpoint + mismatched_keys = _find_mismatched_keys( + state_dict, + model_state_dict, + original_loaded_keys, + ignore_mismatched_sizes, + ) + error_msgs = _load_state_dict_into_model(model_to_load, state_dict) + + if len(error_msgs) > 0: + error_msg = "\n\t".join(error_msgs) + if "size mismatch" in error_msg: + error_msg += ( + "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method." + ) + raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}") + + if len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" + f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" + f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task" + " or with another architecture (e.g. initializing a BertForSequenceClassification model from a" + " BertForPreTraining model).\n- This IS NOT expected if you are initializing" + f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly" + " identical (initializing a BertForSequenceClassification model from a" + " BertForSequenceClassification model)." + ) + else: + logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") + if len(missing_keys) > 0: + logger.warning( + f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" + f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" + " TRAIN this model on a down-stream task to be able to use it for predictions and inference." 
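The key bookkeeping in _load_pretrained_model reduces to set differences plus a shape comparison; a toy illustration with made-up state dicts:

import torch

model_state = {"conv.weight": torch.zeros(4, 3, 3, 3), "conv.bias": torch.zeros(4)}
ckpt_state  = {"conv.weight": torch.zeros(8, 3, 3, 3), "norm.weight": torch.zeros(4)}

missing    = sorted(set(model_state) - set(ckpt_state))   # ['conv.bias']
unexpected = sorted(set(ckpt_state) - set(model_state))   # ['norm.weight']
mismatched = [                                            # only reported when ignore_mismatched_sizes=True
    (k, ckpt_state[k].shape, model_state[k].shape)
    for k in ckpt_state
    if k in model_state and ckpt_state[k].shape != model_state[k].shape
]
print(missing, unexpected, mismatched)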
+ ) + elif len(mismatched_keys) == 0: + logger.info( + f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" + f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the" + f" checkpoint was trained on, you can already use {model.__class__.__name__} for predictions" + " without further training." + ) + if len(mismatched_keys) > 0: + mismatched_warning = "\n".join( + [ + f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" + for key, shape1, shape2 in mismatched_keys + ] + ) + logger.warning( + f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" + f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" + f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be" + " able to use it for predictions and inference." + ) + + return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs + + @property + def device(self) -> torch.device: + """ + `torch.device`: The device on which the module is (assuming that all the module parameters are on the same + device). + """ + return get_parameter_device(self) + + @property + def dtype(self) -> torch.dtype: + """ + `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). + """ + return get_parameter_dtype(self) + + def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int: + """ + Get number of (trainable or non-embedding) parameters in the module. + + Args: + only_trainable (`bool`, *optional*, defaults to `False`): + Whether or not to return only the number of trainable parameters. + exclude_embeddings (`bool`, *optional*, defaults to `False`): + Whether or not to return only the number of non-embedding parameters. + + Returns: + `int`: The number of parameters. 
+ + Example: + + ```py + from diffusers import UNet2DConditionModel + + model_id = "runwayml/stable-diffusion-v1-5" + unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet") + unet.num_parameters(only_trainable=True) + 859520964 + ``` + """ + + if exclude_embeddings: + embedding_param_names = [ + f"{name}.weight" + for name, module_type in self.named_modules() + if isinstance(module_type, torch.nn.Embedding) + ] + non_embedding_parameters = [ + parameter for name, parameter in self.named_parameters() if name not in embedding_param_names + ] + return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable) + else: + return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable) + + def _convert_deprecated_attention_blocks(self, state_dict: OrderedDict) -> None: + deprecated_attention_block_paths = [] + + def recursive_find_attn_block(name, module): + if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block: + deprecated_attention_block_paths.append(name) + + for sub_name, sub_module in module.named_children(): + sub_name = sub_name if name == "" else f"{name}.{sub_name}" + recursive_find_attn_block(sub_name, sub_module) + + recursive_find_attn_block("", self) + + # NOTE: we have to check if the deprecated parameters are in the state dict + # because it is possible we are loading from a state dict that was already + # converted + + for path in deprecated_attention_block_paths: + # group_norm path stays the same + + # query -> to_q + if f"{path}.query.weight" in state_dict: + state_dict[f"{path}.to_q.weight"] = state_dict.pop(f"{path}.query.weight") + if f"{path}.query.bias" in state_dict: + state_dict[f"{path}.to_q.bias"] = state_dict.pop(f"{path}.query.bias") + + # key -> to_k + if f"{path}.key.weight" in state_dict: + state_dict[f"{path}.to_k.weight"] = state_dict.pop(f"{path}.key.weight") + if f"{path}.key.bias" in state_dict: + state_dict[f"{path}.to_k.bias"] = state_dict.pop(f"{path}.key.bias") + + # value -> to_v + if f"{path}.value.weight" in state_dict: + state_dict[f"{path}.to_v.weight"] = state_dict.pop(f"{path}.value.weight") + if f"{path}.value.bias" in state_dict: + state_dict[f"{path}.to_v.bias"] = state_dict.pop(f"{path}.value.bias") + + # proj_attn -> to_out.0 + if f"{path}.proj_attn.weight" in state_dict: + state_dict[f"{path}.to_out.0.weight"] = state_dict.pop(f"{path}.proj_attn.weight") + if f"{path}.proj_attn.bias" in state_dict: + state_dict[f"{path}.to_out.0.bias"] = state_dict.pop(f"{path}.proj_attn.bias") + + def _temp_convert_self_to_deprecated_attention_blocks(self) -> None: + deprecated_attention_block_modules = [] + + def recursive_find_attn_block(module): + if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block: + deprecated_attention_block_modules.append(module) + + for sub_module in module.children(): + recursive_find_attn_block(sub_module) + + recursive_find_attn_block(self) + + for module in deprecated_attention_block_modules: + module.query = module.to_q + module.key = module.to_k + module.value = module.to_v + module.proj_attn = module.to_out[0] + + # We don't _have_ to delete the old attributes, but it's helpful to ensure + # that _all_ the weights are loaded into the new attributes and we're not + # making an incorrect assumption that this model should be converted when + # it really shouldn't be. 
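_convert_deprecated_attention_blocks is a pure state-dict rename; a small stand-alone demo of the same mapping, using a made-up block path:

import torch

state_dict = {f"mid_block.attn.{k}.weight": torch.zeros(8, 8)
              for k in ("query", "key", "value", "proj_attn")}
renames = {"query": "to_q", "key": "to_k", "value": "to_v", "proj_attn": "to_out.0"}
path = "mid_block.attn"
for old, new in renames.items():
    for suffix in ("weight", "bias"):
        old_key = f"{path}.{old}.{suffix}"
        if old_key in state_dict:   # guard: the checkpoint may already be converted
            state_dict[f"{path}.{new}.{suffix}"] = state_dict.pop(old_key)
print(sorted(state_dict))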
+ del module.to_q + del module.to_k + del module.to_v + del module.to_out + + def _undo_temp_convert_self_to_deprecated_attention_blocks(self) -> None: + deprecated_attention_block_modules = [] + + def recursive_find_attn_block(module) -> None: + if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block: + deprecated_attention_block_modules.append(module) + + for sub_module in module.children(): + recursive_find_attn_block(sub_module) + + recursive_find_attn_block(self) + + for module in deprecated_attention_block_modules: + module.to_q = module.query + module.to_k = module.key + module.to_v = module.value + module.to_out = nn.ModuleList([module.proj_attn, nn.Dropout(module.dropout)]) + + del module.query + del module.key + del module.value + del module.proj_attn diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/Welford.h b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/Welford.h new file mode 100644 index 000000000..f7b955371 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/Welford.h @@ -0,0 +1,94 @@ +#pragma once +#ifndef WELFORD_H +#define WELFORD_H + +#include // C10_HOST_DEVICE + +// copied from https://github.com/pytorch/pytorch/blob/b8307513e57f8beaf99daff342a23d705a417e11/aten/src/ATen/native/SharedReduceOps.h +template +struct WelfordData { + scalar_t mean; + scalar_t m2; + index_t n; + scalar_t nf; + + C10_HOST_DEVICE WelfordData() : mean(0), m2(0), n(0), nf(0) {} + + C10_HOST_DEVICE WelfordData( + scalar_t mean, + scalar_t m2, + index_t n, + scalar_t nf) + : mean(mean), m2(m2), n(n), nf(nf) {} +}; + +// copied from https://github.com/pytorch/pytorch/blob/b8307513e57f8beaf99daff342a23d705a417e11/aten/src/ATen/native/SharedReduceOps.h +template +struct WelfordOps { + acc_scalar_t correction; + bool take_sqrt; + public: + using acc_t = WelfordData; + inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data) const { + // We accumulate n in index_t to avoid cumulative rounding error, but still + // need nf for use in combine where int32 may overflow. + index_t new_n = acc.n + 1; + acc_scalar_t new_nf = static_cast(new_n); + + acc_scalar_t delta = data - acc.mean; + + acc_scalar_t new_mean = acc.mean + delta / new_nf; + acc_scalar_t new_delta = data - new_mean; + return { + new_mean, + acc.m2 + delta * new_delta, + new_n, + new_nf, + }; + } + inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const { + if (a.nf == 0) { + return b; + } + if (b.nf == 0) { + return a; + } + acc_scalar_t delta = b.mean - a.mean; + acc_scalar_t new_count = a.nf + b.nf; + acc_scalar_t nb_over_n = b.nf / new_count; + return { + a.mean + delta * nb_over_n, + a.m2 + b.m2 + delta * delta * a.nf * nb_over_n, + // setting acc.n as -1 since acc.n might not be able to represent the count + // correctly within its range, setting it to -1 to avoid confusion + -1, + new_count + }; + } + inline C10_DEVICE res_t project(acc_t acc) const __ubsan_ignore_float_divide_by_zero__ { + const auto mean = static_cast(acc.mean); + const auto divisor = acc.nf > correction ? acc.nf - correction : 0; + const auto var = acc.m2 / divisor; + res_t results(take_sqrt ? 
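The WelfordOps combine above is Chan's parallel mean/variance merge; a Python restatement of the same rule, checked against a direct computation (correction = 0, as the forward kernels use):

import torch

def welford_combine(a, b):
    mean_a, m2_a, n_a = a
    mean_b, m2_b, n_b = b
    if n_a == 0:
        return b
    if n_b == 0:
        return a
    n = n_a + n_b
    delta = mean_b - mean_a
    nb_over_n = n_b / n
    return (mean_a + delta * nb_over_n,
            m2_a + m2_b + delta * delta * n_a * nb_over_n,
            n)

x = torch.randn(1000)
parts = [(c.mean().item(), ((c - c.mean()) ** 2).sum().item(), c.numel()) for c in x.chunk(4)]
mean, m2, n = parts[0]
for p in parts[1:]:
    mean, m2, n = welford_combine((mean, m2, n), p)
var = m2 / n   # correction = 0 -> population variance, as used for rstd = 1/sqrt(var + eps)
print(abs(mean - x.mean().item()) < 1e-5, abs(var - x.var(unbiased=False).item()) < 1e-4)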
std::sqrt(var) : var, mean); + return results; + } + + static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) { + return acc; + } + +#if defined(__CUDACC__) || defined(__HIPCC__) + inline __device__ acc_t warp_shfl_down(acc_t acc, int offset) const { + return { + WARP_SHFL_DOWN(acc.mean, offset) + , WARP_SHFL_DOWN(acc.m2, offset) + , WARP_SHFL_DOWN(acc.n, offset) + , WARP_SHFL_DOWN(acc.nf, offset) + }; + } +#endif + C10_HOST_DEVICE WelfordOps(acc_scalar_t correction, bool take_sqrt) + : correction(correction), take_sqrt(take_sqrt) {} +}; + +#endif diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/custom_gn.cpp b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/custom_gn.cpp new file mode 100644 index 000000000..7aeec0187 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/custom_gn.cpp @@ -0,0 +1,90 @@ +#include +#include +#include +#include +#include +#include "gn_kernel.h" + +#define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") + +std::tuple gn_nhwc_fwd( + const at::Tensor X, + const at::Tensor weight, + const at::Tensor bias, + const int64_t G, + double eps, + const int64_t act_fn_option) { + CHECK_CUDA(X); + CHECK_CUDA(weight); + CHECK_CUDA(bias); + const int N = X.size(0); + const int C = X.size(1); + const int H = X.size(2); + const int W = X.size(3); + + at::Tensor X_nhwc = X.permute({0, 2, 3, 1}); + at::Tensor X_out = at::empty_like(X_nhwc); + at::Tensor means = at::empty({N, G}, weight.options()); + at::Tensor rstds = at::empty({N, G}, weight.options()); + + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + X.scalar_type(), + "group_norm_nhwc_forward", [&]() { + run_gn_fwd_kernels( + X_nhwc.const_data_ptr(), + weight.const_data_ptr(), bias.const_data_ptr(), + N, H, W, C, G, static_cast(eps), act_fn_option, + X_out.mutable_data_ptr(), means.mutable_data_ptr(), rstds.mutable_data_ptr() + ); + }); + return {X_out.permute({0, 3, 1, 2}), means, rstds}; +} + +std::tuple gn_nhwc_bwd( + const at::Tensor dy, + const at::Tensor X, + const at::Tensor weight, + const at::Tensor bias, + const at::Tensor means, + const at::Tensor rstds, + const int64_t G, + const int64_t act_fn_option) { + CHECK_CUDA(dy); + CHECK_CUDA(X); + CHECK_CUDA(weight); + CHECK_CUDA(bias); + CHECK_CUDA(means); + CHECK_CUDA(rstds); + + const int N = X.size(0); + const int C = X.size(1); + const int H = X.size(2); + const int W = X.size(3); + at::Tensor dy_nhwc = dy.permute({0, 2, 3, 1}); + at::Tensor X_nhwc = X.permute({0, 2, 3, 1}); + at::Tensor dX = at::empty_like(X_nhwc); + at::Tensor dweight = at::empty({C}, X.options()); + at::Tensor dbias = at::empty({C}, X.options()); + + AT_DISPATCH_FLOATING_TYPES_AND2( + c10::ScalarType::Half, + c10::ScalarType::BFloat16, + X.scalar_type(), + "group_norm_nhwc_backward", [&]() { + run_gn_bwd_kernels( + dy_nhwc.const_data_ptr(), X_nhwc.const_data_ptr(), + weight.const_data_ptr(), bias.const_data_ptr(), + means.const_data_ptr(), rstds.const_data_ptr(), + N, H, W, C, G, act_fn_option, + dX.mutable_data_ptr(), dweight.mutable_data_ptr(), 
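With the identity activation, gn_nhwc_fwd computes ordinary group normalization on a channels_last tensor (permute to NHWC, normalize per (n, g), permute back). A pure-PyTorch reference sketch that also mirrors the (N, G) mean/rstd buffers, not the kernel itself:

import torch
import torch.nn.functional as F

N, C, H, W, G, eps = 2, 32, 8, 8, 8, 1e-5
x = torch.randn(N, C, H, W).to(memory_format=torch.channels_last)
weight, bias = torch.randn(C), torch.randn(C)

ref = F.group_norm(x, G, weight, bias, eps)

xg = x.reshape(N, G, -1)
mean = xg.mean(dim=2)                                   # like `means`, shape (N, G)
rstd = (xg.var(dim=2, unbiased=False) + eps).rsqrt()    # like `rstds`, shape (N, G)
y = (xg - mean[..., None]) * rstd[..., None]
y = y.reshape(N, C, H, W) * weight.view(1, C, 1, 1) + bias.view(1, C, 1, 1)
print(torch.allclose(y, ref, atol=1e-4))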
dbias.mutable_data_ptr() + ); + }); + return {dX.permute({0, 3, 1, 2}), dweight, dbias}; +} + +TORCH_LIBRARY(gnop, m) { + m.def("fwd", &gn_nhwc_fwd); + m.def("bwd", &gn_nhwc_bwd); +} diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/custom_gn.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/custom_gn.py new file mode 100644 index 000000000..bf50c37e9 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/custom_gn.py @@ -0,0 +1,367 @@ +from tqdm import tqdm +import torch.nn.functional as F +import torch.nn as nn +import numpy as np +import torch, datetime, time, os, itertools +torch.set_printoptions(sci_mode=False) +module_dir = os.path.dirname(os.path.abspath(__file__)) + +from torch.utils.cpp_extension import load +gn_op = load( + name="gn_op", + sources=[ + os.path.join(module_dir, "custom_gn.cpp"), + os.path.join(module_dir, "gn_kernel.cu"), + #os.path.join(module_dir, "nchw_kernel.cu") + ], + extra_cuda_cflags=[ + '-use_fast_math', + '-lineinfo', # useful for profiling + ], + extra_cflags=[ + '-O3', # needed or else GN NCHW from source is slower than nn.GroupNorm + '-funroll-all-loops', + '-march=native', + ], + is_python_module=False, + verbose=True, + ) + +class GN_NHWC_Func(torch.autograd.Function): + @staticmethod + def forward(ctx, X: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, G: int, eps: float, activation: str): + X_out, means, rstds = torch.ops.gnop.fwd(X, weight, bias, G, eps, activation) + ctx.save_for_backward(X, weight, bias, means, rstds) + ctx.G = G + ctx.activation = activation + return X_out + + @staticmethod + def backward(ctx, dy: torch.Tensor): + dy = dy.contiguous(memory_format=torch.channels_last) + X, weight, bias, means, rstds = ctx.saved_tensors + dx, dgamma, dbeta = torch.ops.gnop.bwd(dy, X, weight, bias, means, rstds, ctx.G, ctx.activation) + return dx, dgamma, dbeta, None, None, None + +class GN_NHWC(nn.GroupNorm): + def __init__(self, num_groups: int, nc: int, activation='identity', **kwargs): + super().__init__(num_groups, nc, **kwargs) + assert activation in {'identity', 'silu', 'relu', 'gelu', 'gelu_tanh'} + if activation == 'identity': + self.activation = 0 + if activation == 'relu': + self.activation = 1 + if activation == 'silu': + self.activation = 2 + if activation == 'gelu': + self.activation = 3 + if activation == 'gelu_tanh': + self.activation = 4 + + @torch._dynamo.disable + def forward(self, x): + #print(x.shape, self.num_channels) + if len(x.size()) == 3: + N, C, L = x.shape + elif len(x.size()) == 4: + N, C, H, W = x.shape + else: + raise ValueError + G = self.num_groups + + #if C // G > 512: + # raise ValueError(f'Error in fwd for X.shape={x.shape}, G={G}: C // G = {C // G} which is greater than 512. This input is not supported.') + + #if H * W % 8 != 0: + # raise ValueError(f'Error in fwd for X.shape={x.shape}, G={G}: H * W is not a multiple of 8. 
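A hedged usage sketch for the GN_NHWC module defined in this file: a drop-in for nn.GroupNorm on channels_last CUDA tensors with an optionally fused activation. It assumes the extension above compiled successfully and that this file is importable as custom_gn.

import torch
from custom_gn import GN_NHWC   # assumed import path for the module defined above

gn = GN_NHWC(num_groups=32, nc=320, activation='silu').cuda().half()
x = torch.randn(2, 320, 64, 64, device='cuda', dtype=torch.half)
x = x.to(memory_format=torch.channels_last).requires_grad_(True)
y = gn(x)              # GroupNorm + SiLU through the fused forward kernel
y.sum().backward()     # gradients flow through GN_NHWC_Func.backward
print(y.shape, x.grad.shape)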
This input is not supported.') + + if self.affine: + return GN_NHWC_Func.apply(x, self.weight, self.bias, self.num_groups, self.eps, self.activation) + else: + w = torch.ones((self.num_channels,), device=x.device, dtype=x.dtype) + b = torch.zeros((self.num_channels,), device=x.device, dtype=x.dtype) + return GN_NHWC_Func.apply(x, w, b, self.num_groups, self.eps, self.activation) + +class GN_NCHW_Func(torch.autograd.Function): + @staticmethod + def forward(ctx, X: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, G: int, eps: float): + X_out, means, rstds = gn_op.nchwforward(X, weight, bias, G, eps) + ctx.save_for_backward(X, weight, means, rstds) + ctx.G = G + return X_out + + @staticmethod + def backward(ctx, dy): + dy = dy.contiguous() + X, weight, means, rstds = ctx.saved_tensors + dx, dgamma, dbeta = gn_op.nchwbackward(dy, X, weight, means, rstds, ctx.G) + return dx, dgamma, dbeta, None, None + +class GN_NCHW(nn.GroupNorm): + def __init__(self, num_groups: int, nc: int, **kwargs): + super().__init__(num_groups, nc, **kwargs) + + def forward(self, x): + if self.affine: + return GN_NCHW_Func.apply(x.contiguous(), self.weight, self.bias, self.num_groups, self.eps) + else: + w = torch.ones((self.num_channels,), device=x.device, dtype=x.dtype) + b = torch.zeros((self.num_channels,), device=x.device, dtype=x.dtype) + return GN_NCHW_Func.apply(x.contiguous(), w, b, self.num_groups, self.eps) + +def red(text): + return '\033[91m' + str(text) + '\033[0m' +def green(text): + return '\033[92m' + str(text) + '\033[0m' +def yellow(text): + return '\033[93m' + str(text) + '\033[0m' +def blue(text): + return '\033[94m' + str(text) + '\033[0m' + +def config_filter(x): # returns true if config is valid + DTYPE, B, C, R, G = x + if C % G != 0: + return False + + if R == 1: # this causes an autograd problem where it gets confused since the tensor is both contiguous in channels first/last format + return False + + if C / G > 512: # this isn't supported since it is assumed that at least one full group is processed per block in the fwd and the max threads per block is set to 512 + return False + + dtype_size = 2 if DTYPE in (torch.half, torch.bfloat16) else 4 # only care about 16/32-bit dtypes for now + estimated_mem_usage_gib = (25 * dtype_size * B * C * R * R) / 2**30 # this is just a rough estimate, likely wrong + if estimated_mem_usage_gib > 4: # vram filter + return False + return True + +if __name__ == '__main__': + ACT_FN = 'silu' + if ACT_FN == 'silu': + act_fn = F.silu + if ACT_FN == 'identity': + act_fn = lambda x: x + if ACT_FN == 'relu': + act_fn = F.relu + if ACT_FN == 'gelu': + act_fn = F.gelu + if ACT_FN == 'gelu_tanh': + act_fn = lambda x: F.gelu(x, approximate='tanh') + MODE = 'check' # can be 'check', 'bench', other modes do both + CHECK_PROF = False + + if MODE != 'bench': + #DTYPEs = (torch.bfloat16, torch.float, torch.double) + DTYPEs = (torch.float16,) + Bs = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 13, 16) + Cs = ( + 32, 64, 128, 256, 512, + 13, 140, 125, 961, + 160, 320, 640, 960, 1280, 1600, 1920, 2240, 2560 + ) + Rs = ( + 2, 3, 4, 5, 6, 7, 8, 9, 10, 17, + 8, 16, 64, 128, 256, 512, + 1024, + ) + Gs = (1, 2, 4, 8, 16, 32,) + all_params = itertools.product(DTYPEs, Bs, Cs, Rs, Gs) + + err_inputs = [] + for params in tqdm(sorted( + #filter(config_filter, all_params), + [ + (torch.float16, 2, 640, 16, 32), + (torch.float16, 2, 1280, 8, 32), + (torch.float16, 2, 2560, 8, 32), + (torch.float16, 2, 1280, 16, 32), + (torch.float16, 2, 320, 32, 32), + (torch.float16, 2, 1920, 16, 32), + 
(torch.float16, 2, 2560, 16, 32), + (torch.float16, 2, 640, 32, 32), + (torch.float16, 2, 960, 32, 32), + (torch.float16, 2, 1280, 32, 32), + (torch.float16, 2, 320, 64, 32), + (torch.float16, 2, 1920, 32, 32), + (torch.float16, 2, 640, 64, 32), + (torch.float16, 2, 960, 64, 32), + ], + key = lambda x: x[1]*x[2]*x[3]*x[4] + )): + DTYPE, B, C, R, G = params + #torch.cuda.empty_cache() + print(f'B: {B:<2} | C: {C:<4} | R: {R:<4} | G: {G:<3} | DTYPE: {DTYPE}') + x = torch.randn(B * C * R * R).reshape((B, C, R, R)).to(DTYPE, memory_format=torch.channels_last).cuda().requires_grad_(True) #* 1000 + #x = torch.arange(B * C * R * R).reshape((B, C, R, R)).to(DTYPE, memory_format=torch.channels_last).cuda().requires_grad_(True)/R/R/C/B-0.5 #* 1000 + torch.random.manual_seed(0) + + gn2 = GN_NHWC(G, C, activation=ACT_FN).cuda().to(DTYPE) + + if CHECK_PROF: + #g1 = gn1(x.contiguous()) + #g1sum = g1.sum() + #g1_grad_wrt_w = torch.autograd.grad(g1sum, gn1.weight, retain_graph=True)[0] + g2 = gn2(x) + #g2sum = g2.sum() + #g2_grad_wrt_w = torch.autograd.grad(g2sum, gn2.weight, retain_graph=True)[0] + else: + gn1 = nn.GroupNorm(G, C).float().cuda() + gn3 = nn.GroupNorm(G, C).cuda().to(DTYPE) + with torch.no_grad(): + w = torch.randn((C,), dtype=DTYPE) + b = torch.randn((C,), dtype=DTYPE) + gn1.weight.copy_(w.detach().float()) + gn1.bias.copy_(b.detach().float()) + gn2.weight.copy_(w.detach()) + gn2.bias.copy_(b.detach()) + gn3.weight.copy_(w.detach()) + gn3.bias.copy_(b.detach()) + + g1 = act_fn(gn1(x.float())) + g2 = gn2(x) + g3 = act_fn(gn3(x)) + rand_dy = torch.rand_like(g3) + rand_dy /= rand_dy.numel() ** 0.5 # to prevent false positive errors from ocurring because of really large magnitude losses + g1sum = (g1 * rand_dy).sum() + g2sum = (g2 * rand_dy).sum() + g3sum = (g3 * rand_dy).sum() + def print_err(act_float, act_testing, act_ref, left_pad=0): + with torch.no_grad(): + lpad = ' ' * left_pad + red_error = red('ERROR: ') + testing_err = F.mse_loss(act_float, act_testing) + expected_err = F.mse_loss(act_float, act_ref) + if testing_err.isnan() or testing_err / expected_err > 2 and testing_err > 1e-6: + print(red(f'{lpad}Your error: {testing_err}, expected error: {expected_err}')) + err_inputs.append((params, testing_err, expected_err)) + else: + print(f'{lpad}Negligible difference (testing err: {testing_err:.2e}, ref err: {expected_err:.2e}) found') + + print(' FORWARD') + print_err(g1, g2, g3, 4) + print(' BACKWARD') + print(' wrt weight') + g1_grad_wrt_w = torch.autograd.grad( + g1sum, gn1.weight, retain_graph=True)[0] + g2_grad_wrt_w = torch.autograd.grad( + g2sum, gn2.weight, retain_graph=True)[0] + g3_grad_wrt_w = torch.autograd.grad( + g3sum, gn3.weight, retain_graph=True)[0] + print_err(g1_grad_wrt_w, g2_grad_wrt_w, g3_grad_wrt_w, 6) + + + print(' wrt bias') + g1_grad_wrt_b = torch.autograd.grad( + g1sum, gn1.bias, retain_graph=True)[0] + g2_grad_wrt_b = torch.autograd.grad( + g2sum, gn2.bias, retain_graph=True)[0] + g3_grad_wrt_b = torch.autograd.grad( + g3sum, gn3.bias, retain_graph=True)[0] + print_err(g1_grad_wrt_b, g2_grad_wrt_b, g3_grad_wrt_b, 6) + + print(' wrt X') + g1_grad_wrt_x = torch.autograd.grad(g1sum, x, retain_graph=True)[0] + g2_grad_wrt_x = torch.autograd.grad(g2sum, x, retain_graph=True)[0] + g3_grad_wrt_x = torch.autograd.grad(g3sum, x, retain_graph=True)[0] + print_err(g1_grad_wrt_x, g2_grad_wrt_x, g3_grad_wrt_x, 6) + if len(err_inputs) > 0: + print(red('Error inputs found:')) + print('\n'.join(map(lambda x: f'{x[0]}, testing error: {x[1]:.2e}, expected error: {x[2]:.2e}', 
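The pass/fail rule encoded in print_err above, restated compactly: a configuration fails if the custom kernel's MSE against the float32 reference is NaN, or exceeds both 1e-6 and twice the MSE that torch's own low-precision GroupNorm incurs on the same input.

import math
import torch.nn.functional as F

def is_acceptable(act_float, act_testing, act_ref):
    testing_err = F.mse_loss(act_float, act_testing).item()
    expected_err = F.mse_loss(act_float, act_ref).item()
    if math.isnan(testing_err):
        return False
    return testing_err <= 1e-6 or testing_err / expected_err <= 2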
err_inputs))) + elif not CHECK_PROF: + print(green('No errors found :)')) + + if MODE != 'check': + NSEC = 1 # number of seconds that each kernel runs for on a certain input + DTYPES = [torch.bfloat16] + BATCHES = [1, 2, 4, 8, 16, 32] + #CHANNELS = [32, 64, 128, 256, 512] + CHANNELS = [320, 640, 960, 1920, 2560] + RESOLUTIONS = [4, 8, 16, 32, 64, 128, 256, 512] + #NUM_GROUPS = [4, 8, 16, 32, 64, 128] + NUM_GROUPS = [32] + BENCH = 'fwd' # can be 'fwd', 'bwd', anything else is fwd + bwd + GN_KERNELS = [ + #(GN_NHWC, 'GN NHWC fused (custom op)', gn_op.fwd_fused), + #(GN_NHWC, 'GN NHWC NH grid (custom op)', gn_op.fwd_NH_grid), + #(GN_NHWC, 'GN NHWC N grid (custom op)', gn_op.fwd_N_grid), + #(GN_NHWC, 'GN NHWC NG grid NG grid (custom op)', gn_op.fwd_NG_grid), + #(GN_NCHW, 'torch.nn GN NCHW (compiled from src)', gn_op.nchwforward), + (nn.GroupNorm, 'torch.nn GN NCHW', None), + #(GN_NCHW, 'torch.nn GN NCHW (compiled from src)', None), + (GN_NHWC, 'GN NHWC', None), + ] + + os.makedirs('csvs', exist_ok=True) + fname = datetime.datetime.now().strftime("csvs/%H-%M-%S-%d-%m-%Y.csv") + print(f'Writing to {fname}') + outfile = open(fname, 'w') + outfile.write('Kernel,B (batch),C (num channels),R (resolution),G (num groups), D (C/G),Speed (it/s; 25th percentile),Speed (it/s; 50th percentile),Speed (it/s; 75th percentile)\n') + + configs = list(filter(config_filter, itertools.product(DTYPES, BATCHES, CHANNELS, RESOLUTIONS, NUM_GROUPS))) + print('Estimated time (seconds) to complete:', NSEC * len(configs) * len(GN_KERNELS)) + + for DTYPE, B, C, R, G in configs: + x_nchw = torch.randn((B, C, R, R), dtype=DTYPE, device='cuda').requires_grad_(True) + x_nhwc = x_nchw.contiguous(memory_format=torch.channels_last).cuda().requires_grad_(True) + + gn_args = (G, C) + print(BENCH, 'X shape:', x_nchw.shape, 'G (num groups):', G) + for gn_class, desc, fwd_fn in GN_KERNELS: + gn_input = x_nchw if 'NCHW' in desc else x_nhwc + print(f'\t{desc}') + + try: + gn_layer = gn_class(*gn_args).cuda().to(DTYPE) + g = gn_layer(gn_input) + if not isinstance(gn_layer, GN_NHWC): + g = act_fn(g) + + torch.cuda.synchronize() + + tic = time.time() + tic_sec = time.time() + ntrials = 0 + ntrials_minor = 0 + minor_speeds = [] # used to track speed percentiles since they can often vary by a lot + + while time.time() - tic < NSEC: + if BENCH == 'fwd': + if fwd_fn is None: + g = gn_layer(gn_input) + else: + g = fwd_fn(gn_input, gn_layer.weight, gn_layer.bias, gn_layer.num_groups, gn_layer.eps) # Not calling gn_layer(gn_input) since I found this added a lot of overhead + elif BENCH == 'both': + g = gn_layer(gn_input) + if not isinstance(gn_layer, GN_NHWC): + g = act_fn(g) + if BENCH != 'fwd': + torch.autograd.grad(g.sum(), gn_input, retain_graph=True) + torch.cuda.synchronize() + + ntrials += 1 + ntrials_minor += 1 + + if time.time() - tic_sec > 0.1: + speed = round(ntrials_minor / (time.time() - tic_sec), 2) + minor_speeds.append(speed) + print(f'\t\t{round(time.time() - tic, 1)}/{NSEC} seconds completed, speed: {blue(speed)} it/s\r', end='') + ntrials_minor = 0 + tic_sec = time.time() + + minor_speeds = np.array(minor_speeds) + median_speed = round(np.percentile(minor_speeds, 50), 2) + slow_speed = round(np.percentile(minor_speeds, 25), 2) + fast_speed = round(np.percentile(minor_speeds, 75), 2) + print(f'\n\t\tSpeed (25th/50th/75th percentile): {red(slow_speed)}/{yellow(median_speed)}/{green(fast_speed)} it/s') + except KeyboardInterrupt: + print(f'Keyboard interrupt, closing {fname}.') + outfile.close() + raise + except Exception as e: + 
print('\t\tFAILED; Error:', str(e).strip()) + median_speed = slow_speed = fast_speed = '-1 (failed)' + + outfile.write(f'{desc},{B},{C},{R},{G},{C//G},{slow_speed},{median_speed},{fast_speed}\n') + print() + print(f'All tests done, closing {fname}.') + outfile.close() diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/gn_kernel.cu b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/gn_kernel.cu new file mode 100644 index 000000000..b421daccf --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/gn_kernel.cu @@ -0,0 +1,1051 @@ +#include +#include +#include +#include +#include "gn_kernel.h" +#include "Welford.h" +#include "vecs.h" +#define MAX_THREADS_PER_BLOCK 512 // 512 slightly faster (~3%) than 1024 because of higher theoretical occupancy -> higher mem throughput +#define MAX(a, b) (a > b) ? a : b +#define MIN(a, b) (a < b) ? a : b + +#define DEBUG_ENABLED 0 +#if DEBUG_ENABLED +#define DEBUG(format, args...) fprintf(stderr, format, args) +#else +#define DEBUG(format, args...) ((void)0) +#endif +#define ELEM_DEBUG 0 +#define INT int // torch uses int64_t but this came at a pretty big hit to performance and the input sizes that I frequently use (resolutions no bigger than 1024x1024) have a number of pixels smaller than the int max value + +template +struct acc_type { using type = float; }; +template <> +struct acc_type { using type = double; }; + +typedef struct block_params { + int t; // threads per block + int d; // dimensionality (number of rows of data that each threadblock proceesses in parallel) + int f; // factor (number of different threadblocks needed to represent one row of data) +} block_params_t; + +inline block_params_t calc_block_params(const int ideal_num_threads, const int threads_per_row, int f_divides = -1, const int tpb_divides = -1) { + /* + ideal_num_threads: absolute upper limit of threads that a block should have (e.g. a kernel that operates on only 30 elements should have a max TPB of 30 (ideal_num_threads=30)) + threads_per_row: determines the user-specified upper limit on the size of blockDim.x + - meant to be set to the size of the last dimension, e.g. a kernel operating on tensor sized (N, R, C) would have threads_per_row=C + f_divides: optional parameter if user needs to explicitly specify a stricter requirement on the divisibility of the number of threads per block + - e.g. fwd with C = 2560, G = 32, TPB = 480 wouldn't work since that means 32 groups are split over f=5 blocks (5.333 groups per block) + - e.g. fwd with C = 2560, G = 32, TPB = 320 would work since that means 32 groups are split over f=8 blocks (4 groups per block), you could say that f divides 32 (f_divides=32) + tpb_divides: optional parameter if user needs to explicitly specify that the returned threads per block needs to divide another value (e.g. a kernel where bounds checking isn't implemented) + - e.g. fwd with H, W, C = 5, 5, 32; TPB = 512 wouldn't work since that means you use 1.5625 blocks to represent H*W*C (800) elements + - e.g. fwd with H, W, C = 5, 5, 32; TPB = 160 would work since that means you use 5 blocks to represent H*W*C (800) elements, you could say that TPB (160) divides 800 (tpb_divides=800) + */ + int TPB, d = 1, f = 1; + f_divides = f_divides == -1 ? 
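A Python transcription of calc_block_params above (MAX_THREADS_PER_BLOCK = 512), handy for reasoning about launch shapes on the host: TPB threads per block, d rows per block, f blocks per row. The worked example assumes W = 16 for the ideal thread count.

MAX_THREADS_PER_BLOCK = 512

def calc_block_params(ideal_num_threads, threads_per_row, f_divides=-1, tpb_divides=-1):
    d, f = 1, 1
    if f_divides == -1:
        f_divides = threads_per_row
    tpb = min(MAX_THREADS_PER_BLOCK, ideal_num_threads)
    if threads_per_row < tpb:
        d = tpb // threads_per_row
        if tpb_divides != -1:            # d = 1 is guaranteed to break the loop
            while tpb_divides % (threads_per_row * d) != 0:
                d -= 1
    else:
        while f_divides % f != 0 or threads_per_row // f > MAX_THREADS_PER_BLOCK:
            f += 1
    return threads_per_row * d // f, d, f

# the example from the comment: C = 2560, G = 32 -> 32 groups split over f = 8 blocks
print(calc_block_params(16 * 2560, 2560, 32))   # -> (320, 1, 8)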
threads_per_row : f_divides; + TPB = MIN(MAX_THREADS_PER_BLOCK, ideal_num_threads); + if (threads_per_row < TPB) { + d = TPB / threads_per_row; + if (tpb_divides != -1) // could be put as another condition in the while loop but it hurts readability + while (tpb_divides % (threads_per_row * d) != 0) // d = 1 guaranteed to break this condition + --d; + } + else + while (f_divides % f != 0 || threads_per_row / f > MAX_THREADS_PER_BLOCK) + ++f; + TPB = threads_per_row * d / f; + return {TPB, d, f}; +} + +template __device__ T inline identity(T x) { + return x; +} +template __device__ T inline identity_d(T /*x*/) { + return 1; +} + +template __device__ T inline relu(T x) { + return x > 0 ? x : static_cast(0); +} +template __device__ T inline relu_d(T x) { + return x > 0 ? static_cast(1) : static_cast(0); +} + +template __device__ T inline silu(T x) { + return x / (1 + exp(-x)); +} +template __device__ T inline silu_d(T x) { + const T s = 1 / (1 + exp(-x)); + return s * (1 + x * (1 - s)); +} + +template __device__ T inline gelu(T x) { + constexpr float kAlpha = M_SQRT1_2; + return x * T(0.5) * (T(1) + erf(x * kAlpha)); +} +template __device__ T inline gelu_d(T x) { + constexpr float kBeta = M_2_SQRTPI * M_SQRT1_2 * 0.5; + constexpr float kAlpha = M_SQRT1_2; + const T cdf = T(0.5) * (T(1) + erf(x * kAlpha)); + const T pdf = exp(T(-0.5) * x * x) * kBeta; + return cdf + x * pdf; +} + +template __device__ T inline gelu_tanh(T x) { + constexpr float kBeta = M_SQRT2 * M_2_SQRTPI * 0.5; + constexpr float kKappa = 0.044715; + auto inner = kBeta * (x + kKappa * x * x * x); + return T(0.5) * x * (T(1) + tanh(inner)); +} +template __device__ T inline gelu_tanh_d(T x) { + constexpr float kBeta = M_SQRT2 * M_2_SQRTPI * 0.5; + constexpr float kKappa = 0.044715; + auto x_sq = x * x; + auto x_cube = x_sq * x; + auto inner = kBeta * (x + kKappa * x_cube); + auto tanh_inner = tanh(inner); + + auto left = T(0.5) * x; + auto right = T(1) + tanh_inner; + + auto left_derivative = T(0.5) * right; + + auto tanh_derivative = T(1) - tanh_inner * tanh_inner; + auto inner_derivative = kBeta * (T(1) + T(3) * kKappa * x_sq); + auto right_derivative = left * tanh_derivative * inner_derivative; + + return left_derivative + right_derivative; +} + +////////////////////////////////////////////////// +// forward kernels +////////////////////////////////////////////////// + +template +__global__ void +compute_stats_pt1( + const T* X, + const int H, + const int W, + const int C, + const int G, + WelfordData::type, INT> *welford_data + ) { + /* + Computes means and rstds of X on the W (width) dimension. 
+ grid: (x=N, y=H, z=f); block: (x=TPB/d, y=d) + - TPB = Cd/f + if TPB < C (f > 1, d=1) + C = f*TPB + X shape: (N, H, W, C) -view-> (N, H, W, 1, f, TPB); X stride: (HWC, WC, C, C, TPB, 1) + dram reduction (per block): (W, 1, TPB) -reduce-> (1, TPB) + else (block.x=C, block.y=d) + TPB = Cd + X shape: (N, H, W, C) -view-> (N, H, W/d, d, 1, C); X stride: (HWC, WC, dC, C, C, 1) + dram reduction (per block): (W/d, d, C) -reduce-> (d, C) + shmem reduction (per block): (TPB,) -view-> (d, G/f, D) -permute-> (d, D, G/f) -reduce-> G/f + output buffer: (N, f, G/f, H) + */ + using T_ACC = typename acc_type::type; + using WelfordType = WelfordData; + using WelfordOp = WelfordOps>; + const int TPB = blockDim.y * blockDim.x; + const int d = blockDim.y; + + WelfordOp welford_op = {/*correction=*/0, /*take_sqrt=*/false}; + WelfordType val(0, 0, 0, 0); + + const int w = ceil((float)W / d); + int i; +#pragma unroll + for (i = 0; i < w - 1; ++i) { + int reduce_idx = 0; + reduce_idx += blockIdx.x * H * W * C; + reduce_idx += blockIdx.y * W * C; + reduce_idx += i * d * C; + reduce_idx += threadIdx.y * C; + reduce_idx += blockIdx.z * TPB; + reduce_idx += threadIdx.x; + T x = X[reduce_idx]; + val = welford_op.reduce(val, static_cast(x)); + } + if ((int)(i * d + threadIdx.y) < W) // last iteration to deal with inputs with weird width sizes + val = welford_op.reduce(val, static_cast(X[blockIdx.x * H * W * C + blockIdx.y * W * C + i * d * C + threadIdx.y * C + blockIdx.z * TPB + threadIdx.x])); + + // shmem reduction + const int D = C / G; + const int tid = threadIdx.y * blockDim.x + threadIdx.x; + const int f = gridDim.z; + const int gf = G / f; + const int d_idx = threadIdx.y; + const int gf_idx = threadIdx.x / D; + const int D_idx = threadIdx.x % D; + + __shared__ typename std::aligned_storage::type vals_reduced_arr[MAX_THREADS_PER_BLOCK]; + WelfordType *vals_reduced = reinterpret_cast(vals_reduced_arr); + + int idx = 0; + idx += d_idx * D * gf; + idx += D_idx * gf; + idx += gf_idx; + vals_reduced[idx] = val; + __syncthreads(); + + int reduce_n = TPB / gf; // number of inputs that gets reduced to a single output +#pragma unroll + for (int stride = TPB / 2; stride >= gf && reduce_n % 2 == 0 && stride % gf == 0; stride >>= 1, reduce_n >>= 1) { + if (tid < stride) + vals_reduced[tid] = welford_op.combine(vals_reduced[tid], vals_reduced[tid + stride]); + __syncthreads(); + } + + // put reduced outputs into return buffers + if (tid < gf) { +#pragma unroll + for (int i = 1; i < reduce_n; ++i) + vals_reduced[tid] = welford_op.combine(vals_reduced[tid], vals_reduced[tid + i * gf]); + + int out_idx = 0; + out_idx += blockIdx.x * G * H; + out_idx += blockIdx.z * gf * H; + out_idx += tid * H; + out_idx += blockIdx.y; + welford_data[out_idx] = vals_reduced[tid]; + } +} + +template +__global__ void +compute_stats_pt2( + WelfordData::type, INT> *welford_data, + const int H, + const int G, + const T eps, + T* means, + T* rstds + ) { + using T_ACC = typename acc_type::type; + using WelfordType = WelfordData; + using WelfordOp = WelfordOps>; + /* + Computes means and rstds of X on the H (height) dimension. 
+ grid: (x=N, y=G); block: (x=H/f) + - TPB = Gd/f + welford_data shape: (N, G, H) -view-> (N, G, f, H/f); X stride: (GH, H, H/f, 1) + dram reduction (per block): (f, H/f) -reduce-> (H/f,) + shmem reduction (per block): (H/f) -reduce-> (1,) + output buffer: (N, G) + */ + + WelfordOp welford_op = {/*correction=*/0, /*take_sqrt=*/false}; + WelfordType val(0, 0, 0, 0); + const int TPB = blockDim.y * blockDim.x; + + const int f = H / TPB; + for (int i = 0 ; i < f; ++i) { + int idx = 0; + idx += blockIdx.x * G * H; + idx += blockIdx.y * H; + idx += i * H / f; + idx += threadIdx.x; + val = welford_op.combine(val, welford_data[idx]); + } + + // shmem reduction + __shared__ typename std::aligned_storage::type vals_reduced_arr[MAX_THREADS_PER_BLOCK]; + WelfordType *vals_reduced = reinterpret_cast(vals_reduced_arr); + + const int tid = threadIdx.x; + vals_reduced[tid] = val; + __syncthreads(); + + int reduce_n = TPB; // number of inputs that gets reduced to a single output + +#pragma unroll + for (int stride = TPB / 2; stride >= 1 && reduce_n % 2 == 0; stride >>= 1, reduce_n >>= 1) { + if (tid < stride) + vals_reduced[tid] = welford_op.combine(vals_reduced[tid], vals_reduced[tid + stride]); + __syncthreads(); + } + + // put reduced outputs into return buffers + if (tid == 0) { +#pragma unroll + for (int i = 1; i < reduce_n; ++i) + vals_reduced[tid] = welford_op.combine(vals_reduced[tid], vals_reduced[tid + i]); + + T_ACC mean, var; + thrust::tie(var, mean) = welford_op.project(vals_reduced[tid]); + int out_idx = 0; + out_idx += blockIdx.x * G; + out_idx += blockIdx.y; + means[out_idx] = mean; + rstds[out_idx] = rsqrt(var + static_cast(eps)); + } +} + +template +__global__ void +scale_shift( + const T* X_data, + const T* mean_data, + const T* rstd_data, + const T* weight_data, + const T* bias_data, + const int N, + const int C, + const int G, + T* y + ) { + /* + Performs elementwise op (X - mean) * rstd * weight + bias. Vectorized for speed. + LOOP_I: number of elements that each thread processes. + vec_elems: number of elements stored for each vector. 
+ grid: (x=NHWC / (TPB * LOOP_I * f), y=f), block: (x=TPB) + - HWC % (TPB * LOOP_I * f) = 0 + - TPB * f % C = 0 + X shape: (N, H, W, C) -view-> (NHWC / (TPB * LOOP_I * f), LOOP_I, f, TPB); X.stride: (LOOP_I * f * TPB, f * TPB, TPB, 1) + */ + using T_ACC = typename acc_type::type; + using V = float_vec; + const int f = gridDim.y; + const int TPB = blockDim.x; + + const int n = (N * blockIdx.x) / gridDim.x; + const int c = (blockIdx.y * blockDim.x + threadIdx.x) % (C / vec_elems); + const int g = (G * c) / (C / vec_elems); + const int ng = n * G + g; + const V *X_vecs = reinterpret_cast(X_data); + const V *weight_vecs = reinterpret_cast(weight_data); + const V *bias_vecs = reinterpret_cast(bias_data); + V *y_vecs = reinterpret_cast(y); + T mean = mean_data[ng]; + T rstd = rstd_data[ng]; + V weight_vec = weight_vecs[c]; + V bias_vec = bias_vecs[c]; + + // compute fused weight/bias a,b such that (x - mean) * rstd * weight + bias = x * a + b + V fused_weight, fused_bias; + if constexpr (vec_elems == 1) { + fused_weight = {rstd * weight_vec.x}; + fused_bias = {-mean * fused_weight.x + bias_vec.x}; + } + else if constexpr (vec_elems == 2) { + fused_weight = { + rstd * weight_vec.x, + rstd * weight_vec.y + }; + fused_bias = { + -mean * fused_weight.x + bias_vec.x, + -mean * fused_weight.y + bias_vec.y + }; + } + else if constexpr (vec_elems == 4) { + fused_weight = { + rstd * weight_vec.x, + rstd * weight_vec.y, + rstd * weight_vec.z, + rstd * weight_vec.w + }; + fused_bias = { + -mean * fused_weight.x + bias_vec.x, + -mean * fused_weight.y + bias_vec.y, + -mean * fused_weight.z + bias_vec.z, + -mean * fused_weight.w + bias_vec.w + }; + } + + T (*act_fn)(T); + if constexpr (act_fn_option == 0) + act_fn = identity; + else if constexpr (act_fn_option == 1) + act_fn = relu; + else if constexpr (act_fn_option == 2) + act_fn = silu; + else if constexpr (act_fn_option == 3) + act_fn = gelu; + else if constexpr (act_fn_option == 4) + act_fn = gelu_tanh; + +#pragma unroll + for (int i = 0; i < LOOP_I; ++i) { + int idx = 0; + idx += blockIdx.x * LOOP_I * f * TPB; + idx += i * f * TPB; + idx += blockIdx.y * TPB; + idx += threadIdx.x; + V X_vec = X_vecs[idx]; + + if constexpr (vec_elems == 1) + y_vecs[idx] = {act_fn(static_cast(X_vec.x) * fused_weight.x + fused_bias.x)}; + else if constexpr (vec_elems == 2) { + y_vecs[idx] = { + act_fn(static_cast(X_vec.x) * fused_weight.x + fused_bias.x), + act_fn(static_cast(X_vec.y) * fused_weight.y + fused_bias.y), + }; + } + else if constexpr (vec_elems == 4) { + y_vecs[idx] = { + act_fn(static_cast(X_vec.x) * fused_weight.x + fused_bias.x), + act_fn(static_cast(X_vec.y) * fused_weight.y + fused_bias.y), + act_fn(static_cast(X_vec.z) * fused_weight.z + fused_bias.z), + act_fn(static_cast(X_vec.w) * fused_weight.w + fused_bias.w), + }; + } + } +} + +template +void run_gn_fwd_kernels( + const T *X_data, + const T *weight_data, + const T *bias_data, + const int N, + const int H, + const int W, + const int C, + const int G, + T eps, + const int64_t act_fn_option, + T *Y_data, + T *mean_data, + T *rstd_data) { + using T_ACC = typename acc_type::type; + using WelfordType = WelfordData; + WelfordType *welford_data = (WelfordType*)c10::cuda::CUDACachingAllocator::raw_alloc(sizeof(WelfordType) * N * G * H); + cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); + + // compute means/rstds over width dimension + { + auto [TPB, d, f] = calc_block_params(W * C, C, G); + DEBUG("starting compute_stats 1, N: %d, H: %d, W: %d, C: %d, G: %d, D: %d, TPB: %d, d: %d, f: %d, G/f: 
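A quick numeric check of the fusion used by scale_shift: with a = rstd * weight and b = -mean * a + bias, the per-element work (x - mean) * rstd * weight + bias collapses to a single multiply-add x * a + b.

import torch

x = torch.randn(1000)
mean, rstd = torch.tensor(0.3), torch.tensor(1.7)
weight, bias = torch.tensor(0.9), torch.tensor(-0.1)

a = rstd * weight
b = -mean * a + bias
print(torch.allclose((x - mean) * rstd * weight + bias, x * a + b, atol=1e-6))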
%d\n", N, H, W, C, G, (C / G), TPB, d, f, (G / f)); + compute_stats_pt1<<>>( + X_data, + H, W, C, G, + welford_data + ); + } + + // compute means/rstds over height dimension + { + auto [TPB, d, f] = calc_block_params(H, H); + DEBUG("starting compute_stats 2, N: %d, H: %d, W: %d, C: %d, G: %d, D: %d, TPB: %d, d: %d, f: %d, G/f: %d\n", N, H, W, C, G, (C / G), TPB, d, f, (G / f)); + compute_stats_pt2<<>>( + welford_data, + H, G, eps, + mean_data, rstd_data + ); + } + + // scale/shift X + { + const int D = C / G; + int vec_elems; + if (D % 4 == 0) vec_elems = 4; + else if (D % 2 == 0) vec_elems = 2; + else vec_elems = 1; + auto [TPB, d, f] = calc_block_params(H * W * C / 8 / vec_elems, C); + + if (!ELEM_DEBUG && ((H * W * C) % (TPB * 8 * f * vec_elems) == 0)) { + const int LOOP_I = 8; + const int num_blocks = N * H * W * C / TPB / LOOP_I / f; + DEBUG("scale shift starting (LOOP_I = 8), N: %d, H: %d, W: %d, C: %d, G: %d, D: %d, TPB: %d, f: %d, num blocks (before vectors): %d, vec_elems: %d\n", N, H, W, C, G, D, TPB, f, num_blocks, vec_elems); + if (vec_elems == 4 && act_fn_option == 0) // i'm sorry + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 2 && act_fn_option == 0) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 1 && act_fn_option == 0) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 4 && act_fn_option == 1) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 2 && act_fn_option == 1) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 1 && act_fn_option == 1) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 4 && act_fn_option == 2) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 2 && act_fn_option == 2) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 1 && act_fn_option == 2) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 4 && act_fn_option == 3) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 2 && act_fn_option == 3) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 1 && act_fn_option == 3) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 4 && act_fn_option == 4) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 2 && act_fn_option == 4) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + else if (vec_elems == 1 && act_fn_option == 4) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + } + else {// relatively slow fallback + const int num_blocks = N * H * W; + DEBUG("SLOW FALLBACK, scale shift kernel starting, N: %d, H: %d, W: %d, C: %d, G: %d, D: %d, TPB: %d, f: %d, num blocks (before vectors): %d, vec_elems: %d\n", N, H, W, C, G, D, C/f, f, num_blocks, vec_elems); + if (act_fn_option == 0) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + 
if (act_fn_option == 1) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + if (act_fn_option == 2) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + if (act_fn_option == 3) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + if (act_fn_option == 4) + scale_shift<<>>(X_data, mean_data, rstd_data, weight_data, bias_data, N, C, G, Y_data); + } + } + + c10::cuda::CUDACachingAllocator::raw_delete(welford_data); +} + +template void run_gn_fwd_kernels(const float *X_data, const float *weight_data, const float *bias_data, const int N, const int h, const int W, const int C, const int G, float eps, const int64_t act_fn_option, float *Y_data, float *mean_data, float *rstd_data); +template void run_gn_fwd_kernels(const double *X_data, const double *weight_data, const double *bias_data, const int N, const int h, const int W, const int C, const int G, double eps, const int64_t act_fn_option, double *Y_data, double *mean_data, double *rstd_data); +template void run_gn_fwd_kernels(const c10::Half *X_data, const c10::Half *weight_data, const c10::Half *bias_data, const int N, const int h, const int W, const int C, const int G, c10::Half eps, const int64_t act_fn_option, c10::Half *Y_data, c10::Half *mean_data, c10::Half *rstd_data); +template void run_gn_fwd_kernels(const c10::BFloat16 *X_data, const c10::BFloat16 *weight_data, const c10::BFloat16 *bias_data, const int N, const int h, const int W, const int C, const int G, c10::BFloat16 eps, const int64_t act_fn_option, c10::BFloat16 *Y_data, c10::BFloat16 *mean_data, c10::BFloat16 *rstd_data); + +////////////////////////////////////////////////// +// backward kernels +////////////////////////////////////////////////// + +template +__device__ void +sum_reduce( + T vals_reduced, + const int start_stride, + const int end_stride + ) { + // Sums a shared buffer (vals_reduced) with shape (2 * start_stride / end_stride, end_stride) into (end_stride,). + const int tid = threadIdx.y * blockDim.x + threadIdx.x; + int reduce_n = 2 * start_stride / end_stride; + +#pragma unroll + for (int stride = start_stride; stride >= end_stride && reduce_n % 2 == 0 && stride % end_stride == 0; stride >>= 1, reduce_n >>= 1) { + if (tid < stride) + vals_reduced[tid] += vals_reduced[tid + stride]; + __syncthreads(); + } + + if (tid < end_stride) +#pragma unroll + for (int i = 1; i < reduce_n; ++i) + vals_reduced[tid] += vals_reduced[tid + i * end_stride]; + __syncthreads(); +} + +template +__global__ void +width_reduce( + const T* dy_data, + const T* X_data, + const T* mean_data, + const T* rstd_data, + const T* weight_data, + const T* bias_data, + const int H, + const int W, + const int C, + const int G, + typename acc_type::type *xdy_dy_sum_data) { + /* + Loops over W (width) dimension, loading and summing dy, X, and the activation derivative of Y. Outputs stored in xdy_dy_sum_data. Spatial dimension H is processed in a separate kernel. 
+ grid: (x=N, y=H, z=f); blockdim: (x=TPB/d, y=d) + TPB = Cd/f + if TPB < C (f > 1, d=1) + C = f*TPB + X shape: (N, H, W, C) -view-> (N, H, W, 1, f, TPB); X stride: (HWC, WC, C, C, TPB, 1) + dram reduction (per block): (W, 1, TPB) -reduce-> (TPB,) + else (block.x=C, block.y=d) + TPB = Cd + X shape: (N, H, W, C) -view-> (N, H, W/d, d, 1, C); X stride: (HWC, WC, dC, C, C, 1) + dram reduction (per block): (W/d, d, C) -reduce-> (d, C) + shmem reduction (per block): (TPB, 2) -> (d, C/f, 2) -reduce-> (C/f, 2) (the 2 comes from storing both xdy_sum and dy_sum in the same buffer) + output buffer: (N, f, C/f, H, 2) -view-> (N, C, H, 2) + xdy_dy_sum_data[:, :, :, 0] = x * dy * activation_derivative((x-mean)*rstd*weight+bias) + xdy_dy_sum_data[:, :, :, 1] = dy * activation_derivative((x-mean)*rstd*weight+bias) + */ + using T_ACC = typename acc_type::type; + + const int TPB = blockDim.y * blockDim.x; + const int d = blockDim.y; + T_ACC xdy_sum = 0; + T_ACC dy_sum = 0; + + const int n = blockIdx.x; + int c = blockIdx.z * blockDim.x + threadIdx.x; + int g = G * c / C; + const int ng = n * G + g; + T_ACC fused_scale = rstd_data[ng] * weight_data[c]; + T_ACC fused_bias = -mean_data[ng] * fused_scale + bias_data[c]; + + T (*act_d_fn)(T x); + if constexpr (act_fn_option == 0) + act_d_fn = identity_d; + else if constexpr (act_fn_option == 1) + act_d_fn = relu_d; + else if constexpr (act_fn_option == 2) + act_d_fn = silu_d; + else if constexpr (act_fn_option == 3) + act_d_fn = gelu_d; + else if constexpr (act_fn_option == 4) + act_d_fn = gelu_tanh_d; + + const int w = ceil((float)W / d); + int i; +#pragma unroll + for (i = 0; i < w - 1; ++i) { + int reduce_idx = 0; + reduce_idx += blockIdx.x * H * W * C; + reduce_idx += blockIdx.y * W * C; + reduce_idx += i * d * C; + reduce_idx += threadIdx.y * C; + reduce_idx += blockIdx.z * TPB; + reduce_idx += threadIdx.x; + T_ACC dy_elem = static_cast(dy_data[reduce_idx]); + T_ACC X_elem = static_cast(X_data[reduce_idx]); + T_ACC X_norm = X_elem * fused_scale + fused_bias; + T_ACC d_act = act_d_fn(X_norm); + xdy_sum += dy_elem * X_elem * d_act; + dy_sum += dy_elem * d_act; + } + if ((int)(i * d + threadIdx.y) < W) { // last iteration to deal with inputs with weird width sizes + int reduce_idx = blockIdx.x * H * W * C + blockIdx.y * W * C + i * d * C + threadIdx.y * C + blockIdx.z * TPB + threadIdx.x; + T_ACC dy_elem = static_cast(dy_data[reduce_idx]); + T_ACC X_elem = static_cast(X_data[reduce_idx]); + T_ACC X_norm = X_elem * fused_scale + fused_bias; + T_ACC d_act = act_d_fn(X_norm); + xdy_sum += dy_elem * X_elem * d_act; + dy_sum += dy_elem * d_act; + } + + // shmem reduction + extern __shared__ char vals_reduced_uncasted[]; // size 2*TPB, TPB for sum1, TPB for sum2 + T_ACC *vals_reduced = reinterpret_cast(vals_reduced_uncasted); + + const int tid = threadIdx.y * blockDim.x + threadIdx.x; + vals_reduced[2 * tid] = xdy_sum; + vals_reduced[2 * tid + 1] = dy_sum; + __syncthreads(); + sum_reduce(vals_reduced, TPB, 2 * C); + + // put reduced outputs into return buffers + if (tid < C) { + int out_idx = 0; + out_idx += blockIdx.x * C * H; + out_idx += (blockIdx.z * TPB + tid) * H; + out_idx += blockIdx.y; + + xdy_dy_sum_data[2 * out_idx] = vals_reduced[2 * tid]; + xdy_dy_sum_data[2 * out_idx + 1] = vals_reduced[2 * tid + 1]; + } +} + +template +__global__ void +height_reduce( + T *xdy_dy_sum_data, // no need to specify T_ACC as T is already an accumulation type + const int H, + const int C, + T *xdy_sum_data, + T *dy_sum_data + ) { + /* + Same thing as width_reduce but over 
the H (height) instead of the width dimension. + grid: (x=N, y=C); block: (x=2H/f) + X shape: (N, C, H, 2) -view-> (N, C, f, H/f, 2); X stride: (2CH, 2H, 2H/f, H/f, 1) + dram reduction (per block): (f, H/f, 2) -reduce-> (H/f, 2) + shmem reduction (per block): (H/f, 2) -reduce-> (2,) + output buffer: (N, C, 2) + */ + const int TPB = blockDim.x; + const int tid = threadIdx.x; + + // shmem reduction + extern __shared__ char vals_reduced_uncasted[]; + T *vals_reduced = reinterpret_cast(vals_reduced_uncasted); + T sum = 0; + int i; +#pragma unroll + for (i = 0; i < ceil((float)2 * H / TPB) - 1; ++i) { + int idx = 0; + idx += blockIdx.x * C * H * 2; + idx += blockIdx.y * H * 2; + idx += i * TPB; + idx += tid; + sum += xdy_dy_sum_data[idx]; + } + if (i * TPB + tid < 2 * H) + sum += xdy_dy_sum_data[blockIdx.x * C * H * 2 + blockIdx.y * H * 2 + i * TPB + tid]; + + vals_reduced[tid] = sum; + __syncthreads(); + sum_reduce(vals_reduced, TPB / 2, 2); + + // put reduced outputs into return buffers + if (tid == 0) { + int out_idx = blockIdx.x * C + blockIdx.y; + xdy_sum_data[out_idx] = vals_reduced[0]; + dy_sum_data[out_idx] = vals_reduced[1]; + } +} + +template +__global__ void +compute_bwd_scale_biases( + const T* mean_data, + const T* rstd_data, + const T* weight_data, + const T* bias_data, + typename acc_type::type* xdy_sum_data, + typename acc_type::type* dy_sum_data, + const int H, + const int W, + const int C, + const int G, + typename acc_type::type* coef1_data, + typename acc_type::type* coef2_data, + typename acc_type::type* coef3_data, + typename acc_type::type* coef4_data + ) { + /* + Calculates coefficients to reduce computation on the elementwise kernel. + - coef1: fused scale (rstd * weight) + - coef2: fused bias (-mean * rstd * weight + bias) + - coef3/4: some derivative terms + griddim: (x=N, y=f); blockdim: (x=C/f) + - d = num. 
spatial elements (from HW dimension) each thread-block processes in parallel + - Cd = TPB (threads per block) + X shape: (N, C) -view-> (N, G, D) -permute-> (N, D, G) -reduce-> (N, G) + shmem reduction: (D, G) -reduce-> G + output buffer: (N, G) + */ + using T_ACC = typename acc_type::type; + const int D = C / G; + const int f = gridDim.y; + const int Gf = G / f; + const int n = blockIdx.x; + const int c = blockIdx.y * blockDim.x + threadIdx.x; + const int g = c / D; + const int d = c % D; + const int nc = n * C + c; + const T_ACC gamma_v = static_cast(weight_data[c]); + + extern __shared__ char vals_reduced_uncasted[]; // size 2*C, C for sum1, C for sum2 + T_ACC *vals_reduced = reinterpret_cast(vals_reduced_uncasted); + + int idx = 0; + idx += d * G / f; + idx += g % Gf; + vals_reduced[2 * idx] = xdy_sum_data[nc] * gamma_v; + vals_reduced[2 * idx + 1] = dy_sum_data[nc] * gamma_v; + __syncthreads(); + sum_reduce(vals_reduced, C / f, 2 * G / f); + + const int ng = n * G + g; + const T_ACC mean_elem = static_cast(mean_data[ng]); + const T_ACC rstd_elem = static_cast(rstd_data[ng]); + coef1_data[nc] = rstd_elem * weight_data[c]; + coef2_data[nc] = -mean_elem * rstd_elem * weight_data[c] + bias_data[c]; + + if (d == 0) { + const T_ACC sum1 = vals_reduced[2 * (g % Gf)]; + const T_ACC sum2 = vals_reduced[2 * (g % Gf) + 1]; + const T_ACC s = T_ACC(1) / static_cast(D * H * W); + const T_ACC x = (sum2 * mean_elem - sum1) * rstd_elem * rstd_elem * rstd_elem * s; + coef3_data[ng] = x; + coef4_data[ng] = (-x * mean_elem) - (sum2 * s * rstd_elem); + } +} + +template +__global__ void +compute_dweight_dbias( + const T* mean_data, + const T* rstd_data, + typename acc_type::type *xdy_sum_data, + typename acc_type::type *dy_sum_data, + const int N, + const int C, + const int G, + T* dweight_data, + T* dbias_data) { + /* + Computes derivatives wrt the weight and bias. + grid: (x=f), block: (x=C/f) + */ + using T_ACC = typename acc_type::type; + const int c = blockIdx.x * blockDim.x + threadIdx.x; + const int D = C / G; + const int g = c / D; + T_ACC sum1 = 0; + T_ACC sum2 = 0; + +#pragma unroll + for (int n = 0; n < N; ++n) { + const int nc = n * C + c; + const int ng = n * G + g; + sum1 += (xdy_sum_data[nc] - dy_sum_data[nc] * mean_data[ng]) * rstd_data[ng]; + sum2 += dy_sum_data[nc]; + } + dweight_data[c] = sum1; + dbias_data[c] = sum2; +} + +template +__global__ void +dx_elem_kernel( + const T* dy_data, + const T* X_data, + typename acc_type::type* coef1_data, + typename acc_type::type* coef2_data, + typename acc_type::type* coef3_data, + typename acc_type::type* coef4_data, + const int N, + const int C, + const int G, + T* dx_data + ) { + /* + Performs elementwise kernel to calculate gradients wrt X. Vectorized for speed. + LOOP_I: number of elements that each thread processes. + vec_elems: number of elements stored for each vector. 
+ grid: (x=NHWC / (TPB * LOOP_I * f), y=f), block: (x=TPB) + - HWC % (TPB * LOOP_I * f) = 0 + - TPB * f % C = 0 + X shape: (N, H, W, C) -view-> (NHWC / (TPB * LOOP_I * f), LOOP_I, f, TPB); X.stride: (LOOP_I * f * TPB, f * TPB, TPB, 1) + */ + using T_ACC = typename acc_type::type; + using V = float_vec; + using V_ACC = float_vec; + const int f = gridDim.y; + const int n = (N * blockIdx.x) / gridDim.x; + const int c = (blockIdx.y * blockDim.x + threadIdx.x) % (C / vec_elems); + const int g = (G * c) / (C / vec_elems); + const int nc = n * (C / vec_elems) + c; + const int ng = n * G + g; + T_ACC coef3 = coef3_data[ng]; + T_ACC coef4 = coef4_data[ng]; + const V *dy_vecs = reinterpret_cast(dy_data); + const V *X_vecs = reinterpret_cast(X_data); + V *dx_vecs = reinterpret_cast(dx_data); + V_ACC coef1_vec = reinterpret_cast(coef1_data)[nc]; + V_ACC coef2_vec = reinterpret_cast(coef2_data)[nc]; + + T (*act_d_fn)(T); + if constexpr (act_fn_option == 0) + act_d_fn = identity_d; + else if constexpr (act_fn_option == 1) + act_d_fn = relu_d; + else if constexpr (act_fn_option == 2) + act_d_fn = silu_d; + else if constexpr (act_fn_option == 3) + act_d_fn = gelu_d; + else if constexpr (act_fn_option == 4) + act_d_fn = gelu_tanh_d; + +#pragma unroll + for (int i = 0; i < LOOP_I; ++i) { + int idx = 0; + idx += blockIdx.x * LOOP_I * f * blockDim.x; + idx += i * f * blockDim.x; + idx += blockIdx.y * blockDim.x; + idx += threadIdx.x; + + V dy_vec = dy_vecs[idx]; + V X_vec = X_vecs[idx]; + + if constexpr (vec_elems == 1) { + V X_norm = {X_vec.x * coef1_vec.x + coef2_vec.x}; + dx_vecs[idx] = { + (coef1_vec.x * act_d_fn(X_norm.x) * dy_vec.x) + + ((coef3 * X_vec.x) + coef4) + }; + } + else if constexpr (vec_elems == 2) { + V X_norm = { + X_vec.x * coef1_vec.x + coef2_vec.x, + X_vec.y * coef1_vec.y + coef2_vec.y, + }; + dx_vecs[idx] = { + (coef1_vec.x * act_d_fn(X_norm.x) * dy_vec.x) + + ((coef3 * X_vec.x) + coef4), + (coef1_vec.y * act_d_fn(X_norm.y) * dy_vec.y) + + ((coef3 * X_vec.y) + coef4), + }; + } + else if constexpr (vec_elems == 4) { + V X_norm = { + X_vec.x * coef1_vec.x + coef2_vec.x, + X_vec.y * coef1_vec.y + coef2_vec.y, + X_vec.z * coef1_vec.z + coef2_vec.z, + X_vec.w * coef1_vec.w + coef2_vec.w, + }; + dx_vecs[idx] = { + (coef1_vec.x * act_d_fn(X_norm.x) * dy_vec.x) + + ((coef3 * X_vec.x) + coef4), + (coef1_vec.y * act_d_fn(X_norm.y) * dy_vec.y) + + ((coef3 * X_vec.y) + coef4), + (coef1_vec.z * act_d_fn(X_norm.z) * dy_vec.z) + + ((coef3 * X_vec.z) + coef4), + (coef1_vec.w * act_d_fn(X_norm.w) * dy_vec.w) + + ((coef3 * X_vec.w) + coef4), + }; + } + } +} + +template +void run_gn_bwd_kernels( + const T *dy_data, + const T *X_data, + const T *weight_data, + const T *bias_data, + const T *mean_data, + const T *rstd_data, + const int N, + const int H, + const int W, + const int C, + const int G, + const int64_t act_fn_option, + T *dx_data, + T *dweight_data, + T *dbias_data + ) { + using T_ACC = typename acc_type::type; + cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); + const int D = C / G; + + T_ACC* xdy_dy_sum_data = (T_ACC*)c10::cuda::CUDACachingAllocator::raw_alloc(sizeof(T_ACC) * N * C * H * 2); + + // sum over W dim + { + auto [TPB, d, f] = calc_block_params(W * C, C, G); + DEBUG("starting width reduce, N: %d, H: %d, W: %d, C: %d, G: %d, TPB: %d, d: %d, f: %d\n", N, H, W, C, G, TPB, d, f); + if (act_fn_option == 0) + width_reduce<<>>( + dy_data, X_data, + mean_data, rstd_data, + weight_data, bias_data, + H, W, C, G, + xdy_dy_sum_data); + else if (act_fn_option == 1) + 
width_reduce<<>>(dy_data, X_data, mean_data, rstd_data, weight_data, bias_data, H, W, C, G, xdy_dy_sum_data); + else if (act_fn_option == 2) + width_reduce<<>>(dy_data, X_data, mean_data, rstd_data, weight_data, bias_data, H, W, C, G, xdy_dy_sum_data); + else if (act_fn_option == 3) + width_reduce<<>>(dy_data, X_data, mean_data, rstd_data, weight_data, bias_data, H, W, C, G, xdy_dy_sum_data); + else if (act_fn_option == 4) + width_reduce<<>>(dy_data, X_data, mean_data, rstd_data, weight_data, bias_data, H, W, C, G, xdy_dy_sum_data); + } + + T_ACC* xdy_sum_data = (T_ACC*)c10::cuda::CUDACachingAllocator::raw_alloc(sizeof(T_ACC) * N * C); + T_ACC* dy_sum_data = (T_ACC*)c10::cuda::CUDACachingAllocator::raw_alloc(sizeof(T_ACC) * N * C); + // sum over H dim + { + auto [TPB, d, f] = calc_block_params(2 * H, 2); + DEBUG("starting height reduce, N: %d, H: %d, W: %d, C: %d, G: %d, TPB: %d, d: %d, f: %d\n", N, H, W, C, G, TPB, d, f); + height_reduce<<>>( + xdy_dy_sum_data, + H, C, + xdy_sum_data, dy_sum_data); + } + c10::cuda::CUDACachingAllocator::raw_delete(xdy_dy_sum_data); + + // compute weight/bias grads + { + auto [TPB, d, f] = calc_block_params(C, C, G); + DEBUG("starting compute dweight dbias, N: %d, H: %d, W: %d, C: %d, G: %d, TPB: %d, d: %d, f: %d\n", N, H, W, C, G, TPB, d, f); + compute_dweight_dbias<<>>( + mean_data, rstd_data, + xdy_sum_data, dy_sum_data, + N, C, G, + dweight_data, dbias_data); + } + + T_ACC *coef1_data = (T_ACC*)c10::cuda::CUDACachingAllocator::raw_alloc(sizeof(T_ACC) * N * C); + T_ACC *coef2_data = (T_ACC*)c10::cuda::CUDACachingAllocator::raw_alloc(sizeof(T_ACC) * N * C); + T_ACC *coef3_data = (T_ACC*)c10::cuda::CUDACachingAllocator::raw_alloc(sizeof(T_ACC) * N * G); + T_ACC *coef4_data = (T_ACC*)c10::cuda::CUDACachingAllocator::raw_alloc(sizeof(T_ACC) * N * G); + // compute fused scales/biases for dx elementwise kernel + { + auto [TPB, d, f] = calc_block_params(C, C, G); + DEBUG("starting bwd scale biases, N: %d, H: %d, W: %d, C: %d, G: %d, TPB: %d, d: %d, f: %d\n", N, H, W, C, G, TPB, d, f); + compute_bwd_scale_biases<<>>( + mean_data, rstd_data, weight_data, bias_data, + xdy_sum_data, dy_sum_data, + H, W, C, G, + coef1_data, coef2_data, coef3_data, coef4_data); + } + + { + int vec_elems; + if (D % 4 == 0) vec_elems = 4; + else if (D % 2 == 0) vec_elems = 2; + else vec_elems = 1; + auto [TPB, d, f] = calc_block_params(H * W * C, C, G); + + if (!ELEM_DEBUG && ((H * W * C) % (TPB * 8 * f * vec_elems) == 0)) { + const int LOOP_I = 8; + const int num_blocks = ceil((float)N * H * W * C / TPB / LOOP_I / f); + DEBUG("dx elem kernel starting, N: %d, H: %d, W: %d, C: %d, G: %d, D: %d, TPB: %d, f: %d, num blocks (before vectors): %d, vec_elems: %d\n", N, H, W, C, G, D, TPB, f, num_blocks, vec_elems); + if (D % 4 == 0 && act_fn_option == 0) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 2 == 0 && act_fn_option == 0) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 1 == 0 && act_fn_option == 0) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 4 == 0 && act_fn_option == 1) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 2 == 0 && act_fn_option == 1) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 1 == 0 && act_fn_option == 1) + 
dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 4 == 0 && act_fn_option == 2) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 2 == 0 && act_fn_option == 2) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 1 == 0 && act_fn_option == 2) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 4 == 0 && act_fn_option == 3) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 2 == 0 && act_fn_option == 3) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 1 == 0 && act_fn_option == 3) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 4 == 0 && act_fn_option == 4) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 2 == 0 && act_fn_option == 4) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (D % 1 == 0 && act_fn_option == 4) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + } + else { // relatively slow fallback + const int num_blocks = N * H * W; + DEBUG("SLOW FALLBACK, dx elem kernel starting, N: %d, H: %d, W: %d, C: %d, G: %d, D: %d, TPB: %d, f: %d, num blocks (before vectors): %d, vec_elems: %d\n", N, H, W, C, G, D, C/f, f, num_blocks, vec_elems); + if (act_fn_option == 0) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (act_fn_option == 1) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (act_fn_option == 2) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (act_fn_option == 3) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + else if (act_fn_option == 4) + dx_elem_kernel<<>>(dy_data, X_data, coef1_data, coef2_data, coef3_data, coef4_data, N, C, G, dx_data); + } + } + + c10::cuda::CUDACachingAllocator::raw_delete(xdy_sum_data); + c10::cuda::CUDACachingAllocator::raw_delete(dy_sum_data); + c10::cuda::CUDACachingAllocator::raw_delete(coef1_data); + c10::cuda::CUDACachingAllocator::raw_delete(coef2_data); + c10::cuda::CUDACachingAllocator::raw_delete(coef3_data); + c10::cuda::CUDACachingAllocator::raw_delete(coef4_data); +} + +template void run_gn_bwd_kernels(const double *dy_data, const double *X_data, const double *weight_data, const double *bias_data, const double *mean_data, const double *rstd_data, const int N, const int H, const int W, const int C, const int G, const int64_t act_fn_option, double *dx_data, double *dweight_data, double *dbias_data); +template void run_gn_bwd_kernels(const float *dy_data, const float *X_data, const float *weight_data, const float *bias_data, const float *mean_data, const float *rstd_data, const int N, const int H, const int W, const int C, const int G, const int64_t act_fn_option, float *dx_data, float *dweight_data, float *dbias_data); +template void run_gn_bwd_kernels(const c10::Half *dy_data, const c10::Half *X_data, const c10::Half 
*weight_data, const c10::Half *bias_data, const c10::Half *mean_data, const c10::Half *rstd_data, const int N, const int H, const int W, const int C, const int G, const int64_t act_fn_option, c10::Half *dx_data, c10::Half *dweight_data, c10::Half *dbias_data); +template void run_gn_bwd_kernels(const c10::BFloat16 *dy_data, const c10::BFloat16 *X_data, const c10::BFloat16 *weight_data, const c10::BFloat16 *bias_data, const c10::BFloat16 *mean_data, const c10::BFloat16 *rstd_data, const int N, const int H, const int W, const int C, const int G, const int64_t act_fn_option, c10::BFloat16 *dx_data, c10::BFloat16 *dweight_data, c10::BFloat16 *dbias_data); diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/gn_kernel.h b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/gn_kernel.h new file mode 100644 index 000000000..83003d25b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/gn_kernel.h @@ -0,0 +1,39 @@ +#pragma once +#ifndef FWD_GN_KERNEL_H +#define FWD_GN_KERNEL_H + +template +void run_gn_fwd_kernels( + const T *X_data, + const T *weight_data, + const T *bias_data, + const int N, + const int H, + const int W, + const int C, + const int G, + T eps, + const int64_t act_fn_option, + T *Y_data, + T *mean_data, + T *rstd_data); + +template +void run_gn_bwd_kernels( + const T *dy_data, + const T *X_data, + const T *weight_data, + const T *bias_data, + const T *mean_data, + const T *rstd_data, + const int N, + const int H, + const int W, + const int C, + const int G, + const int64_t act_fn_option, + T *dx_data, + T *dweight_data, + T *dbias_data); + +#endif diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/nchw_kernel.cu b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/nchw_kernel.cu new file mode 100644 index 000000000..5dc5605e0 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/nchw_kernel.cu @@ -0,0 +1,994 @@ +// Copied from https://github.com/pytorch/pytorch/blob/8852bb561cbc821ffebf395990ee12a7ea376612/aten/src/ATen/native/cuda/group_norm_kernel.cu with slight style modifications +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +#include + +#include + +#include + +//#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +constexpr int kCUDANumThreads = 256; +constexpr int kReduceTileSize = 32; + +template +__global__ void RowwiseMomentsCUDAKernelF( + int64_t N, + T eps, + const T* X, + T* mean, + T* rstd) { + using T_ACC = at::acc_type; + using WelfordType = at::native::WelfordData; + using WelfordOp = + at::native::WelfordOps>; + + const int64_t i = blockIdx.x; + WelfordOp welford_op = {/*correction=*/0, /*take_sqrt=*/false}; + WelfordType val(0, 0, 0, 0); + for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { + const int64_t index = i * N + j; + val = welford_op.reduce(val, static_cast(X[index]), index); + } + if (blockDim.x <= C10_WARP_SIZE) { + val = at::native::cuda_utils::WarpReduce(val, welford_op); + } else { + // There will be a warning if we declare a __shared__ WelfordType array. 
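+    // CUDA does not run non-trivial constructors for __shared__ variables (dynamic
+    // initialization is unsupported), so the kernel reserves raw aligned storage
+    // below and reinterpret_casts it to WelfordType* instead; see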
+ // https://github.com/pytorch/pytorch/pull/13967 + __shared__ typename std::aligned_storage< + sizeof(WelfordType), + alignof(WelfordType)>::type val_shared[C10_WARP_SIZE]; + WelfordType* val_shared_ptr = reinterpret_cast(val_shared); + val = at::native::cuda_utils::BlockReduce( + val, + welford_op, + /*identity_element=*/WelfordType(0, 0, 0, 0), + val_shared_ptr); + } + if (threadIdx.x == 0) { + T_ACC m1; + T_ACC m2; + thrust::tie(m2, m1) = welford_op.project(val); + mean[i] = m1; + rstd[i] = c10::cuda::compat::rsqrt(m2 + static_cast(eps)); + } +} + +template +__global__ void ComputeFusedParamsCUDAKernelF( + int64_t N, + int64_t C, + int64_t group, + const T* mean, + const T* rstd, + const T* gamma, + const T* beta, + at::acc_type* a, + at::acc_type* b) { + using T_ACC = at::acc_type; + const int64_t index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < N * C) { + const int64_t ng = index / (C / group); + const int64_t c = index % C; + const T_ACC scale = (gamma == nullptr) + ? static_cast(rstd[ng]) + : static_cast(rstd[ng]) * static_cast(gamma[c]); + a[index] = scale; + b[index] = -scale * static_cast(mean[ng]) + + ((beta == nullptr) ? 0 : static_cast(beta[c])); + } +} + +template +__global__ void Compute1dBackwardFusedParamsCUDAKernelF( + int64_t C, + int64_t group, + const T* dY, + const T* X, + const T* mean, + const T* rstd, + const T* gamma, + at::acc_type* c2, + at::acc_type* c3) { + using T_ACC = at::acc_type; + const int64_t G = group; + const int64_t D = C / G; + const int64_t n = blockIdx.x; + const int64_t g = blockIdx.y; + const int64_t ng = n * G + g; + T_ACC sum1 = 0; + T_ACC sum2 = 0; + for (int64_t i = threadIdx.x; i < D; i += blockDim.x) { + const int64_t index = ng * D + i; + const int64_t c = g * D + i; + const T_ACC gamma_v = + gamma == nullptr ? T_ACC(1) : static_cast(gamma[c]); + sum1 += dY[index] * X[index] * gamma_v; + sum2 += dY[index] * gamma_v; + } + if (blockDim.x <= C10_WARP_SIZE) { + sum1 = at::native::cuda_utils::WarpReduceSum(sum1); + sum2 = at::native::cuda_utils::WarpReduceSum(sum2); + } else { + __shared__ T_ACC ds_shared[C10_WARP_SIZE]; + __shared__ T_ACC db_shared[C10_WARP_SIZE]; + sum1 = at::native::cuda_utils::BlockReduceSum(sum1, ds_shared); + sum2 = at::native::cuda_utils::BlockReduceSum(sum2, db_shared); + } + if (threadIdx.x == 0) { + const T_ACC s = T_ACC(1) / static_cast(D); + const T_ACC x = (sum2 * static_cast(mean[ng]) - sum1) * + static_cast(rstd[ng]) * static_cast(rstd[ng]) * + static_cast(rstd[ng]) * s; + //printf("\n\nng: %d, x: %f\n\n", (int)ng, x); + c2[ng] = x; + c3[ng] = -x * static_cast(mean[ng]) - + sum2 * static_cast(rstd[ng]) * s; + } +} + +template +__global__ void GammaBeta1dBackwardCUDAKernelF1( + int64_t N, + int64_t C, + int64_t group, + const T* dY, + const T* X, + const T* mean, + const T* rstd, + T* dgamma, + T* dbeta) { + using T_ACC = at::acc_type; + const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; + if (c < C) { + const int64_t G = group; + const int64_t D = C / G; + T_ACC sum1 = 0; + T_ACC sum2 = 0; + for (int64_t n = 0; n < N; ++n) { + const int64_t nc = n * C + c; + const int64_t ng = n * G + c / D; + const T_ACC dy_acc = static_cast(dY[nc]); + const T_ACC x_acc = static_cast(X[nc]); + sum1 += (dgamma == nullptr) + ? T_ACC(0) + : ((dy_acc * x_acc - dy_acc * static_cast(mean[ng])) * + static_cast(rstd[ng])); + sum2 += (dbeta == nullptr) ? 
T_ACC(0) : dy_acc; + } + if (dgamma != nullptr) { + dgamma[c] = sum1; + } + if (dbeta != nullptr) { + dbeta[c] = sum2; + } + } +} + +template +__global__ void GammaBeta1dBackwardCUDAKernelF2( + int64_t N, + int64_t C, + int64_t group, + const T* dY, + const T* X, + const T* mean, + const T* rstd, + T* dgamma, + T* dbeta) { + using T_ACC = at::acc_type; + __shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1]; + __shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1]; + const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; + T_ACC dg_sum1 = 0; + T_ACC dg_sum2 = 0; + T_ACC db_sum1 = 0; + T_ACC db_sum2 = 0; + if (c < C) { + const int64_t G = group; + const int64_t D = C / G; + // Accumulate each 32 cols into a 32 * 32 tile. + // Since the blockDim is (32, 16), accumulate twice for 1st and 2nd 16 rows + // of a 32 contiguous elements. + for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) { + const int64_t n1 = n; + const int64_t n2 = n + blockDim.y; + const int64_t nc1 = n1 * C + c; + const int64_t nc2 = n2 * C + c; + const int64_t ng1 = n1 * G + c / D; + const int64_t ng2 = n2 * G + c / D; + const T_ACC dy1_acc = static_cast(dY[nc1]); + const T_ACC x1_acc = static_cast(X[nc1]); + dg_sum1 += dgamma == nullptr + ? T_ACC(0) + : ((dy1_acc * x1_acc - dy1_acc * static_cast(mean[ng1])) * + static_cast(rstd[ng1])); + db_sum1 += dbeta == nullptr ? T_ACC(0) : dy1_acc; + if (n2 < N) { + const T_ACC dy2_acc = static_cast(dY[nc2]); + const T_ACC x2_acc = static_cast(X[nc2]); + dg_sum2 += dgamma == nullptr + ? T_ACC(0) + : ((dy2_acc * x2_acc - dy2_acc * static_cast(mean[ng2])) * + static_cast(rstd[ng2])); + db_sum2 += dbeta == nullptr ? T_ACC(0) : dy2_acc; + } + } + } + + // Write accumulated tile to shared memory. + g_shared[threadIdx.y][threadIdx.x] = dg_sum1; + g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; + b_shared[threadIdx.y][threadIdx.x] = db_sum1; + b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; + __syncthreads(); + + // Do warp reduce for the 1st 16 cols in the tile. + T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; + T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; + sum1 = at::native::cuda_utils::WarpReduceSum(sum1); + sum2 = at::native::cuda_utils::WarpReduceSum(sum2); + if (threadIdx.x == 0) { + const int64_t c = blockIdx.x * blockDim.x + threadIdx.y; + if (c < C) { + if (dgamma != nullptr) { + dgamma[c] = sum1; + } + if (dbeta != nullptr) { + dbeta[c] = sum2; + } + } + } + + // Do warp reduce for the 2nd 16 cols in the tile. 
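+    // Same pattern as the first 16 columns: the tile is read back transposed, so the
+    // 32 lanes of a warp (fixed threadIdx.y) walk down one column of the 32 x 32 tile,
+    // i.e. all partial sums belonging to a single channel, and WarpReduceSum collapses
+    // them; lane 0 then writes dgamma[c] / dbeta[c]. The kReduceTileSize + 1 padding on
+    // the shared arrays keeps these strided column reads free of bank conflicts. This
+    // second pass covers the second half of the block's 32-channel tile
+    // (c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y).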
+ sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; + sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; + sum1 = at::native::cuda_utils::WarpReduceSum(sum1); + sum2 = at::native::cuda_utils::WarpReduceSum(sum2); + if (threadIdx.x == 0) { + const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; + if (c < C) { + if (dgamma != nullptr) { + dgamma[c] = sum1; + } + if (dbeta != nullptr) { + dbeta[c] = sum2; + } + } + } +} + +template +__global__ void ComputeInternalGradientsCUDAKernelF( + int64_t HxW, + const T* dY, + const T* X, + at::acc_type* ds, + at::acc_type* db) { + using T_ACC = at::acc_type; + const int64_t nc = blockIdx.x; + T_ACC sum1 = 0; + T_ACC sum2 = 0; + for (int64_t hw = threadIdx.x; hw < HxW; hw += blockDim.x) { + const int64_t index = nc * HxW + hw; + sum1 += static_cast(dY[index]) * static_cast(X[index]); + sum2 += static_cast(dY[index]); + } + if (blockDim.x <= C10_WARP_SIZE) { + sum1 = at::native::cuda_utils::WarpReduceSum(sum1); + sum2 = at::native::cuda_utils::WarpReduceSum(sum2); + } else { + __shared__ T_ACC ds_shared[C10_WARP_SIZE]; + __shared__ T_ACC db_shared[C10_WARP_SIZE]; + sum1 = at::native::cuda_utils::BlockReduceSum(sum1, ds_shared); + sum2 = at::native::cuda_utils::BlockReduceSum(sum2, db_shared); + } + if (threadIdx.x == 0) { + ds[nc] = sum1; + db[nc] = sum2; + } +} + +template +__global__ void ComputeBackwardFusedParamsCUDAKernelF( + int64_t C, + int64_t HxW, + int64_t group, + const T* mean, + const T* rstd, + const T* gamma, + const at::acc_type* ds, + const at::acc_type* db, + at::acc_type* c2, + at::acc_type* c3) { + using T_ACC = at::acc_type; + const int64_t G = group; + const int64_t D = C / G; + const int64_t n = blockIdx.x; + const int64_t g = blockIdx.y; + const int64_t ng = n * G + g; + T_ACC sum1 = 0; + T_ACC sum2 = 0; + for (int64_t i = threadIdx.x; i < D; i += blockDim.x) { + const int64_t index = ng * D + i; + const int64_t c = g * D + i; + const T_ACC gamma_v = + gamma == nullptr ? T_ACC(1) : static_cast(gamma[c]); + sum1 += ds[index] * gamma_v; + sum2 += db[index] * gamma_v; + } + if (blockDim.x <= C10_WARP_SIZE) { + sum1 = at::native::cuda_utils::WarpReduceSum(sum1); + sum2 = at::native::cuda_utils::WarpReduceSum(sum2); + } else { + __shared__ T_ACC ds_shared[C10_WARP_SIZE]; + __shared__ T_ACC db_shared[C10_WARP_SIZE]; + sum1 = at::native::cuda_utils::BlockReduceSum(sum1, ds_shared); + sum2 = at::native::cuda_utils::BlockReduceSum(sum2, db_shared); + } + if (threadIdx.x == 0) { + const T_ACC s = T_ACC(1) / static_cast(D * HxW); + const T_ACC x = (sum2 * static_cast(mean[ng]) - sum1) * + static_cast(rstd[ng]) * static_cast(rstd[ng]) * + static_cast(rstd[ng]) * s; + c2[ng] = x; + c3[ng] = -x * static_cast(mean[ng]) - + sum2 * static_cast(rstd[ng]) * s; + } +} + +template +__global__ void GammaBetaBackwardCUDAKernelF1( + int64_t N, + int64_t C, + int64_t group, + const T* mean, + const T* rstd, + const at::acc_type* ds, + const at::acc_type* db, + T* dgamma, + T* dbeta) { + using T_ACC = at::acc_type; + const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; + if (c < C) { + const int64_t G = group; + const int64_t D = C / G; + T_ACC sum1 = 0; + T_ACC sum2 = 0; + for (int64_t n = 0; n < N; ++n) { + const int64_t nc = n * C + c; + const int64_t ng = n * G + c / D; + sum1 += (dgamma == nullptr) + ? T_ACC(0) + : ((ds[nc] - db[nc] * static_cast(mean[ng])) * + static_cast(rstd[ng])); + sum2 += (dbeta == nullptr) ? 
T_ACC(0) : db[nc]; + } + if (dgamma != nullptr) { + dgamma[c] = sum1; + } + if (dbeta != nullptr) { + dbeta[c] = sum2; + } + } +} + +template +__global__ void GammaBetaBackwardCUDAKernelF2( + int64_t N, + int64_t C, + int64_t group, + const T* mean, + const T* rstd, + const at::acc_type* ds, + const at::acc_type* db, + T* dgamma, + T* dbeta) { + using T_ACC = at::acc_type; + __shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1]; + __shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1]; + const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; + T_ACC dg_sum1 = 0; + T_ACC dg_sum2 = 0; + T_ACC db_sum1 = 0; + T_ACC db_sum2 = 0; + if (c < C) { + const int64_t G = group; + const int64_t D = C / G; + // Accumulate each 32 cols into a 32 * 32 tile. + // Since the blockDim is (32, 16), accumulate twice for 1st and 2nd 16 rows + // of a 32 contiguous elements. + for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) { + const int64_t n1 = n; + const int64_t n2 = n + blockDim.y; + const int64_t nc1 = n1 * C + c; + const int64_t nc2 = n2 * C + c; + const int64_t ng1 = n1 * G + c / D; + const int64_t ng2 = n2 * G + c / D; + dg_sum1 += dgamma == nullptr + ? T_ACC(0) + : ((ds[nc1] - db[nc1] * static_cast(mean[ng1])) * + static_cast(rstd[ng1])); + db_sum1 += dbeta == nullptr ? T_ACC(0) : db[nc1]; + if (n2 < N) { + dg_sum2 += dgamma == nullptr + ? T_ACC(0) + : ((ds[nc2] - db[nc2] * static_cast(mean[ng2])) * + static_cast(rstd[ng2])); + db_sum2 += dbeta == nullptr ? T_ACC(0) : db[nc2]; + } + } + } + + // Write accumulated tile to shared memory. + g_shared[threadIdx.y][threadIdx.x] = dg_sum1; + g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; + b_shared[threadIdx.y][threadIdx.x] = db_sum1; + b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; + __syncthreads(); + + // Do warp reduce for the 1st 16 cols in the tile. + T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; + T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; + sum1 = at::native::cuda_utils::WarpReduceSum(sum1); + sum2 = at::native::cuda_utils::WarpReduceSum(sum2); + if (threadIdx.x == 0) { + const int64_t c = blockIdx.x * blockDim.x + threadIdx.y; + if (c < C) { + if (dgamma != nullptr) { + dgamma[c] = sum1; + } + if (dbeta != nullptr) { + dbeta[c] = sum2; + } + } + } + + // Do warp reduce for the 2st 16 cols in the tile. 
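+    // As in the 1d variant above, each warp reduces one transposed column (one output
+    // channel) of the tile; the c < C guard below handles the tail block when C is not
+    // a multiple of kReduceTileSize.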
+ sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; + sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; + sum1 = at::native::cuda_utils::WarpReduceSum(sum1); + sum2 = at::native::cuda_utils::WarpReduceSum(sum2); + if (threadIdx.x == 0) { + const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; + if (c < C) { + if (dgamma != nullptr) { + dgamma[c] = sum1; + } + if (dbeta != nullptr) { + dbeta[c] = sum2; + } + } + } +} + +template +void GroupNorm1dForward( + const at::Tensor& X, + const at::Tensor& mean, + const at::Tensor& rstd, + const at::Tensor& gamma, + const at::Tensor& beta, + int64_t N, + int64_t C, + int64_t group, + at::Tensor& Y) { + using T_ACC = at::acc_type; + const int64_t G = group; + const int64_t D = C / G; + if (gamma.defined() && beta.defined()) { + auto iter = at::TensorIteratorConfig() + .resize_outputs(false) + .add_owned_output(Y.view({N, G, D})) + .add_owned_input(X.view({N, G, D})) + .add_owned_input(mean.view({N, G, 1})) + .add_owned_input(rstd.view({N, G, 1})) + .add_owned_input(gamma.view({1, G, D})) + .add_owned_input(beta.view({1, G, D})) + .build(); + at::native::gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma, T beta) -> T { + return (static_cast(x) - static_cast(mean)) * + static_cast(rstd) * static_cast(gamma) + + static_cast(beta); + }); + } else if (gamma.defined()) { + auto iter = at::TensorIteratorConfig() + .resize_outputs(false) + .add_owned_output(Y.view({N, G, D})) + .add_owned_input(X.view({N, G, D})) + .add_owned_input(mean.view({N, G, 1})) + .add_owned_input(rstd.view({N, G, 1})) + .add_owned_input(gamma.view({1, G, D})) + .build(); + at::native::gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma) -> T { + return (static_cast(x) - static_cast(mean)) * + static_cast(rstd) * static_cast(gamma); + }); + } else if (beta.defined()) { + auto iter = at::TensorIteratorConfig() + .resize_outputs(false) + .add_owned_output(Y.view({N, G, D})) + .add_owned_input(X.view({N, G, D})) + .add_owned_input(mean.view({N, G, 1})) + .add_owned_input(rstd.view({N, G, 1})) + .add_owned_input(beta.view({1, G, D})) + .build(); + at::native::gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T beta) -> T { + return (static_cast(x) - static_cast(mean)) * + static_cast(rstd) + + static_cast(beta); + }); + } else { + auto iter = at::TensorIteratorConfig() + .resize_outputs(false) + .add_owned_output(Y.view({N * G, D})) + .add_owned_input(X.view({N * G, D})) + .add_owned_input(mean.view({N * G, 1})) + .add_owned_input(rstd.view({N * G, 1})) + .build(); + at::native::gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T { + return (static_cast(x) - static_cast(mean)) * + static_cast(rstd); + }); + } + AT_CUDA_CHECK(cudaGetLastError()); +} + +template +void GroupNormKernelImplInternal( + const at::Tensor& X, + const at::Tensor& gamma, + const at::Tensor& beta, + int64_t N, + int64_t C, + int64_t HxW, + int64_t group, + T eps, + at::Tensor& Y, + at::Tensor& mean, + at::Tensor& rstd) { + using T_ACC = at::acc_type; + TORCH_CHECK(X.numel() == N * C * HxW); + TORCH_CHECK(!gamma.defined() || gamma.numel() == C); + TORCH_CHECK(!beta.defined() || beta.numel() == C); + if (N == 0) { + return; + } + const int64_t G = group; + const int64_t D = C / G; + const T* X_data = X.const_data_ptr(); + T* mean_data = mean.mutable_data_ptr(); + T* rstd_data = rstd.mutable_data_ptr(); + + cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); + const int64_t num_threads = D * HxW < at::native::cuda_utils::kCUDABlockReduceNumThreads + ? 
at::cuda::warp_size() + : at::native::cuda_utils::kCUDABlockReduceNumThreads; + RowwiseMomentsCUDAKernelF<<>>( + D * HxW, eps, X_data, mean_data, rstd_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + if (HxW == 1) { + GroupNorm1dForward(X, mean, rstd, gamma, beta, N, C, G, Y); + } else if (!gamma.defined() && !beta.defined()) { + auto iter = at::TensorIteratorConfig() + .resize_outputs(false) + .add_owned_output(Y.view({N * G, D * HxW})) + .add_owned_input(X.view({N * G, D * HxW})) + .add_owned_input(mean.view({N * G, 1})) + .add_owned_input(rstd.view({N * G, 1})) + .build(); + at::native::gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T { + return (static_cast(x) - static_cast(mean)) * + static_cast(rstd); + }); + } else { + const auto kAccType = + (X.scalar_type() == at::kHalf || X.scalar_type() == at::kBFloat16) + ? at::kFloat + : X.scalar_type(); + at::Tensor a = at::empty({N, C}, X.options().dtype(kAccType)); + at::Tensor b = at::empty({N, C}, X.options().dtype(kAccType)); + const T* gamma_data = gamma.defined() ? gamma.const_data_ptr() : nullptr; + const T* beta_data = beta.defined() ? beta.const_data_ptr() : nullptr; + T_ACC* a_data = a.mutable_data_ptr(); + T_ACC* b_data = b.mutable_data_ptr(); + + // TODO: Since there is some issues in at::native::gpu_kernel_multiple_outputs, we are + // using maunal kernel here. Make it using at::native::gpu_kernel_multiple_outputs once + // the issue fixed. + const int64_t B = (N * C + kCUDANumThreads - 1) / kCUDANumThreads; + ComputeFusedParamsCUDAKernelF<<>>( + N, C, G, mean_data, rstd_data, gamma_data, beta_data, a_data, b_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + auto iter = at::TensorIteratorConfig() + .check_all_same_dtype(std::is_same::value) + .resize_outputs(false) + .add_owned_output(Y.view({N * C, HxW})) + .add_owned_input(X.view({N * C, HxW})) + .add_owned_input(a.view({N * C, 1})) + .add_owned_input(b.view({N * C, 1})) + .build(); + at::native::gpu_kernel(iter, [] GPU_LAMBDA(T x, T_ACC a, T_ACC b) -> T { + return a * static_cast(x) + b; + }); + } + AT_CUDA_CHECK(cudaGetLastError()); +} + +void GroupNormKernelImpl( + const at::Tensor& X, + const at::Tensor& gamma, + const at::Tensor& beta, + int64_t N, + int64_t C, + int64_t HxW, + int64_t group, + double eps, + at::Tensor& Y, + at::Tensor& mean, + at::Tensor& rstd) { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + X.scalar_type(), + "GroupNormKernelImpl", + [&]() { + GroupNormKernelImplInternal( + X, + gamma, + beta, + N, + C, + HxW, + group, + static_cast(eps), + Y, + mean, + rstd); + }); +} + +template +void GroupNorm1dBackward( + const at::Tensor dY, + const at::Tensor X, + const at::Tensor mean, + const at::Tensor rstd, + const at::Tensor gamma, + int64_t N, + int64_t C, + int64_t group, + at::Tensor& dX, + at::Tensor& dgamma, + at::Tensor& dbeta) { + using T_ACC = at::acc_type; + const int64_t G = group; + const int64_t D = C / G; + const T* dY_data = dY.const_data_ptr(); + const T* X_data = X.const_data_ptr(); + const T* mean_data = mean.const_data_ptr(); + const T* rstd_data = rstd.const_data_ptr(); + + cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); + if (dX.defined()) { + const T* gamma_data = gamma.defined() ? gamma.const_data_ptr() : nullptr; + const auto kAccType = + (X.scalar_type() == at::kHalf || X.scalar_type() == at::kBFloat16) + ? 
at::kFloat + : X.scalar_type(); + at::Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType)); + at::Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType)); + T_ACC* c2_data = c2.mutable_data_ptr(); + T_ACC* c3_data = c3.mutable_data_ptr(); + const int64_t num_threads = (C / G) < at::native::cuda_utils::kCUDABlockReduceNumThreads + ? at::cuda::warp_size() + : at::native::cuda_utils::kCUDABlockReduceNumThreads; + Compute1dBackwardFusedParamsCUDAKernelF + <<>>( + C, + G, + dY_data, + X_data, + mean_data, + rstd_data, + gamma_data, + c2_data, + c3_data); + //std::cout << "mean: " << mean << '\n'; + //std::cout << "rstd: " << rstd << '\n'; + //std::cout << "g: " << gamma << '\n'; + //std::cout << "c2: " << c2 << '\n'; + //std::cout << "c3: " << c3 << '\n'; + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + if (gamma.defined()) { + auto iter = at::TensorIteratorConfig() + .check_all_same_dtype(std::is_same::value) + .resize_outputs(false) + .add_owned_output(dX.view({N, G, D})) + .add_owned_input(dY.view({N, G, D})) + .add_owned_input(X.view({N, G, D})) + .add_owned_input(rstd.view({N, G, 1})) + .add_owned_input(gamma.view({1, G, D})) + .add_owned_input(c2.view({N, G, 1})) + .add_owned_input(c3.view({N, G, 1})) + .build(); + at::native::gpu_kernel( + iter, + [] GPU_LAMBDA(T dy, T x, T rstd, T gamma, T_ACC c2, T_ACC c3) -> T { + const T_ACC c1 = + static_cast(rstd) * static_cast(gamma); + return c1 * static_cast(dy) + c2 * static_cast(x) + + c3; + }); + } else { + auto iter = at::TensorIteratorConfig() + .check_all_same_dtype(std::is_same::value) + .resize_outputs(false) + .add_owned_output(dX.view({N * G, D})) + .add_owned_input(dY.view({N * G, D})) + .add_owned_input(X.view({N * G, D})) + .add_owned_input(rstd.view({N * G, 1})) + .add_owned_input(c2.view({N * G, 1})) + .add_owned_input(c3.view({N * G, 1})) + .build(); + at::native::gpu_kernel( + iter, [] GPU_LAMBDA(T dy, T x, T rstd, T_ACC c2, T_ACC c3) -> T { + const T_ACC c1 = static_cast(rstd); + return c1 * static_cast(dy) + c2 * static_cast(x) + + c3; + }); + } + } + if (dgamma.defined() || dbeta.defined()) { + T* dgamma_data = dgamma.defined() ? dgamma.mutable_data_ptr() : nullptr; + T* dbeta_data = dbeta.defined() ? dbeta.mutable_data_ptr() : nullptr; + if (N <= 128) { + const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads; + GammaBeta1dBackwardCUDAKernelF1<<>>( + N, + C, + G, + dY_data, + X_data, + mean_data, + rstd_data, + dgamma_data, + dbeta_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } else { + const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize; + // The algorithm for colwise reduction here is to accumulate each 32 cols + // to a 32 * 32 tile and write the tile to shared memmory. Then do warp + // reduce for each col in the tile. So here the blockDim must be (32, 16). 
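+      // Illustrative launch shape (example numbers only): for C = 320 channels this is
+      // B = ceil(320 / 32) = 10 blocks of 32 x 16 = 512 threads, each block reducing
+      // the dgamma / dbeta contributions for 32 consecutive channels across all N samples.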
+ constexpr int kThreadX = kReduceTileSize; + constexpr int kThreadY = kReduceTileSize / 2; + GammaBeta1dBackwardCUDAKernelF2 + <<>>( + N, + C, + G, + dY_data, + X_data, + mean_data, + rstd_data, + dgamma_data, + dbeta_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + } +} + +template +void GroupNormBackwardKernelImplInternal( + const at::Tensor& dY, + const at::Tensor& X, + const at::Tensor& mean, + const at::Tensor& rstd, + const at::Tensor& gamma, + int64_t N, + int64_t C, + int64_t HxW, + int64_t group, + at::Tensor& dX, + at::Tensor& dgamma, + at::Tensor& dbeta) { + using T_ACC = at::acc_type; + const int64_t G = group; + const int64_t D = C / G; + TORCH_CHECK(dY.numel() == N * C * HxW); + TORCH_CHECK(X.numel() == N * C * HxW); + TORCH_CHECK(mean.numel() == N * G); + TORCH_CHECK(rstd.numel() == N * G); + TORCH_CHECK(!gamma.defined() || gamma.numel() == C); + cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); + + if (N == 0) { + if (dgamma.defined()) { + dgamma.fill_(T(0)); + } + if (dbeta.defined()) { + dbeta.fill_(T(0)); + } + return; + } + + const T* dY_data = dY.const_data_ptr(); + const T* X_data = X.const_data_ptr(); + const T* mean_data = mean.const_data_ptr(); + const T* rstd_data = rstd.const_data_ptr(); + const T* gamma_data = gamma.defined() ? gamma.const_data_ptr() : nullptr; + const auto kAccType = + (X.scalar_type() == at::kHalf || X.scalar_type() == at::kBFloat16) + ? at::kFloat + : X.scalar_type(); + at::Tensor ds = at::empty({N, C}, X.options().dtype(kAccType)); + at::Tensor db = at::empty({N, C}, X.options().dtype(kAccType)); + T_ACC* ds_data = ds.mutable_data_ptr(); + T_ACC* db_data = db.mutable_data_ptr(); + + if (HxW == 1) { + GroupNorm1dBackward( + dY, X, mean, rstd, gamma, N, C, G, dX, dgamma, dbeta); + return; + } + + int warp_size = at::cuda::warp_size(); + int64_t num_threads = HxW < at::native::cuda_utils::kCUDABlockReduceNumThreads + ? warp_size + : at::native::cuda_utils::kCUDABlockReduceNumThreads; + ComputeInternalGradientsCUDAKernelF<<>>( + HxW, dY_data, X_data, ds_data, db_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + //std::cout << "ds: " << ds << '\n'; + //std::cout << "db: " << db << '\n'; + + if (dX.defined()) { + at::Tensor c1 = at::empty({0}, X.options().dtype(kAccType)); + at::Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType)); + at::Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType)); + T_ACC* c2_data = c2.mutable_data_ptr(); + T_ACC* c3_data = c3.mutable_data_ptr(); + + if (gamma.defined()) { + auto iter = at::TensorIteratorConfig() + .check_all_same_dtype(std::is_same::value) + .add_output(c1) + .add_owned_input(rstd.view({N, G, 1})) + .add_owned_input(gamma.view({1, G, D})) + .build(); + at::native::gpu_kernel(iter, [] GPU_LAMBDA(T rstd, T gamma) -> T_ACC { + return static_cast(rstd) * static_cast(gamma); + }); + } + + num_threads = (C / G) < at::native::cuda_utils::kCUDABlockReduceNumThreads + ? 
warp_size + : at::native::cuda_utils::kCUDABlockReduceNumThreads; + ComputeBackwardFusedParamsCUDAKernelF + <<>>( + C, + HxW, + G, + mean_data, + rstd_data, + gamma_data, + ds_data, + db_data, + c2_data, + c3_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + if (gamma.defined()) { + auto iter = at::TensorIteratorConfig() + .check_all_same_dtype(std::is_same::value) + .resize_outputs(false) + .add_owned_output(dX.view({N * G, D, HxW})) + .add_owned_input(dY.view({N * G, D, HxW})) + .add_owned_input(X.view({N * G, D, HxW})) + .add_owned_input(c1.view({N * G, D, 1})) + .add_owned_input(c2.view({N * G, 1, 1})) + .add_owned_input(c3.view({N * G, 1, 1})) + .build(); + at::native::gpu_kernel( + iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T { + return c1 * static_cast(dy) + c2 * static_cast(x) + + c3; + }); + } else { + auto iter = at::TensorIteratorConfig() + .check_all_same_dtype(std::is_same::value) + .resize_outputs(false) + .add_owned_output(dX.view({N * G, D * HxW})) + .add_owned_input(dY.view({N * G, D * HxW})) + .add_owned_input(X.view({N * G, D * HxW})) + .add_owned_input(rstd.view({N * G, 1})) + .add_owned_input(c2.view({N * G, 1})) + .add_owned_input(c3.view({N * G, 1})) + .build(); + at::native::gpu_kernel( + iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T { + return c1 * static_cast(dy) + c2 * static_cast(x) + + c3; + }); + } + } + if (dgamma.defined() || dbeta.defined()) { + T* dgamma_data = dgamma.defined() ? dgamma.mutable_data_ptr() : nullptr; + T* dbeta_data = dbeta.defined() ? dbeta.mutable_data_ptr() : nullptr; + if (N <= 128) { + // For small batch size, do colwise reduce directly. + const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads; + GammaBetaBackwardCUDAKernelF1<<>>( + N, + C, + G, + mean_data, + rstd_data, + ds_data, + db_data, + dgamma_data, + dbeta_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } else { + const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize; + // The algorithm for colwise reduction here is to accumulate each 32 cols + // to a 32 * 32 tile and write the tile to shared memmory. Then do warp + // reduce for each col in the tile. So here the blockDim must be (32, 16). 
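+      // Both GammaBetaBackward kernels evaluate, per channel c (with g = c / D):
+      //   dgamma[c] = sum_n (ds[n, c] - db[n, c] * mean[n, g]) * rstd[n, g]
+      //   dbeta[c]  = sum_n  db[n, c]
+      // where ds / db hold the per-(n, c) sums of dY * X and dY computed by
+      // ComputeInternalGradientsCUDAKernelF above; the two variants differ only in how
+      // the reduction over n is parallelized.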
+ constexpr int kThreadX = kReduceTileSize; + constexpr int kThreadY = kReduceTileSize / 2; + GammaBetaBackwardCUDAKernelF2 + <<>>( + N, + C, + G, + mean_data, + rstd_data, + ds_data, + db_data, + dgamma_data, + dbeta_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + } +} + +void GroupNormBackwardKernelImpl( + const at::Tensor& dY, + const at::Tensor& X, + const at::Tensor& mean, + const at::Tensor& rstd, + const at::Tensor& gamma, + int64_t N, + int64_t C, + int64_t HxW, + int64_t group, + at::Tensor& dX, + at::Tensor& dgamma, + at::Tensor& dbeta) { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + X.scalar_type(), + "GroupNormBackwardKernelImpl", + [&]() { + GroupNormBackwardKernelImplInternal( + dY, X, mean, rstd, gamma, N, C, HxW, group, dX, dgamma, dbeta); + }); +} diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/vecs.h b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/vecs.h new file mode 100644 index 000000000..0b8cb7a65 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/nhwc_groupnorm/vecs.h @@ -0,0 +1,35 @@ +#pragma once +#ifndef VECS_H +#define VECS_H + +template +struct float_vec; + +template +struct alignas(1 * sizeof(T)) float_vec { + T x; + template + __host__ __device__ operator float_vec() const { + return { static_cast(x), }; + } +}; + +template +struct alignas(2 * sizeof(T)) float_vec { + T x, y; + template + __host__ __device__ operator float_vec() const { + return { static_cast(x), static_cast(y), }; + } +}; + +template +struct alignas(4 * sizeof(T)) float_vec { + T x, y, z, w; + template + __host__ __device__ operator float_vec() const { + return { static_cast(x), static_cast(y), static_cast(z), static_cast(w), }; + } +}; + +#endif diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/normalization.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/normalization.py new file mode 100644 index 000000000..036a66890 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/normalization.py @@ -0,0 +1,254 @@ +# coding=utf-8 +# Copyright 2024 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numbers +from typing import Dict, Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..utils import is_torch_version +from .activations import get_activation +from .embeddings import CombinedTimestepLabelEmbeddings, PixArtAlphaCombinedTimestepSizeEmbeddings + + +class AdaLayerNorm(nn.Module): + r""" + Norm layer modified to incorporate timestep embeddings. + + Parameters: + embedding_dim (`int`): The size of each embedding vector. + num_embeddings (`int`): The size of the embeddings dictionary. 
+ """ + + def __init__(self, embedding_dim: int, num_embeddings: int): + super().__init__() + self.emb = nn.Embedding(num_embeddings, embedding_dim) + self.silu = nn.SiLU() + self.linear = nn.Linear(embedding_dim, embedding_dim * 2) + self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False) + + def forward(self, x: torch.Tensor, timestep: torch.Tensor) -> torch.Tensor: + emb = self.linear(self.silu(self.emb(timestep))) + scale, shift = torch.chunk(emb, 2) + x = self.norm(x) * (1 + scale) + shift + return x + + +class AdaLayerNormZero(nn.Module): + r""" + Norm layer adaptive layer norm zero (adaLN-Zero). + + Parameters: + embedding_dim (`int`): The size of each embedding vector. + num_embeddings (`int`): The size of the embeddings dictionary. + """ + + def __init__(self, embedding_dim: int, num_embeddings: int): + super().__init__() + + self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim) + + self.silu = nn.SiLU() + self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True) + self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6) + + def forward( + self, + x: torch.Tensor, + timestep: torch.Tensor, + class_labels: torch.LongTensor, + hidden_dtype: Optional[torch.dtype] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype))) + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1) + x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] + return x, gate_msa, shift_mlp, scale_mlp, gate_mlp + + +class AdaLayerNormSingle(nn.Module): + r""" + Norm layer adaptive layer norm single (adaLN-single). + + As proposed in PixArt-Alpha (see: https://arxiv.org/abs/2310.00426; Section 2.3). + + Parameters: + embedding_dim (`int`): The size of each embedding vector. + use_additional_conditions (`bool`): To use additional conditions for normalization or not. + """ + + def __init__(self, embedding_dim: int, use_additional_conditions: bool = False): + super().__init__() + + self.emb = PixArtAlphaCombinedTimestepSizeEmbeddings( + embedding_dim, size_emb_dim=embedding_dim // 3, use_additional_conditions=use_additional_conditions + ) + + self.silu = nn.SiLU() + self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True) + + def forward( + self, + timestep: torch.Tensor, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + batch_size: Optional[int] = None, + hidden_dtype: Optional[torch.dtype] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + # No modulation happening here. + embedded_timestep = self.emb(timestep, **added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_dtype) + return self.linear(self.silu(embedded_timestep)), embedded_timestep + + +class AdaGroupNorm(nn.Module): + r""" + GroupNorm layer modified to incorporate timestep embeddings. + + Parameters: + embedding_dim (`int`): The size of each embedding vector. + num_embeddings (`int`): The size of the embeddings dictionary. + num_groups (`int`): The number of groups to separate the channels into. + act_fn (`str`, *optional*, defaults to `None`): The activation function to use. + eps (`float`, *optional*, defaults to `1e-5`): The epsilon value to use for numerical stability. 
+ """ + + def __init__( + self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5 + ): + super().__init__() + self.num_groups = num_groups + self.eps = eps + + if act_fn is None: + self.act = None + else: + self.act = get_activation(act_fn) + + self.linear = nn.Linear(embedding_dim, out_dim * 2) + + def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor: + if self.act: + emb = self.act(emb) + emb = self.linear(emb) + emb = emb[:, :, None, None] + scale, shift = emb.chunk(2, dim=1) + + x = F.group_norm(x, self.num_groups, eps=self.eps) + x = x * (1 + scale) + shift + return x + + +class AdaLayerNormContinuous(nn.Module): + def __init__( + self, + embedding_dim: int, + conditioning_embedding_dim: int, + # NOTE: It is a bit weird that the norm layer can be configured to have scale and shift parameters + # because the output is immediately scaled and shifted by the projected conditioning embeddings. + # Note that AdaLayerNorm does not let the norm layer have scale and shift parameters. + # However, this is how it was implemented in the original code, and it's rather likely you should + # set `elementwise_affine` to False. + elementwise_affine=True, + eps=1e-5, + bias=True, + norm_type="layer_norm", + ): + super().__init__() + self.silu = nn.SiLU() + self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias) + if norm_type == "layer_norm": + self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias) + elif norm_type == "rms_norm": + self.norm = RMSNorm(embedding_dim, eps, elementwise_affine) + else: + raise ValueError(f"unknown norm_type {norm_type}") + + def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor: + emb = self.linear(self.silu(conditioning_embedding)) + scale, shift = torch.chunk(emb, 2, dim=1) + x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] + return x + + +if is_torch_version(">=", "2.1.0"): + LayerNorm = nn.LayerNorm +else: + # Has optional bias parameter compared to torch layer norm + # TODO: replace with torch layernorm once min required torch version >= 2.1 + class LayerNorm(nn.Module): + def __init__(self, dim, eps: float = 1e-5, elementwise_affine: bool = True, bias: bool = True): + super().__init__() + + self.eps = eps + + if isinstance(dim, numbers.Integral): + dim = (dim,) + + self.dim = torch.Size(dim) + + if elementwise_affine: + self.weight = nn.Parameter(torch.ones(dim)) + self.bias = nn.Parameter(torch.zeros(dim)) if bias else None + else: + self.weight = None + self.bias = None + + def forward(self, input): + return F.layer_norm(input, self.dim, self.weight, self.bias, self.eps) + + +class RMSNorm(nn.Module): + def __init__(self, dim, eps: float, elementwise_affine: bool = True): + super().__init__() + + self.eps = eps + + if isinstance(dim, numbers.Integral): + dim = (dim,) + + self.dim = torch.Size(dim) + + if elementwise_affine: + self.weight = nn.Parameter(torch.ones(dim)) + else: + self.weight = None + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.eps) + + if self.weight is not None: + # convert into half-precision if necessary + if self.weight.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.to(self.weight.dtype) + hidden_states = hidden_states * self.weight + else: + hidden_states = hidden_states.to(input_dtype) + + return 
hidden_states + + +class GlobalResponseNorm(nn.Module): + # Taken from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105 + def __init__(self, dim): + super().__init__() + self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) + self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) + + def forward(self, x): + gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True) + nx = gx / (gx.mean(dim=-1, keepdim=True) + 1e-6) + return self.gamma * (x * nx) + self.beta + x diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/prior_transformer.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/prior_transformer.py new file mode 100644 index 000000000..328835a95 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/prior_transformer.py @@ -0,0 +1,12 @@ +from ..utils import deprecate +from .transformers.prior_transformer import PriorTransformer, PriorTransformerOutput + + +class PriorTransformerOutput(PriorTransformerOutput): + deprecation_message = "Importing `PriorTransformerOutput` from `diffusers.models.prior_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.prior_transformer import PriorTransformerOutput`, instead." + deprecate("PriorTransformerOutput", "0.29", deprecation_message) + + +class PriorTransformer(PriorTransformer): + deprecation_message = "Importing `PriorTransformer` from `diffusers.models.prior_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.prior_transformer import PriorTransformer`, instead." + deprecate("PriorTransformer", "0.29", deprecation_message) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/resnet.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/resnet.py new file mode 100644 index 000000000..34b8eba0a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/resnet.py @@ -0,0 +1,814 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# `TemporalConvLayer` Copyright 2024 Alibaba DAMO-VILAB, The ModelScope Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from functools import partial +from typing import Optional, Tuple, Union +import os + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..utils import deprecate +from .activations import get_activation +from .attention_processor import SpatialNorm +from .downsampling import ( # noqa + Downsample1D, + Downsample2D, + FirDownsample2D, + KDownsample2D, + downsample_2d, +) +from .normalization import AdaGroupNorm +from .upsampling import ( # noqa + FirUpsample2D, + KUpsample2D, + Upsample1D, + Upsample2D, + upfirdn2d_native, + upsample_2d, +) + +from .nhwc_groupnorm.custom_gn import GN_NHWC + + +class ResnetBlockCondNorm2D(nn.Module): + r""" + A Resnet block that use normalization layer that incorporate conditioning information. + + Parameters: + in_channels (`int`): The number of channels in the input. + out_channels (`int`, *optional*, default to be `None`): + The number of output channels for the first conv2d layer. If None, same as `in_channels`. + dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use. + temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. + groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer. + groups_out (`int`, *optional*, default to None): + The number of groups to use for the second normalization layer. if set to None, same as `groups`. + eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization. + non_linearity (`str`, *optional*, default to `"swish"`): the activation function to use. + time_embedding_norm (`str`, *optional*, default to `"ada_group"` ): + The normalization layer for time embedding `temb`. Currently only support "ada_group" or "spatial". + kernel (`torch.FloatTensor`, optional, default to None): FIR filter, see + [`~models.resnet.FirUpsample2D`] and [`~models.resnet.FirDownsample2D`]. + output_scale_factor (`float`, *optional*, default to be `1.0`): the scale factor to use for the output. + use_in_shortcut (`bool`, *optional*, default to `True`): + If `True`, add a 1x1 nn.conv2d layer for skip-connection. + up (`bool`, *optional*, default to `False`): If `True`, add an upsample layer. + down (`bool`, *optional*, default to `False`): If `True`, add a downsample layer. + conv_shortcut_bias (`bool`, *optional*, default to `True`): If `True`, adds a learnable bias to the + `conv_shortcut` output. + conv_2d_out_channels (`int`, *optional*, default to `None`): the number of channels in the output. + If None, same as `out_channels`. 
+ """ + + def __init__( + self, + *, + in_channels: int, + out_channels: Optional[int] = None, + conv_shortcut: bool = False, + dropout: float = 0.0, + temb_channels: int = 512, + groups: int = 32, + groups_out: Optional[int] = None, + eps: float = 1e-6, + non_linearity: str = "swish", + time_embedding_norm: str = "ada_group", # ada_group, spatial + output_scale_factor: float = 1.0, + use_in_shortcut: Optional[bool] = None, + up: bool = False, + down: bool = False, + conv_shortcut_bias: bool = True, + conv_2d_out_channels: Optional[int] = None, + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + self.up = up + self.down = down + self.output_scale_factor = output_scale_factor + self.time_embedding_norm = time_embedding_norm + + conv_cls = nn.Conv2d + + if groups_out is None: + groups_out = groups + + if self.time_embedding_norm == "ada_group": # ada_group + self.norm1 = AdaGroupNorm(temb_channels, in_channels, groups, eps=eps) + elif self.time_embedding_norm == "spatial": + self.norm1 = SpatialNorm(in_channels, temb_channels) + else: + raise ValueError(f" unsupported time_embedding_norm: {self.time_embedding_norm}") + + self.conv1 = conv_cls(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + + if self.time_embedding_norm == "ada_group": # ada_group + self.norm2 = AdaGroupNorm(temb_channels, out_channels, groups_out, eps=eps) + elif self.time_embedding_norm == "spatial": # spatial + self.norm2 = SpatialNorm(out_channels, temb_channels) + else: + raise ValueError(f" unsupported time_embedding_norm: {self.time_embedding_norm}") + + self.dropout = torch.nn.Dropout(dropout) + + conv_2d_out_channels = conv_2d_out_channels or out_channels + self.conv2 = conv_cls(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1) + + self.nonlinearity = get_activation(non_linearity) + + self.upsample = self.downsample = None + if self.up: + self.upsample = Upsample2D(in_channels, use_conv=False) + elif self.down: + self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op") + + self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut + + self.conv_shortcut = None + if self.use_in_shortcut: + self.conv_shortcut = conv_cls( + in_channels, + conv_2d_out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=conv_shortcut_bias, + ) + + def forward(self, input_tensor: torch.FloatTensor, temb: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + + hidden_states = input_tensor + + hidden_states = self.norm1(hidden_states, temb) + + hidden_states = self.nonlinearity(hidden_states) + + if self.upsample is not None: + # upsample_nearest_nhwc fails with large batch sizes. 
see https://github.com/huggingface/diffusers/issues/984 + if hidden_states.shape[0] >= 64: + input_tensor = input_tensor.contiguous() + hidden_states = hidden_states.contiguous() + input_tensor = self.upsample(input_tensor) + hidden_states = self.upsample(hidden_states) + + elif self.downsample is not None: + input_tensor = self.downsample(input_tensor) + hidden_states = self.downsample(hidden_states) + + hidden_states = self.conv1(hidden_states) + + hidden_states = self.norm2(hidden_states, temb) + + hidden_states = self.nonlinearity(hidden_states) + + hidden_states = self.dropout(hidden_states) + hidden_states = self.conv2(hidden_states) + + if self.conv_shortcut is not None: + input_tensor = self.conv_shortcut(input_tensor) + + output_tensor = (input_tensor + hidden_states) / self.output_scale_factor + + return output_tensor + + +class ResnetBlock2D(nn.Module): + r""" + A Resnet block. + + Parameters: + in_channels (`int`): The number of channels in the input. + out_channels (`int`, *optional*, default to be `None`): + The number of output channels for the first conv2d layer. If None, same as `in_channels`. + dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use. + temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. + groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer. + groups_out (`int`, *optional*, default to None): + The number of groups to use for the second normalization layer. if set to None, same as `groups`. + eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization. + non_linearity (`str`, *optional*, default to `"swish"`): the activation function to use. + time_embedding_norm (`str`, *optional*, default to `"default"` ): Time scale shift config. + By default, apply timestep embedding conditioning with a simple shift mechanism. Choose "scale_shift" + for a stronger conditioning with scale and shift. + kernel (`torch.FloatTensor`, optional, default to None): FIR filter, see + [`~models.resnet.FirUpsample2D`] and [`~models.resnet.FirDownsample2D`]. + output_scale_factor (`float`, *optional*, default to be `1.0`): the scale factor to use for the output. + use_in_shortcut (`bool`, *optional*, default to `True`): + If `True`, add a 1x1 nn.conv2d layer for skip-connection. + up (`bool`, *optional*, default to `False`): If `True`, add an upsample layer. + down (`bool`, *optional*, default to `False`): If `True`, add a downsample layer. + conv_shortcut_bias (`bool`, *optional*, default to `True`): If `True`, adds a learnable bias to the + `conv_shortcut` output. + conv_2d_out_channels (`int`, *optional*, default to `None`): the number of channels in the output. + If None, same as `out_channels`. 
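+
+    Example (illustrative sketch; shapes below are arbitrary assumptions). In this patched copy the block also
+    reads the `USE_NHWC_GN` environment variable at construction time: when it is set to a non-zero value,
+    `norm1`/`norm2` become the fused `GN_NHWC(..., activation="silu")` kernel from `.nhwc_groupnorm`, which
+    presumably requires the custom extension to be built and channels-last inputs. The sketch below uses the
+    default `torch.nn.GroupNorm` path:
+
+        >>> import torch
+        >>> block = ResnetBlock2D(in_channels=64, out_channels=128, temb_channels=512, groups=32)
+        >>> sample = torch.randn(1, 64, 32, 32)
+        >>> temb = torch.randn(1, 512)
+        >>> out = block(sample, temb)  # -> (1, 128, 32, 32); a 1x1 conv shortcut matches the channel change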
+ """ + + def __init__( + self, + *, + in_channels: int, + out_channels: Optional[int] = None, + conv_shortcut: bool = False, + dropout: float = 0.0, + temb_channels: int = 512, + groups: int = 32, + groups_out: Optional[int] = None, + pre_norm: bool = True, + eps: float = 1e-6, + non_linearity: str = "swish", + skip_time_act: bool = False, + time_embedding_norm: str = "default", # default, scale_shift, + kernel: Optional[torch.FloatTensor] = None, + output_scale_factor: float = 1.0, + use_in_shortcut: Optional[bool] = None, + up: bool = False, + down: bool = False, + conv_shortcut_bias: bool = True, + conv_2d_out_channels: Optional[int] = None, + ): + super().__init__() + if time_embedding_norm == "ada_group": + raise ValueError( + "This class cannot be used with `time_embedding_norm==ada_group`, please use `ResnetBlockCondNorm2D` instead", + ) + if time_embedding_norm == "spatial": + raise ValueError( + "This class cannot be used with `time_embedding_norm==spatial`, please use `ResnetBlockCondNorm2D` instead", + ) + + self.pre_norm = True + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + self.up = up + self.down = down + self.output_scale_factor = output_scale_factor + self.time_embedding_norm = time_embedding_norm + self.skip_time_act = skip_time_act + + linear_cls = nn.Linear + conv_cls = nn.Conv2d + + if groups_out is None: + groups_out = groups + + self.fuse_gn_silu = True if int(os.environ.get("USE_NHWC_GN", 0)) else False + if self.fuse_gn_silu: + self.norm1 = GN_NHWC(groups, in_channels, activation="silu") + else: + self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True) + + self.conv1 = conv_cls(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + + if temb_channels is not None: + if self.time_embedding_norm == "default": + self.time_emb_proj = linear_cls(temb_channels, out_channels) + elif self.time_embedding_norm == "scale_shift": + self.time_emb_proj = linear_cls(temb_channels, 2 * out_channels) + else: + raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ") + else: + self.time_emb_proj = None + + if self.fuse_gn_silu: + self.norm2 = GN_NHWC(groups_out, out_channels, activation="silu") + else: + self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True) + + self.dropout = torch.nn.Dropout(dropout) + conv_2d_out_channels = conv_2d_out_channels or out_channels + self.conv2 = conv_cls(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1) + + self.nonlinearity = get_activation(non_linearity) + + self.upsample = self.downsample = None + if self.up: + if kernel == "fir": + fir_kernel = (1, 3, 3, 1) + self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel) + elif kernel == "sde_vp": + self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest") + else: + self.upsample = Upsample2D(in_channels, use_conv=False) + elif self.down: + if kernel == "fir": + fir_kernel = (1, 3, 3, 1) + self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel) + elif kernel == "sde_vp": + self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2) + else: + self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op") + + self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut + + self.conv_shortcut = None + if self.use_in_shortcut: + 
self.conv_shortcut = conv_cls( + in_channels, + conv_2d_out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=conv_shortcut_bias, + ) + + def forward(self, input_tensor: torch.FloatTensor, temb: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + + hidden_states = input_tensor + + hidden_states = self.norm1(hidden_states) + if not self.fuse_gn_silu: + hidden_states = self.nonlinearity(hidden_states) + + if self.upsample is not None: + # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984 + if hidden_states.shape[0] >= 64: + input_tensor = input_tensor.contiguous() + hidden_states = hidden_states.contiguous() + input_tensor = self.upsample(input_tensor) + hidden_states = self.upsample(hidden_states) + elif self.downsample is not None: + input_tensor = self.downsample(input_tensor) + hidden_states = self.downsample(hidden_states) + + hidden_states = self.conv1(hidden_states) + + if self.time_emb_proj is not None: + if not self.skip_time_act: + temb = self.nonlinearity(temb) + temb = self.time_emb_proj(temb)[:, :, None, None] + + if self.time_embedding_norm == "default": + if temb is not None: + hidden_states = hidden_states + temb + hidden_states = self.norm2(hidden_states) + elif self.time_embedding_norm == "scale_shift": + if temb is None: + raise ValueError( + f" `temb` should not be None when `time_embedding_norm` is {self.time_embedding_norm}" + ) + time_scale, time_shift = torch.chunk(temb, 2, dim=1) + hidden_states = self.norm2(hidden_states) + hidden_states = hidden_states * (1 + time_scale) + time_shift + else: + hidden_states = self.norm2(hidden_states) + + if not self.fuse_gn_silu: + hidden_states = self.nonlinearity(hidden_states) + + hidden_states = self.dropout(hidden_states) + hidden_states = self.conv2(hidden_states) + + if self.conv_shortcut is not None: + input_tensor = self.conv_shortcut(input_tensor) + + output_tensor = (input_tensor + hidden_states) / self.output_scale_factor + + return output_tensor + + +# unet_rl.py +def rearrange_dims(tensor: torch.Tensor) -> torch.Tensor: + if len(tensor.shape) == 2: + return tensor[:, :, None] + if len(tensor.shape) == 3: + return tensor[:, :, None, :] + elif len(tensor.shape) == 4: + return tensor[:, :, 0, :] + else: + raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") + + +class Conv1dBlock(nn.Module): + """ + Conv1d --> GroupNorm --> Mish + + Parameters: + inp_channels (`int`): Number of input channels. + out_channels (`int`): Number of output channels. + kernel_size (`int` or `tuple`): Size of the convolving kernel. + n_groups (`int`, default `8`): Number of groups to separate the channels into. + activation (`str`, defaults to `mish`): Name of the activation function. 
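+
+    Example (illustrative sketch; channel counts and sequence length are arbitrary assumptions):
+
+        >>> import torch
+        >>> block = Conv1dBlock(inp_channels=14, out_channels=32, kernel_size=5)
+        >>> out = block(torch.randn(4, 14, 16))  # -> (4, 32, 16); `padding=kernel_size // 2` keeps the length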
+ """ + + def __init__( + self, + inp_channels: int, + out_channels: int, + kernel_size: Union[int, Tuple[int, int]], + n_groups: int = 8, + activation: str = "mish", + ): + super().__init__() + + self.conv1d = nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2) + self.group_norm = nn.GroupNorm(n_groups, out_channels) + self.mish = get_activation(activation) + + def forward(self, inputs: torch.Tensor) -> torch.Tensor: + intermediate_repr = self.conv1d(inputs) + intermediate_repr = rearrange_dims(intermediate_repr) + intermediate_repr = self.group_norm(intermediate_repr) + intermediate_repr = rearrange_dims(intermediate_repr) + output = self.mish(intermediate_repr) + return output + + +# unet_rl.py +class ResidualTemporalBlock1D(nn.Module): + """ + Residual 1D block with temporal convolutions. + + Parameters: + inp_channels (`int`): Number of input channels. + out_channels (`int`): Number of output channels. + embed_dim (`int`): Embedding dimension. + kernel_size (`int` or `tuple`): Size of the convolving kernel. + activation (`str`, defaults `mish`): It is possible to choose the right activation function. + """ + + def __init__( + self, + inp_channels: int, + out_channels: int, + embed_dim: int, + kernel_size: Union[int, Tuple[int, int]] = 5, + activation: str = "mish", + ): + super().__init__() + self.conv_in = Conv1dBlock(inp_channels, out_channels, kernel_size) + self.conv_out = Conv1dBlock(out_channels, out_channels, kernel_size) + + self.time_emb_act = get_activation(activation) + self.time_emb = nn.Linear(embed_dim, out_channels) + + self.residual_conv = ( + nn.Conv1d(inp_channels, out_channels, 1) if inp_channels != out_channels else nn.Identity() + ) + + def forward(self, inputs: torch.Tensor, t: torch.Tensor) -> torch.Tensor: + """ + Args: + inputs : [ batch_size x inp_channels x horizon ] + t : [ batch_size x embed_dim ] + + returns: + out : [ batch_size x out_channels x horizon ] + """ + t = self.time_emb_act(t) + t = self.time_emb(t) + out = self.conv_in(inputs) + rearrange_dims(t) + out = self.conv_out(out) + return out + self.residual_conv(inputs) + + +class TemporalConvLayer(nn.Module): + """ + Temporal convolutional layer that can be used for video (sequence of images) input Code mostly copied from: + https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/models/multi_modal/video_synthesis/unet_sd.py#L1016 + + Parameters: + in_dim (`int`): Number of input channels. + out_dim (`int`): Number of output channels. + dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use. 
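+
+    Example (illustrative sketch; batch, frame, and spatial sizes are arbitrary assumptions):
+
+        >>> import torch
+        >>> layer = TemporalConvLayer(in_dim=64, norm_num_groups=32)
+        >>> frames = torch.randn(2 * 8, 64, 16, 16)  # 2 clips of 8 frames, flattened into the batch dimension
+        >>> out = layer(frames, num_frames=8)  # -> (16, 64, 16, 16); conv4 is zero-initialized, so the layer starts as identity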
+ """ + + def __init__( + self, + in_dim: int, + out_dim: Optional[int] = None, + dropout: float = 0.0, + norm_num_groups: int = 32, + ): + super().__init__() + out_dim = out_dim or in_dim + self.in_dim = in_dim + self.out_dim = out_dim + + # conv layers + self.conv1 = nn.Sequential( + nn.GroupNorm(norm_num_groups, in_dim), + nn.SiLU(), + nn.Conv3d(in_dim, out_dim, (3, 1, 1), padding=(1, 0, 0)), + ) + self.conv2 = nn.Sequential( + nn.GroupNorm(norm_num_groups, out_dim), + nn.SiLU(), + nn.Dropout(dropout), + nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)), + ) + self.conv3 = nn.Sequential( + nn.GroupNorm(norm_num_groups, out_dim), + nn.SiLU(), + nn.Dropout(dropout), + nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)), + ) + self.conv4 = nn.Sequential( + nn.GroupNorm(norm_num_groups, out_dim), + nn.SiLU(), + nn.Dropout(dropout), + nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)), + ) + + # zero out the last layer params,so the conv block is identity + nn.init.zeros_(self.conv4[-1].weight) + nn.init.zeros_(self.conv4[-1].bias) + + def forward(self, hidden_states: torch.Tensor, num_frames: int = 1) -> torch.Tensor: + hidden_states = ( + hidden_states[None, :].reshape((-1, num_frames) + hidden_states.shape[1:]).permute(0, 2, 1, 3, 4) + ) + + identity = hidden_states + hidden_states = self.conv1(hidden_states) + hidden_states = self.conv2(hidden_states) + hidden_states = self.conv3(hidden_states) + hidden_states = self.conv4(hidden_states) + + hidden_states = identity + hidden_states + + hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape( + (hidden_states.shape[0] * hidden_states.shape[2], -1) + hidden_states.shape[3:] + ) + return hidden_states + + +class TemporalResnetBlock(nn.Module): + r""" + A Resnet block. + + Parameters: + in_channels (`int`): The number of channels in the input. + out_channels (`int`, *optional*, default to be `None`): + The number of output channels for the first conv2d layer. If None, same as `in_channels`. + temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. + eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization. 
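+
+    Example (illustrative sketch; shapes are arbitrary assumptions, `temb` carries one embedding per frame):
+
+        >>> import torch
+        >>> block = TemporalResnetBlock(in_channels=64, temb_channels=512)
+        >>> video = torch.randn(1, 64, 8, 16, 16)  # (batch, channels, frames, height, width)
+        >>> temb = torch.randn(1, 8, 512)          # (batch, frames, temb_channels)
+        >>> out = block(video, temb)               # -> (1, 64, 8, 16, 16)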
+ """ + + def __init__( + self, + in_channels: int, + out_channels: Optional[int] = None, + temb_channels: int = 512, + eps: float = 1e-6, + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + + kernel_size = (3, 1, 1) + padding = [k // 2 for k in kernel_size] + + self.norm1 = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=eps, affine=True) + self.conv1 = nn.Conv3d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=1, + padding=padding, + ) + + if temb_channels is not None: + self.time_emb_proj = nn.Linear(temb_channels, out_channels) + else: + self.time_emb_proj = None + + self.norm2 = torch.nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=eps, affine=True) + + self.dropout = torch.nn.Dropout(0.0) + self.conv2 = nn.Conv3d( + out_channels, + out_channels, + kernel_size=kernel_size, + stride=1, + padding=padding, + ) + + self.nonlinearity = get_activation("silu") + + self.use_in_shortcut = self.in_channels != out_channels + + self.conv_shortcut = None + if self.use_in_shortcut: + self.conv_shortcut = nn.Conv3d( + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + ) + + def forward(self, input_tensor: torch.FloatTensor, temb: torch.FloatTensor) -> torch.FloatTensor: + hidden_states = input_tensor + + hidden_states = self.norm1(hidden_states) + hidden_states = self.nonlinearity(hidden_states) + hidden_states = self.conv1(hidden_states) + + if self.time_emb_proj is not None: + temb = self.nonlinearity(temb) + temb = self.time_emb_proj(temb)[:, :, :, None, None] + temb = temb.permute(0, 2, 1, 3, 4) + hidden_states = hidden_states + temb + + hidden_states = self.norm2(hidden_states) + hidden_states = self.nonlinearity(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.conv2(hidden_states) + + if self.conv_shortcut is not None: + input_tensor = self.conv_shortcut(input_tensor) + + output_tensor = input_tensor + hidden_states + + return output_tensor + + +# VideoResBlock +class SpatioTemporalResBlock(nn.Module): + r""" + A SpatioTemporal Resnet block. + + Parameters: + in_channels (`int`): The number of channels in the input. + out_channels (`int`, *optional*, default to be `None`): + The number of output channels for the first conv2d layer. If None, same as `in_channels`. + temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. + eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the spatial resenet. + temporal_eps (`float`, *optional*, defaults to `eps`): The epsilon to use for the temporal resnet. + merge_factor (`float`, *optional*, defaults to `0.5`): The merge factor to use for the temporal mixing. + merge_strategy (`str`, *optional*, defaults to `learned_with_images`): + The merge strategy to use for the temporal mixing. + switch_spatial_to_temporal_mix (`bool`, *optional*, defaults to `False`): + If `True`, switch the spatial and temporal mixing. 
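+
+    Example (illustrative sketch; shapes are arbitrary assumptions, with frames flattened into the batch
+    dimension as the video UNets that use this block do):
+
+        >>> import torch
+        >>> block = SpatioTemporalResBlock(in_channels=64, temb_channels=512)
+        >>> sample = torch.randn(2 * 8, 64, 16, 16)  # 2 clips x 8 frames
+        >>> temb = torch.randn(2 * 8, 512)
+        >>> image_only = torch.zeros(2, 8)           # 0 marks genuine video frames
+        >>> out = block(sample, temb, image_only_indicator=image_only)  # -> (16, 64, 16, 16)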
+ """ + + def __init__( + self, + in_channels: int, + out_channels: Optional[int] = None, + temb_channels: int = 512, + eps: float = 1e-6, + temporal_eps: Optional[float] = None, + merge_factor: float = 0.5, + merge_strategy="learned_with_images", + switch_spatial_to_temporal_mix: bool = False, + ): + super().__init__() + + self.spatial_res_block = ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=eps, + ) + + self.temporal_res_block = TemporalResnetBlock( + in_channels=out_channels if out_channels is not None else in_channels, + out_channels=out_channels if out_channels is not None else in_channels, + temb_channels=temb_channels, + eps=temporal_eps if temporal_eps is not None else eps, + ) + + self.time_mixer = AlphaBlender( + alpha=merge_factor, + merge_strategy=merge_strategy, + switch_spatial_to_temporal_mix=switch_spatial_to_temporal_mix, + ) + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + image_only_indicator: Optional[torch.Tensor] = None, + ): + num_frames = image_only_indicator.shape[-1] + hidden_states = self.spatial_res_block(hidden_states, temb) + + batch_frames, channels, height, width = hidden_states.shape + batch_size = batch_frames // num_frames + + hidden_states_mix = ( + hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) + ) + hidden_states = ( + hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) + ) + + if temb is not None: + temb = temb.reshape(batch_size, num_frames, -1) + + hidden_states = self.temporal_res_block(hidden_states, temb) + hidden_states = self.time_mixer( + x_spatial=hidden_states_mix, + x_temporal=hidden_states, + image_only_indicator=image_only_indicator, + ) + + hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width) + return hidden_states + + +class AlphaBlender(nn.Module): + r""" + A module to blend spatial and temporal features. + + Parameters: + alpha (`float`): The initial value of the blending factor. + merge_strategy (`str`, *optional*, defaults to `learned_with_images`): + The merge strategy to use for the temporal mixing. + switch_spatial_to_temporal_mix (`bool`, *optional*, defaults to `False`): + If `True`, switch the spatial and temporal mixing. 
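+
+    Example (illustrative sketch; shapes are arbitrary assumptions, using the default `learned_with_images`
+    strategy, which requires `image_only_indicator`):
+
+        >>> import torch
+        >>> blender = AlphaBlender(alpha=0.5)
+        >>> x_spatial = torch.randn(2, 64, 8, 16, 16)   # (batch, channels, frames, height, width)
+        >>> x_temporal = torch.randn(2, 64, 8, 16, 16)
+        >>> indicator = torch.zeros(2, 8)               # all zeros -> every frame blended with sigmoid(mix_factor)
+        >>> mixed = blender(x_spatial, x_temporal, image_only_indicator=indicator)  # -> (2, 64, 8, 16, 16)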
+ """ + + strategies = ["learned", "fixed", "learned_with_images"] + + def __init__( + self, + alpha: float, + merge_strategy: str = "learned_with_images", + switch_spatial_to_temporal_mix: bool = False, + ): + super().__init__() + self.merge_strategy = merge_strategy + self.switch_spatial_to_temporal_mix = switch_spatial_to_temporal_mix # For TemporalVAE + + if merge_strategy not in self.strategies: + raise ValueError(f"merge_strategy needs to be in {self.strategies}") + + if self.merge_strategy == "fixed": + self.register_buffer("mix_factor", torch.Tensor([alpha])) + elif self.merge_strategy == "learned" or self.merge_strategy == "learned_with_images": + self.register_parameter("mix_factor", torch.nn.Parameter(torch.Tensor([alpha]))) + else: + raise ValueError(f"Unknown merge strategy {self.merge_strategy}") + + def get_alpha(self, image_only_indicator: torch.Tensor, ndims: int) -> torch.Tensor: + if self.merge_strategy == "fixed": + alpha = self.mix_factor + + elif self.merge_strategy == "learned": + alpha = torch.sigmoid(self.mix_factor) + + elif self.merge_strategy == "learned_with_images": + if image_only_indicator is None: + raise ValueError("Please provide image_only_indicator to use learned_with_images merge strategy") + + alpha = torch.where( + image_only_indicator.bool(), + torch.ones(1, 1, device=image_only_indicator.device), + torch.sigmoid(self.mix_factor)[..., None], + ) + + # (batch, channel, frames, height, width) + if ndims == 5: + alpha = alpha[:, None, :, None, None] + # (batch*frames, height*width, channels) + elif ndims == 3: + alpha = alpha.reshape(-1)[:, None, None] + else: + raise ValueError(f"Unexpected ndims {ndims}. Dimensions should be 3 or 5") + + else: + raise NotImplementedError + + return alpha + + def forward( + self, + x_spatial: torch.Tensor, + x_temporal: torch.Tensor, + image_only_indicator: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + alpha = self.get_alpha(image_only_indicator, x_spatial.ndim) + alpha = alpha.to(x_spatial.dtype) + + if self.switch_spatial_to_temporal_mix: + alpha = 1.0 - alpha + + x = alpha * x_spatial + (1.0 - alpha) * x_temporal + return x diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/resnet_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/resnet_flax.py new file mode 100644 index 000000000..f8bb4788d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/resnet_flax.py @@ -0,0 +1,124 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
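+# JAX/Flax counterparts of the PyTorch resnet blocks above. Unlike the NCHW PyTorch modules, these blocks
+# operate on NHWC (channels-last) tensors. Illustrative usage sketch (shapes are arbitrary assumptions):
+#
+#     block = FlaxResnetBlock2D(in_channels=64)
+#     x, temb = jnp.ones((1, 32, 32, 64)), jnp.ones((1, 512))
+#     variables = block.init(jax.random.PRNGKey(0), x, temb)
+#     out = block.apply(variables, x, temb)  # -> (1, 32, 32, 64)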
+import flax.linen as nn +import jax +import jax.numpy as jnp + + +class FlaxUpsample2D(nn.Module): + out_channels: int + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.conv = nn.Conv( + self.out_channels, + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + def __call__(self, hidden_states): + batch, height, width, channels = hidden_states.shape + hidden_states = jax.image.resize( + hidden_states, + shape=(batch, height * 2, width * 2, channels), + method="nearest", + ) + hidden_states = self.conv(hidden_states) + return hidden_states + + +class FlaxDownsample2D(nn.Module): + out_channels: int + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.conv = nn.Conv( + self.out_channels, + kernel_size=(3, 3), + strides=(2, 2), + padding=((1, 1), (1, 1)), # padding="VALID", + dtype=self.dtype, + ) + + def __call__(self, hidden_states): + # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim + # hidden_states = jnp.pad(hidden_states, pad_width=pad) + hidden_states = self.conv(hidden_states) + return hidden_states + + +class FlaxResnetBlock2D(nn.Module): + in_channels: int + out_channels: int = None + dropout_prob: float = 0.0 + use_nin_shortcut: bool = None + dtype: jnp.dtype = jnp.float32 + + def setup(self): + out_channels = self.in_channels if self.out_channels is None else self.out_channels + + self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5) + self.conv1 = nn.Conv( + out_channels, + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype) + + self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5) + self.dropout = nn.Dropout(self.dropout_prob) + self.conv2 = nn.Conv( + out_channels, + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut + + self.conv_shortcut = None + if use_nin_shortcut: + self.conv_shortcut = nn.Conv( + out_channels, + kernel_size=(1, 1), + strides=(1, 1), + padding="VALID", + dtype=self.dtype, + ) + + def __call__(self, hidden_states, temb, deterministic=True): + residual = hidden_states + hidden_states = self.norm1(hidden_states) + hidden_states = nn.swish(hidden_states) + hidden_states = self.conv1(hidden_states) + + temb = self.time_emb_proj(nn.swish(temb)) + temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1) + hidden_states = hidden_states + temb + + hidden_states = self.norm2(hidden_states) + hidden_states = nn.swish(hidden_states) + hidden_states = self.dropout(hidden_states, deterministic) + hidden_states = self.conv2(hidden_states) + + if self.conv_shortcut is not None: + residual = self.conv_shortcut(residual) + + return hidden_states + residual diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/t5_film_transformer.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/t5_film_transformer.py new file mode 100644 index 000000000..6aa5ff744 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/t5_film_transformer.py @@ -0,0 +1,70 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from ..utils import deprecate
+from .transformers.t5_film_transformer import (
+    DecoderLayer,
+    NewGELUActivation,
+    T5DenseGatedActDense,
+    T5FilmDecoder,
+    T5FiLMLayer,
+    T5LayerCrossAttention,
+    T5LayerFFCond,
+    T5LayerNorm,
+    T5LayerSelfAttentionCond,
+)
+
+
+class T5FilmDecoder(T5FilmDecoder):
+    deprecation_message = "Importing `T5FilmDecoder` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5FilmDecoder`, instead."
+    deprecate("T5FilmDecoder", "0.29", deprecation_message)
+
+
+class DecoderLayer(DecoderLayer):
+    deprecation_message = "Importing `DecoderLayer` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import DecoderLayer`, instead."
+    deprecate("DecoderLayer", "0.29", deprecation_message)
+
+
+class T5LayerSelfAttentionCond(T5LayerSelfAttentionCond):
+    deprecation_message = "Importing `T5LayerSelfAttentionCond` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5LayerSelfAttentionCond`, instead."
+    deprecate("T5LayerSelfAttentionCond", "0.29", deprecation_message)
+
+
+class T5LayerCrossAttention(T5LayerCrossAttention):
+    deprecation_message = "Importing `T5LayerCrossAttention` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5LayerCrossAttention`, instead."
+    deprecate("T5LayerCrossAttention", "0.29", deprecation_message)
+
+
+class T5LayerFFCond(T5LayerFFCond):
+    deprecation_message = "Importing `T5LayerFFCond` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5LayerFFCond`, instead."
+    deprecate("T5LayerFFCond", "0.29", deprecation_message)
+
+
+class T5DenseGatedActDense(T5DenseGatedActDense):
+    deprecation_message = "Importing `T5DenseGatedActDense` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5DenseGatedActDense`, instead."
+    deprecate("T5DenseGatedActDense", "0.29", deprecation_message)
+
+
+class T5LayerNorm(T5LayerNorm):
+    deprecation_message = "Importing `T5LayerNorm` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5LayerNorm`, instead."
+    deprecate("T5LayerNorm", "0.29", deprecation_message)
+
+
+class NewGELUActivation(NewGELUActivation):
+    deprecation_message = "Importing `NewGELUActivation` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version.
Please use `from diffusers.models.transformers.t5_film_transformer import NewGELUActivation`, instead." + deprecate("NewGELUActivation", "0.29", deprecation_message) + + +class T5FiLMLayer(T5FiLMLayer): + deprecation_message = "Importing `T5FiLMLayer` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5FiLMLayer`, instead." + deprecate("T5FiLMLayer", "0.29", deprecation_message) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformer_2d.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformer_2d.py new file mode 100644 index 000000000..5d8ef1347 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformer_2d.py @@ -0,0 +1,25 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from ..utils import deprecate +from .transformers.transformer_2d import Transformer2DModel, Transformer2DModelOutput + + +class Transformer2DModelOutput(Transformer2DModelOutput): + deprecation_message = "Importing `Transformer2DModelOutput` from `diffusers.models.transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.transformer_2d import Transformer2DModelOutput`, instead." + deprecate("Transformer2DModelOutput", "0.29", deprecation_message) + + +class Transformer2DModel(Transformer2DModel): + deprecation_message = "Importing `Transformer2DModel` from `diffusers.models.transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.transformer_2d import Transformer2DModel`, instead." + deprecate("Transformer2DModel", "0.29", deprecation_message) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformer_temporal.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformer_temporal.py new file mode 100644 index 000000000..83c7a8e67 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformer_temporal.py @@ -0,0 +1,34 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
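+# Backward-compatibility shim: the classes below subclass their implementations from
+# `diffusers.models.transformers.transformer_temporal` and emit a deprecation warning (via `deprecate`)
+# as soon as this legacy module is imported. New code should import from the subpackage directly, e.g.
+# `from diffusers.models.transformers.transformer_temporal import TransformerTemporalModel`.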
+from ..utils import deprecate
+from .transformers.transformer_temporal import (
+    TransformerSpatioTemporalModel,
+    TransformerTemporalModel,
+    TransformerTemporalModelOutput,
+)
+
+
+class TransformerTemporalModelOutput(TransformerTemporalModelOutput):
+    deprecation_message = "Importing `TransformerTemporalModelOutput` from `diffusers.models.transformer_temporal` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.transformer_temporal import TransformerTemporalModelOutput`, instead."
+    deprecate("TransformerTemporalModelOutput", "0.29", deprecation_message)
+
+
+class TransformerTemporalModel(TransformerTemporalModel):
+    deprecation_message = "Importing `TransformerTemporalModel` from `diffusers.models.transformer_temporal` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.transformer_temporal import TransformerTemporalModel`, instead."
+    deprecate("TransformerTemporalModel", "0.29", deprecation_message)
+
+
+class TransformerSpatioTemporalModel(TransformerSpatioTemporalModel):
+    deprecation_message = "Importing `TransformerSpatioTemporalModel` from `diffusers.models.transformer_temporal` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.transformer_temporal import TransformerSpatioTemporalModel`, instead."
+    deprecate("TransformerSpatioTemporalModel", "0.29", deprecation_message)
diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/__init__.py
new file mode 100644
index 000000000..dc78a72b2
--- /dev/null
+++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/__init__.py
@@ -0,0 +1,9 @@
+from ...utils import is_torch_available
+
+
+if is_torch_available():
+    from .dual_transformer_2d import DualTransformer2DModel
+    from .prior_transformer import PriorTransformer
+    from .t5_film_transformer import T5FilmDecoder
+    from .transformer_2d import Transformer2DModel
+    from .transformer_temporal import TransformerTemporalModel
diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/dual_transformer_2d.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/dual_transformer_2d.py
new file mode 100644
index 000000000..96849bd28
--- /dev/null
+++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/dual_transformer_2d.py
@@ -0,0 +1,155 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
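+# `DualTransformer2DModel` runs two `Transformer2DModel`s side by side: `encoder_hidden_states` is split into
+# two condition segments (77 and 257 tokens by default, see `condition_lengths` below), each segment is routed
+# to one of the transformers, and the two residual outputs are blended with `mix_ratio` before being added back
+# to the input states.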
+from typing import Optional + +from torch import nn + +from .transformer_2d import Transformer2DModel, Transformer2DModelOutput + + +class DualTransformer2DModel(nn.Module): + """ + Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference. + + Parameters: + num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. + attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. + in_channels (`int`, *optional*): + Pass if the input is continuous. The number of channels in the input and output. + num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. + dropout (`float`, *optional*, defaults to 0.1): The dropout probability to use. + cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use. + sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images. + Note that this is fixed at training time as it is used for learning a number of position embeddings. See + `ImagePositionalEmbeddings`. + num_vector_embeds (`int`, *optional*): + Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels. + Includes the class for the masked latent pixel. + activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. + num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`. + The number of diffusion steps used during training. Note that this is fixed at training time as it is used + to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for + up to but not more than steps than `num_embeds_ada_norm`. + attention_bias (`bool`, *optional*): + Configure if the TransformerBlocks' attention should contain a bias parameter. + """ + + def __init__( + self, + num_attention_heads: int = 16, + attention_head_dim: int = 88, + in_channels: Optional[int] = None, + num_layers: int = 1, + dropout: float = 0.0, + norm_num_groups: int = 32, + cross_attention_dim: Optional[int] = None, + attention_bias: bool = False, + sample_size: Optional[int] = None, + num_vector_embeds: Optional[int] = None, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, + ): + super().__init__() + self.transformers = nn.ModuleList( + [ + Transformer2DModel( + num_attention_heads=num_attention_heads, + attention_head_dim=attention_head_dim, + in_channels=in_channels, + num_layers=num_layers, + dropout=dropout, + norm_num_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + attention_bias=attention_bias, + sample_size=sample_size, + num_vector_embeds=num_vector_embeds, + activation_fn=activation_fn, + num_embeds_ada_norm=num_embeds_ada_norm, + ) + for _ in range(2) + ] + ) + + # Variables that can be set by a pipeline: + + # The ratio of transformer1 to transformer2's output states to be combined during inference + self.mix_ratio = 0.5 + + # The shape of `encoder_hidden_states` is expected to be + # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` + self.condition_lengths = [77, 257] + + # Which transformer to use to encode which condition. + # E.g. 
`(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` + self.transformer_index_for_condition = [1, 0] + + def forward( + self, + hidden_states, + encoder_hidden_states, + timestep=None, + attention_mask=None, + cross_attention_kwargs=None, + return_dict: bool = True, + ): + """ + Args: + hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`. + When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input + hidden_states. + encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): + Conditional embeddings for cross attention layer. If not given, cross-attention defaults to + self-attention. + timestep ( `torch.long`, *optional*): + Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step. + attention_mask (`torch.FloatTensor`, *optional*): + Optional attention mask to be applied in Attention. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. + + Returns: + [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`: + [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is the sample tensor. + """ + input_states = hidden_states + + encoded_states = [] + tokens_start = 0 + # attention_mask is not used yet + for i in range(2): + # for each of the two transformers, pass the corresponding condition tokens + condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] + transformer_index = self.transformer_index_for_condition[i] + encoded_state = self.transformers[transformer_index]( + input_states, + encoder_hidden_states=condition_state, + timestep=timestep, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + encoded_states.append(encoded_state - input_states) + tokens_start += self.condition_lengths[i] + + output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) + output_states = output_states + input_states + + if not return_dict: + return (output_states,) + + return Transformer2DModelOutput(sample=output_states) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/prior_transformer.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/prior_transformer.py new file mode 100644 index 000000000..990eabe2c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/prior_transformer.py @@ -0,0 +1,380 @@ +from dataclasses import dataclass +from typing import Dict, Optional, Union + +import torch +import torch.nn.functional as F +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin +from ...utils import BaseOutput +from ..attention import BasicTransformerBlock +from ..attention_processor import ( + 
ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from ..embeddings import TimestepEmbedding, Timesteps +from ..modeling_utils import ModelMixin + + +@dataclass +class PriorTransformerOutput(BaseOutput): + """ + The output of [`PriorTransformer`]. + + Args: + predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`): + The predicted CLIP image embedding conditioned on the CLIP text embedding input. + """ + + predicted_image_embedding: torch.FloatTensor + + +class PriorTransformer(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin): + """ + A Prior Transformer model. + + Parameters: + num_attention_heads (`int`, *optional*, defaults to 32): The number of heads to use for multi-head attention. + attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head. + num_layers (`int`, *optional*, defaults to 20): The number of layers of Transformer blocks to use. + embedding_dim (`int`, *optional*, defaults to 768): The dimension of the model input `hidden_states` + num_embeddings (`int`, *optional*, defaults to 77): + The number of embeddings of the model input `hidden_states` + additional_embeddings (`int`, *optional*, defaults to 4): The number of additional tokens appended to the + projected `hidden_states`. The actual length of the used `hidden_states` is `num_embeddings + + additional_embeddings`. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + time_embed_act_fn (`str`, *optional*, defaults to 'silu'): + The activation function to use to create timestep embeddings. + norm_in_type (`str`, *optional*, defaults to None): The normalization layer to apply on hidden states before + passing to Transformer blocks. Set it to `None` if normalization is not needed. + embedding_proj_norm_type (`str`, *optional*, defaults to None): + The normalization layer to apply on the input `proj_embedding`. Set it to `None` if normalization is not + needed. + encoder_hid_proj_type (`str`, *optional*, defaults to `linear`): + The projection layer to apply on the input `encoder_hidden_states`. Set it to `None` if + `encoder_hidden_states` is `None`. + added_emb_type (`str`, *optional*, defaults to `prd`): Additional embeddings to condition the model. + Choose from `prd` or `None`. if choose `prd`, it will prepend a token indicating the (quantized) dot + product between the text embedding and image embedding as proposed in the unclip paper + https://arxiv.org/abs/2204.06125 If it is `None`, no additional embeddings will be prepended. + time_embed_dim (`int, *optional*, defaults to None): The dimension of timestep embeddings. + If None, will be set to `num_attention_heads * attention_head_dim` + embedding_proj_dim (`int`, *optional*, default to None): + The dimension of `proj_embedding`. If None, will be set to `embedding_dim`. + clip_embed_dim (`int`, *optional*, default to None): + The dimension of the output. If None, will be set to `embedding_dim`. 
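+
+    Example (illustrative sketch with a deliberately tiny configuration; the sizes below are arbitrary
+    assumptions, not the defaults used by released checkpoints):
+
+        >>> import torch
+        >>> prior = PriorTransformer(num_attention_heads=2, attention_head_dim=32, num_layers=2, embedding_dim=768)
+        >>> image_embed = torch.randn(1, 768)       # current noisy CLIP image embedding
+        >>> text_embed = torch.randn(1, 768)        # pooled CLIP text embedding used as `proj_embedding`
+        >>> text_states = torch.randn(1, 77, 768)   # per-token CLIP text hidden states
+        >>> mask = torch.ones(1, 77, dtype=torch.bool)
+        >>> out = prior(image_embed, timestep=10, proj_embedding=text_embed,
+        ...             encoder_hidden_states=text_states, attention_mask=mask)
+        >>> pred = out.predicted_image_embedding    # shape (1, 768)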
+ """ + + @register_to_config + def __init__( + self, + num_attention_heads: int = 32, + attention_head_dim: int = 64, + num_layers: int = 20, + embedding_dim: int = 768, + num_embeddings=77, + additional_embeddings=4, + dropout: float = 0.0, + time_embed_act_fn: str = "silu", + norm_in_type: Optional[str] = None, # layer + embedding_proj_norm_type: Optional[str] = None, # layer + encoder_hid_proj_type: Optional[str] = "linear", # linear + added_emb_type: Optional[str] = "prd", # prd + time_embed_dim: Optional[int] = None, + embedding_proj_dim: Optional[int] = None, + clip_embed_dim: Optional[int] = None, + ): + super().__init__() + self.num_attention_heads = num_attention_heads + self.attention_head_dim = attention_head_dim + inner_dim = num_attention_heads * attention_head_dim + self.additional_embeddings = additional_embeddings + + time_embed_dim = time_embed_dim or inner_dim + embedding_proj_dim = embedding_proj_dim or embedding_dim + clip_embed_dim = clip_embed_dim or embedding_dim + + self.time_proj = Timesteps(inner_dim, True, 0) + self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn) + + self.proj_in = nn.Linear(embedding_dim, inner_dim) + + if embedding_proj_norm_type is None: + self.embedding_proj_norm = None + elif embedding_proj_norm_type == "layer": + self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim) + else: + raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}") + + self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim) + + if encoder_hid_proj_type is None: + self.encoder_hidden_states_proj = None + elif encoder_hid_proj_type == "linear": + self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim) + else: + raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}") + + self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim)) + + if added_emb_type == "prd": + self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim)) + elif added_emb_type is None: + self.prd_embedding = None + else: + raise ValueError( + f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." + ) + + self.transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + inner_dim, + num_attention_heads, + attention_head_dim, + dropout=dropout, + activation_fn="gelu", + attention_bias=True, + ) + for d in range(num_layers) + ] + ) + + if norm_in_type == "layer": + self.norm_in = nn.LayerNorm(inner_dim) + elif norm_in_type is None: + self.norm_in = None + else: + raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.") + + self.norm_out = nn.LayerNorm(inner_dim) + + self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim) + + causal_attention_mask = torch.full( + [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0 + ) + causal_attention_mask.triu_(1) + causal_attention_mask = causal_attention_mask[None, ...] 
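+        # `causal_attention_mask` is an additive bias: entries above the diagonal are -10000.0, so once it is
+        # added to the attention scores each token can only attend to itself and to earlier tokens. It is
+        # registered with `persistent=False` below because it is deterministically rebuilt in `__init__`.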
+ self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False) + + self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim)) + self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim)) + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. 
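+
+        Example (illustrative; assumes `prior` is an instantiated `PriorTransformer`):
+
+            >>> prior.set_attn_processor(AttnProcessor())  # install one processor for every attention layer
+            >>> prior.set_default_attn_processor()         # revert to the library default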
+ """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + def forward( + self, + hidden_states, + timestep: Union[torch.Tensor, float, int], + proj_embedding: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.BoolTensor] = None, + return_dict: bool = True, + ): + """ + The [`PriorTransformer`] forward method. + + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`): + The currently predicted image embeddings. + timestep (`torch.LongTensor`): + Current denoising step. + proj_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`): + Projected embedding vector the denoising process is conditioned on. + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_embeddings, embedding_dim)`): + Hidden states of the text embeddings the denoising process is conditioned on. + attention_mask (`torch.BoolTensor` of shape `(batch_size, num_embeddings)`): + Text mask for the text embeddings. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.prior_transformer.PriorTransformerOutput`] instead of a plain + tuple. + + Returns: + [`~models.prior_transformer.PriorTransformerOutput`] or `tuple`: + If return_dict is True, a [`~models.prior_transformer.PriorTransformerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + """ + batch_size = hidden_states.shape[0] + + timesteps = timestep + if not torch.is_tensor(timesteps): + timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device) + elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: + timesteps = timesteps[None].to(hidden_states.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device) + + timesteps_projected = self.time_proj(timesteps) + + # timesteps does not contain any weights and will always return f32 tensors + # but time_embedding might be fp16, so we need to cast here. 
+ timesteps_projected = timesteps_projected.to(dtype=self.dtype) + time_embeddings = self.time_embedding(timesteps_projected) + + if self.embedding_proj_norm is not None: + proj_embedding = self.embedding_proj_norm(proj_embedding) + + proj_embeddings = self.embedding_proj(proj_embedding) + if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: + encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states) + elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: + raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set") + + hidden_states = self.proj_in(hidden_states) + + positional_embeddings = self.positional_embedding.to(hidden_states.dtype) + + additional_embeds = [] + additional_embeddings_len = 0 + + if encoder_hidden_states is not None: + additional_embeds.append(encoder_hidden_states) + additional_embeddings_len += encoder_hidden_states.shape[1] + + if len(proj_embeddings.shape) == 2: + proj_embeddings = proj_embeddings[:, None, :] + + if len(hidden_states.shape) == 2: + hidden_states = hidden_states[:, None, :] + + additional_embeds = additional_embeds + [ + proj_embeddings, + time_embeddings[:, None, :], + hidden_states, + ] + + if self.prd_embedding is not None: + prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1) + additional_embeds.append(prd_embedding) + + hidden_states = torch.cat( + additional_embeds, + dim=1, + ) + + # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens + additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1 + if positional_embeddings.shape[1] < hidden_states.shape[1]: + positional_embeddings = F.pad( + positional_embeddings, + ( + 0, + 0, + additional_embeddings_len, + self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, + ), + value=0.0, + ) + + hidden_states = hidden_states + positional_embeddings + + if attention_mask is not None: + attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 + attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0) + attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype) + attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0) + + if self.norm_in is not None: + hidden_states = self.norm_in(hidden_states) + + for block in self.transformer_blocks: + hidden_states = block(hidden_states, attention_mask=attention_mask) + + hidden_states = self.norm_out(hidden_states) + + if self.prd_embedding is not None: + hidden_states = hidden_states[:, -1] + else: + hidden_states = hidden_states[:, additional_embeddings_len:] + + predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states) + + if not return_dict: + return (predicted_image_embedding,) + + return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding) + + def post_process_latents(self, prior_latents): + prior_latents = (prior_latents * self.clip_std) + self.clip_mean + return prior_latents diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/t5_film_transformer.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/t5_film_transformer.py new file mode 100644 index 000000000..bff98db02 --- /dev/null +++ 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/t5_film_transformer.py @@ -0,0 +1,438 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import math +from typing import Optional, Tuple + +import torch +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ..attention_processor import Attention +from ..embeddings import get_timestep_embedding +from ..modeling_utils import ModelMixin + + +class T5FilmDecoder(ModelMixin, ConfigMixin): + r""" + T5 style decoder with FiLM conditioning. + + Args: + input_dims (`int`, *optional*, defaults to `128`): + The number of input dimensions. + targets_length (`int`, *optional*, defaults to `256`): + The length of the targets. + d_model (`int`, *optional*, defaults to `768`): + Size of the input hidden states. + num_layers (`int`, *optional*, defaults to `12`): + The number of `DecoderLayer`'s to use. + num_heads (`int`, *optional*, defaults to `12`): + The number of attention heads to use. + d_kv (`int`, *optional*, defaults to `64`): + Size of the key-value projection vectors. + d_ff (`int`, *optional*, defaults to `2048`): + The number of dimensions in the intermediate feed-forward layer of `DecoderLayer`'s. + dropout_rate (`float`, *optional*, defaults to `0.1`): + Dropout probability. + """ + + @register_to_config + def __init__( + self, + input_dims: int = 128, + targets_length: int = 256, + max_decoder_noise_time: float = 2000.0, + d_model: int = 768, + num_layers: int = 12, + num_heads: int = 12, + d_kv: int = 64, + d_ff: int = 2048, + dropout_rate: float = 0.1, + ): + super().__init__() + + self.conditioning_emb = nn.Sequential( + nn.Linear(d_model, d_model * 4, bias=False), + nn.SiLU(), + nn.Linear(d_model * 4, d_model * 4, bias=False), + nn.SiLU(), + ) + + self.position_encoding = nn.Embedding(targets_length, d_model) + self.position_encoding.weight.requires_grad = False + + self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False) + + self.dropout = nn.Dropout(p=dropout_rate) + + self.decoders = nn.ModuleList() + for lyr_num in range(num_layers): + # FiLM conditional T5 decoder + lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate) + self.decoders.append(lyr) + + self.decoder_norm = T5LayerNorm(d_model) + + self.post_dropout = nn.Dropout(p=dropout_rate) + self.spec_out = nn.Linear(d_model, input_dims, bias=False) + + def encoder_decoder_mask(self, query_input: torch.FloatTensor, key_input: torch.FloatTensor) -> torch.FloatTensor: + mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2)) + return mask.unsqueeze(-3) + + def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time): + batch, _, _ = decoder_input_tokens.shape + assert decoder_noise_time.shape == (batch,) + + # decoder_noise_time is in [0, 1), so rescale to expected timing range. 
+ time_steps = get_timestep_embedding( + decoder_noise_time * self.config.max_decoder_noise_time, + embedding_dim=self.config.d_model, + max_period=self.config.max_decoder_noise_time, + ).to(dtype=self.dtype) + + conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1) + + assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) + + seq_length = decoder_input_tokens.shape[1] + + # If we want to use relative positions for audio context, we can just offset + # this sequence by the length of encodings_and_masks. + decoder_positions = torch.broadcast_to( + torch.arange(seq_length, device=decoder_input_tokens.device), + (batch, seq_length), + ) + + position_encodings = self.position_encoding(decoder_positions) + + inputs = self.continuous_inputs_projection(decoder_input_tokens) + inputs += position_encodings + y = self.dropout(inputs) + + # decoder: No padding present. + decoder_mask = torch.ones( + decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype + ) + + # Translate encoding masks to encoder-decoder masks. + encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks] + + # cross attend style: concat encodings + encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1) + encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1) + + for lyr in self.decoders: + y = lyr( + y, + conditioning_emb=conditioning_emb, + encoder_hidden_states=encoded, + encoder_attention_mask=encoder_decoder_mask, + )[0] + + y = self.decoder_norm(y) + y = self.post_dropout(y) + + spec_out = self.spec_out(y) + return spec_out + + +class DecoderLayer(nn.Module): + r""" + T5 decoder layer. + + Args: + d_model (`int`): + Size of the input hidden states. + d_kv (`int`): + Size of the key-value projection vectors. + num_heads (`int`): + Number of attention heads. + d_ff (`int`): + Size of the intermediate feed-forward layer. + dropout_rate (`float`): + Dropout probability. + layer_norm_epsilon (`float`, *optional*, defaults to `1e-6`): + A small value used for numerical stability to avoid dividing by zero. 
+ """ + + def __init__( + self, d_model: int, d_kv: int, num_heads: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float = 1e-6 + ): + super().__init__() + self.layer = nn.ModuleList() + + # cond self attention: layer 0 + self.layer.append( + T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate) + ) + + # cross attention: layer 1 + self.layer.append( + T5LayerCrossAttention( + d_model=d_model, + d_kv=d_kv, + num_heads=num_heads, + dropout_rate=dropout_rate, + layer_norm_epsilon=layer_norm_epsilon, + ) + ) + + # Film Cond MLP + dropout: last layer + self.layer.append( + T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon) + ) + + def forward( + self, + hidden_states: torch.FloatTensor, + conditioning_emb: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + encoder_decoder_position_bias=None, + ) -> Tuple[torch.FloatTensor]: + hidden_states = self.layer[0]( + hidden_states, + conditioning_emb=conditioning_emb, + attention_mask=attention_mask, + ) + + if encoder_hidden_states is not None: + encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to( + encoder_hidden_states.dtype + ) + + hidden_states = self.layer[1]( + hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_extended_attention_mask, + ) + + # Apply Film Conditional Feed Forward layer + hidden_states = self.layer[-1](hidden_states, conditioning_emb) + + return (hidden_states,) + + +class T5LayerSelfAttentionCond(nn.Module): + r""" + T5 style self-attention layer with conditioning. + + Args: + d_model (`int`): + Size of the input hidden states. + d_kv (`int`): + Size of the key-value projection vectors. + num_heads (`int`): + Number of attention heads. + dropout_rate (`float`): + Dropout probability. + """ + + def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float): + super().__init__() + self.layer_norm = T5LayerNorm(d_model) + self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model) + self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False) + self.dropout = nn.Dropout(dropout_rate) + + def forward( + self, + hidden_states: torch.FloatTensor, + conditioning_emb: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + # pre_self_attention_layer_norm + normed_hidden_states = self.layer_norm(hidden_states) + + if conditioning_emb is not None: + normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb) + + # Self-attention block + attention_output = self.attention(normed_hidden_states) + + hidden_states = hidden_states + self.dropout(attention_output) + + return hidden_states + + +class T5LayerCrossAttention(nn.Module): + r""" + T5 style cross-attention layer. + + Args: + d_model (`int`): + Size of the input hidden states. + d_kv (`int`): + Size of the key-value projection vectors. + num_heads (`int`): + Number of attention heads. + dropout_rate (`float`): + Dropout probability. + layer_norm_epsilon (`float`): + A small value used for numerical stability to avoid dividing by zero. 
+ """ + + def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float, layer_norm_epsilon: float): + super().__init__() + self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False) + self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon) + self.dropout = nn.Dropout(dropout_rate) + + def forward( + self, + hidden_states: torch.FloatTensor, + key_value_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + normed_hidden_states = self.layer_norm(hidden_states) + attention_output = self.attention( + normed_hidden_states, + encoder_hidden_states=key_value_states, + attention_mask=attention_mask.squeeze(1), + ) + layer_output = hidden_states + self.dropout(attention_output) + return layer_output + + +class T5LayerFFCond(nn.Module): + r""" + T5 style feed-forward conditional layer. + + Args: + d_model (`int`): + Size of the input hidden states. + d_ff (`int`): + Size of the intermediate feed-forward layer. + dropout_rate (`float`): + Dropout probability. + layer_norm_epsilon (`float`): + A small value used for numerical stability to avoid dividing by zero. + """ + + def __init__(self, d_model: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float): + super().__init__() + self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate) + self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model) + self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon) + self.dropout = nn.Dropout(dropout_rate) + + def forward( + self, hidden_states: torch.FloatTensor, conditioning_emb: Optional[torch.FloatTensor] = None + ) -> torch.FloatTensor: + forwarded_states = self.layer_norm(hidden_states) + if conditioning_emb is not None: + forwarded_states = self.film(forwarded_states, conditioning_emb) + + forwarded_states = self.DenseReluDense(forwarded_states) + hidden_states = hidden_states + self.dropout(forwarded_states) + return hidden_states + + +class T5DenseGatedActDense(nn.Module): + r""" + T5 style feed-forward layer with gated activations and dropout. + + Args: + d_model (`int`): + Size of the input hidden states. + d_ff (`int`): + Size of the intermediate feed-forward layer. + dropout_rate (`float`): + Dropout probability. + """ + + def __init__(self, d_model: int, d_ff: int, dropout_rate: float): + super().__init__() + self.wi_0 = nn.Linear(d_model, d_ff, bias=False) + self.wi_1 = nn.Linear(d_model, d_ff, bias=False) + self.wo = nn.Linear(d_ff, d_model, bias=False) + self.dropout = nn.Dropout(dropout_rate) + self.act = NewGELUActivation() + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + hidden_gelu = self.act(self.wi_0(hidden_states)) + hidden_linear = self.wi_1(hidden_states) + hidden_states = hidden_gelu * hidden_linear + hidden_states = self.dropout(hidden_states) + + hidden_states = self.wo(hidden_states) + return hidden_states + + +class T5LayerNorm(nn.Module): + r""" + T5 style layer normalization module. + + Args: + hidden_size (`int`): + Size of the input hidden states. + eps (`float`, `optional`, defaults to `1e-6`): + A small value used for numerical stability to avoid dividing by zero. + """ + + def __init__(self, hidden_size: int, eps: float = 1e-6): + """ + Construct a layernorm module in the T5 style. No bias and no subtraction of mean. 
+ """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean + # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated + # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for + # half-precision inputs is done in fp32 + + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + + # convert into half-precision if necessary + if self.weight.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.to(self.weight.dtype) + + return self.weight * hidden_states + + +class NewGELUActivation(nn.Module): + """ + Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see + the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 + """ + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0)))) + + +class T5FiLMLayer(nn.Module): + """ + T5 style FiLM Layer. + + Args: + in_features (`int`): + Number of input features. + out_features (`int`): + Number of output features. + """ + + def __init__(self, in_features: int, out_features: int): + super().__init__() + self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False) + + def forward(self, x: torch.FloatTensor, conditioning_emb: torch.FloatTensor) -> torch.FloatTensor: + emb = self.scale_bias(conditioning_emb) + scale, shift = torch.chunk(emb, 2, -1) + x = x * (1 + scale) + shift + return x diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/transformer_2d.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/transformer_2d.py new file mode 100644 index 000000000..63df0dbbc --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/transformer_2d.py @@ -0,0 +1,460 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from dataclasses import dataclass +from typing import Any, Dict, Optional +import os + +import torch +import torch.nn.functional as F +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...utils import BaseOutput, deprecate, is_torch_version, logging +from ..attention import BasicTransformerBlock +from ..embeddings import ImagePositionalEmbeddings, PatchEmbed, PixArtAlphaTextProjection +from ..modeling_utils import ModelMixin +from ..normalization import AdaLayerNormSingle +from ..nhwc_groupnorm.custom_gn import GN_NHWC + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class Transformer2DModelOutput(BaseOutput): + """ + The output of [`Transformer2DModel`]. + + Args: + sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete): + The hidden states output conditioned on the `encoder_hidden_states` input. If discrete, returns probability + distributions for the unnoised latent pixels. + """ + + sample: torch.FloatTensor + + +class Transformer2DModel(ModelMixin, ConfigMixin): + """ + A 2D Transformer model for image-like data. + + Parameters: + num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. + attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. + in_channels (`int`, *optional*): + The number of channels in the input and output (specify if the input is **continuous**). + num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. + sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**). + This is fixed during training since it is used to learn a number of position embeddings. + num_vector_embeds (`int`, *optional*): + The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**). + Includes the class for the masked latent pixel. + activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward. + num_embeds_ada_norm ( `int`, *optional*): + The number of diffusion steps used during training. Pass if at least one of the norm_layers is + `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are + added to the hidden states. + + During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`. + attention_bias (`bool`, *optional*): + Configure if the `TransformerBlocks` attention should contain a bias parameter. 
+ """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + num_attention_heads: int = 16, + attention_head_dim: int = 88, + in_channels: Optional[int] = None, + out_channels: Optional[int] = None, + num_layers: int = 1, + dropout: float = 0.0, + norm_num_groups: int = 32, + cross_attention_dim: Optional[int] = None, + attention_bias: bool = False, + sample_size: Optional[int] = None, + num_vector_embeds: Optional[int] = None, + patch_size: Optional[int] = None, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + double_self_attention: bool = False, + upcast_attention: bool = False, + norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen' + norm_elementwise_affine: bool = True, + norm_eps: float = 1e-5, + attention_type: str = "default", + caption_channels: int = None, + interpolation_scale: float = None, + ): + super().__init__() + if patch_size is not None: + if norm_type not in ["ada_norm", "ada_norm_zero", "ada_norm_single"]: + raise NotImplementedError( + f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'." + ) + elif norm_type in ["ada_norm", "ada_norm_zero"] and num_embeds_ada_norm is None: + raise ValueError( + f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None." + ) + + self.use_linear_projection = use_linear_projection + self.num_attention_heads = num_attention_heads + self.attention_head_dim = attention_head_dim + inner_dim = num_attention_heads * attention_head_dim + + conv_cls = nn.Conv2d + linear_cls = nn.Linear + + # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)` + # Define whether input is continuous or discrete depending on configuration + self.is_input_continuous = (in_channels is not None) and (patch_size is None) + self.is_input_vectorized = num_vector_embeds is not None + self.is_input_patches = in_channels is not None and patch_size is not None + + if norm_type == "layer_norm" and num_embeds_ada_norm is not None: + deprecation_message = ( + f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or" + " incorrectly set to `'layer_norm'`.Make sure to set `norm_type` to `'ada_norm'` in the config." + " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect" + " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it" + " would be very nice if you could open a Pull request for the `transformer/config.json` file" + ) + deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False) + norm_type = "ada_norm" + + if self.is_input_continuous and self.is_input_vectorized: + raise ValueError( + f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make" + " sure that either `in_channels` or `num_vector_embeds` is None." + ) + elif self.is_input_vectorized and self.is_input_patches: + raise ValueError( + f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make" + " sure that either `num_vector_embeds` or `num_patches` is None." 
+ ) + elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches: + raise ValueError( + f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:" + f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None." + ) + + # 2. Define input layers + if self.is_input_continuous: + self.in_channels = in_channels + if int(os.environ.get("USE_NHWC_GN", 0)): + self.norm = GN_NHWC(norm_num_groups, in_channels, activation="identity") + else: + self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True) + if use_linear_projection: + self.proj_in = linear_cls(in_channels, inner_dim) + else: + self.proj_in = conv_cls(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) + elif self.is_input_vectorized: + assert sample_size is not None, "Transformer2DModel over discrete input must provide sample_size" + assert num_vector_embeds is not None, "Transformer2DModel over discrete input must provide num_embed" + + self.height = sample_size + self.width = sample_size + self.num_vector_embeds = num_vector_embeds + self.num_latent_pixels = self.height * self.width + + self.latent_image_embedding = ImagePositionalEmbeddings( + num_embed=num_vector_embeds, embed_dim=inner_dim, height=self.height, width=self.width + ) + elif self.is_input_patches: + assert sample_size is not None, "Transformer2DModel over patched input must provide sample_size" + + self.height = sample_size + self.width = sample_size + + self.patch_size = patch_size + interpolation_scale = ( + interpolation_scale if interpolation_scale is not None else max(self.config.sample_size // 64, 1) + ) + self.pos_embed = PatchEmbed( + height=sample_size, + width=sample_size, + patch_size=patch_size, + in_channels=in_channels, + embed_dim=inner_dim, + interpolation_scale=interpolation_scale, + ) + + # 3. Define transformers blocks + self.transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + inner_dim, + num_attention_heads, + attention_head_dim, + dropout=dropout, + cross_attention_dim=cross_attention_dim, + activation_fn=activation_fn, + num_embeds_ada_norm=num_embeds_ada_norm, + attention_bias=attention_bias, + only_cross_attention=only_cross_attention, + double_self_attention=double_self_attention, + upcast_attention=upcast_attention, + norm_type=norm_type, + norm_elementwise_affine=norm_elementwise_affine, + norm_eps=norm_eps, + attention_type=attention_type, + ) + for d in range(num_layers) + ] + ) + + # 4. 
Define output layers + self.out_channels = in_channels if out_channels is None else out_channels + if self.is_input_continuous: + # TODO: should use out_channels for continuous projections + if use_linear_projection: + self.proj_out = linear_cls(inner_dim, in_channels) + else: + self.proj_out = conv_cls(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) + elif self.is_input_vectorized: + self.norm_out = nn.LayerNorm(inner_dim) + self.out = nn.Linear(inner_dim, self.num_vector_embeds - 1) + elif self.is_input_patches and norm_type != "ada_norm_single": + self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6) + self.proj_out_1 = nn.Linear(inner_dim, 2 * inner_dim) + self.proj_out_2 = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels) + elif self.is_input_patches and norm_type == "ada_norm_single": + self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6) + self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5) + self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels) + + # 5. PixArt-Alpha blocks. + self.adaln_single = None + self.use_additional_conditions = False + if norm_type == "ada_norm_single": + self.use_additional_conditions = self.config.sample_size == 128 + # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use + # additional conditions until we find better name + self.adaln_single = AdaLayerNormSingle(inner_dim, use_additional_conditions=self.use_additional_conditions) + + self.caption_projection = None + if caption_channels is not None: + self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=inner_dim) + + self.gradient_checkpointing = False + + def _set_gradient_checkpointing(self, module, value=False): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = value + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + timestep: Optional[torch.LongTensor] = None, + added_cond_kwargs: Dict[str, torch.Tensor] = None, + class_labels: Optional[torch.LongTensor] = None, + cross_attention_kwargs: Dict[str, Any] = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + return_dict: bool = True, + ): + """ + The [`Transformer2DModel`] forward method. + + Args: + hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous): + Input `hidden_states`. + encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*): + Conditional embeddings for cross attention layer. If not given, cross-attention defaults to + self-attention. + timestep ( `torch.LongTensor`, *optional*): + Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. + class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): + Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in + `AdaLayerZeroNorm`. + cross_attention_kwargs ( `Dict[str, Any]`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). 
+            attention_mask ( `torch.Tensor`, *optional*):
+                An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
+                is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
+                negative values to the attention scores corresponding to "discard" tokens.
+            encoder_attention_mask ( `torch.Tensor`, *optional*):
+                Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
+
+                    * Mask `(batch, sequence_length)` True = keep, False = discard.
+                    * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
+
+                If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
+                above. This bias will be added to the cross-attention scores.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
+                tuple.
+
+        Returns:
+            If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
+            `tuple` where the first element is the sample tensor.
+        """
+        if cross_attention_kwargs is not None:
+            if cross_attention_kwargs.get("scale", None) is not None:
+                logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
+        # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
+        #   we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
+        #   we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
+        # expects mask of shape:
+        #   [batch, key_tokens]
+        # adds singleton query_tokens dimension:
+        #   [batch, 1, key_tokens]
+        # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
+        #   [batch,  heads, query_tokens, key_tokens] (e.g. torch sdp attn)
+        #   [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
+        if attention_mask is not None and attention_mask.ndim == 2:
+            # assume that mask is expressed as:
+            #   (1 = keep,      0 = discard)
+            # convert mask into a bias that can be added to attention scores:
+            #       (keep = +0,     discard = -10000.0)
+            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
+            attention_mask = attention_mask.unsqueeze(1)
+
+        # convert encoder_attention_mask to a bias the same way we do for attention_mask
+        if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
+            encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
+            encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
+
+        # 1. 
Input + if self.is_input_continuous: + batch, _, height, width = hidden_states.shape + residual = hidden_states + + hidden_states = self.norm(hidden_states) + if not self.use_linear_projection: + hidden_states = self.proj_in(hidden_states) + inner_dim = hidden_states.shape[1] + hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) + else: + inner_dim = hidden_states.shape[1] + hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) + hidden_states = self.proj_in(hidden_states) + + elif self.is_input_vectorized: + hidden_states = self.latent_image_embedding(hidden_states) + elif self.is_input_patches: + height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size + hidden_states = self.pos_embed(hidden_states) + + if self.adaln_single is not None: + if self.use_additional_conditions and added_cond_kwargs is None: + raise ValueError( + "`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`." + ) + batch_size = hidden_states.shape[0] + timestep, embedded_timestep = self.adaln_single( + timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype + ) + + # 2. Blocks + if self.caption_projection is not None: + batch_size = hidden_states.shape[0] + encoder_hidden_states = self.caption_projection(encoder_hidden_states) + encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1]) + + for block in self.transformer_blocks: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + timestep, + cross_attention_kwargs, + class_labels, + **ckpt_kwargs, + ) + else: + hidden_states = block( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + timestep=timestep, + cross_attention_kwargs=cross_attention_kwargs, + class_labels=class_labels, + ) + + # 3. 
Output + if self.is_input_continuous: + if not self.use_linear_projection: + hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() + hidden_states = self.proj_out(hidden_states) + else: + hidden_states = self.proj_out(hidden_states) + hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2) + + output = hidden_states + residual + elif self.is_input_vectorized: + hidden_states = self.norm_out(hidden_states) + logits = self.out(hidden_states) + # (batch, self.num_vector_embeds - 1, self.num_latent_pixels) + logits = logits.permute(0, 2, 1) + + # log(p(x_0)) + output = F.log_softmax(logits.double(), dim=1).float() + + if self.is_input_patches: + if self.config.norm_type != "ada_norm_single": + conditioning = self.transformer_blocks[0].norm1.emb( + timestep, class_labels, hidden_dtype=hidden_states.dtype + ) + shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1) + hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None] + hidden_states = self.proj_out_2(hidden_states) + elif self.config.norm_type == "ada_norm_single": + shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1) + hidden_states = self.norm_out(hidden_states) + # Modulation + hidden_states = hidden_states * (1 + scale) + shift + hidden_states = self.proj_out(hidden_states) + hidden_states = hidden_states.squeeze(1) + + # unpatchify + if self.adaln_single is None: + height = width = int(hidden_states.shape[1] ** 0.5) + hidden_states = hidden_states.reshape( + shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) + ) + hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) + output = hidden_states.reshape( + shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) + ) + + if not return_dict: + return (output,) + + return Transformer2DModelOutput(sample=output) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/transformer_temporal.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/transformer_temporal.py new file mode 100644 index 000000000..9c61eaee2 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/transformers/transformer_temporal.py @@ -0,0 +1,379 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
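For reference, the "unpatchify" step at the end of `Transformer2DModel.forward` above, shown as a standalone sketch with assumed toy sizes (a 4x4 token grid of 2x2 patches, 3 output channels):

    import torch

    out_channels, patch_size, height, width = 3, 2, 4, 4   # illustrative sizes
    tokens = torch.randn(1, height * width, patch_size * patch_size * out_channels)

    x = tokens.reshape(-1, height, width, patch_size, patch_size, out_channels)
    x = torch.einsum("nhwpqc->nchpwq", x)                   # interleave patch rows/cols with the grid
    image = x.reshape(-1, out_channels, height * patch_size, width * patch_size)
    print(image.shape)                                      # torch.Size([1, 3, 8, 8])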
+from dataclasses import dataclass +from typing import Any, Dict, Optional + +import torch +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...utils import BaseOutput +from ..attention import BasicTransformerBlock, TemporalBasicTransformerBlock +from ..embeddings import TimestepEmbedding, Timesteps +from ..modeling_utils import ModelMixin +from ..resnet import AlphaBlender + + +@dataclass +class TransformerTemporalModelOutput(BaseOutput): + """ + The output of [`TransformerTemporalModel`]. + + Args: + sample (`torch.FloatTensor` of shape `(batch_size x num_frames, num_channels, height, width)`): + The hidden states output conditioned on `encoder_hidden_states` input. + """ + + sample: torch.FloatTensor + + +class TransformerTemporalModel(ModelMixin, ConfigMixin): + """ + A Transformer model for video-like data. + + Parameters: + num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. + attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. + in_channels (`int`, *optional*): + The number of channels in the input and output (specify if the input is **continuous**). + num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. + attention_bias (`bool`, *optional*): + Configure if the `TransformerBlock` attention should contain a bias parameter. + sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**). + This is fixed during training since it is used to learn a number of position embeddings. + activation_fn (`str`, *optional*, defaults to `"geglu"`): + Activation function to use in feed-forward. See `diffusers.models.activations.get_activation` for supported + activation functions. + norm_elementwise_affine (`bool`, *optional*): + Configure if the `TransformerBlock` should use learnable elementwise affine parameters for normalization. + double_self_attention (`bool`, *optional*): + Configure if each `TransformerBlock` should contain two self-attention layers. + positional_embeddings: (`str`, *optional*): + The type of positional embeddings to apply to the sequence input before passing use. + num_positional_embeddings: (`int`, *optional*): + The maximum length of the sequence over which to apply positional embeddings. + """ + + @register_to_config + def __init__( + self, + num_attention_heads: int = 16, + attention_head_dim: int = 88, + in_channels: Optional[int] = None, + out_channels: Optional[int] = None, + num_layers: int = 1, + dropout: float = 0.0, + norm_num_groups: int = 32, + cross_attention_dim: Optional[int] = None, + attention_bias: bool = False, + sample_size: Optional[int] = None, + activation_fn: str = "geglu", + norm_elementwise_affine: bool = True, + double_self_attention: bool = True, + positional_embeddings: Optional[str] = None, + num_positional_embeddings: Optional[int] = None, + ): + super().__init__() + self.num_attention_heads = num_attention_heads + self.attention_head_dim = attention_head_dim + inner_dim = num_attention_heads * attention_head_dim + + self.in_channels = in_channels + + self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True) + self.proj_in = nn.Linear(in_channels, inner_dim) + + # 3. 
Define transformers blocks + self.transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + inner_dim, + num_attention_heads, + attention_head_dim, + dropout=dropout, + cross_attention_dim=cross_attention_dim, + activation_fn=activation_fn, + attention_bias=attention_bias, + double_self_attention=double_self_attention, + norm_elementwise_affine=norm_elementwise_affine, + positional_embeddings=positional_embeddings, + num_positional_embeddings=num_positional_embeddings, + ) + for d in range(num_layers) + ] + ) + + self.proj_out = nn.Linear(inner_dim, in_channels) + + def forward( + self, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.LongTensor] = None, + timestep: Optional[torch.LongTensor] = None, + class_labels: torch.LongTensor = None, + num_frames: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + return_dict: bool = True, + ) -> TransformerTemporalModelOutput: + """ + The [`TransformerTemporal`] forward method. + + Args: + hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous): + Input hidden_states. + encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): + Conditional embeddings for cross attention layer. If not given, cross-attention defaults to + self-attention. + timestep ( `torch.LongTensor`, *optional*): + Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. + class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): + Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in + `AdaLayerZeroNorm`. + num_frames (`int`, *optional*, defaults to 1): + The number of frames to be processed per batch. This is used to reshape the hidden states. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain + tuple. + + Returns: + [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`: + If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is + returned, otherwise a `tuple` where the first element is the sample tensor. + """ + # 1. Input + batch_frames, channel, height, width = hidden_states.shape + batch_size = batch_frames // num_frames + + residual = hidden_states + + hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width) + hidden_states = hidden_states.permute(0, 2, 1, 3, 4) + + hidden_states = self.norm(hidden_states) + hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel) + + hidden_states = self.proj_in(hidden_states) + + # 2. Blocks + for block in self.transformer_blocks: + hidden_states = block( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + timestep=timestep, + cross_attention_kwargs=cross_attention_kwargs, + class_labels=class_labels, + ) + + # 3. 
Output + hidden_states = self.proj_out(hidden_states) + hidden_states = ( + hidden_states[None, None, :] + .reshape(batch_size, height, width, num_frames, channel) + .permute(0, 3, 4, 1, 2) + .contiguous() + ) + hidden_states = hidden_states.reshape(batch_frames, channel, height, width) + + output = hidden_states + residual + + if not return_dict: + return (output,) + + return TransformerTemporalModelOutput(sample=output) + + +class TransformerSpatioTemporalModel(nn.Module): + """ + A Transformer model for video-like data. + + Parameters: + num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. + attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. + in_channels (`int`, *optional*): + The number of channels in the input and output (specify if the input is **continuous**). + out_channels (`int`, *optional*): + The number of channels in the output (specify if the input is **continuous**). + num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. + cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. + """ + + def __init__( + self, + num_attention_heads: int = 16, + attention_head_dim: int = 88, + in_channels: int = 320, + out_channels: Optional[int] = None, + num_layers: int = 1, + cross_attention_dim: Optional[int] = None, + ): + super().__init__() + self.num_attention_heads = num_attention_heads + self.attention_head_dim = attention_head_dim + + inner_dim = num_attention_heads * attention_head_dim + self.inner_dim = inner_dim + + # 2. Define input layers + self.in_channels = in_channels + self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6) + self.proj_in = nn.Linear(in_channels, inner_dim) + + # 3. Define transformers blocks + self.transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + inner_dim, + num_attention_heads, + attention_head_dim, + cross_attention_dim=cross_attention_dim, + ) + for d in range(num_layers) + ] + ) + + time_mix_inner_dim = inner_dim + self.temporal_transformer_blocks = nn.ModuleList( + [ + TemporalBasicTransformerBlock( + inner_dim, + time_mix_inner_dim, + num_attention_heads, + attention_head_dim, + cross_attention_dim=cross_attention_dim, + ) + for _ in range(num_layers) + ] + ) + + time_embed_dim = in_channels * 4 + self.time_pos_embed = TimestepEmbedding(in_channels, time_embed_dim, out_dim=in_channels) + self.time_proj = Timesteps(in_channels, True, 0) + self.time_mixer = AlphaBlender(alpha=0.5, merge_strategy="learned_with_images") + + # 4. Define output layers + self.out_channels = in_channels if out_channels is None else out_channels + # TODO: should use out_channels for continuous projections + self.proj_out = nn.Linear(inner_dim, in_channels) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + image_only_indicator: Optional[torch.Tensor] = None, + return_dict: bool = True, + ): + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`): + Input hidden_states. + num_frames (`int`): + The number of frames to be processed per batch. This is used to reshape the hidden states. + encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): + Conditional embeddings for cross attention layer. If not given, cross-attention defaults to + self-attention. 
+ image_only_indicator (`torch.LongTensor` of shape `(batch size, num_frames)`, *optional*): + A tensor indicating whether the input contains only images. 1 indicates that the input contains only + images, 0 indicates that the input contains video frames. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.transformer_temporal.TransformerTemporalModelOutput`] instead of a plain + tuple. + + Returns: + [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`: + If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is + returned, otherwise a `tuple` where the first element is the sample tensor. + """ + # 1. Input + batch_frames, _, height, width = hidden_states.shape + num_frames = image_only_indicator.shape[-1] + batch_size = batch_frames // num_frames + + time_context = encoder_hidden_states + time_context_first_timestep = time_context[None, :].reshape( + batch_size, num_frames, -1, time_context.shape[-1] + )[:, 0] + time_context = time_context_first_timestep[None, :].broadcast_to( + height * width, batch_size, 1, time_context.shape[-1] + ) + time_context = time_context.reshape(height * width * batch_size, 1, time_context.shape[-1]) + + residual = hidden_states + + hidden_states = self.norm(hidden_states) + inner_dim = hidden_states.shape[1] + hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch_frames, height * width, inner_dim) + hidden_states = self.proj_in(hidden_states) + + num_frames_emb = torch.arange(num_frames, device=hidden_states.device) + num_frames_emb = num_frames_emb.repeat(batch_size, 1) + num_frames_emb = num_frames_emb.reshape(-1) + t_emb = self.time_proj(num_frames_emb) + + # `Timesteps` does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. + t_emb = t_emb.to(dtype=hidden_states.dtype) + + emb = self.time_pos_embed(t_emb) + emb = emb[:, None, :] + + # 2. Blocks + for block, temporal_block in zip(self.transformer_blocks, self.temporal_transformer_blocks): + if self.training and self.gradient_checkpointing: + hidden_states = torch.utils.checkpoint.checkpoint( + block, + hidden_states, + None, + encoder_hidden_states, + None, + use_reentrant=False, + ) + else: + hidden_states = block( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + ) + + hidden_states_mix = hidden_states + hidden_states_mix = hidden_states_mix + emb + + hidden_states_mix = temporal_block( + hidden_states_mix, + num_frames=num_frames, + encoder_hidden_states=time_context, + ) + hidden_states = self.time_mixer( + x_spatial=hidden_states, + x_temporal=hidden_states_mix, + image_only_indicator=image_only_indicator, + ) + + # 3. 
Output + hidden_states = self.proj_out(hidden_states) + hidden_states = hidden_states.reshape(batch_frames, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() + + output = hidden_states + residual + + if not return_dict: + return (output,) + + return TransformerTemporalModelOutput(sample=output) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_1d.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_1d.py new file mode 100644 index 000000000..e857c90ca --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_1d.py @@ -0,0 +1,26 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ..utils import deprecate +from .unets.unet_1d import UNet1DModel, UNet1DOutput + + +class UNet1DOutput(UNet1DOutput): + deprecation_message = "Importing `UNet1DOutput` from `diffusers.models.unet_1d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d import UNet1DOutput`, instead." + deprecate("UNet1DOutput", "0.29", deprecation_message) + + +class UNet1DModel(UNet1DModel): + deprecation_message = "Importing `UNet1DModel` from `diffusers.models.unet_1d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d import UNet1DModel`, instead." + deprecate("UNet1DModel", "0.29", deprecation_message) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_1d_blocks.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_1d_blocks.py new file mode 100644 index 000000000..6b0f09457 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_1d_blocks.py @@ -0,0 +1,203 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
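For reference, a standalone sketch (with assumed toy sizes) of the reshapes `TransformerTemporalModel.forward` above uses so that attention runs over the frame axis instead of the spatial one, plus the inverse reshape applied before the residual add:

    import torch

    batch_size, num_frames, channel, height, width = 2, 4, 8, 3, 3   # illustrative sizes
    hidden_states = torch.randn(batch_size * num_frames, channel, height, width)

    # (B*F, C, H, W) -> (B*H*W, F, C): every spatial location becomes a sequence of frames.
    x = hidden_states.reshape(batch_size, num_frames, channel, height, width)
    x = x.permute(0, 2, 1, 3, 4)                                      # (B, C, F, H, W)
    x = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

    # ... temporal transformer blocks attend over the `num_frames` axis here ...

    # Inverse: (B*H*W, F, C) -> (B*F, C, H, W), matching the residual's layout.
    x = (
        x.reshape(batch_size, height, width, num_frames, channel)
        .permute(0, 3, 4, 1, 2)
        .contiguous()
        .reshape(batch_size * num_frames, channel, height, width)
    )
    print(torch.equal(x, hidden_states))                              # True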
+ +from ..utils import deprecate +from .unets.unet_1d_blocks import ( + AttnDownBlock1D, + AttnUpBlock1D, + DownBlock1D, + DownBlock1DNoSkip, + DownResnetBlock1D, + Downsample1d, + MidResTemporalBlock1D, + OutConv1DBlock, + OutValueFunctionBlock, + ResConvBlock, + SelfAttention1d, + UNetMidBlock1D, + UpBlock1D, + UpBlock1DNoSkip, + UpResnetBlock1D, + Upsample1d, + ValueFunctionMidBlock1D, +) + + +class DownResnetBlock1D(DownResnetBlock1D): + deprecation_message = "Importing `DownResnetBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import DownResnetBlock1D`, instead." + deprecate("DownResnetBlock1D", "0.29", deprecation_message) + + +class UpResnetBlock1D(UpResnetBlock1D): + deprecation_message = "Importing `UpResnetBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import UpResnetBlock1D`, instead." + deprecate("UpResnetBlock1D", "0.29", deprecation_message) + + +class ValueFunctionMidBlock1D(ValueFunctionMidBlock1D): + deprecation_message = "Importing `ValueFunctionMidBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import ValueFunctionMidBlock1D`, instead." + deprecate("ValueFunctionMidBlock1D", "0.29", deprecation_message) + + +class OutConv1DBlock(OutConv1DBlock): + deprecation_message = "Importing `OutConv1DBlock` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import OutConv1DBlock`, instead." + deprecate("OutConv1DBlock", "0.29", deprecation_message) + + +class OutValueFunctionBlock(OutValueFunctionBlock): + deprecation_message = "Importing `OutValueFunctionBlock` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import OutValueFunctionBlock`, instead." + deprecate("OutValueFunctionBlock", "0.29", deprecation_message) + + +class Downsample1d(Downsample1d): + deprecation_message = "Importing `Downsample1d` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import Downsample1d`, instead." + deprecate("Downsample1d", "0.29", deprecation_message) + + +class Upsample1d(Upsample1d): + deprecation_message = "Importing `Upsample1d` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import Upsample1d`, instead." + deprecate("Upsample1d", "0.29", deprecation_message) + + +class SelfAttention1d(SelfAttention1d): + deprecation_message = "Importing `SelfAttention1d` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import SelfAttention1d`, instead." + deprecate("SelfAttention1d", "0.29", deprecation_message) + + +class ResConvBlock(ResConvBlock): + deprecation_message = "Importing `ResConvBlock` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import ResConvBlock`, instead." 
+ deprecate("ResConvBlock", "0.29", deprecation_message) + + +class UNetMidBlock1D(UNetMidBlock1D): + deprecation_message = "Importing `UNetMidBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import UNetMidBlock1D`, instead." + deprecate("UNetMidBlock1D", "0.29", deprecation_message) + + +class AttnDownBlock1D(AttnDownBlock1D): + deprecation_message = "Importing `AttnDownBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import AttnDownBlock1D`, instead." + deprecate("AttnDownBlock1D", "0.29", deprecation_message) + + +class DownBlock1D(DownBlock1D): + deprecation_message = "Importing `DownBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import DownBlock1D`, instead." + deprecate("DownBlock1D", "0.29", deprecation_message) + + +class DownBlock1DNoSkip(DownBlock1DNoSkip): + deprecation_message = "Importing `DownBlock1DNoSkip` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import DownBlock1DNoSkip`, instead." + deprecate("DownBlock1DNoSkip", "0.29", deprecation_message) + + +class AttnUpBlock1D(AttnUpBlock1D): + deprecation_message = "Importing `AttnUpBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import AttnUpBlock1D`, instead." + deprecate("AttnUpBlock1D", "0.29", deprecation_message) + + +class UpBlock1D(UpBlock1D): + deprecation_message = "Importing `UpBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import UpBlock1D`, instead." + deprecate("UpBlock1D", "0.29", deprecation_message) + + +class UpBlock1DNoSkip(UpBlock1DNoSkip): + deprecation_message = "Importing `UpBlock1DNoSkip` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import UpBlock1DNoSkip`, instead." + deprecate("UpBlock1DNoSkip", "0.29", deprecation_message) + + +class MidResTemporalBlock1D(MidResTemporalBlock1D): + deprecation_message = "Importing `MidResTemporalBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import MidResTemporalBlock1D`, instead." + deprecate("MidResTemporalBlock1D", "0.29", deprecation_message) + + +def get_down_block( + down_block_type: str, + num_layers: int, + in_channels: int, + out_channels: int, + temb_channels: int, + add_downsample: bool, +): + deprecation_message = "Importing `get_down_block` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import get_down_block`, instead." 
+ deprecate("get_down_block", "0.29", deprecation_message) + + from .unets.unet_1d_blocks import get_down_block + + return get_down_block( + down_block_type=down_block_type, + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + ) + + +def get_up_block( + up_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_upsample: bool +): + deprecation_message = "Importing `get_up_block` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import get_up_block`, instead." + deprecate("get_up_block", "0.29", deprecation_message) + + from .unets.unet_1d_blocks import get_up_block + + return get_up_block( + up_block_type=up_block_type, + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_upsample=add_upsample, + ) + + +def get_mid_block( + mid_block_type: str, + num_layers: int, + in_channels: int, + mid_channels: int, + out_channels: int, + embed_dim: int, + add_downsample: bool, +): + deprecation_message = "Importing `get_mid_block` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import get_mid_block`, instead." + deprecate("get_mid_block", "0.29", deprecation_message) + + from .unets.unet_1d_blocks import get_mid_block + + return get_mid_block( + mid_block_type=mid_block_type, + num_layers=num_layers, + in_channels=in_channels, + mid_channels=mid_channels, + out_channels=out_channels, + embed_dim=embed_dim, + add_downsample=add_downsample, + ) + + +def get_out_block( + *, out_block_type: str, num_groups_out: int, embed_dim: int, out_channels: int, act_fn: str, fc_dim: int +): + deprecation_message = "Importing `get_out_block` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import get_out_block`, instead." + deprecate("get_out_block", "0.29", deprecation_message) + + from .unets.unet_1d_blocks import get_out_block + + return get_out_block( + out_block_type=out_block_type, + num_groups_out=num_groups_out, + embed_dim=embed_dim, + out_channels=out_channels, + act_fn=act_fn, + fc_dim=fc_dim, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_2d.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_2d.py new file mode 100644 index 000000000..21f1fea68 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_2d.py @@ -0,0 +1,27 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from ..utils import deprecate +from .unets.unet_2d import UNet2DModel, UNet2DOutput + + +class UNet2DOutput(UNet2DOutput): + deprecation_message = "Importing `UNet2DOutput` from `diffusers.models.unet_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d import UNet2DOutput`, instead." + deprecate("UNet2DOutput", "0.29", deprecation_message) + + +class UNet2DModel(UNet2DModel): + deprecation_message = "Importing `UNet2DModel` from `diffusers.models.unet_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d import UNet2DModel`, instead." + deprecate("UNet2DModel", "0.29", deprecation_message) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_2d_blocks.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_2d_blocks.py new file mode 100644 index 000000000..931fa89a7 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_2d_blocks.py @@ -0,0 +1,375 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional + +from ..utils import deprecate +from .unets.unet_2d_blocks import ( + AttnDownBlock2D, + AttnDownEncoderBlock2D, + AttnSkipDownBlock2D, + AttnSkipUpBlock2D, + AttnUpBlock2D, + AttnUpDecoderBlock2D, + AutoencoderTinyBlock, + CrossAttnDownBlock2D, + CrossAttnUpBlock2D, + DownBlock2D, + KAttentionBlock, + KCrossAttnDownBlock2D, + KCrossAttnUpBlock2D, + KDownBlock2D, + KUpBlock2D, + ResnetDownsampleBlock2D, + ResnetUpsampleBlock2D, + SimpleCrossAttnDownBlock2D, + SimpleCrossAttnUpBlock2D, + SkipDownBlock2D, + SkipUpBlock2D, + UNetMidBlock2D, + UNetMidBlock2DCrossAttn, + UNetMidBlock2DSimpleCrossAttn, + UpBlock2D, + UpDecoderBlock2D, +) + + +def get_down_block( + down_block_type: str, + num_layers: int, + in_channels: int, + out_channels: int, + temb_channels: int, + add_downsample: bool, + resnet_eps: float, + resnet_act_fn: str, + transformer_layers_per_block: int = 1, + num_attention_heads: Optional[int] = None, + resnet_groups: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + downsample_padding: Optional[int] = None, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + attention_type: str = "default", + resnet_skip_time_act: bool = False, + resnet_out_scale_factor: float = 1.0, + cross_attention_norm: Optional[str] = None, + attention_head_dim: Optional[int] = None, + downsample_type: Optional[str] = None, + dropout: float = 0.0, +): + deprecation_message = "Importing `get_down_block` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import get_down_block`, instead." 
+ deprecate("get_down_block", "0.29", deprecation_message) + + from .unets.unet_2d_blocks import get_down_block + + return get_down_block( + down_block_type=down_block_type, + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + transformer_layers_per_block=transformer_layers_per_block, + num_attention_heads=num_attention_heads, + resnet_groups=resnet_groups, + cross_attention_dim=cross_attention_dim, + downsample_padding=downsample_padding, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_type=attention_type, + resnet_skip_time_act=resnet_skip_time_act, + resnet_out_scale_factor=resnet_out_scale_factor, + cross_attention_norm=cross_attention_norm, + attention_head_dim=attention_head_dim, + downsample_type=downsample_type, + dropout=dropout, + ) + + +def get_mid_block( + mid_block_type: str, + temb_channels: int, + in_channels: int, + resnet_eps: float, + resnet_act_fn: str, + resnet_groups: int, + output_scale_factor: float = 1.0, + transformer_layers_per_block: int = 1, + num_attention_heads: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + mid_block_only_cross_attention: bool = False, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + attention_type: str = "default", + resnet_skip_time_act: bool = False, + cross_attention_norm: Optional[str] = None, + attention_head_dim: Optional[int] = 1, + dropout: float = 0.0, +): + if mid_block_type == "UNetMidBlock2DCrossAttn": + return UNetMidBlock2DCrossAttn( + transformer_layers_per_block=transformer_layers_per_block, + in_channels=in_channels, + temb_channels=temb_channels, + dropout=dropout, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + output_scale_factor=output_scale_factor, + resnet_time_scale_shift=resnet_time_scale_shift, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + resnet_groups=resnet_groups, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn": + return UNetMidBlock2DSimpleCrossAttn( + in_channels=in_channels, + temb_channels=temb_channels, + dropout=dropout, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + output_scale_factor=output_scale_factor, + cross_attention_dim=cross_attention_dim, + attention_head_dim=attention_head_dim, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + skip_time_act=resnet_skip_time_act, + only_cross_attention=mid_block_only_cross_attention, + cross_attention_norm=cross_attention_norm, + ) + elif mid_block_type == "UNetMidBlock2D": + return UNetMidBlock2D( + in_channels=in_channels, + temb_channels=temb_channels, + dropout=dropout, + num_layers=0, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + output_scale_factor=output_scale_factor, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + add_attention=False, + ) + elif mid_block_type is None: + return None + else: + raise ValueError(f"unknown mid_block_type : {mid_block_type}") + + +def get_up_block( + 
up_block_type: str, + num_layers: int, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + add_upsample: bool, + resnet_eps: float, + resnet_act_fn: str, + resolution_idx: Optional[int] = None, + transformer_layers_per_block: int = 1, + num_attention_heads: Optional[int] = None, + resnet_groups: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + attention_type: str = "default", + resnet_skip_time_act: bool = False, + resnet_out_scale_factor: float = 1.0, + cross_attention_norm: Optional[str] = None, + attention_head_dim: Optional[int] = None, + upsample_type: Optional[str] = None, + dropout: float = 0.0, +): + deprecation_message = "Importing `get_up_block` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import get_up_block`, instead." + deprecate("get_up_block", "0.29", deprecation_message) + + from .unets.unet_2d_blocks import get_up_block + + return get_up_block( + up_block_type=up_block_type, + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resolution_idx=resolution_idx, + transformer_layers_per_block=transformer_layers_per_block, + num_attention_heads=num_attention_heads, + resnet_groups=resnet_groups, + cross_attention_dim=cross_attention_dim, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_type=attention_type, + resnet_skip_time_act=resnet_skip_time_act, + resnet_out_scale_factor=resnet_out_scale_factor, + cross_attention_norm=cross_attention_norm, + attention_head_dim=attention_head_dim, + upsample_type=upsample_type, + dropout=dropout, + ) + + +class AutoencoderTinyBlock(AutoencoderTinyBlock): + deprecation_message = "Importing `AutoencoderTinyBlock` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AutoencoderTinyBlock`, instead." + deprecate("AutoencoderTinyBlock", "0.29", deprecation_message) + + +class UNetMidBlock2D(UNetMidBlock2D): + deprecation_message = "Importing `UNetMidBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2D`, instead." + deprecate("UNetMidBlock2D", "0.29", deprecation_message) + + +class UNetMidBlock2DCrossAttn(UNetMidBlock2DCrossAttn): + deprecation_message = "Importing `UNetMidBlock2DCrossAttn` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2DCrossAttn`, instead." + deprecate("UNetMidBlock2DCrossAttn", "0.29", deprecation_message) + + +class UNetMidBlock2DSimpleCrossAttn(UNetMidBlock2DSimpleCrossAttn): + deprecation_message = "Importing `UNetMidBlock2DSimpleCrossAttn` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. 
Please use `from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2DSimpleCrossAttn`, instead." + deprecate("UNetMidBlock2DSimpleCrossAttn", "0.29", deprecation_message) + + +class AttnDownBlock2D(AttnDownBlock2D): + deprecation_message = "Importing `AttnDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnDownBlock2D`, instead." + deprecate("AttnDownBlock2D", "0.29", deprecation_message) + + +class CrossAttnDownBlock2D(CrossAttnDownBlock2D): + deprecation_message = "Importing `CrossAttnDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import CrossAttnDownBlock2D`, instead." + deprecate("CrossAttnDownBlock2D", "0.29", deprecation_message) + + +class DownBlock2D(DownBlock2D): + deprecation_message = "Importing `DownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import DownBlock2D`, instead." + deprecate("DownBlock2D", "0.29", deprecation_message) + + +class AttnDownEncoderBlock2D(AttnDownEncoderBlock2D): + deprecation_message = "Importing `AttnDownEncoderBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnDownEncoderBlock2D`, instead." + deprecate("AttnDownEncoderBlock2D", "0.29", deprecation_message) + + +class AttnSkipDownBlock2D(AttnSkipDownBlock2D): + deprecation_message = "Importing `AttnSkipDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnSkipDownBlock2D`, instead." + deprecate("AttnSkipDownBlock2D", "0.29", deprecation_message) + + +class SkipDownBlock2D(SkipDownBlock2D): + deprecation_message = "Importing `SkipDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import SkipDownBlock2D`, instead." + deprecate("SkipDownBlock2D", "0.29", deprecation_message) + + +class ResnetDownsampleBlock2D(ResnetDownsampleBlock2D): + deprecation_message = "Importing `ResnetDownsampleBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import ResnetDownsampleBlock2D`, instead." + deprecate("ResnetDownsampleBlock2D", "0.29", deprecation_message) + + +class SimpleCrossAttnDownBlock2D(SimpleCrossAttnDownBlock2D): + deprecation_message = "Importing `SimpleCrossAttnDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import SimpleCrossAttnDownBlock2D`, instead." + deprecate("SimpleCrossAttnDownBlock2D", "0.29", deprecation_message) + + +class KDownBlock2D(KDownBlock2D): + deprecation_message = "Importing `KDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KDownBlock2D`, instead."
+ deprecate("KDownBlock2D", "0.29", deprecation_message) + + +class KCrossAttnDownBlock2D(KCrossAttnDownBlock2D): + deprecation_message = "Importing `KCrossAttnDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KCrossAttnDownBlock2D`, instead." + deprecate("KCrossAttnDownBlock2D", "0.29", deprecation_message) + + +class AttnUpBlock2D(AttnUpBlock2D): + deprecation_message = "Importing `AttnUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnUpBlock2D`, instead." + deprecate("AttnUpBlock2D", "0.29", deprecation_message) + + +class CrossAttnUpBlock2D(CrossAttnUpBlock2D): + deprecation_message = "Importing `CrossAttnUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import CrossAttnUpBlock2D`, instead." + deprecate("CrossAttnUpBlock2D", "0.29", deprecation_message) + + +class UpBlock2D(UpBlock2D): + deprecation_message = "Importing `UpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UpBlock2D`, instead." + deprecate("UpBlock2D", "0.29", deprecation_message) + + +class UpDecoderBlock2D(UpDecoderBlock2D): + deprecation_message = "Importing `UpDecoderBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UpDecoderBlock2D`, instead." + deprecate("UpDecoderBlock2D", "0.29", deprecation_message) + + +class AttnUpDecoderBlock2D(AttnUpDecoderBlock2D): + deprecation_message = "Importing `AttnUpDecoderBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnUpDecoderBlock2D`, instead." + deprecate("AttnUpDecoderBlock2D", "0.29", deprecation_message) + + +class AttnSkipUpBlock2D(AttnSkipUpBlock2D): + deprecation_message = "Importing `AttnSkipUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnSkipUpBlock2D`, instead." + deprecate("AttnSkipUpBlock2D", "0.29", deprecation_message) + + +class SkipUpBlock2D(SkipUpBlock2D): + deprecation_message = "Importing `SkipUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import SkipUpBlock2D`, instead." + deprecate("SkipUpBlock2D", "0.29", deprecation_message) + + +class ResnetUpsampleBlock2D(ResnetUpsampleBlock2D): + deprecation_message = "Importing `ResnetUpsampleBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import ResnetUpsampleBlock2D`, instead." + deprecate("ResnetUpsampleBlock2D", "0.29", deprecation_message) + + +class SimpleCrossAttnUpBlock2D(SimpleCrossAttnUpBlock2D): + deprecation_message = "Importing `SimpleCrossAttnUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. 
Please use `from diffusers.models.unets.unet_2d_blocks import SimpleCrossAttnUpBlock2D`, instead." + deprecate("SimpleCrossAttnUpBlock2D", "0.29", deprecation_message) + + +class KUpBlock2D(KUpBlock2D): + deprecation_message = "Importing `KUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KUpBlock2D`, instead." + deprecate("KUpBlock2D", "0.29", deprecation_message) + + +class KCrossAttnUpBlock2D(KCrossAttnUpBlock2D): + deprecation_message = "Importing `KCrossAttnUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KCrossAttnUpBlock2D`, instead." + deprecate("KCrossAttnUpBlock2D", "0.29", deprecation_message) + + +# can potentially later be renamed to `No-feed-forward` attention +class KAttentionBlock(KAttentionBlock): + deprecation_message = "Importing `KAttentionBlock` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KAttentionBlock`, instead." + deprecate("KAttentionBlock", "0.29", deprecation_message) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_2d_condition.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_2d_condition.py new file mode 100644 index 000000000..85a3e7b09 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unet_2d_condition.py @@ -0,0 +1,25 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from ..utils import deprecate +from .unets.unet_2d_condition import UNet2DConditionModel, UNet2DConditionOutput + + +class UNet2DConditionOutput(UNet2DConditionOutput): + deprecation_message = "Importing `UNet2DConditionOutput` from `diffusers.models.unet_2d_condition` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_condition import UNet2DConditionOutput`, instead." + deprecate("UNet2DConditionOutput", "0.29", deprecation_message) + + +class UNet2DConditionModel(UNet2DConditionModel): + deprecation_message = "Importing `UNet2DConditionModel` from `diffusers.models.unet_2d_condition` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel`, instead." 
+ deprecate("UNet2DConditionModel", "0.29", deprecation_message) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/__init__.py new file mode 100644 index 000000000..9ef04fb62 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/__init__.py @@ -0,0 +1,18 @@ +from ...utils import is_flax_available, is_torch_available + + +if is_torch_available(): + from .unet_1d import UNet1DModel + from .unet_2d import UNet2DModel + from .unet_2d_condition import UNet2DConditionModel + from .unet_3d_condition import UNet3DConditionModel + from .unet_i2vgen_xl import I2VGenXLUNet + from .unet_kandinsky3 import Kandinsky3UNet + from .unet_motion_model import MotionAdapter, UNetMotionModel + from .unet_spatio_temporal_condition import UNetSpatioTemporalConditionModel + from .unet_stable_cascade import StableCascadeUNet + from .uvit_2d import UVit2DModel + + +if is_flax_available(): + from .unet_2d_condition_flax import FlaxUNet2DConditionModel diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_1d.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_1d.py new file mode 100644 index 000000000..59d70f67c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_1d.py @@ -0,0 +1,255 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...utils import BaseOutput +from ..embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps +from ..modeling_utils import ModelMixin +from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block + + +@dataclass +class UNet1DOutput(BaseOutput): + """ + The output of [`UNet1DModel`]. + + Args: + sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`): + The hidden states output from the last layer of the model. + """ + + sample: torch.FloatTensor + + +class UNet1DModel(ModelMixin, ConfigMixin): + r""" + A 1D UNet model that takes a noisy sample and a timestep and returns a sample shaped output. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + sample_size (`int`, *optional*): Default length of sample. Should be adaptable at runtime. + in_channels (`int`, *optional*, defaults to 2): Number of channels in the input sample. + out_channels (`int`, *optional*, defaults to 2): Number of channels in the output. 
+ extra_in_channels (`int`, *optional*, defaults to 0): + Number of additional channels to be added to the input of the first down block. Useful for cases where the + input data has more channels than what the model was initially designed for. + time_embedding_type (`str`, *optional*, defaults to `"fourier"`): Type of time embedding to use. + freq_shift (`float`, *optional*, defaults to 0.0): Frequency shift for Fourier time embedding. + flip_sin_to_cos (`bool`, *optional*, defaults to `False`): + Whether to flip sin to cos for Fourier time embedding. + down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D")`): + Tuple of downsample block types. + up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip")`): + Tuple of upsample block types. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(32, 32, 64)`): + Tuple of block output channels. + mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock1D"`): Block type for middle of UNet. + out_block_type (`str`, *optional*, defaults to `None`): Optional output processing block of UNet. + act_fn (`str`, *optional*, defaults to `None`): Optional activation function in UNet blocks. + norm_num_groups (`int`, *optional*, defaults to 8): The number of groups for normalization. + layers_per_block (`int`, *optional*, defaults to 1): The number of layers per block. + downsample_each_block (`int`, *optional*, defaults to `False`): + Experimental feature for using a UNet without upsampling. + """ + + @register_to_config + def __init__( + self, + sample_size: int = 65536, + sample_rate: Optional[int] = None, + in_channels: int = 2, + out_channels: int = 2, + extra_in_channels: int = 0, + time_embedding_type: str = "fourier", + flip_sin_to_cos: bool = True, + use_timestep_embedding: bool = False, + freq_shift: float = 0.0, + down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), + up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), + mid_block_type: Tuple[str] = "UNetMidBlock1D", + out_block_type: str = None, + block_out_channels: Tuple[int] = (32, 32, 64), + act_fn: str = None, + norm_num_groups: int = 8, + layers_per_block: int = 1, + downsample_each_block: bool = False, + ): + super().__init__() + self.sample_size = sample_size + + # time + if time_embedding_type == "fourier": + self.time_proj = GaussianFourierProjection( + embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos + ) + timestep_input_dim = 2 * block_out_channels[0] + elif time_embedding_type == "positional": + self.time_proj = Timesteps( + block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift + ) + timestep_input_dim = block_out_channels[0] + + if use_timestep_embedding: + time_embed_dim = block_out_channels[0] * 4 + self.time_mlp = TimestepEmbedding( + in_channels=timestep_input_dim, + time_embed_dim=time_embed_dim, + act_fn=act_fn, + out_dim=block_out_channels[0], + ) + + self.down_blocks = nn.ModuleList([]) + self.mid_block = None + self.up_blocks = nn.ModuleList([]) + self.out_block = None + + # down + output_channel = in_channels + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + + if i == 0: + input_channel += extra_in_channels + + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + 
in_channels=input_channel, + out_channels=output_channel, + temb_channels=block_out_channels[0], + add_downsample=not is_final_block or downsample_each_block, + ) + self.down_blocks.append(down_block) + + # mid + self.mid_block = get_mid_block( + mid_block_type, + in_channels=block_out_channels[-1], + mid_channels=block_out_channels[-1], + out_channels=block_out_channels[-1], + embed_dim=block_out_channels[0], + num_layers=layers_per_block, + add_downsample=downsample_each_block, + ) + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + output_channel = reversed_block_out_channels[0] + if out_block_type is None: + final_upsample_channels = out_channels + else: + final_upsample_channels = block_out_channels[0] + + for i, up_block_type in enumerate(up_block_types): + prev_output_channel = output_channel + output_channel = ( + reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels + ) + + is_final_block = i == len(block_out_channels) - 1 + + up_block = get_up_block( + up_block_type, + num_layers=layers_per_block, + in_channels=prev_output_channel, + out_channels=output_channel, + temb_channels=block_out_channels[0], + add_upsample=not is_final_block, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) + self.out_block = get_out_block( + out_block_type=out_block_type, + num_groups_out=num_groups_out, + embed_dim=block_out_channels[0], + out_channels=out_channels, + act_fn=act_fn, + fc_dim=block_out_channels[-1] // 4, + ) + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + return_dict: bool = True, + ) -> Union[UNet1DOutput, Tuple]: + r""" + The [`UNet1DModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor with the following shape `(batch_size, num_channels, sample_size)`. + timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unet_1d.UNet1DOutput`] instead of a plain tuple. + + Returns: + [`~models.unet_1d.UNet1DOutput`] or `tuple`: + If `return_dict` is True, an [`~models.unet_1d.UNet1DOutput`] is returned, otherwise a `tuple` is + returned where the first element is the sample tensor. + """ + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) + elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + timestep_embed = self.time_proj(timesteps) + if self.config.use_timestep_embedding: + timestep_embed = self.time_mlp(timestep_embed) + else: + timestep_embed = timestep_embed[..., None] + timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) + timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) + + # 2. down + down_block_res_samples = () + for downsample_block in self.down_blocks: + sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed) + down_block_res_samples += res_samples + + # 3. mid + if self.mid_block: + sample = self.mid_block(sample, timestep_embed) + + # 4. 
up + for i, upsample_block in enumerate(self.up_blocks): + res_samples = down_block_res_samples[-1:] + down_block_res_samples = down_block_res_samples[:-1] + sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed) + + # 5. post-process + if self.out_block: + sample = self.out_block(sample, timestep_embed) + + if not return_dict: + return (sample,) + + return UNet1DOutput(sample=sample) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_1d_blocks.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_1d_blocks.py new file mode 100644 index 000000000..e3163cd1d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_1d_blocks.py @@ -0,0 +1,702 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import math +from typing import Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from torch import nn + +from ..activations import get_activation +from ..resnet import Downsample1D, ResidualTemporalBlock1D, Upsample1D, rearrange_dims + + +class DownResnetBlock1D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: Optional[int] = None, + num_layers: int = 1, + conv_shortcut: bool = False, + temb_channels: int = 32, + groups: int = 32, + groups_out: Optional[int] = None, + non_linearity: Optional[str] = None, + time_embedding_norm: str = "default", + output_scale_factor: float = 1.0, + add_downsample: bool = True, + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + self.time_embedding_norm = time_embedding_norm + self.add_downsample = add_downsample + self.output_scale_factor = output_scale_factor + + if groups_out is None: + groups_out = groups + + # there will always be at least one resnet + resnets = [ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=temb_channels)] + + for _ in range(num_layers): + resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels)) + + self.resnets = nn.ModuleList(resnets) + + if non_linearity is None: + self.nonlinearity = None + else: + self.nonlinearity = get_activation(non_linearity) + + self.downsample = None + if add_downsample: + self.downsample = Downsample1D(out_channels, use_conv=True, padding=1) + + def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + output_states = () + + hidden_states = self.resnets[0](hidden_states, temb) + for resnet in self.resnets[1:]: + hidden_states = resnet(hidden_states, temb) + + output_states += (hidden_states,) + + if self.nonlinearity is not None: + hidden_states = self.nonlinearity(hidden_states) + + if self.downsample is not None: + hidden_states = 
self.downsample(hidden_states) + + return hidden_states, output_states + + +class UpResnetBlock1D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: Optional[int] = None, + num_layers: int = 1, + temb_channels: int = 32, + groups: int = 32, + groups_out: Optional[int] = None, + non_linearity: Optional[str] = None, + time_embedding_norm: str = "default", + output_scale_factor: float = 1.0, + add_upsample: bool = True, + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.time_embedding_norm = time_embedding_norm + self.add_upsample = add_upsample + self.output_scale_factor = output_scale_factor + + if groups_out is None: + groups_out = groups + + # there will always be at least one resnet + resnets = [ResidualTemporalBlock1D(2 * in_channels, out_channels, embed_dim=temb_channels)] + + for _ in range(num_layers): + resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels)) + + self.resnets = nn.ModuleList(resnets) + + if non_linearity is None: + self.nonlinearity = None + else: + self.nonlinearity = get_activation(non_linearity) + + self.upsample = None + if add_upsample: + self.upsample = Upsample1D(out_channels, use_conv_transpose=True) + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Optional[Tuple[torch.FloatTensor, ...]] = None, + temb: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + if res_hidden_states_tuple is not None: + res_hidden_states = res_hidden_states_tuple[-1] + hidden_states = torch.cat((hidden_states, res_hidden_states), dim=1) + + hidden_states = self.resnets[0](hidden_states, temb) + for resnet in self.resnets[1:]: + hidden_states = resnet(hidden_states, temb) + + if self.nonlinearity is not None: + hidden_states = self.nonlinearity(hidden_states) + + if self.upsample is not None: + hidden_states = self.upsample(hidden_states) + + return hidden_states + + +class ValueFunctionMidBlock1D(nn.Module): + def __init__(self, in_channels: int, out_channels: int, embed_dim: int): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.embed_dim = embed_dim + + self.res1 = ResidualTemporalBlock1D(in_channels, in_channels // 2, embed_dim=embed_dim) + self.down1 = Downsample1D(out_channels // 2, use_conv=True) + self.res2 = ResidualTemporalBlock1D(in_channels // 2, in_channels // 4, embed_dim=embed_dim) + self.down2 = Downsample1D(out_channels // 4, use_conv=True) + + def forward(self, x: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + x = self.res1(x, temb) + x = self.down1(x) + x = self.res2(x, temb) + x = self.down2(x) + return x + + +class MidResTemporalBlock1D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + embed_dim: int, + num_layers: int = 1, + add_downsample: bool = False, + add_upsample: bool = False, + non_linearity: Optional[str] = None, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.add_downsample = add_downsample + + # there will always be at least one resnet + resnets = [ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=embed_dim)] + + for _ in range(num_layers): + resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=embed_dim)) + + self.resnets = nn.ModuleList(resnets) + + if non_linearity is None: + self.nonlinearity = None + else: + 
self.nonlinearity = get_activation(non_linearity) + + self.upsample = None + if add_upsample: + self.upsample = Downsample1D(out_channels, use_conv=True) + + self.downsample = None + if add_downsample: + self.downsample = Downsample1D(out_channels, use_conv=True) + + if self.upsample and self.downsample: + raise ValueError("Block cannot downsample and upsample") + + def forward(self, hidden_states: torch.FloatTensor, temb: torch.FloatTensor) -> torch.FloatTensor: + hidden_states = self.resnets[0](hidden_states, temb) + for resnet in self.resnets[1:]: + hidden_states = resnet(hidden_states, temb) + + if self.upsample: + hidden_states = self.upsample(hidden_states) + if self.downsample: + self.downsample = self.downsample(hidden_states) + + return hidden_states + + +class OutConv1DBlock(nn.Module): + def __init__(self, num_groups_out: int, out_channels: int, embed_dim: int, act_fn: str): + super().__init__() + self.final_conv1d_1 = nn.Conv1d(embed_dim, embed_dim, 5, padding=2) + self.final_conv1d_gn = nn.GroupNorm(num_groups_out, embed_dim) + self.final_conv1d_act = get_activation(act_fn) + self.final_conv1d_2 = nn.Conv1d(embed_dim, out_channels, 1) + + def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + hidden_states = self.final_conv1d_1(hidden_states) + hidden_states = rearrange_dims(hidden_states) + hidden_states = self.final_conv1d_gn(hidden_states) + hidden_states = rearrange_dims(hidden_states) + hidden_states = self.final_conv1d_act(hidden_states) + hidden_states = self.final_conv1d_2(hidden_states) + return hidden_states + + +class OutValueFunctionBlock(nn.Module): + def __init__(self, fc_dim: int, embed_dim: int, act_fn: str = "mish"): + super().__init__() + self.final_block = nn.ModuleList( + [ + nn.Linear(fc_dim + embed_dim, fc_dim // 2), + get_activation(act_fn), + nn.Linear(fc_dim // 2, 1), + ] + ) + + def forward(self, hidden_states: torch.FloatTensor, temb: torch.FloatTensor) -> torch.FloatTensor: + hidden_states = hidden_states.view(hidden_states.shape[0], -1) + hidden_states = torch.cat((hidden_states, temb), dim=-1) + for layer in self.final_block: + hidden_states = layer(hidden_states) + + return hidden_states + + +_kernels = { + "linear": [1 / 8, 3 / 8, 3 / 8, 1 / 8], + "cubic": [-0.01171875, -0.03515625, 0.11328125, 0.43359375, 0.43359375, 0.11328125, -0.03515625, -0.01171875], + "lanczos3": [ + 0.003689131001010537, + 0.015056144446134567, + -0.03399861603975296, + -0.066637322306633, + 0.13550527393817902, + 0.44638532400131226, + 0.44638532400131226, + 0.13550527393817902, + -0.066637322306633, + -0.03399861603975296, + 0.015056144446134567, + 0.003689131001010537, + ], +} + + +class Downsample1d(nn.Module): + def __init__(self, kernel: str = "linear", pad_mode: str = "reflect"): + super().__init__() + self.pad_mode = pad_mode + kernel_1d = torch.tensor(_kernels[kernel]) + self.pad = kernel_1d.shape[0] // 2 - 1 + self.register_buffer("kernel", kernel_1d) + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + hidden_states = F.pad(hidden_states, (self.pad,) * 2, self.pad_mode) + weight = hidden_states.new_zeros([hidden_states.shape[1], hidden_states.shape[1], self.kernel.shape[0]]) + indices = torch.arange(hidden_states.shape[1], device=hidden_states.device) + kernel = self.kernel.to(weight)[None, :].expand(hidden_states.shape[1], -1) + weight[indices, indices] = kernel + return F.conv1d(hidden_states, weight, stride=2) + + +class Upsample1d(nn.Module): + def __init__(self, 
kernel: str = "linear", pad_mode: str = "reflect"): + super().__init__() + self.pad_mode = pad_mode + kernel_1d = torch.tensor(_kernels[kernel]) * 2 + self.pad = kernel_1d.shape[0] // 2 - 1 + self.register_buffer("kernel", kernel_1d) + + def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + hidden_states = F.pad(hidden_states, ((self.pad + 1) // 2,) * 2, self.pad_mode) + weight = hidden_states.new_zeros([hidden_states.shape[1], hidden_states.shape[1], self.kernel.shape[0]]) + indices = torch.arange(hidden_states.shape[1], device=hidden_states.device) + kernel = self.kernel.to(weight)[None, :].expand(hidden_states.shape[1], -1) + weight[indices, indices] = kernel + return F.conv_transpose1d(hidden_states, weight, stride=2, padding=self.pad * 2 + 1) + + +class SelfAttention1d(nn.Module): + def __init__(self, in_channels: int, n_head: int = 1, dropout_rate: float = 0.0): + super().__init__() + self.channels = in_channels + self.group_norm = nn.GroupNorm(1, num_channels=in_channels) + self.num_heads = n_head + + self.query = nn.Linear(self.channels, self.channels) + self.key = nn.Linear(self.channels, self.channels) + self.value = nn.Linear(self.channels, self.channels) + + self.proj_attn = nn.Linear(self.channels, self.channels, bias=True) + + self.dropout = nn.Dropout(dropout_rate, inplace=True) + + def transpose_for_scores(self, projection: torch.Tensor) -> torch.Tensor: + new_projection_shape = projection.size()[:-1] + (self.num_heads, -1) + # move heads to 2nd position (B, T, H * D) -> (B, T, H, D) -> (B, H, T, D) + new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3) + return new_projection + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + residual = hidden_states + batch, channel_dim, seq = hidden_states.shape + + hidden_states = self.group_norm(hidden_states) + hidden_states = hidden_states.transpose(1, 2) + + query_proj = self.query(hidden_states) + key_proj = self.key(hidden_states) + value_proj = self.value(hidden_states) + + query_states = self.transpose_for_scores(query_proj) + key_states = self.transpose_for_scores(key_proj) + value_states = self.transpose_for_scores(value_proj) + + scale = 1 / math.sqrt(math.sqrt(key_states.shape[-1])) + + attention_scores = torch.matmul(query_states * scale, key_states.transpose(-1, -2) * scale) + attention_probs = torch.softmax(attention_scores, dim=-1) + + # compute attention output + hidden_states = torch.matmul(attention_probs, value_states) + + hidden_states = hidden_states.permute(0, 2, 1, 3).contiguous() + new_hidden_states_shape = hidden_states.size()[:-2] + (self.channels,) + hidden_states = hidden_states.view(new_hidden_states_shape) + + # compute next hidden_states + hidden_states = self.proj_attn(hidden_states) + hidden_states = hidden_states.transpose(1, 2) + hidden_states = self.dropout(hidden_states) + + output = hidden_states + residual + + return output + + +class ResConvBlock(nn.Module): + def __init__(self, in_channels: int, mid_channels: int, out_channels: int, is_last: bool = False): + super().__init__() + self.is_last = is_last + self.has_conv_skip = in_channels != out_channels + + if self.has_conv_skip: + self.conv_skip = nn.Conv1d(in_channels, out_channels, 1, bias=False) + + self.conv_1 = nn.Conv1d(in_channels, mid_channels, 5, padding=2) + self.group_norm_1 = nn.GroupNorm(1, mid_channels) + self.gelu_1 = nn.GELU() + self.conv_2 = nn.Conv1d(mid_channels, out_channels, 5, padding=2) + + if not self.is_last: 
+ self.group_norm_2 = nn.GroupNorm(1, out_channels) + self.gelu_2 = nn.GELU() + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + residual = self.conv_skip(hidden_states) if self.has_conv_skip else hidden_states + + hidden_states = self.conv_1(hidden_states) + hidden_states = self.group_norm_1(hidden_states) + hidden_states = self.gelu_1(hidden_states) + hidden_states = self.conv_2(hidden_states) + + if not self.is_last: + hidden_states = self.group_norm_2(hidden_states) + hidden_states = self.gelu_2(hidden_states) + + output = hidden_states + residual + return output + + +class UNetMidBlock1D(nn.Module): + def __init__(self, mid_channels: int, in_channels: int, out_channels: Optional[int] = None): + super().__init__() + + out_channels = in_channels if out_channels is None else out_channels + + # there is always at least one resnet + self.down = Downsample1d("cubic") + resnets = [ + ResConvBlock(in_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, out_channels), + ] + attentions = [ + SelfAttention1d(mid_channels, mid_channels // 32), + SelfAttention1d(mid_channels, mid_channels // 32), + SelfAttention1d(mid_channels, mid_channels // 32), + SelfAttention1d(mid_channels, mid_channels // 32), + SelfAttention1d(mid_channels, mid_channels // 32), + SelfAttention1d(out_channels, out_channels // 32), + ] + self.up = Upsample1d(kernel="cubic") + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + hidden_states = self.down(hidden_states) + for attn, resnet in zip(self.attentions, self.resnets): + hidden_states = resnet(hidden_states) + hidden_states = attn(hidden_states) + + hidden_states = self.up(hidden_states) + + return hidden_states + + +class AttnDownBlock1D(nn.Module): + def __init__(self, out_channels: int, in_channels: int, mid_channels: Optional[int] = None): + super().__init__() + mid_channels = out_channels if mid_channels is None else mid_channels + + self.down = Downsample1d("cubic") + resnets = [ + ResConvBlock(in_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, out_channels), + ] + attentions = [ + SelfAttention1d(mid_channels, mid_channels // 32), + SelfAttention1d(mid_channels, mid_channels // 32), + SelfAttention1d(out_channels, out_channels // 32), + ] + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + hidden_states = self.down(hidden_states) + + for resnet, attn in zip(self.resnets, self.attentions): + hidden_states = resnet(hidden_states) + hidden_states = attn(hidden_states) + + return hidden_states, (hidden_states,) + + +class DownBlock1D(nn.Module): + def __init__(self, out_channels: int, in_channels: int, mid_channels: Optional[int] = None): + super().__init__() + mid_channels = out_channels if mid_channels is None else mid_channels + + self.down = Downsample1d("cubic") + resnets = [ + ResConvBlock(in_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, 
mid_channels), + ResConvBlock(mid_channels, mid_channels, out_channels), + ] + + self.resnets = nn.ModuleList(resnets) + + def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + hidden_states = self.down(hidden_states) + + for resnet in self.resnets: + hidden_states = resnet(hidden_states) + + return hidden_states, (hidden_states,) + + +class DownBlock1DNoSkip(nn.Module): + def __init__(self, out_channels: int, in_channels: int, mid_channels: Optional[int] = None): + super().__init__() + mid_channels = out_channels if mid_channels is None else mid_channels + + resnets = [ + ResConvBlock(in_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, out_channels), + ] + + self.resnets = nn.ModuleList(resnets) + + def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + hidden_states = torch.cat([hidden_states, temb], dim=1) + for resnet in self.resnets: + hidden_states = resnet(hidden_states) + + return hidden_states, (hidden_states,) + + +class AttnUpBlock1D(nn.Module): + def __init__(self, in_channels: int, out_channels: int, mid_channels: Optional[int] = None): + super().__init__() + mid_channels = out_channels if mid_channels is None else mid_channels + + resnets = [ + ResConvBlock(2 * in_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, out_channels), + ] + attentions = [ + SelfAttention1d(mid_channels, mid_channels // 32), + SelfAttention1d(mid_channels, mid_channels // 32), + SelfAttention1d(out_channels, out_channels // 32), + ] + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + self.up = Upsample1d(kernel="cubic") + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + res_hidden_states = res_hidden_states_tuple[-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + for resnet, attn in zip(self.resnets, self.attentions): + hidden_states = resnet(hidden_states) + hidden_states = attn(hidden_states) + + hidden_states = self.up(hidden_states) + + return hidden_states + + +class UpBlock1D(nn.Module): + def __init__(self, in_channels: int, out_channels: int, mid_channels: Optional[int] = None): + super().__init__() + mid_channels = in_channels if mid_channels is None else mid_channels + + resnets = [ + ResConvBlock(2 * in_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, out_channels), + ] + + self.resnets = nn.ModuleList(resnets) + self.up = Upsample1d(kernel="cubic") + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + res_hidden_states = res_hidden_states_tuple[-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + for resnet in self.resnets: + hidden_states = resnet(hidden_states) + + hidden_states = self.up(hidden_states) + + return hidden_states + + +class UpBlock1DNoSkip(nn.Module): + def __init__(self, in_channels: int, out_channels: int, mid_channels: Optional[int] = None): + super().__init__() + mid_channels = in_channels if mid_channels is None 
else mid_channels + + resnets = [ + ResConvBlock(2 * in_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, mid_channels), + ResConvBlock(mid_channels, mid_channels, out_channels, is_last=True), + ] + + self.resnets = nn.ModuleList(resnets) + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + res_hidden_states = res_hidden_states_tuple[-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + for resnet in self.resnets: + hidden_states = resnet(hidden_states) + + return hidden_states + + +DownBlockType = Union[DownResnetBlock1D, DownBlock1D, AttnDownBlock1D, DownBlock1DNoSkip] +MidBlockType = Union[MidResTemporalBlock1D, ValueFunctionMidBlock1D, UNetMidBlock1D] +OutBlockType = Union[OutConv1DBlock, OutValueFunctionBlock] +UpBlockType = Union[UpResnetBlock1D, UpBlock1D, AttnUpBlock1D, UpBlock1DNoSkip] + + +def get_down_block( + down_block_type: str, + num_layers: int, + in_channels: int, + out_channels: int, + temb_channels: int, + add_downsample: bool, +) -> DownBlockType: + if down_block_type == "DownResnetBlock1D": + return DownResnetBlock1D( + in_channels=in_channels, + num_layers=num_layers, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + ) + elif down_block_type == "DownBlock1D": + return DownBlock1D(out_channels=out_channels, in_channels=in_channels) + elif down_block_type == "AttnDownBlock1D": + return AttnDownBlock1D(out_channels=out_channels, in_channels=in_channels) + elif down_block_type == "DownBlock1DNoSkip": + return DownBlock1DNoSkip(out_channels=out_channels, in_channels=in_channels) + raise ValueError(f"{down_block_type} does not exist.") + + +def get_up_block( + up_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_upsample: bool +) -> UpBlockType: + if up_block_type == "UpResnetBlock1D": + return UpResnetBlock1D( + in_channels=in_channels, + num_layers=num_layers, + out_channels=out_channels, + temb_channels=temb_channels, + add_upsample=add_upsample, + ) + elif up_block_type == "UpBlock1D": + return UpBlock1D(in_channels=in_channels, out_channels=out_channels) + elif up_block_type == "AttnUpBlock1D": + return AttnUpBlock1D(in_channels=in_channels, out_channels=out_channels) + elif up_block_type == "UpBlock1DNoSkip": + return UpBlock1DNoSkip(in_channels=in_channels, out_channels=out_channels) + raise ValueError(f"{up_block_type} does not exist.") + + +def get_mid_block( + mid_block_type: str, + num_layers: int, + in_channels: int, + mid_channels: int, + out_channels: int, + embed_dim: int, + add_downsample: bool, +) -> MidBlockType: + if mid_block_type == "MidResTemporalBlock1D": + return MidResTemporalBlock1D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + embed_dim=embed_dim, + add_downsample=add_downsample, + ) + elif mid_block_type == "ValueFunctionMidBlock1D": + return ValueFunctionMidBlock1D(in_channels=in_channels, out_channels=out_channels, embed_dim=embed_dim) + elif mid_block_type == "UNetMidBlock1D": + return UNetMidBlock1D(in_channels=in_channels, mid_channels=mid_channels, out_channels=out_channels) + raise ValueError(f"{mid_block_type} does not exist.") + + +def get_out_block( + *, out_block_type: str, num_groups_out: int, embed_dim: int, out_channels: int, act_fn: str, fc_dim: int +) -> Optional[OutBlockType]: + if out_block_type == 
"OutConv1DBlock": + return OutConv1DBlock(num_groups_out, out_channels, embed_dim, act_fn) + elif out_block_type == "ValueFunction": + return OutValueFunctionBlock(fc_dim, embed_dim, act_fn) + return None diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d.py new file mode 100644 index 000000000..5efb63822 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d.py @@ -0,0 +1,346 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...utils import BaseOutput +from ..embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps +from ..modeling_utils import ModelMixin +from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block + + +@dataclass +class UNet2DOutput(BaseOutput): + """ + The output of [`UNet2DModel`]. + + Args: + sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + The hidden states output from the last layer of the model. + """ + + sample: torch.FloatTensor + + +class UNet2DModel(ModelMixin, ConfigMixin): + r""" + A 2D UNet model that takes a noisy sample and a timestep and returns a sample shaped output. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): + Height and width of input/output sample. Dimensions must be a multiple of `2 ** (len(block_out_channels) - + 1)`. + in_channels (`int`, *optional*, defaults to 3): Number of channels in the input sample. + out_channels (`int`, *optional*, defaults to 3): Number of channels in the output. + center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. + time_embedding_type (`str`, *optional*, defaults to `"positional"`): Type of time embedding to use. + freq_shift (`int`, *optional*, defaults to 0): Frequency shift for Fourier time embedding. + flip_sin_to_cos (`bool`, *optional*, defaults to `True`): + Whether to flip sin to cos for Fourier time embedding. + down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D")`): + Tuple of downsample block types. + mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2D"`): + Block type for middle of UNet, it can be either `UNetMidBlock2D` or `UnCLIPUNetMidBlock2D`. + up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D")`): + Tuple of upsample block types. 
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(224, 448, 672, 896)`): + Tuple of block output channels. + layers_per_block (`int`, *optional*, defaults to `2`): The number of layers per block. + mid_block_scale_factor (`float`, *optional*, defaults to `1`): The scale factor for the mid block. + downsample_padding (`int`, *optional*, defaults to `1`): The padding for the downsample convolution. + downsample_type (`str`, *optional*, defaults to `conv`): + The downsample type for downsampling layers. Choose between "conv" and "resnet" + upsample_type (`str`, *optional*, defaults to `conv`): + The upsample type for upsampling layers. Choose between "conv" and "resnet" + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. + attention_head_dim (`int`, *optional*, defaults to `8`): The attention head dimension. + norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups for normalization. + attn_norm_num_groups (`int`, *optional*, defaults to `None`): + If set to an integer, a group norm layer will be created in the mid block's [`Attention`] layer with the + given number of groups. If left as `None`, the group norm layer will only be created if + `resnet_time_scale_shift` is set to `default`, and if created will have `norm_num_groups` groups. + norm_eps (`float`, *optional*, defaults to `1e-5`): The epsilon for normalization. + resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config + for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. + class_embed_type (`str`, *optional*, defaults to `None`): + The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, + `"timestep"`, or `"identity"`. + num_class_embeds (`int`, *optional*, defaults to `None`): + Input dimension of the learnable embedding matrix to be projected to `time_embed_dim` when performing class + conditioning with `class_embed_type` equal to `None`. + """ + + @register_to_config + def __init__( + self, + sample_size: Optional[Union[int, Tuple[int, int]]] = None, + in_channels: int = 3, + out_channels: int = 3, + center_input_sample: bool = False, + time_embedding_type: str = "positional", + freq_shift: int = 0, + flip_sin_to_cos: bool = True, + down_block_types: Tuple[str, ...] = ("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D"), + up_block_types: Tuple[str, ...] = ("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D"), + block_out_channels: Tuple[int, ...] = (224, 448, 672, 896), + layers_per_block: int = 2, + mid_block_scale_factor: float = 1, + downsample_padding: int = 1, + downsample_type: str = "conv", + upsample_type: str = "conv", + dropout: float = 0.0, + act_fn: str = "silu", + attention_head_dim: Optional[int] = 8, + norm_num_groups: int = 32, + attn_norm_num_groups: Optional[int] = None, + norm_eps: float = 1e-5, + resnet_time_scale_shift: str = "default", + add_attention: bool = True, + class_embed_type: Optional[str] = None, + num_class_embeds: Optional[int] = None, + num_train_timesteps: Optional[int] = None, + ): + super().__init__() + + self.sample_size = sample_size + time_embed_dim = block_out_channels[0] * 4 + + # Check inputs + if len(down_block_types) != len(up_block_types): + raise ValueError( + f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. 
`up_block_types`: {up_block_types}." + ) + + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + # input + self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1)) + + # time + if time_embedding_type == "fourier": + self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=16) + timestep_input_dim = 2 * block_out_channels[0] + elif time_embedding_type == "positional": + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) + timestep_input_dim = block_out_channels[0] + elif time_embedding_type == "learned": + self.time_proj = nn.Embedding(num_train_timesteps, block_out_channels[0]) + timestep_input_dim = block_out_channels[0] + + self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) + + # class embedding + if class_embed_type is None and num_class_embeds is not None: + self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) + elif class_embed_type == "timestep": + self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) + elif class_embed_type == "identity": + self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) + else: + self.class_embedding = None + + self.down_blocks = nn.ModuleList([]) + self.mid_block = None + self.up_blocks = nn.ModuleList([]) + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + in_channels=input_channel, + out_channels=output_channel, + temb_channels=time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel, + downsample_padding=downsample_padding, + resnet_time_scale_shift=resnet_time_scale_shift, + downsample_type=downsample_type, + dropout=dropout, + ) + self.down_blocks.append(down_block) + + # mid + self.mid_block = UNetMidBlock2D( + in_channels=block_out_channels[-1], + temb_channels=time_embed_dim, + dropout=dropout, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_head_dim=attention_head_dim if attention_head_dim is not None else block_out_channels[-1], + resnet_groups=norm_num_groups, + attn_groups=attn_norm_num_groups, + add_attention=add_attention, + ) + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + is_final_block = i == len(block_out_channels) - 1 + + up_block = get_up_block( + up_block_type, + num_layers=layers_per_block + 1, + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=time_embed_dim, + add_upsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + 
resnet_groups=norm_num_groups, + attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel, + resnet_time_scale_shift=resnet_time_scale_shift, + upsample_type=upsample_type, + dropout=dropout, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) + self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=num_groups_out, eps=norm_eps) + self.conv_act = nn.SiLU() + self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1) + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + class_labels: Optional[torch.Tensor] = None, + return_dict: bool = True, + ) -> Union[UNet2DOutput, Tuple]: + r""" + The [`UNet2DModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor with the following shape `(batch, channel, height, width)`. + timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. + class_labels (`torch.FloatTensor`, *optional*, defaults to `None`): + Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple. + + Returns: + [`~models.unet_2d.UNet2DOutput`] or `tuple`: + If `return_dict` is True, an [`~models.unet_2d.UNet2DOutput`] is returned, otherwise a `tuple` is + returned where the first element is the sample tensor. + """ + # 0. center input if necessary + if self.config.center_input_sample: + sample = 2 * sample - 1.0 + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) + elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device) + + t_emb = self.time_proj(timesteps) + + # timesteps does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. + t_emb = t_emb.to(dtype=self.dtype) + emb = self.time_embedding(t_emb) + + if self.class_embedding is not None: + if class_labels is None: + raise ValueError("class_labels should be provided when doing class conditioning") + + if self.config.class_embed_type == "timestep": + class_labels = self.time_proj(class_labels) + + class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) + emb = emb + class_emb + elif self.class_embedding is None and class_labels is not None: + raise ValueError("class_embedding needs to be initialized in order to use class conditioning") + + # 2. pre-process + skip_sample = sample + sample = self.conv_in(sample) + + # 3. down + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "skip_conv"): + sample, res_samples, skip_sample = downsample_block( + hidden_states=sample, temb=emb, skip_sample=skip_sample + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb) + + down_block_res_samples += res_samples + + # 4. 
mid + sample = self.mid_block(sample, emb) + + # 5. up + skip_sample = None + for upsample_block in self.up_blocks: + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + if hasattr(upsample_block, "skip_conv"): + sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample) + else: + sample = upsample_block(sample, res_samples, emb) + + # 6. post-process + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + if skip_sample is not None: + sample += skip_sample + + if self.config.time_embedding_type == "fourier": + timesteps = timesteps.reshape((sample.shape[0], *([1] * len(sample.shape[1:])))) + sample = sample / timesteps + + if not return_dict: + return (sample,) + + return UNet2DOutput(sample=sample) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_blocks.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_blocks.py new file mode 100644 index 000000000..b9e9e63bb --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_blocks.py @@ -0,0 +1,3731 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
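+#
+# The blocks defined in this module are assembled by the `UNet2DModel` added in
+# `unet_2d.py` above. A minimal end-to-end usage sketch, kept as a comment for
+# illustration only; it assumes the default constructor arguments and a `sample_size`
+# that is a multiple of 2 ** (len(block_out_channels) - 1):
+#
+#     import torch
+#     from diffusers import UNet2DModel
+#
+#     model = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
+#     noisy = torch.randn(1, 3, 32, 32)   # (batch, channels, height, width)
+#     out = model(noisy, timestep=10)     # timestep may be an int, float, or tensor
+#     denoised = out.sample               # FloatTensor of shape (1, 3, 32, 32)
+#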
+from typing import Any, Dict, Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn + +from ...utils import deprecate, is_torch_version, logging +from ...utils.torch_utils import apply_freeu +from ..activations import get_activation +from ..attention_processor import Attention, AttnAddedKVProcessor, AttnAddedKVProcessor2_0 +from ..normalization import AdaGroupNorm +from ..resnet import ( + Downsample2D, + FirDownsample2D, + FirUpsample2D, + KDownsample2D, + KUpsample2D, + ResnetBlock2D, + ResnetBlockCondNorm2D, + Upsample2D, +) +from ..transformers.dual_transformer_2d import DualTransformer2DModel +from ..transformers.transformer_2d import Transformer2DModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def get_down_block( + down_block_type: str, + num_layers: int, + in_channels: int, + out_channels: int, + temb_channels: int, + add_downsample: bool, + resnet_eps: float, + resnet_act_fn: str, + transformer_layers_per_block: int = 1, + num_attention_heads: Optional[int] = None, + resnet_groups: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + downsample_padding: Optional[int] = None, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + attention_type: str = "default", + resnet_skip_time_act: bool = False, + resnet_out_scale_factor: float = 1.0, + cross_attention_norm: Optional[str] = None, + attention_head_dim: Optional[int] = None, + downsample_type: Optional[str] = None, + dropout: float = 0.0, +): + # If attn head dim is not defined, we default it to the number of heads + if attention_head_dim is None: + logger.warning( + f"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}." 
+ ) + attention_head_dim = num_attention_heads + + down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type + if down_block_type == "DownBlock2D": + return DownBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif down_block_type == "ResnetDownsampleBlock2D": + return ResnetDownsampleBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + skip_time_act=resnet_skip_time_act, + output_scale_factor=resnet_out_scale_factor, + ) + elif down_block_type == "AttnDownBlock2D": + if add_downsample is False: + downsample_type = None + else: + downsample_type = downsample_type or "conv" # default to 'conv' + return AttnDownBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + attention_head_dim=attention_head_dim, + resnet_time_scale_shift=resnet_time_scale_shift, + downsample_type=downsample_type, + ) + elif down_block_type == "CrossAttnDownBlock2D": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") + return CrossAttnDownBlock2D( + num_layers=num_layers, + transformer_layers_per_block=transformer_layers_per_block, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_type=attention_type, + ) + elif down_block_type == "SimpleCrossAttnDownBlock2D": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D") + return SimpleCrossAttnDownBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + cross_attention_dim=cross_attention_dim, + attention_head_dim=attention_head_dim, + resnet_time_scale_shift=resnet_time_scale_shift, + skip_time_act=resnet_skip_time_act, + output_scale_factor=resnet_out_scale_factor, + only_cross_attention=only_cross_attention, + cross_attention_norm=cross_attention_norm, + ) + elif down_block_type == "SkipDownBlock2D": + return SkipDownBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + add_downsample=add_downsample, + 
resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + downsample_padding=downsample_padding, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif down_block_type == "AttnSkipDownBlock2D": + return AttnSkipDownBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + attention_head_dim=attention_head_dim, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif down_block_type == "DownEncoderBlock2D": + return DownEncoderBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif down_block_type == "AttnDownEncoderBlock2D": + return AttnDownEncoderBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + attention_head_dim=attention_head_dim, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif down_block_type == "KDownBlock2D": + return KDownBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + ) + elif down_block_type == "KCrossAttnDownBlock2D": + return KCrossAttnDownBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + cross_attention_dim=cross_attention_dim, + attention_head_dim=attention_head_dim, + add_self_attention=True if not add_downsample else False, + ) + raise ValueError(f"{down_block_type} does not exist.") + + +def get_mid_block( + mid_block_type: str, + temb_channels: int, + in_channels: int, + resnet_eps: float, + resnet_act_fn: str, + resnet_groups: int, + output_scale_factor: float = 1.0, + transformer_layers_per_block: int = 1, + num_attention_heads: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + mid_block_only_cross_attention: bool = False, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + attention_type: str = "default", + resnet_skip_time_act: bool = False, + cross_attention_norm: Optional[str] = None, + attention_head_dim: Optional[int] = 1, + dropout: float = 0.0, +): + if mid_block_type == "UNetMidBlock2DCrossAttn": + return UNetMidBlock2DCrossAttn( + transformer_layers_per_block=transformer_layers_per_block, + in_channels=in_channels, + temb_channels=temb_channels, + dropout=dropout, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + output_scale_factor=output_scale_factor, + resnet_time_scale_shift=resnet_time_scale_shift, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + resnet_groups=resnet_groups, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + elif 
mid_block_type == "UNetMidBlock2DSimpleCrossAttn": + return UNetMidBlock2DSimpleCrossAttn( + in_channels=in_channels, + temb_channels=temb_channels, + dropout=dropout, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + output_scale_factor=output_scale_factor, + cross_attention_dim=cross_attention_dim, + attention_head_dim=attention_head_dim, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + skip_time_act=resnet_skip_time_act, + only_cross_attention=mid_block_only_cross_attention, + cross_attention_norm=cross_attention_norm, + ) + elif mid_block_type == "UNetMidBlock2D": + return UNetMidBlock2D( + in_channels=in_channels, + temb_channels=temb_channels, + dropout=dropout, + num_layers=0, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + output_scale_factor=output_scale_factor, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + add_attention=False, + ) + elif mid_block_type is None: + return None + else: + raise ValueError(f"unknown mid_block_type : {mid_block_type}") + + +def get_up_block( + up_block_type: str, + num_layers: int, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + add_upsample: bool, + resnet_eps: float, + resnet_act_fn: str, + resolution_idx: Optional[int] = None, + transformer_layers_per_block: int = 1, + num_attention_heads: Optional[int] = None, + resnet_groups: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + attention_type: str = "default", + resnet_skip_time_act: bool = False, + resnet_out_scale_factor: float = 1.0, + cross_attention_norm: Optional[str] = None, + attention_head_dim: Optional[int] = None, + upsample_type: Optional[str] = None, + dropout: float = 0.0, +) -> nn.Module: + # If attn head dim is not defined, we default it to the number of heads + if attention_head_dim is None: + logger.warning( + f"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}." 
+ ) + attention_head_dim = num_attention_heads + + up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type + if up_block_type == "UpBlock2D": + return UpBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + resolution_idx=resolution_idx, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif up_block_type == "ResnetUpsampleBlock2D": + return ResnetUpsampleBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + resolution_idx=resolution_idx, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + skip_time_act=resnet_skip_time_act, + output_scale_factor=resnet_out_scale_factor, + ) + elif up_block_type == "CrossAttnUpBlock2D": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D") + return CrossAttnUpBlock2D( + num_layers=num_layers, + transformer_layers_per_block=transformer_layers_per_block, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + resolution_idx=resolution_idx, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_type=attention_type, + ) + elif up_block_type == "SimpleCrossAttnUpBlock2D": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D") + return SimpleCrossAttnUpBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + resolution_idx=resolution_idx, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + cross_attention_dim=cross_attention_dim, + attention_head_dim=attention_head_dim, + resnet_time_scale_shift=resnet_time_scale_shift, + skip_time_act=resnet_skip_time_act, + output_scale_factor=resnet_out_scale_factor, + only_cross_attention=only_cross_attention, + cross_attention_norm=cross_attention_norm, + ) + elif up_block_type == "AttnUpBlock2D": + if add_upsample is False: + upsample_type = None + else: + upsample_type = upsample_type or "conv" # default to 'conv' + + return AttnUpBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + resolution_idx=resolution_idx, + dropout=dropout, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + attention_head_dim=attention_head_dim, + resnet_time_scale_shift=resnet_time_scale_shift, + upsample_type=upsample_type, + ) + elif up_block_type == "SkipUpBlock2D": + return 
SkipUpBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + resolution_idx=resolution_idx, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif up_block_type == "AttnSkipUpBlock2D": + return AttnSkipUpBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + resolution_idx=resolution_idx, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + attention_head_dim=attention_head_dim, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif up_block_type == "UpDecoderBlock2D": + return UpDecoderBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + resolution_idx=resolution_idx, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + temb_channels=temb_channels, + ) + elif up_block_type == "AttnUpDecoderBlock2D": + return AttnUpDecoderBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + resolution_idx=resolution_idx, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + attention_head_dim=attention_head_dim, + resnet_time_scale_shift=resnet_time_scale_shift, + temb_channels=temb_channels, + ) + elif up_block_type == "KUpBlock2D": + return KUpBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + resolution_idx=resolution_idx, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + ) + elif up_block_type == "KCrossAttnUpBlock2D": + return KCrossAttnUpBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + resolution_idx=resolution_idx, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + cross_attention_dim=cross_attention_dim, + attention_head_dim=attention_head_dim, + ) + + raise ValueError(f"{up_block_type} does not exist.") + + +class AutoencoderTinyBlock(nn.Module): + """ + Tiny Autoencoder block used in [`AutoencoderTiny`]. It is a mini residual module consisting of plain conv + ReLU + blocks. + + Args: + in_channels (`int`): The number of input channels. + out_channels (`int`): The number of output channels. + act_fn (`str`): + ` The activation function to use. Supported values are `"swish"`, `"mish"`, `"gelu"`, and `"relu"`. + + Returns: + `torch.FloatTensor`: A tensor with the same shape as the input tensor, but with the number of channels equal to + `out_channels`. 
+ """ + + def __init__(self, in_channels: int, out_channels: int, act_fn: str): + super().__init__() + act_fn = get_activation(act_fn) + self.conv = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), + act_fn, + nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), + act_fn, + nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), + ) + self.skip = ( + nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) + if in_channels != out_channels + else nn.Identity() + ) + self.fuse = nn.ReLU() + + def forward(self, x: torch.FloatTensor) -> torch.FloatTensor: + return self.fuse(self.conv(x) + self.skip(x)) + + +class UNetMidBlock2D(nn.Module): + """ + A 2D UNet mid-block [`UNetMidBlock2D`] with multiple residual blocks and optional attention blocks. + + Args: + in_channels (`int`): The number of input channels. + temb_channels (`int`): The number of temporal embedding channels. + dropout (`float`, *optional*, defaults to 0.0): The dropout rate. + num_layers (`int`, *optional*, defaults to 1): The number of residual blocks. + resnet_eps (`float`, *optional*, 1e-6 ): The epsilon value for the resnet blocks. + resnet_time_scale_shift (`str`, *optional*, defaults to `default`): + The type of normalization to apply to the time embeddings. This can help to improve the performance of the + model on tasks with long-range temporal dependencies. + resnet_act_fn (`str`, *optional*, defaults to `swish`): The activation function for the resnet blocks. + resnet_groups (`int`, *optional*, defaults to 32): + The number of groups to use in the group normalization layers of the resnet blocks. + attn_groups (`Optional[int]`, *optional*, defaults to None): The number of groups for the attention blocks. + resnet_pre_norm (`bool`, *optional*, defaults to `True`): + Whether to use pre-normalization for the resnet blocks. + add_attention (`bool`, *optional*, defaults to `True`): Whether to add attention blocks. + attention_head_dim (`int`, *optional*, defaults to 1): + Dimension of a single attention head. The number of attention heads is determined based on this value and + the number of input channels. + output_scale_factor (`float`, *optional*, defaults to 1.0): The output scale factor. + + Returns: + `torch.FloatTensor`: The output of the last residual block, which is a tensor of shape `(batch_size, + in_channels, height, width)`. 
+ + """ + + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", # default, spatial + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + attn_groups: Optional[int] = None, + resnet_pre_norm: bool = True, + add_attention: bool = True, + attention_head_dim: int = 1, + output_scale_factor: float = 1.0, + ): + super().__init__() + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + self.add_attention = add_attention + + if attn_groups is None: + attn_groups = resnet_groups if resnet_time_scale_shift == "default" else None + + # there is always at least one resnet + if resnet_time_scale_shift == "spatial": + resnets = [ + ResnetBlockCondNorm2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm="spatial", + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + ) + ] + else: + resnets = [ + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ] + attentions = [] + + if attention_head_dim is None: + logger.warning( + f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}." + ) + attention_head_dim = in_channels + + for _ in range(num_layers): + if self.add_attention: + attentions.append( + Attention( + in_channels, + heads=in_channels // attention_head_dim, + dim_head=attention_head_dim, + rescale_output_factor=output_scale_factor, + eps=resnet_eps, + norm_num_groups=attn_groups, + spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None, + residual_connection=True, + bias=True, + upcast_softmax=True, + _from_deprecated_attn_block=True, + ) + ) + else: + attentions.append(None) + + if resnet_time_scale_shift == "spatial": + resnets.append( + ResnetBlockCondNorm2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm="spatial", + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + ) + ) + else: + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + hidden_states = self.resnets[0](hidden_states, temb) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + if attn is not None: + hidden_states = attn(hidden_states, temb=temb) + hidden_states = resnet(hidden_states, temb) + + return hidden_states + + +class UNetMidBlock2DCrossAttn(nn.Module): + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + 
resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + output_scale_factor: float = 1.0, + cross_attention_dim: int = 1280, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + upcast_attention: bool = False, + attention_type: str = "default", + ): + super().__init__() + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + + # support for variable transformer layers per block + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * num_layers + + # there is always at least one resnet + resnets = [ + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ] + attentions = [] + + for i in range(num_layers): + if not dual_cross_attention: + attentions.append( + Transformer2DModel( + num_attention_heads, + in_channels // num_attention_heads, + in_channels=in_channels, + num_layers=transformer_layers_per_block[i], + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + ) + else: + attentions.append( + DualTransformer2DModel( + num_attention_heads, + in_channels // num_attention_heads, + in_channels=in_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + if cross_attention_kwargs is not None: + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. 
`scale` will be ignored.") + + hidden_states = self.resnets[0](hidden_states, temb) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + else: + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + hidden_states = resnet(hidden_states, temb) + + return hidden_states + + +class UNetMidBlock2DSimpleCrossAttn(nn.Module): + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + attention_head_dim: int = 1, + output_scale_factor: float = 1.0, + cross_attention_dim: int = 1280, + skip_time_act: bool = False, + only_cross_attention: bool = False, + cross_attention_norm: Optional[str] = None, + ): + super().__init__() + + self.has_cross_attention = True + + self.attention_head_dim = attention_head_dim + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + + self.num_heads = in_channels // self.attention_head_dim + + # there is always at least one resnet + resnets = [ + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + ) + ] + attentions = [] + + for _ in range(num_layers): + processor = ( + AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor() + ) + + attentions.append( + Attention( + query_dim=in_channels, + cross_attention_dim=in_channels, + heads=self.num_heads, + dim_head=self.attention_head_dim, + added_kv_proj_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + bias=True, + upcast_softmax=True, + only_cross_attention=only_cross_attention, + cross_attention_norm=cross_attention_norm, + processor=processor, + ) + ) + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: 
Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. `scale` will be ignored.") + + if attention_mask is None: + # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask. + mask = None if encoder_hidden_states is None else encoder_attention_mask + else: + # when attention_mask is defined: we don't even check for encoder_attention_mask. + # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks. + # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. + # then we can simplify this whole if/else block to: + # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask + mask = attention_mask + + hidden_states = self.resnets[0](hidden_states, temb) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + # attn + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=mask, + **cross_attention_kwargs, + ) + + # resnet + hidden_states = resnet(hidden_states, temb) + + return hidden_states + + +class AttnDownBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + attention_head_dim: int = 1, + output_scale_factor: float = 1.0, + downsample_padding: int = 1, + downsample_type: str = "conv", + ): + super().__init__() + resnets = [] + attentions = [] + self.downsample_type = downsample_type + + if attention_head_dim is None: + logger.warning( + f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." 
+ ) + attention_head_dim = out_channels + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + attentions.append( + Attention( + out_channels, + heads=out_channels // attention_head_dim, + dim_head=attention_head_dim, + rescale_output_factor=output_scale_factor, + eps=resnet_eps, + norm_num_groups=resnet_groups, + residual_connection=True, + bias=True, + upcast_softmax=True, + _from_deprecated_attn_block=True, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if downsample_type == "conv": + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" + ) + ] + ) + elif downsample_type == "resnet": + self.downsamplers = nn.ModuleList( + [ + ResnetBlock2D( + in_channels=out_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + down=True, + ) + ] + ) + else: + self.downsamplers = None + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + upsample_size: Optional[int] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. 
`scale` will be ignored.") + + output_states = () + + for resnet, attn in zip(self.resnets, self.attentions): + hidden_states = resnet(hidden_states, temb) + hidden_states = attn(hidden_states, **cross_attention_kwargs) + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + if self.downsample_type == "resnet": + hidden_states = downsampler(hidden_states, temb=temb) + else: + hidden_states = downsampler(hidden_states) + + output_states += (hidden_states,) + + return hidden_states, output_states + + +class CrossAttnDownBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + output_scale_factor: float = 1.0, + downsample_padding: int = 1, + add_downsample: bool = True, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + attention_type: str = "default", + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * num_layers + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + if not dual_cross_attention: + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block[i], + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + ) + else: + attentions.append( + DualTransformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + additional_residuals: Optional[torch.FloatTensor] = None, + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + if 
cross_attention_kwargs is not None: + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. `scale` will be ignored.") + + output_states = () + + blocks = list(zip(self.resnets, self.attentions)) + + for i, (resnet, attn) in enumerate(blocks): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + else: + hidden_states = resnet(hidden_states, temb) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + + # apply additional residuals to the output of the last pair of resnet and attention blocks + if i == len(blocks) - 1 and additional_residuals is not None: + hidden_states = hidden_states + additional_residuals + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +class DownBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_downsample: bool = True, + downsample_padding: int = 1, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, *args, **kwargs + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. 
`scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + + output_states = () + + for resnet in self.resnets: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb, use_reentrant=False + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb) + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +class DownEncoderBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_downsample: bool = True, + downsample_padding: int = 1, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + if resnet_time_scale_shift == "spatial": + resnets.append( + ResnetBlockCondNorm2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=None, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm="spatial", + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + ) + ) + else: + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=None, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" + ) + ] + ) + else: + self.downsamplers = None + + def forward(self, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
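+        # DownEncoderBlock2D carries no time embedding: its resnets are built with `temb_channels=None`
+        # and are invoked with `temb=None` in the loop below.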
+ deprecate("scale", "1.0.0", deprecation_message) + + for resnet in self.resnets: + hidden_states = resnet(hidden_states, temb=None) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + return hidden_states + + +class AttnDownEncoderBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + attention_head_dim: int = 1, + output_scale_factor: float = 1.0, + add_downsample: bool = True, + downsample_padding: int = 1, + ): + super().__init__() + resnets = [] + attentions = [] + + if attention_head_dim is None: + logger.warning( + f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." + ) + attention_head_dim = out_channels + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + if resnet_time_scale_shift == "spatial": + resnets.append( + ResnetBlockCondNorm2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=None, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm="spatial", + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + ) + ) + else: + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=None, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + attentions.append( + Attention( + out_channels, + heads=out_channels // attention_head_dim, + dim_head=attention_head_dim, + rescale_output_factor=output_scale_factor, + eps=resnet_eps, + norm_num_groups=resnet_groups, + residual_connection=True, + bias=True, + upcast_softmax=True, + _from_deprecated_attn_block=True, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" + ) + ] + ) + else: + self.downsamplers = None + + def forward(self, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
+ deprecate("scale", "1.0.0", deprecation_message) + + for resnet, attn in zip(self.resnets, self.attentions): + hidden_states = resnet(hidden_states, temb=None) + hidden_states = attn(hidden_states) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + return hidden_states + + +class AttnSkipDownBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_pre_norm: bool = True, + attention_head_dim: int = 1, + output_scale_factor: float = np.sqrt(2.0), + add_downsample: bool = True, + ): + super().__init__() + self.attentions = nn.ModuleList([]) + self.resnets = nn.ModuleList([]) + + if attention_head_dim is None: + logger.warning( + f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." + ) + attention_head_dim = out_channels + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + self.resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=min(in_channels // 4, 32), + groups_out=min(out_channels // 4, 32), + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + self.attentions.append( + Attention( + out_channels, + heads=out_channels // attention_head_dim, + dim_head=attention_head_dim, + rescale_output_factor=output_scale_factor, + eps=resnet_eps, + norm_num_groups=32, + residual_connection=True, + bias=True, + upcast_softmax=True, + _from_deprecated_attn_block=True, + ) + ) + + if add_downsample: + self.resnet_down = ResnetBlock2D( + in_channels=out_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=min(out_channels // 4, 32), + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + use_in_shortcut=True, + down=True, + kernel="fir", + ) + self.downsamplers = nn.ModuleList([FirDownsample2D(out_channels, out_channels=out_channels)]) + self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) + else: + self.resnet_down = None + self.downsamplers = None + self.skip_conv = None + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + skip_sample: Optional[torch.FloatTensor] = None, + *args, + **kwargs, + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...], torch.FloatTensor]: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
+ deprecate("scale", "1.0.0", deprecation_message) + + output_states = () + + for resnet, attn in zip(self.resnets, self.attentions): + hidden_states = resnet(hidden_states, temb) + hidden_states = attn(hidden_states) + output_states += (hidden_states,) + + if self.downsamplers is not None: + hidden_states = self.resnet_down(hidden_states, temb) + for downsampler in self.downsamplers: + skip_sample = downsampler(skip_sample) + + hidden_states = self.skip_conv(skip_sample) + hidden_states + + output_states += (hidden_states,) + + return hidden_states, output_states, skip_sample + + +class SkipDownBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_pre_norm: bool = True, + output_scale_factor: float = np.sqrt(2.0), + add_downsample: bool = True, + downsample_padding: int = 1, + ): + super().__init__() + self.resnets = nn.ModuleList([]) + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + self.resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=min(in_channels // 4, 32), + groups_out=min(out_channels // 4, 32), + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + if add_downsample: + self.resnet_down = ResnetBlock2D( + in_channels=out_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=min(out_channels // 4, 32), + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + use_in_shortcut=True, + down=True, + kernel="fir", + ) + self.downsamplers = nn.ModuleList([FirDownsample2D(out_channels, out_channels=out_channels)]) + self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) + else: + self.resnet_down = None + self.downsamplers = None + self.skip_conv = None + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + skip_sample: Optional[torch.FloatTensor] = None, + *args, + **kwargs, + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...], torch.FloatTensor]: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
+ deprecate("scale", "1.0.0", deprecation_message) + + output_states = () + + for resnet in self.resnets: + hidden_states = resnet(hidden_states, temb) + output_states += (hidden_states,) + + if self.downsamplers is not None: + hidden_states = self.resnet_down(hidden_states, temb) + for downsampler in self.downsamplers: + skip_sample = downsampler(skip_sample) + + hidden_states = self.skip_conv(skip_sample) + hidden_states + + output_states += (hidden_states,) + + return hidden_states, output_states, skip_sample + + +class ResnetDownsampleBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_downsample: bool = True, + skip_time_act: bool = False, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + ResnetBlock2D( + in_channels=out_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + down=True, + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, *args, **kwargs + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
+ deprecate("scale", "1.0.0", deprecation_message) + + output_states = () + + for resnet in self.resnets: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb, use_reentrant=False + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb) + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states, temb) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +class SimpleCrossAttnDownBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + attention_head_dim: int = 1, + cross_attention_dim: int = 1280, + output_scale_factor: float = 1.0, + add_downsample: bool = True, + skip_time_act: bool = False, + only_cross_attention: bool = False, + cross_attention_norm: Optional[str] = None, + ): + super().__init__() + + self.has_cross_attention = True + + resnets = [] + attentions = [] + + self.attention_head_dim = attention_head_dim + self.num_heads = out_channels // self.attention_head_dim + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + ) + ) + + processor = ( + AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor() + ) + + attentions.append( + Attention( + query_dim=out_channels, + cross_attention_dim=out_channels, + heads=self.num_heads, + dim_head=attention_head_dim, + added_kv_proj_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + bias=True, + upcast_softmax=True, + only_cross_attention=only_cross_attention, + cross_attention_norm=cross_attention_norm, + processor=processor, + ) + ) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + ResnetBlock2D( + in_channels=out_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + down=True, + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + 
encoder_attention_mask: Optional[torch.FloatTensor] = None, + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. `scale` will be ignored.") + + output_states = () + + if attention_mask is None: + # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask. + mask = None if encoder_hidden_states is None else encoder_attention_mask + else: + # when attention_mask is defined: we don't even check for encoder_attention_mask. + # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks. + # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. + # then we can simplify this whole if/else block to: + # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask + mask = attention_mask + + for resnet, attn in zip(self.resnets, self.attentions): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=mask, + **cross_attention_kwargs, + ) + else: + hidden_states = resnet(hidden_states, temb) + + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=mask, + **cross_attention_kwargs, + ) + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states, temb) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +class KDownBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 4, + resnet_eps: float = 1e-5, + resnet_act_fn: str = "gelu", + resnet_group_size: int = 32, + add_downsample: bool = False, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + groups = in_channels // resnet_group_size + groups_out = out_channels // resnet_group_size + + resnets.append( + ResnetBlockCondNorm2D( + in_channels=in_channels, + out_channels=out_channels, + dropout=dropout, + temb_channels=temb_channels, + groups=groups, + groups_out=groups_out, + eps=resnet_eps, + non_linearity=resnet_act_fn, + time_embedding_norm="ada_group", + conv_shortcut_bias=False, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + # YiYi's comments- might be able to use FirDownsample2D, look into details later + self.downsamplers = nn.ModuleList([KDownsample2D()]) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, *args, **kwargs + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is 
deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + + output_states = () + + for resnet in self.resnets: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb, use_reentrant=False + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb) + + output_states += (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + return hidden_states, output_states + + +class KCrossAttnDownBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + cross_attention_dim: int, + dropout: float = 0.0, + num_layers: int = 4, + resnet_group_size: int = 32, + add_downsample: bool = True, + attention_head_dim: int = 64, + add_self_attention: bool = False, + resnet_eps: float = 1e-5, + resnet_act_fn: str = "gelu", + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + groups = in_channels // resnet_group_size + groups_out = out_channels // resnet_group_size + + resnets.append( + ResnetBlockCondNorm2D( + in_channels=in_channels, + out_channels=out_channels, + dropout=dropout, + temb_channels=temb_channels, + groups=groups, + groups_out=groups_out, + eps=resnet_eps, + non_linearity=resnet_act_fn, + time_embedding_norm="ada_group", + conv_shortcut_bias=False, + ) + ) + attentions.append( + KAttentionBlock( + out_channels, + out_channels // attention_head_dim, + attention_head_dim, + cross_attention_dim=cross_attention_dim, + temb_channels=temb_channels, + attention_bias=True, + add_self_attention=add_self_attention, + cross_attention_norm="layer_norm", + group_size=resnet_group_size, + ) + ) + + self.resnets = nn.ModuleList(resnets) + self.attentions = nn.ModuleList(attentions) + + if add_downsample: + self.downsamplers = nn.ModuleList([KDownsample2D()]) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. 
`scale` will be ignored.")
+
+        output_states = ()
+
+        for resnet, attn in zip(self.resnets, self.attentions):
+            if self.training and self.gradient_checkpointing:
+
+                def create_custom_forward(module, return_dict=None):
+                    def custom_forward(*inputs):
+                        if return_dict is not None:
+                            return module(*inputs, return_dict=return_dict)
+                        else:
+                            return module(*inputs)
+
+                    return custom_forward
+
+                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
+                hidden_states = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(resnet),
+                    hidden_states,
+                    temb,
+                    **ckpt_kwargs,
+                )
+                hidden_states = attn(
+                    hidden_states,
+                    encoder_hidden_states=encoder_hidden_states,
+                    emb=temb,
+                    attention_mask=attention_mask,
+                    cross_attention_kwargs=cross_attention_kwargs,
+                    encoder_attention_mask=encoder_attention_mask,
+                )
+            else:
+                hidden_states = resnet(hidden_states, temb)
+                hidden_states = attn(
+                    hidden_states,
+                    encoder_hidden_states=encoder_hidden_states,
+                    emb=temb,
+                    attention_mask=attention_mask,
+                    cross_attention_kwargs=cross_attention_kwargs,
+                    encoder_attention_mask=encoder_attention_mask,
+                )
+
+            if self.downsamplers is None:
+                output_states += (None,)
+            else:
+                output_states += (hidden_states,)
+
+        if self.downsamplers is not None:
+            for downsampler in self.downsamplers:
+                hidden_states = downsampler(hidden_states)
+
+        return hidden_states, output_states
+
+
+class AttnUpBlock2D(nn.Module):
+    def __init__(
+        self,
+        in_channels: int,
+        prev_output_channel: int,
+        out_channels: int,
+        temb_channels: int,
+        resolution_idx: int = None,
+        dropout: float = 0.0,
+        num_layers: int = 1,
+        resnet_eps: float = 1e-6,
+        resnet_time_scale_shift: str = "default",
+        resnet_act_fn: str = "swish",
+        resnet_groups: int = 32,
+        resnet_pre_norm: bool = True,
+        attention_head_dim: int = 1,
+        output_scale_factor: float = 1.0,
+        upsample_type: str = "conv",
+    ):
+        super().__init__()
+        resnets = []
+        attentions = []
+
+        self.upsample_type = upsample_type
+
+        if attention_head_dim is None:
+            logger.warning(
+                f"It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}."
+ ) + attention_head_dim = out_channels + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + attentions.append( + Attention( + out_channels, + heads=out_channels // attention_head_dim, + dim_head=attention_head_dim, + rescale_output_factor=output_scale_factor, + eps=resnet_eps, + norm_num_groups=resnet_groups, + residual_connection=True, + bias=True, + upcast_softmax=True, + _from_deprecated_attn_block=True, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if upsample_type == "conv": + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + elif upsample_type == "resnet": + self.upsamplers = nn.ModuleList( + [ + ResnetBlock2D( + in_channels=out_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + up=True, + ) + ] + ) + else: + self.upsamplers = None + + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + upsample_size: Optional[int] = None, + *args, + **kwargs, + ) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
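+        # Each iteration pops the matching skip connection from `res_hidden_states_tuple` and concatenates it
+        # along the channel axis before the resnet/attention pair; `upsample_type` then selects a conv or resnet upsampler.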
+ deprecate("scale", "1.0.0", deprecation_message) + + for resnet, attn in zip(self.resnets, self.attentions): + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + hidden_states = resnet(hidden_states, temb) + hidden_states = attn(hidden_states) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + if self.upsample_type == "resnet": + hidden_states = upsampler(hidden_states, temb=temb) + else: + hidden_states = upsampler(hidden_states) + + return hidden_states + + +class CrossAttnUpBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + attention_type: str = "default", + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * num_layers + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + if not dual_cross_attention: + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block[i], + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + ) + else: + attentions.append( + DualTransformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, 
+ upsample_size: Optional[int] = None, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + if cross_attention_kwargs is not None: + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. `scale` will be ignored.") + + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + + for resnet, attn in zip(self.resnets, self.attentions): + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + else: + hidden_states = resnet(hidden_states, temb) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size) + + return hidden_states + + +class UpBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + prev_output_channel: int, + out_channels: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing 
= False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + upsample_size: Optional[int] = None, + *args, + **kwargs, + ) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + + for resnet in self.resnets: + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb, use_reentrant=False + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size) + + return hidden_states + + +class UpDecoderBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", # default, spatial + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + temb_channels: Optional[int] = None, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + input_channels = in_channels if i == 0 else out_channels + + if resnet_time_scale_shift == "spatial": + resnets.append( + ResnetBlockCondNorm2D( + in_channels=input_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm="spatial", + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + ) + ) + else: + resnets.append( + ResnetBlock2D( + in_channels=input_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.resolution_idx = resolution_idx + + def 
forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + for resnet in self.resnets: + hidden_states = resnet(hidden_states, temb=temb) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states) + + return hidden_states + + +class AttnUpDecoderBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + attention_head_dim: int = 1, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + temb_channels: Optional[int] = None, + ): + super().__init__() + resnets = [] + attentions = [] + + if attention_head_dim is None: + logger.warning( + f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `out_channels`: {out_channels}." + ) + attention_head_dim = out_channels + + for i in range(num_layers): + input_channels = in_channels if i == 0 else out_channels + + if resnet_time_scale_shift == "spatial": + resnets.append( + ResnetBlockCondNorm2D( + in_channels=input_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm="spatial", + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + ) + ) + else: + resnets.append( + ResnetBlock2D( + in_channels=input_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + attentions.append( + Attention( + out_channels, + heads=out_channels // attention_head_dim, + dim_head=attention_head_dim, + rescale_output_factor=output_scale_factor, + eps=resnet_eps, + norm_num_groups=resnet_groups if resnet_time_scale_shift != "spatial" else None, + spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None, + residual_connection=True, + bias=True, + upcast_softmax=True, + _from_deprecated_attn_block=True, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.resolution_idx = resolution_idx + + def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + for resnet, attn in zip(self.resnets, self.attentions): + hidden_states = resnet(hidden_states, temb=temb) + hidden_states = attn(hidden_states, temb=temb) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states) + + return hidden_states + + +class AttnSkipUpBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + prev_output_channel: int, + out_channels: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_pre_norm: bool = True, + attention_head_dim: int = 1, + output_scale_factor: float = np.sqrt(2.0), + add_upsample: bool = 
True, + ): + super().__init__() + self.attentions = nn.ModuleList([]) + self.resnets = nn.ModuleList([]) + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + self.resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=min(resnet_in_channels + res_skip_channels // 4, 32), + groups_out=min(out_channels // 4, 32), + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + if attention_head_dim is None: + logger.warning( + f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `out_channels`: {out_channels}." + ) + attention_head_dim = out_channels + + self.attentions.append( + Attention( + out_channels, + heads=out_channels // attention_head_dim, + dim_head=attention_head_dim, + rescale_output_factor=output_scale_factor, + eps=resnet_eps, + norm_num_groups=32, + residual_connection=True, + bias=True, + upcast_softmax=True, + _from_deprecated_attn_block=True, + ) + ) + + self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels) + if add_upsample: + self.resnet_up = ResnetBlock2D( + in_channels=out_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=min(out_channels // 4, 32), + groups_out=min(out_channels // 4, 32), + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + use_in_shortcut=True, + up=True, + kernel="fir", + ) + self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.skip_norm = torch.nn.GroupNorm( + num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True + ) + self.act = nn.SiLU() + else: + self.resnet_up = None + self.skip_conv = None + self.skip_norm = None + self.act = None + + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + skip_sample=None, + *args, + **kwargs, + ) -> Tuple[torch.FloatTensor, torch.FloatTensor]: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
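+        # A single Attention layer (`self.attentions[0]`) runs once after all resnets; the block output is then
+        # normalized, projected to 3 channels by `skip_conv`, and added to the FIR-upsampled `skip_sample`.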
+ deprecate("scale", "1.0.0", deprecation_message) + + for resnet in self.resnets: + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + hidden_states = resnet(hidden_states, temb) + + hidden_states = self.attentions[0](hidden_states) + + if skip_sample is not None: + skip_sample = self.upsampler(skip_sample) + else: + skip_sample = 0 + + if self.resnet_up is not None: + skip_sample_states = self.skip_norm(hidden_states) + skip_sample_states = self.act(skip_sample_states) + skip_sample_states = self.skip_conv(skip_sample_states) + + skip_sample = skip_sample + skip_sample_states + + hidden_states = self.resnet_up(hidden_states, temb) + + return hidden_states, skip_sample + + +class SkipUpBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + prev_output_channel: int, + out_channels: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_pre_norm: bool = True, + output_scale_factor: float = np.sqrt(2.0), + add_upsample: bool = True, + upsample_padding: int = 1, + ): + super().__init__() + self.resnets = nn.ModuleList([]) + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + self.resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=min((resnet_in_channels + res_skip_channels) // 4, 32), + groups_out=min(out_channels // 4, 32), + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels) + if add_upsample: + self.resnet_up = ResnetBlock2D( + in_channels=out_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=min(out_channels // 4, 32), + groups_out=min(out_channels // 4, 32), + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + use_in_shortcut=True, + up=True, + kernel="fir", + ) + self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.skip_norm = torch.nn.GroupNorm( + num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True + ) + self.act = nn.SiLU() + else: + self.resnet_up = None + self.skip_conv = None + self.skip_norm = None + self.act = None + + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + skip_sample=None, + *args, + **kwargs, + ) -> Tuple[torch.FloatTensor, torch.FloatTensor]: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
+ deprecate("scale", "1.0.0", deprecation_message) + + for resnet in self.resnets: + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + hidden_states = resnet(hidden_states, temb) + + if skip_sample is not None: + skip_sample = self.upsampler(skip_sample) + else: + skip_sample = 0 + + if self.resnet_up is not None: + skip_sample_states = self.skip_norm(hidden_states) + skip_sample_states = self.act(skip_sample_states) + skip_sample_states = self.skip_conv(skip_sample_states) + + skip_sample = skip_sample + skip_sample_states + + hidden_states = self.resnet_up(hidden_states, temb) + + return hidden_states, skip_sample + + +class ResnetUpsampleBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + prev_output_channel: int, + out_channels: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + skip_time_act: bool = False, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList( + [ + ResnetBlock2D( + in_channels=out_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + up=True, + ) + ] + ) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + upsample_size: Optional[int] = None, + *args, + **kwargs, + ) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
+ deprecate("scale", "1.0.0", deprecation_message) + + for resnet in self.resnets: + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb, use_reentrant=False + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, temb) + + return hidden_states + + +class SimpleCrossAttnUpBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + attention_head_dim: int = 1, + cross_attention_dim: int = 1280, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + skip_time_act: bool = False, + only_cross_attention: bool = False, + cross_attention_norm: Optional[str] = None, + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.attention_head_dim = attention_head_dim + + self.num_heads = out_channels // self.attention_head_dim + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + ) + ) + + processor = ( + AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor() + ) + + attentions.append( + Attention( + query_dim=out_channels, + cross_attention_dim=out_channels, + heads=self.num_heads, + dim_head=self.attention_head_dim, + added_kv_proj_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + bias=True, + upcast_softmax=True, + only_cross_attention=only_cross_attention, + cross_attention_norm=cross_attention_norm, + processor=processor, + ) + ) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList( + [ + ResnetBlock2D( + in_channels=out_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + up=True, + ) + ] + ) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + 
self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + upsample_size: Optional[int] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. `scale` will be ignored.") + + if attention_mask is None: + # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask. + mask = None if encoder_hidden_states is None else encoder_attention_mask + else: + # when attention_mask is defined: we don't even check for encoder_attention_mask. + # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks. + # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. + # then we can simplify this whole if/else block to: + # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask + mask = attention_mask + + for resnet, attn in zip(self.resnets, self.attentions): + # resnet + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=mask, + **cross_attention_kwargs, + ) + else: + hidden_states = resnet(hidden_states, temb) + + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=mask, + **cross_attention_kwargs, + ) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, temb) + + return hidden_states + + +class KUpBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + resolution_idx: int, + dropout: float = 0.0, + num_layers: int = 5, + resnet_eps: float = 1e-5, + resnet_act_fn: str = "gelu", + resnet_group_size: Optional[int] = 32, + add_upsample: bool = True, + ): + super().__init__() + resnets = [] + k_in_channels = 2 * out_channels + k_out_channels = in_channels + num_layers = num_layers - 1 + + for i in range(num_layers): + in_channels = k_in_channels if i == 0 else out_channels + groups = in_channels // resnet_group_size + groups_out = out_channels // resnet_group_size + + resnets.append( + ResnetBlockCondNorm2D( + in_channels=in_channels, + out_channels=k_out_channels if (i == num_layers - 1) else out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=groups, + groups_out=groups_out, + dropout=dropout, + non_linearity=resnet_act_fn, + time_embedding_norm="ada_group", + conv_shortcut_bias=False, + ) + ) + + self.resnets = 
nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([KUpsample2D()]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + upsample_size: Optional[int] = None, + *args, + **kwargs, + ) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + + res_hidden_states_tuple = res_hidden_states_tuple[-1] + if res_hidden_states_tuple is not None: + hidden_states = torch.cat([hidden_states, res_hidden_states_tuple], dim=1) + + for resnet in self.resnets: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb, use_reentrant=False + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states) + + return hidden_states + + +class KCrossAttnUpBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + resolution_idx: int, + dropout: float = 0.0, + num_layers: int = 4, + resnet_eps: float = 1e-5, + resnet_act_fn: str = "gelu", + resnet_group_size: int = 32, + attention_head_dim: int = 1, # attention dim_head + cross_attention_dim: int = 768, + add_upsample: bool = True, + upcast_attention: bool = False, + ): + super().__init__() + resnets = [] + attentions = [] + + is_first_block = in_channels == out_channels == temb_channels + is_middle_block = in_channels != out_channels + add_self_attention = True if is_first_block else False + + self.has_cross_attention = True + self.attention_head_dim = attention_head_dim + + # in_channels, and out_channels for the block (k-unet) + k_in_channels = out_channels if is_first_block else 2 * out_channels + k_out_channels = in_channels + + num_layers = num_layers - 1 + + for i in range(num_layers): + in_channels = k_in_channels if i == 0 else out_channels + groups = in_channels // resnet_group_size + groups_out = out_channels // resnet_group_size + + if is_middle_block and (i == num_layers - 1): + conv_2d_out_channels = k_out_channels + else: + conv_2d_out_channels = None + + resnets.append( + ResnetBlockCondNorm2D( + in_channels=in_channels, + out_channels=out_channels, + conv_2d_out_channels=conv_2d_out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=groups, + groups_out=groups_out, + dropout=dropout, + non_linearity=resnet_act_fn, + time_embedding_norm="ada_group", + conv_shortcut_bias=False, + ) + ) + attentions.append( + KAttentionBlock( + k_out_channels if (i == num_layers - 1) else out_channels, + k_out_channels // attention_head_dim + if (i == num_layers - 1) + else out_channels // attention_head_dim, + attention_head_dim, + 
cross_attention_dim=cross_attention_dim, + temb_channels=temb_channels, + attention_bias=True, + add_self_attention=add_self_attention, + cross_attention_norm="layer_norm", + upcast_attention=upcast_attention, + ) + ) + + self.resnets = nn.ModuleList(resnets) + self.attentions = nn.ModuleList(attentions) + + if add_upsample: + self.upsamplers = nn.ModuleList([KUpsample2D()]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + upsample_size: Optional[int] = None, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + res_hidden_states_tuple = res_hidden_states_tuple[-1] + if res_hidden_states_tuple is not None: + hidden_states = torch.cat([hidden_states, res_hidden_states_tuple], dim=1) + + for resnet, attn in zip(self.resnets, self.attentions): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + emb=temb, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + ) + else: + hidden_states = resnet(hidden_states, temb) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + emb=temb, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + ) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states) + + return hidden_states + + +# can potentially later be renamed to `No-feed-forward` attention +class KAttentionBlock(nn.Module): + r""" + A basic Transformer block. + + Parameters: + dim (`int`): The number of channels in the input and output. + num_attention_heads (`int`): The number of heads to use for multi-head attention. + attention_head_dim (`int`): The number of channels in each head. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. + attention_bias (`bool`, *optional*, defaults to `False`): + Configure if the attention layers should contain a bias parameter. + upcast_attention (`bool`, *optional*, defaults to `False`): + Set to `True` to upcast the attention computation to `float32`. + temb_channels (`int`, *optional*, defaults to 768): + The number of channels in the token embedding. + add_self_attention (`bool`, *optional*, defaults to `False`): + Set to `True` to add self-attention to the block. + cross_attention_norm (`str`, *optional*, defaults to `None`): + The type of normalization to use for the cross attention. 
Can be `None`, `layer_norm`, or `group_norm`. + group_size (`int`, *optional*, defaults to 32): + The number of groups to separate the channels into for group normalization. + """ + + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + dropout: float = 0.0, + cross_attention_dim: Optional[int] = None, + attention_bias: bool = False, + upcast_attention: bool = False, + temb_channels: int = 768, # for ada_group_norm + add_self_attention: bool = False, + cross_attention_norm: Optional[str] = None, + group_size: int = 32, + ): + super().__init__() + self.add_self_attention = add_self_attention + + # 1. Self-Attn + if add_self_attention: + self.norm1 = AdaGroupNorm(temb_channels, dim, max(1, dim // group_size)) + self.attn1 = Attention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + cross_attention_dim=None, + cross_attention_norm=None, + ) + + # 2. Cross-Attn + self.norm2 = AdaGroupNorm(temb_channels, dim, max(1, dim // group_size)) + self.attn2 = Attention( + query_dim=dim, + cross_attention_dim=cross_attention_dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + upcast_attention=upcast_attention, + cross_attention_norm=cross_attention_norm, + ) + + def _to_3d(self, hidden_states: torch.FloatTensor, height: int, weight: int) -> torch.FloatTensor: + return hidden_states.permute(0, 2, 3, 1).reshape(hidden_states.shape[0], height * weight, -1) + + def _to_4d(self, hidden_states: torch.FloatTensor, height: int, weight: int) -> torch.FloatTensor: + return hidden_states.permute(0, 2, 1).reshape(hidden_states.shape[0], -1, height, weight) + + def forward( + self, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + # TODO: mark emb as non-optional (self.norm2 requires it). + # requires assessing impact of change to positional param interface. + emb: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") + + # 1. Self-Attention + if self.add_self_attention: + norm_hidden_states = self.norm1(hidden_states, emb) + + height, weight = norm_hidden_states.shape[2:] + norm_hidden_states = self._to_3d(norm_hidden_states, height, weight) + + attn_output = self.attn1( + norm_hidden_states, + encoder_hidden_states=None, + attention_mask=attention_mask, + **cross_attention_kwargs, + ) + attn_output = self._to_4d(attn_output, height, weight) + + hidden_states = attn_output + hidden_states + + # 2. 
Cross-Attention/None + norm_hidden_states = self.norm2(hidden_states, emb) + + height, weight = norm_hidden_states.shape[2:] + norm_hidden_states = self._to_3d(norm_hidden_states, height, weight) + attn_output = self.attn2( + norm_hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask if encoder_hidden_states is None else encoder_attention_mask, + **cross_attention_kwargs, + ) + attn_output = self._to_4d(attn_output, height, weight) + + hidden_states = attn_output + hidden_states + + return hidden_states diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_blocks_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_blocks_flax.py new file mode 100644 index 000000000..a4585dbc8 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_blocks_flax.py @@ -0,0 +1,400 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import flax.linen as nn +import jax.numpy as jnp + +from ..attention_flax import FlaxTransformer2DModel +from ..resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D + + +class FlaxCrossAttnDownBlock2D(nn.Module): + r""" + Cross Attention 2D Downsizing block - original architecture from Unet transformers: + https://arxiv.org/abs/2103.06104 + + Parameters: + in_channels (:obj:`int`): + Input channels + out_channels (:obj:`int`): + Output channels + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + num_layers (:obj:`int`, *optional*, defaults to 1): + Number of attention blocks layers + num_attention_heads (:obj:`int`, *optional*, defaults to 1): + Number of attention heads of each spatial transformer block + add_downsample (:obj:`bool`, *optional*, defaults to `True`): + Whether to add downsampling layer before each final output + use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): + enable memory efficient attention https://arxiv.org/abs/2112.05682 + split_head_dim (`bool`, *optional*, defaults to `False`): + Whether to split the head dimension into a new axis for the self-attention computation. In most cases, + enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. 
+ dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + in_channels: int + out_channels: int + dropout: float = 0.0 + num_layers: int = 1 + num_attention_heads: int = 1 + add_downsample: bool = True + use_linear_projection: bool = False + only_cross_attention: bool = False + use_memory_efficient_attention: bool = False + split_head_dim: bool = False + dtype: jnp.dtype = jnp.float32 + transformer_layers_per_block: int = 1 + + def setup(self): + resnets = [] + attentions = [] + + for i in range(self.num_layers): + in_channels = self.in_channels if i == 0 else self.out_channels + + res_block = FlaxResnetBlock2D( + in_channels=in_channels, + out_channels=self.out_channels, + dropout_prob=self.dropout, + dtype=self.dtype, + ) + resnets.append(res_block) + + attn_block = FlaxTransformer2DModel( + in_channels=self.out_channels, + n_heads=self.num_attention_heads, + d_head=self.out_channels // self.num_attention_heads, + depth=self.transformer_layers_per_block, + use_linear_projection=self.use_linear_projection, + only_cross_attention=self.only_cross_attention, + use_memory_efficient_attention=self.use_memory_efficient_attention, + split_head_dim=self.split_head_dim, + dtype=self.dtype, + ) + attentions.append(attn_block) + + self.resnets = resnets + self.attentions = attentions + + if self.add_downsample: + self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype) + + def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True): + output_states = () + + for resnet, attn in zip(self.resnets, self.attentions): + hidden_states = resnet(hidden_states, temb, deterministic=deterministic) + hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic) + output_states += (hidden_states,) + + if self.add_downsample: + hidden_states = self.downsamplers_0(hidden_states) + output_states += (hidden_states,) + + return hidden_states, output_states + + +class FlaxDownBlock2D(nn.Module): + r""" + Flax 2D downsizing block + + Parameters: + in_channels (:obj:`int`): + Input channels + out_channels (:obj:`int`): + Output channels + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + num_layers (:obj:`int`, *optional*, defaults to 1): + Number of attention blocks layers + add_downsample (:obj:`bool`, *optional*, defaults to `True`): + Whether to add downsampling layer before each final output + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + in_channels: int + out_channels: int + dropout: float = 0.0 + num_layers: int = 1 + add_downsample: bool = True + dtype: jnp.dtype = jnp.float32 + + def setup(self): + resnets = [] + + for i in range(self.num_layers): + in_channels = self.in_channels if i == 0 else self.out_channels + + res_block = FlaxResnetBlock2D( + in_channels=in_channels, + out_channels=self.out_channels, + dropout_prob=self.dropout, + dtype=self.dtype, + ) + resnets.append(res_block) + self.resnets = resnets + + if self.add_downsample: + self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype) + + def __call__(self, hidden_states, temb, deterministic=True): + output_states = () + + for resnet in self.resnets: + hidden_states = resnet(hidden_states, temb, deterministic=deterministic) + output_states += (hidden_states,) + + if self.add_downsample: + hidden_states = self.downsamplers_0(hidden_states) + output_states += (hidden_states,) + + return hidden_states, output_states + + +class FlaxCrossAttnUpBlock2D(nn.Module): + 
r""" + Cross Attention 2D Upsampling block - original architecture from Unet transformers: + https://arxiv.org/abs/2103.06104 + + Parameters: + in_channels (:obj:`int`): + Input channels + out_channels (:obj:`int`): + Output channels + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + num_layers (:obj:`int`, *optional*, defaults to 1): + Number of attention blocks layers + num_attention_heads (:obj:`int`, *optional*, defaults to 1): + Number of attention heads of each spatial transformer block + add_upsample (:obj:`bool`, *optional*, defaults to `True`): + Whether to add upsampling layer before each final output + use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): + enable memory efficient attention https://arxiv.org/abs/2112.05682 + split_head_dim (`bool`, *optional*, defaults to `False`): + Whether to split the head dimension into a new axis for the self-attention computation. In most cases, + enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + in_channels: int + out_channels: int + prev_output_channel: int + dropout: float = 0.0 + num_layers: int = 1 + num_attention_heads: int = 1 + add_upsample: bool = True + use_linear_projection: bool = False + only_cross_attention: bool = False + use_memory_efficient_attention: bool = False + split_head_dim: bool = False + dtype: jnp.dtype = jnp.float32 + transformer_layers_per_block: int = 1 + + def setup(self): + resnets = [] + attentions = [] + + for i in range(self.num_layers): + res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels + resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels + + res_block = FlaxResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=self.out_channels, + dropout_prob=self.dropout, + dtype=self.dtype, + ) + resnets.append(res_block) + + attn_block = FlaxTransformer2DModel( + in_channels=self.out_channels, + n_heads=self.num_attention_heads, + d_head=self.out_channels // self.num_attention_heads, + depth=self.transformer_layers_per_block, + use_linear_projection=self.use_linear_projection, + only_cross_attention=self.only_cross_attention, + use_memory_efficient_attention=self.use_memory_efficient_attention, + split_head_dim=self.split_head_dim, + dtype=self.dtype, + ) + attentions.append(attn_block) + + self.resnets = resnets + self.attentions = attentions + + if self.add_upsample: + self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype) + + def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True): + for resnet, attn in zip(self.resnets, self.attentions): + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1) + + hidden_states = resnet(hidden_states, temb, deterministic=deterministic) + hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic) + + if self.add_upsample: + hidden_states = self.upsamplers_0(hidden_states) + + return hidden_states + + +class FlaxUpBlock2D(nn.Module): + r""" + Flax 2D upsampling block + + Parameters: + in_channels (:obj:`int`): + Input channels + out_channels (:obj:`int`): + Output channels + prev_output_channel (:obj:`int`): + Output channels from the previous block + 
dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + num_layers (:obj:`int`, *optional*, defaults to 1): + Number of attention blocks layers + add_downsample (:obj:`bool`, *optional*, defaults to `True`): + Whether to add downsampling layer before each final output + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + in_channels: int + out_channels: int + prev_output_channel: int + dropout: float = 0.0 + num_layers: int = 1 + add_upsample: bool = True + dtype: jnp.dtype = jnp.float32 + + def setup(self): + resnets = [] + + for i in range(self.num_layers): + res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels + resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels + + res_block = FlaxResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=self.out_channels, + dropout_prob=self.dropout, + dtype=self.dtype, + ) + resnets.append(res_block) + + self.resnets = resnets + + if self.add_upsample: + self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype) + + def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True): + for resnet in self.resnets: + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1) + + hidden_states = resnet(hidden_states, temb, deterministic=deterministic) + + if self.add_upsample: + hidden_states = self.upsamplers_0(hidden_states) + + return hidden_states + + +class FlaxUNetMidBlock2DCrossAttn(nn.Module): + r""" + Cross Attention 2D Mid-level block - original architecture from Unet transformers: https://arxiv.org/abs/2103.06104 + + Parameters: + in_channels (:obj:`int`): + Input channels + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + num_layers (:obj:`int`, *optional*, defaults to 1): + Number of attention blocks layers + num_attention_heads (:obj:`int`, *optional*, defaults to 1): + Number of attention heads of each spatial transformer block + use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): + enable memory efficient attention https://arxiv.org/abs/2112.05682 + split_head_dim (`bool`, *optional*, defaults to `False`): + Whether to split the head dimension into a new axis for the self-attention computation. In most cases, + enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. 
+ dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + in_channels: int + dropout: float = 0.0 + num_layers: int = 1 + num_attention_heads: int = 1 + use_linear_projection: bool = False + use_memory_efficient_attention: bool = False + split_head_dim: bool = False + dtype: jnp.dtype = jnp.float32 + transformer_layers_per_block: int = 1 + + def setup(self): + # there is always at least one resnet + resnets = [ + FlaxResnetBlock2D( + in_channels=self.in_channels, + out_channels=self.in_channels, + dropout_prob=self.dropout, + dtype=self.dtype, + ) + ] + + attentions = [] + + for _ in range(self.num_layers): + attn_block = FlaxTransformer2DModel( + in_channels=self.in_channels, + n_heads=self.num_attention_heads, + d_head=self.in_channels // self.num_attention_heads, + depth=self.transformer_layers_per_block, + use_linear_projection=self.use_linear_projection, + use_memory_efficient_attention=self.use_memory_efficient_attention, + split_head_dim=self.split_head_dim, + dtype=self.dtype, + ) + attentions.append(attn_block) + + res_block = FlaxResnetBlock2D( + in_channels=self.in_channels, + out_channels=self.in_channels, + dropout_prob=self.dropout, + dtype=self.dtype, + ) + resnets.append(res_block) + + self.resnets = resnets + self.attentions = attentions + + def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True): + hidden_states = self.resnets[0](hidden_states, temb) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic) + hidden_states = resnet(hidden_states, temb, deterministic=deterministic) + + return hidden_states diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_condition.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_condition.py new file mode 100644 index 000000000..cd5b48469 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_condition.py @@ -0,0 +1,1319 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
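+ +# NOTE: This module provides the conditional 2D UNet (`UNet2DConditionModel`) used by the Stable Diffusion 2.1 model in this repository. +# When the `USE_NHWC_GN` environment variable is set to a non-zero value, the output GroupNorm (`conv_norm_out`) is replaced by the +# fused NHWC GroupNorm+SiLU implementation (`GN_NHWC`) imported below.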
+from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint +import os +from ..nhwc_groupnorm.custom_gn import GN_NHWC +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin +from ...utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ..activations import get_activation +from ..attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + Attention, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from ..embeddings import ( + GaussianFourierProjection, + GLIGENTextBoundingboxProjection, + ImageHintTimeEmbedding, + ImageProjection, + ImageTimeEmbedding, + TextImageProjection, + TextImageTimeEmbedding, + TextTimeEmbedding, + TimestepEmbedding, + Timesteps, +) +from ..modeling_utils import ModelMixin +from .unet_2d_blocks import ( + get_down_block, + get_mid_block, + get_up_block, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class UNet2DConditionOutput(BaseOutput): + """ + The output of [`UNet2DConditionModel`]. + + Args: + sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. + """ + + sample: torch.FloatTensor = None + + +class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin): + r""" + A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample + shaped output. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): + Height and width of input/output sample. + in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. + out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. + center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. + flip_sin_to_cos (`bool`, *optional*, defaults to `True`): + Whether to flip the sin to cos in the time embedding. + freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. + down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): + The tuple of downsample blocks to use. + mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): + Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or + `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): + The tuple of upsample blocks to use. + only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`): + Whether to include self-attention in the basic transformer blocks, see + [`~models.attention.BasicTransformerBlock`]. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. 
+ layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. + downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. + mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. + norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. + If `None`, normalization and activation layers is skipped in post-processing. + norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. + cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): + The dimension of the cross attention features. + transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1): + The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for + [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], + [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. + reverse_transformer_layers_per_block : (`Tuple[Tuple]`, *optional*, defaults to None): + The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling + blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for + [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], + [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. + encoder_hid_dim (`int`, *optional*, defaults to None): + If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` + dimension to `cross_attention_dim`. + encoder_hid_dim_type (`str`, *optional*, defaults to `None`): + If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text + embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. + attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. + num_attention_heads (`int`, *optional*): + The number of attention heads. If not defined, defaults to `attention_head_dim` + resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config + for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. + class_embed_type (`str`, *optional*, defaults to `None`): + The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, + `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. + addition_embed_type (`str`, *optional*, defaults to `None`): + Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or + "text". "text" will use the `TextTimeEmbedding` layer. + addition_time_embed_dim: (`int`, *optional*, defaults to `None`): + Dimension for the timestep embeddings. + num_class_embeds (`int`, *optional*, defaults to `None`): + Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing + class conditioning with `class_embed_type` equal to `None`. + time_embedding_type (`str`, *optional*, defaults to `positional`): + The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. 
+ time_embedding_dim (`int`, *optional*, defaults to `None`): + An optional override for the dimension of the projected time embedding. + time_embedding_act_fn (`str`, *optional*, defaults to `None`): + Optional activation function to use only once on the time embeddings before they are passed to the rest of + the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. + timestep_post_act (`str`, *optional*, defaults to `None`): + The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. + time_cond_proj_dim (`int`, *optional*, defaults to `None`): + The dimension of `cond_proj` layer in the timestep embedding. + conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. + conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer. + projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when + `class_embed_type="projection"`. Required when `class_embed_type="projection"`. + class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time + embeddings with the class embeddings. + mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`): + Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If + `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the + `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False` + otherwise. + """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + sample_size: Optional[int] = None, + in_channels: int = 4, + out_channels: int = 4, + center_input_sample: bool = False, + flip_sin_to_cos: bool = True, + freq_shift: int = 0, + down_block_types: Tuple[str] = ( + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "DownBlock2D", + ), + mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn", + up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"), + only_cross_attention: Union[bool, Tuple[bool]] = False, + block_out_channels: Tuple[int] = (320, 640, 1280, 1280), + layers_per_block: Union[int, Tuple[int]] = 2, + downsample_padding: int = 1, + mid_block_scale_factor: float = 1, + dropout: float = 0.0, + act_fn: str = "silu", + norm_num_groups: Optional[int] = 32, + norm_eps: float = 1e-5, + cross_attention_dim: Union[int, Tuple[int]] = 1280, + transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, + reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None, + encoder_hid_dim: Optional[int] = None, + encoder_hid_dim_type: Optional[str] = None, + attention_head_dim: Union[int, Tuple[int]] = 8, + num_attention_heads: Optional[Union[int, Tuple[int]]] = None, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + class_embed_type: Optional[str] = None, + addition_embed_type: Optional[str] = None, + addition_time_embed_dim: Optional[int] = None, + num_class_embeds: Optional[int] = None, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + resnet_skip_time_act: bool = False, + resnet_out_scale_factor: float = 1.0, + time_embedding_type: str = "positional", + time_embedding_dim: Optional[int] = None, + time_embedding_act_fn: Optional[str] = None, + timestep_post_act: Optional[str] = None, + time_cond_proj_dim: Optional[int] = None, + 
conv_in_kernel: int = 3, + conv_out_kernel: int = 3, + projection_class_embeddings_input_dim: Optional[int] = None, + attention_type: str = "default", + class_embeddings_concat: bool = False, + mid_block_only_cross_attention: Optional[bool] = None, + cross_attention_norm: Optional[str] = None, + addition_embed_type_num_heads: int = 64, + ): + super().__init__() + + self.sample_size = sample_size + + if num_attention_heads is not None: + raise ValueError( + "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." + ) + + # If `num_attention_heads` is not defined (which is the case for most models) + # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. + # The reason for this behavior is to correct for incorrectly named variables that were introduced + # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 + # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking + # which is why we correct for the naming here. + num_attention_heads = num_attention_heads or attention_head_dim + + # Check inputs + self._check_config( + down_block_types=down_block_types, + up_block_types=up_block_types, + only_cross_attention=only_cross_attention, + block_out_channels=block_out_channels, + layers_per_block=layers_per_block, + cross_attention_dim=cross_attention_dim, + transformer_layers_per_block=transformer_layers_per_block, + reverse_transformer_layers_per_block=reverse_transformer_layers_per_block, + attention_head_dim=attention_head_dim, + num_attention_heads=num_attention_heads, + ) + + # input + conv_in_padding = (conv_in_kernel - 1) // 2 + self.conv_in = nn.Conv2d( + in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding + ) + + # time + time_embed_dim, timestep_input_dim = self._set_time_proj( + time_embedding_type, + block_out_channels=block_out_channels, + flip_sin_to_cos=flip_sin_to_cos, + freq_shift=freq_shift, + time_embedding_dim=time_embedding_dim, + ) + + self.time_embedding = TimestepEmbedding( + timestep_input_dim, + time_embed_dim, + act_fn=act_fn, + post_act_fn=timestep_post_act, + cond_proj_dim=time_cond_proj_dim, + ) + + self._set_encoder_hid_proj( + encoder_hid_dim_type, + cross_attention_dim=cross_attention_dim, + encoder_hid_dim=encoder_hid_dim, + ) + + # class embedding + self._set_class_embedding( + class_embed_type, + act_fn=act_fn, + num_class_embeds=num_class_embeds, + projection_class_embeddings_input_dim=projection_class_embeddings_input_dim, + time_embed_dim=time_embed_dim, + timestep_input_dim=timestep_input_dim, + ) + + self._set_add_embedding( + addition_embed_type, + addition_embed_type_num_heads=addition_embed_type_num_heads, + addition_time_embed_dim=addition_time_embed_dim, + cross_attention_dim=cross_attention_dim, + encoder_hid_dim=encoder_hid_dim, + flip_sin_to_cos=flip_sin_to_cos, + freq_shift=freq_shift, + projection_class_embeddings_input_dim=projection_class_embeddings_input_dim, + time_embed_dim=time_embed_dim, + ) + + if time_embedding_act_fn is None: + self.time_embed_act = None + else: + self.time_embed_act = get_activation(time_embedding_act_fn) + + self.down_blocks = nn.ModuleList([]) + 
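+ # Containers for the encoder (down) and decoder (up) halves of the UNet; both are populated below via + # `get_down_block` / `get_up_block`, with the up path built from the reversed per-level channel and attention settings.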
self.up_blocks = nn.ModuleList([]) + + if isinstance(only_cross_attention, bool): + if mid_block_only_cross_attention is None: + mid_block_only_cross_attention = only_cross_attention + + only_cross_attention = [only_cross_attention] * len(down_block_types) + + if mid_block_only_cross_attention is None: + mid_block_only_cross_attention = False + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(down_block_types) + + if isinstance(attention_head_dim, int): + attention_head_dim = (attention_head_dim,) * len(down_block_types) + + if isinstance(cross_attention_dim, int): + cross_attention_dim = (cross_attention_dim,) * len(down_block_types) + + if isinstance(layers_per_block, int): + layers_per_block = [layers_per_block] * len(down_block_types) + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) + + if class_embeddings_concat: + # The time embeddings are concatenated with the class embeddings. The dimension of the + # time embeddings passed to the down, middle, and up blocks is twice the dimension of the + # regular time embeddings + blocks_time_embed_dim = time_embed_dim * 2 + else: + blocks_time_embed_dim = time_embed_dim + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block[i], + transformer_layers_per_block=transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + temb_channels=blocks_time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim[i], + num_attention_heads=num_attention_heads[i], + downsample_padding=downsample_padding, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_type=attention_type, + resnet_skip_time_act=resnet_skip_time_act, + resnet_out_scale_factor=resnet_out_scale_factor, + cross_attention_norm=cross_attention_norm, + attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, + dropout=dropout, + ) + self.down_blocks.append(down_block) + + # mid + self.mid_block = get_mid_block( + mid_block_type, + temb_channels=blocks_time_embed_dim, + in_channels=block_out_channels[-1], + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + output_scale_factor=mid_block_scale_factor, + transformer_layers_per_block=transformer_layers_per_block[-1], + num_attention_heads=num_attention_heads[-1], + cross_attention_dim=cross_attention_dim[-1], + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + mid_block_only_cross_attention=mid_block_only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_type=attention_type, + resnet_skip_time_act=resnet_skip_time_act, + cross_attention_norm=cross_attention_norm, + attention_head_dim=attention_head_dim[-1], + dropout=dropout, + ) + + # count how many layers upsample the images + self.num_upsamplers = 0 + + # up + reversed_block_out_channels = 
list(reversed(block_out_channels)) + reversed_num_attention_heads = list(reversed(num_attention_heads)) + reversed_layers_per_block = list(reversed(layers_per_block)) + reversed_cross_attention_dim = list(reversed(cross_attention_dim)) + reversed_transformer_layers_per_block = ( + list(reversed(transformer_layers_per_block)) + if reverse_transformer_layers_per_block is None + else reverse_transformer_layers_per_block + ) + only_cross_attention = list(reversed(only_cross_attention)) + + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + is_final_block = i == len(block_out_channels) - 1 + + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + # add upsample block for all BUT final layer + if not is_final_block: + add_upsample = True + self.num_upsamplers += 1 + else: + add_upsample = False + + up_block = get_up_block( + up_block_type, + num_layers=reversed_layers_per_block[i] + 1, + transformer_layers_per_block=reversed_transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=blocks_time_embed_dim, + add_upsample=add_upsample, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resolution_idx=i, + resnet_groups=norm_num_groups, + cross_attention_dim=reversed_cross_attention_dim[i], + num_attention_heads=reversed_num_attention_heads[i], + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_type=attention_type, + resnet_skip_time_act=resnet_skip_time_act, + resnet_out_scale_factor=resnet_out_scale_factor, + cross_attention_norm=cross_attention_norm, + attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, + dropout=dropout, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + self.fuse_gn_silu = True if int(os.environ.get("USE_NHWC_GN", 0)) else False + # out + if norm_num_groups is not None: + if self.fuse_gn_silu: + self.conv_norm_out=GN_NHWC(norm_num_groups, block_out_channels[0], activation="silu") + else: + self.conv_norm_out = nn.GroupNorm( + num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps) + self.conv_act = get_activation(act_fn) + + else: + self.conv_norm_out = None + self.conv_act = None + + conv_out_padding = (conv_out_kernel - 1) // 2 + self.conv_out = nn.Conv2d( + block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding + ) + + self._set_pos_net_if_use_gligen(attention_type=attention_type, cross_attention_dim=cross_attention_dim) + + def _check_config( + self, + down_block_types: Tuple[str], + up_block_types: Tuple[str], + only_cross_attention: Union[bool, Tuple[bool]], + block_out_channels: Tuple[int], + layers_per_block: Union[int, Tuple[int]], + cross_attention_dim: Union[int, Tuple[int]], + transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple[int]]], + reverse_transformer_layers_per_block: bool, + attention_head_dim: int, + num_attention_heads: Optional[Union[int, Tuple[int]]], + ): + if len(down_block_types) != len(up_block_types): + raise ValueError( + f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. 
`up_block_types`: {up_block_types}." + ) + + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." + ) + + if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." + ) + if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None: + for layer_number_per_block in transformer_layers_per_block: + if isinstance(layer_number_per_block, list): + raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.") + + def _set_time_proj( + self, + time_embedding_type: str, + block_out_channels: int, + flip_sin_to_cos: bool, + freq_shift: float, + time_embedding_dim: int, + ) -> Tuple[int, int]: + if time_embedding_type == "fourier": + time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 + if time_embed_dim % 2 != 0: + raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") + self.time_proj = GaussianFourierProjection( + time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos + ) + timestep_input_dim = time_embed_dim + elif time_embedding_type == "positional": + time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 + + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) + timestep_input_dim = block_out_channels[0] + else: + raise ValueError( + f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." 
+ ) + + return time_embed_dim, timestep_input_dim + + def _set_encoder_hid_proj( + self, + encoder_hid_dim_type: Optional[str], + cross_attention_dim: Union[int, Tuple[int]], + encoder_hid_dim: Optional[int], + ): + if encoder_hid_dim_type is None and encoder_hid_dim is not None: + encoder_hid_dim_type = "text_proj" + self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) + logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") + + if encoder_hid_dim is None and encoder_hid_dim_type is not None: + raise ValueError( + f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." + ) + + if encoder_hid_dim_type == "text_proj": + self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) + elif encoder_hid_dim_type == "text_image_proj": + # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much + # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use + # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` + self.encoder_hid_proj = TextImageProjection( + text_embed_dim=encoder_hid_dim, + image_embed_dim=cross_attention_dim, + cross_attention_dim=cross_attention_dim, + ) + elif encoder_hid_dim_type == "image_proj": + # Kandinsky 2.2 + self.encoder_hid_proj = ImageProjection( + image_embed_dim=encoder_hid_dim, + cross_attention_dim=cross_attention_dim, + ) + elif encoder_hid_dim_type is not None: + raise ValueError( + f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." + ) + else: + self.encoder_hid_proj = None + + def _set_class_embedding( + self, + class_embed_type: Optional[str], + act_fn: str, + num_class_embeds: Optional[int], + projection_class_embeddings_input_dim: Optional[int], + time_embed_dim: int, + timestep_input_dim: int, + ): + if class_embed_type is None and num_class_embeds is not None: + self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) + elif class_embed_type == "timestep": + self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) + elif class_embed_type == "identity": + self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) + elif class_embed_type == "projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" + ) + # The projection `class_embed_type` is the same as the timestep `class_embed_type` except + # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings + # 2. it projects from an arbitrary input dimension. + # + # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. + # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. + # As a result, `TimestepEmbedding` can be passed arbitrary vectors. 
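+ # Shape sketch (illustrative): `class_labels` of shape (batch_size, projection_class_embeddings_input_dim) is + # mapped by the `TimestepEmbedding` below to (batch_size, time_embed_dim).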
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + elif class_embed_type == "simple_projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" + ) + self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) + else: + self.class_embedding = None + + def _set_add_embedding( + self, + addition_embed_type: str, + addition_embed_type_num_heads: int, + addition_time_embed_dim: Optional[int], + flip_sin_to_cos: bool, + freq_shift: float, + cross_attention_dim: Optional[int], + encoder_hid_dim: Optional[int], + projection_class_embeddings_input_dim: Optional[int], + time_embed_dim: int, + ): + if addition_embed_type == "text": + if encoder_hid_dim is not None: + text_time_embedding_from_dim = encoder_hid_dim + else: + text_time_embedding_from_dim = cross_attention_dim + + self.add_embedding = TextTimeEmbedding( + text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads + ) + elif addition_embed_type == "text_image": + # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much + # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use + # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` + self.add_embedding = TextImageTimeEmbedding( + text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim + ) + elif addition_embed_type == "text_time": + self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) + self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + elif addition_embed_type == "image": + # Kandinsky 2.2 + self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) + elif addition_embed_type == "image_hint": + # Kandinsky 2.2 ControlNet + self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) + elif addition_embed_type is not None: + raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") + + def _set_pos_net_if_use_gligen(self, attention_type: str, cross_attention_dim: int): + if attention_type in ["gated", "gated-text-image"]: + positive_len = 768 + if isinstance(cross_attention_dim, int): + positive_len = cross_attention_dim + elif isinstance(cross_attention_dim, tuple) or isinstance(cross_attention_dim, list): + positive_len = cross_attention_dim[0] + + feature_type = "text-only" if attention_type == "gated" else "text-image" + self.position_net = GLIGENTextBoundingboxProjection( + positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type + ) + + @property + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. 
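+ + For example (illustrative), a returned key typically looks like + `"down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor"`; such keys can be passed back in a `dict` to + [`~UNet2DConditionModel.set_attn_processor`] to target individual layers.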
+ """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + def set_attention_slice(self, slice_size: Union[str, int, List[int]] = "auto"): + r""" + Enable sliced attention computation. + + When this option is enabled, the attention module splits the input tensor in slices to compute attention in + several steps. This is useful for saving some memory in exchange for a small decrease in speed. + + Args: + slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): + When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If + `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is + provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` + must be a multiple of `slice_size`. 
+ """ + sliceable_head_dims = [] + + def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): + if hasattr(module, "set_attention_slice"): + sliceable_head_dims.append(module.sliceable_head_dim) + + for child in module.children(): + fn_recursive_retrieve_sliceable_dims(child) + + # retrieve number of attention layers + for module in self.children(): + fn_recursive_retrieve_sliceable_dims(module) + + num_sliceable_layers = len(sliceable_head_dims) + + if slice_size == "auto": + # half the attention head size is usually a good trade-off between + # speed and memory + slice_size = [dim // 2 for dim in sliceable_head_dims] + elif slice_size == "max": + # make smallest slice possible + slice_size = num_sliceable_layers * [1] + + slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size + + if len(slice_size) != len(sliceable_head_dims): + raise ValueError( + f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" + f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." + ) + + for i in range(len(slice_size)): + size = slice_size[i] + dim = sliceable_head_dims[i] + if size is not None and size > dim: + raise ValueError(f"size {size} has to be smaller or equal to {dim}.") + + # Recursively walk through all the children. + # Any children which exposes the set_attention_slice method + # gets the message + def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): + if hasattr(module, "set_attention_slice"): + module.set_attention_slice(slice_size.pop()) + + for child in module.children(): + fn_recursive_set_attention_slice(child, slice_size) + + reversed_slice_size = list(reversed(slice_size)) + for module in self.children(): + fn_recursive_set_attention_slice(module, reversed_slice_size) + + def _set_gradient_checkpointing(self, module, value=False): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = value + + def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): + r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497. + + The suffixes after the scaling factors represent the stage blocks where they are being applied. + + Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that + are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. + + Args: + s1 (`float`): + Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to + mitigate the "oversmoothing effect" in the enhanced denoising process. + s2 (`float`): + Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to + mitigate the "oversmoothing effect" in the enhanced denoising process. + b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. + b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. 
+ """ + for i, upsample_block in enumerate(self.up_blocks): + setattr(upsample_block, "s1", s1) + setattr(upsample_block, "s2", s2) + setattr(upsample_block, "b1", b1) + setattr(upsample_block, "b2", b2) + + def disable_freeu(self): + """Disables the FreeU mechanism.""" + freeu_keys = {"s1", "s2", "b1", "b2"} + for i, upsample_block in enumerate(self.up_blocks): + for k in freeu_keys: + if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: + setattr(upsample_block, k, None) + + def fuse_qkv_projections(self): + """ + Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, + key, value) are fused. For cross-attention modules, key and value projection matrices are fused. + + + + This API is 🧪 experimental. + + + """ + self.original_attn_processors = None + + for _, attn_processor in self.attn_processors.items(): + if "Added" in str(attn_processor.__class__.__name__): + raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") + + self.original_attn_processors = self.attn_processors + + for module in self.modules(): + if isinstance(module, Attention): + module.fuse_projections(fuse=True) + + def unfuse_qkv_projections(self): + """Disables the fused QKV projection if enabled. + + + + This API is 🧪 experimental. + + + + """ + if self.original_attn_processors is not None: + self.set_attn_processor(self.original_attn_processors) + + def unload_lora(self): + """Unloads LoRA weights.""" + deprecate( + "unload_lora", + "0.28.0", + "Calling `unload_lora()` is deprecated and will be removed in a future version. Please install `peft` and then call `disable_adapters().", + ) + for module in self.modules(): + if hasattr(module, "set_lora_layer"): + module.set_lora_layer(None) + + def get_time_embed( + self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int] + ) -> Optional[torch.Tensor]: + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = sample.device.type == "mps" + if isinstance(timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(sample.shape[0]) + + t_emb = self.time_proj(timesteps) + # `Timesteps` does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. + t_emb = t_emb.to(dtype=sample.dtype) + return t_emb + + def get_class_embed(self, sample: torch.Tensor, class_labels: Optional[torch.Tensor]) -> Optional[torch.Tensor]: + class_emb = None + if self.class_embedding is not None: + if class_labels is None: + raise ValueError("class_labels should be provided when num_class_embeds > 0") + + if self.config.class_embed_type == "timestep": + class_labels = self.time_proj(class_labels) + + # `Timesteps` does not contain any weights and will always return f32 tensors + # there might be better ways to encapsulate this. 
+ class_labels = class_labels.to(dtype=sample.dtype) + + class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) + return class_emb + + def get_aug_embed( + self, emb: torch.Tensor, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any] + ) -> Optional[torch.Tensor]: + aug_emb = None + if self.config.addition_embed_type == "text": + aug_emb = self.add_embedding(encoder_hidden_states) + elif self.config.addition_embed_type == "text_image": + # Kandinsky 2.1 - style + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" + ) + + image_embs = added_cond_kwargs.get("image_embeds") + text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states) + aug_emb = self.add_embedding(text_embs, image_embs) + elif self.config.addition_embed_type == "text_time": + # SDXL - style + if "text_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" + ) + text_embeds = added_cond_kwargs.get("text_embeds") + if "time_ids" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" + ) + time_ids = added_cond_kwargs.get("time_ids") + time_embeds = self.add_time_proj(time_ids.flatten()) + time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) + add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) + add_embeds = add_embeds.to(emb.dtype) + aug_emb = self.add_embedding(add_embeds) + elif self.config.addition_embed_type == "image": + # Kandinsky 2.2 - style + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" + ) + image_embs = added_cond_kwargs.get("image_embeds") + aug_emb = self.add_embedding(image_embs) + elif self.config.addition_embed_type == "image_hint": + # Kandinsky 2.2 - style + if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`" + ) + image_embs = added_cond_kwargs.get("image_embeds") + hint = added_cond_kwargs.get("hint") + aug_emb = self.add_embedding(image_embs, hint) + return aug_emb + + def process_encoder_hidden_states( + self, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any] + ) -> torch.Tensor: + if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj": + encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) + elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj": + # Kadinsky 2.1 - style + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" + ) + + image_embeds = added_cond_kwargs.get("image_embeds") + encoder_hidden_states = 
self.encoder_hid_proj(encoder_hidden_states, image_embeds) + elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj": + # Kandinsky 2.2 - style + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" + ) + image_embeds = added_cond_kwargs.get("image_embeds") + encoder_hidden_states = self.encoder_hid_proj(image_embeds) + elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj": + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" + ) + image_embeds = added_cond_kwargs.get("image_embeds") + image_embeds = self.encoder_hid_proj(image_embeds) + encoder_hidden_states = (encoder_hidden_states, image_embeds) + return encoder_hidden_states + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, + mid_block_additional_residual: Optional[torch.Tensor] = None, + down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + return_dict: bool = True, + ) -> Union[UNet2DConditionOutput, Tuple]: + r""" + The [`UNet2DConditionModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor with the following shape `(batch, channel, height, width)`. + timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. + encoder_hidden_states (`torch.FloatTensor`): + The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. + class_labels (`torch.Tensor`, *optional*, defaults to `None`): + Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. + timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`): + Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed + through the `self.time_embedding` layer to obtain the timestep embeddings. + attention_mask (`torch.Tensor`, *optional*, defaults to `None`): + An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask + is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large + negative values to the attention scores corresponding to "discard" tokens. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + added_cond_kwargs: (`dict`, *optional*): + A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that + are passed along to the UNet blocks. 
+            down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
+                A tuple of additional residuals added to the UNet long skip connections from the down blocks to the
+                up blocks, for example from ControlNet side model(s).
+            mid_block_additional_residual (`torch.Tensor`, *optional*):
+                An additional residual added to the output of the middle UNet block, for example from a ControlNet
+                side model.
+            down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
+                Additional residuals added within the UNet down blocks, for example from T2I-Adapter side model(s).
+            encoder_attention_mask (`torch.Tensor`):
+                A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
+                `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
+                which adds large negative values to the attention scores corresponding to "discard" tokens.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a
+                plain tuple.
+
+        Returns:
+            [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
+                If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned,
+                otherwise a `tuple` is returned where the first element is the sample tensor.
+        """
+        # By default samples have to be at least a multiple of the overall upsampling factor.
+        # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
+        # However, the upsampling interpolation output size can be forced to fit any upsampling size
+        # on the fly if necessary.
+        default_overall_up_factor = 2**self.num_upsamplers
+
+        # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
+        forward_upsample_size = False
+        upsample_size = None
+
+        for dim in sample.shape[-2:]:
+            if dim % default_overall_up_factor != 0:
+                # Forward upsample size to force interpolation output size.
+                forward_upsample_size = True
+                break
+
+        # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
+        #   expects mask of shape:
+        #     [batch, key_tokens]
+        #   adds singleton query_tokens dimension:
+        #     [batch, 1, key_tokens]
+        #   this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
+        #     [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
+        #     [batch * heads, query_tokens, key_tokens] (e.g.
xformers or classic attn) + if attention_mask is not None: + # assume that mask is expressed as: + # (1 = keep, 0 = discard) + # convert mask into a bias that can be added to attention scores: + # (keep = +0, discard = -10000.0) + attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1) + + # convert encoder_attention_mask to a bias the same way we do for attention_mask + if encoder_attention_mask is not None: + encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 + encoder_attention_mask = encoder_attention_mask.unsqueeze(1) + + # 0. center input if necessary + if self.config.center_input_sample: + sample = 2 * sample - 1.0 + + # 1. time + t_emb = self.get_time_embed(sample=sample, timestep=timestep) + emb = self.time_embedding(t_emb, timestep_cond) + aug_emb = None + + class_emb = self.get_class_embed(sample=sample, class_labels=class_labels) + if class_emb is not None: + if self.config.class_embeddings_concat: + emb = torch.cat([emb, class_emb], dim=-1) + else: + emb = emb + class_emb + + aug_emb = self.get_aug_embed( + emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs + ) + if self.config.addition_embed_type == "image_hint": + aug_emb, hint = aug_emb + sample = torch.cat([sample, hint], dim=1) + + emb = emb + aug_emb if aug_emb is not None else emb + + if self.time_embed_act is not None: + emb = self.time_embed_act(emb) + + encoder_hidden_states = self.process_encoder_hidden_states( + encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs + ) + + # 2. pre-process + sample = self.conv_in(sample) + + # 2.5 GLIGEN position net + if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None: + cross_attention_kwargs = cross_attention_kwargs.copy() + gligen_args = cross_attention_kwargs.pop("gligen") + cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)} + + # 3. down + lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0 + if USE_PEFT_BACKEND: + # weight the lora layers by setting `lora_scale` for each PEFT layer + scale_lora_layers(self, lora_scale) + + is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None + # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets + is_adapter = down_intrablock_additional_residuals is not None + # maintain backward compatibility for legacy usage, where + # T2I-Adapter and ControlNet both use down_block_additional_residuals arg + # but can only use one or the other + if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None: + deprecate( + "T2I should not use down_block_additional_residuals", + "1.3.0", + "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \ + and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \ + for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. 
", + standard_warn=False, + ) + down_intrablock_additional_residuals = down_block_additional_residuals + is_adapter = True + + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: + # For t2i-adapter CrossAttnDownBlock2D + additional_residuals = {} + if is_adapter and len(down_intrablock_additional_residuals) > 0: + additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0) + + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + **additional_residuals, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb) + if is_adapter and len(down_intrablock_additional_residuals) > 0: + sample += down_intrablock_additional_residuals.pop(0) + + down_block_res_samples += res_samples + + if is_controlnet: + new_down_block_res_samples = () + + for down_block_res_sample, down_block_additional_residual in zip( + down_block_res_samples, down_block_additional_residuals + ): + down_block_res_sample = down_block_res_sample + down_block_additional_residual + new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) + + down_block_res_samples = new_down_block_res_samples + + # 4. mid + if self.mid_block is not None: + if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + ) + else: + sample = self.mid_block(sample, emb) + + # To support T2I-Adapter-XL + if ( + is_adapter + and len(down_intrablock_additional_residuals) > 0 + and sample.shape == down_intrablock_additional_residuals[0].shape + ): + sample += down_intrablock_additional_residuals.pop(0) + + if is_controlnet: + sample = sample + mid_block_additional_residual + + # 5. up + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block and forward_upsample_size: + upsample_size = down_block_res_samples[-1].shape[2:] + + if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + upsample_size=upsample_size, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + ) + else: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + upsample_size=upsample_size, + ) + + # 6. 
post-process + if self.conv_norm_out: + sample = self.conv_norm_out(sample) + if not self.fuse_gn_silu: + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + if USE_PEFT_BACKEND: + # remove `lora_scale` from each PEFT layer + unscale_lora_layers(self, lora_scale) + + if not return_dict: + return (sample,) + + return UNet2DConditionOutput(sample=sample) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_condition_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_condition_flax.py new file mode 100644 index 000000000..a5ec2875c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_2d_condition_flax.py @@ -0,0 +1,453 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict, Optional, Tuple, Union + +import flax +import flax.linen as nn +import jax +import jax.numpy as jnp +from flax.core.frozen_dict import FrozenDict + +from ...configuration_utils import ConfigMixin, flax_register_to_config +from ...utils import BaseOutput +from ..embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps +from ..modeling_flax_utils import FlaxModelMixin +from .unet_2d_blocks_flax import ( + FlaxCrossAttnDownBlock2D, + FlaxCrossAttnUpBlock2D, + FlaxDownBlock2D, + FlaxUNetMidBlock2DCrossAttn, + FlaxUpBlock2D, +) + + +@flax.struct.dataclass +class FlaxUNet2DConditionOutput(BaseOutput): + """ + The output of [`FlaxUNet2DConditionModel`]. + + Args: + sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): + The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. + """ + + sample: jnp.ndarray + + +@flax_register_to_config +class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin): + r""" + A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample + shaped output. + + This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for it's generic methods + implemented for all models (such as downloading or saving). + + This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) + subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to its + general usage and behavior. 
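+
+    A minimal usage sketch (illustrative; the config values, shapes, and random key below are placeholder
+    assumptions):
+
+    ```py
+    >>> import jax
+    >>> import jax.numpy as jnp
+
+    >>> unet = FlaxUNet2DConditionModel(sample_size=32, cross_attention_dim=768)
+    >>> params = unet.init_weights(jax.random.PRNGKey(0))
+    >>> sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)
+    >>> timesteps = jnp.ones((1,), dtype=jnp.int32)
+    >>> encoder_hidden_states = jnp.zeros((1, 77, 768), dtype=jnp.float32)
+    >>> out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
+    >>> # out.sample has shape (batch, out_channels, height, width)
+    ```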
+ + Inherent JAX features such as the following are supported: + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + sample_size (`int`, *optional*): + The size of the input sample. + in_channels (`int`, *optional*, defaults to 4): + The number of channels in the input sample. + out_channels (`int`, *optional*, defaults to 4): + The number of channels in the output. + down_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D")`): + The tuple of downsample blocks to use. + up_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D")`): + The tuple of upsample blocks to use. + mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): + Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`. If `None`, the mid block layer is skipped. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): + The number of layers per block. + attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8): + The dimension of the attention heads. + num_attention_heads (`int` or `Tuple[int]`, *optional*): + The number of attention heads. + cross_attention_dim (`int`, *optional*, defaults to 768): + The dimension of the cross attention features. + dropout (`float`, *optional*, defaults to 0): + Dropout probability for down, up and bottleneck blocks. + flip_sin_to_cos (`bool`, *optional*, defaults to `True`): + Whether to flip the sin to cos in the time embedding. + freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. + use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): + Enable memory efficient attention as described [here](https://arxiv.org/abs/2112.05682). + split_head_dim (`bool`, *optional*, defaults to `False`): + Whether to split the head dimension into a new axis for the self-attention computation. In most cases, + enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. + """ + + sample_size: int = 32 + in_channels: int = 4 + out_channels: int = 4 + down_block_types: Tuple[str, ...] = ( + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "DownBlock2D", + ) + up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") + mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn" + only_cross_attention: Union[bool, Tuple[bool]] = False + block_out_channels: Tuple[int, ...] 
= (320, 640, 1280, 1280) + layers_per_block: int = 2 + attention_head_dim: Union[int, Tuple[int, ...]] = 8 + num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None + cross_attention_dim: int = 1280 + dropout: float = 0.0 + use_linear_projection: bool = False + dtype: jnp.dtype = jnp.float32 + flip_sin_to_cos: bool = True + freq_shift: int = 0 + use_memory_efficient_attention: bool = False + split_head_dim: bool = False + transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1 + addition_embed_type: Optional[str] = None + addition_time_embed_dim: Optional[int] = None + addition_embed_type_num_heads: int = 64 + projection_class_embeddings_input_dim: Optional[int] = None + + def init_weights(self, rng: jax.Array) -> FrozenDict: + # init input tensors + sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) + sample = jnp.zeros(sample_shape, dtype=jnp.float32) + timesteps = jnp.ones((1,), dtype=jnp.int32) + encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) + + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + added_cond_kwargs = None + if self.addition_embed_type == "text_time": + # we retrieve the expected `text_embeds_dim` by first checking if the architecture is a refiner + # or non-refiner architecture and then by "reverse-computing" from `projection_class_embeddings_input_dim` + is_refiner = ( + 5 * self.config.addition_time_embed_dim + self.config.cross_attention_dim + == self.config.projection_class_embeddings_input_dim + ) + num_micro_conditions = 5 if is_refiner else 6 + + text_embeds_dim = self.config.projection_class_embeddings_input_dim - ( + num_micro_conditions * self.config.addition_time_embed_dim + ) + + time_ids_channels = self.projection_class_embeddings_input_dim - text_embeds_dim + time_ids_dims = time_ids_channels // self.addition_time_embed_dim + added_cond_kwargs = { + "text_embeds": jnp.zeros((1, text_embeds_dim), dtype=jnp.float32), + "time_ids": jnp.zeros((1, time_ids_dims), dtype=jnp.float32), + } + return self.init(rngs, sample, timesteps, encoder_hidden_states, added_cond_kwargs)["params"] + + def setup(self) -> None: + block_out_channels = self.block_out_channels + time_embed_dim = block_out_channels[0] * 4 + + if self.num_attention_heads is not None: + raise ValueError( + "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." + ) + + # If `num_attention_heads` is not defined (which is the case for most models) + # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. + # The reason for this behavior is to correct for incorrectly named variables that were introduced + # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 + # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking + # which is why we correct for the naming here. 
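+        # e.g. a legacy config that stores `attention_head_dim=8` is interpreted here as using 8 attention heads
+        # per block.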
+ num_attention_heads = self.num_attention_heads or self.attention_head_dim + + # input + self.conv_in = nn.Conv( + block_out_channels[0], + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + # time + self.time_proj = FlaxTimesteps( + block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift + ) + self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype) + + only_cross_attention = self.only_cross_attention + if isinstance(only_cross_attention, bool): + only_cross_attention = (only_cross_attention,) * len(self.down_block_types) + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(self.down_block_types) + + # transformer layers per block + transformer_layers_per_block = self.transformer_layers_per_block + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * len(self.down_block_types) + + # addition embed types + if self.addition_embed_type is None: + self.add_embedding = None + elif self.addition_embed_type == "text_time": + if self.addition_time_embed_dim is None: + raise ValueError( + f"addition_embed_type {self.addition_embed_type} requires `addition_time_embed_dim` to not be None" + ) + self.add_time_proj = FlaxTimesteps(self.addition_time_embed_dim, self.flip_sin_to_cos, self.freq_shift) + self.add_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype) + else: + raise ValueError(f"addition_embed_type: {self.addition_embed_type} must be None or `text_time`.") + + # down + down_blocks = [] + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(self.down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + if down_block_type == "CrossAttnDownBlock2D": + down_block = FlaxCrossAttnDownBlock2D( + in_channels=input_channel, + out_channels=output_channel, + dropout=self.dropout, + num_layers=self.layers_per_block, + transformer_layers_per_block=transformer_layers_per_block[i], + num_attention_heads=num_attention_heads[i], + add_downsample=not is_final_block, + use_linear_projection=self.use_linear_projection, + only_cross_attention=only_cross_attention[i], + use_memory_efficient_attention=self.use_memory_efficient_attention, + split_head_dim=self.split_head_dim, + dtype=self.dtype, + ) + else: + down_block = FlaxDownBlock2D( + in_channels=input_channel, + out_channels=output_channel, + dropout=self.dropout, + num_layers=self.layers_per_block, + add_downsample=not is_final_block, + dtype=self.dtype, + ) + + down_blocks.append(down_block) + self.down_blocks = down_blocks + + # mid + if self.config.mid_block_type == "UNetMidBlock2DCrossAttn": + self.mid_block = FlaxUNetMidBlock2DCrossAttn( + in_channels=block_out_channels[-1], + dropout=self.dropout, + num_attention_heads=num_attention_heads[-1], + transformer_layers_per_block=transformer_layers_per_block[-1], + use_linear_projection=self.use_linear_projection, + use_memory_efficient_attention=self.use_memory_efficient_attention, + split_head_dim=self.split_head_dim, + dtype=self.dtype, + ) + elif self.config.mid_block_type is None: + self.mid_block = None + else: + raise ValueError(f"Unexpected mid_block_type {self.config.mid_block_type}") + + # up + up_blocks = [] + reversed_block_out_channels = list(reversed(block_out_channels)) + reversed_num_attention_heads = list(reversed(num_attention_heads)) + 
only_cross_attention = list(reversed(only_cross_attention)) + output_channel = reversed_block_out_channels[0] + reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) + for i, up_block_type in enumerate(self.up_block_types): + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + is_final_block = i == len(block_out_channels) - 1 + + if up_block_type == "CrossAttnUpBlock2D": + up_block = FlaxCrossAttnUpBlock2D( + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + num_layers=self.layers_per_block + 1, + transformer_layers_per_block=reversed_transformer_layers_per_block[i], + num_attention_heads=reversed_num_attention_heads[i], + add_upsample=not is_final_block, + dropout=self.dropout, + use_linear_projection=self.use_linear_projection, + only_cross_attention=only_cross_attention[i], + use_memory_efficient_attention=self.use_memory_efficient_attention, + split_head_dim=self.split_head_dim, + dtype=self.dtype, + ) + else: + up_block = FlaxUpBlock2D( + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + num_layers=self.layers_per_block + 1, + add_upsample=not is_final_block, + dropout=self.dropout, + dtype=self.dtype, + ) + + up_blocks.append(up_block) + prev_output_channel = output_channel + self.up_blocks = up_blocks + + # out + self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5) + self.conv_out = nn.Conv( + self.out_channels, + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + def __call__( + self, + sample: jnp.ndarray, + timesteps: Union[jnp.ndarray, float, int], + encoder_hidden_states: jnp.ndarray, + added_cond_kwargs: Optional[Union[Dict, FrozenDict]] = None, + down_block_additional_residuals: Optional[Tuple[jnp.ndarray, ...]] = None, + mid_block_additional_residual: Optional[jnp.ndarray] = None, + return_dict: bool = True, + train: bool = False, + ) -> Union[FlaxUNet2DConditionOutput, Tuple[jnp.ndarray]]: + r""" + Args: + sample (`jnp.ndarray`): (batch, channel, height, width) noisy inputs tensor + timestep (`jnp.ndarray` or `float` or `int`): timesteps + encoder_hidden_states (`jnp.ndarray`): (batch_size, sequence_length, hidden_size) encoder hidden states + added_cond_kwargs: (`dict`, *optional*): + A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that + are passed along to the UNet blocks. + down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*): + A tuple of tensors that if specified are added to the residuals of down unet blocks. + mid_block_additional_residual: (`torch.Tensor`, *optional*): + A tensor that if specified is added to the residual of the middle unet block. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] instead of a + plain tuple. + train (`bool`, *optional*, defaults to `False`): + Use deterministic functions and disable dropout when not training. + + Returns: + [`~models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] or `tuple`: + [`~models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. + When returning a tuple, the first element is the sample tensor. + """ + # 1. 
time + if not isinstance(timesteps, jnp.ndarray): + timesteps = jnp.array([timesteps], dtype=jnp.int32) + elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0: + timesteps = timesteps.astype(dtype=jnp.float32) + timesteps = jnp.expand_dims(timesteps, 0) + + t_emb = self.time_proj(timesteps) + t_emb = self.time_embedding(t_emb) + + # additional embeddings + aug_emb = None + if self.addition_embed_type == "text_time": + if added_cond_kwargs is None: + raise ValueError( + f"Need to provide argument `added_cond_kwargs` for {self.__class__} when using `addition_embed_type={self.addition_embed_type}`" + ) + text_embeds = added_cond_kwargs.get("text_embeds") + if text_embeds is None: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" + ) + time_ids = added_cond_kwargs.get("time_ids") + if time_ids is None: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" + ) + # compute time embeds + time_embeds = self.add_time_proj(jnp.ravel(time_ids)) # (1, 6) => (6,) => (6, 256) + time_embeds = jnp.reshape(time_embeds, (text_embeds.shape[0], -1)) + add_embeds = jnp.concatenate([text_embeds, time_embeds], axis=-1) + aug_emb = self.add_embedding(add_embeds) + + t_emb = t_emb + aug_emb if aug_emb is not None else t_emb + + # 2. pre-process + sample = jnp.transpose(sample, (0, 2, 3, 1)) + sample = self.conv_in(sample) + + # 3. down + down_block_res_samples = (sample,) + for down_block in self.down_blocks: + if isinstance(down_block, FlaxCrossAttnDownBlock2D): + sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train) + else: + sample, res_samples = down_block(sample, t_emb, deterministic=not train) + down_block_res_samples += res_samples + + if down_block_additional_residuals is not None: + new_down_block_res_samples = () + + for down_block_res_sample, down_block_additional_residual in zip( + down_block_res_samples, down_block_additional_residuals + ): + down_block_res_sample += down_block_additional_residual + new_down_block_res_samples += (down_block_res_sample,) + + down_block_res_samples = new_down_block_res_samples + + # 4. mid + if self.mid_block is not None: + sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train) + + if mid_block_additional_residual is not None: + sample += mid_block_additional_residual + + # 5. up + for up_block in self.up_blocks: + res_samples = down_block_res_samples[-(self.layers_per_block + 1) :] + down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)] + if isinstance(up_block, FlaxCrossAttnUpBlock2D): + sample = up_block( + sample, + temb=t_emb, + encoder_hidden_states=encoder_hidden_states, + res_hidden_states_tuple=res_samples, + deterministic=not train, + ) + else: + sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train) + + # 6. 
post-process + sample = self.conv_norm_out(sample) + sample = nn.silu(sample) + sample = self.conv_out(sample) + sample = jnp.transpose(sample, (0, 3, 1, 2)) + + if not return_dict: + return (sample,) + + return FlaxUNet2DConditionOutput(sample=sample) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_3d_blocks.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_3d_blocks.py new file mode 100644 index 000000000..a48f1841c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_3d_blocks.py @@ -0,0 +1,2405 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict, Optional, Tuple, Union + +import torch +from torch import nn + +from ...utils import deprecate, is_torch_version, logging +from ...utils.torch_utils import apply_freeu +from ..attention import Attention +from ..resnet import ( + Downsample2D, + ResnetBlock2D, + SpatioTemporalResBlock, + TemporalConvLayer, + Upsample2D, +) +from ..transformers.dual_transformer_2d import DualTransformer2DModel +from ..transformers.transformer_2d import Transformer2DModel +from ..transformers.transformer_temporal import ( + TransformerSpatioTemporalModel, + TransformerTemporalModel, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def get_down_block( + down_block_type: str, + num_layers: int, + in_channels: int, + out_channels: int, + temb_channels: int, + add_downsample: bool, + resnet_eps: float, + resnet_act_fn: str, + num_attention_heads: int, + resnet_groups: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + downsample_padding: Optional[int] = None, + dual_cross_attention: bool = False, + use_linear_projection: bool = True, + only_cross_attention: bool = False, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + temporal_num_attention_heads: int = 8, + temporal_max_seq_length: int = 32, + transformer_layers_per_block: int = 1, +) -> Union[ + "DownBlock3D", + "CrossAttnDownBlock3D", + "DownBlockMotion", + "CrossAttnDownBlockMotion", + "DownBlockSpatioTemporal", + "CrossAttnDownBlockSpatioTemporal", +]: + if down_block_type == "DownBlock3D": + return DownBlock3D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif down_block_type == "CrossAttnDownBlock3D": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D") + return CrossAttnDownBlock3D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, 
+ resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + if down_block_type == "DownBlockMotion": + return DownBlockMotion( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + resnet_time_scale_shift=resnet_time_scale_shift, + temporal_num_attention_heads=temporal_num_attention_heads, + temporal_max_seq_length=temporal_max_seq_length, + ) + elif down_block_type == "CrossAttnDownBlockMotion": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockMotion") + return CrossAttnDownBlockMotion( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + temporal_num_attention_heads=temporal_num_attention_heads, + temporal_max_seq_length=temporal_max_seq_length, + ) + elif down_block_type == "DownBlockSpatioTemporal": + # added for SDV + return DownBlockSpatioTemporal( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + ) + elif down_block_type == "CrossAttnDownBlockSpatioTemporal": + # added for SDV + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockSpatioTemporal") + return CrossAttnDownBlockSpatioTemporal( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + num_layers=num_layers, + transformer_layers_per_block=transformer_layers_per_block, + add_downsample=add_downsample, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + ) + + raise ValueError(f"{down_block_type} does not exist.") + + +def get_up_block( + up_block_type: str, + num_layers: int, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + add_upsample: bool, + resnet_eps: float, + resnet_act_fn: str, + num_attention_heads: int, + resolution_idx: Optional[int] = None, + resnet_groups: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + dual_cross_attention: bool = False, + use_linear_projection: bool = True, + only_cross_attention: bool = False, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + temporal_num_attention_heads: int = 8, + temporal_cross_attention_dim: Optional[int] = None, + temporal_max_seq_length: int = 32, + transformer_layers_per_block: int = 1, + dropout: float = 0.0, +) -> Union[ + "UpBlock3D", + "CrossAttnUpBlock3D", + 
"UpBlockMotion", + "CrossAttnUpBlockMotion", + "UpBlockSpatioTemporal", + "CrossAttnUpBlockSpatioTemporal", +]: + if up_block_type == "UpBlock3D": + return UpBlock3D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + resolution_idx=resolution_idx, + ) + elif up_block_type == "CrossAttnUpBlock3D": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D") + return CrossAttnUpBlock3D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + resolution_idx=resolution_idx, + ) + if up_block_type == "UpBlockMotion": + return UpBlockMotion( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + resolution_idx=resolution_idx, + temporal_num_attention_heads=temporal_num_attention_heads, + temporal_max_seq_length=temporal_max_seq_length, + ) + elif up_block_type == "CrossAttnUpBlockMotion": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockMotion") + return CrossAttnUpBlockMotion( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + resolution_idx=resolution_idx, + temporal_num_attention_heads=temporal_num_attention_heads, + temporal_max_seq_length=temporal_max_seq_length, + ) + elif up_block_type == "UpBlockSpatioTemporal": + # added for SDV + return UpBlockSpatioTemporal( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + resolution_idx=resolution_idx, + add_upsample=add_upsample, + ) + elif up_block_type == "CrossAttnUpBlockSpatioTemporal": + # added for SDV + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockSpatioTemporal") + return CrossAttnUpBlockSpatioTemporal( + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + num_layers=num_layers, + 
transformer_layers_per_block=transformer_layers_per_block, + add_upsample=add_upsample, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + resolution_idx=resolution_idx, + ) + + raise ValueError(f"{up_block_type} does not exist.") + + +class UNetMidBlock3DCrossAttn(nn.Module): + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + output_scale_factor: float = 1.0, + cross_attention_dim: int = 1280, + dual_cross_attention: bool = False, + use_linear_projection: bool = True, + upcast_attention: bool = False, + ): + super().__init__() + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + + # there is always at least one resnet + resnets = [ + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ] + temp_convs = [ + TemporalConvLayer( + in_channels, + in_channels, + dropout=0.1, + norm_num_groups=resnet_groups, + ) + ] + attentions = [] + temp_attentions = [] + + for _ in range(num_layers): + attentions.append( + Transformer2DModel( + in_channels // num_attention_heads, + num_attention_heads, + in_channels=in_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + ) + ) + temp_attentions.append( + TransformerTemporalModel( + in_channels // num_attention_heads, + num_attention_heads, + in_channels=in_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + temp_convs.append( + TemporalConvLayer( + in_channels, + in_channels, + dropout=0.1, + norm_num_groups=resnet_groups, + ) + ) + + self.resnets = nn.ModuleList(resnets) + self.temp_convs = nn.ModuleList(temp_convs) + self.attentions = nn.ModuleList(attentions) + self.temp_attentions = nn.ModuleList(temp_attentions) + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + num_frames: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + ) -> torch.FloatTensor: + hidden_states = self.resnets[0](hidden_states, temb) + hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames) + for attn, temp_attn, resnet, temp_conv in zip( + self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:] + ): + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + 
hidden_states = temp_attn( + hidden_states, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + hidden_states = resnet(hidden_states, temb) + hidden_states = temp_conv(hidden_states, num_frames=num_frames) + + return hidden_states + + +class CrossAttnDownBlock3D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + output_scale_factor: float = 1.0, + downsample_padding: int = 1, + add_downsample: bool = True, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + ): + super().__init__() + resnets = [] + attentions = [] + temp_attentions = [] + temp_convs = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + temp_convs.append( + TemporalConvLayer( + out_channels, + out_channels, + dropout=0.1, + norm_num_groups=resnet_groups, + ) + ) + attentions.append( + Transformer2DModel( + out_channels // num_attention_heads, + num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + ) + ) + temp_attentions.append( + TransformerTemporalModel( + out_channels // num_attention_heads, + num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + self.resnets = nn.ModuleList(resnets) + self.temp_convs = nn.ModuleList(temp_convs) + self.attentions = nn.ModuleList(attentions) + self.temp_attentions = nn.ModuleList(temp_attentions) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, + use_conv=True, + out_channels=out_channels, + padding=downsample_padding, + name="op", + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + num_frames: int = 1, + cross_attention_kwargs: Dict[str, Any] = None, + ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + # TODO(Patrick, William) - attention mask is not used + output_states = () + + for resnet, temp_conv, attn, temp_attn in zip( + self.resnets, self.temp_convs, self.attentions, self.temp_attentions + ): + hidden_states = resnet(hidden_states, temb) + hidden_states = temp_conv(hidden_states, num_frames=num_frames) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + 
cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + hidden_states = temp_attn( + hidden_states, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + output_states += (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states += (hidden_states,) + + return hidden_states, output_states + + +class DownBlock3D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_downsample: bool = True, + downsample_padding: int = 1, + ): + super().__init__() + resnets = [] + temp_convs = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + temp_convs.append( + TemporalConvLayer( + out_channels, + out_channels, + dropout=0.1, + norm_num_groups=resnet_groups, + ) + ) + + self.resnets = nn.ModuleList(resnets) + self.temp_convs = nn.ModuleList(temp_convs) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, + use_conv=True, + out_channels=out_channels, + padding=downsample_padding, + name="op", + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + num_frames: int = 1, + ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + output_states = () + + for resnet, temp_conv in zip(self.resnets, self.temp_convs): + hidden_states = resnet(hidden_states, temb) + hidden_states = temp_conv(hidden_states, num_frames=num_frames) + + output_states += (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states += (hidden_states,) + + return hidden_states, output_states + + +class CrossAttnUpBlock3D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + resolution_idx: Optional[int] = None, + ): + super().__init__() + resnets = [] + temp_convs = [] + attentions = [] + temp_attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + 
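+ # Each ResNet consumes the upsampling-path features concatenated with the matching
+ # skip connection popped from the down path, hence the summed channel count below.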
resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + temp_convs.append( + TemporalConvLayer( + out_channels, + out_channels, + dropout=0.1, + norm_num_groups=resnet_groups, + ) + ) + attentions.append( + Transformer2DModel( + out_channels // num_attention_heads, + num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + ) + ) + temp_attentions.append( + TransformerTemporalModel( + out_channels // num_attention_heads, + num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + self.resnets = nn.ModuleList(resnets) + self.temp_convs = nn.ModuleList(temp_convs) + self.attentions = nn.ModuleList(attentions) + self.temp_attentions = nn.ModuleList(temp_attentions) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + upsample_size: Optional[int] = None, + attention_mask: Optional[torch.FloatTensor] = None, + num_frames: int = 1, + cross_attention_kwargs: Dict[str, Any] = None, + ) -> torch.FloatTensor: + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + + # TODO(Patrick, William) - attention mask is not used + for resnet, temp_conv, attn, temp_attn in zip( + self.resnets, self.temp_convs, self.attentions, self.temp_attentions + ): + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + hidden_states = resnet(hidden_states, temb) + hidden_states = temp_conv(hidden_states, num_frames=num_frames) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + hidden_states = temp_attn( + hidden_states, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size) + + return hidden_states + + +class UpBlock3D(nn.Module): + def __init__( + self, + in_channels: int, + prev_output_channel: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + 
resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + resolution_idx: Optional[int] = None, + ): + super().__init__() + resnets = [] + temp_convs = [] + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + temp_convs.append( + TemporalConvLayer( + out_channels, + out_channels, + dropout=0.1, + norm_num_groups=resnet_groups, + ) + ) + + self.resnets = nn.ModuleList(resnets) + self.temp_convs = nn.ModuleList(temp_convs) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + upsample_size: Optional[int] = None, + num_frames: int = 1, + ) -> torch.FloatTensor: + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + for resnet, temp_conv in zip(self.resnets, self.temp_convs): + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + hidden_states = resnet(hidden_states, temb) + hidden_states = temp_conv(hidden_states, num_frames=num_frames) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size) + + return hidden_states + + +class DownBlockMotion(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_downsample: bool = True, + downsample_padding: int = 1, + temporal_num_attention_heads: int = 1, + temporal_cross_attention_dim: Optional[int] = None, + temporal_max_seq_length: int = 32, + ): + super().__init__() + resnets = [] + motion_modules = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + 
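+ # Pair every spatial ResNet with a temporal transformer ("motion module") that
+ # attends across frames; its sinusoidal positional embeddings bound the clip
+ # length to temporal_max_seq_length.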
motion_modules.append( + TransformerTemporalModel( + num_attention_heads=temporal_num_attention_heads, + in_channels=out_channels, + norm_num_groups=resnet_groups, + cross_attention_dim=temporal_cross_attention_dim, + attention_bias=False, + activation_fn="geglu", + positional_embeddings="sinusoidal", + num_positional_embeddings=temporal_max_seq_length, + attention_head_dim=out_channels // temporal_num_attention_heads, + ) + ) + + self.resnets = nn.ModuleList(resnets) + self.motion_modules = nn.ModuleList(motion_modules) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, + use_conv=True, + out_channels=out_channels, + padding=downsample_padding, + name="op", + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + num_frames: int = 1, + *args, + **kwargs, + ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + + output_states = () + + blocks = zip(self.resnets, self.motion_modules) + for resnet, motion_module in blocks: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + use_reentrant=False, + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + + else: + hidden_states = resnet(hidden_states, temb) + hidden_states = motion_module(hidden_states, num_frames=num_frames)[0] + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +class CrossAttnDownBlockMotion(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + output_scale_factor: float = 1.0, + downsample_padding: int = 1, + add_downsample: bool = True, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + attention_type: str = "default", + temporal_cross_attention_dim: Optional[int] = None, + temporal_num_attention_heads: int = 8, + temporal_max_seq_length: int = 32, + ): + super().__init__() + resnets = [] + attentions = [] + motion_modules = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + 
in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + if not dual_cross_attention: + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + ) + else: + attentions.append( + DualTransformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + + motion_modules.append( + TransformerTemporalModel( + num_attention_heads=temporal_num_attention_heads, + in_channels=out_channels, + norm_num_groups=resnet_groups, + cross_attention_dim=temporal_cross_attention_dim, + attention_bias=False, + activation_fn="geglu", + positional_embeddings="sinusoidal", + num_positional_embeddings=temporal_max_seq_length, + attention_head_dim=out_channels // temporal_num_attention_heads, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + self.motion_modules = nn.ModuleList(motion_modules) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, + use_conv=True, + out_channels=out_channels, + padding=downsample_padding, + name="op", + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + num_frames: int = 1, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + additional_residuals: Optional[torch.FloatTensor] = None, + ): + if cross_attention_kwargs is not None: + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. 
`scale` will be ignored.") + + output_states = () + + blocks = list(zip(self.resnets, self.attentions, self.motion_modules)) + for i, (resnet, attn, motion_module) in enumerate(blocks): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + else: + hidden_states = resnet(hidden_states, temb) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + hidden_states = motion_module( + hidden_states, + num_frames=num_frames, + )[0] + + # apply additional residuals to the output of the last pair of resnet and attention blocks + if i == len(blocks) - 1 and additional_residuals is not None: + hidden_states = hidden_states + additional_residuals + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +class CrossAttnUpBlockMotion(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + attention_type: str = "default", + temporal_cross_attention_dim: Optional[int] = None, + temporal_num_attention_heads: int = 8, + temporal_max_seq_length: int = 32, + ): + super().__init__() + resnets = [] + attentions = [] + motion_modules = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + if not dual_cross_attention: + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // 
num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + ) + else: + attentions.append( + DualTransformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + motion_modules.append( + TransformerTemporalModel( + num_attention_heads=temporal_num_attention_heads, + in_channels=out_channels, + norm_num_groups=resnet_groups, + cross_attention_dim=temporal_cross_attention_dim, + attention_bias=False, + activation_fn="geglu", + positional_embeddings="sinusoidal", + num_positional_embeddings=temporal_max_seq_length, + attention_head_dim=out_channels // temporal_num_attention_heads, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + self.motion_modules = nn.ModuleList(motion_modules) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + upsample_size: Optional[int] = None, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + num_frames: int = 1, + ) -> torch.FloatTensor: + if cross_attention_kwargs is not None: + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. 
`scale` will be ignored.") + + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + + blocks = zip(self.resnets, self.attentions, self.motion_modules) + for resnet, attn, motion_module in blocks: + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + else: + hidden_states = resnet(hidden_states, temb) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + hidden_states = motion_module( + hidden_states, + num_frames=num_frames, + )[0] + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size) + + return hidden_states + + +class UpBlockMotion(nn.Module): + def __init__( + self, + in_channels: int, + prev_output_channel: int, + out_channels: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + temporal_norm_num_groups: int = 32, + temporal_cross_attention_dim: Optional[int] = None, + temporal_num_attention_heads: int = 8, + temporal_max_seq_length: int = 32, + ): + super().__init__() + resnets = [] + motion_modules = [] + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + motion_modules.append( + TransformerTemporalModel( + num_attention_heads=temporal_num_attention_heads, + in_channels=out_channels, + norm_num_groups=temporal_norm_num_groups, + cross_attention_dim=temporal_cross_attention_dim, + attention_bias=False, + 
activation_fn="geglu", + positional_embeddings="sinusoidal", + num_positional_embeddings=temporal_max_seq_length, + attention_head_dim=out_channels // temporal_num_attention_heads, + ) + ) + + self.resnets = nn.ModuleList(resnets) + self.motion_modules = nn.ModuleList(motion_modules) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + upsample_size=None, + num_frames: int = 1, + *args, + **kwargs, + ) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + + blocks = zip(self.resnets, self.motion_modules) + + for resnet, motion_module in blocks: + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + use_reentrant=False, + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + + else: + hidden_states = resnet(hidden_states, temb) + hidden_states = motion_module(hidden_states, num_frames=num_frames)[0] + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size) + + return hidden_states + + +class UNetMidBlockCrossAttnMotion(nn.Module): + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + output_scale_factor: float = 1.0, + cross_attention_dim: int = 1280, + dual_cross_attention: float = False, + use_linear_projection: float = False, + upcast_attention: float = False, + attention_type: str = "default", + temporal_num_attention_heads: int = 1, + temporal_cross_attention_dim: Optional[int] = None, + temporal_max_seq_length: int = 32, + ): + super().__init__() + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels 
// 4, 32) + + # there is always at least one resnet + resnets = [ + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ] + attentions = [] + motion_modules = [] + + for _ in range(num_layers): + if not dual_cross_attention: + attentions.append( + Transformer2DModel( + num_attention_heads, + in_channels // num_attention_heads, + in_channels=in_channels, + num_layers=transformer_layers_per_block, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + ) + else: + attentions.append( + DualTransformer2DModel( + num_attention_heads, + in_channels // num_attention_heads, + in_channels=in_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + motion_modules.append( + TransformerTemporalModel( + num_attention_heads=temporal_num_attention_heads, + attention_head_dim=in_channels // temporal_num_attention_heads, + in_channels=in_channels, + norm_num_groups=resnet_groups, + cross_attention_dim=temporal_cross_attention_dim, + attention_bias=False, + positional_embeddings="sinusoidal", + num_positional_embeddings=temporal_max_seq_length, + activation_fn="geglu", + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + self.motion_modules = nn.ModuleList(motion_modules) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + num_frames: int = 1, + ) -> torch.FloatTensor: + if cross_attention_kwargs is not None: + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. 
`scale` will be ignored.") + + hidden_states = self.resnets[0](hidden_states, temb) + + blocks = zip(self.attentions, self.resnets[1:], self.motion_modules) + for attn, resnet, motion_module in blocks: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(motion_module), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + else: + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + hidden_states = motion_module( + hidden_states, + num_frames=num_frames, + )[0] + hidden_states = resnet(hidden_states, temb) + + return hidden_states + + +class MidBlockTemporalDecoder(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + attention_head_dim: int = 512, + num_layers: int = 1, + upcast_attention: bool = False, + ): + super().__init__() + + resnets = [] + attentions = [] + for i in range(num_layers): + input_channels = in_channels if i == 0 else out_channels + resnets.append( + SpatioTemporalResBlock( + in_channels=input_channels, + out_channels=out_channels, + temb_channels=None, + eps=1e-6, + temporal_eps=1e-5, + merge_factor=0.0, + merge_strategy="learned", + switch_spatial_to_temporal_mix=True, + ) + ) + + attentions.append( + Attention( + query_dim=in_channels, + heads=in_channels // attention_head_dim, + dim_head=attention_head_dim, + eps=1e-6, + upcast_attention=upcast_attention, + norm_num_groups=32, + bias=True, + residual_connection=True, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + def forward( + self, + hidden_states: torch.FloatTensor, + image_only_indicator: torch.FloatTensor, + ): + hidden_states = self.resnets[0]( + hidden_states, + image_only_indicator=image_only_indicator, + ) + for resnet, attn in zip(self.resnets[1:], self.attentions): + hidden_states = attn(hidden_states) + hidden_states = resnet( + hidden_states, + image_only_indicator=image_only_indicator, + ) + + return hidden_states + + +class UpBlockTemporalDecoder(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + num_layers: int = 1, + add_upsample: bool = True, + ): + super().__init__() + resnets = [] + for i in range(num_layers): + input_channels = in_channels if i == 0 else out_channels + + resnets.append( + SpatioTemporalResBlock( + in_channels=input_channels, + out_channels=out_channels, + temb_channels=None, + eps=1e-6, + temporal_eps=1e-5, + merge_factor=0.0, + merge_strategy="learned", + switch_spatial_to_temporal_mix=True, + ) + ) + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, 
use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + def forward( + self, + hidden_states: torch.FloatTensor, + image_only_indicator: torch.FloatTensor, + ) -> torch.FloatTensor: + for resnet in self.resnets: + hidden_states = resnet( + hidden_states, + image_only_indicator=image_only_indicator, + ) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states) + + return hidden_states + + +class UNetMidBlockSpatioTemporal(nn.Module): + def __init__( + self, + in_channels: int, + temb_channels: int, + num_layers: int = 1, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + ): + super().__init__() + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + # support for variable transformer layers per block + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * num_layers + + # there is always at least one resnet + resnets = [ + SpatioTemporalResBlock( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=1e-5, + ) + ] + attentions = [] + + for i in range(num_layers): + attentions.append( + TransformerSpatioTemporalModel( + num_attention_heads, + in_channels // num_attention_heads, + in_channels=in_channels, + num_layers=transformer_layers_per_block[i], + cross_attention_dim=cross_attention_dim, + ) + ) + + resnets.append( + SpatioTemporalResBlock( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=1e-5, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + image_only_indicator: Optional[torch.Tensor] = None, + ) -> torch.FloatTensor: + hidden_states = self.resnets[0]( + hidden_states, + temb, + image_only_indicator=image_only_indicator, + ) + + for attn, resnet in zip(self.attentions, self.resnets[1:]): + if self.training and self.gradient_checkpointing: # TODO + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + image_only_indicator=image_only_indicator, + return_dict=False, + )[0] + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + image_only_indicator, + **ckpt_kwargs, + ) + else: + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + image_only_indicator=image_only_indicator, + return_dict=False, + )[0] + hidden_states = resnet( + hidden_states, + temb, + image_only_indicator=image_only_indicator, + ) + + return hidden_states + + +class DownBlockSpatioTemporal(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + num_layers: int = 1, + add_downsample: bool = True, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + 
SpatioTemporalResBlock( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=1e-5, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, + use_conv=True, + out_channels=out_channels, + name="op", + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + image_only_indicator: Optional[torch.Tensor] = None, + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + output_states = () + for resnet in self.resnets: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + image_only_indicator, + use_reentrant=False, + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + image_only_indicator, + ) + else: + hidden_states = resnet( + hidden_states, + temb, + image_only_indicator=image_only_indicator, + ) + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +class CrossAttnDownBlockSpatioTemporal(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + num_layers: int = 1, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + add_downsample: bool = True, + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * num_layers + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + SpatioTemporalResBlock( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=1e-6, + ) + ) + attentions.append( + TransformerSpatioTemporalModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block[i], + cross_attention_dim=cross_attention_dim, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, + use_conv=True, + out_channels=out_channels, + padding=1, + name="op", + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + image_only_indicator: Optional[torch.Tensor] = None, + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + output_states = () + + blocks = list(zip(self.resnets, self.attentions)) + for resnet, attn in blocks: + if self.training and self.gradient_checkpointing: # TODO + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if 
return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + image_only_indicator, + **ckpt_kwargs, + ) + + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + image_only_indicator=image_only_indicator, + return_dict=False, + )[0] + else: + hidden_states = resnet( + hidden_states, + temb, + image_only_indicator=image_only_indicator, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + image_only_indicator=image_only_indicator, + return_dict=False, + )[0] + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +class UpBlockSpatioTemporal(nn.Module): + def __init__( + self, + in_channels: int, + prev_output_channel: int, + out_channels: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + num_layers: int = 1, + resnet_eps: float = 1e-6, + add_upsample: bool = True, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + SpatioTemporalResBlock( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + image_only_indicator: Optional[torch.Tensor] = None, + ) -> torch.FloatTensor: + for resnet in self.resnets: + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + image_only_indicator, + use_reentrant=False, + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + image_only_indicator, + ) + else: + hidden_states = resnet( + hidden_states, + temb, + image_only_indicator=image_only_indicator, + ) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states) + + return hidden_states + + +class CrossAttnUpBlockSpatioTemporal(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + num_layers: int = 1, + 
transformer_layers_per_block: Union[int, Tuple[int]] = 1, + resnet_eps: float = 1e-6, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + add_upsample: bool = True, + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * num_layers + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + SpatioTemporalResBlock( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + ) + ) + attentions.append( + TransformerSpatioTemporalModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block[i], + cross_attention_dim=cross_attention_dim, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + image_only_indicator: Optional[torch.Tensor] = None, + ) -> torch.FloatTensor: + for resnet, attn in zip(self.resnets, self.attentions): + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: # TODO + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + image_only_indicator, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + image_only_indicator=image_only_indicator, + return_dict=False, + )[0] + else: + hidden_states = resnet( + hidden_states, + temb, + image_only_indicator=image_only_indicator, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + image_only_indicator=image_only_indicator, + return_dict=False, + )[0] + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states) + + return hidden_states diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_3d_condition.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_3d_condition.py new file mode 100644 index 000000000..b7641a96a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_3d_condition.py @@ -0,0 +1,753 @@ +# Copyright 2024 Alibaba DAMO-VILAB and 
The HuggingFace Team. All rights reserved. +# Copyright 2024 The ModelScope Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import UNet2DConditionLoadersMixin +from ...utils import BaseOutput, deprecate, logging +from ..activations import get_activation +from ..attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + Attention, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from ..embeddings import TimestepEmbedding, Timesteps +from ..modeling_utils import ModelMixin +from ..transformers.transformer_temporal import TransformerTemporalModel +from .unet_3d_blocks import ( + CrossAttnDownBlock3D, + CrossAttnUpBlock3D, + DownBlock3D, + UNetMidBlock3DCrossAttn, + UpBlock3D, + get_down_block, + get_up_block, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class UNet3DConditionOutput(BaseOutput): + """ + The output of [`UNet3DConditionModel`]. + + Args: + sample (`torch.FloatTensor` of shape `(batch_size, num_channels, num_frames, height, width)`): + The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. + """ + + sample: torch.FloatTensor + + +class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): + r""" + A conditional 3D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample + shaped output. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): + Height and width of input/output sample. + in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. + out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. + down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D")`): + The tuple of downsample blocks to use. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D")`): + The tuple of upsample blocks to use. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. + downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. + mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. 
+ act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. + norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. + If `None`, normalization and activation layers is skipped in post-processing. + norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. + cross_attention_dim (`int`, *optional*, defaults to 1024): The dimension of the cross attention features. + attention_head_dim (`int`, *optional*, defaults to 64): The dimension of the attention heads. + num_attention_heads (`int`, *optional*): The number of attention heads. + """ + + _supports_gradient_checkpointing = False + + @register_to_config + def __init__( + self, + sample_size: Optional[int] = None, + in_channels: int = 4, + out_channels: int = 4, + down_block_types: Tuple[str, ...] = ( + "CrossAttnDownBlock3D", + "CrossAttnDownBlock3D", + "CrossAttnDownBlock3D", + "DownBlock3D", + ), + up_block_types: Tuple[str, ...] = ( + "UpBlock3D", + "CrossAttnUpBlock3D", + "CrossAttnUpBlock3D", + "CrossAttnUpBlock3D", + ), + block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), + layers_per_block: int = 2, + downsample_padding: int = 1, + mid_block_scale_factor: float = 1, + act_fn: str = "silu", + norm_num_groups: Optional[int] = 32, + norm_eps: float = 1e-5, + cross_attention_dim: int = 1024, + attention_head_dim: Union[int, Tuple[int]] = 64, + num_attention_heads: Optional[Union[int, Tuple[int]]] = None, + ): + super().__init__() + + self.sample_size = sample_size + + if num_attention_heads is not None: + raise NotImplementedError( + "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." + ) + + # If `num_attention_heads` is not defined (which is the case for most models) + # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. + # The reason for this behavior is to correct for incorrectly named variables that were introduced + # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 + # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking + # which is why we correct for the naming here. + num_attention_heads = num_attention_heads or attention_head_dim + + # Check inputs + if len(down_block_types) != len(up_block_types): + raise ValueError( + f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." + ) + + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." 
+ ) + + # input + conv_in_kernel = 3 + conv_out_kernel = 3 + conv_in_padding = (conv_in_kernel - 1) // 2 + self.conv_in = nn.Conv2d( + in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding + ) + + # time + time_embed_dim = block_out_channels[0] * 4 + self.time_proj = Timesteps(block_out_channels[0], True, 0) + timestep_input_dim = block_out_channels[0] + + self.time_embedding = TimestepEmbedding( + timestep_input_dim, + time_embed_dim, + act_fn=act_fn, + ) + + self.transformer_in = TransformerTemporalModel( + num_attention_heads=8, + attention_head_dim=attention_head_dim, + in_channels=block_out_channels[0], + num_layers=1, + norm_num_groups=norm_num_groups, + ) + + # class embedding + self.down_blocks = nn.ModuleList([]) + self.up_blocks = nn.ModuleList([]) + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(down_block_types) + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + in_channels=input_channel, + out_channels=output_channel, + temb_channels=time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads[i], + downsample_padding=downsample_padding, + dual_cross_attention=False, + ) + self.down_blocks.append(down_block) + + # mid + self.mid_block = UNetMidBlock3DCrossAttn( + in_channels=block_out_channels[-1], + temb_channels=time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads[-1], + resnet_groups=norm_num_groups, + dual_cross_attention=False, + ) + + # count how many layers upsample the images + self.num_upsamplers = 0 + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + reversed_num_attention_heads = list(reversed(num_attention_heads)) + + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + is_final_block = i == len(block_out_channels) - 1 + + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + # add upsample block for all BUT final layer + if not is_final_block: + add_upsample = True + self.num_upsamplers += 1 + else: + add_upsample = False + + up_block = get_up_block( + up_block_type, + num_layers=layers_per_block + 1, + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=time_embed_dim, + add_upsample=add_upsample, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=reversed_num_attention_heads[i], + dual_cross_attention=False, + resolution_idx=i, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + if norm_num_groups is not None: + self.conv_norm_out = nn.GroupNorm( + num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps + ) + self.conv_act = get_activation("silu") + else: + self.conv_norm_out = None + self.conv_act = None + + 
conv_out_padding = (conv_out_kernel - 1) // 2 + self.conv_out = nn.Conv2d( + block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding + ) + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice + def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None: + r""" + Enable sliced attention computation. + + When this option is enabled, the attention module splits the input tensor in slices to compute attention in + several steps. This is useful for saving some memory in exchange for a small decrease in speed. + + Args: + slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): + When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If + `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is + provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` + must be a multiple of `slice_size`. + """ + sliceable_head_dims = [] + + def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): + if hasattr(module, "set_attention_slice"): + sliceable_head_dims.append(module.sliceable_head_dim) + + for child in module.children(): + fn_recursive_retrieve_sliceable_dims(child) + + # retrieve number of attention layers + for module in self.children(): + fn_recursive_retrieve_sliceable_dims(module) + + num_sliceable_layers = len(sliceable_head_dims) + + if slice_size == "auto": + # half the attention head size is usually a good trade-off between + # speed and memory + slice_size = [dim // 2 for dim in sliceable_head_dims] + elif slice_size == "max": + # make smallest slice possible + slice_size = num_sliceable_layers * [1] + + slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size + + if len(slice_size) != len(sliceable_head_dims): + raise ValueError( + f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" + f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." + ) + + for i in range(len(slice_size)): + size = slice_size[i] + dim = sliceable_head_dims[i] + if size is not None and size > dim: + raise ValueError(f"size {size} has to be smaller or equal to {dim}.") + + # Recursively walk through all the children. 
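+        # Editor's illustration (hypothetical dims, not part of the upstream file):
+        # if a model exposed sliceable head dims [8, 16], the branches above give
+        #     slice_size == "auto" -> [4, 8]
+        #     slice_size == "max"  -> [1, 1]
+        #     slice_size == 2      -> [2, 2]
+        # and the reversed copy built below is consumed right-to-left via .pop().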
+ # Any children which exposes the set_attention_slice method + # gets the message + def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): + if hasattr(module, "set_attention_slice"): + module.set_attention_slice(slice_size.pop()) + + for child in module.children(): + fn_recursive_set_attention_slice(child, slice_size) + + reversed_slice_size = list(reversed(slice_size)) + for module in self.children(): + fn_recursive_set_attention_slice(module, reversed_slice_size) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None: + """ + Sets the attention processor to use [feed forward + chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers). + + Parameters: + chunk_size (`int`, *optional*): + The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually + over each tensor of dim=`dim`. + dim (`int`, *optional*, defaults to `0`): + The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch) + or dim=1 (sequence length). 
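+
+        Example (editor's sketch, not part of the upstream docstring; `unet` is assumed to be an
+        instance of this model):
+
+            unet.enable_forward_chunking(chunk_size=2, dim=1)  # chunk feed-forwards along the sequence axis
+            # ... run inference ...
+            unet.disable_forward_chunking()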
+ """ + if dim not in [0, 1]: + raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}") + + # By default chunk size is 1 + chunk_size = chunk_size or 1 + + def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): + if hasattr(module, "set_chunk_feed_forward"): + module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) + + for child in module.children(): + fn_recursive_feed_forward(child, chunk_size, dim) + + for module in self.children(): + fn_recursive_feed_forward(module, chunk_size, dim) + + def disable_forward_chunking(self): + def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): + if hasattr(module, "set_chunk_feed_forward"): + module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) + + for child in module.children(): + fn_recursive_feed_forward(child, chunk_size, dim) + + for module in self.children(): + fn_recursive_feed_forward(module, None, 0) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + def _set_gradient_checkpointing(self, module, value: bool = False) -> None: + if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): + module.gradient_checkpointing = value + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.enable_freeu + def enable_freeu(self, s1, s2, b1, b2): + r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497. + + The suffixes after the scaling factors represent the stage blocks where they are being applied. + + Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that + are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. + + Args: + s1 (`float`): + Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to + mitigate the "oversmoothing effect" in the enhanced denoising process. + s2 (`float`): + Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to + mitigate the "oversmoothing effect" in the enhanced denoising process. + b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. + b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. 
+ """ + for i, upsample_block in enumerate(self.up_blocks): + setattr(upsample_block, "s1", s1) + setattr(upsample_block, "s2", s2) + setattr(upsample_block, "b1", b1) + setattr(upsample_block, "b2", b2) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.disable_freeu + def disable_freeu(self): + """Disables the FreeU mechanism.""" + freeu_keys = {"s1", "s2", "b1", "b2"} + for i, upsample_block in enumerate(self.up_blocks): + for k in freeu_keys: + if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: + setattr(upsample_block, k, None) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections + def fuse_qkv_projections(self): + """ + Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, + key, value) are fused. For cross-attention modules, key and value projection matrices are fused. + + + + This API is 🧪 experimental. + + + """ + self.original_attn_processors = None + + for _, attn_processor in self.attn_processors.items(): + if "Added" in str(attn_processor.__class__.__name__): + raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") + + self.original_attn_processors = self.attn_processors + + for module in self.modules(): + if isinstance(module, Attention): + module.fuse_projections(fuse=True) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections + def unfuse_qkv_projections(self): + """Disables the fused QKV projection if enabled. + + + + This API is 🧪 experimental. + + + + """ + if self.original_attn_processors is not None: + self.set_attn_processor(self.original_attn_processors) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unload_lora + def unload_lora(self): + """Unloads LoRA weights.""" + deprecate( + "unload_lora", + "0.28.0", + "Calling `unload_lora()` is deprecated and will be removed in a future version. Please install `peft` and then call `disable_adapters().", + ) + for module in self.modules(): + if hasattr(module, "set_lora_layer"): + module.set_lora_layer(None) + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, + mid_block_additional_residual: Optional[torch.Tensor] = None, + return_dict: bool = True, + ) -> Union[UNet3DConditionOutput, Tuple[torch.FloatTensor]]: + r""" + The [`UNet3DConditionModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor with the following shape `(batch, num_channels, num_frames, height, width`. + timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. + encoder_hidden_states (`torch.FloatTensor`): + The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. + class_labels (`torch.Tensor`, *optional*, defaults to `None`): + Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. + timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`): + Conditional embeddings for timestep. 
If provided, the embeddings will be summed with the samples passed + through the `self.time_embedding` layer to obtain the timestep embeddings. + attention_mask (`torch.Tensor`, *optional*, defaults to `None`): + An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask + is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large + negative values to the attention scores corresponding to "discard" tokens. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*): + A tuple of tensors that if specified are added to the residuals of down unet blocks. + mid_block_additional_residual: (`torch.Tensor`, *optional*): + A tensor that if specified is added to the residual of the middle unet block. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unet_3d_condition.UNet3DConditionOutput`] instead of a plain + tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttnProcessor`]. + + Returns: + [`~models.unet_3d_condition.UNet3DConditionOutput`] or `tuple`: + If `return_dict` is True, an [`~models.unet_3d_condition.UNet3DConditionOutput`] is returned, otherwise + a `tuple` is returned where the first element is the sample tensor. + """ + # By default samples have to be AT least a multiple of the overall upsampling factor. + # The overall upsampling factor is equal to 2 ** (# num of upsampling layears). + # However, the upsampling interpolation output size can be forced to fit any upsampling size + # on the fly if necessary. + default_overall_up_factor = 2**self.num_upsamplers + + # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` + forward_upsample_size = False + upsample_size = None + + if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): + logger.info("Forward upsample size to force interpolation output size.") + forward_upsample_size = True + + # prepare attention_mask + if attention_mask is not None: + attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1) + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = sample.device.type == "mps" + if isinstance(timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + num_frames = sample.shape[2] + timesteps = timesteps.expand(sample.shape[0]) + + t_emb = self.time_proj(timesteps) + + # timesteps does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. 
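+        # Editor's shape note (illustrative, assuming `sample` is (B, C, F, H, W)): after the
+        # cast below, `emb` is (B, time_embed_dim); the repeat_interleave that follows expands
+        # it to (B * F, time_embed_dim), so the frame-flattened batch produced in the
+        # pre-processing step sees one timestep embedding per frame.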
+ t_emb = t_emb.to(dtype=self.dtype) + + emb = self.time_embedding(t_emb, timestep_cond) + emb = emb.repeat_interleave(repeats=num_frames, dim=0) + encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0) + + # 2. pre-process + sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) + sample = self.conv_in(sample) + + sample = self.transformer_in( + sample, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # 3. down + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames) + + down_block_res_samples += res_samples + + if down_block_additional_residuals is not None: + new_down_block_res_samples = () + + for down_block_res_sample, down_block_additional_residual in zip( + down_block_res_samples, down_block_additional_residuals + ): + down_block_res_sample = down_block_res_sample + down_block_additional_residual + new_down_block_res_samples += (down_block_res_sample,) + + down_block_res_samples = new_down_block_res_samples + + # 4. mid + if self.mid_block is not None: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + ) + + if mid_block_additional_residual is not None: + sample = sample + mid_block_additional_residual + + # 5. up + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block and forward_upsample_size: + upsample_size = down_block_res_samples[-1].shape[2:] + + if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + upsample_size=upsample_size, + attention_mask=attention_mask, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + upsample_size=upsample_size, + num_frames=num_frames, + ) + + # 6. 
post-process + if self.conv_norm_out: + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + + sample = self.conv_out(sample) + + # reshape to (batch, channel, framerate, width, height) + sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4) + + if not return_dict: + return (sample,) + + return UNet3DConditionOutput(sample=sample) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_i2vgen_xl.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_i2vgen_xl.py new file mode 100644 index 000000000..5c5c6a2cc --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_i2vgen_xl.py @@ -0,0 +1,724 @@ +# Copyright 2024 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import UNet2DConditionLoadersMixin +from ...utils import logging +from ..activations import get_activation +from ..attention import Attention, FeedForward +from ..attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from ..embeddings import TimestepEmbedding, Timesteps +from ..modeling_utils import ModelMixin +from ..transformers.transformer_temporal import TransformerTemporalModel +from .unet_3d_blocks import ( + CrossAttnDownBlock3D, + CrossAttnUpBlock3D, + DownBlock3D, + UNetMidBlock3DCrossAttn, + UpBlock3D, + get_down_block, + get_up_block, +) +from .unet_3d_condition import UNet3DConditionOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class I2VGenXLTransformerTemporalEncoder(nn.Module): + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + activation_fn: str = "geglu", + upcast_attention: bool = False, + ff_inner_dim: Optional[int] = None, + dropout: int = 0.0, + ): + super().__init__() + self.norm1 = nn.LayerNorm(dim, elementwise_affine=True, eps=1e-5) + self.attn1 = Attention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=False, + upcast_attention=upcast_attention, + out_bias=True, + ) + self.ff = FeedForward( + dim, + dropout=dropout, + activation_fn=activation_fn, + final_dropout=False, + inner_dim=ff_inner_dim, + bias=True, + ) + + def forward( + self, + hidden_states: torch.FloatTensor, + ) -> torch.FloatTensor: + norm_hidden_states = self.norm1(hidden_states) + attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) + hidden_states = attn_output + hidden_states + if hidden_states.ndim == 4: + hidden_states = hidden_states.squeeze(1) + + ff_output = self.ff(hidden_states) + hidden_states = 
ff_output + hidden_states + if hidden_states.ndim == 4: + hidden_states = hidden_states.squeeze(1) + + return hidden_states + + +class I2VGenXLUNet(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): + r""" + I2VGenXL UNet. It is a conditional 3D UNet model that takes a noisy sample, conditional state, and a timestep + and returns a sample-shaped output. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): + Height and width of input/output sample. + in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. + out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. + down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): + The tuple of downsample blocks to use. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): + The tuple of upsample blocks to use. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. + norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. + If `None`, normalization and activation layers is skipped in post-processing. + cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features. + attention_head_dim (`int`, *optional*, defaults to 64): Attention head dim. + num_attention_heads (`int`, *optional*): The number of attention heads. + """ + + _supports_gradient_checkpointing = False + + @register_to_config + def __init__( + self, + sample_size: Optional[int] = None, + in_channels: int = 4, + out_channels: int = 4, + down_block_types: Tuple[str, ...] = ( + "CrossAttnDownBlock3D", + "CrossAttnDownBlock3D", + "CrossAttnDownBlock3D", + "DownBlock3D", + ), + up_block_types: Tuple[str, ...] = ( + "UpBlock3D", + "CrossAttnUpBlock3D", + "CrossAttnUpBlock3D", + "CrossAttnUpBlock3D", + ), + block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), + layers_per_block: int = 2, + norm_num_groups: Optional[int] = 32, + cross_attention_dim: int = 1024, + attention_head_dim: Union[int, Tuple[int]] = 64, + num_attention_heads: Optional[Union[int, Tuple[int]]] = None, + ): + super().__init__() + + # When we first integrated the UNet into the library, we didn't have `attention_head_dim`. As a consequence + # of that, we used `num_attention_heads` for arguments that actually denote attention head dimension. This + # is why we ignore `num_attention_heads` and calculate it from `attention_head_dims` below. + # This is still an incorrect way of calculating `num_attention_heads` but we need to stick to it + # without running proper depcrecation cycles for the {down,mid,up} blocks which are a + # part of the public API. + num_attention_heads = attention_head_dim + + # Check inputs + if len(down_block_types) != len(up_block_types): + raise ValueError( + f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." 
+ ) + + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." + ) + + # input + self.conv_in = nn.Conv2d(in_channels + in_channels, block_out_channels[0], kernel_size=3, padding=1) + + self.transformer_in = TransformerTemporalModel( + num_attention_heads=8, + attention_head_dim=num_attention_heads, + in_channels=block_out_channels[0], + num_layers=1, + norm_num_groups=norm_num_groups, + ) + + # image embedding + self.image_latents_proj_in = nn.Sequential( + nn.Conv2d(4, in_channels * 4, 3, padding=1), + nn.SiLU(), + nn.Conv2d(in_channels * 4, in_channels * 4, 3, stride=1, padding=1), + nn.SiLU(), + nn.Conv2d(in_channels * 4, in_channels, 3, stride=1, padding=1), + ) + self.image_latents_temporal_encoder = I2VGenXLTransformerTemporalEncoder( + dim=in_channels, + num_attention_heads=2, + ff_inner_dim=in_channels * 4, + attention_head_dim=in_channels, + activation_fn="gelu", + ) + self.image_latents_context_embedding = nn.Sequential( + nn.Conv2d(4, in_channels * 8, 3, padding=1), + nn.SiLU(), + nn.AdaptiveAvgPool2d((32, 32)), + nn.Conv2d(in_channels * 8, in_channels * 16, 3, stride=2, padding=1), + nn.SiLU(), + nn.Conv2d(in_channels * 16, cross_attention_dim, 3, stride=2, padding=1), + ) + + # other embeddings -- time, context, fps, etc. + time_embed_dim = block_out_channels[0] * 4 + self.time_proj = Timesteps(block_out_channels[0], True, 0) + timestep_input_dim = block_out_channels[0] + + self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn="silu") + self.context_embedding = nn.Sequential( + nn.Linear(cross_attention_dim, time_embed_dim), + nn.SiLU(), + nn.Linear(time_embed_dim, cross_attention_dim * in_channels), + ) + self.fps_embedding = nn.Sequential( + nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(), nn.Linear(time_embed_dim, time_embed_dim) + ) + + # blocks + self.down_blocks = nn.ModuleList([]) + self.up_blocks = nn.ModuleList([]) + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(down_block_types) + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + in_channels=input_channel, + out_channels=output_channel, + temb_channels=time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=1e-05, + resnet_act_fn="silu", + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads[i], + downsample_padding=1, + dual_cross_attention=False, + ) + self.down_blocks.append(down_block) + + # mid + self.mid_block = UNetMidBlock3DCrossAttn( + in_channels=block_out_channels[-1], + temb_channels=time_embed_dim, + resnet_eps=1e-05, + resnet_act_fn="silu", + output_scale_factor=1, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads[-1], + resnet_groups=norm_num_groups, + 
dual_cross_attention=False, + ) + + # count how many layers upsample the images + self.num_upsamplers = 0 + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + reversed_num_attention_heads = list(reversed(num_attention_heads)) + + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + is_final_block = i == len(block_out_channels) - 1 + + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + # add upsample block for all BUT final layer + if not is_final_block: + add_upsample = True + self.num_upsamplers += 1 + else: + add_upsample = False + + up_block = get_up_block( + up_block_type, + num_layers=layers_per_block + 1, + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=time_embed_dim, + add_upsample=add_upsample, + resnet_eps=1e-05, + resnet_act_fn="silu", + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=reversed_num_attention_heads[i], + dual_cross_attention=False, + resolution_idx=i, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-05) + self.conv_act = get_activation("silu") + self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1) + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
+ ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking + def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None: + """ + Sets the attention processor to use [feed forward + chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers). + + Parameters: + chunk_size (`int`, *optional*): + The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually + over each tensor of dim=`dim`. + dim (`int`, *optional*, defaults to `0`): + The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch) + or dim=1 (sequence length). + """ + if dim not in [0, 1]: + raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}") + + # By default chunk size is 1 + chunk_size = chunk_size or 1 + + def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): + if hasattr(module, "set_chunk_feed_forward"): + module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) + + for child in module.children(): + fn_recursive_feed_forward(child, chunk_size, dim) + + for module in self.children(): + fn_recursive_feed_forward(module, chunk_size, dim) + + # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.disable_forward_chunking + def disable_forward_chunking(self): + def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): + if hasattr(module, "set_chunk_feed_forward"): + module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) + + for child in module.children(): + fn_recursive_feed_forward(child, chunk_size, dim) + + for module in self.children(): + fn_recursive_feed_forward(module, None, 0) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel._set_gradient_checkpointing + def _set_gradient_checkpointing(self, module, value: bool = False) -> None: + if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): + module.gradient_checkpointing = value + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.enable_freeu + def enable_freeu(self, s1, s2, b1, b2): + r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497. 
+ + The suffixes after the scaling factors represent the stage blocks where they are being applied. + + Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that + are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. + + Args: + s1 (`float`): + Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to + mitigate the "oversmoothing effect" in the enhanced denoising process. + s2 (`float`): + Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to + mitigate the "oversmoothing effect" in the enhanced denoising process. + b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. + b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. + """ + for i, upsample_block in enumerate(self.up_blocks): + setattr(upsample_block, "s1", s1) + setattr(upsample_block, "s2", s2) + setattr(upsample_block, "b1", b1) + setattr(upsample_block, "b2", b2) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.disable_freeu + def disable_freeu(self): + """Disables the FreeU mechanism.""" + freeu_keys = {"s1", "s2", "b1", "b2"} + for i, upsample_block in enumerate(self.up_blocks): + for k in freeu_keys: + if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: + setattr(upsample_block, k, None) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections + def fuse_qkv_projections(self): + """ + Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, + key, value) are fused. For cross-attention modules, key and value projection matrices are fused. + + + + This API is 🧪 experimental. + + + """ + self.original_attn_processors = None + + for _, attn_processor in self.attn_processors.items(): + if "Added" in str(attn_processor.__class__.__name__): + raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") + + self.original_attn_processors = self.attn_processors + + for module in self.modules(): + if isinstance(module, Attention): + module.fuse_projections(fuse=True) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections + def unfuse_qkv_projections(self): + """Disables the fused QKV projection if enabled. + + + + This API is 🧪 experimental. + + + + """ + if self.original_attn_processors is not None: + self.set_attn_processor(self.original_attn_processors) + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + fps: torch.Tensor, + image_latents: torch.Tensor, + image_embeddings: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + return_dict: bool = True, + ) -> Union[UNet3DConditionOutput, Tuple[torch.FloatTensor]]: + r""" + The [`I2VGenXLUNet`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor with the following shape `(batch, num_frames, channel, height, width`. + timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. + fps (`torch.Tensor`): Frames per second for the video being generated. Used as a "micro-condition". + image_latents (`torch.FloatTensor`): Image encodings from the VAE. 
+ image_embeddings (`torch.FloatTensor`): Projection embeddings of the conditioning image computed with a vision encoder. + encoder_hidden_states (`torch.FloatTensor`): + The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unet_3d_condition.UNet3DConditionOutput`] instead of a plain + tuple. + + Returns: + [`~models.unet_3d_condition.UNet3DConditionOutput`] or `tuple`: + If `return_dict` is True, an [`~models.unet_3d_condition.UNet3DConditionOutput`] is returned, otherwise + a `tuple` is returned where the first element is the sample tensor. + """ + batch_size, channels, num_frames, height, width = sample.shape + + # By default samples have to be AT least a multiple of the overall upsampling factor. + # The overall upsampling factor is equal to 2 ** (# num of upsampling layears). + # However, the upsampling interpolation output size can be forced to fit any upsampling size + # on the fly if necessary. + default_overall_up_factor = 2**self.num_upsamplers + + # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` + forward_upsample_size = False + upsample_size = None + + if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): + logger.info("Forward upsample size to force interpolation output size.") + forward_upsample_size = True + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. So try to pass `timesteps` as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = sample.device.type == "mps" + if isinstance(timesteps, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(sample.shape[0]) + t_emb = self.time_proj(timesteps) + + # timesteps does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. + t_emb = t_emb.to(dtype=self.dtype) + t_emb = self.time_embedding(t_emb, timestep_cond) + + # 2. FPS + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + fps = fps.expand(fps.shape[0]) + fps_emb = self.fps_embedding(self.time_proj(fps).to(dtype=self.dtype)) + + # 3. time + FPS embeddings. + emb = t_emb + fps_emb + emb = emb.repeat_interleave(repeats=num_frames, dim=0) + + # 4. context embeddings. + # The context embeddings consist of both text embeddings from the input prompt + # AND the image embeddings from the input image. For images, both VAE encodings + # and the CLIP image embeddings are incorporated. + # So the final `context_embeddings` becomes the query for cross-attention. 
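+        # Editor's sketch (illustrative, not from the upstream file): with the default config the
+        # concatenations below lay the context out along dim=1 roughly as
+        #     [ text tokens | 8*8 image-latent context tokens | in_channels image tokens ]
+        # i.e. roughly torch.cat([encoder_hidden_states, image_latents_context_embs, image_emb], dim=1),
+        # with every slice living in `cross_attention_dim` features, then repeated once per frame.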
+ context_emb = sample.new_zeros(batch_size, 0, self.config.cross_attention_dim) + context_emb = torch.cat([context_emb, encoder_hidden_states], dim=1) + + image_latents_for_context_embds = image_latents[:, :, :1, :] + image_latents_context_embs = image_latents_for_context_embds.permute(0, 2, 1, 3, 4).reshape( + image_latents_for_context_embds.shape[0] * image_latents_for_context_embds.shape[2], + image_latents_for_context_embds.shape[1], + image_latents_for_context_embds.shape[3], + image_latents_for_context_embds.shape[4], + ) + image_latents_context_embs = self.image_latents_context_embedding(image_latents_context_embs) + + _batch_size, _channels, _height, _width = image_latents_context_embs.shape + image_latents_context_embs = image_latents_context_embs.permute(0, 2, 3, 1).reshape( + _batch_size, _height * _width, _channels + ) + context_emb = torch.cat([context_emb, image_latents_context_embs], dim=1) + + image_emb = self.context_embedding(image_embeddings) + image_emb = image_emb.view(-1, self.config.in_channels, self.config.cross_attention_dim) + context_emb = torch.cat([context_emb, image_emb], dim=1) + context_emb = context_emb.repeat_interleave(repeats=num_frames, dim=0) + + image_latents = image_latents.permute(0, 2, 1, 3, 4).reshape( + image_latents.shape[0] * image_latents.shape[2], + image_latents.shape[1], + image_latents.shape[3], + image_latents.shape[4], + ) + image_latents = self.image_latents_proj_in(image_latents) + image_latents = ( + image_latents[None, :] + .reshape(batch_size, num_frames, channels, height, width) + .permute(0, 3, 4, 1, 2) + .reshape(batch_size * height * width, num_frames, channels) + ) + image_latents = self.image_latents_temporal_encoder(image_latents) + image_latents = image_latents.reshape(batch_size, height, width, num_frames, channels).permute(0, 4, 3, 1, 2) + + # 5. pre-process + sample = torch.cat([sample, image_latents], dim=1) + sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) + sample = self.conv_in(sample) + sample = self.transformer_in( + sample, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # 6. down + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=context_emb, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames) + + down_block_res_samples += res_samples + + # 7. mid + if self.mid_block is not None: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=context_emb, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + ) + # 8. 
up + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block and forward_upsample_size: + upsample_size = down_block_res_samples[-1].shape[2:] + + if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=context_emb, + upsample_size=upsample_size, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + upsample_size=upsample_size, + num_frames=num_frames, + ) + + # 9. post-process + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + + sample = self.conv_out(sample) + + # reshape to (batch, channel, framerate, width, height) + sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4) + + if not return_dict: + return (sample,) + + return UNet3DConditionOutput(sample=sample) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_kandinsky3.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_kandinsky3.py new file mode 100644 index 000000000..b981c8e17 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_kandinsky3.py @@ -0,0 +1,535 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
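+# Editor's note (illustrative sketch, not part of the upstream module): the Kandinsky3UNet
+# defined below is normally driven by the Kandinsky 3 pipeline; a minimal direct call looks
+# roughly like
+#     unet = Kandinsky3UNet()                          # default config
+#     latents = torch.randn(1, 4, 64, 64)              # (batch, in_channels, H, W)
+#     text = torch.randn(1, 77, 4096)                  # encoder_hid_dim features
+#     mask = torch.ones(1, 77)                         # text attention mask
+#     out = unet(latents, timestep=10, encoder_hidden_states=text,
+#                encoder_attention_mask=mask).sample
+# All shapes above are assumptions chosen for illustration only.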
+ +from dataclasses import dataclass +from typing import Dict, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...utils import BaseOutput, logging +from ..attention_processor import Attention, AttentionProcessor, AttnProcessor +from ..embeddings import TimestepEmbedding, Timesteps +from ..modeling_utils import ModelMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class Kandinsky3UNetOutput(BaseOutput): + sample: torch.FloatTensor = None + + +class Kandinsky3EncoderProj(nn.Module): + def __init__(self, encoder_hid_dim, cross_attention_dim): + super().__init__() + self.projection_linear = nn.Linear(encoder_hid_dim, cross_attention_dim, bias=False) + self.projection_norm = nn.LayerNorm(cross_attention_dim) + + def forward(self, x): + x = self.projection_linear(x) + x = self.projection_norm(x) + return x + + +class Kandinsky3UNet(ModelMixin, ConfigMixin): + @register_to_config + def __init__( + self, + in_channels: int = 4, + time_embedding_dim: int = 1536, + groups: int = 32, + attention_head_dim: int = 64, + layers_per_block: Union[int, Tuple[int]] = 3, + block_out_channels: Tuple[int] = (384, 768, 1536, 3072), + cross_attention_dim: Union[int, Tuple[int]] = 4096, + encoder_hid_dim: int = 4096, + ): + super().__init__() + + # TOOD(Yiyi): Give better name and put into config for the following 4 parameters + expansion_ratio = 4 + compression_ratio = 2 + add_cross_attention = (False, True, True, True) + add_self_attention = (False, True, True, True) + + out_channels = in_channels + init_channels = block_out_channels[0] // 2 + self.time_proj = Timesteps(init_channels, flip_sin_to_cos=False, downscale_freq_shift=1) + + self.time_embedding = TimestepEmbedding( + init_channels, + time_embedding_dim, + ) + + self.add_time_condition = Kandinsky3AttentionPooling( + time_embedding_dim, cross_attention_dim, attention_head_dim + ) + + self.conv_in = nn.Conv2d(in_channels, init_channels, kernel_size=3, padding=1) + + self.encoder_hid_proj = Kandinsky3EncoderProj(encoder_hid_dim, cross_attention_dim) + + hidden_dims = [init_channels] + list(block_out_channels) + in_out_dims = list(zip(hidden_dims[:-1], hidden_dims[1:])) + text_dims = [cross_attention_dim if is_exist else None for is_exist in add_cross_attention] + num_blocks = len(block_out_channels) * [layers_per_block] + layer_params = [num_blocks, text_dims, add_self_attention] + rev_layer_params = map(reversed, layer_params) + + cat_dims = [] + self.num_levels = len(in_out_dims) + self.down_blocks = nn.ModuleList([]) + for level, ((in_dim, out_dim), res_block_num, text_dim, self_attention) in enumerate( + zip(in_out_dims, *layer_params) + ): + down_sample = level != (self.num_levels - 1) + cat_dims.append(out_dim if level != (self.num_levels - 1) else 0) + self.down_blocks.append( + Kandinsky3DownSampleBlock( + in_dim, + out_dim, + time_embedding_dim, + text_dim, + res_block_num, + groups, + attention_head_dim, + expansion_ratio, + compression_ratio, + down_sample, + self_attention, + ) + ) + + self.up_blocks = nn.ModuleList([]) + for level, ((out_dim, in_dim), res_block_num, text_dim, self_attention) in enumerate( + zip(reversed(in_out_dims), *rev_layer_params) + ): + up_sample = level != 0 + self.up_blocks.append( + Kandinsky3UpSampleBlock( + in_dim, + cat_dims.pop(), + out_dim, + time_embedding_dim, + text_dim, + res_block_num, + groups, + attention_head_dim, + expansion_ratio, + 
compression_ratio, + up_sample, + self_attention, + ) + ) + + self.conv_norm_out = nn.GroupNorm(groups, init_channels) + self.conv_act_out = nn.SiLU() + self.conv_out = nn.Conv2d(init_channels, out_channels, kernel_size=3, padding=1) + + @property + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "set_processor"): + processors[f"{name}.processor"] = module.processor + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. 
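+
+        Example (editor's illustration, not part of the upstream docstring; `my_custom_processors`
+        is a hypothetical mapping of processor instances):
+
+            unet.set_attn_processor(my_custom_processors)
+            # ...
+            unet.set_default_attn_processor()  # back to a plain AttnProcessor everywhere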
+ """ + self.set_attn_processor(AttnProcessor()) + + def _set_gradient_checkpointing(self, module, value=False): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = value + + def forward(self, sample, timestep, encoder_hidden_states=None, encoder_attention_mask=None, return_dict=True): + if encoder_attention_mask is not None: + encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 + encoder_attention_mask = encoder_attention_mask.unsqueeze(1) + + if not torch.is_tensor(timestep): + dtype = torch.float32 if isinstance(timestep, float) else torch.int32 + timestep = torch.tensor([timestep], dtype=dtype, device=sample.device) + elif len(timestep.shape) == 0: + timestep = timestep[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = timestep.expand(sample.shape[0]) + time_embed_input = self.time_proj(timestep).to(sample.dtype) + time_embed = self.time_embedding(time_embed_input) + + encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) + + if encoder_hidden_states is not None: + time_embed = self.add_time_condition(time_embed, encoder_hidden_states, encoder_attention_mask) + + hidden_states = [] + sample = self.conv_in(sample) + for level, down_sample in enumerate(self.down_blocks): + sample = down_sample(sample, time_embed, encoder_hidden_states, encoder_attention_mask) + if level != self.num_levels - 1: + hidden_states.append(sample) + + for level, up_sample in enumerate(self.up_blocks): + if level != 0: + sample = torch.cat([sample, hidden_states.pop()], dim=1) + sample = up_sample(sample, time_embed, encoder_hidden_states, encoder_attention_mask) + + sample = self.conv_norm_out(sample) + sample = self.conv_act_out(sample) + sample = self.conv_out(sample) + + if not return_dict: + return (sample,) + return Kandinsky3UNetOutput(sample=sample) + + +class Kandinsky3UpSampleBlock(nn.Module): + def __init__( + self, + in_channels, + cat_dim, + out_channels, + time_embed_dim, + context_dim=None, + num_blocks=3, + groups=32, + head_dim=64, + expansion_ratio=4, + compression_ratio=2, + up_sample=True, + self_attention=True, + ): + super().__init__() + up_resolutions = [[None, True if up_sample else None, None, None]] + [[None] * 4] * (num_blocks - 1) + hidden_channels = ( + [(in_channels + cat_dim, in_channels)] + + [(in_channels, in_channels)] * (num_blocks - 2) + + [(in_channels, out_channels)] + ) + attentions = [] + resnets_in = [] + resnets_out = [] + + self.self_attention = self_attention + self.context_dim = context_dim + + if self_attention: + attentions.append( + Kandinsky3AttentionBlock(out_channels, time_embed_dim, None, groups, head_dim, expansion_ratio) + ) + else: + attentions.append(nn.Identity()) + + for (in_channel, out_channel), up_resolution in zip(hidden_channels, up_resolutions): + resnets_in.append( + Kandinsky3ResNetBlock(in_channel, in_channel, time_embed_dim, groups, compression_ratio, up_resolution) + ) + + if context_dim is not None: + attentions.append( + Kandinsky3AttentionBlock( + in_channel, time_embed_dim, context_dim, groups, head_dim, expansion_ratio + ) + ) + else: + attentions.append(nn.Identity()) + + resnets_out.append( + Kandinsky3ResNetBlock(in_channel, out_channel, time_embed_dim, groups, compression_ratio) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets_in = nn.ModuleList(resnets_in) + self.resnets_out = nn.ModuleList(resnets_out) + + def forward(self, x, time_embed, context=None, context_mask=None, 
image_mask=None): + for attention, resnet_in, resnet_out in zip(self.attentions[1:], self.resnets_in, self.resnets_out): + x = resnet_in(x, time_embed) + if self.context_dim is not None: + x = attention(x, time_embed, context, context_mask, image_mask) + x = resnet_out(x, time_embed) + + if self.self_attention: + x = self.attentions[0](x, time_embed, image_mask=image_mask) + return x + + +class Kandinsky3DownSampleBlock(nn.Module): + def __init__( + self, + in_channels, + out_channels, + time_embed_dim, + context_dim=None, + num_blocks=3, + groups=32, + head_dim=64, + expansion_ratio=4, + compression_ratio=2, + down_sample=True, + self_attention=True, + ): + super().__init__() + attentions = [] + resnets_in = [] + resnets_out = [] + + self.self_attention = self_attention + self.context_dim = context_dim + + if self_attention: + attentions.append( + Kandinsky3AttentionBlock(in_channels, time_embed_dim, None, groups, head_dim, expansion_ratio) + ) + else: + attentions.append(nn.Identity()) + + up_resolutions = [[None] * 4] * (num_blocks - 1) + [[None, None, False if down_sample else None, None]] + hidden_channels = [(in_channels, out_channels)] + [(out_channels, out_channels)] * (num_blocks - 1) + for (in_channel, out_channel), up_resolution in zip(hidden_channels, up_resolutions): + resnets_in.append( + Kandinsky3ResNetBlock(in_channel, out_channel, time_embed_dim, groups, compression_ratio) + ) + + if context_dim is not None: + attentions.append( + Kandinsky3AttentionBlock( + out_channel, time_embed_dim, context_dim, groups, head_dim, expansion_ratio + ) + ) + else: + attentions.append(nn.Identity()) + + resnets_out.append( + Kandinsky3ResNetBlock( + out_channel, out_channel, time_embed_dim, groups, compression_ratio, up_resolution + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets_in = nn.ModuleList(resnets_in) + self.resnets_out = nn.ModuleList(resnets_out) + + def forward(self, x, time_embed, context=None, context_mask=None, image_mask=None): + if self.self_attention: + x = self.attentions[0](x, time_embed, image_mask=image_mask) + + for attention, resnet_in, resnet_out in zip(self.attentions[1:], self.resnets_in, self.resnets_out): + x = resnet_in(x, time_embed) + if self.context_dim is not None: + x = attention(x, time_embed, context, context_mask, image_mask) + x = resnet_out(x, time_embed) + return x + + +class Kandinsky3ConditionalGroupNorm(nn.Module): + def __init__(self, groups, normalized_shape, context_dim): + super().__init__() + self.norm = nn.GroupNorm(groups, normalized_shape, affine=False) + self.context_mlp = nn.Sequential(nn.SiLU(), nn.Linear(context_dim, 2 * normalized_shape)) + self.context_mlp[1].weight.data.zero_() + self.context_mlp[1].bias.data.zero_() + + def forward(self, x, context): + context = self.context_mlp(context) + + for _ in range(len(x.shape[2:])): + context = context.unsqueeze(-1) + + scale, shift = context.chunk(2, dim=1) + x = self.norm(x) * (scale + 1.0) + shift + return x + + +class Kandinsky3Block(nn.Module): + def __init__(self, in_channels, out_channels, time_embed_dim, kernel_size=3, norm_groups=32, up_resolution=None): + super().__init__() + self.group_norm = Kandinsky3ConditionalGroupNorm(norm_groups, in_channels, time_embed_dim) + self.activation = nn.SiLU() + if up_resolution is not None and up_resolution: + self.up_sample = nn.ConvTranspose2d(in_channels, in_channels, kernel_size=2, stride=2) + else: + self.up_sample = nn.Identity() + + padding = int(kernel_size > 1) + self.projection = nn.Conv2d(in_channels, 
out_channels, kernel_size=kernel_size, padding=padding) + + if up_resolution is not None and not up_resolution: + self.down_sample = nn.Conv2d(out_channels, out_channels, kernel_size=2, stride=2) + else: + self.down_sample = nn.Identity() + + def forward(self, x, time_embed): + x = self.group_norm(x, time_embed) + x = self.activation(x) + x = self.up_sample(x) + x = self.projection(x) + x = self.down_sample(x) + return x + + +class Kandinsky3ResNetBlock(nn.Module): + def __init__( + self, in_channels, out_channels, time_embed_dim, norm_groups=32, compression_ratio=2, up_resolutions=4 * [None] + ): + super().__init__() + kernel_sizes = [1, 3, 3, 1] + hidden_channel = max(in_channels, out_channels) // compression_ratio + hidden_channels = ( + [(in_channels, hidden_channel)] + [(hidden_channel, hidden_channel)] * 2 + [(hidden_channel, out_channels)] + ) + self.resnet_blocks = nn.ModuleList( + [ + Kandinsky3Block(in_channel, out_channel, time_embed_dim, kernel_size, norm_groups, up_resolution) + for (in_channel, out_channel), kernel_size, up_resolution in zip( + hidden_channels, kernel_sizes, up_resolutions + ) + ] + ) + self.shortcut_up_sample = ( + nn.ConvTranspose2d(in_channels, in_channels, kernel_size=2, stride=2) + if True in up_resolutions + else nn.Identity() + ) + self.shortcut_projection = ( + nn.Conv2d(in_channels, out_channels, kernel_size=1) if in_channels != out_channels else nn.Identity() + ) + self.shortcut_down_sample = ( + nn.Conv2d(out_channels, out_channels, kernel_size=2, stride=2) + if False in up_resolutions + else nn.Identity() + ) + + def forward(self, x, time_embed): + out = x + for resnet_block in self.resnet_blocks: + out = resnet_block(out, time_embed) + + x = self.shortcut_up_sample(x) + x = self.shortcut_projection(x) + x = self.shortcut_down_sample(x) + x = x + out + return x + + +class Kandinsky3AttentionPooling(nn.Module): + def __init__(self, num_channels, context_dim, head_dim=64): + super().__init__() + self.attention = Attention( + context_dim, + context_dim, + dim_head=head_dim, + out_dim=num_channels, + out_bias=False, + ) + + def forward(self, x, context, context_mask=None): + context_mask = context_mask.to(dtype=context.dtype) + context = self.attention(context.mean(dim=1, keepdim=True), context, context_mask) + return x + context.squeeze(1) + + +class Kandinsky3AttentionBlock(nn.Module): + def __init__(self, num_channels, time_embed_dim, context_dim=None, norm_groups=32, head_dim=64, expansion_ratio=4): + super().__init__() + self.in_norm = Kandinsky3ConditionalGroupNorm(norm_groups, num_channels, time_embed_dim) + self.attention = Attention( + num_channels, + context_dim or num_channels, + dim_head=head_dim, + out_dim=num_channels, + out_bias=False, + ) + + hidden_channels = expansion_ratio * num_channels + self.out_norm = Kandinsky3ConditionalGroupNorm(norm_groups, num_channels, time_embed_dim) + self.feed_forward = nn.Sequential( + nn.Conv2d(num_channels, hidden_channels, kernel_size=1, bias=False), + nn.SiLU(), + nn.Conv2d(hidden_channels, num_channels, kernel_size=1, bias=False), + ) + + def forward(self, x, time_embed, context=None, context_mask=None, image_mask=None): + height, width = x.shape[-2:] + out = self.in_norm(x, time_embed) + out = out.reshape(x.shape[0], -1, height * width).permute(0, 2, 1) + context = context if context is not None else out + if context_mask is not None: + context_mask = context_mask.to(dtype=context.dtype) + + out = self.attention(out, context, context_mask) + out = out.permute(0, 2, 
1).unsqueeze(-1).reshape(out.shape[0], -1, height, width) + x = x + out + + out = self.out_norm(x, time_embed) + out = self.feed_forward(out) + x = x + out + return x diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_motion_model.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_motion_model.py new file mode 100644 index 000000000..ab2eac4c9 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_motion_model.py @@ -0,0 +1,948 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Dict, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import UNet2DConditionLoadersMixin +from ...utils import logging +from ..attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + Attention, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from ..embeddings import TimestepEmbedding, Timesteps +from ..modeling_utils import ModelMixin +from ..transformers.transformer_temporal import TransformerTemporalModel +from .unet_2d_blocks import UNetMidBlock2DCrossAttn +from .unet_2d_condition import UNet2DConditionModel +from .unet_3d_blocks import ( + CrossAttnDownBlockMotion, + CrossAttnUpBlockMotion, + DownBlockMotion, + UNetMidBlockCrossAttnMotion, + UpBlockMotion, + get_down_block, + get_up_block, +) +from .unet_3d_condition import UNet3DConditionOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class MotionModules(nn.Module): + def __init__( + self, + in_channels: int, + layers_per_block: int = 2, + num_attention_heads: int = 8, + attention_bias: bool = False, + cross_attention_dim: Optional[int] = None, + activation_fn: str = "geglu", + norm_num_groups: int = 32, + max_seq_length: int = 32, + ): + super().__init__() + self.motion_modules = nn.ModuleList([]) + + for i in range(layers_per_block): + self.motion_modules.append( + TransformerTemporalModel( + in_channels=in_channels, + norm_num_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + activation_fn=activation_fn, + attention_bias=attention_bias, + num_attention_heads=num_attention_heads, + attention_head_dim=in_channels // num_attention_heads, + positional_embeddings="sinusoidal", + num_positional_embeddings=max_seq_length, + ) + ) + + +class MotionAdapter(ModelMixin, ConfigMixin): + @register_to_config + def __init__( + self, + block_out_channels: Tuple[int, ...] 
= (320, 640, 1280, 1280), + motion_layers_per_block: int = 2, + motion_mid_block_layers_per_block: int = 1, + motion_num_attention_heads: int = 8, + motion_norm_num_groups: int = 32, + motion_max_seq_length: int = 32, + use_motion_mid_block: bool = True, + conv_in_channels: Optional[int] = None, + ): + """Container to store AnimateDiff Motion Modules + + Args: + block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each UNet block. + motion_layers_per_block (`int`, *optional*, defaults to 2): + The number of motion layers per UNet block. + motion_mid_block_layers_per_block (`int`, *optional*, defaults to 1): + The number of motion layers in the middle UNet block. + motion_num_attention_heads (`int`, *optional*, defaults to 8): + The number of heads to use in each attention layer of the motion module. + motion_norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups to use in each group normalization layer of the motion module. + motion_max_seq_length (`int`, *optional*, defaults to 32): + The maximum sequence length to use in the motion module. + use_motion_mid_block (`bool`, *optional*, defaults to True): + Whether to use a motion module in the middle of the UNet. + """ + + super().__init__() + down_blocks = [] + up_blocks = [] + + if conv_in_channels: + # input + self.conv_in = nn.Conv2d(conv_in_channels, block_out_channels[0], kernel_size=3, padding=1) + else: + self.conv_in = None + + for i, channel in enumerate(block_out_channels): + output_channel = block_out_channels[i] + down_blocks.append( + MotionModules( + in_channels=output_channel, + norm_num_groups=motion_norm_num_groups, + cross_attention_dim=None, + activation_fn="geglu", + attention_bias=False, + num_attention_heads=motion_num_attention_heads, + max_seq_length=motion_max_seq_length, + layers_per_block=motion_layers_per_block, + ) + ) + + if use_motion_mid_block: + self.mid_block = MotionModules( + in_channels=block_out_channels[-1], + norm_num_groups=motion_norm_num_groups, + cross_attention_dim=None, + activation_fn="geglu", + attention_bias=False, + num_attention_heads=motion_num_attention_heads, + layers_per_block=motion_mid_block_layers_per_block, + max_seq_length=motion_max_seq_length, + ) + else: + self.mid_block = None + + reversed_block_out_channels = list(reversed(block_out_channels)) + output_channel = reversed_block_out_channels[0] + for i, channel in enumerate(reversed_block_out_channels): + output_channel = reversed_block_out_channels[i] + up_blocks.append( + MotionModules( + in_channels=output_channel, + norm_num_groups=motion_norm_num_groups, + cross_attention_dim=None, + activation_fn="geglu", + attention_bias=False, + num_attention_heads=motion_num_attention_heads, + max_seq_length=motion_max_seq_length, + layers_per_block=motion_layers_per_block + 1, + ) + ) + + self.down_blocks = nn.ModuleList(down_blocks) + self.up_blocks = nn.ModuleList(up_blocks) + + def forward(self, sample): + pass + + +class UNetMotionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): + r""" + A modified conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a + sample shaped output. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). 
+ """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + sample_size: Optional[int] = None, + in_channels: int = 4, + out_channels: int = 4, + down_block_types: Tuple[str, ...] = ( + "CrossAttnDownBlockMotion", + "CrossAttnDownBlockMotion", + "CrossAttnDownBlockMotion", + "DownBlockMotion", + ), + up_block_types: Tuple[str, ...] = ( + "UpBlockMotion", + "CrossAttnUpBlockMotion", + "CrossAttnUpBlockMotion", + "CrossAttnUpBlockMotion", + ), + block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), + layers_per_block: int = 2, + downsample_padding: int = 1, + mid_block_scale_factor: float = 1, + act_fn: str = "silu", + norm_num_groups: int = 32, + norm_eps: float = 1e-5, + cross_attention_dim: int = 1280, + use_linear_projection: bool = False, + num_attention_heads: Union[int, Tuple[int, ...]] = 8, + motion_max_seq_length: int = 32, + motion_num_attention_heads: int = 8, + use_motion_mid_block: int = True, + encoder_hid_dim: Optional[int] = None, + encoder_hid_dim_type: Optional[str] = None, + time_cond_proj_dim: Optional[int] = None, + ): + super().__init__() + + self.sample_size = sample_size + + # Check inputs + if len(down_block_types) != len(up_block_types): + raise ValueError( + f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." + ) + + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." 
+ ) + + # input + conv_in_kernel = 3 + conv_out_kernel = 3 + conv_in_padding = (conv_in_kernel - 1) // 2 + self.conv_in = nn.Conv2d( + in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding + ) + + # time + time_embed_dim = block_out_channels[0] * 4 + self.time_proj = Timesteps(block_out_channels[0], True, 0) + timestep_input_dim = block_out_channels[0] + + self.time_embedding = TimestepEmbedding( + timestep_input_dim, time_embed_dim, act_fn=act_fn, cond_proj_dim=time_cond_proj_dim + ) + + if encoder_hid_dim_type is None: + self.encoder_hid_proj = None + + # class embedding + self.down_blocks = nn.ModuleList([]) + self.up_blocks = nn.ModuleList([]) + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(down_block_types) + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + in_channels=input_channel, + out_channels=output_channel, + temb_channels=time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads[i], + downsample_padding=downsample_padding, + use_linear_projection=use_linear_projection, + dual_cross_attention=False, + temporal_num_attention_heads=motion_num_attention_heads, + temporal_max_seq_length=motion_max_seq_length, + ) + self.down_blocks.append(down_block) + + # mid + if use_motion_mid_block: + self.mid_block = UNetMidBlockCrossAttnMotion( + in_channels=block_out_channels[-1], + temb_channels=time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads[-1], + resnet_groups=norm_num_groups, + dual_cross_attention=False, + use_linear_projection=use_linear_projection, + temporal_num_attention_heads=motion_num_attention_heads, + temporal_max_seq_length=motion_max_seq_length, + ) + + else: + self.mid_block = UNetMidBlock2DCrossAttn( + in_channels=block_out_channels[-1], + temb_channels=time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads[-1], + resnet_groups=norm_num_groups, + dual_cross_attention=False, + use_linear_projection=use_linear_projection, + ) + + # count how many layers upsample the images + self.num_upsamplers = 0 + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + reversed_num_attention_heads = list(reversed(num_attention_heads)) + + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + is_final_block = i == len(block_out_channels) - 1 + + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + # add upsample block for all BUT final layer + if not is_final_block: + add_upsample = True + self.num_upsamplers += 1 + else: + add_upsample = False + + up_block = get_up_block( + up_block_type, + num_layers=layers_per_block + 1, + in_channels=input_channel, + out_channels=output_channel, + 
prev_output_channel=prev_output_channel, + temb_channels=time_embed_dim, + add_upsample=add_upsample, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=reversed_num_attention_heads[i], + dual_cross_attention=False, + resolution_idx=i, + use_linear_projection=use_linear_projection, + temporal_num_attention_heads=motion_num_attention_heads, + temporal_max_seq_length=motion_max_seq_length, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + if norm_num_groups is not None: + self.conv_norm_out = nn.GroupNorm( + num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps + ) + self.conv_act = nn.SiLU() + else: + self.conv_norm_out = None + self.conv_act = None + + conv_out_padding = (conv_out_kernel - 1) // 2 + self.conv_out = nn.Conv2d( + block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding + ) + + @classmethod + def from_unet2d( + cls, + unet: UNet2DConditionModel, + motion_adapter: Optional[MotionAdapter] = None, + load_weights: bool = True, + ): + has_motion_adapter = motion_adapter is not None + + # based on https://github.com/guoyww/AnimateDiff/blob/895f3220c06318ea0760131ec70408b466c49333/animatediff/models/unet.py#L459 + config = unet.config + config["_class_name"] = cls.__name__ + + down_blocks = [] + for down_blocks_type in config["down_block_types"]: + if "CrossAttn" in down_blocks_type: + down_blocks.append("CrossAttnDownBlockMotion") + else: + down_blocks.append("DownBlockMotion") + config["down_block_types"] = down_blocks + + up_blocks = [] + for down_blocks_type in config["up_block_types"]: + if "CrossAttn" in down_blocks_type: + up_blocks.append("CrossAttnUpBlockMotion") + else: + up_blocks.append("UpBlockMotion") + + config["up_block_types"] = up_blocks + + if has_motion_adapter: + config["motion_num_attention_heads"] = motion_adapter.config["motion_num_attention_heads"] + config["motion_max_seq_length"] = motion_adapter.config["motion_max_seq_length"] + config["use_motion_mid_block"] = motion_adapter.config["use_motion_mid_block"] + + # For PIA UNets we need to set the number input channels to 9 + if motion_adapter.config["conv_in_channels"]: + config["in_channels"] = motion_adapter.config["conv_in_channels"] + + # Need this for backwards compatibility with UNet2DConditionModel checkpoints + if not config.get("num_attention_heads"): + config["num_attention_heads"] = config["attention_head_dim"] + + model = cls.from_config(config) + + if not load_weights: + return model + + # Logic for loading PIA UNets which allow the first 4 channels to be any UNet2DConditionModel conv_in weight + # while the last 5 channels must be PIA conv_in weights. 
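+ # The merged conv_in therefore expects 9 input channels: the concatenation below keeps the
+ # pretrained 4-channel conv_in weights and appends the adapter's remaining 5 channels along dim=1.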
+ if has_motion_adapter and motion_adapter.config["conv_in_channels"]: + model.conv_in = motion_adapter.conv_in + updated_conv_in_weight = torch.cat( + [unet.conv_in.weight, motion_adapter.conv_in.weight[:, 4:, :, :]], dim=1 + ) + model.conv_in.load_state_dict({"weight": updated_conv_in_weight, "bias": unet.conv_in.bias}) + else: + model.conv_in.load_state_dict(unet.conv_in.state_dict()) + + model.time_proj.load_state_dict(unet.time_proj.state_dict()) + model.time_embedding.load_state_dict(unet.time_embedding.state_dict()) + + for i, down_block in enumerate(unet.down_blocks): + model.down_blocks[i].resnets.load_state_dict(down_block.resnets.state_dict()) + if hasattr(model.down_blocks[i], "attentions"): + model.down_blocks[i].attentions.load_state_dict(down_block.attentions.state_dict()) + if model.down_blocks[i].downsamplers: + model.down_blocks[i].downsamplers.load_state_dict(down_block.downsamplers.state_dict()) + + for i, up_block in enumerate(unet.up_blocks): + model.up_blocks[i].resnets.load_state_dict(up_block.resnets.state_dict()) + if hasattr(model.up_blocks[i], "attentions"): + model.up_blocks[i].attentions.load_state_dict(up_block.attentions.state_dict()) + if model.up_blocks[i].upsamplers: + model.up_blocks[i].upsamplers.load_state_dict(up_block.upsamplers.state_dict()) + + model.mid_block.resnets.load_state_dict(unet.mid_block.resnets.state_dict()) + model.mid_block.attentions.load_state_dict(unet.mid_block.attentions.state_dict()) + + if unet.conv_norm_out is not None: + model.conv_norm_out.load_state_dict(unet.conv_norm_out.state_dict()) + if unet.conv_act is not None: + model.conv_act.load_state_dict(unet.conv_act.state_dict()) + model.conv_out.load_state_dict(unet.conv_out.state_dict()) + + if has_motion_adapter: + model.load_motion_modules(motion_adapter) + + # ensure that the Motion UNet is the same dtype as the UNet2DConditionModel + model.to(unet.dtype) + + return model + + def freeze_unet2d_params(self) -> None: + """Freeze the weights of just the UNet2DConditionModel, and leave the motion modules + unfrozen for fine tuning. 
+ """ + # Freeze everything + for param in self.parameters(): + param.requires_grad = False + + # Unfreeze Motion Modules + for down_block in self.down_blocks: + motion_modules = down_block.motion_modules + for param in motion_modules.parameters(): + param.requires_grad = True + + for up_block in self.up_blocks: + motion_modules = up_block.motion_modules + for param in motion_modules.parameters(): + param.requires_grad = True + + if hasattr(self.mid_block, "motion_modules"): + motion_modules = self.mid_block.motion_modules + for param in motion_modules.parameters(): + param.requires_grad = True + + def load_motion_modules(self, motion_adapter: Optional[MotionAdapter]) -> None: + for i, down_block in enumerate(motion_adapter.down_blocks): + self.down_blocks[i].motion_modules.load_state_dict(down_block.motion_modules.state_dict()) + for i, up_block in enumerate(motion_adapter.up_blocks): + self.up_blocks[i].motion_modules.load_state_dict(up_block.motion_modules.state_dict()) + + # to support older motion modules that don't have a mid_block + if hasattr(self.mid_block, "motion_modules"): + self.mid_block.motion_modules.load_state_dict(motion_adapter.mid_block.motion_modules.state_dict()) + + def save_motion_modules( + self, + save_directory: str, + is_main_process: bool = True, + safe_serialization: bool = True, + variant: Optional[str] = None, + push_to_hub: bool = False, + **kwargs, + ) -> None: + state_dict = self.state_dict() + + # Extract all motion modules + motion_state_dict = {} + for k, v in state_dict.items(): + if "motion_modules" in k: + motion_state_dict[k] = v + + adapter = MotionAdapter( + block_out_channels=self.config["block_out_channels"], + motion_layers_per_block=self.config["layers_per_block"], + motion_norm_num_groups=self.config["norm_num_groups"], + motion_num_attention_heads=self.config["motion_num_attention_heads"], + motion_max_seq_length=self.config["motion_max_seq_length"], + use_motion_mid_block=self.config["use_motion_mid_block"], + ) + adapter.load_state_dict(motion_state_dict) + adapter.save_pretrained( + save_directory=save_directory, + is_main_process=is_main_process, + safe_serialization=safe_serialization, + variant=variant, + push_to_hub=push_to_hub, + **kwargs, + ) + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. 
+ + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking + def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None: + """ + Sets the attention processor to use [feed forward + chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers). + + Parameters: + chunk_size (`int`, *optional*): + The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually + over each tensor of dim=`dim`. + dim (`int`, *optional*, defaults to `0`): + The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch) + or dim=1 (sequence length). + """ + if dim not in [0, 1]: + raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}") + + # By default chunk size is 1 + chunk_size = chunk_size or 1 + + def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): + if hasattr(module, "set_chunk_feed_forward"): + module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) + + for child in module.children(): + fn_recursive_feed_forward(child, chunk_size, dim) + + for module in self.children(): + fn_recursive_feed_forward(module, chunk_size, dim) + + # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.disable_forward_chunking + def disable_forward_chunking(self) -> None: + def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): + if hasattr(module, "set_chunk_feed_forward"): + module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) + + for child in module.children(): + fn_recursive_feed_forward(child, chunk_size, dim) + + for module in self.children(): + fn_recursive_feed_forward(module, None, 0) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self) -> None: + """ + Disables custom attention processors and sets the default attention implementation. 
+ """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + def _set_gradient_checkpointing(self, module, value: bool = False) -> None: + if isinstance(module, (CrossAttnDownBlockMotion, DownBlockMotion, CrossAttnUpBlockMotion, UpBlockMotion)): + module.gradient_checkpointing = value + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.enable_freeu + def enable_freeu(self, s1: float, s2: float, b1: float, b2: float) -> None: + r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497. + + The suffixes after the scaling factors represent the stage blocks where they are being applied. + + Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that + are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. + + Args: + s1 (`float`): + Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to + mitigate the "oversmoothing effect" in the enhanced denoising process. + s2 (`float`): + Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to + mitigate the "oversmoothing effect" in the enhanced denoising process. + b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. + b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. + """ + for i, upsample_block in enumerate(self.up_blocks): + setattr(upsample_block, "s1", s1) + setattr(upsample_block, "s2", s2) + setattr(upsample_block, "b1", b1) + setattr(upsample_block, "b2", b2) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.disable_freeu + def disable_freeu(self) -> None: + """Disables the FreeU mechanism.""" + freeu_keys = {"s1", "s2", "b1", "b2"} + for i, upsample_block in enumerate(self.up_blocks): + for k in freeu_keys: + if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: + setattr(upsample_block, k, None) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections + def fuse_qkv_projections(self): + """ + Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, + key, value) are fused. For cross-attention modules, key and value projection matrices are fused. + + + + This API is 🧪 experimental. + + + """ + self.original_attn_processors = None + + for _, attn_processor in self.attn_processors.items(): + if "Added" in str(attn_processor.__class__.__name__): + raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") + + self.original_attn_processors = self.attn_processors + + for module in self.modules(): + if isinstance(module, Attention): + module.fuse_projections(fuse=True) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections + def unfuse_qkv_projections(self): + """Disables the fused QKV projection if enabled. + + + + This API is 🧪 experimental. 
+ + + + """ + if self.original_attn_processors is not None: + self.set_attn_processor(self.original_attn_processors) + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, + mid_block_additional_residual: Optional[torch.Tensor] = None, + return_dict: bool = True, + ) -> Union[UNet3DConditionOutput, Tuple[torch.Tensor]]: + r""" + The [`UNetMotionModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor with the following shape `(batch, num_frames, channel, height, width`. + timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. + encoder_hidden_states (`torch.FloatTensor`): + The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. + timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`): + Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed + through the `self.time_embedding` layer to obtain the timestep embeddings. + attention_mask (`torch.Tensor`, *optional*, defaults to `None`): + An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask + is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large + negative values to the attention scores corresponding to "discard" tokens. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*): + A tuple of tensors that if specified are added to the residuals of down unet blocks. + mid_block_additional_residual: (`torch.Tensor`, *optional*): + A tensor that if specified is added to the residual of the middle unet block. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unet_3d_condition.UNet3DConditionOutput`] instead of a plain + tuple. + + Returns: + [`~models.unet_3d_condition.UNet3DConditionOutput`] or `tuple`: + If `return_dict` is True, an [`~models.unet_3d_condition.UNet3DConditionOutput`] is returned, otherwise + a `tuple` is returned where the first element is the sample tensor. + """ + # By default samples have to be AT least a multiple of the overall upsampling factor. + # The overall upsampling factor is equal to 2 ** (# num of upsampling layears). + # However, the upsampling interpolation output size can be forced to fit any upsampling size + # on the fly if necessary. 
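+ # E.g. with the default 4-level block configuration there are 3 upsamplers, so spatial dims should be
+ # divisible by 2**3 = 8; otherwise `forward_upsample_size` is set below and explicit `upsample_size`
+ # values are forwarded to the up blocks.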
+ default_overall_up_factor = 2**self.num_upsamplers + + # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` + forward_upsample_size = False + upsample_size = None + + if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): + logger.info("Forward upsample size to force interpolation output size.") + forward_upsample_size = True + + # prepare attention_mask + if attention_mask is not None: + attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1) + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = sample.device.type == "mps" + if isinstance(timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + num_frames = sample.shape[2] + timesteps = timesteps.expand(sample.shape[0]) + + t_emb = self.time_proj(timesteps) + + # timesteps does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. + t_emb = t_emb.to(dtype=self.dtype) + + emb = self.time_embedding(t_emb, timestep_cond) + emb = emb.repeat_interleave(repeats=num_frames, dim=0) + encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0) + + if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj": + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" + ) + image_embeds = added_cond_kwargs.get("image_embeds") + image_embeds = self.encoder_hid_proj(image_embeds) + image_embeds = [image_embed.repeat_interleave(repeats=num_frames, dim=0) for image_embed in image_embeds] + encoder_hidden_states = (encoder_hidden_states, image_embeds) + + # 2. pre-process + sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) + sample = self.conv_in(sample) + + # 3. 
down + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames) + + down_block_res_samples += res_samples + + if down_block_additional_residuals is not None: + new_down_block_res_samples = () + + for down_block_res_sample, down_block_additional_residual in zip( + down_block_res_samples, down_block_additional_residuals + ): + down_block_res_sample = down_block_res_sample + down_block_additional_residual + new_down_block_res_samples += (down_block_res_sample,) + + down_block_res_samples = new_down_block_res_samples + + # 4. mid + if self.mid_block is not None: + # To support older versions of motion modules that don't have a mid_block + if hasattr(self.mid_block, "motion_modules"): + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + ) + + if mid_block_additional_residual is not None: + sample = sample + mid_block_additional_residual + + # 5. up + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block and forward_upsample_size: + upsample_size = down_block_res_samples[-1].shape[2:] + + if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + upsample_size=upsample_size, + attention_mask=attention_mask, + num_frames=num_frames, + cross_attention_kwargs=cross_attention_kwargs, + ) + else: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + upsample_size=upsample_size, + num_frames=num_frames, + ) + + # 6. 
post-process + if self.conv_norm_out: + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + + sample = self.conv_out(sample) + + # reshape to (batch, channel, framerate, width, height) + sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4) + + if not return_dict: + return (sample,) + + return UNet3DConditionOutput(sample=sample) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_spatio_temporal_condition.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_spatio_temporal_condition.py new file mode 100644 index 000000000..5fe265e63 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_spatio_temporal_condition.py @@ -0,0 +1,489 @@ +from dataclasses import dataclass +from typing import Dict, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import UNet2DConditionLoadersMixin +from ...utils import BaseOutput, logging +from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor +from ..embeddings import TimestepEmbedding, Timesteps +from ..modeling_utils import ModelMixin +from .unet_3d_blocks import UNetMidBlockSpatioTemporal, get_down_block, get_up_block + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class UNetSpatioTemporalConditionOutput(BaseOutput): + """ + The output of [`UNetSpatioTemporalConditionModel`]. + + Args: + sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): + The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. + """ + + sample: torch.FloatTensor = None + + +class UNetSpatioTemporalConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): + r""" + A conditional Spatio-Temporal UNet model that takes a noisy video frames, conditional state, and a timestep and returns a sample + shaped output. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): + Height and width of input/output sample. + in_channels (`int`, *optional*, defaults to 8): Number of channels in the input sample. + out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. + down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "DownBlockSpatioTemporal")`): + The tuple of downsample blocks to use. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal")`): + The tuple of upsample blocks to use. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. + addition_time_embed_dim: (`int`, defaults to 256): + Dimension to to encode the additional time ids. + projection_class_embeddings_input_dim (`int`, defaults to 768): + The dimension of the projection of encoded `added_time_ids`. + layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. 
+ cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): + The dimension of the cross attention features. + transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1): + The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for + [`~models.unet_3d_blocks.CrossAttnDownBlockSpatioTemporal`], [`~models.unet_3d_blocks.CrossAttnUpBlockSpatioTemporal`], + [`~models.unet_3d_blocks.UNetMidBlockSpatioTemporal`]. + num_attention_heads (`int`, `Tuple[int]`, defaults to `(5, 10, 10, 20)`): + The number of attention heads. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + sample_size: Optional[int] = None, + in_channels: int = 8, + out_channels: int = 4, + down_block_types: Tuple[str] = ( + "CrossAttnDownBlockSpatioTemporal", + "CrossAttnDownBlockSpatioTemporal", + "CrossAttnDownBlockSpatioTemporal", + "DownBlockSpatioTemporal", + ), + up_block_types: Tuple[str] = ( + "UpBlockSpatioTemporal", + "CrossAttnUpBlockSpatioTemporal", + "CrossAttnUpBlockSpatioTemporal", + "CrossAttnUpBlockSpatioTemporal", + ), + block_out_channels: Tuple[int] = (320, 640, 1280, 1280), + addition_time_embed_dim: int = 256, + projection_class_embeddings_input_dim: int = 768, + layers_per_block: Union[int, Tuple[int]] = 2, + cross_attention_dim: Union[int, Tuple[int]] = 1024, + transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, + num_attention_heads: Union[int, Tuple[int]] = (5, 10, 20, 20), + num_frames: int = 25, + ): + super().__init__() + + self.sample_size = sample_size + + # Check inputs + if len(down_block_types) != len(up_block_types): + raise ValueError( + f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." + ) + + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." + ) + + if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." 
+ ) + + # input + self.conv_in = nn.Conv2d( + in_channels, + block_out_channels[0], + kernel_size=3, + padding=1, + ) + + # time + time_embed_dim = block_out_channels[0] * 4 + + self.time_proj = Timesteps(block_out_channels[0], True, downscale_freq_shift=0) + timestep_input_dim = block_out_channels[0] + + self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) + + self.add_time_proj = Timesteps(addition_time_embed_dim, True, downscale_freq_shift=0) + self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + + self.down_blocks = nn.ModuleList([]) + self.up_blocks = nn.ModuleList([]) + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(down_block_types) + + if isinstance(cross_attention_dim, int): + cross_attention_dim = (cross_attention_dim,) * len(down_block_types) + + if isinstance(layers_per_block, int): + layers_per_block = [layers_per_block] * len(down_block_types) + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) + + blocks_time_embed_dim = time_embed_dim + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block[i], + transformer_layers_per_block=transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + temb_channels=blocks_time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=1e-5, + cross_attention_dim=cross_attention_dim[i], + num_attention_heads=num_attention_heads[i], + resnet_act_fn="silu", + ) + self.down_blocks.append(down_block) + + # mid + self.mid_block = UNetMidBlockSpatioTemporal( + block_out_channels[-1], + temb_channels=blocks_time_embed_dim, + transformer_layers_per_block=transformer_layers_per_block[-1], + cross_attention_dim=cross_attention_dim[-1], + num_attention_heads=num_attention_heads[-1], + ) + + # count how many layers upsample the images + self.num_upsamplers = 0 + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + reversed_num_attention_heads = list(reversed(num_attention_heads)) + reversed_layers_per_block = list(reversed(layers_per_block)) + reversed_cross_attention_dim = list(reversed(cross_attention_dim)) + reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) + + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + is_final_block = i == len(block_out_channels) - 1 + + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + # add upsample block for all BUT final layer + if not is_final_block: + add_upsample = True + self.num_upsamplers += 1 + else: + add_upsample = False + + up_block = get_up_block( + up_block_type, + num_layers=reversed_layers_per_block[i] + 1, + transformer_layers_per_block=reversed_transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=blocks_time_embed_dim, + add_upsample=add_upsample, + resnet_eps=1e-5, + resolution_idx=i, + cross_attention_dim=reversed_cross_attention_dim[i], + 
num_attention_heads=reversed_num_attention_heads[i], + resnet_act_fn="silu", + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-5) + self.conv_act = nn.SiLU() + + self.conv_out = nn.Conv2d( + block_out_channels[0], + out_channels, + kernel_size=3, + padding=1, + ) + + @property + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors( + name: str, + module: torch.nn.Module, + processors: Dict[str, AttentionProcessor], + ): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. 
+ """ + if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + def _set_gradient_checkpointing(self, module, value=False): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = value + + # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking + def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None: + """ + Sets the attention processor to use [feed forward + chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers). + + Parameters: + chunk_size (`int`, *optional*): + The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually + over each tensor of dim=`dim`. + dim (`int`, *optional*, defaults to `0`): + The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch) + or dim=1 (sequence length). + """ + if dim not in [0, 1]: + raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}") + + # By default chunk size is 1 + chunk_size = chunk_size or 1 + + def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): + if hasattr(module, "set_chunk_feed_forward"): + module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) + + for child in module.children(): + fn_recursive_feed_forward(child, chunk_size, dim) + + for module in self.children(): + fn_recursive_feed_forward(module, chunk_size, dim) + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + added_time_ids: torch.Tensor, + return_dict: bool = True, + ) -> Union[UNetSpatioTemporalConditionOutput, Tuple]: + r""" + The [`UNetSpatioTemporalConditionModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor with the following shape `(batch, num_frames, channel, height, width)`. + timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. + encoder_hidden_states (`torch.FloatTensor`): + The encoder hidden states with shape `(batch, sequence_length, cross_attention_dim)`. + added_time_ids: (`torch.FloatTensor`): + The additional time ids with shape `(batch, num_additional_ids)`. These are encoded with sinusoidal + embeddings and added to the time embeddings. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unet_slatio_temporal.UNetSpatioTemporalConditionOutput`] instead of a plain + tuple. + Returns: + [`~models.unet_slatio_temporal.UNetSpatioTemporalConditionOutput`] or `tuple`: + If `return_dict` is True, an [`~models.unet_slatio_temporal.UNetSpatioTemporalConditionOutput`] is returned, otherwise + a `tuple` is returned where the first element is the sample tensor. + """ + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = sample.device.type == "mps" + if isinstance(timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + batch_size, num_frames = sample.shape[:2] + timesteps = timesteps.expand(batch_size) + + t_emb = self.time_proj(timesteps) + + # `Timesteps` does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. + t_emb = t_emb.to(dtype=sample.dtype) + + emb = self.time_embedding(t_emb) + + time_embeds = self.add_time_proj(added_time_ids.flatten()) + time_embeds = time_embeds.reshape((batch_size, -1)) + time_embeds = time_embeds.to(emb.dtype) + aug_emb = self.add_embedding(time_embeds) + emb = emb + aug_emb + + # Flatten the batch and frames dimensions + # sample: [batch, frames, channels, height, width] -> [batch * frames, channels, height, width] + sample = sample.flatten(0, 1) + # Repeat the embeddings num_video_frames times + # emb: [batch, channels] -> [batch * frames, channels] + emb = emb.repeat_interleave(num_frames, dim=0) + # encoder_hidden_states: [batch, 1, channels] -> [batch * frames, 1, channels] + encoder_hidden_states = encoder_hidden_states.repeat_interleave(num_frames, dim=0) + + # 2. pre-process + sample = self.conv_in(sample) + + image_only_indicator = torch.zeros(batch_size, num_frames, dtype=sample.dtype, device=sample.device) + + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + image_only_indicator=image_only_indicator, + ) + else: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + image_only_indicator=image_only_indicator, + ) + + down_block_res_samples += res_samples + + # 4. mid + sample = self.mid_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + image_only_indicator=image_only_indicator, + ) + + # 5. up + for i, upsample_block in enumerate(self.up_blocks): + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + image_only_indicator=image_only_indicator, + ) + else: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + image_only_indicator=image_only_indicator, + ) + + # 6. post-process + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + # 7. 
Reshape back to original shape + sample = sample.reshape(batch_size, num_frames, *sample.shape[1:]) + + if not return_dict: + return (sample,) + + return UNetSpatioTemporalConditionOutput(sample=sample) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_stable_cascade.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_stable_cascade.py new file mode 100644 index 000000000..9f81e5024 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/unet_stable_cascade.py @@ -0,0 +1,610 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders.unet import FromOriginalUNetMixin +from ...utils import BaseOutput +from ..attention_processor import Attention +from ..modeling_utils import ModelMixin + + +# Copied from diffusers.pipelines.wuerstchen.modeling_wuerstchen_common.WuerstchenLayerNorm with WuerstchenLayerNorm -> SDCascadeLayerNorm +class SDCascadeLayerNorm(nn.LayerNorm): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def forward(self, x): + x = x.permute(0, 2, 3, 1) + x = super().forward(x) + return x.permute(0, 3, 1, 2) + + +class SDCascadeTimestepBlock(nn.Module): + def __init__(self, c, c_timestep, conds=[]): + super().__init__() + linear_cls = nn.Linear + self.mapper = linear_cls(c_timestep, c * 2) + self.conds = conds + for cname in conds: + setattr(self, f"mapper_{cname}", linear_cls(c_timestep, c * 2)) + + def forward(self, x, t): + t = t.chunk(len(self.conds) + 1, dim=1) + a, b = self.mapper(t[0])[:, :, None, None].chunk(2, dim=1) + for i, c in enumerate(self.conds): + ac, bc = getattr(self, f"mapper_{c}")(t[i + 1])[:, :, None, None].chunk(2, dim=1) + a, b = a + ac, b + bc + return x * (1 + a) + b + + +class SDCascadeResBlock(nn.Module): + def __init__(self, c, c_skip=0, kernel_size=3, dropout=0.0): + super().__init__() + self.depthwise = nn.Conv2d(c, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) + self.norm = SDCascadeLayerNorm(c, elementwise_affine=False, eps=1e-6) + self.channelwise = nn.Sequential( + nn.Linear(c + c_skip, c * 4), + nn.GELU(), + GlobalResponseNorm(c * 4), + nn.Dropout(dropout), + nn.Linear(c * 4, c), + ) + + def forward(self, x, x_skip=None): + x_res = x + x = self.norm(self.depthwise(x)) + if x_skip is not None: + x = torch.cat([x, x_skip], dim=1) + x = self.channelwise(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + return x + x_res + + +# from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105 +class GlobalResponseNorm(nn.Module): + def __init__(self, dim): + super().__init__() + self.gamma = 
nn.Parameter(torch.zeros(1, 1, 1, dim)) + self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) + + def forward(self, x): + agg_norm = torch.norm(x, p=2, dim=(1, 2), keepdim=True) + stand_div_norm = agg_norm / (agg_norm.mean(dim=-1, keepdim=True) + 1e-6) + return self.gamma * (x * stand_div_norm) + self.beta + x + + +class SDCascadeAttnBlock(nn.Module): + def __init__(self, c, c_cond, nhead, self_attn=True, dropout=0.0): + super().__init__() + linear_cls = nn.Linear + + self.self_attn = self_attn + self.norm = SDCascadeLayerNorm(c, elementwise_affine=False, eps=1e-6) + self.attention = Attention(query_dim=c, heads=nhead, dim_head=c // nhead, dropout=dropout, bias=True) + self.kv_mapper = nn.Sequential(nn.SiLU(), linear_cls(c_cond, c)) + + def forward(self, x, kv): + kv = self.kv_mapper(kv) + norm_x = self.norm(x) + if self.self_attn: + batch_size, channel, _, _ = x.shape + kv = torch.cat([norm_x.view(batch_size, channel, -1).transpose(1, 2), kv], dim=1) + x = x + self.attention(norm_x, encoder_hidden_states=kv) + return x + + +class UpDownBlock2d(nn.Module): + def __init__(self, in_channels, out_channels, mode, enabled=True): + super().__init__() + if mode not in ["up", "down"]: + raise ValueError(f"{mode} not supported") + interpolation = ( + nn.Upsample(scale_factor=2 if mode == "up" else 0.5, mode="bilinear", align_corners=True) + if enabled + else nn.Identity() + ) + mapping = nn.Conv2d(in_channels, out_channels, kernel_size=1) + self.blocks = nn.ModuleList([interpolation, mapping] if mode == "up" else [mapping, interpolation]) + + def forward(self, x): + for block in self.blocks: + x = block(x) + return x + + +@dataclass +class StableCascadeUNetOutput(BaseOutput): + sample: torch.FloatTensor = None + + +class StableCascadeUNet(ModelMixin, ConfigMixin, FromOriginalUNetMixin): + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + in_channels: int = 16, + out_channels: int = 16, + timestep_ratio_embedding_dim: int = 64, + patch_size: int = 1, + conditioning_dim: int = 2048, + block_out_channels: Tuple[int] = (2048, 2048), + num_attention_heads: Tuple[int] = (32, 32), + down_num_layers_per_block: Tuple[int] = (8, 24), + up_num_layers_per_block: Tuple[int] = (24, 8), + down_blocks_repeat_mappers: Optional[Tuple[int]] = ( + 1, + 1, + ), + up_blocks_repeat_mappers: Optional[Tuple[int]] = (1, 1), + block_types_per_layer: Tuple[Tuple[str]] = ( + ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), + ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), + ), + clip_text_in_channels: Optional[int] = None, + clip_text_pooled_in_channels=1280, + clip_image_in_channels: Optional[int] = None, + clip_seq=4, + effnet_in_channels: Optional[int] = None, + pixel_mapper_in_channels: Optional[int] = None, + kernel_size=3, + dropout: Union[float, Tuple[float]] = (0.1, 0.1), + self_attn: Union[bool, Tuple[bool]] = True, + timestep_conditioning_type: Tuple[str] = ("sca", "crp"), + switch_level: Optional[Tuple[bool]] = None, + ): + """ + + Parameters: + in_channels (`int`, defaults to 16): + Number of channels in the input sample. + out_channels (`int`, defaults to 16): + Number of channels in the output sample. + timestep_ratio_embedding_dim (`int`, defaults to 64): + Dimension of the projected time embedding. + patch_size (`int`, defaults to 1): + Patch size to use for pixel unshuffling layer + conditioning_dim (`int`, defaults to 2048): + Dimension of the image and text conditional embedding. 
+ block_out_channels (Tuple[int], defaults to (2048, 2048)): + Tuple of output channels for each block. + num_attention_heads (Tuple[int], defaults to (32, 32)): + Number of attention heads in each attention block. Set to -1 if block types in a layer do not have attention. + down_num_layers_per_block (Tuple[int], defaults to [8, 24]): + Number of layers in each down block. + up_num_layers_per_block (Tuple[int], defaults to [24, 8]): + Number of layers in each up block. + down_blocks_repeat_mappers (Tuple[int], optional, defaults to [1, 1]): + Number of 1x1 Convolutional layers to repeat in each down block. + up_blocks_repeat_mappers (Tuple[int], optional, defaults to [1, 1]): + Number of 1x1 Convolutional layers to repeat in each up block. + block_types_per_layer (Tuple[Tuple[str]], optional, + defaults to ( + ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), + ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock") + )): + Block types used in each layer of the up/down blocks. + clip_text_in_channels (`int`, *optional*, defaults to `None`): + Number of input channels for CLIP based text conditioning. + clip_text_pooled_in_channels (`int`, *optional*, defaults to 1280): + Number of input channels for pooled CLIP text embeddings. + clip_image_in_channels (`int`, *optional*): + Number of input channels for CLIP based image conditioning. + clip_seq (`int`, *optional*, defaults to 4): + Number of conditioning tokens each pooled CLIP embedding is expanded into. + effnet_in_channels (`int`, *optional*, defaults to `None`): + Number of input channels for effnet conditioning. + pixel_mapper_in_channels (`int`, defaults to `None`): + Number of input channels for pixel mapper conditioning. + kernel_size (`int`, *optional*, defaults to 3): + Kernel size to use in the block convolutional layers. + dropout (Tuple[float], *optional*, defaults to (0.1, 0.1)): + Dropout to use per block. + self_attn (Union[bool, Tuple[bool]]): + Tuple of booleans that determine whether to use self attention in a block or not. + timestep_conditioning_type (Tuple[str], defaults to ("sca", "crp")): + Timestep conditioning type.
+ switch_level (Optional[Tuple[bool]], *optional*, defaults to `None`): + Tuple that indicates whether upsampling or downsampling should be applied in a block + """ + + super().__init__() + + if len(block_out_channels) != len(down_num_layers_per_block): + raise ValueError( + f"Number of elements in `down_num_layers_per_block` must match the length of `block_out_channels`: {len(block_out_channels)}" + ) + + elif len(block_out_channels) != len(up_num_layers_per_block): + raise ValueError( + f"Number of elements in `up_num_layers_per_block` must match the length of `block_out_channels`: {len(block_out_channels)}" + ) + + elif len(block_out_channels) != len(down_blocks_repeat_mappers): + raise ValueError( + f"Number of elements in `down_blocks_repeat_mappers` must match the length of `block_out_channels`: {len(block_out_channels)}" + ) + + elif len(block_out_channels) != len(up_blocks_repeat_mappers): + raise ValueError( + f"Number of elements in `up_blocks_repeat_mappers` must match the length of `block_out_channels`: {len(block_out_channels)}" + ) + + elif len(block_out_channels) != len(block_types_per_layer): + raise ValueError( + f"Number of elements in `block_types_per_layer` must match the length of `block_out_channels`: {len(block_out_channels)}" + ) + + if isinstance(dropout, float): + dropout = (dropout,) * len(block_out_channels) + if isinstance(self_attn, bool): + self_attn = (self_attn,) * len(block_out_channels) + + # CONDITIONING + if effnet_in_channels is not None: + self.effnet_mapper = nn.Sequential( + nn.Conv2d(effnet_in_channels, block_out_channels[0] * 4, kernel_size=1), + nn.GELU(), + nn.Conv2d(block_out_channels[0] * 4, block_out_channels[0], kernel_size=1), + SDCascadeLayerNorm(block_out_channels[0], elementwise_affine=False, eps=1e-6), + ) + if pixel_mapper_in_channels is not None: + self.pixels_mapper = nn.Sequential( + nn.Conv2d(pixel_mapper_in_channels, block_out_channels[0] * 4, kernel_size=1), + nn.GELU(), + nn.Conv2d(block_out_channels[0] * 4, block_out_channels[0], kernel_size=1), + SDCascadeLayerNorm(block_out_channels[0], elementwise_affine=False, eps=1e-6), + ) + + self.clip_txt_pooled_mapper = nn.Linear(clip_text_pooled_in_channels, conditioning_dim * clip_seq) + if clip_text_in_channels is not None: + self.clip_txt_mapper = nn.Linear(clip_text_in_channels, conditioning_dim) + if clip_image_in_channels is not None: + self.clip_img_mapper = nn.Linear(clip_image_in_channels, conditioning_dim * clip_seq) + self.clip_norm = nn.LayerNorm(conditioning_dim, elementwise_affine=False, eps=1e-6) + + self.embedding = nn.Sequential( + nn.PixelUnshuffle(patch_size), + nn.Conv2d(in_channels * (patch_size**2), block_out_channels[0], kernel_size=1), + SDCascadeLayerNorm(block_out_channels[0], elementwise_affine=False, eps=1e-6), + ) + + def get_block(block_type, in_channels, nhead, c_skip=0, dropout=0, self_attn=True): + if block_type == "SDCascadeResBlock": + return SDCascadeResBlock(in_channels, c_skip, kernel_size=kernel_size, dropout=dropout) + elif block_type == "SDCascadeAttnBlock": + return SDCascadeAttnBlock(in_channels, conditioning_dim, nhead, self_attn=self_attn, dropout=dropout) + elif block_type == "SDCascadeTimestepBlock": + return SDCascadeTimestepBlock( + in_channels, timestep_ratio_embedding_dim, conds=timestep_conditioning_type + ) + else: + raise ValueError(f"Block type {block_type} not supported") + + # BLOCKS + # -- down blocks + self.down_blocks = nn.ModuleList() + self.down_downscalers = nn.ModuleList() + self.down_repeat_mappers = nn.ModuleList() + 
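        # The loop below builds one entry per resolution level: a downscaler (nn.Identity at
        # the top level, otherwise LayerNorm followed by a strided conv or an UpDownBlock2d,
        # depending on `switch_level`), then `down_num_layers_per_block[i]` repetitions of the
        # block types listed in `block_types_per_layer[i]`, and finally the optional stack of
        # 1x1 "repeat mapper" convolutions.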
for i in range(len(block_out_channels)): + if i > 0: + self.down_downscalers.append( + nn.Sequential( + SDCascadeLayerNorm(block_out_channels[i - 1], elementwise_affine=False, eps=1e-6), + UpDownBlock2d( + block_out_channels[i - 1], block_out_channels[i], mode="down", enabled=switch_level[i - 1] + ) + if switch_level is not None + else nn.Conv2d(block_out_channels[i - 1], block_out_channels[i], kernel_size=2, stride=2), + ) + ) + else: + self.down_downscalers.append(nn.Identity()) + + down_block = nn.ModuleList() + for _ in range(down_num_layers_per_block[i]): + for block_type in block_types_per_layer[i]: + block = get_block( + block_type, + block_out_channels[i], + num_attention_heads[i], + dropout=dropout[i], + self_attn=self_attn[i], + ) + down_block.append(block) + self.down_blocks.append(down_block) + + if down_blocks_repeat_mappers is not None: + block_repeat_mappers = nn.ModuleList() + for _ in range(down_blocks_repeat_mappers[i] - 1): + block_repeat_mappers.append(nn.Conv2d(block_out_channels[i], block_out_channels[i], kernel_size=1)) + self.down_repeat_mappers.append(block_repeat_mappers) + + # -- up blocks + self.up_blocks = nn.ModuleList() + self.up_upscalers = nn.ModuleList() + self.up_repeat_mappers = nn.ModuleList() + for i in reversed(range(len(block_out_channels))): + if i > 0: + self.up_upscalers.append( + nn.Sequential( + SDCascadeLayerNorm(block_out_channels[i], elementwise_affine=False, eps=1e-6), + UpDownBlock2d( + block_out_channels[i], block_out_channels[i - 1], mode="up", enabled=switch_level[i - 1] + ) + if switch_level is not None + else nn.ConvTranspose2d( + block_out_channels[i], block_out_channels[i - 1], kernel_size=2, stride=2 + ), + ) + ) + else: + self.up_upscalers.append(nn.Identity()) + + up_block = nn.ModuleList() + for j in range(up_num_layers_per_block[::-1][i]): + for k, block_type in enumerate(block_types_per_layer[i]): + c_skip = block_out_channels[i] if i < len(block_out_channels) - 1 and j == k == 0 else 0 + block = get_block( + block_type, + block_out_channels[i], + num_attention_heads[i], + c_skip=c_skip, + dropout=dropout[i], + self_attn=self_attn[i], + ) + up_block.append(block) + self.up_blocks.append(up_block) + + if up_blocks_repeat_mappers is not None: + block_repeat_mappers = nn.ModuleList() + for _ in range(up_blocks_repeat_mappers[::-1][i] - 1): + block_repeat_mappers.append(nn.Conv2d(block_out_channels[i], block_out_channels[i], kernel_size=1)) + self.up_repeat_mappers.append(block_repeat_mappers) + + # OUTPUT + self.clf = nn.Sequential( + SDCascadeLayerNorm(block_out_channels[0], elementwise_affine=False, eps=1e-6), + nn.Conv2d(block_out_channels[0], out_channels * (patch_size**2), kernel_size=1), + nn.PixelShuffle(patch_size), + ) + + self.gradient_checkpointing = False + + def _set_gradient_checkpointing(self, value=False): + self.gradient_checkpointing = value + + def _init_weights(self, m): + if isinstance(m, (nn.Conv2d, nn.Linear)): + torch.nn.init.xavier_uniform_(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + nn.init.normal_(self.clip_txt_pooled_mapper.weight, std=0.02) + nn.init.normal_(self.clip_txt_mapper.weight, std=0.02) if hasattr(self, "clip_txt_mapper") else None + nn.init.normal_(self.clip_img_mapper.weight, std=0.02) if hasattr(self, "clip_img_mapper") else None + + if hasattr(self, "effnet_mapper"): + nn.init.normal_(self.effnet_mapper[0].weight, std=0.02) # conditionings + nn.init.normal_(self.effnet_mapper[2].weight, std=0.02) # conditionings + + if hasattr(self, "pixels_mapper"): + 
nn.init.normal_(self.pixels_mapper[0].weight, std=0.02) # conditionings + nn.init.normal_(self.pixels_mapper[2].weight, std=0.02) # conditionings + + torch.nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) # inputs + nn.init.constant_(self.clf[1].weight, 0) # outputs + + # blocks + for level_block in self.down_blocks + self.up_blocks: + for block in level_block: + if isinstance(block, SDCascadeResBlock): + block.channelwise[-1].weight.data *= np.sqrt(1 / sum(self.config.blocks[0])) + elif isinstance(block, SDCascadeTimestepBlock): + nn.init.constant_(block.mapper.weight, 0) + + def get_timestep_ratio_embedding(self, timestep_ratio, max_positions=10000): + r = timestep_ratio * max_positions + half_dim = self.config.timestep_ratio_embedding_dim // 2 + + emb = math.log(max_positions) / (half_dim - 1) + emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() + emb = r[:, None] * emb[None, :] + emb = torch.cat([emb.sin(), emb.cos()], dim=1) + + if self.config.timestep_ratio_embedding_dim % 2 == 1: # zero pad + emb = nn.functional.pad(emb, (0, 1), mode="constant") + + return emb.to(dtype=r.dtype) + + def get_clip_embeddings(self, clip_txt_pooled, clip_txt=None, clip_img=None): + if len(clip_txt_pooled.shape) == 2: + clip_txt_pooled = clip_txt_pooled.unsqueeze(1) + clip_txt_pool = self.clip_txt_pooled_mapper(clip_txt_pooled).view( + clip_txt_pooled.size(0), clip_txt_pooled.size(1) * self.config.clip_seq, -1 + ) + if clip_txt is not None and clip_img is not None: + clip_txt = self.clip_txt_mapper(clip_txt) + if len(clip_img.shape) == 2: + clip_img = clip_img.unsqueeze(1) + clip_img = self.clip_img_mapper(clip_img).view( + clip_img.size(0), clip_img.size(1) * self.config.clip_seq, -1 + ) + clip = torch.cat([clip_txt, clip_txt_pool, clip_img], dim=1) + else: + clip = clip_txt_pool + return self.clip_norm(clip) + + def _down_encode(self, x, r_embed, clip): + level_outputs = [] + block_group = zip(self.down_blocks, self.down_downscalers, self.down_repeat_mappers) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + for down_block, downscaler, repmap in block_group: + x = downscaler(x) + for i in range(len(repmap) + 1): + for block in down_block: + if isinstance(block, SDCascadeResBlock): + x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False) + elif isinstance(block, SDCascadeAttnBlock): + x = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), x, clip, use_reentrant=False + ) + elif isinstance(block, SDCascadeTimestepBlock): + x = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), x, r_embed, use_reentrant=False + ) + else: + x = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), x, use_reentrant=False + ) + if i < len(repmap): + x = repmap[i](x) + level_outputs.insert(0, x) + else: + for down_block, downscaler, repmap in block_group: + x = downscaler(x) + for i in range(len(repmap) + 1): + for block in down_block: + if isinstance(block, SDCascadeResBlock): + x = block(x) + elif isinstance(block, SDCascadeAttnBlock): + x = block(x, clip) + elif isinstance(block, SDCascadeTimestepBlock): + x = block(x, r_embed) + else: + x = block(x) + if i < len(repmap): + x = repmap[i](x) + level_outputs.insert(0, x) + return level_outputs + + def _up_decode(self, level_outputs, r_embed, clip): + x = level_outputs[0] + block_group = zip(self.up_blocks, self.up_upscalers,
self.up_repeat_mappers) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + for i, (up_block, upscaler, repmap) in enumerate(block_group): + for j in range(len(repmap) + 1): + for k, block in enumerate(up_block): + if isinstance(block, SDCascadeResBlock): + skip = level_outputs[i] if k == 0 and i > 0 else None + if skip is not None and (x.size(-1) != skip.size(-1) or x.size(-2) != skip.size(-2)): + x = torch.nn.functional.interpolate( + x.float(), skip.shape[-2:], mode="bilinear", align_corners=True + ) + x = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), x, skip, use_reentrant=False + ) + elif isinstance(block, SDCascadeAttnBlock): + x = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), x, clip, use_reentrant=False + ) + elif isinstance(block, SDCascadeTimestepBlock): + x = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), x, r_embed, use_reentrant=False + ) + else: + x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False) + if j < len(repmap): + x = repmap[j](x) + x = upscaler(x) + else: + for i, (up_block, upscaler, repmap) in enumerate(block_group): + for j in range(len(repmap) + 1): + for k, block in enumerate(up_block): + if isinstance(block, SDCascadeResBlock): + skip = level_outputs[i] if k == 0 and i > 0 else None + if skip is not None and (x.size(-1) != skip.size(-1) or x.size(-2) != skip.size(-2)): + x = torch.nn.functional.interpolate( + x.float(), skip.shape[-2:], mode="bilinear", align_corners=True + ) + x = block(x, skip) + elif isinstance(block, SDCascadeAttnBlock): + x = block(x, clip) + elif isinstance(block, SDCascadeTimestepBlock): + x = block(x, r_embed) + else: + x = block(x) + if j < len(repmap): + x = repmap[j](x) + x = upscaler(x) + return x + + def forward( + self, + sample, + timestep_ratio, + clip_text_pooled, + clip_text=None, + clip_img=None, + effnet=None, + pixels=None, + sca=None, + crp=None, + return_dict=True, + ): + if pixels is None: + pixels = sample.new_zeros(sample.size(0), 3, 8, 8) + + # Process the conditioning embeddings + timestep_ratio_embed = self.get_timestep_ratio_embedding(timestep_ratio) + for c in self.config.timestep_conditioning_type: + if c == "sca": + cond = sca + elif c == "crp": + cond = crp + else: + cond = None + t_cond = cond or torch.zeros_like(timestep_ratio) + timestep_ratio_embed = torch.cat([timestep_ratio_embed, self.get_timestep_ratio_embedding(t_cond)], dim=1) + clip = self.get_clip_embeddings(clip_txt_pooled=clip_text_pooled, clip_txt=clip_text, clip_img=clip_img) + + # Model Blocks + x = self.embedding(sample) + if hasattr(self, "effnet_mapper") and effnet is not None: + x = x + self.effnet_mapper( + nn.functional.interpolate(effnet, size=x.shape[-2:], mode="bilinear", align_corners=True) + ) + if hasattr(self, "pixels_mapper"): + x = x + nn.functional.interpolate( + self.pixels_mapper(pixels), size=x.shape[-2:], mode="bilinear", align_corners=True + ) + level_outputs = self._down_encode(x, timestep_ratio_embed, clip) + x = self._up_decode(level_outputs, timestep_ratio_embed, clip) + sample = self.clf(x) + + if not return_dict: + return (sample,) + return StableCascadeUNetOutput(sample=sample) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/uvit_2d.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/uvit_2d.py 
new file mode 100644 index 000000000..bfd865d12 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/unets/uvit_2d.py @@ -0,0 +1,470 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, Union + +import torch +import torch.nn.functional as F +from torch import nn +from torch.utils.checkpoint import checkpoint + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import PeftAdapterMixin +from ..attention import BasicTransformerBlock, SkipFFTransformerBlock +from ..attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from ..embeddings import TimestepEmbedding, get_timestep_embedding +from ..modeling_utils import ModelMixin +from ..normalization import GlobalResponseNorm, RMSNorm +from ..resnet import Downsample2D, Upsample2D + + +class UVit2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + # global config + hidden_size: int = 1024, + use_bias: bool = False, + hidden_dropout: float = 0.0, + # conditioning dimensions + cond_embed_dim: int = 768, + micro_cond_encode_dim: int = 256, + micro_cond_embed_dim: int = 1280, + encoder_hidden_size: int = 768, + # num tokens + vocab_size: int = 8256, # codebook_size + 1 (for the mask token) rounded + codebook_size: int = 8192, + # `UVit2DConvEmbed` + in_channels: int = 768, + block_out_channels: int = 768, + num_res_blocks: int = 3, + downsample: bool = False, + upsample: bool = False, + block_num_heads: int = 12, + # `TransformerLayer` + num_hidden_layers: int = 22, + num_attention_heads: int = 16, + # `Attention` + attention_dropout: float = 0.0, + # `FeedForward` + intermediate_size: int = 2816, + # `Norm` + layer_norm_eps: float = 1e-6, + ln_elementwise_affine: bool = True, + sample_size: int = 64, + ): + super().__init__() + + self.encoder_proj = nn.Linear(encoder_hidden_size, hidden_size, bias=use_bias) + self.encoder_proj_layer_norm = RMSNorm(hidden_size, layer_norm_eps, ln_elementwise_affine) + + self.embed = UVit2DConvEmbed( + in_channels, block_out_channels, vocab_size, ln_elementwise_affine, layer_norm_eps, use_bias + ) + + self.cond_embed = TimestepEmbedding( + micro_cond_embed_dim + cond_embed_dim, hidden_size, sample_proj_bias=use_bias + ) + + self.down_block = UVitBlock( + block_out_channels, + num_res_blocks, + hidden_size, + hidden_dropout, + ln_elementwise_affine, + layer_norm_eps, + use_bias, + block_num_heads, + attention_dropout, + downsample, + False, + ) + + self.project_to_hidden_norm = RMSNorm(block_out_channels, layer_norm_eps, ln_elementwise_affine) + self.project_to_hidden = nn.Linear(block_out_channels, hidden_size, bias=use_bias) + + self.transformer_layers = nn.ModuleList( + [ + BasicTransformerBlock( + dim=hidden_size, + num_attention_heads=num_attention_heads, + 
attention_head_dim=hidden_size // num_attention_heads, + dropout=hidden_dropout, + cross_attention_dim=hidden_size, + attention_bias=use_bias, + norm_type="ada_norm_continuous", + ada_norm_continous_conditioning_embedding_dim=hidden_size, + norm_elementwise_affine=ln_elementwise_affine, + norm_eps=layer_norm_eps, + ada_norm_bias=use_bias, + ff_inner_dim=intermediate_size, + ff_bias=use_bias, + attention_out_bias=use_bias, + ) + for _ in range(num_hidden_layers) + ] + ) + + self.project_from_hidden_norm = RMSNorm(hidden_size, layer_norm_eps, ln_elementwise_affine) + self.project_from_hidden = nn.Linear(hidden_size, block_out_channels, bias=use_bias) + + self.up_block = UVitBlock( + block_out_channels, + num_res_blocks, + hidden_size, + hidden_dropout, + ln_elementwise_affine, + layer_norm_eps, + use_bias, + block_num_heads, + attention_dropout, + downsample=False, + upsample=upsample, + ) + + self.mlm_layer = ConvMlmLayer( + block_out_channels, in_channels, use_bias, ln_elementwise_affine, layer_norm_eps, codebook_size + ) + + self.gradient_checkpointing = False + + def _set_gradient_checkpointing(self, module, value: bool = False) -> None: + pass + + def forward(self, input_ids, encoder_hidden_states, pooled_text_emb, micro_conds, cross_attention_kwargs=None): + encoder_hidden_states = self.encoder_proj(encoder_hidden_states) + encoder_hidden_states = self.encoder_proj_layer_norm(encoder_hidden_states) + + micro_cond_embeds = get_timestep_embedding( + micro_conds.flatten(), self.config.micro_cond_encode_dim, flip_sin_to_cos=True, downscale_freq_shift=0 + ) + + micro_cond_embeds = micro_cond_embeds.reshape((input_ids.shape[0], -1)) + + pooled_text_emb = torch.cat([pooled_text_emb, micro_cond_embeds], dim=1) + pooled_text_emb = pooled_text_emb.to(dtype=self.dtype) + pooled_text_emb = self.cond_embed(pooled_text_emb).to(encoder_hidden_states.dtype) + + hidden_states = self.embed(input_ids) + + hidden_states = self.down_block( + hidden_states, + pooled_text_emb=pooled_text_emb, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + ) + + batch_size, channels, height, width = hidden_states.shape + hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch_size, height * width, channels) + + hidden_states = self.project_to_hidden_norm(hidden_states) + hidden_states = self.project_to_hidden(hidden_states) + + for layer in self.transformer_layers: + if self.training and self.gradient_checkpointing: + + def layer_(*args): + return checkpoint(layer, *args) + + else: + layer_ = layer + + hidden_states = layer_( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs={"pooled_text_emb": pooled_text_emb}, + ) + + hidden_states = self.project_from_hidden_norm(hidden_states) + hidden_states = self.project_from_hidden(hidden_states) + + hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2) + + hidden_states = self.up_block( + hidden_states, + pooled_text_emb=pooled_text_emb, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + ) + + logits = self.mlm_layer(hidden_states) + + return logits + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its 
weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. 
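        For example (a sketch, assuming `model` is an already-constructed `UVit2DModel`):

            model.set_default_attn_processor()
            # Every processor is now either AttnProcessor or AttnAddedKVProcessor.
            print({type(p).__name__ for p in model.attn_processors.values()})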
+ """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + +class UVit2DConvEmbed(nn.Module): + def __init__(self, in_channels, block_out_channels, vocab_size, elementwise_affine, eps, bias): + super().__init__() + self.embeddings = nn.Embedding(vocab_size, in_channels) + self.layer_norm = RMSNorm(in_channels, eps, elementwise_affine) + self.conv = nn.Conv2d(in_channels, block_out_channels, kernel_size=1, bias=bias) + + def forward(self, input_ids): + embeddings = self.embeddings(input_ids) + embeddings = self.layer_norm(embeddings) + embeddings = embeddings.permute(0, 3, 1, 2) + embeddings = self.conv(embeddings) + return embeddings + + +class UVitBlock(nn.Module): + def __init__( + self, + channels, + num_res_blocks: int, + hidden_size, + hidden_dropout, + ln_elementwise_affine, + layer_norm_eps, + use_bias, + block_num_heads, + attention_dropout, + downsample: bool, + upsample: bool, + ): + super().__init__() + + if downsample: + self.downsample = Downsample2D( + channels, + use_conv=True, + padding=0, + name="Conv2d_0", + kernel_size=2, + norm_type="rms_norm", + eps=layer_norm_eps, + elementwise_affine=ln_elementwise_affine, + bias=use_bias, + ) + else: + self.downsample = None + + self.res_blocks = nn.ModuleList( + [ + ConvNextBlock( + channels, + layer_norm_eps, + ln_elementwise_affine, + use_bias, + hidden_dropout, + hidden_size, + ) + for i in range(num_res_blocks) + ] + ) + + self.attention_blocks = nn.ModuleList( + [ + SkipFFTransformerBlock( + channels, + block_num_heads, + channels // block_num_heads, + hidden_size, + use_bias, + attention_dropout, + channels, + attention_bias=use_bias, + attention_out_bias=use_bias, + ) + for _ in range(num_res_blocks) + ] + ) + + if upsample: + self.upsample = Upsample2D( + channels, + use_conv_transpose=True, + kernel_size=2, + padding=0, + name="conv", + norm_type="rms_norm", + eps=layer_norm_eps, + elementwise_affine=ln_elementwise_affine, + bias=use_bias, + interpolate=False, + ) + else: + self.upsample = None + + def forward(self, x, pooled_text_emb, encoder_hidden_states, cross_attention_kwargs): + if self.downsample is not None: + x = self.downsample(x) + + for res_block, attention_block in zip(self.res_blocks, self.attention_blocks): + x = res_block(x, pooled_text_emb) + + batch_size, channels, height, width = x.shape + x = x.view(batch_size, channels, height * width).permute(0, 2, 1) + x = attention_block( + x, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs + ) + x = x.permute(0, 2, 1).view(batch_size, channels, height, width) + + if self.upsample is not None: + x = self.upsample(x) + + return x + + +class ConvNextBlock(nn.Module): + def __init__( + self, channels, layer_norm_eps, ln_elementwise_affine, use_bias, hidden_dropout, hidden_size, res_ffn_factor=4 + ): + super().__init__() + self.depthwise = nn.Conv2d( + channels, + channels, + kernel_size=3, + padding=1, + groups=channels, + bias=use_bias, + ) + self.norm = RMSNorm(channels, layer_norm_eps, ln_elementwise_affine) + self.channelwise_linear_1 = nn.Linear(channels, int(channels * res_ffn_factor), bias=use_bias) + 
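        # The remaining layers complete a ConvNeXt-style inverted bottleneck: GELU, global
        # response normalization, a projection back down to `channels`, and dropout;
        # `cond_embeds_mapper` produces the (scale, shift) pair that forward() applies
        # after the residual addition.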
self.channelwise_act = nn.GELU() + self.channelwise_norm = GlobalResponseNorm(int(channels * res_ffn_factor)) + self.channelwise_linear_2 = nn.Linear(int(channels * res_ffn_factor), channels, bias=use_bias) + self.channelwise_dropout = nn.Dropout(hidden_dropout) + self.cond_embeds_mapper = nn.Linear(hidden_size, channels * 2, use_bias) + + def forward(self, x, cond_embeds): + x_res = x + + x = self.depthwise(x) + + x = x.permute(0, 2, 3, 1) + x = self.norm(x) + + x = self.channelwise_linear_1(x) + x = self.channelwise_act(x) + x = self.channelwise_norm(x) + x = self.channelwise_linear_2(x) + x = self.channelwise_dropout(x) + + x = x.permute(0, 3, 1, 2) + + x = x + x_res + + scale, shift = self.cond_embeds_mapper(F.silu(cond_embeds)).chunk(2, dim=1) + x = x * (1 + scale[:, :, None, None]) + shift[:, :, None, None] + + return x + + +class ConvMlmLayer(nn.Module): + def __init__( + self, + block_out_channels: int, + in_channels: int, + use_bias: bool, + ln_elementwise_affine: bool, + layer_norm_eps: float, + codebook_size: int, + ): + super().__init__() + self.conv1 = nn.Conv2d(block_out_channels, in_channels, kernel_size=1, bias=use_bias) + self.layer_norm = RMSNorm(in_channels, layer_norm_eps, ln_elementwise_affine) + self.conv2 = nn.Conv2d(in_channels, codebook_size, kernel_size=1, bias=use_bias) + + def forward(self, hidden_states): + hidden_states = self.conv1(hidden_states) + hidden_states = self.layer_norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + logits = self.conv2(hidden_states) + return logits diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/upsampling.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/upsampling.py new file mode 100644 index 000000000..4ecf6ebc2 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/upsampling.py @@ -0,0 +1,448 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..utils import deprecate +from .normalization import RMSNorm + + +class Upsample1D(nn.Module): + """A 1D upsampling layer with an optional convolution. + + Parameters: + channels (`int`): + number of channels in the inputs and outputs. + use_conv (`bool`, default `False`): + option to use a convolution. + use_conv_transpose (`bool`, default `False`): + option to use a convolution transpose. + out_channels (`int`, optional): + number of output channels. Defaults to `channels`. + name (`str`, default `conv`): + name of the upsampling 1D layer. 
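    For illustration only (the shapes are assumptions, not taken from this file), a minimal
    usage sketch:

        import torch

        up = Upsample1D(channels=32, use_conv=True)
        x = torch.randn(4, 32, 16)   # (batch, channels, length)
        y = up(x)                    # nearest-neighbor x2, then a kernel-3 conv -> (4, 32, 32)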
+ """ + + def __init__( + self, + channels: int, + use_conv: bool = False, + use_conv_transpose: bool = False, + out_channels: Optional[int] = None, + name: str = "conv", + ): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_conv_transpose = use_conv_transpose + self.name = name + + self.conv = None + if use_conv_transpose: + self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1) + elif use_conv: + self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1) + + def forward(self, inputs: torch.Tensor) -> torch.Tensor: + assert inputs.shape[1] == self.channels + if self.use_conv_transpose: + return self.conv(inputs) + + outputs = F.interpolate(inputs, scale_factor=2.0, mode="nearest") + + if self.use_conv: + outputs = self.conv(outputs) + + return outputs + + +class Upsample2D(nn.Module): + """A 2D upsampling layer with an optional convolution. + + Parameters: + channels (`int`): + number of channels in the inputs and outputs. + use_conv (`bool`, default `False`): + option to use a convolution. + use_conv_transpose (`bool`, default `False`): + option to use a convolution transpose. + out_channels (`int`, optional): + number of output channels. Defaults to `channels`. + name (`str`, default `conv`): + name of the upsampling 2D layer. + """ + + def __init__( + self, + channels: int, + use_conv: bool = False, + use_conv_transpose: bool = False, + out_channels: Optional[int] = None, + name: str = "conv", + kernel_size: Optional[int] = None, + padding=1, + norm_type=None, + eps=None, + elementwise_affine=None, + bias=True, + interpolate=True, + ): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_conv_transpose = use_conv_transpose + self.name = name + self.interpolate = interpolate + conv_cls = nn.Conv2d + + if norm_type == "ln_norm": + self.norm = nn.LayerNorm(channels, eps, elementwise_affine) + elif norm_type == "rms_norm": + self.norm = RMSNorm(channels, eps, elementwise_affine) + elif norm_type is None: + self.norm = None + else: + raise ValueError(f"unknown norm_type: {norm_type}") + + conv = None + if use_conv_transpose: + if kernel_size is None: + kernel_size = 4 + conv = nn.ConvTranspose2d( + channels, self.out_channels, kernel_size=kernel_size, stride=2, padding=padding, bias=bias + ) + elif use_conv: + if kernel_size is None: + kernel_size = 3 + conv = conv_cls(self.channels, self.out_channels, kernel_size=kernel_size, padding=padding, bias=bias) + + # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed + if name == "conv": + self.conv = conv + else: + self.Conv2d_0 = conv + + def forward( + self, hidden_states: torch.FloatTensor, output_size: Optional[int] = None, *args, **kwargs + ) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
+ deprecate("scale", "1.0.0", deprecation_message) + + assert hidden_states.shape[1] == self.channels + + if self.norm is not None: + hidden_states = self.norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + + if self.use_conv_transpose: + return self.conv(hidden_states) + + # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16 + # TODO(Suraj): Remove this cast once the issue is fixed in PyTorch + # https://github.com/pytorch/pytorch/issues/86679 + dtype = hidden_states.dtype + if dtype == torch.bfloat16: + hidden_states = hidden_states.to(torch.float32) + + # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984 + if hidden_states.shape[0] >= 64: + hidden_states = hidden_states.contiguous() + + # if `output_size` is passed we force the interpolation output + # size and do not make use of `scale_factor=2` + if self.interpolate: + if output_size is None: + hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest") + else: + hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest") + + # If the input is bfloat16, we cast back to bfloat16 + if dtype == torch.bfloat16: + hidden_states = hidden_states.to(dtype) + + # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed + if self.use_conv: + if self.name == "conv": + hidden_states = self.conv(hidden_states) + else: + hidden_states = self.Conv2d_0(hidden_states) + + return hidden_states + + +class FirUpsample2D(nn.Module): + """A 2D FIR upsampling layer with an optional convolution. + + Parameters: + channels (`int`, optional): + number of channels in the inputs and outputs. + use_conv (`bool`, default `False`): + option to use a convolution. + out_channels (`int`, optional): + number of output channels. Defaults to `channels`. + fir_kernel (`tuple`, default `(1, 3, 3, 1)`): + kernel for the FIR filter. + """ + + def __init__( + self, + channels: Optional[int] = None, + out_channels: Optional[int] = None, + use_conv: bool = False, + fir_kernel: Tuple[int, int, int, int] = (1, 3, 3, 1), + ): + super().__init__() + out_channels = out_channels if out_channels else channels + if use_conv: + self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) + self.use_conv = use_conv + self.fir_kernel = fir_kernel + self.out_channels = out_channels + + def _upsample_2d( + self, + hidden_states: torch.FloatTensor, + weight: Optional[torch.FloatTensor] = None, + kernel: Optional[torch.FloatTensor] = None, + factor: int = 2, + gain: float = 1, + ) -> torch.FloatTensor: + """Fused `upsample_2d()` followed by `Conv2d()`. + + Padding is performed only once at the beginning, not between the operations. The fused op is considerably more + efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of + arbitrary order. + + Args: + hidden_states (`torch.FloatTensor`): + Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. + weight (`torch.FloatTensor`, *optional*): + Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be + performed by `inChannels = x.shape[0] // numGroups`. + kernel (`torch.FloatTensor`, *optional*): + FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which + corresponds to nearest-neighbor upsampling. + factor (`int`, *optional*): Integer upsampling factor (default: 2). 
+ gain (`float`, *optional*): Scaling factor for signal magnitude (default: 1.0). + + Returns: + output (`torch.FloatTensor`): + Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same + datatype as `hidden_states`. + """ + + assert isinstance(factor, int) and factor >= 1 + + # Setup filter kernel. + if kernel is None: + kernel = [1] * factor + + # setup kernel + kernel = torch.tensor(kernel, dtype=torch.float32) + if kernel.ndim == 1: + kernel = torch.outer(kernel, kernel) + kernel /= torch.sum(kernel) + + kernel = kernel * (gain * (factor**2)) + + if self.use_conv: + convH = weight.shape[2] + convW = weight.shape[3] + inC = weight.shape[1] + + pad_value = (kernel.shape[0] - factor) - (convW - 1) + + stride = (factor, factor) + # Determine data dimensions. + output_shape = ( + (hidden_states.shape[2] - 1) * factor + convH, + (hidden_states.shape[3] - 1) * factor + convW, + ) + output_padding = ( + output_shape[0] - (hidden_states.shape[2] - 1) * stride[0] - convH, + output_shape[1] - (hidden_states.shape[3] - 1) * stride[1] - convW, + ) + assert output_padding[0] >= 0 and output_padding[1] >= 0 + num_groups = hidden_states.shape[1] // inC + + # Transpose weights. + weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW)) + weight = torch.flip(weight, dims=[3, 4]).permute(0, 2, 1, 3, 4) + weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW)) + + inverse_conv = F.conv_transpose2d( + hidden_states, + weight, + stride=stride, + output_padding=output_padding, + padding=0, + ) + + output = upfirdn2d_native( + inverse_conv, + torch.tensor(kernel, device=inverse_conv.device), + pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2 + 1), + ) + else: + pad_value = kernel.shape[0] - factor + output = upfirdn2d_native( + hidden_states, + torch.tensor(kernel, device=hidden_states.device), + up=factor, + pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2), + ) + + return output + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + if self.use_conv: + height = self._upsample_2d(hidden_states, self.Conv2d_0.weight, kernel=self.fir_kernel) + height = height + self.Conv2d_0.bias.reshape(1, -1, 1, 1) + else: + height = self._upsample_2d(hidden_states, kernel=self.fir_kernel, factor=2) + + return height + + +class KUpsample2D(nn.Module): + r"""A 2D K-upsampling layer. + + Parameters: + pad_mode (`str`, *optional*, default to `"reflect"`): the padding mode to use. 
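    A small usage sketch (shapes are illustrative assumptions):

        import torch

        up = KUpsample2D()
        x = torch.randn(1, 8, 16, 16)
        y = up(x)   # transposed conv with a fixed separable smoothing kernel -> (1, 8, 32, 32)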
+ """ + + def __init__(self, pad_mode: str = "reflect"): + super().__init__() + self.pad_mode = pad_mode + kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) * 2 + self.pad = kernel_1d.shape[1] // 2 - 1 + self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False) + + def forward(self, inputs: torch.Tensor) -> torch.Tensor: + inputs = F.pad(inputs, ((self.pad + 1) // 2,) * 4, self.pad_mode) + weight = inputs.new_zeros( + [ + inputs.shape[1], + inputs.shape[1], + self.kernel.shape[0], + self.kernel.shape[1], + ] + ) + indices = torch.arange(inputs.shape[1], device=inputs.device) + kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1) + weight[indices, indices] = kernel + return F.conv_transpose2d(inputs, weight, stride=2, padding=self.pad * 2 + 1) + + +def upfirdn2d_native( + tensor: torch.Tensor, + kernel: torch.Tensor, + up: int = 1, + down: int = 1, + pad: Tuple[int, int] = (0, 0), +) -> torch.Tensor: + up_x = up_y = up + down_x = down_y = down + pad_x0 = pad_y0 = pad[0] + pad_x1 = pad_y1 = pad[1] + + _, channel, in_h, in_w = tensor.shape + tensor = tensor.reshape(-1, in_h, in_w, 1) + + _, in_h, in_w, minor = tensor.shape + kernel_h, kernel_w = kernel.shape + + out = tensor.view(-1, in_h, 1, in_w, 1, minor) + out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) + out = out.view(-1, in_h * up_y, in_w * up_x, minor) + + out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) + out = out.to(tensor.device) # Move back to mps if necessary + out = out[ + :, + max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), + max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), + :, + ] + + out = out.permute(0, 3, 1, 2) + out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) + w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) + out = F.conv2d(out, w) + out = out.reshape( + -1, + minor, + in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, + in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, + ) + out = out.permute(0, 2, 3, 1) + out = out[:, ::down_y, ::down_x, :] + + out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 + out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 + + return out.view(-1, channel, out_h, out_w) + + +def upsample_2d( + hidden_states: torch.FloatTensor, + kernel: Optional[torch.FloatTensor] = None, + factor: int = 2, + gain: float = 1, +) -> torch.FloatTensor: + r"""Upsample2D a batch of 2D images with the given filter. + Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given + filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified + `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is + a: multiple of the upsampling factor. + + Args: + hidden_states (`torch.FloatTensor`): + Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. + kernel (`torch.FloatTensor`, *optional*): + FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which + corresponds to nearest-neighbor upsampling. + factor (`int`, *optional*, default to `2`): + Integer upsampling factor. + gain (`float`, *optional*, default to `1.0`): + Scaling factor for signal magnitude (default: 1.0). 
+ + Returns: + output (`torch.FloatTensor`): + Tensor of the shape `[N, C, H * factor, W * factor]` + """ + assert isinstance(factor, int) and factor >= 1 + if kernel is None: + kernel = [1] * factor + + kernel = torch.tensor(kernel, dtype=torch.float32) + if kernel.ndim == 1: + kernel = torch.outer(kernel, kernel) + kernel /= torch.sum(kernel) + + kernel = kernel * (gain * (factor**2)) + pad_value = kernel.shape[0] - factor + output = upfirdn2d_native( + hidden_states, + kernel.to(device=hidden_states.device), + up=factor, + pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2), + ) + return output diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/vae_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/vae_flax.py new file mode 100644 index 000000000..5027f4230 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/vae_flax.py @@ -0,0 +1,876 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# JAX implementation of VQGAN from taming-transformers https://github.com/CompVis/taming-transformers + +import math +from functools import partial +from typing import Tuple + +import flax +import flax.linen as nn +import jax +import jax.numpy as jnp +from flax.core.frozen_dict import FrozenDict + +from ..configuration_utils import ConfigMixin, flax_register_to_config +from ..utils import BaseOutput +from .modeling_flax_utils import FlaxModelMixin + + +@flax.struct.dataclass +class FlaxDecoderOutput(BaseOutput): + """ + Output of decoding method. + + Args: + sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): + The decoded output sample from the last layer of the model. + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + The `dtype` of the parameters. + """ + + sample: jnp.ndarray + + +@flax.struct.dataclass +class FlaxAutoencoderKLOutput(BaseOutput): + """ + Output of AutoencoderKL encoding method. + + Args: + latent_dist (`FlaxDiagonalGaussianDistribution`): + Encoded outputs of `Encoder` represented as the mean and logvar of `FlaxDiagonalGaussianDistribution`. + `FlaxDiagonalGaussianDistribution` allows for sampling latents from the distribution. 
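    A hypothetical downstream usage, assuming a constructed `FlaxAutoencoderKL` called `vae`
    together with its `params` and an `images` batch (none of which are defined at this point
    in the file):

        import jax

        posterior = vae.apply({"params": params}, images, method=vae.encode).latent_dist
        latents = posterior.sample(jax.random.PRNGKey(0))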
+ """ + + latent_dist: "FlaxDiagonalGaussianDistribution" + + +class FlaxUpsample2D(nn.Module): + """ + Flax implementation of 2D Upsample layer + + Args: + in_channels (`int`): + Input channels + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + in_channels: int + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.conv = nn.Conv( + self.in_channels, + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + def __call__(self, hidden_states): + batch, height, width, channels = hidden_states.shape + hidden_states = jax.image.resize( + hidden_states, + shape=(batch, height * 2, width * 2, channels), + method="nearest", + ) + hidden_states = self.conv(hidden_states) + return hidden_states + + +class FlaxDownsample2D(nn.Module): + """ + Flax implementation of 2D Downsample layer + + Args: + in_channels (`int`): + Input channels + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + in_channels: int + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.conv = nn.Conv( + self.in_channels, + kernel_size=(3, 3), + strides=(2, 2), + padding="VALID", + dtype=self.dtype, + ) + + def __call__(self, hidden_states): + pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim + hidden_states = jnp.pad(hidden_states, pad_width=pad) + hidden_states = self.conv(hidden_states) + return hidden_states + + +class FlaxResnetBlock2D(nn.Module): + """ + Flax implementation of 2D Resnet Block. + + Args: + in_channels (`int`): + Input channels + out_channels (`int`): + Output channels + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + groups (:obj:`int`, *optional*, defaults to `32`): + The number of groups to use for group norm. + use_nin_shortcut (:obj:`bool`, *optional*, defaults to `None`): + Whether to use `nin_shortcut`. 
This activates a new layer inside ResNet block + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + in_channels: int + out_channels: int = None + dropout: float = 0.0 + groups: int = 32 + use_nin_shortcut: bool = None + dtype: jnp.dtype = jnp.float32 + + def setup(self): + out_channels = self.in_channels if self.out_channels is None else self.out_channels + + self.norm1 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6) + self.conv1 = nn.Conv( + out_channels, + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + self.norm2 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6) + self.dropout_layer = nn.Dropout(self.dropout) + self.conv2 = nn.Conv( + out_channels, + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut + + self.conv_shortcut = None + if use_nin_shortcut: + self.conv_shortcut = nn.Conv( + out_channels, + kernel_size=(1, 1), + strides=(1, 1), + padding="VALID", + dtype=self.dtype, + ) + + def __call__(self, hidden_states, deterministic=True): + residual = hidden_states + hidden_states = self.norm1(hidden_states) + hidden_states = nn.swish(hidden_states) + hidden_states = self.conv1(hidden_states) + + hidden_states = self.norm2(hidden_states) + hidden_states = nn.swish(hidden_states) + hidden_states = self.dropout_layer(hidden_states, deterministic) + hidden_states = self.conv2(hidden_states) + + if self.conv_shortcut is not None: + residual = self.conv_shortcut(residual) + + return hidden_states + residual + + +class FlaxAttentionBlock(nn.Module): + r""" + Flax Convolutional based multi-head attention block for diffusion-based VAE. 
+ + Parameters: + channels (:obj:`int`): + Input channels + num_head_channels (:obj:`int`, *optional*, defaults to `None`): + Number of attention heads + num_groups (:obj:`int`, *optional*, defaults to `32`): + The number of groups to use for group norm + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + + """ + + channels: int + num_head_channels: int = None + num_groups: int = 32 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.num_heads = self.channels // self.num_head_channels if self.num_head_channels is not None else 1 + + dense = partial(nn.Dense, self.channels, dtype=self.dtype) + + self.group_norm = nn.GroupNorm(num_groups=self.num_groups, epsilon=1e-6) + self.query, self.key, self.value = dense(), dense(), dense() + self.proj_attn = dense() + + def transpose_for_scores(self, projection): + new_projection_shape = projection.shape[:-1] + (self.num_heads, -1) + # move heads to 2nd position (B, T, H * D) -> (B, T, H, D) + new_projection = projection.reshape(new_projection_shape) + # (B, T, H, D) -> (B, H, T, D) + new_projection = jnp.transpose(new_projection, (0, 2, 1, 3)) + return new_projection + + def __call__(self, hidden_states): + residual = hidden_states + batch, height, width, channels = hidden_states.shape + + hidden_states = self.group_norm(hidden_states) + + hidden_states = hidden_states.reshape((batch, height * width, channels)) + + query = self.query(hidden_states) + key = self.key(hidden_states) + value = self.value(hidden_states) + + # transpose + query = self.transpose_for_scores(query) + key = self.transpose_for_scores(key) + value = self.transpose_for_scores(value) + + # compute attentions + scale = 1 / math.sqrt(math.sqrt(self.channels / self.num_heads)) + attn_weights = jnp.einsum("...qc,...kc->...qk", query * scale, key * scale) + attn_weights = nn.softmax(attn_weights, axis=-1) + + # attend to values + hidden_states = jnp.einsum("...kc,...qk->...qc", value, attn_weights) + + hidden_states = jnp.transpose(hidden_states, (0, 2, 1, 3)) + new_hidden_states_shape = hidden_states.shape[:-2] + (self.channels,) + hidden_states = hidden_states.reshape(new_hidden_states_shape) + + hidden_states = self.proj_attn(hidden_states) + hidden_states = hidden_states.reshape((batch, height, width, channels)) + hidden_states = hidden_states + residual + return hidden_states + + +class FlaxDownEncoderBlock2D(nn.Module): + r""" + Flax Resnet blocks-based Encoder block for diffusion-based VAE. 
+ + Parameters: + in_channels (:obj:`int`): + Input channels + out_channels (:obj:`int`): + Output channels + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + num_layers (:obj:`int`, *optional*, defaults to 1): + Number of Resnet layer block + resnet_groups (:obj:`int`, *optional*, defaults to `32`): + The number of groups to use for the Resnet block group norm + add_downsample (:obj:`bool`, *optional*, defaults to `True`): + Whether to add downsample layer + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + in_channels: int + out_channels: int + dropout: float = 0.0 + num_layers: int = 1 + resnet_groups: int = 32 + add_downsample: bool = True + dtype: jnp.dtype = jnp.float32 + + def setup(self): + resnets = [] + for i in range(self.num_layers): + in_channels = self.in_channels if i == 0 else self.out_channels + + res_block = FlaxResnetBlock2D( + in_channels=in_channels, + out_channels=self.out_channels, + dropout=self.dropout, + groups=self.resnet_groups, + dtype=self.dtype, + ) + resnets.append(res_block) + self.resnets = resnets + + if self.add_downsample: + self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype) + + def __call__(self, hidden_states, deterministic=True): + for resnet in self.resnets: + hidden_states = resnet(hidden_states, deterministic=deterministic) + + if self.add_downsample: + hidden_states = self.downsamplers_0(hidden_states) + + return hidden_states + + +class FlaxUpDecoderBlock2D(nn.Module): + r""" + Flax Resnet blocks-based Decoder block for diffusion-based VAE. + + Parameters: + in_channels (:obj:`int`): + Input channels + out_channels (:obj:`int`): + Output channels + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + num_layers (:obj:`int`, *optional*, defaults to 1): + Number of Resnet layer block + resnet_groups (:obj:`int`, *optional*, defaults to `32`): + The number of groups to use for the Resnet block group norm + add_upsample (:obj:`bool`, *optional*, defaults to `True`): + Whether to add upsample layer + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + in_channels: int + out_channels: int + dropout: float = 0.0 + num_layers: int = 1 + resnet_groups: int = 32 + add_upsample: bool = True + dtype: jnp.dtype = jnp.float32 + + def setup(self): + resnets = [] + for i in range(self.num_layers): + in_channels = self.in_channels if i == 0 else self.out_channels + res_block = FlaxResnetBlock2D( + in_channels=in_channels, + out_channels=self.out_channels, + dropout=self.dropout, + groups=self.resnet_groups, + dtype=self.dtype, + ) + resnets.append(res_block) + + self.resnets = resnets + + if self.add_upsample: + self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype) + + def __call__(self, hidden_states, deterministic=True): + for resnet in self.resnets: + hidden_states = resnet(hidden_states, deterministic=deterministic) + + if self.add_upsample: + hidden_states = self.upsamplers_0(hidden_states) + + return hidden_states + + +class FlaxUNetMidBlock2D(nn.Module): + r""" + Flax Unet Mid-Block module. 
+ + Parameters: + in_channels (:obj:`int`): + Input channels + dropout (:obj:`float`, *optional*, defaults to 0.0): + Dropout rate + num_layers (:obj:`int`, *optional*, defaults to 1): + Number of Resnet layer block + resnet_groups (:obj:`int`, *optional*, defaults to `32`): + The number of groups to use for the Resnet and Attention block group norm + num_attention_heads (:obj:`int`, *optional*, defaults to `1`): + Number of attention heads for each attention block + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + in_channels: int + dropout: float = 0.0 + num_layers: int = 1 + resnet_groups: int = 32 + num_attention_heads: int = 1 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + resnet_groups = self.resnet_groups if self.resnet_groups is not None else min(self.in_channels // 4, 32) + + # there is always at least one resnet + resnets = [ + FlaxResnetBlock2D( + in_channels=self.in_channels, + out_channels=self.in_channels, + dropout=self.dropout, + groups=resnet_groups, + dtype=self.dtype, + ) + ] + + attentions = [] + + for _ in range(self.num_layers): + attn_block = FlaxAttentionBlock( + channels=self.in_channels, + num_head_channels=self.num_attention_heads, + num_groups=resnet_groups, + dtype=self.dtype, + ) + attentions.append(attn_block) + + res_block = FlaxResnetBlock2D( + in_channels=self.in_channels, + out_channels=self.in_channels, + dropout=self.dropout, + groups=resnet_groups, + dtype=self.dtype, + ) + resnets.append(res_block) + + self.resnets = resnets + self.attentions = attentions + + def __call__(self, hidden_states, deterministic=True): + hidden_states = self.resnets[0](hidden_states, deterministic=deterministic) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + hidden_states = attn(hidden_states) + hidden_states = resnet(hidden_states, deterministic=deterministic) + + return hidden_states + + +class FlaxEncoder(nn.Module): + r""" + Flax Implementation of VAE Encoder. + + This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) + subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to + general usage and behavior. 
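+
+    A minimal usage sketch (illustrative only; note that the Flax modules in this file operate on channels-last
+    `[N, H, W, C]` arrays):
+
+    ```py
+    import jax
+    import jax.numpy as jnp
+
+    encoder = FlaxEncoder(block_out_channels=(64,), double_z=True)
+    sample = jnp.zeros((1, 32, 32, 3), dtype=jnp.float32)
+    params = encoder.init(jax.random.PRNGKey(0), sample)["params"]
+    latents = encoder.apply({"params": params}, sample)  # shape (1, 32, 32, 6) with double_z=True
+    ```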
+ + Finally, this model supports inherent JAX features such as: + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + in_channels (:obj:`int`, *optional*, defaults to 3): + Input channels + out_channels (:obj:`int`, *optional*, defaults to 3): + Output channels + down_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(DownEncoderBlock2D)`): + DownEncoder block type + block_out_channels (:obj:`Tuple[str]`, *optional*, defaults to `(64,)`): + Tuple containing the number of output channels for each block + layers_per_block (:obj:`int`, *optional*, defaults to `2`): + Number of Resnet layer for each block + norm_num_groups (:obj:`int`, *optional*, defaults to `32`): + norm num group + act_fn (:obj:`str`, *optional*, defaults to `silu`): + Activation function + double_z (:obj:`bool`, *optional*, defaults to `False`): + Whether to double the last output channels + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + Parameters `dtype` + """ + + in_channels: int = 3 + out_channels: int = 3 + down_block_types: Tuple[str] = ("DownEncoderBlock2D",) + block_out_channels: Tuple[int] = (64,) + layers_per_block: int = 2 + norm_num_groups: int = 32 + act_fn: str = "silu" + double_z: bool = False + dtype: jnp.dtype = jnp.float32 + + def setup(self): + block_out_channels = self.block_out_channels + # in + self.conv_in = nn.Conv( + block_out_channels[0], + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + # downsampling + down_blocks = [] + output_channel = block_out_channels[0] + for i, _ in enumerate(self.down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = FlaxDownEncoderBlock2D( + in_channels=input_channel, + out_channels=output_channel, + num_layers=self.layers_per_block, + resnet_groups=self.norm_num_groups, + add_downsample=not is_final_block, + dtype=self.dtype, + ) + down_blocks.append(down_block) + self.down_blocks = down_blocks + + # middle + self.mid_block = FlaxUNetMidBlock2D( + in_channels=block_out_channels[-1], + resnet_groups=self.norm_num_groups, + num_attention_heads=None, + dtype=self.dtype, + ) + + # end + conv_out_channels = 2 * self.out_channels if self.double_z else self.out_channels + self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-6) + self.conv_out = nn.Conv( + conv_out_channels, + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + def __call__(self, sample, deterministic: bool = True): + # in + sample = self.conv_in(sample) + + # downsampling + for block in self.down_blocks: + sample = block(sample, deterministic=deterministic) + + # middle + sample = self.mid_block(sample, deterministic=deterministic) + + # end + sample = self.conv_norm_out(sample) + sample = nn.swish(sample) + sample = self.conv_out(sample) + + return sample + + +class FlaxDecoder(nn.Module): + r""" + Flax Implementation of VAE Decoder. + + This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) + subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to + general usage and behavior. + + Finally, this model supports inherent JAX features such as: + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + in_channels (:obj:`int`, *optional*, defaults to 3): + Input channels + out_channels (:obj:`int`, *optional*, defaults to 3): + Output channels + up_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(UpDecoderBlock2D)`): + UpDecoder block type + block_out_channels (:obj:`Tuple[str]`, *optional*, defaults to `(64,)`): + Tuple containing the number of output channels for each block + layers_per_block (:obj:`int`, *optional*, defaults to `2`): + Number of Resnet layer for each block + norm_num_groups (:obj:`int`, *optional*, defaults to `32`): + norm num group + act_fn (:obj:`str`, *optional*, defaults to `silu`): + Activation function + double_z (:obj:`bool`, *optional*, defaults to `False`): + Whether to double the last output channels + dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): + parameters `dtype` + """ + + in_channels: int = 3 + out_channels: int = 3 + up_block_types: Tuple[str] = ("UpDecoderBlock2D",) + block_out_channels: int = (64,) + layers_per_block: int = 2 + norm_num_groups: int = 32 + act_fn: str = "silu" + dtype: jnp.dtype = jnp.float32 + + def setup(self): + block_out_channels = self.block_out_channels + + # z to block_in + self.conv_in = nn.Conv( + block_out_channels[-1], + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + # middle + self.mid_block = FlaxUNetMidBlock2D( + in_channels=block_out_channels[-1], + resnet_groups=self.norm_num_groups, + num_attention_heads=None, + dtype=self.dtype, + ) + + # upsampling + reversed_block_out_channels = list(reversed(block_out_channels)) + output_channel = reversed_block_out_channels[0] + up_blocks = [] + for i, _ in enumerate(self.up_block_types): + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + + is_final_block = i == len(block_out_channels) - 1 + + up_block = FlaxUpDecoderBlock2D( + in_channels=prev_output_channel, + out_channels=output_channel, + num_layers=self.layers_per_block + 1, + resnet_groups=self.norm_num_groups, + add_upsample=not is_final_block, + dtype=self.dtype, + ) + up_blocks.append(up_block) + prev_output_channel = output_channel + + self.up_blocks = up_blocks + + # end + self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-6) + self.conv_out = nn.Conv( + self.out_channels, + kernel_size=(3, 3), + strides=(1, 1), + padding=((1, 1), (1, 1)), + dtype=self.dtype, + ) + + def __call__(self, sample, deterministic: bool = True): + # z to block_in + sample = self.conv_in(sample) + + # middle + sample = self.mid_block(sample, deterministic=deterministic) + + # upsampling + for block in self.up_blocks: + sample = block(sample, deterministic=deterministic) + + sample = self.conv_norm_out(sample) + sample = nn.swish(sample) + sample = self.conv_out(sample) + + return sample + + +class FlaxDiagonalGaussianDistribution(object): + def __init__(self, parameters, deterministic=False): + # Last axis to 
account for channels-last + self.mean, self.logvar = jnp.split(parameters, 2, axis=-1) + self.logvar = jnp.clip(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = jnp.exp(0.5 * self.logvar) + self.var = jnp.exp(self.logvar) + if self.deterministic: + self.var = self.std = jnp.zeros_like(self.mean) + + def sample(self, key): + return self.mean + self.std * jax.random.normal(key, self.mean.shape) + + def kl(self, other=None): + if self.deterministic: + return jnp.array([0.0]) + + if other is None: + return 0.5 * jnp.sum(self.mean**2 + self.var - 1.0 - self.logvar, axis=[1, 2, 3]) + + return 0.5 * jnp.sum( + jnp.square(self.mean - other.mean) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar, + axis=[1, 2, 3], + ) + + def nll(self, sample, axis=[1, 2, 3]): + if self.deterministic: + return jnp.array([0.0]) + + logtwopi = jnp.log(2.0 * jnp.pi) + return 0.5 * jnp.sum(logtwopi + self.logvar + jnp.square(sample - self.mean) / self.var, axis=axis) + + def mode(self): + return self.mean + + +@flax_register_to_config +class FlaxAutoencoderKL(nn.Module, FlaxModelMixin, ConfigMixin): + r""" + Flax implementation of a VAE model with KL loss for decoding latent representations. + + This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for it's generic methods + implemented for all models (such as downloading or saving). + + This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) + subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matter related to its + general usage and behavior. + + Inherent JAX features such as the following are supported: + + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + in_channels (`int`, *optional*, defaults to 3): + Number of channels in the input image. + out_channels (`int`, *optional*, defaults to 3): + Number of channels in the output. + down_block_types (`Tuple[str]`, *optional*, defaults to `(DownEncoderBlock2D)`): + Tuple of downsample block types. + up_block_types (`Tuple[str]`, *optional*, defaults to `(UpDecoderBlock2D)`): + Tuple of upsample block types. + block_out_channels (`Tuple[str]`, *optional*, defaults to `(64,)`): + Tuple of block output channels. + layers_per_block (`int`, *optional*, defaults to `2`): + Number of ResNet layer for each block. + act_fn (`str`, *optional*, defaults to `silu`): + The activation function to use. + latent_channels (`int`, *optional*, defaults to `4`): + Number of channels in the latent space. + norm_num_groups (`int`, *optional*, defaults to `32`): + The number of groups for normalization. + sample_size (`int`, *optional*, defaults to 32): + Sample input size. + scaling_factor (`float`, *optional*, defaults to 0.18215): + The component-wise standard deviation of the trained latent space computed using the first batch of the + training set. This is used to scale the latent space to have unit variance when training the diffusion + model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the + diffusion model. 
When decoding, the latents are scaled back to the original scale with the formula: `z = 1 + / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image + Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + The `dtype` of the parameters. + """ + + in_channels: int = 3 + out_channels: int = 3 + down_block_types: Tuple[str] = ("DownEncoderBlock2D",) + up_block_types: Tuple[str] = ("UpDecoderBlock2D",) + block_out_channels: Tuple[int] = (64,) + layers_per_block: int = 1 + act_fn: str = "silu" + latent_channels: int = 4 + norm_num_groups: int = 32 + sample_size: int = 32 + scaling_factor: float = 0.18215 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.encoder = FlaxEncoder( + in_channels=self.config.in_channels, + out_channels=self.config.latent_channels, + down_block_types=self.config.down_block_types, + block_out_channels=self.config.block_out_channels, + layers_per_block=self.config.layers_per_block, + act_fn=self.config.act_fn, + norm_num_groups=self.config.norm_num_groups, + double_z=True, + dtype=self.dtype, + ) + self.decoder = FlaxDecoder( + in_channels=self.config.latent_channels, + out_channels=self.config.out_channels, + up_block_types=self.config.up_block_types, + block_out_channels=self.config.block_out_channels, + layers_per_block=self.config.layers_per_block, + norm_num_groups=self.config.norm_num_groups, + act_fn=self.config.act_fn, + dtype=self.dtype, + ) + self.quant_conv = nn.Conv( + 2 * self.config.latent_channels, + kernel_size=(1, 1), + strides=(1, 1), + padding="VALID", + dtype=self.dtype, + ) + self.post_quant_conv = nn.Conv( + self.config.latent_channels, + kernel_size=(1, 1), + strides=(1, 1), + padding="VALID", + dtype=self.dtype, + ) + + def init_weights(self, rng: jax.Array) -> FrozenDict: + # init input tensors + sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) + sample = jnp.zeros(sample_shape, dtype=jnp.float32) + + params_rng, dropout_rng, gaussian_rng = jax.random.split(rng, 3) + rngs = {"params": params_rng, "dropout": dropout_rng, "gaussian": gaussian_rng} + + return self.init(rngs, sample)["params"] + + def encode(self, sample, deterministic: bool = True, return_dict: bool = True): + sample = jnp.transpose(sample, (0, 2, 3, 1)) + + hidden_states = self.encoder(sample, deterministic=deterministic) + moments = self.quant_conv(hidden_states) + posterior = FlaxDiagonalGaussianDistribution(moments) + + if not return_dict: + return (posterior,) + + return FlaxAutoencoderKLOutput(latent_dist=posterior) + + def decode(self, latents, deterministic: bool = True, return_dict: bool = True): + if latents.shape[-1] != self.config.latent_channels: + latents = jnp.transpose(latents, (0, 2, 3, 1)) + + hidden_states = self.post_quant_conv(latents) + hidden_states = self.decoder(hidden_states, deterministic=deterministic) + + hidden_states = jnp.transpose(hidden_states, (0, 3, 1, 2)) + + if not return_dict: + return (hidden_states,) + + return FlaxDecoderOutput(sample=hidden_states) + + def __call__(self, sample, sample_posterior=False, deterministic: bool = True, return_dict: bool = True): + posterior = self.encode(sample, deterministic=deterministic, return_dict=return_dict) + if sample_posterior: + rng = self.make_rng("gaussian") + hidden_states = posterior.latent_dist.sample(rng) + else: + hidden_states = posterior.latent_dist.mode() + + sample = self.decode(hidden_states, 
return_dict=return_dict).sample + + if not return_dict: + return (sample,) + + return FlaxDecoderOutput(sample=sample) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/vq_model.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/vq_model.py new file mode 100644 index 000000000..e5184446c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/models/vq_model.py @@ -0,0 +1,181 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.accelerate_utils import apply_forward_hook +from .autoencoders.vae import Decoder, DecoderOutput, Encoder, VectorQuantizer +from .modeling_utils import ModelMixin + + +@dataclass +class VQEncoderOutput(BaseOutput): + """ + Output of VQModel encoding method. + + Args: + latents (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + The encoded output sample from the last layer of the model. + """ + + latents: torch.FloatTensor + + +class VQModel(ModelMixin, ConfigMixin): + r""" + A VQ-VAE model for decoding latent representations. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + in_channels (int, *optional*, defaults to 3): Number of channels in the input image. + out_channels (int, *optional*, defaults to 3): Number of channels in the output. + down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): + Tuple of downsample block types. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`): + Tuple of upsample block types. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): + Tuple of block output channels. + layers_per_block (`int`, *optional*, defaults to `1`): Number of layers per block. + act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. + latent_channels (`int`, *optional*, defaults to `3`): Number of channels in the latent space. + sample_size (`int`, *optional*, defaults to `32`): Sample input size. + num_vq_embeddings (`int`, *optional*, defaults to `256`): Number of codebook vectors in the VQ-VAE. + norm_num_groups (`int`, *optional*, defaults to `32`): Number of groups for normalization layers. + vq_embed_dim (`int`, *optional*): Hidden dim of codebook vectors in the VQ-VAE. + scaling_factor (`float`, *optional*, defaults to `0.18215`): + The component-wise standard deviation of the trained latent space computed using the first batch of the + training set. This is used to scale the latent space to have unit variance when training the diffusion + model. 
The latents are scaled with the formula `z = z * scaling_factor` before being passed to the + diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 + / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image + Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. + norm_type (`str`, *optional*, defaults to `"group"`): + Type of normalization layer to use. Can be one of `"group"` or `"spatial"`. + """ + + @register_to_config + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",), + up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",), + block_out_channels: Tuple[int, ...] = (64,), + layers_per_block: int = 1, + act_fn: str = "silu", + latent_channels: int = 3, + sample_size: int = 32, + num_vq_embeddings: int = 256, + norm_num_groups: int = 32, + vq_embed_dim: Optional[int] = None, + scaling_factor: float = 0.18215, + norm_type: str = "group", # group, spatial + mid_block_add_attention=True, + lookup_from_codebook=False, + force_upcast=False, + ): + super().__init__() + + # pass init params to Encoder + self.encoder = Encoder( + in_channels=in_channels, + out_channels=latent_channels, + down_block_types=down_block_types, + block_out_channels=block_out_channels, + layers_per_block=layers_per_block, + act_fn=act_fn, + norm_num_groups=norm_num_groups, + double_z=False, + mid_block_add_attention=mid_block_add_attention, + ) + + vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels + + self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1) + self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False) + self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1) + + # pass init params to Decoder + self.decoder = Decoder( + in_channels=latent_channels, + out_channels=out_channels, + up_block_types=up_block_types, + block_out_channels=block_out_channels, + layers_per_block=layers_per_block, + act_fn=act_fn, + norm_num_groups=norm_num_groups, + norm_type=norm_type, + mid_block_add_attention=mid_block_add_attention, + ) + + @apply_forward_hook + def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput: + h = self.encoder(x) + h = self.quant_conv(h) + + if not return_dict: + return (h,) + + return VQEncoderOutput(latents=h) + + @apply_forward_hook + def decode( + self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True, shape=None + ) -> Union[DecoderOutput, torch.FloatTensor]: + # also go through quantization layer + if not force_not_quantize: + quant, _, _ = self.quantize(h) + elif self.config.lookup_from_codebook: + quant = self.quantize.get_codebook_entry(h, shape) + else: + quant = h + quant2 = self.post_quant_conv(quant) + dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None) + + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) + + def forward( + self, sample: torch.FloatTensor, return_dict: bool = True + ) -> Union[DecoderOutput, Tuple[torch.FloatTensor, ...]]: + r""" + The [`VQModel`] forward method. + + Args: + sample (`torch.FloatTensor`): Input sample. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`models.vq_model.VQEncoderOutput`] instead of a plain tuple. 
+ + Returns: + [`~models.vq_model.VQEncoderOutput`] or `tuple`: + If return_dict is True, a [`~models.vq_model.VQEncoderOutput`] is returned, otherwise a plain `tuple` + is returned. + """ + + h = self.encode(sample).latents + dec = self.decode(h).sample + + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/optimization.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/optimization.py new file mode 100644 index 000000000..fbaa14365 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/optimization.py @@ -0,0 +1,361 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch optimization for diffusion models.""" + +import math +from enum import Enum +from typing import Optional, Union + +from torch.optim import Optimizer +from torch.optim.lr_scheduler import LambdaLR + +from .utils import logging + + +logger = logging.get_logger(__name__) + + +class SchedulerType(Enum): + LINEAR = "linear" + COSINE = "cosine" + COSINE_WITH_RESTARTS = "cosine_with_restarts" + POLYNOMIAL = "polynomial" + CONSTANT = "constant" + CONSTANT_WITH_WARMUP = "constant_with_warmup" + PIECEWISE_CONSTANT = "piecewise_constant" + + +def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR: + """ + Create a schedule with a constant learning rate, using the learning rate set in optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) + + +def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR: + """ + Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate + increases linearly between 0 and the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + def lr_lambda(current_step: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1.0, num_warmup_steps)) + return 1.0 + + return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) + + +def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR: + """ + Create a schedule with a constant learning rate, using the learning rate set in optimizer. 
+
+    Args:
+        optimizer ([`~torch.optim.Optimizer`]):
+            The optimizer for which to schedule the learning rate.
+        step_rules (`string`):
+            The rules for the learning rate. For example, `step_rules="1:10,0.1:20,0.01:30,0.005"` means that the
+            learning rate is multiplied by 1 for the first 10 steps, by 0.1 for the next 20 steps, by 0.01 for the
+            next 30 steps, and by 0.005 for all remaining steps.
+        last_epoch (`int`, *optional*, defaults to -1):
+            The index of the last epoch when resuming training.
+
+    Return:
+        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+    """
+
+    rules_dict = {}
+    rule_list = step_rules.split(",")
+    for rule_str in rule_list[:-1]:
+        value_str, steps_str = rule_str.split(":")
+        steps = int(steps_str)
+        value = float(value_str)
+        rules_dict[steps] = value
+    last_lr_multiple = float(rule_list[-1])
+
+    def create_rules_function(rules_dict, last_lr_multiple):
+        def rule_func(steps: int) -> float:
+            sorted_steps = sorted(rules_dict.keys())
+            for i, sorted_step in enumerate(sorted_steps):
+                if steps < sorted_step:
+                    return rules_dict[sorted_steps[i]]
+            return last_lr_multiple
+
+        return rule_func
+
+    rules_func = create_rules_function(rules_dict, last_lr_multiple)
+
+    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
+
+
+def get_linear_schedule_with_warmup(
+    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1
+) -> LambdaLR:
+    """
+    Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
+    a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
+
+    Args:
+        optimizer ([`~torch.optim.Optimizer`]):
+            The optimizer for which to schedule the learning rate.
+        num_warmup_steps (`int`):
+            The number of steps for the warmup phase.
+        num_training_steps (`int`):
+            The total number of training steps.
+        last_epoch (`int`, *optional*, defaults to -1):
+            The index of the last epoch when resuming training.
+
+    Return:
+        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+    """
+
+    def lr_lambda(current_step: int):
+        if current_step < num_warmup_steps:
+            return float(current_step) / float(max(1, num_warmup_steps))
+        return max(
+            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
+        )
+
+    return LambdaLR(optimizer, lr_lambda, last_epoch)
+
+
+def get_cosine_schedule_with_warmup(
+    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
+) -> LambdaLR:
+    """
+    Create a schedule with a learning rate that decreases following the values of the cosine function between the
+    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
+    initial lr set in the optimizer.
+
+    Args:
+        optimizer ([`~torch.optim.Optimizer`]):
+            The optimizer for which to schedule the learning rate.
+        num_warmup_steps (`int`):
+            The number of steps for the warmup phase.
+        num_training_steps (`int`):
+            The total number of training steps.
+        num_cycles (`float`, *optional*, defaults to 0.5):
+            The number of periods of the cosine function in a schedule (the default is to just decrease from the max
+            value to 0 following a half-cosine).
+        last_epoch (`int`, *optional*, defaults to -1):
+            The index of the last epoch when resuming training.
+
+    Return:
+        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+ """ + + def lr_lambda(current_step): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) + + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def get_cosine_with_hard_restarts_schedule_with_warmup( + optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 +) -> LambdaLR: + """ + Create a schedule with a learning rate that decreases following the values of the cosine function between the + initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases + linearly between 0 and the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + num_cycles (`int`, *optional*, defaults to 1): + The number of hard restarts to use. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + def lr_lambda(current_step): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + if progress >= 1.0: + return 0.0 + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))) + + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def get_polynomial_decay_schedule_with_warmup( + optimizer: Optimizer, + num_warmup_steps: int, + num_training_steps: int, + lr_end: float = 1e-7, + power: float = 1.0, + last_epoch: int = -1, +) -> LambdaLR: + """ + Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the + optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the + initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + lr_end (`float`, *optional*, defaults to 1e-7): + The end LR. + power (`float`, *optional*, defaults to 1.0): + Power factor. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT + implementation at + https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37 + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
+
+    """
+
+    lr_init = optimizer.defaults["lr"]
+    if not (lr_init > lr_end):
+        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")
+
+    def lr_lambda(current_step: int):
+        if current_step < num_warmup_steps:
+            return float(current_step) / float(max(1, num_warmup_steps))
+        elif current_step > num_training_steps:
+            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
+        else:
+            lr_range = lr_init - lr_end
+            decay_steps = num_training_steps - num_warmup_steps
+            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
+            decay = lr_range * pct_remaining**power + lr_end
+            return decay / lr_init  # as LambdaLR multiplies by lr_init
+
+    return LambdaLR(optimizer, lr_lambda, last_epoch)
+
+
+TYPE_TO_SCHEDULER_FUNCTION = {
+    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
+    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
+    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
+    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
+    SchedulerType.CONSTANT: get_constant_schedule,
+    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
+    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
+}
+
+
+def get_scheduler(
+    name: Union[str, SchedulerType],
+    optimizer: Optimizer,
+    step_rules: Optional[str] = None,
+    num_warmup_steps: Optional[int] = None,
+    num_training_steps: Optional[int] = None,
+    num_cycles: int = 1,
+    power: float = 1.0,
+    last_epoch: int = -1,
+) -> LambdaLR:
+    """
+    Unified API to get any scheduler from its name.
+
+    Args:
+        name (`str` or `SchedulerType`):
+            The name of the scheduler to use.
+        optimizer (`torch.optim.Optimizer`):
+            The optimizer that will be used during training.
+        step_rules (`str`, *optional*):
+            A string representing the step rules to use. This is only used by the `PIECEWISE_CONSTANT` scheduler.
+        num_warmup_steps (`int`, *optional*):
+            The number of warmup steps to do. This is not required by all schedulers (hence the argument being
+            optional), the function will raise an error if it's unset and the scheduler type requires it.
+        num_training_steps (`int`, *optional*):
+            The number of training steps to do. This is not required by all schedulers (hence the argument being
+            optional), the function will raise an error if it's unset and the scheduler type requires it.
+        num_cycles (`int`, *optional*):
+            The number of hard restarts used in the `COSINE_WITH_RESTARTS` scheduler.
+        power (`float`, *optional*, defaults to 1.0):
+            Power factor. See the `POLYNOMIAL` scheduler.
+        last_epoch (`int`, *optional*, defaults to -1):
+            The index of the last epoch when resuming training.
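+
+    Example (an illustrative sketch; `model` here stands for any existing `torch.nn.Module`):
+
+    ```py
+    from torch.optim import AdamW
+
+    optimizer = AdamW(model.parameters(), lr=1e-4)
+    lr_scheduler = get_scheduler(
+        "cosine", optimizer=optimizer, num_warmup_steps=500, num_training_steps=10_000
+    )
+    # step the scheduler once after every optimizer.step()
+    ```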
+ """ + name = SchedulerType(name) + schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] + if name == SchedulerType.CONSTANT: + return schedule_func(optimizer, last_epoch=last_epoch) + + if name == SchedulerType.PIECEWISE_CONSTANT: + return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch) + + # All other schedulers require `num_warmup_steps` + if num_warmup_steps is None: + raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") + + if name == SchedulerType.CONSTANT_WITH_WARMUP: + return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch) + + # All other schedulers require `num_training_steps` + if num_training_steps is None: + raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") + + if name == SchedulerType.COSINE_WITH_RESTARTS: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_cycles=num_cycles, + last_epoch=last_epoch, + ) + + if name == SchedulerType.POLYNOMIAL: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + power=power, + last_epoch=last_epoch, + ) + + return schedule_func( + optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/README.md b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/README.md new file mode 100644 index 000000000..d5125ae5c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/README.md @@ -0,0 +1,171 @@ +# 🧨 Diffusers Pipelines + +Pipelines provide a simple way to run state-of-the-art diffusion models in inference. +Most diffusion systems consist of multiple independently-trained models and highly adaptable scheduler +components - all of which are needed to have a functioning end-to-end diffusion system. + +As an example, [Stable Diffusion](https://huggingface.co/blog/stable_diffusion) has three independently trained models: +- [Autoencoder](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/models/vae.py#L392) +- [Conditional Unet](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/models/unet_2d_condition.py#L12) +- [CLIP text encoder](https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTextModel) +- a scheduler component, [scheduler](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_pndm.py), +- a [CLIPImageProcessor](https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPImageProcessor), +- as well as a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py). +All of these components are necessary to run stable diffusion in inference even though they were trained +or created independently from each other. + +To that end, we strive to offer all open-sourced, state-of-the-art diffusion system under a unified API. +More specifically, we strive to provide pipelines that +- 1. 
can load the officially published weights and yield 1-to-1 the same outputs as the original implementation according to the corresponding paper (*e.g.* [LDMTextToImagePipeline](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/latent_diffusion), uses the officially released weights of [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)), +- 2. have a simple user interface to run the model in inference (see the [Pipelines API](#pipelines-api) section), +- 3. are easy to understand with code that is self-explanatory and can be read along-side the official paper (see [Pipelines summary](#pipelines-summary)), +- 4. can easily be contributed by the community (see the [Contribution](#contribution) section). + +**Note** that pipelines do not (and should not) offer any training functionality. +If you are looking for *official* training examples, please have a look at [examples](https://github.com/huggingface/diffusers/tree/main/examples). + + +## Pipelines Summary + +The following table summarizes all officially supported pipelines, their corresponding paper, and if +available a colab notebook to directly try them out. + +| Pipeline | Source | Tasks | Colab +|-------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|:---:|:---:| +| [dance diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/dance_diffusion) | [**Dance Diffusion**](https://github.com/Harmonai-org/sample-generator) | *Unconditional Audio Generation* | +| [ddpm](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/ddpm) | [**Denoising Diffusion Probabilistic Models**](https://arxiv.org/abs/2006.11239) | *Unconditional Image Generation* | +| [ddim](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/ddim) | [**Denoising Diffusion Implicit Models**](https://arxiv.org/abs/2010.02502) | *Unconditional Image Generation* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) +| [latent_diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752) | *Text-to-Image Generation* | +| [latent_diffusion_uncond](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion_uncond) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752) | *Unconditional Image Generation* | +| [pndm](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pndm) | [**Pseudo Numerical Methods for Diffusion Models on Manifolds**](https://arxiv.org/abs/2202.09778) | *Unconditional Image Generation* | +| [score_sde_ve](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/score_sde_ve) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | *Unconditional Image Generation* | +| [score_sde_vp](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/score_sde_vp) | [**Score-Based Generative Modeling through Stochastic Differential 
Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | *Unconditional Image Generation* |
+| [stable_diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | *Text-to-Image Generation* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb)
+| [stable_diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | *Image-to-Image Text-Guided Generation* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb)
+| [stable_diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | *Text-Guided Image Inpainting* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb)
+| [stochastic_karras_ve](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stochastic_karras_ve) | [**Elucidating the Design Space of Diffusion-Based Generative Models**](https://arxiv.org/abs/2206.00364) | *Unconditional Image Generation* |
+
+**Note**: Pipelines are simple examples of how to play around with the diffusion systems as described in the corresponding papers.
+However, most of them can be adapted to use different scheduler components or even different model components. Some pipeline examples are shown in the [Examples](#examples) below.
+
+## Pipelines API
+
+Diffusion models often consist of multiple independently-trained models or other previously existing components.
+
+
+Each model has been trained independently on a different task and the scheduler can easily be swapped out and replaced with a different one.
+During inference, however, we want to be able to easily load all components and use them in inference - even if one component, *e.g.* CLIP's text encoder, originates from a different library, such as [Transformers](https://github.com/huggingface/transformers). To that end, all pipelines provide the following functionality:
+
+- [`from_pretrained` method](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/pipeline_utils.py#L139) that accepts a Hugging Face Hub repository id, *e.g.* [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) or a path to a local directory, *e.g.*
+"./stable-diffusion". To correctly retrieve which models and components should be loaded, one has to provide a `model_index.json` file, *e.g.* [runwayml/stable-diffusion-v1-5/model_index.json](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json), which defines all components that should be
+loaded into the pipelines. More specifically, for each model/component one needs to define the format `<name>: ["<library>", "<class name>"]`. `<name>` is the attribute name given to the loaded instance of `<class name>` which can be found in the library or pipeline folder called `"<library>"`.
+- [`save_pretrained`](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/pipeline_utils.py#L90) that accepts a local path, *e.g.* `./stable-diffusion` under which all models/components of the pipeline will be saved. For each component/model a folder is created inside the local path that is named after the given attribute name, *e.g.* `./stable_diffusion/unet`. +In addition, a `model_index.json` file is created at the root of the local path, *e.g.* `./stable_diffusion/model_index.json` so that the complete pipeline can again be instantiated +from the local path. +- [`to`](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/pipeline_utils.py#L118) which accepts a `string` or `torch.device` to move all models that are of type `torch.nn.Module` to the passed device. The behavior is fully analogous to [PyTorch's `to` method](https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.to). +- [`__call__`] method to use the pipeline in inference. `__call__` defines inference logic of the pipeline and should ideally encompass all aspects of it, from pre-processing to forwarding tensors to the different models and schedulers, as well as post-processing. The API of the `__call__` method can strongly vary from pipeline to pipeline. *E.g.* a text-to-image pipeline, such as [`StableDiffusionPipeline`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py) should accept among other things the text prompt to generate the image. A pure image generation pipeline, such as [DDPMPipeline](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/ddpm) on the other hand can be run without providing any inputs. To better understand what inputs can be adapted for +each pipeline, one should look directly into the respective pipeline. + +**Note**: All pipelines have PyTorch's autograd disabled by decorating the `__call__` method with a [`torch.no_grad`](https://pytorch.org/docs/stable/generated/torch.no_grad.html) decorator because pipelines should +not be used for training. If you want to store the gradients during the forward pass, we recommend writing your own pipeline, see also our [community-examples](https://github.com/huggingface/diffusers/tree/main/examples/community) + +## Contribution + +We are more than happy about any contribution to the officially supported pipelines 🤗. We aspire +all of our pipelines to be **self-contained**, **easy-to-tweak**, **beginner-friendly** and for **one-purpose-only**. + +- **Self-contained**: A pipeline shall be as self-contained as possible. More specifically, this means that all functionality should be either directly defined in the pipeline file itself, should be inherited from (and only from) the [`DiffusionPipeline` class](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/pipeline_utils.py#L56) or be directly attached to the model and scheduler components of the pipeline. +- **Easy-to-use**: Pipelines should be extremely easy to use - one should be able to load the pipeline and +use it for its designated task, *e.g.* text-to-image generation, in just a couple of lines of code. Most +logic including pre-processing, an unrolled diffusion loop, and post-processing should all happen inside the `__call__` method. 
+- **Easy-to-tweak**: Certain pipelines will not be able to handle all use cases and tasks that you might like them to. If you want to use a certain pipeline for a specific use case that is not yet supported, you might have to copy the pipeline file and tweak the code to your needs. We try to make the pipeline code as readable as possible so that each part, from pre-processing to diffusing to post-processing, can easily be adapted. If you would like the community to benefit from your customized pipeline, we would love to see a contribution to our [community-examples](https://github.com/huggingface/diffusers/tree/main/examples/community). If you feel that an important pipeline should be part of the official pipelines but isn't, a contribution to the [official pipelines](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines) would be even better. +- **One-purpose-only**: Pipelines should be used for one task and one task only. Even if two tasks are very similar from a modeling point of view, *e.g.* image2image translation and in-painting, pipelines shall be used for one task only to keep them *easy-to-tweak* and *readable*. + +## Examples + +### Text-to-Image generation with Stable Diffusion + +```python +# make sure you're logged in with `huggingface-cli login` +from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler + +pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") +pipe = pipe.to("cuda") + +prompt = "a photo of an astronaut riding a horse on mars" +image = pipe(prompt).images[0] + +image.save("astronaut_rides_horse.png") +``` + +### Image-to-Image text-guided generation with Stable Diffusion + +The `StableDiffusionImg2ImgPipeline` lets you pass a text prompt and an initial image to condition the generation of new images. + +```python +import requests +import torch +from PIL import Image +from io import BytesIO + +from diffusers import StableDiffusionImg2ImgPipeline + +# load the pipeline +device = "cuda" +pipe = StableDiffusionImg2ImgPipeline.from_pretrained( + "runwayml/stable-diffusion-v1-5", + torch_dtype=torch.float16, +).to(device) + +# let's download an initial image +url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + +response = requests.get(url) +init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = init_image.resize((768, 512)) + +prompt = "A fantasy landscape, trending on artstation" + +images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images + +images[0].save("fantasy_landscape.png") +``` +You can also run this example on Colab [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb) + +### Tweak prompts reusing seeds and latents + +You can generate your own latents to reproduce results, or tweak your prompt on a specific result you liked. [This notebook](https://github.com/pcuenca/diffusers-examples/blob/main/notebooks/stable-diffusion-seeds.ipynb) shows how to do it step by step. You can also run it in Google Colab [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pcuenca/diffusers-examples/blob/main/notebooks/stable-diffusion-seeds.ipynb).
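+
+The sketch below shows the simplest form of this idea, reusing the `runwayml/stable-diffusion-v1-5` checkpoint from the examples above; the seed, prompts, and file names are only placeholders. Re-seeding the generator before every call reproduces the same initial latents, so the generated images differ only because of the prompt.
+
+```python
+import torch
+from diffusers import StableDiffusionPipeline
+
+pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+pipe = pipe.to("cuda")
+
+seed = 1024  # a seed whose result you liked
+prompts = [
+    "a photo of an astronaut riding a horse on mars",
+    "a photo of an astronaut riding a horse on mars, oil painting",
+]
+
+for i, prompt in enumerate(prompts):
+    # re-seeding before each call recreates the same initial latents,
+    # so only the prompt changes between the generated images
+    generator = torch.Generator(device="cuda").manual_seed(seed)
+    image = pipe(prompt, generator=generator).images[0]
+    image.save(f"variation_{i}.png")
+```
+
+If you prefer to pre-compute the starting noise yourself, the pipeline also accepts a `latents` argument; the notebook linked above walks through that variant step by step.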
+ + +### In-painting using Stable Diffusion + +The `StableDiffusionInpaintPipeline` lets you edit specific parts of an image by providing a mask and text prompt. + +```python +import PIL +import requests +import torch +from io import BytesIO + +from diffusers import StableDiffusionInpaintPipeline + +def download_image(url): + response = requests.get(url) + return PIL.Image.open(BytesIO(response.content)).convert("RGB") + +img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" +mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + +init_image = download_image(img_url).resize((512, 512)) +mask_image = download_image(mask_url).resize((512, 512)) + +pipe = StableDiffusionInpaintPipeline.from_pretrained( + "runwayml/stable-diffusion-inpainting", + torch_dtype=torch.float16, +) +pipe = pipe.to("cuda") + +prompt = "Face of a yellow cat, high resolution, sitting on a park bench" +image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0] +``` + +You can also run this example on colab [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/__init__.py new file mode 100644 index 000000000..2b2277809 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/__init__.py @@ -0,0 +1,581 @@ +from typing import TYPE_CHECKING + +from ..utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_k_diffusion_available, + is_librosa_available, + is_note_seq_available, + is_onnx_available, + is_torch_available, + is_torch_npu_available, + is_transformers_available, +) + + +# These modules contain pipelines from multiple libraries/frameworks +_dummy_objects = {} +_import_structure = { + "controlnet": [], + "controlnet_xs": [], + "deprecated": [], + "latent_diffusion": [], + "ledits_pp": [], + "stable_diffusion": [], + "stable_diffusion_xl": [], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_pt_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) +else: + _import_structure["auto_pipeline"] = [ + "AutoPipelineForImage2Image", + "AutoPipelineForInpainting", + "AutoPipelineForText2Image", + ] + _import_structure["consistency_models"] = ["ConsistencyModelPipeline"] + _import_structure["dance_diffusion"] = ["DanceDiffusionPipeline"] + _import_structure["ddim"] = ["DDIMPipeline"] + _import_structure["ddpm"] = ["DDPMPipeline"] + _import_structure["dit"] = ["DiTPipeline"] + _import_structure["latent_diffusion"].extend(["LDMSuperResolutionPipeline"]) + _import_structure["pipeline_utils"] = [ + "AudioPipelineOutput", + "DiffusionPipeline", + "StableDiffusionMixin", + "ImagePipelineOutput", + ] + _import_structure["deprecated"].extend( + [ + "PNDMPipeline", + "LDMPipeline", + "RePaintPipeline", + "ScoreSdeVePipeline", + "KarrasVePipeline", + ] + ) +try: + if not (is_torch_available() and is_librosa_available()): 
+ raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_librosa_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_librosa_objects)) +else: + _import_structure["deprecated"].extend(["AudioDiffusionPipeline", "Mel"]) + +try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) +else: + _import_structure["deprecated"].extend( + [ + "MidiProcessor", + "SpectrogramDiffusionPipeline", + ] + ) + +try: + if not (is_torch_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["deprecated"].extend( + [ + "VQDiffusionPipeline", + "AltDiffusionPipeline", + "AltDiffusionImg2ImgPipeline", + "CycleDiffusionPipeline", + "StableDiffusionInpaintPipelineLegacy", + "StableDiffusionPix2PixZeroPipeline", + "StableDiffusionParadigmsPipeline", + "StableDiffusionModelEditingPipeline", + "VersatileDiffusionDualGuidedPipeline", + "VersatileDiffusionImageVariationPipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionTextToImagePipeline", + ] + ) + _import_structure["amused"] = ["AmusedImg2ImgPipeline", "AmusedInpaintPipeline", "AmusedPipeline"] + _import_structure["animatediff"] = [ + "AnimateDiffPipeline", + "AnimateDiffVideoToVideoPipeline", + ] + _import_structure["audioldm"] = ["AudioLDMPipeline"] + _import_structure["audioldm2"] = [ + "AudioLDM2Pipeline", + "AudioLDM2ProjectionModel", + "AudioLDM2UNet2DConditionModel", + ] + _import_structure["blip_diffusion"] = ["BlipDiffusionPipeline"] + _import_structure["controlnet"].extend( + [ + "BlipDiffusionControlNetPipeline", + "StableDiffusionControlNetImg2ImgPipeline", + "StableDiffusionControlNetInpaintPipeline", + "StableDiffusionControlNetPipeline", + "StableDiffusionXLControlNetImg2ImgPipeline", + "StableDiffusionXLControlNetInpaintPipeline", + "StableDiffusionXLControlNetPipeline", + ] + ) + _import_structure["deepfloyd_if"] = [ + "IFImg2ImgPipeline", + "IFImg2ImgSuperResolutionPipeline", + "IFInpaintingPipeline", + "IFInpaintingSuperResolutionPipeline", + "IFPipeline", + "IFSuperResolutionPipeline", + ] + _import_structure["kandinsky"] = [ + "KandinskyCombinedPipeline", + "KandinskyImg2ImgCombinedPipeline", + "KandinskyImg2ImgPipeline", + "KandinskyInpaintCombinedPipeline", + "KandinskyInpaintPipeline", + "KandinskyPipeline", + "KandinskyPriorPipeline", + ] + _import_structure["kandinsky2_2"] = [ + "KandinskyV22CombinedPipeline", + "KandinskyV22ControlnetImg2ImgPipeline", + "KandinskyV22ControlnetPipeline", + "KandinskyV22Img2ImgCombinedPipeline", + "KandinskyV22Img2ImgPipeline", + "KandinskyV22InpaintCombinedPipeline", + "KandinskyV22InpaintPipeline", + "KandinskyV22Pipeline", + "KandinskyV22PriorEmb2EmbPipeline", + "KandinskyV22PriorPipeline", + ] + _import_structure["kandinsky3"] = [ + "Kandinsky3Img2ImgPipeline", + "Kandinsky3Pipeline", + ] + _import_structure["latent_consistency_models"] = [ + "LatentConsistencyModelImg2ImgPipeline", + "LatentConsistencyModelPipeline", + ] + 
_import_structure["latent_diffusion"].extend(["LDMTextToImagePipeline"]) + _import_structure["ledits_pp"].extend( + [ + "LEditsPPPipelineStableDiffusion", + "LEditsPPPipelineStableDiffusionXL", + ] + ) + _import_structure["musicldm"] = ["MusicLDMPipeline"] + _import_structure["paint_by_example"] = ["PaintByExamplePipeline"] + _import_structure["pia"] = ["PIAPipeline"] + _import_structure["pixart_alpha"] = ["PixArtAlphaPipeline"] + _import_structure["semantic_stable_diffusion"] = ["SemanticStableDiffusionPipeline"] + _import_structure["shap_e"] = ["ShapEImg2ImgPipeline", "ShapEPipeline"] + _import_structure["stable_cascade"] = [ + "StableCascadeCombinedPipeline", + "StableCascadeDecoderPipeline", + "StableCascadePriorPipeline", + ] + _import_structure["stable_diffusion"].extend( + [ + "CLIPImageProjection", + "StableDiffusionDepth2ImgPipeline", + "StableDiffusionImageVariationPipeline", + "StableDiffusionImg2ImgPipeline", + "StableDiffusionInpaintPipeline", + "StableDiffusionInstructPix2PixPipeline", + "StableDiffusionLatentUpscalePipeline", + "StableDiffusionPipeline", + "StableDiffusionUpscalePipeline", + "StableUnCLIPImg2ImgPipeline", + "StableUnCLIPPipeline", + "StableDiffusionLDM3DPipeline", + ] + ) + _import_structure["stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"] + _import_structure["stable_diffusion_safe"] = ["StableDiffusionPipelineSafe"] + _import_structure["stable_diffusion_sag"] = ["StableDiffusionSAGPipeline"] + _import_structure["stable_diffusion_gligen"] = [ + "StableDiffusionGLIGENPipeline", + "StableDiffusionGLIGENTextImagePipeline", + ] + _import_structure["stable_video_diffusion"] = ["StableVideoDiffusionPipeline"] + _import_structure["stable_diffusion_xl"].extend( + [ + "StableDiffusionXLImg2ImgPipeline", + "StableDiffusionXLInpaintPipeline", + "StableDiffusionXLInstructPix2PixPipeline", + "StableDiffusionXLPipeline", + ] + ) + _import_structure["stable_diffusion_diffedit"] = ["StableDiffusionDiffEditPipeline"] + _import_structure["stable_diffusion_ldm3d"] = ["StableDiffusionLDM3DPipeline"] + _import_structure["stable_diffusion_panorama"] = ["StableDiffusionPanoramaPipeline"] + _import_structure["t2i_adapter"] = [ + "StableDiffusionAdapterPipeline", + "StableDiffusionXLAdapterPipeline", + ] + _import_structure["text_to_video_synthesis"] = [ + "TextToVideoSDPipeline", + "TextToVideoZeroPipeline", + "TextToVideoZeroSDXLPipeline", + "VideoToVideoSDPipeline", + ] + _import_structure["i2vgen_xl"] = ["I2VGenXLPipeline"] + _import_structure["unclip"] = ["UnCLIPImageVariationPipeline", "UnCLIPPipeline"] + _import_structure["unidiffuser"] = [ + "ImageTextPipelineOutput", + "UniDiffuserModel", + "UniDiffuserPipeline", + "UniDiffuserTextDecoder", + ] + _import_structure["wuerstchen"] = [ + "WuerstchenCombinedPipeline", + "WuerstchenDecoderPipeline", + "WuerstchenPriorPipeline", + ] +try: + if not is_onnx_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_onnx_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_onnx_objects)) +else: + _import_structure["onnx_utils"] = ["OnnxRuntimeModel"] +try: + if not (is_torch_available() and is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_transformers_and_onnx_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_onnx_objects)) +else: + 
_import_structure["stable_diffusion"].extend( + [ + "OnnxStableDiffusionImg2ImgPipeline", + "OnnxStableDiffusionInpaintPipeline", + "OnnxStableDiffusionPipeline", + "OnnxStableDiffusionUpscalePipeline", + "StableDiffusionOnnxPipeline", + ] + ) + +try: + if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import ( + dummy_torch_and_transformers_and_k_diffusion_objects, + ) + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_k_diffusion_objects)) +else: + _import_structure["stable_diffusion_k_diffusion"] = [ + "StableDiffusionKDiffusionPipeline", + "StableDiffusionXLKDiffusionPipeline", + ] +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_flax_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_flax_objects)) +else: + _import_structure["pipeline_flax_utils"] = ["FlaxDiffusionPipeline"] +try: + if not (is_flax_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_flax_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) +else: + _import_structure["controlnet"].extend(["FlaxStableDiffusionControlNetPipeline"]) + _import_structure["stable_diffusion"].extend( + [ + "FlaxStableDiffusionImg2ImgPipeline", + "FlaxStableDiffusionInpaintPipeline", + "FlaxStableDiffusionPipeline", + ] + ) + _import_structure["stable_diffusion_xl"].extend( + [ + "FlaxStableDiffusionXLPipeline", + ] + ) + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_pt_objects import * # noqa F403 + + else: + from .auto_pipeline import ( + AutoPipelineForImage2Image, + AutoPipelineForInpainting, + AutoPipelineForText2Image, + ) + from .consistency_models import ConsistencyModelPipeline + from .dance_diffusion import DanceDiffusionPipeline + from .ddim import DDIMPipeline + from .ddpm import DDPMPipeline + from .deprecated import KarrasVePipeline, LDMPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline + from .dit import DiTPipeline + from .latent_diffusion import LDMSuperResolutionPipeline + from .pipeline_utils import ( + AudioPipelineOutput, + DiffusionPipeline, + ImagePipelineOutput, + StableDiffusionMixin, + ) + + try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_librosa_objects import * + else: + from .deprecated import AudioDiffusionPipeline, Mel + + try: + if not (is_torch_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_objects import * + else: + from .amused import AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline + from .animatediff import AnimateDiffPipeline, AnimateDiffVideoToVideoPipeline + from .audioldm import AudioLDMPipeline + from .audioldm2 import ( + AudioLDM2Pipeline, + AudioLDM2ProjectionModel, + AudioLDM2UNet2DConditionModel, + ) + from .blip_diffusion import BlipDiffusionPipeline + from .controlnet import ( + BlipDiffusionControlNetPipeline, + 
StableDiffusionControlNetImg2ImgPipeline, + StableDiffusionControlNetInpaintPipeline, + StableDiffusionControlNetPipeline, + StableDiffusionXLControlNetImg2ImgPipeline, + StableDiffusionXLControlNetInpaintPipeline, + StableDiffusionXLControlNetPipeline, + ) + from .deepfloyd_if import ( + IFImg2ImgPipeline, + IFImg2ImgSuperResolutionPipeline, + IFInpaintingPipeline, + IFInpaintingSuperResolutionPipeline, + IFPipeline, + IFSuperResolutionPipeline, + ) + from .deprecated import ( + AltDiffusionImg2ImgPipeline, + AltDiffusionPipeline, + CycleDiffusionPipeline, + StableDiffusionInpaintPipelineLegacy, + StableDiffusionModelEditingPipeline, + StableDiffusionParadigmsPipeline, + StableDiffusionPix2PixZeroPipeline, + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + VQDiffusionPipeline, + ) + from .i2vgen_xl import I2VGenXLPipeline + from .kandinsky import ( + KandinskyCombinedPipeline, + KandinskyImg2ImgCombinedPipeline, + KandinskyImg2ImgPipeline, + KandinskyInpaintCombinedPipeline, + KandinskyInpaintPipeline, + KandinskyPipeline, + KandinskyPriorPipeline, + ) + from .kandinsky2_2 import ( + KandinskyV22CombinedPipeline, + KandinskyV22ControlnetImg2ImgPipeline, + KandinskyV22ControlnetPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22Img2ImgPipeline, + KandinskyV22InpaintCombinedPipeline, + KandinskyV22InpaintPipeline, + KandinskyV22Pipeline, + KandinskyV22PriorEmb2EmbPipeline, + KandinskyV22PriorPipeline, + ) + from .kandinsky3 import ( + Kandinsky3Img2ImgPipeline, + Kandinsky3Pipeline, + ) + from .latent_consistency_models import ( + LatentConsistencyModelImg2ImgPipeline, + LatentConsistencyModelPipeline, + ) + from .latent_diffusion import LDMTextToImagePipeline + from .ledits_pp import ( + LEditsPPDiffusionPipelineOutput, + LEditsPPInversionPipelineOutput, + LEditsPPPipelineStableDiffusion, + LEditsPPPipelineStableDiffusionXL, + ) + from .musicldm import MusicLDMPipeline + from .paint_by_example import PaintByExamplePipeline + from .pia import PIAPipeline + from .pixart_alpha import PixArtAlphaPipeline + from .semantic_stable_diffusion import SemanticStableDiffusionPipeline + from .shap_e import ShapEImg2ImgPipeline, ShapEPipeline + from .stable_cascade import ( + StableCascadeCombinedPipeline, + StableCascadeDecoderPipeline, + StableCascadePriorPipeline, + ) + from .stable_diffusion import ( + CLIPImageProjection, + StableDiffusionDepth2ImgPipeline, + StableDiffusionImageVariationPipeline, + StableDiffusionImg2ImgPipeline, + StableDiffusionInpaintPipeline, + StableDiffusionInstructPix2PixPipeline, + StableDiffusionLatentUpscalePipeline, + StableDiffusionPipeline, + StableDiffusionUpscalePipeline, + StableUnCLIPImg2ImgPipeline, + StableUnCLIPPipeline, + ) + from .stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline + from .stable_diffusion_diffedit import StableDiffusionDiffEditPipeline + from .stable_diffusion_gligen import StableDiffusionGLIGENPipeline, StableDiffusionGLIGENTextImagePipeline + from .stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline + from .stable_diffusion_panorama import StableDiffusionPanoramaPipeline + from .stable_diffusion_safe import StableDiffusionPipelineSafe + from .stable_diffusion_sag import StableDiffusionSAGPipeline + from .stable_diffusion_xl import ( + StableDiffusionXLImg2ImgPipeline, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLInstructPix2PixPipeline, + StableDiffusionXLPipeline, + ) + from 
.stable_video_diffusion import StableVideoDiffusionPipeline + from .t2i_adapter import ( + StableDiffusionAdapterPipeline, + StableDiffusionXLAdapterPipeline, + ) + from .text_to_video_synthesis import ( + TextToVideoSDPipeline, + TextToVideoZeroPipeline, + TextToVideoZeroSDXLPipeline, + VideoToVideoSDPipeline, + ) + from .unclip import UnCLIPImageVariationPipeline, UnCLIPPipeline + from .unidiffuser import ( + ImageTextPipelineOutput, + UniDiffuserModel, + UniDiffuserPipeline, + UniDiffuserTextDecoder, + ) + from .wuerstchen import ( + WuerstchenCombinedPipeline, + WuerstchenDecoderPipeline, + WuerstchenPriorPipeline, + ) + + try: + if not is_onnx_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_onnx_objects import * # noqa F403 + + else: + from .onnx_utils import OnnxRuntimeModel + + try: + if not (is_torch_available() and is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_and_onnx_objects import * + else: + from .stable_diffusion import ( + OnnxStableDiffusionImg2ImgPipeline, + OnnxStableDiffusionInpaintPipeline, + OnnxStableDiffusionPipeline, + OnnxStableDiffusionUpscalePipeline, + StableDiffusionOnnxPipeline, + ) + + try: + if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_and_k_diffusion_objects import * + else: + from .stable_diffusion_k_diffusion import ( + StableDiffusionKDiffusionPipeline, + StableDiffusionXLKDiffusionPipeline, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_flax_objects import * # noqa F403 + else: + from .pipeline_flax_utils import FlaxDiffusionPipeline + + try: + if not (is_flax_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_flax_and_transformers_objects import * + else: + from .controlnet import FlaxStableDiffusionControlNetPipeline + from .stable_diffusion import ( + FlaxStableDiffusionImg2ImgPipeline, + FlaxStableDiffusionInpaintPipeline, + FlaxStableDiffusionPipeline, + ) + from .stable_diffusion_xl import ( + FlaxStableDiffusionXLPipeline, + ) + + try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 + + else: + from .deprecated import ( + MidiProcessor, + SpectrogramDiffusionPipeline, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/__init__.py new file mode 100644 index 000000000..3c4d07a42 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/__init__.py @@ -0,0 +1,62 @@ +from typing import TYPE_CHECKING + +from ...utils 
import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + AmusedImg2ImgPipeline, + AmusedInpaintPipeline, + AmusedPipeline, + ) + + _dummy_objects.update( + { + "AmusedPipeline": AmusedPipeline, + "AmusedImg2ImgPipeline": AmusedImg2ImgPipeline, + "AmusedInpaintPipeline": AmusedInpaintPipeline, + } + ) +else: + _import_structure["pipeline_amused"] = ["AmusedPipeline"] + _import_structure["pipeline_amused_img2img"] = ["AmusedImg2ImgPipeline"] + _import_structure["pipeline_amused_inpaint"] = ["AmusedInpaintPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + AmusedPipeline, + ) + else: + from .pipeline_amused import AmusedPipeline + from .pipeline_amused_img2img import AmusedImg2ImgPipeline + from .pipeline_amused_inpaint import AmusedInpaintPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/pipeline_amused.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/pipeline_amused.py new file mode 100644 index 000000000..aa682b46f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/pipeline_amused.py @@ -0,0 +1,328 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import CLIPTextModelWithProjection, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...models import UVit2DModel, VQModel +from ...schedulers import AmusedScheduler +from ...utils import replace_example_docstring +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AmusedPipeline + + >>> pipe = AmusedPipeline.from_pretrained( + ... "amused/amused-512", variant="fp16", torch_dtype=torch.float16 + ... 
) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt).images[0] + ``` +""" + + +class AmusedPipeline(DiffusionPipeline): + image_processor: VaeImageProcessor + vqvae: VQModel + tokenizer: CLIPTokenizer + text_encoder: CLIPTextModelWithProjection + transformer: UVit2DModel + scheduler: AmusedScheduler + + model_cpu_offload_seq = "text_encoder->transformer->vqvae" + + def __init__( + self, + vqvae: VQModel, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModelWithProjection, + transformer: UVit2DModel, + scheduler: AmusedScheduler, + ): + super().__init__() + + self.register_modules( + vqvae=vqvae, + tokenizer=tokenizer, + text_encoder=text_encoder, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[List[str], str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 12, + guidance_scale: float = 10.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[torch.Generator] = None, + latents: Optional[torch.IntTensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_encoder_hidden_states: Optional[torch.Tensor] = None, + output_type="pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + micro_conditioning_aesthetic_score: int = 6, + micro_conditioning_crop_coord: Tuple[int, int] = (0, 0), + temperature: Union[int, Tuple[int, int], List[int]] = (2, 0), + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 16): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 10.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. 
+ latents (`torch.IntTensor`, *optional*): + Pre-generated tokens representing latent vectors in `self.vqvae`, to be used as inputs for image + gneration. If not provided, the starting latents will be completely masked. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. A single vector from the + pooled and projected final hidden states. + encoder_hidden_states (`torch.FloatTensor`, *optional*): + Pre-generated penultimate hidden states from the text encoder providing additional text conditioning. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + negative_encoder_hidden_states (`torch.FloatTensor`, *optional*): + Analogous to `encoder_hidden_states` for the positive prompt. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + micro_conditioning_aesthetic_score (`int`, *optional*, defaults to 6): + The targeted aesthetic score according to the laion aesthetic classifier. See https://laion.ai/blog/laion-aesthetics/ + and the micro-conditioning section of https://arxiv.org/abs/2307.01952. + micro_conditioning_crop_coord (`Tuple[int]`, *optional*, defaults to (0, 0)): + The targeted height, width crop coordinates. See the micro-conditioning section of https://arxiv.org/abs/2307.01952. + temperature (`Union[int, Tuple[int, int], List[int]]`, *optional*, defaults to (2, 0)): + Configures the temperature scheduler on `self.scheduler` see `AmusedScheduler#set_timesteps`. + + Examples: + + Returns: + [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.pipeline_utils.ImagePipelineOutput`] is returned, otherwise a + `tuple` is returned where the first element is a list with the generated images. 
+ """ + if (prompt_embeds is not None and encoder_hidden_states is None) or ( + prompt_embeds is None and encoder_hidden_states is not None + ): + raise ValueError("pass either both `prompt_embeds` and `encoder_hidden_states` or neither") + + if (negative_prompt_embeds is not None and negative_encoder_hidden_states is None) or ( + negative_prompt_embeds is None and negative_encoder_hidden_states is not None + ): + raise ValueError( + "pass either both `negatve_prompt_embeds` and `negative_encoder_hidden_states` or neither" + ) + + if (prompt is None and prompt_embeds is None) or (prompt is not None and prompt_embeds is not None): + raise ValueError("pass only one of `prompt` or `prompt_embeds`") + + if isinstance(prompt, str): + prompt = [prompt] + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + batch_size = batch_size * num_images_per_prompt + + if height is None: + height = self.transformer.config.sample_size * self.vae_scale_factor + + if width is None: + width = self.transformer.config.sample_size * self.vae_scale_factor + + if prompt_embeds is None: + input_ids = self.tokenizer( + prompt, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=self.tokenizer.model_max_length, + ).input_ids.to(self._execution_device) + + outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) + prompt_embeds = outputs.text_embeds + encoder_hidden_states = outputs.hidden_states[-2] + + prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1) + encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) + + if guidance_scale > 1.0: + if negative_prompt_embeds is None: + if negative_prompt is None: + negative_prompt = [""] * len(prompt) + + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + + input_ids = self.tokenizer( + negative_prompt, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=self.tokenizer.model_max_length, + ).input_ids.to(self._execution_device) + + outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) + negative_prompt_embeds = outputs.text_embeds + negative_encoder_hidden_states = outputs.hidden_states[-2] + + negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1) + negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) + + prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds]) + encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states]) + + # Note that the micro conditionings _do_ flip the order of width, height for the original size + # and the crop coordinates. 
This is how it was done in the original code base + micro_conds = torch.tensor( + [ + width, + height, + micro_conditioning_crop_coord[0], + micro_conditioning_crop_coord[1], + micro_conditioning_aesthetic_score, + ], + device=self._execution_device, + dtype=encoder_hidden_states.dtype, + ) + micro_conds = micro_conds.unsqueeze(0) + micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1) + + shape = (batch_size, height // self.vae_scale_factor, width // self.vae_scale_factor) + + if latents is None: + latents = torch.full( + shape, self.scheduler.config.mask_token_id, dtype=torch.long, device=self._execution_device + ) + + self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device) + + num_warmup_steps = len(self.scheduler.timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, timestep in enumerate(self.scheduler.timesteps): + if guidance_scale > 1.0: + model_input = torch.cat([latents] * 2) + else: + model_input = latents + + model_output = self.transformer( + model_input, + micro_conds=micro_conds, + pooled_text_emb=prompt_embeds, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + ) + + if guidance_scale > 1.0: + uncond_logits, cond_logits = model_output.chunk(2) + model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits) + + latents = self.scheduler.step( + model_output=model_output, + timestep=timestep, + sample=latents, + generator=generator, + ).prev_sample + + if i == len(self.scheduler.timesteps) - 1 or ( + (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0 + ): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, timestep, latents) + + if output_type == "latent": + output = latents + else: + needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast + + if needs_upcasting: + self.vqvae.float() + + output = self.vqvae.decode( + latents, + force_not_quantize=True, + shape=( + batch_size, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + self.vqvae.config.latent_channels, + ), + ).sample.clip(0, 1) + output = self.image_processor.postprocess(output, output_type) + + if needs_upcasting: + self.vqvae.half() + + self.maybe_free_model_hooks() + + if not return_dict: + return (output,) + + return ImagePipelineOutput(output) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/pipeline_amused_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/pipeline_amused_img2img.py new file mode 100644 index 000000000..444d6354b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/pipeline_amused_img2img.py @@ -0,0 +1,347 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import CLIPTextModelWithProjection, CLIPTokenizer + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...models import UVit2DModel, VQModel +from ...schedulers import AmusedScheduler +from ...utils import replace_example_docstring +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AmusedImg2ImgPipeline + >>> from diffusers.utils import load_image + + >>> pipe = AmusedImg2ImgPipeline.from_pretrained( + ... "amused/amused-512", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "winter mountains" + >>> input_image = ( + ... load_image( + ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg" + ... ) + ... .resize((512, 512)) + ... .convert("RGB") + ... ) + >>> image = pipe(prompt, input_image).images[0] + ``` +""" + + +class AmusedImg2ImgPipeline(DiffusionPipeline): + image_processor: VaeImageProcessor + vqvae: VQModel + tokenizer: CLIPTokenizer + text_encoder: CLIPTextModelWithProjection + transformer: UVit2DModel + scheduler: AmusedScheduler + + model_cpu_offload_seq = "text_encoder->transformer->vqvae" + + # TODO - when calling self.vqvae.quantize, it uses self.vqvae.quantize.embedding.weight before + # the forward method of self.vqvae.quantize, so the hook doesn't get called to move the parameter + # off the meta device. There should be a way to fix this instead of just not offloading it + _exclude_from_cpu_offload = ["vqvae"] + + def __init__( + self, + vqvae: VQModel, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModelWithProjection, + transformer: UVit2DModel, + scheduler: AmusedScheduler, + ): + super().__init__() + + self.register_modules( + vqvae=vqvae, + tokenizer=tokenizer, + text_encoder=text_encoder, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[List[str], str]] = None, + image: PipelineImageInput = None, + strength: float = 0.5, + num_inference_steps: int = 12, + guidance_scale: float = 10.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[torch.Generator] = None, + prompt_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_encoder_hidden_states: Optional[torch.Tensor] = None, + output_type="pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + micro_conditioning_aesthetic_score: int = 6, + micro_conditioning_crop_coord: Tuple[int, int] = (0, 0), + temperature: Union[int, Tuple[int, int], List[int]] = (2, 0), + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. 
If not defined, you need to pass `prompt_embeds`. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + strength (`float`, *optional*, defaults to 0.5): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 16): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 10.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. A single vector from the + pooled and projected final hidden states. + encoder_hidden_states (`torch.FloatTensor`, *optional*): + Pre-generated penultimate hidden states from the text encoder providing additional text conditioning. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + negative_encoder_hidden_states (`torch.FloatTensor`, *optional*): + Analogous to `encoder_hidden_states` for the positive prompt. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. 
+ callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + micro_conditioning_aesthetic_score (`int`, *optional*, defaults to 6): + The targeted aesthetic score according to the laion aesthetic classifier. See https://laion.ai/blog/laion-aesthetics/ + and the micro-conditioning section of https://arxiv.org/abs/2307.01952. + micro_conditioning_crop_coord (`Tuple[int]`, *optional*, defaults to (0, 0)): + The targeted height, width crop coordinates. See the micro-conditioning section of https://arxiv.org/abs/2307.01952. + temperature (`Union[int, Tuple[int, int], List[int]]`, *optional*, defaults to (2, 0)): + Configures the temperature scheduler on `self.scheduler` see `AmusedScheduler#set_timesteps`. + + Examples: + + Returns: + [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.pipeline_utils.ImagePipelineOutput`] is returned, otherwise a + `tuple` is returned where the first element is a list with the generated images. + """ + + if (prompt_embeds is not None and encoder_hidden_states is None) or ( + prompt_embeds is None and encoder_hidden_states is not None + ): + raise ValueError("pass either both `prompt_embeds` and `encoder_hidden_states` or neither") + + if (negative_prompt_embeds is not None and negative_encoder_hidden_states is None) or ( + negative_prompt_embeds is None and negative_encoder_hidden_states is not None + ): + raise ValueError( + "pass either both `negatve_prompt_embeds` and `negative_encoder_hidden_states` or neither" + ) + + if (prompt is None and prompt_embeds is None) or (prompt is not None and prompt_embeds is not None): + raise ValueError("pass only one of `prompt` or `prompt_embeds`") + + if isinstance(prompt, str): + prompt = [prompt] + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + batch_size = batch_size * num_images_per_prompt + + if prompt_embeds is None: + input_ids = self.tokenizer( + prompt, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=self.tokenizer.model_max_length, + ).input_ids.to(self._execution_device) + + outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) + prompt_embeds = outputs.text_embeds + encoder_hidden_states = outputs.hidden_states[-2] + + prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1) + encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) + + if guidance_scale > 1.0: + if negative_prompt_embeds is None: + if negative_prompt is None: + negative_prompt = [""] * len(prompt) + + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + + input_ids = self.tokenizer( + negative_prompt, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=self.tokenizer.model_max_length, + ).input_ids.to(self._execution_device) + + outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) + negative_prompt_embeds = outputs.text_embeds + negative_encoder_hidden_states = outputs.hidden_states[-2] + + negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1) + 
negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) + + prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds]) + encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states]) + + image = self.image_processor.preprocess(image) + + height, width = image.shape[-2:] + + # Note that the micro conditionings _do_ flip the order of width, height for the original size + # and the crop coordinates. This is how it was done in the original code base + micro_conds = torch.tensor( + [ + width, + height, + micro_conditioning_crop_coord[0], + micro_conditioning_crop_coord[1], + micro_conditioning_aesthetic_score, + ], + device=self._execution_device, + dtype=encoder_hidden_states.dtype, + ) + + micro_conds = micro_conds.unsqueeze(0) + micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1) + + self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device) + num_inference_steps = int(len(self.scheduler.timesteps) * strength) + start_timestep_idx = len(self.scheduler.timesteps) - num_inference_steps + + needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast + + if needs_upcasting: + self.vqvae.float() + + latents = self.vqvae.encode(image.to(dtype=self.vqvae.dtype, device=self._execution_device)).latents + latents_bsz, channels, latents_height, latents_width = latents.shape + latents = self.vqvae.quantize(latents)[2][2].reshape(latents_bsz, latents_height, latents_width) + latents = self.scheduler.add_noise( + latents, self.scheduler.timesteps[start_timestep_idx - 1], generator=generator + ) + latents = latents.repeat(num_images_per_prompt, 1, 1) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i in range(start_timestep_idx, len(self.scheduler.timesteps)): + timestep = self.scheduler.timesteps[i] + + if guidance_scale > 1.0: + model_input = torch.cat([latents] * 2) + else: + model_input = latents + + model_output = self.transformer( + model_input, + micro_conds=micro_conds, + pooled_text_emb=prompt_embeds, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + ) + + if guidance_scale > 1.0: + uncond_logits, cond_logits = model_output.chunk(2) + model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits) + + latents = self.scheduler.step( + model_output=model_output, + timestep=timestep, + sample=latents, + generator=generator, + ).prev_sample + + if i == len(self.scheduler.timesteps) - 1 or ((i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, timestep, latents) + + if output_type == "latent": + output = latents + else: + output = self.vqvae.decode( + latents, + force_not_quantize=True, + shape=( + batch_size, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + self.vqvae.config.latent_channels, + ), + ).sample.clip(0, 1) + output = self.image_processor.postprocess(output, output_type) + + if needs_upcasting: + self.vqvae.half() + + self.maybe_free_model_hooks() + + if not return_dict: + return (output,) + + return ImagePipelineOutput(output) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/pipeline_amused_inpaint.py 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/pipeline_amused_inpaint.py new file mode 100644 index 000000000..423f5734b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/amused/pipeline_amused_inpaint.py @@ -0,0 +1,378 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import CLIPTextModelWithProjection, CLIPTokenizer + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...models import UVit2DModel, VQModel +from ...schedulers import AmusedScheduler +from ...utils import replace_example_docstring +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AmusedInpaintPipeline + >>> from diffusers.utils import load_image + + >>> pipe = AmusedInpaintPipeline.from_pretrained( + ... "amused/amused-512", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "fall mountains" + >>> input_image = ( + ... load_image( + ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg" + ... ) + ... .resize((512, 512)) + ... .convert("RGB") + ... ) + >>> mask = ( + ... load_image( + ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png" + ... ) + ... .resize((512, 512)) + ... .convert("L") + ... ) + >>> pipe(prompt, input_image, mask).images[0].save("out.png") + ``` +""" + + +class AmusedInpaintPipeline(DiffusionPipeline): + image_processor: VaeImageProcessor + vqvae: VQModel + tokenizer: CLIPTokenizer + text_encoder: CLIPTextModelWithProjection + transformer: UVit2DModel + scheduler: AmusedScheduler + + model_cpu_offload_seq = "text_encoder->transformer->vqvae" + + # TODO - when calling self.vqvae.quantize, it uses self.vqvae.quantize.embedding.weight before + # the forward method of self.vqvae.quantize, so the hook doesn't get called to move the parameter + # off the meta device. 
There should be a way to fix this instead of just not offloading it + _exclude_from_cpu_offload = ["vqvae"] + + def __init__( + self, + vqvae: VQModel, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModelWithProjection, + transformer: UVit2DModel, + scheduler: AmusedScheduler, + ): + super().__init__() + + self.register_modules( + vqvae=vqvae, + tokenizer=tokenizer, + text_encoder=text_encoder, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, + do_normalize=False, + do_binarize=True, + do_convert_grayscale=True, + do_resize=True, + ) + self.scheduler.register_to_config(masking_schedule="linear") + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[List[str], str]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + strength: float = 1.0, + num_inference_steps: int = 12, + guidance_scale: float = 10.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[torch.Generator] = None, + prompt_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_encoder_hidden_states: Optional[torch.Tensor] = None, + output_type="pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + micro_conditioning_aesthetic_score: int = 6, + micro_conditioning_crop_coord: Tuple[int, int] = (0, 0), + temperature: Union[int, Tuple[int, int], List[int]] = (2, 0), + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + mask_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask + are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a + single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one + color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B, + H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W, + 1)`, or `(H, W)`. 
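A minimal sketch of what the `mask_processor` configured in `__init__` above amounts to for a single PIL mask, and how the resulting boolean mask is consumed further down in `__call__`; the 512x512 size and the vae_scale_factor of 16 are illustrative assumptions, not values taken from this file.

import numpy as np
import torch
from PIL import Image

mask_img = Image.open("mask.png").convert("L")       # do_convert_grayscale=True
latent_h = latent_w = 512 // 16                      # do_resize=True: down to latent resolution (assumed scale factor)
mask = torch.from_numpy(
    np.array(mask_img.resize((latent_w, latent_h)), dtype=np.float32) / 255.0
)
mask = mask >= 0.5                                   # do_binarize=True -> boolean mask
# Later in __call__, masked latent positions are replaced by the scheduler's mask token:
# latents[mask] = self.scheduler.config.mask_token_id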
+ strength (`float`, *optional*, defaults to 1.0): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 16): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 10.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. A single vector from the + pooled and projected final hidden states. + encoder_hidden_states (`torch.FloatTensor`, *optional*): + Pre-generated penultimate hidden states from the text encoder providing additional text conditioning. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + negative_encoder_hidden_states (`torch.FloatTensor`, *optional*): + Analogous to `encoder_hidden_states` for the positive prompt. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + micro_conditioning_aesthetic_score (`int`, *optional*, defaults to 6): + The targeted aesthetic score according to the laion aesthetic classifier. 
See https://laion.ai/blog/laion-aesthetics/ + and the micro-conditioning section of https://arxiv.org/abs/2307.01952. + micro_conditioning_crop_coord (`Tuple[int]`, *optional*, defaults to (0, 0)): + The targeted height, width crop coordinates. See the micro-conditioning section of https://arxiv.org/abs/2307.01952. + temperature (`Union[int, Tuple[int, int], List[int]]`, *optional*, defaults to (2, 0)): + Configures the temperature scheduler on `self.scheduler` see `AmusedScheduler#set_timesteps`. + + Examples: + + Returns: + [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.pipeline_utils.ImagePipelineOutput`] is returned, otherwise a + `tuple` is returned where the first element is a list with the generated images. + """ + + if (prompt_embeds is not None and encoder_hidden_states is None) or ( + prompt_embeds is None and encoder_hidden_states is not None + ): + raise ValueError("pass either both `prompt_embeds` and `encoder_hidden_states` or neither") + + if (negative_prompt_embeds is not None and negative_encoder_hidden_states is None) or ( + negative_prompt_embeds is None and negative_encoder_hidden_states is not None + ): + raise ValueError( + "pass either both `negatve_prompt_embeds` and `negative_encoder_hidden_states` or neither" + ) + + if (prompt is None and prompt_embeds is None) or (prompt is not None and prompt_embeds is not None): + raise ValueError("pass only one of `prompt` or `prompt_embeds`") + + if isinstance(prompt, str): + prompt = [prompt] + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + batch_size = batch_size * num_images_per_prompt + + if prompt_embeds is None: + input_ids = self.tokenizer( + prompt, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=self.tokenizer.model_max_length, + ).input_ids.to(self._execution_device) + + outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) + prompt_embeds = outputs.text_embeds + encoder_hidden_states = outputs.hidden_states[-2] + + prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1) + encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) + + if guidance_scale > 1.0: + if negative_prompt_embeds is None: + if negative_prompt is None: + negative_prompt = [""] * len(prompt) + + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + + input_ids = self.tokenizer( + negative_prompt, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=self.tokenizer.model_max_length, + ).input_ids.to(self._execution_device) + + outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) + negative_prompt_embeds = outputs.text_embeds + negative_encoder_hidden_states = outputs.hidden_states[-2] + + negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1) + negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) + + prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds]) + encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states]) + + image = self.image_processor.preprocess(image) + + height, width = image.shape[-2:] + + # Note that the micro conditionings _do_ flip the order of width, height for the original size + # and the crop coordinates. 
This is how it was done in the original code base + micro_conds = torch.tensor( + [ + width, + height, + micro_conditioning_crop_coord[0], + micro_conditioning_crop_coord[1], + micro_conditioning_aesthetic_score, + ], + device=self._execution_device, + dtype=encoder_hidden_states.dtype, + ) + + micro_conds = micro_conds.unsqueeze(0) + micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1) + + self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device) + num_inference_steps = int(len(self.scheduler.timesteps) * strength) + start_timestep_idx = len(self.scheduler.timesteps) - num_inference_steps + + needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast + + if needs_upcasting: + self.vqvae.float() + + latents = self.vqvae.encode(image.to(dtype=self.vqvae.dtype, device=self._execution_device)).latents + latents_bsz, channels, latents_height, latents_width = latents.shape + latents = self.vqvae.quantize(latents)[2][2].reshape(latents_bsz, latents_height, latents_width) + + mask = self.mask_processor.preprocess( + mask_image, height // self.vae_scale_factor, width // self.vae_scale_factor + ) + mask = mask.reshape(mask.shape[0], latents_height, latents_width).bool().to(latents.device) + latents[mask] = self.scheduler.config.mask_token_id + + starting_mask_ratio = mask.sum() / latents.numel() + + latents = latents.repeat(num_images_per_prompt, 1, 1) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i in range(start_timestep_idx, len(self.scheduler.timesteps)): + timestep = self.scheduler.timesteps[i] + + if guidance_scale > 1.0: + model_input = torch.cat([latents] * 2) + else: + model_input = latents + + model_output = self.transformer( + model_input, + micro_conds=micro_conds, + pooled_text_emb=prompt_embeds, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + ) + + if guidance_scale > 1.0: + uncond_logits, cond_logits = model_output.chunk(2) + model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits) + + latents = self.scheduler.step( + model_output=model_output, + timestep=timestep, + sample=latents, + generator=generator, + starting_mask_ratio=starting_mask_ratio, + ).prev_sample + + if i == len(self.scheduler.timesteps) - 1 or ((i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, timestep, latents) + + if output_type == "latent": + output = latents + else: + output = self.vqvae.decode( + latents, + force_not_quantize=True, + shape=( + batch_size, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + self.vqvae.config.latent_channels, + ), + ).sample.clip(0, 1) + output = self.image_processor.postprocess(output, output_type) + + if needs_upcasting: + self.vqvae.half() + + self.maybe_free_model_hooks() + + if not return_dict: + return (output,) + + return ImagePipelineOutput(output) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/__init__.py new file mode 100644 index 000000000..35b99a76f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/__init__.py @@ -0,0 +1,49 @@ +from typing import TYPE_CHECKING + +from 
...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {"pipeline_output": ["AnimateDiffPipelineOutput"]} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_animatediff"] = ["AnimateDiffPipeline"] + _import_structure["pipeline_animatediff_video2video"] = ["AnimateDiffVideoToVideoPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .pipeline_animatediff import AnimateDiffPipeline + from .pipeline_animatediff_video2video import AnimateDiffVideoToVideoPipeline + from .pipeline_output import AnimateDiffPipelineOutput + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/pipeline_animatediff.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/pipeline_animatediff.py new file mode 100644 index 000000000..cd7f0a283 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/pipeline_animatediff.py @@ -0,0 +1,847 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
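The `__init__.py` above routes the animatediff pipelines through diffusers' internal `_LazyModule` so that heavy imports only happen on first attribute access. Below is a rough, self-contained sketch of that general pattern using `importlib`; the class and names are hypothetical and are not the actual `_LazyModule` implementation.

import importlib
import types


class LazyPackageModule(types.ModuleType):
    """Hypothetical, simplified stand-in for a lazy-loading package module."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"pipeline_animatediff": ["AnimateDiffPipeline"]}
        self._class_to_module = {
            cls: submodule
            for submodule, classes in import_structure.items()
            for cls in classes
        }

    def __getattr__(self, item):
        # The owning submodule is imported only when the attribute is first requested.
        submodule = importlib.import_module(f"{self.__name__}.{self._class_to_module[item]}")
        value = getattr(submodule, item)
        setattr(self, item, value)  # cache for subsequent lookups
        return value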
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...models.unets.unet_motion_model import MotionAdapter +from ...schedulers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..free_init_utils import FreeInitMixin +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import AnimateDiffPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler + >>> from diffusers.utils import export_to_gif + + >>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2") + >>> pipe = AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter) + >>> pipe.scheduler = DDIMScheduler(beta_schedule="linear", steps_offset=1, clip_sample=False) + >>> output = pipe(prompt="A corgi walking in the park") + >>> frames = output.frames[0] + >>> export_to_gif(frames, "animation.gif") + ``` +""" + + +def tensor2vid(video: torch.Tensor, processor: "VaeImageProcessor", output_type: str = "np"): + batch_size, channels, num_frames, height, width = video.shape + outputs = [] + for batch_idx in range(batch_size): + batch_vid = video[batch_idx].permute(1, 0, 2, 3) + batch_output = processor.postprocess(batch_vid, output_type) + + outputs.append(batch_output) + + if output_type == "np": + outputs = np.stack(outputs) + + elif output_type == "pt": + outputs = torch.stack(outputs) + + elif not output_type == "pil": + raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']") + + return outputs + + +class AnimateDiffPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + LoraLoaderMixin, + FreeInitMixin, +): + r""" + Pipeline for text-to-video generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. 
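A small hedged sketch of the shape handling in the `tensor2vid` helper above: the 5-D video tensor is unbound per batch item and permuted so each frame stack can be handed to the image processor. The concrete sizes are hypothetical.

import torch

video = torch.rand(2, 3, 16, 64, 64)                 # (batch, channels, num_frames, height, width)
for batch_idx in range(video.shape[0]):
    batch_vid = video[batch_idx].permute(1, 0, 2, 3) # -> (num_frames, channels, height, width)
    assert batch_vid.shape == (16, 3, 64, 64)
    # each frame stack is then run through processor.postprocess(batch_vid, output_type)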
+ text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents. + motion_adapter ([`MotionAdapter`]): + A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["feature_extractor", "image_encoder", "motion_adapter"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + motion_adapter: MotionAdapter, + scheduler: Union[ + DDIMScheduler, + PNDMScheduler, + LMSDiscreteScheduler, + EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + DPMSolverMultistepScheduler, + ], + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + if isinstance(unet, UNet2DConditionModel): + unet = UNetMotionModel.from_unet2d(unet, motion_adapter) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + motion_adapter=motion_adapter, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
+ lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
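# Illustrative note (not from the original file): with output_hidden_states=True the text
# encoder returns the embedding output plus one hidden state per layer, so with
# clip_skip=1 the index -(clip_skip + 1) == -2 selects the penultimate layer,
# which is then passed through final_layer_norm below.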
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + image = self.vae.decode(latents).sample + video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead 
and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + num_frames: Optional[int] = 16, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated video. 
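A hypothetical instance of the latent shape assembled by `prepare_latents` above; the concrete channel count, frame count, and vae_scale_factor are illustrative assumptions.

batch_size, num_channels_latents, num_frames = 1, 4, 16
height = width = 512
vae_scale_factor = 8
shape = (batch_size, num_channels_latents, num_frames,
         height // vae_scale_factor, width // vae_scale_factor)
assert shape == (1, 4, 16, 64, 64)
# randn_tensor(shape, ...) is then scaled by self.scheduler.init_noise_sigma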
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated video. + num_frames (`int`, *optional*, defaults to 16): + The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds + amounts to 2 seconds of video. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality videos at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. Latents should be of shape + `(batch_size, num_channel, num_frames, height, width)`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated video. Choose between `torch.FloatTensor`, `PIL.Image` or + `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead + of a plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). 
+ clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeine class. + + Examples: + + Returns: + [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_videos_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_channels_latents, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 + for free_init_iter in range(num_free_init_iters): + if self.free_init_enabled: + latents, timesteps = self._apply_free_init( + latents, free_init_iter, num_inference_steps, device, latents.dtype, generator + ) + + self._num_timesteps = len(timesteps) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 8. Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + ).sample + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # 9. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents) + video = tensor2vid(video_tensor, self.image_processor, output_type=output_type) + + # 10. 
Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return AnimateDiffPipelineOutput(frames=video) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py new file mode 100644 index 000000000..cb6b71351 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py @@ -0,0 +1,997 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...models.unets.unet_motion_model import MotionAdapter +from ...schedulers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..free_init_utils import FreeInitMixin +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import AnimateDiffPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import imageio + >>> import requests + >>> import torch + >>> from diffusers import AnimateDiffVideoToVideoPipeline, DDIMScheduler, MotionAdapter + >>> from diffusers.utils import export_to_gif + >>> from io import BytesIO + >>> from PIL import Image + + >>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16) + >>> pipe = AnimateDiffVideoToVideoPipeline.from_pretrained("SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=adapter).to("cuda") + >>> pipe.scheduler = DDIMScheduler(beta_schedule="linear", steps_offset=1, clip_sample=False, timespace_spacing="linspace") + + >>> def load_video(file_path: str): + ... images = [] + ... + ... if file_path.startswith(('http://', 'https://')): + ... # If the file_path is a URL + ... response = requests.get(file_path) + ... response.raise_for_status() + ... content = BytesIO(response.content) + ... vid = imageio.get_reader(content) + ... else: + ... # Assuming it's a local file path + ... vid = imageio.get_reader(file_path) + ... + ... for frame in vid: + ... 
pil_image = Image.fromarray(frame) + ... images.append(pil_image) + ... + ... return images + + >>> video = load_video("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-1.gif") + >>> output = pipe(video=video, prompt="panda playing a guitar, on a boat, in the ocean, high quality", strength=0.5) + >>> frames = output.frames[0] + >>> export_to_gif(frames, "animation.gif") + ``` +""" + + +# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid +def tensor2vid(video: torch.Tensor, processor, output_type="np"): + batch_size, channels, num_frames, height, width = video.shape + outputs = [] + for batch_idx in range(batch_size): + batch_vid = video[batch_idx].permute(1, 0, 2, 3) + batch_output = processor.postprocess(batch_vid, output_type) + + outputs.append(batch_output) + + if output_type == "np": + outputs = np.stack(outputs) + + elif output_type == "pt": + outputs = torch.stack(outputs) + + elif not output_type == "pil": + raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']") + + return outputs + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class AnimateDiffVideoToVideoPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + LoraLoaderMixin, + FreeInitMixin, +): + r""" + Pipeline for video-to-video generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents. + motion_adapter ([`MotionAdapter`]): + A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
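Stepping outside the patch for a moment: the `retrieve_timesteps` helper above chooses between a custom timestep schedule and the scheduler's default spacing by inspecting the `set_timesteps` signature. Below is a minimal, self-contained sketch of that dispatch, with a hypothetical `ToyScheduler` standing in for a real diffusers scheduler.

```py
import inspect

import torch


class ToyScheduler:
    """Hypothetical stand-in for a diffusers scheduler; not part of the patch."""

    def set_timesteps(self, num_inference_steps=None, device=None, timesteps=None):
        if timesteps is not None:
            self.timesteps = torch.tensor(timesteps, device=device)
        else:
            # evenly spaced schedule from high to low noise, as most schedulers produce
            self.timesteps = torch.linspace(999, 0, num_inference_steps, device=device).long()


def retrieve_timesteps_sketch(scheduler, num_inference_steps=None, device=None, timesteps=None):
    # same dispatch as the helper above: custom schedules only if the scheduler supports them
    if timesteps is not None:
        if "timesteps" not in inspect.signature(scheduler.set_timesteps).parameters:
            raise ValueError(f"{scheduler.__class__} does not support custom timestep schedules.")
        scheduler.set_timesteps(timesteps=timesteps, device=device)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device)
    return scheduler.timesteps, len(scheduler.timesteps)


sched = ToyScheduler()
ts, n = retrieve_timesteps_sketch(sched, num_inference_steps=4)
ts_custom, n_custom = retrieve_timesteps_sketch(sched, timesteps=[800, 600, 400, 200])
print(n, ts.tolist())                # 4 [999, 666, 333, 0]
print(n_custom, ts_custom.tolist())  # 4 [800, 600, 400, 200]
```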
+ """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["feature_extractor", "image_encoder", "motion_adapter"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + motion_adapter: MotionAdapter, + scheduler: Union[ + DDIMScheduler, + PNDMScheduler, + LMSDiscreteScheduler, + EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + DPMSolverMultistepScheduler, + ], + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + if isinstance(unet, UNet2DConditionModel): + unet = UNetMotionModel.from_unet2d(unet, motion_adapter) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + motion_adapter=motion_adapter, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
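One implementation detail worth a quick illustration before the body of `encode_prompt`: the duplication of embeddings for each generation per prompt further below uses a `repeat`/`view` pair (called the "mps friendly method" in the code). Under the assumed CLIP-style shapes here it produces the same result as `repeat_interleave` along the batch dimension.

```py
import torch

# hypothetical shapes: 2 prompts, 77 tokens, 768-dim embeddings
bs_embed, seq_len, dim = 2, 77, 768
num_images_per_prompt = 3
prompt_embeds = torch.randn(bs_embed, seq_len, dim)

# the repeat/view pair used in the pipeline body
dup = prompt_embeds.repeat(1, num_images_per_prompt, 1)          # (2, 231, 768)
dup = dup.view(bs_embed * num_images_per_prompt, seq_len, dim)   # (6, 77, 768)

# equivalent result, for comparison
ref = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
assert torch.equal(dup, ref)
print(dup.shape)  # torch.Size([6, 77, 768])
```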
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + image = self.vae.decode(latents).sample + video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead 
and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + height, + width, + video=None, + latents=None, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if video is not None and latents is not None: + raise ValueError("Only one of `video` or `latents` should be provided") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def get_timesteps(self, num_inference_steps, timesteps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = timesteps[t_start * self.scheduler.order :] + + return timesteps, num_inference_steps - t_start + + def prepare_latents( + self, + video, + height, + width, + num_channels_latents, + batch_size, + timestep, + dtype, + device, + generator, + latents=None, + ): + # video must be a list of list of images + # the outer list denotes having multiple videos as input, whereas inner list means the frames of the video + # as a list of images + if not isinstance(video[0], list): + video = [video] + if latents is None: + video = torch.cat( + [self.image_processor.preprocess(vid, height=height, width=width).unsqueeze(0) for vid in video], dim=0 + ) + video = video.to(device=device, dtype=dtype) + num_frames = video.shape[1] + else: + num_frames = latents.shape[2] + + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + video = video.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list): + if len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + init_latents = [ + retrieve_latents(self.vae.encode(video[i]), generator=generator[i]).unsqueeze(0) + for i in range(batch_size) + ] + else: + init_latents = [ + retrieve_latents(self.vae.encode(vid), generator=generator).unsqueeze(0) for vid in video + ] + + init_latents = torch.cat(init_latents, dim=0) + + # restore vae to original dtype + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + error_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Please make sure to update your script to pass as many initial images as text prompts" + ) + raise ValueError(error_message) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + init_latents = torch.cat([init_latents], dim=0) + + noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) + latents = self.scheduler.add_noise(init_latents, noise, timestep).permute(0, 2, 1, 3, 4) + else: + if shape != latents.shape: + # [B, C, F, H, W] + raise ValueError(f"`latents` expected to have {shape=}, but found {latents.shape=}") + latents = latents.to(device, dtype=dtype) + + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + def __call__( + self, + video: List[List[PipelineImageInput]] = None, + prompt: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: Optional[List[int]] = None, + guidance_scale: float = 7.5, + strength: float = 0.8, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + r""" + The call function to the pipeline for generation. + + Args: + video (`List[PipelineImageInput]`): + The input video to condition the generation on. Must be a list of images/frames of the video. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated video. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated video. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality videos at the + expense of slower inference. + strength (`float`, *optional*, defaults to 0.8): + Higher strength leads to more differences between original video and generated video. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. 
If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. Latents should be of shape + `(batch_size, num_channel, num_frames, height, width)`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated video. Choose between `torch.FloatTensor`, `PIL.Image` or + `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`AnimateDiffPipelineOutput`] instead + of a plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeine class. 
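Before the pipeline body that follows, a rough numeric sketch of how `strength` interacts with `get_timesteps` (defined earlier): with a 50-step schedule and `strength=0.5`, only the last 25 timesteps are actually run. The numbers below are illustrative and a first-order scheduler is assumed.

```py
# numbers are illustrative; scheduler.order is assumed to be 1
num_inference_steps, strength, order = 50, 0.5, 1
timesteps = list(range(999, -1, -20))   # hypothetical 50-entry schedule: 999, 979, ..., 19

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 25
t_start = max(num_inference_steps - init_timestep, 0)                          # 25
kept = timesteps[t_start * order:]

print(len(kept), kept[0], kept[-1])  # 25 499 19
```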
+ + Examples: + + Returns: + [`pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_videos_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt=prompt, + strength=strength, + height=height, + width=width, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + video=video, + latents=latents, + ip_adapter_image=ip_adapter_image, + ip_adapter_image_embeds=ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + video=video, + height=height, + width=width, + num_channels_latents=num_channels_latents, + batch_size=batch_size * num_videos_per_prompt, + timestep=latent_timestep, + dtype=prompt_embeds.dtype, + device=device, + generator=generator, + latents=latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 + for free_init_iter in range(num_free_init_iters): + if self.free_init_enabled: + latents, timesteps = self._apply_free_init( + latents, free_init_iter, num_inference_steps, device, latents.dtype, generator + ) + num_inference_steps = len(timesteps) + # make sure to readjust timesteps based on strength + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device) + + self._num_timesteps = len(timesteps) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 8. Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + ).sample + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # 9. Post-processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents) + video = tensor2vid(video_tensor, self.image_processor, output_type=output_type) + + # 10. Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return AnimateDiffPipelineOutput(frames=video) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/pipeline_output.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/pipeline_output.py new file mode 100644 index 000000000..184a45848 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/animatediff/pipeline_output.py @@ -0,0 +1,23 @@ +from dataclasses import dataclass +from typing import List, Union + +import numpy as np +import PIL.Image +import torch + +from ...utils import BaseOutput + + +@dataclass +class AnimateDiffPipelineOutput(BaseOutput): + r""" + Output class for AnimateDiff pipelines. 
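The guidance step inside the denoising loop above is compact enough that a standalone sketch may help: the batched prediction is split into its unconditional and text-conditioned halves and recombined with `guidance_scale`. The tensor shapes below are hypothetical.

```py
import torch

guidance_scale = 7.5
# hypothetical (2 * batch, channels, frames, height, width) prediction from the UNet
noise_pred = torch.randn(4, 4, 16, 64, 64)

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([2, 4, 16, 64, 64])
```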
+ + Args: + frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): + List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing denoised + PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape + `(batch_size, num_frames, channels, height, width)` + """ + + frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]] diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm/__init__.py new file mode 100644 index 000000000..a002b4aa7 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm/__init__.py @@ -0,0 +1,51 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + AudioLDMPipeline, + ) + + _dummy_objects.update({"AudioLDMPipeline": AudioLDMPipeline}) +else: + _import_structure["pipeline_audioldm"] = ["AudioLDMPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + AudioLDMPipeline, + ) + + else: + from .pipeline_audioldm import AudioLDMPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm/pipeline_audioldm.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm/pipeline_audioldm.py new file mode 100644 index 000000000..69bebdd0d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm/pipeline_audioldm.py @@ -0,0 +1,546 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
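As an aside on the audioldm `__init__.py` just above: the guarded-import pattern registers dummy objects when an optional backend is missing, so the error surfaces only when the class is actually used. A self-contained sketch of the idea follows; all names in it are hypothetical, not the actual diffusers helpers.

```py
import importlib.util

_dummy_objects = {}


class _DummyAudioLDMPipeline:
    """Raises a helpful error only when someone actually tries to use the pipeline."""

    def __init__(self, *args, **kwargs):
        raise ImportError("AudioLDMPipeline requires torch and transformers>=4.27.0.")


def _backend_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None


if _backend_available("some_optional_backend"):    # hypothetical dependency name
    pass                                            # the real pipeline class would be imported here
else:
    _dummy_objects["AudioLDMPipeline"] = _DummyAudioLDMPipeline

print(sorted(_dummy_objects))                       # ['AudioLDMPipeline'] when the backend is absent
```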
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +import torch.nn.functional as F +from transformers import ClapTextModelWithProjection, RobertaTokenizer, RobertaTokenizerFast, SpeechT5HifiGan + +from ...models import AutoencoderKL, UNet2DConditionModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline, StableDiffusionMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import AudioLDMPipeline + >>> import torch + >>> import scipy + + >>> repo_id = "cvssp/audioldm-s-full-v2" + >>> pipe = AudioLDMPipeline.from_pretrained(repo_id, torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> prompt = "Techno music with a strong, upbeat tempo and high melodic riffs" + >>> audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0] + + >>> # save the audio sample as a .wav file + >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio) + ``` +""" + + +class AudioLDMPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-audio generation using AudioLDM. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.ClapTextModelWithProjection`]): + Frozen text-encoder (`ClapTextModelWithProjection`, specifically the + [laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant. + tokenizer ([`PreTrainedTokenizer`]): + A [`~transformers.RobertaTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded audio latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + vocoder ([`~transformers.SpeechT5HifiGan`]): + Vocoder of class `SpeechT5HifiGan`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: ClapTextModelWithProjection, + tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + vocoder: SpeechT5HifiGan, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + vocoder=vocoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def _encode_prompt( + self, + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
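A small worked example of the `vae_scale_factor` computed in the AudioLDM `__init__` above, which also governs the latent resolution used later in `prepare_latents`. The channel list is hypothetical.

```py
block_out_channels = [128, 256, 512]            # hypothetical 3-level VAE config
vae_scale_factor = 2 ** (len(block_out_channels) - 1)
print(vae_scale_factor)                          # 4 -> latents are 1/4 the spectrogram resolution
```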
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device (`torch.device`): + torch device + num_waveforms_per_prompt (`int`): + number of waveforms that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the audio generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLAP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask.to(device), + ) + prompt_embeds = prompt_embeds.text_embeds + # additional L_2 normalization over each hidden-state + prompt_embeds = F.normalize(prompt_embeds, dim=-1) + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + ( + bs_embed, + seq_len, + ) = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt) + prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + uncond_input_ids = uncond_input.input_ids.to(device) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input_ids, + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds.text_embeds + # additional L_2 normalization over each hidden-state + negative_prompt_embeds = F.normalize(negative_prompt_embeds, dim=-1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + mel_spectrogram = self.vae.decode(latents).sample + return mel_spectrogram + + def mel_spectrogram_to_waveform(self, mel_spectrogram): + if mel_spectrogram.dim() == 4: + mel_spectrogram = mel_spectrogram.squeeze(1) + + waveform = self.vocoder(mel_spectrogram) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + waveform = waveform.cpu().float() + return waveform + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor + if audio_length_in_s < min_audio_length_in_s: + raise ValueError( + f"`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but " + f"is {audio_length_in_s}." 
+ ) + + if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: + raise ValueError( + f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the " + f"VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of " + f"{self.vae_scale_factor}." + ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents with width->self.vocoder.config.model_in_dim + def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + height // self.vae_scale_factor, + self.vocoder.config.model_in_dim // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + audio_length_in_s: Optional[float] = None, + num_inference_steps: int = 10, + guidance_scale: float = 2.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_waveforms_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + output_type: Optional[str] = "np", + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`. + audio_length_in_s (`int`, *optional*, defaults to 5.12): + The length of the generated audio sample in seconds. + num_inference_steps (`int`, *optional*, defaults to 10): + The number of denoising steps. More denoising steps usually lead to a higher quality audio at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 2.5): + A higher guidance scale value encourages the model to generate audio that is closely linked to the text + `prompt` at the expense of lower sound quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in audio generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_waveforms_per_prompt (`int`, *optional*, defaults to 1): + The number of waveforms to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. 
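For the audio case, `prepare_latents` above builds its 4-D latent shape from the spectrogram length and the vocoder's mel-bin count rather than an image width. A rough shape sketch with hypothetical numbers:

```py
batch_size, num_channels_latents = 2, 8
height = 512                 # spectrogram frames (time axis), already a multiple of the scale factor
model_in_dim = 64            # mel bins expected by the vocoder
vae_scale_factor = 4

shape = (batch_size, num_channels_latents, height // vae_scale_factor, model_in_dim // vae_scale_factor)
print(shape)                 # (2, 8, 128, 16)
```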
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + output_type (`str`, *optional*, defaults to `"np"`): + The output format of the generated image. Choose between `"np"` to return a NumPy `np.ndarray` or + `"pt"` to return a PyTorch `torch.Tensor` object. + + Examples: + + Returns: + [`~pipelines.AudioPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated audio. + """ + # 0. Convert audio input length from seconds to spectrogram height + vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate + + if audio_length_in_s is None: + audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor + + height = int(audio_length_in_s / vocoder_upsample_factor) + + original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) + if height % self.vae_scale_factor != 0: + height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor + logger.info( + f"Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} " + f"so that it can be handled by the model. It will be cut to {audio_length_in_s} after the " + f"denoising process." + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds = self._encode_prompt( + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_waveforms_per_prompt, + num_channels_latents, + height, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. 
Prepare extra step kwargs + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=None, + class_labels=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 8. Post-processing + mel_spectrogram = self.decode_latents(latents) + + audio = self.mel_spectrogram_to_waveform(mel_spectrogram) + + audio = audio[:, :original_waveform_length] + + if output_type == "np": + audio = audio.numpy() + + if not return_dict: + return (audio,) + + return AudioPipelineOutput(audios=audio) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm2/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm2/__init__.py new file mode 100644 index 000000000..23cd0e44f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm2/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["modeling_audioldm2"] = ["AudioLDM2ProjectionModel", "AudioLDM2UNet2DConditionModel"] + _import_structure["pipeline_audioldm2"] = ["AudioLDM2Pipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .modeling_audioldm2 import AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel + from .pipeline_audioldm2 import AudioLDM2Pipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + 
module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm2/modeling_audioldm2.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm2/modeling_audioldm2.py new file mode 100644 index 000000000..c0b85e4db --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm2/modeling_audioldm2.py @@ -0,0 +1,1511 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import UNet2DConditionLoadersMixin +from ...models.activations import get_activation +from ...models.attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from ...models.embeddings import ( + TimestepEmbedding, + Timesteps, +) +from ...models.modeling_utils import ModelMixin +from ...models.resnet import Downsample2D, ResnetBlock2D, Upsample2D +from ...models.transformers.transformer_2d import Transformer2DModel +from ...models.unets.unet_2d_blocks import DownBlock2D, UpBlock2D +from ...models.unets.unet_2d_condition import UNet2DConditionOutput +from ...utils import BaseOutput, is_torch_version, logging + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def add_special_tokens(hidden_states, attention_mask, sos_token, eos_token): + batch_size = hidden_states.shape[0] + + if attention_mask is not None: + # Add two more steps to attn mask + new_attn_mask_step = attention_mask.new_ones((batch_size, 1)) + attention_mask = torch.concat([new_attn_mask_step, attention_mask, new_attn_mask_step], dim=-1) + + # Add the SOS / EOS tokens at the start / end of the sequence respectively + sos_token = sos_token.expand(batch_size, 1, -1) + eos_token = eos_token.expand(batch_size, 1, -1) + hidden_states = torch.concat([sos_token, hidden_states, eos_token], dim=1) + return hidden_states, attention_mask + + +@dataclass +class AudioLDM2ProjectionModelOutput(BaseOutput): + """ + Args: + Class for AudioLDM2 projection layer's outputs. + hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states obtained by linearly projecting the hidden-states for each of the text + encoders and subsequently concatenating them together. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices, formed by concatenating the attention masks + for the two text encoders together. 
Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + """ + + hidden_states: torch.FloatTensor + attention_mask: Optional[torch.LongTensor] = None + + +class AudioLDM2ProjectionModel(ModelMixin, ConfigMixin): + """ + A simple linear projection model to map two text embeddings to a shared latent space. It also inserts learned + embedding vectors at the start and end of each text embedding sequence respectively. Each variable appended with + `_1` refers to that corresponding to the second text encoder. Otherwise, it is from the first. + + Args: + text_encoder_dim (`int`): + Dimensionality of the text embeddings from the first text encoder (CLAP). + text_encoder_1_dim (`int`): + Dimensionality of the text embeddings from the second text encoder (T5 or VITS). + langauge_model_dim (`int`): + Dimensionality of the text embeddings from the language model (GPT2). + """ + + @register_to_config + def __init__(self, text_encoder_dim, text_encoder_1_dim, langauge_model_dim): + super().__init__() + # additional projection layers for each text encoder + self.projection = nn.Linear(text_encoder_dim, langauge_model_dim) + self.projection_1 = nn.Linear(text_encoder_1_dim, langauge_model_dim) + + # learnable SOS / EOS token embeddings for each text encoder + self.sos_embed = nn.Parameter(torch.ones(langauge_model_dim)) + self.eos_embed = nn.Parameter(torch.ones(langauge_model_dim)) + + self.sos_embed_1 = nn.Parameter(torch.ones(langauge_model_dim)) + self.eos_embed_1 = nn.Parameter(torch.ones(langauge_model_dim)) + + def forward( + self, + hidden_states: Optional[torch.FloatTensor] = None, + hidden_states_1: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + attention_mask_1: Optional[torch.LongTensor] = None, + ): + hidden_states = self.projection(hidden_states) + hidden_states, attention_mask = add_special_tokens( + hidden_states, attention_mask, sos_token=self.sos_embed, eos_token=self.eos_embed + ) + + hidden_states_1 = self.projection_1(hidden_states_1) + hidden_states_1, attention_mask_1 = add_special_tokens( + hidden_states_1, attention_mask_1, sos_token=self.sos_embed_1, eos_token=self.eos_embed_1 + ) + + # concatenate clap and t5 text encoding + hidden_states = torch.cat([hidden_states, hidden_states_1], dim=1) + + # concatenate attention masks + if attention_mask is None and attention_mask_1 is not None: + attention_mask = attention_mask_1.new_ones((hidden_states[:2])) + elif attention_mask is not None and attention_mask_1 is None: + attention_mask_1 = attention_mask.new_ones((hidden_states_1[:2])) + + if attention_mask is not None and attention_mask_1 is not None: + attention_mask = torch.cat([attention_mask, attention_mask_1], dim=-1) + else: + attention_mask = None + + return AudioLDM2ProjectionModelOutput( + hidden_states=hidden_states, + attention_mask=attention_mask, + ) + + +class AudioLDM2UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): + r""" + A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample + shaped output. Compared to the vanilla [`UNet2DConditionModel`], this variant optionally includes an additional + self-attention layer in each Transformer block, as well as multiple cross-attention layers. It also allows for up + to two cross-attention embeddings, `encoder_hidden_states` and `encoder_hidden_states_1`. + + This model inherits from [`ModelMixin`]. 
Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): + Height and width of input/output sample. + in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. + out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. + flip_sin_to_cos (`bool`, *optional*, defaults to `False`): + Whether to flip the sin to cos in the time embedding. + freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. + down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): + The tuple of downsample blocks to use. + mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): + Block type for middle of UNet, it can only be `UNetMidBlock2DCrossAttn` for AudioLDM2. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): + The tuple of upsample blocks to use. + only_cross_attention (`bool` or `Tuple[bool]`, *optional*, default to `False`): + Whether to include self-attention in the basic transformer blocks, see + [`~models.attention.BasicTransformerBlock`]. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. + downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. + mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. + act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. + norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. + If `None`, normalization and activation layers is skipped in post-processing. + norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. + cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): + The dimension of the cross attention features. + transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): + The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for + [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], + [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. + attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. + num_attention_heads (`int`, *optional*): + The number of attention heads. If not defined, defaults to `attention_head_dim` + resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config + for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. + class_embed_type (`str`, *optional*, defaults to `None`): + The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, + `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. + num_class_embeds (`int`, *optional*, defaults to `None`): + Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing + class conditioning with `class_embed_type` equal to `None`. 
+ time_embedding_type (`str`, *optional*, defaults to `positional`): + The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. + time_embedding_dim (`int`, *optional*, defaults to `None`): + An optional override for the dimension of the projected time embedding. + time_embedding_act_fn (`str`, *optional*, defaults to `None`): + Optional activation function to use only once on the time embeddings before they are passed to the rest of + the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. + timestep_post_act (`str`, *optional*, defaults to `None`): + The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. + time_cond_proj_dim (`int`, *optional*, defaults to `None`): + The dimension of `cond_proj` layer in the timestep embedding. + conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. + conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer. + projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when + `class_embed_type="projection"`. Required when `class_embed_type="projection"`. + class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time + embeddings with the class embeddings. + """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + sample_size: Optional[int] = None, + in_channels: int = 4, + out_channels: int = 4, + flip_sin_to_cos: bool = True, + freq_shift: int = 0, + down_block_types: Tuple[str] = ( + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "DownBlock2D", + ), + mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn", + up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"), + only_cross_attention: Union[bool, Tuple[bool]] = False, + block_out_channels: Tuple[int] = (320, 640, 1280, 1280), + layers_per_block: Union[int, Tuple[int]] = 2, + downsample_padding: int = 1, + mid_block_scale_factor: float = 1, + act_fn: str = "silu", + norm_num_groups: Optional[int] = 32, + norm_eps: float = 1e-5, + cross_attention_dim: Union[int, Tuple[int]] = 1280, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + attention_head_dim: Union[int, Tuple[int]] = 8, + num_attention_heads: Optional[Union[int, Tuple[int]]] = None, + use_linear_projection: bool = False, + class_embed_type: Optional[str] = None, + num_class_embeds: Optional[int] = None, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + time_embedding_type: str = "positional", + time_embedding_dim: Optional[int] = None, + time_embedding_act_fn: Optional[str] = None, + timestep_post_act: Optional[str] = None, + time_cond_proj_dim: Optional[int] = None, + conv_in_kernel: int = 3, + conv_out_kernel: int = 3, + projection_class_embeddings_input_dim: Optional[int] = None, + class_embeddings_concat: bool = False, + ): + super().__init__() + + self.sample_size = sample_size + + if num_attention_heads is not None: + raise ValueError( + "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." 
+ ) + + # If `num_attention_heads` is not defined (which is the case for most models) + # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. + # The reason for this behavior is to correct for incorrectly named variables that were introduced + # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 + # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking + # which is why we correct for the naming here. + num_attention_heads = num_attention_heads or attention_head_dim + + # Check inputs + if len(down_block_types) != len(up_block_types): + raise ValueError( + f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." + ) + + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." + ) + + if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." + ) + + # input + conv_in_padding = (conv_in_kernel - 1) // 2 + self.conv_in = nn.Conv2d( + in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding + ) + + # time + if time_embedding_type == "positional": + time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 + + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) + timestep_input_dim = block_out_channels[0] + else: + raise ValueError(f"{time_embedding_type} does not exist. 
Please make sure to use `positional`.") + + self.time_embedding = TimestepEmbedding( + timestep_input_dim, + time_embed_dim, + act_fn=act_fn, + post_act_fn=timestep_post_act, + cond_proj_dim=time_cond_proj_dim, + ) + + # class embedding + if class_embed_type is None and num_class_embeds is not None: + self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) + elif class_embed_type == "timestep": + self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) + elif class_embed_type == "identity": + self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) + elif class_embed_type == "projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" + ) + # The projection `class_embed_type` is the same as the timestep `class_embed_type` except + # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings + # 2. it projects from an arbitrary input dimension. + # + # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. + # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. + # As a result, `TimestepEmbedding` can be passed arbitrary vectors. + self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + elif class_embed_type == "simple_projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" + ) + self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) + else: + self.class_embedding = None + + if time_embedding_act_fn is None: + self.time_embed_act = None + else: + self.time_embed_act = get_activation(time_embedding_act_fn) + + self.down_blocks = nn.ModuleList([]) + self.up_blocks = nn.ModuleList([]) + + if isinstance(only_cross_attention, bool): + only_cross_attention = [only_cross_attention] * len(down_block_types) + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(down_block_types) + + if isinstance(cross_attention_dim, int): + cross_attention_dim = (cross_attention_dim,) * len(down_block_types) + + if isinstance(layers_per_block, int): + layers_per_block = [layers_per_block] * len(down_block_types) + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) + + if class_embeddings_concat: + # The time embeddings are concatenated with the class embeddings. 
The dimension of the + # time embeddings passed to the down, middle, and up blocks is twice the dimension of the + # regular time embeddings + blocks_time_embed_dim = time_embed_dim * 2 + else: + blocks_time_embed_dim = time_embed_dim + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block[i], + transformer_layers_per_block=transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + temb_channels=blocks_time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim[i], + num_attention_heads=num_attention_heads[i], + downsample_padding=downsample_padding, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + self.down_blocks.append(down_block) + + # mid + if mid_block_type == "UNetMidBlock2DCrossAttn": + self.mid_block = UNetMidBlock2DCrossAttn( + transformer_layers_per_block=transformer_layers_per_block[-1], + in_channels=block_out_channels[-1], + temb_channels=blocks_time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_time_scale_shift=resnet_time_scale_shift, + cross_attention_dim=cross_attention_dim[-1], + num_attention_heads=num_attention_heads[-1], + resnet_groups=norm_num_groups, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + ) + else: + raise ValueError( + f"unknown mid_block_type : {mid_block_type}. Should be `UNetMidBlock2DCrossAttn` for AudioLDM2." 
+ ) + + # count how many layers upsample the images + self.num_upsamplers = 0 + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + reversed_num_attention_heads = list(reversed(num_attention_heads)) + reversed_layers_per_block = list(reversed(layers_per_block)) + reversed_cross_attention_dim = list(reversed(cross_attention_dim)) + reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) + only_cross_attention = list(reversed(only_cross_attention)) + + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + is_final_block = i == len(block_out_channels) - 1 + + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + # add upsample block for all BUT final layer + if not is_final_block: + add_upsample = True + self.num_upsamplers += 1 + else: + add_upsample = False + + up_block = get_up_block( + up_block_type, + num_layers=reversed_layers_per_block[i] + 1, + transformer_layers_per_block=reversed_transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=blocks_time_embed_dim, + add_upsample=add_upsample, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=reversed_cross_attention_dim[i], + num_attention_heads=reversed_num_attention_heads[i], + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + if norm_num_groups is not None: + self.conv_norm_out = nn.GroupNorm( + num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps + ) + + self.conv_act = get_activation(act_fn) + + else: + self.conv_norm_out = None + self.conv_act = None + + conv_out_padding = (conv_out_kernel - 1) // 2 + self.conv_out = nn.Conv2d( + block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding + ) + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. 
+ + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice + def set_attention_slice(self, slice_size): + r""" + Enable sliced attention computation. + + When this option is enabled, the attention module splits the input tensor in slices to compute attention in + several steps. This is useful for saving some memory in exchange for a small decrease in speed. + + Args: + slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): + When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If + `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is + provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` + must be a multiple of `slice_size`. 
+ """ + sliceable_head_dims = [] + + def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): + if hasattr(module, "set_attention_slice"): + sliceable_head_dims.append(module.sliceable_head_dim) + + for child in module.children(): + fn_recursive_retrieve_sliceable_dims(child) + + # retrieve number of attention layers + for module in self.children(): + fn_recursive_retrieve_sliceable_dims(module) + + num_sliceable_layers = len(sliceable_head_dims) + + if slice_size == "auto": + # half the attention head size is usually a good trade-off between + # speed and memory + slice_size = [dim // 2 for dim in sliceable_head_dims] + elif slice_size == "max": + # make smallest slice possible + slice_size = num_sliceable_layers * [1] + + slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size + + if len(slice_size) != len(sliceable_head_dims): + raise ValueError( + f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" + f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." + ) + + for i in range(len(slice_size)): + size = slice_size[i] + dim = sliceable_head_dims[i] + if size is not None and size > dim: + raise ValueError(f"size {size} has to be smaller or equal to {dim}.") + + # Recursively walk through all the children. + # Any children which exposes the set_attention_slice method + # gets the message + def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): + if hasattr(module, "set_attention_slice"): + module.set_attention_slice(slice_size.pop()) + + for child in module.children(): + fn_recursive_set_attention_slice(child, slice_size) + + reversed_slice_size = list(reversed(slice_size)) + for module in self.children(): + fn_recursive_set_attention_slice(module, reversed_slice_size) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel._set_gradient_checkpointing + def _set_gradient_checkpointing(self, module, value=False): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = value + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + return_dict: bool = True, + encoder_hidden_states_1: Optional[torch.Tensor] = None, + encoder_attention_mask_1: Optional[torch.Tensor] = None, + ) -> Union[UNet2DConditionOutput, Tuple]: + r""" + The [`AudioLDM2UNet2DConditionModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor with the following shape `(batch, channel, height, width)`. + timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. + encoder_hidden_states (`torch.FloatTensor`): + The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. + encoder_attention_mask (`torch.Tensor`): + A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If + `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, + which adds large negative values to the attention scores corresponding to "discard" tokens. 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain + tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttnProcessor`]. + encoder_hidden_states_1 (`torch.FloatTensor`, *optional*): + A second set of encoder hidden states with shape `(batch, sequence_length_2, feature_dim_2)`. Can be + used to condition the model on a different set of embeddings to `encoder_hidden_states`. + encoder_attention_mask_1 (`torch.Tensor`, *optional*): + A cross-attention mask of shape `(batch, sequence_length_2)` is applied to `encoder_hidden_states_1`. + If `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, + which adds large negative values to the attention scores corresponding to "discard" tokens. + + Returns: + [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: + If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise + a `tuple` is returned where the first element is the sample tensor. + """ + # By default samples have to be AT least a multiple of the overall upsampling factor. + # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). + # However, the upsampling interpolation output size can be forced to fit any upsampling size + # on the fly if necessary. + default_overall_up_factor = 2**self.num_upsamplers + + # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` + forward_upsample_size = False + upsample_size = None + + if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): + logger.info("Forward upsample size to force interpolation output size.") + forward_upsample_size = True + + # ensure attention_mask is a bias, and give it a singleton query_tokens dimension + # expects mask of shape: + # [batch, key_tokens] + # adds singleton query_tokens dimension: + # [batch, 1, key_tokens] + # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: + # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) + # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) + if attention_mask is not None: + # assume that mask is expressed as: + # (1 = keep, 0 = discard) + # convert mask into a bias that can be added to attention scores: + # (keep = +0, discard = -10000.0) + attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1) + + # convert encoder_attention_mask to a bias the same way we do for attention_mask + if encoder_attention_mask is not None: + encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 + encoder_attention_mask = encoder_attention_mask.unsqueeze(1) + + if encoder_attention_mask_1 is not None: + encoder_attention_mask_1 = (1 - encoder_attention_mask_1.to(sample.dtype)) * -10000.0 + encoder_attention_mask_1 = encoder_attention_mask_1.unsqueeze(1) + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = sample.device.type == "mps" + if isinstance(timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(sample.shape[0]) + + t_emb = self.time_proj(timesteps) + + # `Timesteps` does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. + t_emb = t_emb.to(dtype=sample.dtype) + + emb = self.time_embedding(t_emb, timestep_cond) + aug_emb = None + + if self.class_embedding is not None: + if class_labels is None: + raise ValueError("class_labels should be provided when num_class_embeds > 0") + + if self.config.class_embed_type == "timestep": + class_labels = self.time_proj(class_labels) + + # `Timesteps` does not contain any weights and will always return f32 tensors + # there might be better ways to encapsulate this. + class_labels = class_labels.to(dtype=sample.dtype) + + class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) + + if self.config.class_embeddings_concat: + emb = torch.cat([emb, class_emb], dim=-1) + else: + emb = emb + class_emb + + emb = emb + aug_emb if aug_emb is not None else emb + + if self.time_embed_act is not None: + emb = self.time_embed_act(emb) + + # 2. pre-process + sample = self.conv_in(sample) + + # 3. down + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + encoder_hidden_states_1=encoder_hidden_states_1, + encoder_attention_mask_1=encoder_attention_mask_1, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb) + + down_block_res_samples += res_samples + + # 4. mid + if self.mid_block is not None: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + encoder_hidden_states_1=encoder_hidden_states_1, + encoder_attention_mask_1=encoder_attention_mask_1, + ) + + # 5. 
up + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block and forward_upsample_size: + upsample_size = down_block_res_samples[-1].shape[2:] + + if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + upsample_size=upsample_size, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + encoder_hidden_states_1=encoder_hidden_states_1, + encoder_attention_mask_1=encoder_attention_mask_1, + ) + else: + sample = upsample_block( + hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size + ) + + # 6. post-process + if self.conv_norm_out: + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + if not return_dict: + return (sample,) + + return UNet2DConditionOutput(sample=sample) + + +def get_down_block( + down_block_type, + num_layers, + in_channels, + out_channels, + temb_channels, + add_downsample, + resnet_eps, + resnet_act_fn, + transformer_layers_per_block=1, + num_attention_heads=None, + resnet_groups=None, + cross_attention_dim=None, + downsample_padding=None, + use_linear_projection=False, + only_cross_attention=False, + upcast_attention=False, + resnet_time_scale_shift="default", +): + down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type + if down_block_type == "DownBlock2D": + return DownBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif down_block_type == "CrossAttnDownBlock2D": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") + return CrossAttnDownBlock2D( + num_layers=num_layers, + transformer_layers_per_block=transformer_layers_per_block, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + raise ValueError(f"{down_block_type} does not exist.") + + +def get_up_block( + up_block_type, + num_layers, + in_channels, + out_channels, + prev_output_channel, + temb_channels, + add_upsample, + resnet_eps, + resnet_act_fn, + transformer_layers_per_block=1, + num_attention_heads=None, + resnet_groups=None, + cross_attention_dim=None, + use_linear_projection=False, + only_cross_attention=False, + upcast_attention=False, + resnet_time_scale_shift="default", +): + 
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type + if up_block_type == "UpBlock2D": + return UpBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif up_block_type == "CrossAttnUpBlock2D": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D") + return CrossAttnUpBlock2D( + num_layers=num_layers, + transformer_layers_per_block=transformer_layers_per_block, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + raise ValueError(f"{up_block_type} does not exist.") + + +class CrossAttnDownBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads=1, + cross_attention_dim=1280, + output_scale_factor=1.0, + downsample_padding=1, + add_downsample=True, + use_linear_projection=False, + only_cross_attention=False, + upcast_attention=False, + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + if isinstance(cross_attention_dim, int): + cross_attention_dim = (cross_attention_dim,) + if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4: + raise ValueError( + "Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention " + f"dims is less than or equal to 4. 
Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}" + ) + self.cross_attention_dim = cross_attention_dim + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + for j in range(len(cross_attention_dim)): + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block, + cross_attention_dim=cross_attention_dim[j], + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + double_self_attention=True if cross_attention_dim[j] is None else False, + ) + ) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states_1: Optional[torch.FloatTensor] = None, + encoder_attention_mask_1: Optional[torch.FloatTensor] = None, + ): + output_states = () + num_layers = len(self.resnets) + num_attention_per_layer = len(self.attentions) // num_layers + + encoder_hidden_states_1 = ( + encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states + ) + encoder_attention_mask_1 = ( + encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask + ) + + for i in range(num_layers): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.resnets[i]), + hidden_states, + temb, + **ckpt_kwargs, + ) + for idx, cross_attention_dim in enumerate(self.cross_attention_dim): + if cross_attention_dim is not None and idx <= 1: + forward_encoder_hidden_states = encoder_hidden_states + forward_encoder_attention_mask = encoder_attention_mask + elif cross_attention_dim is not None and idx > 1: + forward_encoder_hidden_states = encoder_hidden_states_1 + forward_encoder_attention_mask = encoder_attention_mask_1 + else: + forward_encoder_hidden_states = None + forward_encoder_attention_mask = None + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.attentions[i * num_attention_per_layer + idx], return_dict=False), + hidden_states, + forward_encoder_hidden_states, + None, # timestep + None, # 
class_labels + cross_attention_kwargs, + attention_mask, + forward_encoder_attention_mask, + **ckpt_kwargs, + )[0] + else: + hidden_states = self.resnets[i](hidden_states, temb) + for idx, cross_attention_dim in enumerate(self.cross_attention_dim): + if cross_attention_dim is not None and idx <= 1: + forward_encoder_hidden_states = encoder_hidden_states + forward_encoder_attention_mask = encoder_attention_mask + elif cross_attention_dim is not None and idx > 1: + forward_encoder_hidden_states = encoder_hidden_states_1 + forward_encoder_attention_mask = encoder_attention_mask_1 + else: + forward_encoder_hidden_states = None + forward_encoder_attention_mask = None + hidden_states = self.attentions[i * num_attention_per_layer + idx]( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=forward_encoder_hidden_states, + encoder_attention_mask=forward_encoder_attention_mask, + return_dict=False, + )[0] + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +class UNetMidBlock2DCrossAttn(nn.Module): + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads=1, + output_scale_factor=1.0, + cross_attention_dim=1280, + use_linear_projection=False, + upcast_attention=False, + ): + super().__init__() + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + + if isinstance(cross_attention_dim, int): + cross_attention_dim = (cross_attention_dim,) + if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4: + raise ValueError( + "Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention " + f"dims is less than or equal to 4. 
Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}" + ) + self.cross_attention_dim = cross_attention_dim + + # there is always at least one resnet + resnets = [ + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ] + attentions = [] + + for i in range(num_layers): + for j in range(len(cross_attention_dim)): + attentions.append( + Transformer2DModel( + num_attention_heads, + in_channels // num_attention_heads, + in_channels=in_channels, + num_layers=transformer_layers_per_block, + cross_attention_dim=cross_attention_dim[j], + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + double_self_attention=True if cross_attention_dim[j] is None else False, + ) + ) + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states_1: Optional[torch.FloatTensor] = None, + encoder_attention_mask_1: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + hidden_states = self.resnets[0](hidden_states, temb) + num_attention_per_layer = len(self.attentions) // (len(self.resnets) - 1) + + encoder_hidden_states_1 = ( + encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states + ) + encoder_attention_mask_1 = ( + encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask + ) + + for i in range(len(self.resnets[1:])): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + for idx, cross_attention_dim in enumerate(self.cross_attention_dim): + if cross_attention_dim is not None and idx <= 1: + forward_encoder_hidden_states = encoder_hidden_states + forward_encoder_attention_mask = encoder_attention_mask + elif cross_attention_dim is not None and idx > 1: + forward_encoder_hidden_states = encoder_hidden_states_1 + forward_encoder_attention_mask = encoder_attention_mask_1 + else: + forward_encoder_hidden_states = None + forward_encoder_attention_mask = None + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.attentions[i * num_attention_per_layer + idx], return_dict=False), + hidden_states, + forward_encoder_hidden_states, + None, # timestep + None, # class_labels + 
cross_attention_kwargs, + attention_mask, + forward_encoder_attention_mask, + **ckpt_kwargs, + )[0] + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.resnets[i + 1]), + hidden_states, + temb, + **ckpt_kwargs, + ) + else: + for idx, cross_attention_dim in enumerate(self.cross_attention_dim): + if cross_attention_dim is not None and idx <= 1: + forward_encoder_hidden_states = encoder_hidden_states + forward_encoder_attention_mask = encoder_attention_mask + elif cross_attention_dim is not None and idx > 1: + forward_encoder_hidden_states = encoder_hidden_states_1 + forward_encoder_attention_mask = encoder_attention_mask_1 + else: + forward_encoder_hidden_states = None + forward_encoder_attention_mask = None + hidden_states = self.attentions[i * num_attention_per_layer + idx]( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=forward_encoder_hidden_states, + encoder_attention_mask=forward_encoder_attention_mask, + return_dict=False, + )[0] + + hidden_states = self.resnets[i + 1](hidden_states, temb) + + return hidden_states + + +class CrossAttnUpBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads=1, + cross_attention_dim=1280, + output_scale_factor=1.0, + add_upsample=True, + use_linear_projection=False, + only_cross_attention=False, + upcast_attention=False, + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + if isinstance(cross_attention_dim, int): + cross_attention_dim = (cross_attention_dim,) + if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4: + raise ValueError( + "Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention " + f"dims is less than or equal to 4. 
Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}" + ) + self.cross_attention_dim = cross_attention_dim + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + for j in range(len(cross_attention_dim)): + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block, + cross_attention_dim=cross_attention_dim[j], + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + double_self_attention=True if cross_attention_dim[j] is None else False, + ) + ) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + upsample_size: Optional[int] = None, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states_1: Optional[torch.FloatTensor] = None, + encoder_attention_mask_1: Optional[torch.FloatTensor] = None, + ): + num_layers = len(self.resnets) + num_attention_per_layer = len(self.attentions) // num_layers + + encoder_hidden_states_1 = ( + encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states + ) + encoder_attention_mask_1 = ( + encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask + ) + + for i in range(num_layers): + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.resnets[i]), + hidden_states, + temb, + **ckpt_kwargs, + ) + for idx, cross_attention_dim in enumerate(self.cross_attention_dim): + if cross_attention_dim is not None and idx <= 1: + forward_encoder_hidden_states = encoder_hidden_states + forward_encoder_attention_mask = encoder_attention_mask + elif cross_attention_dim is not None and idx > 1: + forward_encoder_hidden_states = encoder_hidden_states_1 + forward_encoder_attention_mask = 
encoder_attention_mask_1 + else: + forward_encoder_hidden_states = None + forward_encoder_attention_mask = None + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.attentions[i * num_attention_per_layer + idx], return_dict=False), + hidden_states, + forward_encoder_hidden_states, + None, # timestep + None, # class_labels + cross_attention_kwargs, + attention_mask, + forward_encoder_attention_mask, + **ckpt_kwargs, + )[0] + else: + hidden_states = self.resnets[i](hidden_states, temb) + for idx, cross_attention_dim in enumerate(self.cross_attention_dim): + if cross_attention_dim is not None and idx <= 1: + forward_encoder_hidden_states = encoder_hidden_states + forward_encoder_attention_mask = encoder_attention_mask + elif cross_attention_dim is not None and idx > 1: + forward_encoder_hidden_states = encoder_hidden_states_1 + forward_encoder_attention_mask = encoder_attention_mask_1 + else: + forward_encoder_hidden_states = None + forward_encoder_attention_mask = None + hidden_states = self.attentions[i * num_attention_per_layer + idx]( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=forward_encoder_hidden_states, + encoder_attention_mask=forward_encoder_attention_mask, + return_dict=False, + )[0] + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size) + + return hidden_states diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py new file mode 100644 index 000000000..e01aa9929 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py @@ -0,0 +1,980 @@ +# Copyright 2024 CVSSP, ByteDance and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
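The up- and mid-blocks above route two conditioning streams through their attention layers: after each resnet, the cross-attention layers at index 0 and 1 attend over `encoder_hidden_states`, any later cross-attention layers attend over `encoder_hidden_states_1` (which falls back to `encoder_hidden_states` when not supplied), and a `None` entry in `cross_attention_dim` produces a pure self-attention layer (`double_self_attention=True`). The following is a minimal, standalone sketch of that routing rule only; the helper name `pick_encoder_stream` is hypothetical and not part of this patch.

```py
from typing import Optional, Sequence

def pick_encoder_stream(
    cross_attention_dim: Sequence[Optional[int]],
    idx: int,
    encoder_hidden_states,    # first conditioning stream (e.g. generated GPT2 hidden states)
    encoder_hidden_states_1,  # second conditioning stream (e.g. projected CLAP/T5 embeddings)
):
    """Return the tensor attention layer `idx` should attend over, or None for self-attention."""
    if cross_attention_dim[idx] is None:
        return None                    # layer was built with double_self_attention=True
    if idx <= 1:
        return encoder_hidden_states   # first two cross-attention layers use the first stream
    return encoder_hidden_states_1     # remaining cross-attention layers use the second stream
```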
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import ( + ClapFeatureExtractor, + ClapModel, + GPT2Model, + RobertaTokenizer, + RobertaTokenizerFast, + SpeechT5HifiGan, + T5EncoderModel, + T5Tokenizer, + T5TokenizerFast, +) + +from ...models import AutoencoderKL +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + is_accelerate_available, + is_accelerate_version, + is_librosa_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline +from .modeling_audioldm2 import AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel + + +if is_librosa_available(): + import librosa + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import scipy + >>> import torch + >>> from diffusers import AudioLDM2Pipeline + + >>> repo_id = "cvssp/audioldm2" + >>> pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> # define the prompts + >>> prompt = "The sound of a hammer hitting a wooden surface." + >>> negative_prompt = "Low quality." + + >>> # set the seed for generator + >>> generator = torch.Generator("cuda").manual_seed(0) + + >>> # run the generation + >>> audio = pipe( + ... prompt, + ... negative_prompt=negative_prompt, + ... num_inference_steps=200, + ... audio_length_in_s=10.0, + ... num_waveforms_per_prompt=3, + ... generator=generator, + ... ).audios + + >>> # save the best audio sample (index 0) as a .wav file + >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio[0]) + ``` +""" + + +def prepare_inputs_for_generation( + inputs_embeds, + attention_mask=None, + past_key_values=None, + **kwargs, +): + if past_key_values is not None: + # only last token for inputs_embeds if past is defined in kwargs + inputs_embeds = inputs_embeds[:, -1:] + + return { + "inputs_embeds": inputs_embeds, + "attention_mask": attention_mask, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + } + + +class AudioLDM2Pipeline(DiffusionPipeline): + r""" + Pipeline for text-to-audio generation using AudioLDM2. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.ClapModel`]): + First frozen text-encoder. AudioLDM2 uses the joint audio-text embedding model + [CLAP](https://huggingface.co/docs/transformers/model_doc/clap#transformers.CLAPTextModelWithProjection), + specifically the [laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant. The + text branch is used to encode the text prompt to a prompt embedding. The full audio-text model is used to + rank generated waveforms against the text prompt by computing similarity scores. + text_encoder_2 ([`~transformers.T5EncoderModel`]): + Second frozen text-encoder. AudioLDM2 uses the encoder of + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [google/flan-t5-large](https://huggingface.co/google/flan-t5-large) variant. 
+ projection_model ([`AudioLDM2ProjectionModel`]): + A trained model used to linearly project the hidden-states from the first and second text encoder models + and insert learned SOS and EOS token embeddings. The projected hidden-states from the two text encoders are + concatenated to give the input to the language model. + language_model ([`~transformers.GPT2Model`]): + An auto-regressive language model used to generate a sequence of hidden-states conditioned on the projected + outputs from the two text encoders. + tokenizer ([`~transformers.RobertaTokenizer`]): + Tokenizer to tokenize text for the first frozen text-encoder. + tokenizer_2 ([`~transformers.T5Tokenizer`]): + Tokenizer to tokenize text for the second frozen text-encoder. + feature_extractor ([`~transformers.ClapFeatureExtractor`]): + Feature extractor to pre-process generated audio waveforms to log-mel spectrograms for automatic scoring. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded audio latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + vocoder ([`~transformers.SpeechT5HifiGan`]): + Vocoder of class `SpeechT5HifiGan` to convert the mel-spectrogram latents to the final audio waveform. + """ + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: ClapModel, + text_encoder_2: T5EncoderModel, + projection_model: AudioLDM2ProjectionModel, + language_model: GPT2Model, + tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], + tokenizer_2: Union[T5Tokenizer, T5TokenizerFast], + feature_extractor: ClapFeatureExtractor, + unet: AudioLDM2UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + vocoder: SpeechT5HifiGan, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + projection_model=projection_model, + language_model=language_model, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + feature_extractor=feature_extractor, + unet=unet, + scheduler=scheduler, + vocoder=vocoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_slicing + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. 
+ """ + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") + + device = torch.device(f"cuda:{gpu_id}") + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + model_sequence = [ + self.text_encoder.text_model, + self.text_encoder.text_projection, + self.text_encoder_2, + self.projection_model, + self.language_model, + self.unet, + self.vae, + self.vocoder, + self.text_encoder, + ] + + hook = None + for cpu_offloaded_model in model_sequence: + _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) + + # We'll offload the last model manually. + self.final_offload_hook = hook + + def generate_language_model( + self, + inputs_embeds: torch.Tensor = None, + max_new_tokens: int = 8, + **model_kwargs, + ): + """ + + Generates a sequence of hidden-states from the language model, conditioned on the embedding inputs. + + Parameters: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + The sequence used as a prompt for the generation. + max_new_tokens (`int`): + Number of new tokens to generate. + model_kwargs (`Dict[str, Any]`, *optional*): + Ad hoc parametrization of additional model-specific kwargs that will be forwarded to the `forward` + function of the model. + + Return: + `inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + The sequence of generated hidden-states. + """ + max_new_tokens = max_new_tokens if max_new_tokens is not None else self.language_model.config.max_new_tokens + for _ in range(max_new_tokens): + # prepare model inputs + model_inputs = prepare_inputs_for_generation(inputs_embeds, **model_kwargs) + + # forward pass to get next hidden states + output = self.language_model(**model_inputs, return_dict=True) + + next_hidden_states = output.last_hidden_state + + # Update the model input + inputs_embeds = torch.cat([inputs_embeds, next_hidden_states[:, -1:, :]], dim=1) + + # Update generated hidden states, model inputs, and length for next step + model_kwargs = self.language_model._update_model_kwargs_for_generation(output, model_kwargs) + + return inputs_embeds[:, -max_new_tokens:, :] + + def encode_prompt( + self, + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + generated_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_generated_prompt_embeds: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + negative_attention_mask: Optional[torch.LongTensor] = None, + max_new_tokens: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device (`torch.device`): + torch device + num_waveforms_per_prompt (`int`): + number of waveforms that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the audio generation. If not defined, one has to pass + `negative_prompt_embeds` instead. 
Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-computed text embeddings from the Flan T5 model. Can be used to easily tweak text inputs, *e.g.* + prompt weighting. If not provided, text embeddings will be computed from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-computed negative text embeddings from the Flan T5 model. Can be used to easily tweak text inputs, + *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be computed from + `negative_prompt` input argument. + generated_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings from the GPT2 langauge model. Can be used to easily tweak text inputs, + *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input + argument. + negative_generated_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings from the GPT2 language model. Can be used to easily tweak text + inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be computed from + `negative_prompt` input argument. + attention_mask (`torch.LongTensor`, *optional*): + Pre-computed attention mask to be applied to the `prompt_embeds`. If not provided, attention mask will + be computed from `prompt` input argument. + negative_attention_mask (`torch.LongTensor`, *optional*): + Pre-computed attention mask to be applied to the `negative_prompt_embeds`. If not provided, attention + mask will be computed from `negative_prompt` input argument. + max_new_tokens (`int`, *optional*, defaults to None): + The number of new tokens to generate with the GPT2 language model. + Returns: + prompt_embeds (`torch.FloatTensor`): + Text embeddings from the Flan T5 model. + attention_mask (`torch.LongTensor`): + Attention mask to be applied to the `prompt_embeds`. + generated_prompt_embeds (`torch.FloatTensor`): + Text embeddings generated from the GPT2 langauge model. + + Example: + + ```python + >>> import scipy + >>> import torch + >>> from diffusers import AudioLDM2Pipeline + + >>> repo_id = "cvssp/audioldm2" + >>> pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> # Get text embedding vectors + >>> prompt_embeds, attention_mask, generated_prompt_embeds = pipe.encode_prompt( + ... prompt="Techno music with a strong, upbeat tempo and high melodic riffs", + ... device="cuda", + ... do_classifier_free_guidance=True, + ... ) + + >>> # Pass text embeddings to pipeline for text-conditional audio generation + >>> audio = pipe( + ... prompt_embeds=prompt_embeds, + ... attention_mask=attention_mask, + ... generated_prompt_embeds=generated_prompt_embeds, + ... num_inference_steps=200, + ... audio_length_in_s=10.0, + ... 
).audios[0] + + >>> # save generated audio sample + >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio) + ```""" + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] + text_encoders = [self.text_encoder, self.text_encoder_2] + + if prompt_embeds is None: + prompt_embeds_list = [] + attention_mask_list = [] + + for tokenizer, text_encoder in zip(tokenizers, text_encoders): + text_inputs = tokenizer( + prompt, + padding="max_length" if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast)) else True, + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + f"The following part of your input was truncated because {text_encoder.config.model_type} can " + f"only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_input_ids = text_input_ids.to(device) + attention_mask = attention_mask.to(device) + + if text_encoder.config.model_type == "clap": + prompt_embeds = text_encoder.get_text_features( + text_input_ids, + attention_mask=attention_mask, + ) + # append the seq-len dim: (bs, hidden_size) -> (bs, seq_len, hidden_size) + prompt_embeds = prompt_embeds[:, None, :] + # make sure that we attend to this single hidden-state + attention_mask = attention_mask.new_ones((batch_size, 1)) + else: + prompt_embeds = text_encoder( + text_input_ids, + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + prompt_embeds_list.append(prompt_embeds) + attention_mask_list.append(attention_mask) + + projection_output = self.projection_model( + hidden_states=prompt_embeds_list[0], + hidden_states_1=prompt_embeds_list[1], + attention_mask=attention_mask_list[0], + attention_mask_1=attention_mask_list[1], + ) + projected_prompt_embeds = projection_output.hidden_states + projected_attention_mask = projection_output.attention_mask + + generated_prompt_embeds = self.generate_language_model( + projected_prompt_embeds, + attention_mask=projected_attention_mask, + max_new_tokens=max_new_tokens, + ) + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + attention_mask = ( + attention_mask.to(device=device) + if attention_mask is not None + else torch.ones(prompt_embeds.shape[:2], dtype=torch.long, device=device) + ) + generated_prompt_embeds = generated_prompt_embeds.to(dtype=self.language_model.dtype, device=device) + + bs_embed, seq_len, hidden_size = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len, hidden_size) + + # duplicate attention mask for each generation per prompt + attention_mask = attention_mask.repeat(1, num_waveforms_per_prompt) + attention_mask = attention_mask.view(bs_embed * num_waveforms_per_prompt, seq_len) + + bs_embed, 
seq_len, hidden_size = generated_prompt_embeds.shape + # duplicate generated embeddings for each generation per prompt, using mps friendly method + generated_prompt_embeds = generated_prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) + generated_prompt_embeds = generated_prompt_embeds.view( + bs_embed * num_waveforms_per_prompt, seq_len, hidden_size + ) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + negative_prompt_embeds_list = [] + negative_attention_mask_list = [] + max_length = prompt_embeds.shape[1] + for tokenizer, text_encoder in zip(tokenizers, text_encoders): + uncond_input = tokenizer( + uncond_tokens, + padding="max_length", + max_length=tokenizer.model_max_length + if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast)) + else max_length, + truncation=True, + return_tensors="pt", + ) + + uncond_input_ids = uncond_input.input_ids.to(device) + negative_attention_mask = uncond_input.attention_mask.to(device) + + if text_encoder.config.model_type == "clap": + negative_prompt_embeds = text_encoder.get_text_features( + uncond_input_ids, + attention_mask=negative_attention_mask, + ) + # append the seq-len dim: (bs, hidden_size) -> (bs, seq_len, hidden_size) + negative_prompt_embeds = negative_prompt_embeds[:, None, :] + # make sure that we attend to this single hidden-state + negative_attention_mask = negative_attention_mask.new_ones((batch_size, 1)) + else: + negative_prompt_embeds = text_encoder( + uncond_input_ids, + attention_mask=negative_attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + negative_attention_mask_list.append(negative_attention_mask) + + projection_output = self.projection_model( + hidden_states=negative_prompt_embeds_list[0], + hidden_states_1=negative_prompt_embeds_list[1], + attention_mask=negative_attention_mask_list[0], + attention_mask_1=negative_attention_mask_list[1], + ) + negative_projected_prompt_embeds = projection_output.hidden_states + negative_projected_attention_mask = projection_output.attention_mask + + negative_generated_prompt_embeds = self.generate_language_model( + negative_projected_prompt_embeds, + attention_mask=negative_projected_attention_mask, + max_new_tokens=max_new_tokens, + ) + + if do_classifier_free_guidance: + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + negative_attention_mask = ( + negative_attention_mask.to(device=device) + if negative_attention_mask is not None + else torch.ones(negative_prompt_embeds.shape[:2], dtype=torch.long, device=device) + ) + negative_generated_prompt_embeds = negative_generated_prompt_embeds.to( + dtype=self.language_model.dtype, 
device=device + ) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len, -1) + + # duplicate unconditional attention mask for each generation per prompt + negative_attention_mask = negative_attention_mask.repeat(1, num_waveforms_per_prompt) + negative_attention_mask = negative_attention_mask.view(batch_size * num_waveforms_per_prompt, seq_len) + + # duplicate unconditional generated embeddings for each generation per prompt + seq_len = negative_generated_prompt_embeds.shape[1] + negative_generated_prompt_embeds = negative_generated_prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) + negative_generated_prompt_embeds = negative_generated_prompt_embeds.view( + batch_size * num_waveforms_per_prompt, seq_len, -1 + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + attention_mask = torch.cat([negative_attention_mask, attention_mask]) + generated_prompt_embeds = torch.cat([negative_generated_prompt_embeds, generated_prompt_embeds]) + + return prompt_embeds, attention_mask, generated_prompt_embeds + + # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.mel_spectrogram_to_waveform + def mel_spectrogram_to_waveform(self, mel_spectrogram): + if mel_spectrogram.dim() == 4: + mel_spectrogram = mel_spectrogram.squeeze(1) + + waveform = self.vocoder(mel_spectrogram) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + waveform = waveform.cpu().float() + return waveform + + def score_waveforms(self, text, audio, num_waveforms_per_prompt, device, dtype): + if not is_librosa_available(): + logger.info( + "Automatic scoring of the generated audio waveforms against the input prompt text requires the " + "`librosa` package to resample the generated waveforms. Returning the audios in the order they were " + "generated. To enable automatic scoring, install `librosa` with: `pip install librosa`." + ) + return audio + inputs = self.tokenizer(text, return_tensors="pt", padding=True) + resampled_audio = librosa.resample( + audio.numpy(), orig_sr=self.vocoder.config.sampling_rate, target_sr=self.feature_extractor.sampling_rate + ) + inputs["input_features"] = self.feature_extractor( + list(resampled_audio), return_tensors="pt", sampling_rate=self.feature_extractor.sampling_rate + ).input_features.type(dtype) + inputs = inputs.to(device) + + # compute the audio-text similarity score using the CLAP model + logits_per_text = self.text_encoder(**inputs).logits_per_text + # sort by the highest matching generations per prompt + indices = torch.argsort(logits_per_text, dim=1, descending=True)[:, :num_waveforms_per_prompt] + audio = torch.index_select(audio, 0, indices.reshape(-1).cpu()) + return audio + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + generated_prompt_embeds=None, + negative_generated_prompt_embeds=None, + attention_mask=None, + negative_attention_mask=None, + ): + min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor + if audio_length_in_s < min_audio_length_in_s: + raise ValueError( + f"`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but " + f"is {audio_length_in_s}." + ) + + if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: + raise ValueError( + f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the " + f"VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of " + f"{self.vae_scale_factor}." + ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and (prompt_embeds is None or generated_prompt_embeds is None): + raise ValueError( + "Provide either `prompt`, or `prompt_embeds` and `generated_prompt_embeds`. Cannot leave " + "`prompt` undefined without specifying both `prompt_embeds` and `generated_prompt_embeds`." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_embeds is not None and negative_generated_prompt_embeds is None: + raise ValueError( + "Cannot forward `negative_prompt_embeds` without `negative_generated_prompt_embeds`. Ensure that" + "both arguments are specified" + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + if attention_mask is not None and attention_mask.shape != prompt_embeds.shape[:2]: + raise ValueError( + "`attention_mask should have the same batch size and sequence length as `prompt_embeds`, but got:" + f"`attention_mask: {attention_mask.shape} != `prompt_embeds` {prompt_embeds.shape}" + ) + + if generated_prompt_embeds is not None and negative_generated_prompt_embeds is not None: + if generated_prompt_embeds.shape != negative_generated_prompt_embeds.shape: + raise ValueError( + "`generated_prompt_embeds` and `negative_generated_prompt_embeds` must have the same shape when " + f"passed directly, but got: `generated_prompt_embeds` {generated_prompt_embeds.shape} != " + f"`negative_generated_prompt_embeds` {negative_generated_prompt_embeds.shape}." + ) + if ( + negative_attention_mask is not None + and negative_attention_mask.shape != negative_prompt_embeds.shape[:2] + ): + raise ValueError( + "`attention_mask should have the same batch size and sequence length as `prompt_embeds`, but got:" + f"`attention_mask: {negative_attention_mask.shape} != `prompt_embeds` {negative_prompt_embeds.shape}" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents with width->self.vocoder.config.model_in_dim + def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + height // self.vae_scale_factor, + self.vocoder.config.model_in_dim // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + audio_length_in_s: Optional[float] = None, + num_inference_steps: int = 200, + guidance_scale: float = 3.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_waveforms_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + generated_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_generated_prompt_embeds: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + negative_attention_mask: Optional[torch.LongTensor] = None, + max_new_tokens: Optional[int] = None, + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + output_type: Optional[str] = "np", + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`. 
+ audio_length_in_s (`int`, *optional*, defaults to 10.24): + The length of the generated audio sample in seconds. + num_inference_steps (`int`, *optional*, defaults to 200): + The number of denoising steps. More denoising steps usually lead to a higher quality audio at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 3.5): + A higher guidance scale value encourages the model to generate audio that is closely linked to the text + `prompt` at the expense of lower sound quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in audio generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_waveforms_per_prompt (`int`, *optional*, defaults to 1): + The number of waveforms to generate per prompt. If `num_waveforms_per_prompt > 1`, then automatic + scoring is performed between the generated outputs and the text prompt. This scoring ranks the + generated waveforms based on their cosine similarity with the text input in the joint text-audio + embedding space. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for spectrogram + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + generated_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings from the GPT2 langauge model. Can be used to easily tweak text inputs, + *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input + argument. + negative_generated_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings from the GPT2 language model. Can be used to easily tweak text + inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be computed from + `negative_prompt` input argument. + attention_mask (`torch.LongTensor`, *optional*): + Pre-computed attention mask to be applied to the `prompt_embeds`. If not provided, attention mask will + be computed from `prompt` input argument. + negative_attention_mask (`torch.LongTensor`, *optional*): + Pre-computed attention mask to be applied to the `negative_prompt_embeds`. If not provided, attention + mask will be computed from `negative_prompt` input argument. 
+ max_new_tokens (`int`, *optional*, defaults to None): + Number of new tokens to generate with the GPT2 language model. If not provided, number of tokens will + be taken from the config of the model. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + output_type (`str`, *optional*, defaults to `"np"`): + The output format of the generated audio. Choose between `"np"` to return a NumPy `np.ndarray` or + `"pt"` to return a PyTorch `torch.Tensor` object. Set to `"latent"` to return the latent diffusion + model (LDM) output. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated audio. + """ + # 0. Convert audio input length from seconds to spectrogram height + vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate + + if audio_length_in_s is None: + audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor + + height = int(audio_length_in_s / vocoder_upsample_factor) + + original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) + if height % self.vae_scale_factor != 0: + height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor + logger.info( + f"Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} " + f"so that it can be handled by the model. It will be cut to {audio_length_in_s} after the " + f"denoising process." + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + generated_prompt_embeds, + negative_generated_prompt_embeds, + attention_mask, + negative_attention_mask, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. 
Encode input prompt + prompt_embeds, attention_mask, generated_prompt_embeds = self.encode_prompt( + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + generated_prompt_embeds=generated_prompt_embeds, + negative_generated_prompt_embeds=negative_generated_prompt_embeds, + attention_mask=attention_mask, + negative_attention_mask=negative_attention_mask, + max_new_tokens=max_new_tokens, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_waveforms_per_prompt, + num_channels_latents, + height, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=generated_prompt_embeds, + encoder_hidden_states_1=prompt_embeds, + encoder_attention_mask_1=attention_mask, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + self.maybe_free_model_hooks() + + # 8. Post-processing + if not output_type == "latent": + latents = 1 / self.vae.config.scaling_factor * latents + mel_spectrogram = self.vae.decode(latents).sample + else: + return AudioPipelineOutput(audios=latents) + + audio = self.mel_spectrogram_to_waveform(mel_spectrogram) + + audio = audio[:, :original_waveform_length] + + # 9. 
Automatic scoring + if num_waveforms_per_prompt > 1 and prompt is not None: + audio = self.score_waveforms( + text=prompt, + audio=audio, + num_waveforms_per_prompt=num_waveforms_per_prompt, + device=device, + dtype=prompt_embeds.dtype, + ) + + if output_type == "np": + audio = audio.numpy() + + if not return_dict: + return (audio,) + + return AudioPipelineOutput(audios=audio) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/auto_pipeline.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/auto_pipeline.py new file mode 100644 index 000000000..fc30fc4d2 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/auto_pipeline.py @@ -0,0 +1,987 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import OrderedDict + +from huggingface_hub.utils import validate_hf_hub_args + +from ..configuration_utils import ConfigMixin +from .controlnet import ( + StableDiffusionControlNetImg2ImgPipeline, + StableDiffusionControlNetInpaintPipeline, + StableDiffusionControlNetPipeline, + StableDiffusionXLControlNetImg2ImgPipeline, + StableDiffusionXLControlNetInpaintPipeline, + StableDiffusionXLControlNetPipeline, +) +from .deepfloyd_if import IFImg2ImgPipeline, IFInpaintingPipeline, IFPipeline +from .kandinsky import ( + KandinskyCombinedPipeline, + KandinskyImg2ImgCombinedPipeline, + KandinskyImg2ImgPipeline, + KandinskyInpaintCombinedPipeline, + KandinskyInpaintPipeline, + KandinskyPipeline, +) +from .kandinsky2_2 import ( + KandinskyV22CombinedPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22Img2ImgPipeline, + KandinskyV22InpaintCombinedPipeline, + KandinskyV22InpaintPipeline, + KandinskyV22Pipeline, +) +from .kandinsky3 import Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline +from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline +from .pixart_alpha import PixArtAlphaPipeline +from .stable_diffusion import ( + StableDiffusionImg2ImgPipeline, + StableDiffusionInpaintPipeline, + StableDiffusionPipeline, +) +from .stable_diffusion_xl import ( + StableDiffusionXLImg2ImgPipeline, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLPipeline, +) +from .wuerstchen import WuerstchenCombinedPipeline, WuerstchenDecoderPipeline + + +AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict( + [ + ("stable-diffusion", StableDiffusionPipeline), + ("stable-diffusion-xl", StableDiffusionXLPipeline), + ("if", IFPipeline), + ("kandinsky", KandinskyCombinedPipeline), + ("kandinsky22", KandinskyV22CombinedPipeline), + ("kandinsky3", Kandinsky3Pipeline), + ("stable-diffusion-controlnet", StableDiffusionControlNetPipeline), + ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetPipeline), + ("wuerstchen", WuerstchenCombinedPipeline), + ("lcm", LatentConsistencyModelPipeline), + ("pixart", PixArtAlphaPipeline), + ] +) + 
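The registry above is a plain `OrderedDict` keyed by model-family name, and `_get_task_class` (defined below) resolves a concrete pipeline class name to its task-specific counterpart through these mappings. A minimal usage sketch, assuming the vendored `src/diffusers` tree added by this patch is importable as `diffusers`:

```py
from diffusers.pipelines.auto_pipeline import AUTO_TEXT2IMAGE_PIPELINES_MAPPING

# The text-to-image class for a model family is a direct dictionary lookup.
cls = AUTO_TEXT2IMAGE_PIPELINES_MAPPING["stable-diffusion"]
print(cls.__name__)  # -> "StableDiffusionPipeline"
```

The image-to-image and inpaint registries that follow reuse the same family keys, which is how the `AutoPipelineFor*` classes find the matching class for a different task given an already-loaded pipeline.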
+AUTO_IMAGE2IMAGE_PIPELINES_MAPPING = OrderedDict( + [ + ("stable-diffusion", StableDiffusionImg2ImgPipeline), + ("stable-diffusion-xl", StableDiffusionXLImg2ImgPipeline), + ("if", IFImg2ImgPipeline), + ("kandinsky", KandinskyImg2ImgCombinedPipeline), + ("kandinsky22", KandinskyV22Img2ImgCombinedPipeline), + ("kandinsky3", Kandinsky3Img2ImgPipeline), + ("stable-diffusion-controlnet", StableDiffusionControlNetImg2ImgPipeline), + ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetImg2ImgPipeline), + ("lcm", LatentConsistencyModelImg2ImgPipeline), + ] +) + +AUTO_INPAINT_PIPELINES_MAPPING = OrderedDict( + [ + ("stable-diffusion", StableDiffusionInpaintPipeline), + ("stable-diffusion-xl", StableDiffusionXLInpaintPipeline), + ("if", IFInpaintingPipeline), + ("kandinsky", KandinskyInpaintCombinedPipeline), + ("kandinsky22", KandinskyV22InpaintCombinedPipeline), + ("stable-diffusion-controlnet", StableDiffusionControlNetInpaintPipeline), + ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetInpaintPipeline), + ] +) + +_AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict( + [ + ("kandinsky", KandinskyPipeline), + ("kandinsky22", KandinskyV22Pipeline), + ("wuerstchen", WuerstchenDecoderPipeline), + ] +) +_AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict( + [ + ("kandinsky", KandinskyImg2ImgPipeline), + ("kandinsky22", KandinskyV22Img2ImgPipeline), + ] +) +_AUTO_INPAINT_DECODER_PIPELINES_MAPPING = OrderedDict( + [ + ("kandinsky", KandinskyInpaintPipeline), + ("kandinsky22", KandinskyV22InpaintPipeline), + ] +) + +SUPPORTED_TASKS_MAPPINGS = [ + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, + AUTO_INPAINT_PIPELINES_MAPPING, + _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING, + _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING, + _AUTO_INPAINT_DECODER_PIPELINES_MAPPING, +] + + +def _get_connected_pipeline(pipeline_cls): + # for now connected pipelines can only be loaded from decoder pipelines, such as kandinsky-community/kandinsky-2-2-decoder + if pipeline_cls in _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING.values(): + return _get_task_class( + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False + ) + if pipeline_cls in _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING.values(): + return _get_task_class( + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False + ) + if pipeline_cls in _AUTO_INPAINT_DECODER_PIPELINES_MAPPING.values(): + return _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False) + + +def _get_task_class(mapping, pipeline_class_name, throw_error_if_not_exist: bool = True): + def get_model(pipeline_class_name): + for task_mapping in SUPPORTED_TASKS_MAPPINGS: + for model_name, pipeline in task_mapping.items(): + if pipeline.__name__ == pipeline_class_name: + return model_name + + model_name = get_model(pipeline_class_name) + + if model_name is not None: + task_class = mapping.get(model_name, None) + if task_class is not None: + return task_class + + if throw_error_if_not_exist: + raise ValueError(f"AutoPipeline can't find a pipeline linked to {pipeline_class_name} for {model_name}") + + +class AutoPipelineForText2Image(ConfigMixin): + r""" + + [`AutoPipelineForText2Image`] is a generic pipeline class that instantiates a text-to-image pipeline class. The + specific underlying pipeline class is automatically selected from either the + [`~AutoPipelineForText2Image.from_pretrained`] or [`~AutoPipelineForText2Image.from_pipe`] methods. 
+ + This class cannot be instantiated using `__init__()` (throws an error). + + Class attributes: + + - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the + diffusion pipeline's components. + + """ + + config_name = "model_index.json" + + def __init__(self, *args, **kwargs): + raise EnvironmentError( + f"{self.__class__.__name__} is designed to be instantiated " + f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " + f"`{self.__class__.__name__}.from_pipe(pipeline)` methods." + ) + + @classmethod + @validate_hf_hub_args + def from_pretrained(cls, pretrained_model_or_path, **kwargs): + r""" + Instantiates a text-to-image Pytorch diffusion pipeline from pretrained pipeline weight. + + The from_pretrained() method takes care of returning the correct pipeline class instance by: + 1. Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its + config object + 2. Find the text-to-image pipeline linked to the pipeline class using pattern matching on pipeline class + name. + + If a `controlnet` argument is passed, it will instantiate a [`StableDiffusionControlNetPipeline`] object. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + If you get the error message below, you need to finetune the weights for your downstream task: + + ``` + Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: + - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated + You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. + ``` + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline + hosted on the Hub. + - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights + saved using + [`~DiffusionPipeline.save_pretrained`]. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the + dtype is automatically derived from the model's weights. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. 
If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + custom_revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id similar to + `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a + custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn’t need to be defined for each + parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the + same device. + + Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier for the maximum memory. Will default to the maximum memory available for + each GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + The path to offload weights if device_map contains the value `"disk"`. + offload_state_dict (`bool`, *optional*): + If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if + the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` + when there is some disk offload. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the safetensors weights are downloaded if they're available **and** if the + safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors + weights. If set to `False`, safetensors weights are not loaded. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline + class). The overwritten components are passed directly to the pipelines `__init__` method. See example + below for more information. + variant (`str`, *optional*): + Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when + loading `from_flax`. 
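+
+        As a minimal sketch of how these arguments combine (the checkpoint and ControlNet repo ids
+        are only examples), passing a `controlnet` together with `torch_dtype` is expected to
+        resolve to a [`StableDiffusionControlNetPipeline`] loaded in half precision:
+
+        ```py
+        >>> import torch
+        >>> from diffusers import AutoPipelineForText2Image, ControlNetModel
+
+        >>> controlnet = ControlNetModel.from_pretrained(
+        ...     "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
+        ... )
+        >>> pipe = AutoPipelineForText2Image.from_pretrained(
+        ...     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+        ... )
+        >>> pipe.__class__.__name__
+        'StableDiffusionControlNetPipeline'
+        ```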
+ + + + To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with + `huggingface-cli login`. + + + + Examples: + + ```py + >>> from diffusers import AutoPipelineForText2Image + + >>> pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> image = pipeline(prompt).images[0] + ``` + """ + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) + + load_config_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "resume_download": resume_download, + "proxies": proxies, + "token": token, + "local_files_only": local_files_only, + "revision": revision, + } + + config = cls.load_config(pretrained_model_or_path, **load_config_kwargs) + orig_class_name = config["_class_name"] + + if "controlnet" in kwargs: + orig_class_name = config["_class_name"].replace("Pipeline", "ControlNetPipeline") + + text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, orig_class_name) + + kwargs = {**load_config_kwargs, **kwargs} + return text_2_image_cls.from_pretrained(pretrained_model_or_path, **kwargs) + + @classmethod + def from_pipe(cls, pipeline, **kwargs): + r""" + Instantiates a text-to-image Pytorch diffusion pipeline from another instantiated diffusion pipeline class. + + The from_pipe() method takes care of returning the correct pipeline class instance by finding the text-to-image + pipeline linked to the pipeline class using pattern matching on pipeline class name. + + All the modules the pipeline contains will be used to initialize the new pipeline without reallocating + additional memory. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + Parameters: + pipeline (`DiffusionPipeline`): + an instantiated `DiffusionPipeline` object + + ```py + >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image + + >>> pipe_i2i = AutoPipelineForImage2Image.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", requires_safety_checker=False + ... 
) + + >>> pipe_t2i = AutoPipelineForText2Image.from_pipe(pipe_i2i) + >>> image = pipe_t2i(prompt).images[0] + ``` + """ + + original_config = dict(pipeline.config) + original_cls_name = pipeline.__class__.__name__ + + # derive the pipeline class to instantiate + text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, original_cls_name) + + if "controlnet" in kwargs: + if kwargs["controlnet"] is not None: + text_2_image_cls = _get_task_class( + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, + text_2_image_cls.__name__.replace("ControlNet", "").replace("Pipeline", "ControlNetPipeline"), + ) + else: + text_2_image_cls = _get_task_class( + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, + text_2_image_cls.__name__.replace("ControlNetPipeline", "Pipeline"), + ) + + # define expected module and optional kwargs given the pipeline signature + expected_modules, optional_kwargs = text_2_image_cls._get_signature_keys(text_2_image_cls) + + pretrained_model_name_or_path = original_config.pop("_name_or_path", None) + + # allow users pass modules in `kwargs` to override the original pipeline's components + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + original_class_obj = { + k: pipeline.components[k] + for k, v in pipeline.components.items() + if k in expected_modules and k not in passed_class_obj + } + + # allow users pass optional kwargs to override the original pipelines config attribute + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + original_pipe_kwargs = { + k: original_config[k] + for k, v in original_config.items() + if k in optional_kwargs and k not in passed_pipe_kwargs + } + + # config that were not expected by original pipeline is stored as private attribute + # we will pass them as optional arguments if they can be accepted by the pipeline + additional_pipe_kwargs = [ + k[1:] + for k in original_config.keys() + if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs + ] + for k in additional_pipe_kwargs: + original_pipe_kwargs[k] = original_config.pop(f"_{k}") + + text_2_image_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs} + + # store unused config as private attribute + unused_original_config = { + f"{'' if k.startswith('_') else '_'}{k}": original_config[k] + for k, v in original_config.items() + if k not in text_2_image_kwargs + } + + missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(text_2_image_kwargs.keys()) + + if len(missing_modules) > 0: + raise ValueError( + f"Pipeline {text_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" + ) + + model = text_2_image_cls(**text_2_image_kwargs) + model.register_to_config(_name_or_path=pretrained_model_name_or_path) + model.register_to_config(**unused_original_config) + + return model + + +class AutoPipelineForImage2Image(ConfigMixin): + r""" + + [`AutoPipelineForImage2Image`] is a generic pipeline class that instantiates an image-to-image pipeline class. The + specific underlying pipeline class is automatically selected from either the + [`~AutoPipelineForImage2Image.from_pretrained`] or [`~AutoPipelineForImage2Image.from_pipe`] methods. + + This class cannot be instantiated using `__init__()` (throws an error). + + Class attributes: + + - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the + diffusion pipeline's components. 
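+
+    Because [`~AutoPipelineForImage2Image.from_pipe`] reuses the components of the source pipeline
+    rather than reallocating them, the resulting pipelines share the same module objects. A minimal
+    sketch (the checkpoint id is only an example):
+
+    ```py
+    >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
+
+    >>> pipe_t2i = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5")
+    >>> pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe_t2i)
+    >>> pipe_i2i.unet is pipe_t2i.unet
+    True
+    ```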
+ + """ + + config_name = "model_index.json" + + def __init__(self, *args, **kwargs): + raise EnvironmentError( + f"{self.__class__.__name__} is designed to be instantiated " + f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " + f"`{self.__class__.__name__}.from_pipe(pipeline)` methods." + ) + + @classmethod + @validate_hf_hub_args + def from_pretrained(cls, pretrained_model_or_path, **kwargs): + r""" + Instantiates a image-to-image Pytorch diffusion pipeline from pretrained pipeline weight. + + The from_pretrained() method takes care of returning the correct pipeline class instance by: + 1. Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its + config object + 2. Find the image-to-image pipeline linked to the pipeline class using pattern matching on pipeline class + name. + + If a `controlnet` argument is passed, it will instantiate a [`StableDiffusionControlNetImg2ImgPipeline`] + object. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + If you get the error message below, you need to finetune the weights for your downstream task: + + ``` + Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: + - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated + You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. + ``` + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline + hosted on the Hub. + - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights + saved using + [`~DiffusionPipeline.save_pretrained`]. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the + dtype is automatically derived from the model's weights. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. 
If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + custom_revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id similar to + `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a + custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn’t need to be defined for each + parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the + same device. + + Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier for the maximum memory. Will default to the maximum memory available for + each GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + The path to offload weights if device_map contains the value `"disk"`. + offload_state_dict (`bool`, *optional*): + If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if + the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` + when there is some disk offload. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the safetensors weights are downloaded if they're available **and** if the + safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors + weights. If set to `False`, safetensors weights are not loaded. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline + class). The overwritten components are passed directly to the pipelines `__init__` method. See example + below for more information. + variant (`str`, *optional*): + Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when + loading `from_flax`. + + + + To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with + `huggingface-cli login`. 
+ + + + Examples: + + ```py + >>> from diffusers import AutoPipelineForImage2Image + + >>> pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> image = pipeline(prompt, image).images[0] + ``` + """ + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) + + load_config_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "resume_download": resume_download, + "proxies": proxies, + "token": token, + "local_files_only": local_files_only, + "revision": revision, + } + + config = cls.load_config(pretrained_model_or_path, **load_config_kwargs) + orig_class_name = config["_class_name"] + + if "controlnet" in kwargs: + orig_class_name = config["_class_name"].replace("Pipeline", "ControlNetPipeline") + + image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, orig_class_name) + + kwargs = {**load_config_kwargs, **kwargs} + return image_2_image_cls.from_pretrained(pretrained_model_or_path, **kwargs) + + @classmethod + def from_pipe(cls, pipeline, **kwargs): + r""" + Instantiates a image-to-image Pytorch diffusion pipeline from another instantiated diffusion pipeline class. + + The from_pipe() method takes care of returning the correct pipeline class instance by finding the + image-to-image pipeline linked to the pipeline class using pattern matching on pipeline class name. + + All the modules the pipeline contains will be used to initialize the new pipeline without reallocating + additional memory. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + Parameters: + pipeline (`DiffusionPipeline`): + an instantiated `DiffusionPipeline` object + + Examples: + + ```py + >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image + + >>> pipe_t2i = AutoPipelineForText2Image.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", requires_safety_checker=False + ... 
) + + >>> pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe_t2i) + >>> image = pipe_i2i(prompt, image).images[0] + ``` + """ + + original_config = dict(pipeline.config) + original_cls_name = pipeline.__class__.__name__ + + # derive the pipeline class to instantiate + image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, original_cls_name) + + if "controlnet" in kwargs: + if kwargs["controlnet"] is not None: + image_2_image_cls = _get_task_class( + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, + image_2_image_cls.__name__.replace("ControlNet", "").replace( + "Img2ImgPipeline", "ControlNetImg2ImgPipeline" + ), + ) + else: + image_2_image_cls = _get_task_class( + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, + image_2_image_cls.__name__.replace("ControlNetImg2ImgPipeline", "Img2ImgPipeline"), + ) + + # define expected module and optional kwargs given the pipeline signature + expected_modules, optional_kwargs = image_2_image_cls._get_signature_keys(image_2_image_cls) + + pretrained_model_name_or_path = original_config.pop("_name_or_path", None) + + # allow users pass modules in `kwargs` to override the original pipeline's components + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + original_class_obj = { + k: pipeline.components[k] + for k, v in pipeline.components.items() + if k in expected_modules and k not in passed_class_obj + } + + # allow users pass optional kwargs to override the original pipelines config attribute + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + original_pipe_kwargs = { + k: original_config[k] + for k, v in original_config.items() + if k in optional_kwargs and k not in passed_pipe_kwargs + } + + # config attribute that were not expected by original pipeline is stored as its private attribute + # we will pass them as optional arguments if they can be accepted by the pipeline + additional_pipe_kwargs = [ + k[1:] + for k in original_config.keys() + if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs + ] + for k in additional_pipe_kwargs: + original_pipe_kwargs[k] = original_config.pop(f"_{k}") + + image_2_image_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs} + + # store unused config as private attribute + unused_original_config = { + f"{'' if k.startswith('_') else '_'}{k}": original_config[k] + for k, v in original_config.items() + if k not in image_2_image_kwargs + } + + missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(image_2_image_kwargs.keys()) + + if len(missing_modules) > 0: + raise ValueError( + f"Pipeline {image_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" + ) + + model = image_2_image_cls(**image_2_image_kwargs) + model.register_to_config(_name_or_path=pretrained_model_name_or_path) + model.register_to_config(**unused_original_config) + + return model + + +class AutoPipelineForInpainting(ConfigMixin): + r""" + + [`AutoPipelineForInpainting`] is a generic pipeline class that instantiates an inpainting pipeline class. The + specific underlying pipeline class is automatically selected from either the + [`~AutoPipelineForInpainting.from_pretrained`] or [`~AutoPipelineForInpainting.from_pipe`] methods. + + This class cannot be instantiated using `__init__()` (throws an error). 
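+
+    A minimal sketch of the intended usage (the checkpoint id is only an example; per the
+    inpainting mapping above, a Stable Diffusion checkpoint is expected to resolve to
+    [`StableDiffusionInpaintPipeline`]):
+
+    ```py
+    >>> from diffusers import AutoPipelineForInpainting
+
+    >>> # AutoPipelineForInpainting() would raise an EnvironmentError; use a factory method instead.
+    >>> pipe = AutoPipelineForInpainting.from_pretrained("runwayml/stable-diffusion-inpainting")
+    >>> pipe.__class__.__name__
+    'StableDiffusionInpaintPipeline'
+    ```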
+ + Class attributes: + + - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the + diffusion pipeline's components. + + """ + + config_name = "model_index.json" + + def __init__(self, *args, **kwargs): + raise EnvironmentError( + f"{self.__class__.__name__} is designed to be instantiated " + f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " + f"`{self.__class__.__name__}.from_pipe(pipeline)` methods." + ) + + @classmethod + @validate_hf_hub_args + def from_pretrained(cls, pretrained_model_or_path, **kwargs): + r""" + Instantiates a inpainting Pytorch diffusion pipeline from pretrained pipeline weight. + + The from_pretrained() method takes care of returning the correct pipeline class instance by: + 1. Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its + config object + 2. Find the inpainting pipeline linked to the pipeline class using pattern matching on pipeline class name. + + If a `controlnet` argument is passed, it will instantiate a [`StableDiffusionControlNetInpaintPipeline`] + object. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + If you get the error message below, you need to finetune the weights for your downstream task: + + ``` + Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: + - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated + You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. + ``` + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline + hosted on the Hub. + - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights + saved using + [`~DiffusionPipeline.save_pretrained`]. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the + dtype is automatically derived from the model's weights. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. 
+ token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + custom_revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id similar to + `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a + custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn’t need to be defined for each + parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the + same device. + + Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier for the maximum memory. Will default to the maximum memory available for + each GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + The path to offload weights if device_map contains the value `"disk"`. + offload_state_dict (`bool`, *optional*): + If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if + the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` + when there is some disk offload. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the safetensors weights are downloaded if they're available **and** if the + safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors + weights. If set to `False`, safetensors weights are not loaded. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline + class). The overwritten components are passed directly to the pipelines `__init__` method. See example + below for more information. + variant (`str`, *optional*): + Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when + loading `from_flax`. + + + + To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with + `huggingface-cli login`. 
+ + + + Examples: + + ```py + >>> from diffusers import AutoPipelineForInpainting + + >>> pipeline = AutoPipelineForInpainting.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0] + ``` + """ + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) + + load_config_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "resume_download": resume_download, + "proxies": proxies, + "token": token, + "local_files_only": local_files_only, + "revision": revision, + } + + config = cls.load_config(pretrained_model_or_path, **load_config_kwargs) + orig_class_name = config["_class_name"] + + if "controlnet" in kwargs: + orig_class_name = config["_class_name"].replace("Pipeline", "ControlNetPipeline") + + inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, orig_class_name) + + kwargs = {**load_config_kwargs, **kwargs} + return inpainting_cls.from_pretrained(pretrained_model_or_path, **kwargs) + + @classmethod + def from_pipe(cls, pipeline, **kwargs): + r""" + Instantiates a inpainting Pytorch diffusion pipeline from another instantiated diffusion pipeline class. + + The from_pipe() method takes care of returning the correct pipeline class instance by finding the inpainting + pipeline linked to the pipeline class using pattern matching on pipeline class name. + + All the modules the pipeline class contain will be used to initialize the new pipeline without reallocating + additional memory. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + Parameters: + pipeline (`DiffusionPipeline`): + an instantiated `DiffusionPipeline` object + + Examples: + + ```py + >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForInpainting + + >>> pipe_t2i = AutoPipelineForText2Image.from_pretrained( + ... "DeepFloyd/IF-I-XL-v1.0", requires_safety_checker=False + ... 
) + + >>> pipe_inpaint = AutoPipelineForInpainting.from_pipe(pipe_t2i) + >>> image = pipe_inpaint(prompt, image=init_image, mask_image=mask_image).images[0] + ``` + """ + original_config = dict(pipeline.config) + original_cls_name = pipeline.__class__.__name__ + + # derive the pipeline class to instantiate + inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, original_cls_name) + + if "controlnet" in kwargs: + if kwargs["controlnet"] is not None: + inpainting_cls = _get_task_class( + AUTO_INPAINT_PIPELINES_MAPPING, + inpainting_cls.__name__.replace("ControlNet", "").replace( + "InpaintPipeline", "ControlNetInpaintPipeline" + ), + ) + else: + inpainting_cls = _get_task_class( + AUTO_INPAINT_PIPELINES_MAPPING, + inpainting_cls.__name__.replace("ControlNetInpaintPipeline", "InpaintPipeline"), + ) + + # define expected module and optional kwargs given the pipeline signature + expected_modules, optional_kwargs = inpainting_cls._get_signature_keys(inpainting_cls) + + pretrained_model_name_or_path = original_config.pop("_name_or_path", None) + + # allow users pass modules in `kwargs` to override the original pipeline's components + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + original_class_obj = { + k: pipeline.components[k] + for k, v in pipeline.components.items() + if k in expected_modules and k not in passed_class_obj + } + + # allow users pass optional kwargs to override the original pipelines config attribute + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + original_pipe_kwargs = { + k: original_config[k] + for k, v in original_config.items() + if k in optional_kwargs and k not in passed_pipe_kwargs + } + + # config that were not expected by original pipeline is stored as private attribute + # we will pass them as optional arguments if they can be accepted by the pipeline + additional_pipe_kwargs = [ + k[1:] + for k in original_config.keys() + if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs + ] + for k in additional_pipe_kwargs: + original_pipe_kwargs[k] = original_config.pop(f"_{k}") + + inpainting_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs} + + # store unused config as private attribute + unused_original_config = { + f"{'' if k.startswith('_') else '_'}{k}": original_config[k] + for k, v in original_config.items() + if k not in inpainting_kwargs + } + + missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(inpainting_kwargs.keys()) + + if len(missing_modules) > 0: + raise ValueError( + f"Pipeline {inpainting_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" + ) + + model = inpainting_cls(**inpainting_kwargs) + model.register_to_config(_name_or_path=pretrained_model_name_or_path) + model.register_to_config(**unused_original_config) + + return model diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/__init__.py new file mode 100644 index 000000000..af6c879d5 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/__init__.py @@ -0,0 +1,20 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL +from PIL import 
Image + +from ...utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline +else: + from .blip_image_processing import BlipImageProcessor + from .modeling_blip2 import Blip2QFormerModel + from .modeling_ctx_clip import ContextCLIPTextModel + from .pipeline_blip_diffusion import BlipDiffusionPipeline diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/blip_image_processing.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/blip_image_processing.py new file mode 100644 index 000000000..d71a14810 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/blip_image_processing.py @@ -0,0 +1,318 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for BLIP.""" + +from typing import Dict, List, Optional, Union + +import numpy as np +import torch +from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from transformers.image_transforms import convert_to_rgb, resize, to_channel_dimension_format +from transformers.image_utils import ( + OPENAI_CLIP_MEAN, + OPENAI_CLIP_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, +) +from transformers.utils import TensorType, is_vision_available, logging + +from diffusers.utils import numpy_to_pil + + +if is_vision_available(): + import PIL.Image + + +logger = logging.get_logger(__name__) + + +# We needed some extra functions on top of the ones in transformers.image_processing_utils.BaseImageProcessor, namely center crop +# Copy-pasted from transformers.models.blip.image_processing_blip.BlipImageProcessor +class BlipImageProcessor(BaseImageProcessor): + r""" + Constructs a BLIP image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the + `do_resize` parameter in the `preprocess` method. + size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`): + Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` + method. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be + overridden by the `resample` parameter in the `preprocess` method. 
+ do_rescale (`bool`, *optional*, defaults to `True`): + Wwhether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the + `do_rescale` parameter in the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be + overridden by the `rescale_factor` parameter in the `preprocess` method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` + method. Can be overridden by the `do_normalize` parameter in the `preprocess` method. + image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be + overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + Can be overridden by the `image_std` parameter in the `preprocess` method. + do_convert_rgb (`bool`, *optional*, defaults to `True`): + Whether to convert the image to RGB. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BICUBIC, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_convert_rgb: bool = True, + do_center_crop: bool = True, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"height": 224, "width": 224} + size = get_size_dict(size, default_to_square=True) + + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN + self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD + self.do_convert_rgb = do_convert_rgb + self.do_center_crop = do_center_crop + + # Copy-pasted from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BICUBIC, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image to `(size["height"], size["width"])`. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. 
+ data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + + Returns: + `np.ndarray`: The resized image. + """ + size = get_size_dict(size) + if "height" not in size or "width" not in size: + raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") + output_size = (size["height"], size["width"]) + return resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + def preprocess( + self, + images: ImageInput, + do_resize: Optional[bool] = None, + size: Optional[Dict[str, int]] = None, + resample: PILImageResampling = None, + do_rescale: Optional[bool] = None, + do_center_crop: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + do_convert_rgb: bool = None, + data_format: ChannelDimension = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> PIL.Image.Image: + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Controls the size of the image after `resize`. The shortest edge of the image is resized to + `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image + is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest + edge equal to `int(size["shortest_edge"] * (1333 / 800))`. + resample (`PILImageResampling`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image values between [0 - 1]. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. 
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to normalize the image by if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to normalize the image by if `do_normalize` is set to `True`. + do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): + Whether to convert the image to RGB. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + do_resize = do_resize if do_resize is not None else self.do_resize + resample = resample if resample is not None else self.resample + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb + do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop + + size = size if size is not None else self.size + size = get_size_dict(size, default_to_square=False) + images = make_list_of_images(images) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + + if do_resize and size is None or resample is None: + raise ValueError("Size and resample must be specified if do_resize is True.") + + if do_rescale and rescale_factor is None: + raise ValueError("Rescale factor must be specified if do_rescale is True.") + + if do_normalize and (image_mean is None or image_std is None): + raise ValueError("Image mean and std must be specified if do_normalize is True.") + + # PIL RGBA images are converted to RGB + if do_convert_rgb: + images = [convert_to_rgb(image) for image in images] + + # All transformations expect numpy arrays. 
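+        # From here the enabled steps run in order: resize -> rescale to [0, 1] -> normalize with
+        # `image_mean`/`image_std` -> center crop to `size`, after which the images are converted
+        # to `data_format` and wrapped in a `BatchFeature`.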
+ images = [to_numpy_array(image) for image in images] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + if input_data_format is None: + # We assume that all images have the same channel dimension format. + input_data_format = infer_channel_dimension_format(images[0]) + + if do_resize: + images = [ + self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) + for image in images + ] + + if do_rescale: + images = [ + self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + for image in images + ] + if do_normalize: + images = [ + self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) + for image in images + ] + if do_center_crop: + images = [self.center_crop(image, size, input_data_format=input_data_format) for image in images] + + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images + ] + + encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) + return encoded_outputs + + # Follows diffusers.VaeImageProcessor.postprocess + def postprocess(self, sample: torch.FloatTensor, output_type: str = "pil"): + if output_type not in ["pt", "np", "pil"]: + raise ValueError( + f"output_type={output_type} is not supported. Make sure to choose one of ['pt', 'np', or 'pil']" + ) + + # Equivalent to diffusers.VaeImageProcessor.denormalize + sample = (sample / 2 + 0.5).clamp(0, 1) + if output_type == "pt": + return sample + + # Equivalent to diffusers.VaeImageProcessor.pt_to_numpy + sample = sample.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "np": + return sample + # Output_type must be 'pil' + sample = numpy_to_pil(sample) + return sample diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/modeling_blip2.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/modeling_blip2.py new file mode 100644 index 000000000..c8869ad9d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/modeling_blip2.py @@ -0,0 +1,642 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
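+
+# Note: this file carries local copies of the BLIP-2 building blocks that BLIP-Diffusion relies on
+# (text and vision embeddings, the Q-Former encoder and layers, a projection layer and the vision
+# model), because the `transformers` implementation does not expose the multimodal embeddings
+# needed here (see the comment above `Blip2TextEmbeddings` below).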
+from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from transformers import BertTokenizer +from transformers.activations import QuickGELUActivation as QuickGELU +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPooling, + BaseModelOutputWithPoolingAndCrossAttentions, +) +from transformers.models.blip_2.configuration_blip_2 import Blip2Config, Blip2VisionConfig +from transformers.models.blip_2.modeling_blip_2 import ( + Blip2Encoder, + Blip2PreTrainedModel, + Blip2QFormerAttention, + Blip2QFormerIntermediate, + Blip2QFormerOutput, +) +from transformers.pytorch_utils import apply_chunking_to_forward +from transformers.utils import ( + logging, + replace_return_docstrings, +) + + +logger = logging.get_logger(__name__) + + +# There is an implementation of Blip2 in `transformers` : https://github.com/huggingface/transformers/blob/main/src/transformers/models/blip_2/modeling_blip_2.py. +# But it doesn't support getting multimodal embeddings. So, this module can be +# replaced with a future `transformers` version supports that. +class Blip2TextEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + + self.config = config + + def forward( + self, + input_ids=None, + position_ids=None, + query_embeds=None, + past_key_values_length=0, + ): + if input_ids is not None: + seq_length = input_ids.size()[1] + else: + seq_length = 0 + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone() + + if input_ids is not None: + embeddings = self.word_embeddings(input_ids) + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings = embeddings + position_embeddings + + if query_embeds is not None: + batch_size = embeddings.shape[0] + # repeat the query embeddings for batch size + query_embeds = query_embeds.repeat(batch_size, 1, 1) + embeddings = torch.cat((query_embeds, embeddings), dim=1) + else: + embeddings = query_embeds + embeddings = embeddings.to(query_embeds.dtype) + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +# Copy-pasted from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->Blip2 +class Blip2VisionEmbeddings(nn.Module): + def __init__(self, config: Blip2VisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + + self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) + + self.patch_embedding = 
nn.Conv2d( + in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False + ) + + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + 1 + + self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) + + def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: + batch_size = pixel_values.shape[0] + target_dtype = self.patch_embedding.weight.dtype + patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] + patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + + class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) + embeddings = torch.cat([class_embeds, patch_embeds], dim=1) + embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype) + return embeddings + + +# The Qformer encoder, which takes the visual embeddings, and the text input, to get multimodal embeddings +class Blip2QFormerEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList( + [Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + query_length=0, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions else None + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if getattr(self.config, "gradient_checkpointing", False) and self.training: + if use_cache: + logger.warning( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions, query_length) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + query_length, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if layer_module.has_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +# The layers making up the Qformer encoder +class Blip2QFormerLayer(nn.Module): + def __init__(self, config, layer_idx): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = Blip2QFormerAttention(config) + + self.layer_idx = layer_idx + + if layer_idx % config.cross_attention_frequency == 0: + self.crossattention = Blip2QFormerAttention(config, is_cross_attention=True) + self.has_cross_attention = True + else: + self.has_cross_attention = False + + self.intermediate = Blip2QFormerIntermediate(config) + self.intermediate_query = Blip2QFormerIntermediate(config) + self.output_query = Blip2QFormerOutput(config) + self.output = Blip2QFormerOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + query_length=0, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:-1] + + present_key_value = self_attention_outputs[-1] + + if query_length > 0: + query_attention_output = attention_output[:, :query_length, :] + + if self.has_cross_attention: + if encoder_hidden_states is None: + raise ValueError("encoder_hidden_states must be given for cross-attention layers") + cross_attention_outputs = self.crossattention( + query_attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + query_attention_output = cross_attention_outputs[0] + # add cross attentions if we output attention weights + outputs = outputs + cross_attention_outputs[1:-1] + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk_query, + 
self.chunk_size_feed_forward, + self.seq_len_dim, + query_attention_output, + ) + + if attention_output.shape[1] > query_length: + layer_output_text = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output[:, query_length:, :], + ) + layer_output = torch.cat([layer_output, layer_output_text], dim=1) + else: + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output, + ) + outputs = (layer_output,) + outputs + + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + def feed_forward_chunk_query(self, attention_output): + intermediate_output = self.intermediate_query(attention_output) + layer_output = self.output_query(intermediate_output, attention_output) + return layer_output + + +# ProjLayer used to project the multimodal Blip2 embeddings to be used in the text encoder +class ProjLayer(nn.Module): + def __init__(self, in_dim, out_dim, hidden_dim, drop_p=0.1, eps=1e-12): + super().__init__() + + # Dense1 -> Act -> Dense2 -> Drop -> Res -> Norm + self.dense1 = nn.Linear(in_dim, hidden_dim) + self.act_fn = QuickGELU() + self.dense2 = nn.Linear(hidden_dim, out_dim) + self.dropout = nn.Dropout(drop_p) + + self.LayerNorm = nn.LayerNorm(out_dim, eps=eps) + + def forward(self, x): + x_in = x + + x = self.LayerNorm(x) + x = self.dropout(self.dense2(self.act_fn(self.dense1(x)))) + x_in + + return x + + +# Copy-pasted from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->Blip2, BLIP->BLIP_2 +class Blip2VisionModel(Blip2PreTrainedModel): + main_input_name = "pixel_values" + config_class = Blip2VisionConfig + + def __init__(self, config: Blip2VisionConfig): + super().__init__(config) + self.config = config + embed_dim = config.hidden_size + self.embeddings = Blip2VisionEmbeddings(config) + self.pre_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + self.encoder = Blip2Encoder(config) + self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + + self.post_init() + + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Blip2VisionConfig) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + hidden_states = self.embeddings(pixel_values) + hidden_states = self.pre_layernorm(hidden_states) + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + last_hidden_state = encoder_outputs[0] + last_hidden_state = self.post_layernorm(last_hidden_state) + + pooled_output = last_hidden_state[:, 0, :] + pooled_output = 
self.post_layernorm(pooled_output) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + def get_input_embeddings(self): + return self.embeddings + + +# Qformer model, used to get multimodal embeddings from the text and image inputs +class Blip2QFormerModel(Blip2PreTrainedModel): + """ + Querying Transformer (Q-Former), used in BLIP-2. + """ + + def __init__(self, config: Blip2Config): + super().__init__(config) + self.config = config + self.embeddings = Blip2TextEmbeddings(config.qformer_config) + self.visual_encoder = Blip2VisionModel(config.vision_config) + self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) + if not hasattr(config, "tokenizer") or config.tokenizer is None: + self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", truncation_side="right") + else: + self.tokenizer = BertTokenizer.from_pretrained(config.tokenizer, truncation_side="right") + self.tokenizer.add_special_tokens({"bos_token": "[DEC]"}) + self.proj_layer = ProjLayer( + in_dim=config.qformer_config.hidden_size, + out_dim=config.qformer_config.hidden_size, + hidden_dim=config.qformer_config.hidden_size * 4, + drop_p=0.1, + eps=1e-12, + ) + + self.encoder = Blip2QFormerEncoder(config.qformer_config) + + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def get_extended_attention_mask( + self, + attention_mask: torch.Tensor, + input_shape: Tuple[int], + device: torch.device, + has_query: bool = False, + ) -> torch.Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (`Tuple[int]`): + The shape of the input to the model. + device (`torch.device`): + The device of the input to the model. + + Returns: + `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( + input_shape, attention_mask.shape + ) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. 
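+        # Worked example of the additive mask (illustrative values only, assuming a 2D
+        # padding mask [1., 1., 0.] for one sequence of three tokens in fp32):
+        #   extended_attention_mask              -> [[[[1., 1., 0.]]]]           (broadcast to [bsz, 1, 1, seq_len])
+        #   (1.0 - extended_attention_mask) * -10000.0 -> [[[[-0., -0., -10000.]]]]  (added to the attention scores)
+        # so attended positions are left unchanged while masked positions receive a large
+        # negative bias that vanishes after the softmax.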
+ # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + text_input=None, + image_input=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of: + shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and + value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are + used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key + value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape + `(batch_size, sequence_length)`. + use_cache (`bool`, `optional`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
+ """ + + text = self.tokenizer(text_input, return_tensors="pt", padding=True) + text = text.to(self.device) + input_ids = text.input_ids + batch_size = input_ids.shape[0] + query_atts = torch.ones((batch_size, self.query_tokens.size()[1]), dtype=torch.long).to(self.device) + attention_mask = torch.cat([query_atts, text.attention_mask], dim=1) + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # past_key_values_length + past_key_values_length = ( + past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0 + ) + + query_length = self.query_tokens.shape[1] + + embedding_output = self.embeddings( + input_ids=input_ids, + query_embeds=self.query_tokens, + past_key_values_length=past_key_values_length, + ) + + # embedding_output = self.layernorm(query_embeds) + # embedding_output = self.dropout(embedding_output) + + input_shape = embedding_output.size()[:-1] + batch_size, seq_length = input_shape + device = embedding_output.device + + image_embeds_frozen = self.visual_encoder(image_input).last_hidden_state + # image_embeds_frozen = torch.ones_like(image_embeds_frozen) + encoder_hidden_states = image_embeds_frozen + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if isinstance(encoder_hidden_states, list): + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() + else: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + + if isinstance(encoder_attention_mask, list): + encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.qformer_config.num_hidden_layers) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + 
output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + query_length=query_length, + ) + sequence_output = encoder_outputs[0] + pooled_output = sequence_output[:, 0, :] + + if not return_dict: + return self.proj_layer(sequence_output[:, :query_length, :]) + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py new file mode 100644 index 000000000..c6772fc88 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py @@ -0,0 +1,223 @@ +# Copyright 2024 Salesforce.com, inc. +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional, Tuple, Union + +import torch +from torch import nn +from transformers import CLIPPreTrainedModel +from transformers.modeling_outputs import BaseModelOutputWithPooling +from transformers.models.clip.configuration_clip import CLIPTextConfig +from transformers.models.clip.modeling_clip import CLIPEncoder + + +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
+ """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +# This is a modified version of the CLIPTextModel from transformers.models.clip.modeling_clip +# Which allows for an extra input of "context embeddings", which are the query embeddings used in Qformer +# They pass through the clip model, along with the text embeddings, and interact with them using self attention +class ContextCLIPTextModel(CLIPPreTrainedModel): + config_class = CLIPTextConfig + + _no_split_modules = ["CLIPEncoderLayer"] + + def __init__(self, config: CLIPTextConfig): + super().__init__(config) + self.text_model = ContextCLIPTextTransformer(config) + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + ctx_embeddings: torch.Tensor = None, + ctx_begin_pos: list = None, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + return self.text_model( + ctx_embeddings=ctx_embeddings, + ctx_begin_pos=ctx_begin_pos, + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +class ContextCLIPTextTransformer(nn.Module): + def __init__(self, config: CLIPTextConfig): + super().__init__() + self.config = config + embed_dim = config.hidden_size + self.embeddings = ContextCLIPTextEmbeddings(config) + self.encoder = CLIPEncoder(config) + self.final_layer_norm = nn.LayerNorm(embed_dim) + + def forward( + self, + ctx_embeddings: torch.Tensor, + ctx_begin_pos: list, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is None: + raise ValueError("You have to specify either input_ids") + + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + + hidden_states = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + ctx_embeddings=ctx_embeddings, + ctx_begin_pos=ctx_begin_pos, + ) + + bsz, seq_len = input_shape + if ctx_embeddings is not None: + seq_len += ctx_embeddings.size(1) + # CLIP's text model uses causal mask, prepare it here. 
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 + causal_attention_mask = self._build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to( + hidden_states.device + ) + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask, hidden_states.dtype) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + last_hidden_state = self.final_layer_norm(last_hidden_state) + + # text_embeds.shape = [batch_size, sequence_length, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14 + pooled_output = last_hidden_state[ + torch.arange(last_hidden_state.shape[0], device=input_ids.device), + input_ids.to(torch.int).argmax(dim=-1), + ] + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + def _build_causal_attention_mask(self, bsz, seq_len, dtype): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype) + mask.fill_(torch.tensor(torch.finfo(dtype).min)) + mask.triu_(1) # zero out the lower diagonal + mask = mask.unsqueeze(1) # expand mask + return mask + + +class ContextCLIPTextEmbeddings(nn.Module): + def __init__(self, config: CLIPTextConfig): + super().__init__() + embed_dim = config.hidden_size + + self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) + self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + + def forward( + self, + ctx_embeddings: torch.Tensor, + ctx_begin_pos: list, + input_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + ) -> torch.Tensor: + if ctx_embeddings is None: + ctx_len = 0 + else: + ctx_len = ctx_embeddings.shape[1] + + seq_length = (input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]) + ctx_len + + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + if inputs_embeds is None: + inputs_embeds = self.token_embedding(input_ids) + + # for each input embeddings, add the ctx embeddings at the correct position + input_embeds_ctx = [] + bsz = inputs_embeds.shape[0] + + if ctx_embeddings is not None: + for i in range(bsz): + cbp = ctx_begin_pos[i] + + prefix = inputs_embeds[i, :cbp] + # remove the special token embedding + suffix = inputs_embeds[i, cbp:] + + input_embeds_ctx.append(torch.cat([prefix, ctx_embeddings[i], suffix], dim=0)) + + inputs_embeds = torch.stack(input_embeds_ctx, dim=0) + + position_embeddings = self.position_embedding(position_ids) + embeddings = 
inputs_embeds + position_embeddings + + return embeddings diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py new file mode 100644 index 000000000..ba43b2e53 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py @@ -0,0 +1,348 @@ +# Copyright 2024 Salesforce.com, inc. +# Copyright 2024 The HuggingFace Team. All rights reserved.# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPTokenizer + +from ...models import AutoencoderKL, UNet2DConditionModel +from ...schedulers import PNDMScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .blip_image_processing import BlipImageProcessor +from .modeling_blip2 import Blip2QFormerModel +from .modeling_ctx_clip import ContextCLIPTextModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers.pipelines import BlipDiffusionPipeline + >>> from diffusers.utils import load_image + >>> import torch + + >>> blip_diffusion_pipe = BlipDiffusionPipeline.from_pretrained( + ... "Salesforce/blipdiffusion", torch_dtype=torch.float16 + ... ).to("cuda") + + + >>> cond_subject = "dog" + >>> tgt_subject = "dog" + >>> text_prompt_input = "swimming underwater" + + >>> cond_image = load_image( + ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/dog.jpg" + ... ) + >>> guidance_scale = 7.5 + >>> num_inference_steps = 25 + >>> negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate" + + + >>> output = blip_diffusion_pipe( + ... text_prompt_input, + ... cond_image, + ... cond_subject, + ... tgt_subject, + ... guidance_scale=guidance_scale, + ... num_inference_steps=num_inference_steps, + ... neg_prompt=negative_prompt, + ... height=512, + ... width=512, + ... ).images + >>> output[0].save("image.png") + ``` +""" + + +class BlipDiffusionPipeline(DiffusionPipeline): + """ + Pipeline for Zero-Shot Subject Driven Generation using Blip Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
+ + Args: + tokenizer ([`CLIPTokenizer`]): + Tokenizer for the text encoder + text_encoder ([`ContextCLIPTextModel`]): + Text encoder to encode the text prompt + vae ([`AutoencoderKL`]): + VAE model to map the latents to the image + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + scheduler ([`PNDMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + qformer ([`Blip2QFormerModel`]): + QFormer model to get multi-modal embeddings from the text and image. + image_processor ([`BlipImageProcessor`]): + Image Processor to preprocess and postprocess the image. + ctx_begin_pos (int, `optional`, defaults to 2): + Position of the context token in the text encoder. + """ + + model_cpu_offload_seq = "qformer->text_encoder->unet->vae" + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: ContextCLIPTextModel, + vae: AutoencoderKL, + unet: UNet2DConditionModel, + scheduler: PNDMScheduler, + qformer: Blip2QFormerModel, + image_processor: BlipImageProcessor, + ctx_begin_pos: int = 2, + mean: List[float] = None, + std: List[float] = None, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + vae=vae, + unet=unet, + scheduler=scheduler, + qformer=qformer, + image_processor=image_processor, + ) + self.register_to_config(ctx_begin_pos=ctx_begin_pos, mean=mean, std=std) + + def get_query_embeddings(self, input_image, src_subject): + return self.qformer(image_input=input_image, text_input=src_subject, return_dict=False) + + # from the original Blip Diffusion code, specifies the target subject and augments the prompt by repeating it + def _build_prompt(self, prompts, tgt_subjects, prompt_strength=1.0, prompt_reps=20): + rv = [] + for prompt, tgt_subject in zip(prompts, tgt_subjects): + prompt = f"a {tgt_subject} {prompt.strip()}" + # a trick to amplify the prompt + rv.append(", ".join([prompt] * int(prompt_strength * prompt_reps))) + + return rv + + # Copied from diffusers.pipelines.consistency_models.pipeline_consistency_models.ConsistencyModelPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels, height, width) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def encode_prompt(self, query_embeds, prompt, device=None): + device = device or self._execution_device + + # embeddings for prompt, with query_embeds as context + max_len = self.text_encoder.text_model.config.max_position_embeddings + max_len -= self.qformer.config.num_query_tokens + + tokenized_prompt = self.tokenizer( + prompt, + padding="max_length", + truncation=True, + max_length=max_len, + return_tensors="pt", + ).to(device) + + batch_size = query_embeds.shape[0] + ctx_begin_pos = [self.config.ctx_begin_pos] * batch_size + + text_embeddings = self.text_encoder( + input_ids=tokenized_prompt.input_ids, + ctx_embeddings=query_embeds, + ctx_begin_pos=ctx_begin_pos, + )[0] + + return text_embeddings + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: List[str], + reference_image: PIL.Image.Image, + source_subject_category: List[str], + target_subject_category: List[str], + latents: Optional[torch.FloatTensor] = None, + guidance_scale: float = 7.5, + height: int = 512, + width: int = 512, + num_inference_steps: int = 50, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + neg_prompt: Optional[str] = "", + prompt_strength: float = 1.0, + prompt_reps: int = 20, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`List[str]`): + The prompt or prompts to guide the image generation. + reference_image (`PIL.Image.Image`): + The reference image to condition the generation on. + source_subject_category (`List[str]`): + The source subject category. + target_subject_category (`List[str]`): + The target subject category. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by random sampling. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + height (`int`, *optional*, defaults to 512): + The height of the generated image. + width (`int`, *optional*, defaults to 512): + The width of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + neg_prompt (`str`, *optional*, defaults to ""): + The prompt or prompts not to guide the image generation.
Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_strength (`float`, *optional*, defaults to 1.0): + The strength of the prompt. Specifies the number of times the prompt is repeated along with prompt_reps + to amplify the prompt. + prompt_reps (`int`, *optional*, defaults to 20): + The number of times the prompt is repeated along with prompt_strength to amplify the prompt. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + device = self._execution_device + + reference_image = self.image_processor.preprocess( + reference_image, image_mean=self.config.mean, image_std=self.config.std, return_tensors="pt" + )["pixel_values"] + reference_image = reference_image.to(device) + + if isinstance(prompt, str): + prompt = [prompt] + if isinstance(source_subject_category, str): + source_subject_category = [source_subject_category] + if isinstance(target_subject_category, str): + target_subject_category = [target_subject_category] + + batch_size = len(prompt) + + prompt = self._build_prompt( + prompts=prompt, + tgt_subjects=target_subject_category, + prompt_strength=prompt_strength, + prompt_reps=prompt_reps, + ) + query_embeds = self.get_query_embeddings(reference_image, source_subject_category) + text_embeddings = self.encode_prompt(query_embeds, prompt, device) + do_classifier_free_guidance = guidance_scale > 1.0 + if do_classifier_free_guidance: + max_length = self.text_encoder.text_model.config.max_position_embeddings + + uncond_input = self.tokenizer( + [neg_prompt] * batch_size, + padding="max_length", + max_length=max_length, + return_tensors="pt", + ) + uncond_embeddings = self.text_encoder( + input_ids=uncond_input.input_ids.to(device), + ctx_embeddings=None, + )[0] + # For classifier free guidance, we need to do two forward passes. 
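+            # Shape sketch (illustrative, assuming batch_size=1, the 77-token CLIP
+            # context used above and a 768-dim CLIP text encoder): uncond_embeddings
+            # and text_embeddings are each [1, 77, 768], so the concatenation below
+            # yields [2, 77, 768], matching the latents that are duplicated per step
+            # in the denoising loop for the two guidance branches.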
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) + + scale_down_factor = 2 ** (len(self.unet.config.block_out_channels) - 1) + latents = self.prepare_latents( + batch_size=batch_size, + num_channels=self.unet.config.in_channels, + height=height // scale_down_factor, + width=width // scale_down_factor, + generator=generator, + latents=latents, + dtype=self.unet.dtype, + device=device, + ) + # set timesteps + extra_set_kwargs = {} + self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) + + for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + # expand the latents if we are doing classifier free guidance + do_classifier_free_guidance = guidance_scale > 1.0 + + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + noise_pred = self.unet( + latent_model_input, + timestep=t, + encoder_hidden_states=text_embeddings, + down_block_additional_residuals=None, + mid_block_additional_residual=None, + )["sample"] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + latents = self.scheduler.step( + noise_pred, + t, + latents, + )["prev_sample"] + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/consistency_models/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/consistency_models/__init__.py new file mode 100644 index 000000000..162d91c01 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/consistency_models/__init__.py @@ -0,0 +1,24 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + _LazyModule, +) + + +_import_structure = { + "pipeline_consistency_models": ["ConsistencyModelPipeline"], +} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_consistency_models import ConsistencyModelPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py new file mode 100644 index 000000000..befac79c6 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py @@ -0,0 +1,275 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, List, Optional, Union + +import torch + +from ...models import UNet2DModel +from ...schedulers import CMStochasticIterativeScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + + >>> from diffusers import ConsistencyModelPipeline + + >>> device = "cuda" + >>> # Load the cd_imagenet64_l2 checkpoint. + >>> model_id_or_path = "openai/diffusers-cd_imagenet64_l2" + >>> pipe = ConsistencyModelPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) + >>> pipe.to(device) + + >>> # Onestep Sampling + >>> image = pipe(num_inference_steps=1).images[0] + >>> image.save("cd_imagenet64_l2_onestep_sample.png") + + >>> # Onestep sampling, class-conditional image generation + >>> # ImageNet-64 class label 145 corresponds to king penguins + >>> image = pipe(num_inference_steps=1, class_labels=145).images[0] + >>> image.save("cd_imagenet64_l2_onestep_sample_penguin.png") + + >>> # Multistep sampling, class-conditional image generation + >>> # Timesteps can be explicitly specified; the particular timesteps below are from the original Github repo: + >>> # https://github.com/openai/consistency_models/blob/main/scripts/launch.sh#L77 + >>> image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=145).images[0] + >>> image.save("cd_imagenet64_l2_multistep_sample_penguin.png") + ``` +""" + + +class ConsistencyModelPipeline(DiffusionPipeline): + r""" + Pipeline for unconditional or class-conditional image generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only + compatible with [`CMStochasticIterativeScheduler`]. + """ + + model_cpu_offload_seq = "unet" + + def __init__(self, unet: UNet2DModel, scheduler: CMStochasticIterativeScheduler) -> None: + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + ) + + self.safety_checker = None + + def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels, height, width) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Follows diffusers.VaeImageProcessor.postprocess + def postprocess_image(self, sample: torch.FloatTensor, output_type: str = "pil"): + if output_type not in ["pt", "np", "pil"]: + raise ValueError( + f"output_type={output_type} is not supported. Make sure to choose one of ['pt', 'np', or 'pil']" + ) + + # Equivalent to diffusers.VaeImageProcessor.denormalize + sample = (sample / 2 + 0.5).clamp(0, 1) + if output_type == "pt": + return sample + + # Equivalent to diffusers.VaeImageProcessor.pt_to_numpy + sample = sample.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "np": + return sample + + # Output_type must be 'pil' + sample = self.numpy_to_pil(sample) + return sample + + def prepare_class_labels(self, batch_size, device, class_labels=None): + if self.unet.config.num_class_embeds is not None: + if isinstance(class_labels, list): + class_labels = torch.tensor(class_labels, dtype=torch.int) + elif isinstance(class_labels, int): + assert batch_size == 1, "Batch size must be 1 if classes is an int" + class_labels = torch.tensor([class_labels], dtype=torch.int) + elif class_labels is None: + # Randomly generate batch_size class labels + # TODO: should use generator here? int analogue of randn_tensor is not exposed in ...utils + class_labels = torch.randint(0, self.unet.config.num_class_embeds, size=(batch_size,)) + class_labels = class_labels.to(device) + else: + class_labels = None + return class_labels + + def check_inputs(self, num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps): + if num_inference_steps is None and timesteps is None: + raise ValueError("Exactly one of `num_inference_steps` or `timesteps` must be supplied.") + + if num_inference_steps is not None and timesteps is not None: + logger.warning( + f"Both `num_inference_steps`: {num_inference_steps} and `timesteps`: {timesteps} are supplied;" + " `timesteps` will be used over `num_inference_steps`." + ) + + if latents is not None: + expected_shape = (batch_size, 3, img_size, img_size) + if latents.shape != expected_shape: + raise ValueError(f"The shape of latents is {latents.shape} but is expected to be {expected_shape}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + batch_size: int = 1, + class_labels: Optional[Union[torch.Tensor, List[int], int]] = None, + num_inference_steps: int = 1, + timesteps: List[int] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + ): + r""" + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of images to generate. + class_labels (`torch.Tensor` or `List[int]` or `int`, *optional*): + Optional class labels for conditioning class-conditional consistency models. 
Not used if the model is + not class-conditional. + num_inference_steps (`int`, *optional*, defaults to 1): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + # 0. Prepare call parameters + img_size = self.unet.config.sample_size + device = self._execution_device + + # 1. Check inputs + self.check_inputs(num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps) + + # 2. Prepare image latents + # Sample image latents x_0 ~ N(0, sigma_0^2 * I) + sample = self.prepare_latents( + batch_size=batch_size, + num_channels=self.unet.config.in_channels, + height=img_size, + width=img_size, + dtype=self.unet.dtype, + device=device, + generator=generator, + latents=latents, + ) + + # 3. Handle class_labels for class-conditional models + class_labels = self.prepare_class_labels(batch_size, device, class_labels=class_labels) + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps) + timesteps = self.scheduler.timesteps + + # 5. Denoising loop + # Multistep sampling: implements Algorithm 1 in the paper + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + scaled_sample = self.scheduler.scale_model_input(sample, t) + model_output = self.unet(scaled_sample, t, class_labels=class_labels, return_dict=False)[0] + + sample = self.scheduler.step(model_output, t, sample, generator=generator)[0] + + # call the callback, if provided + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, sample) + + # 6. 
Post-process image sample + image = self.postprocess_image(sample, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/__init__.py new file mode 100644 index 000000000..3b832c017 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/__init__.py @@ -0,0 +1,80 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["multicontrolnet"] = ["MultiControlNetModel"] + _import_structure["pipeline_controlnet"] = ["StableDiffusionControlNetPipeline"] + _import_structure["pipeline_controlnet_blip_diffusion"] = ["BlipDiffusionControlNetPipeline"] + _import_structure["pipeline_controlnet_img2img"] = ["StableDiffusionControlNetImg2ImgPipeline"] + _import_structure["pipeline_controlnet_inpaint"] = ["StableDiffusionControlNetInpaintPipeline"] + _import_structure["pipeline_controlnet_inpaint_sd_xl"] = ["StableDiffusionXLControlNetInpaintPipeline"] + _import_structure["pipeline_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPipeline"] + _import_structure["pipeline_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetImg2ImgPipeline"] +try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_flax_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) +else: + _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .multicontrolnet import MultiControlNetModel + from .pipeline_controlnet import StableDiffusionControlNetPipeline + from .pipeline_controlnet_blip_diffusion import BlipDiffusionControlNetPipeline + from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline + from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline + from .pipeline_controlnet_inpaint_sd_xl import StableDiffusionXLControlNetInpaintPipeline + from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline + from .pipeline_controlnet_sd_xl_img2img import StableDiffusionXLControlNetImg2ImgPipeline + + try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_flax_and_transformers_objects import * # 
noqa F403 + else: + from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline + + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/multicontrolnet.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/multicontrolnet.py new file mode 100644 index 000000000..7d284f2d2 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/multicontrolnet.py @@ -0,0 +1,187 @@ +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from torch import nn + +from ...models.controlnet import ControlNetModel, ControlNetOutput +from ...models.modeling_utils import ModelMixin +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +class MultiControlNetModel(ModelMixin): + r""" + Multiple `ControlNetModel` wrapper class for Multi-ControlNet + + This module is a wrapper for multiple instances of the `ControlNetModel`. The `forward()` API is designed to be + compatible with `ControlNetModel`. + + Args: + controlnets (`List[ControlNetModel]`): + Provides additional conditioning to the unet during the denoising process. You must set multiple + `ControlNetModel` as a list. + """ + + def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]): + super().__init__() + self.nets = nn.ModuleList(controlnets) + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + controlnet_cond: List[torch.tensor], + conditioning_scale: List[float], + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guess_mode: bool = False, + return_dict: bool = True, + ) -> Union[ControlNetOutput, Tuple]: + for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)): + down_samples, mid_sample = controlnet( + sample=sample, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + controlnet_cond=image, + conditioning_scale=scale, + class_labels=class_labels, + timestep_cond=timestep_cond, + attention_mask=attention_mask, + added_cond_kwargs=added_cond_kwargs, + cross_attention_kwargs=cross_attention_kwargs, + guess_mode=guess_mode, + return_dict=return_dict, + ) + + # merge samples + if i == 0: + down_block_res_samples, mid_block_res_sample = down_samples, mid_sample + else: + down_block_res_samples = [ + samples_prev + samples_curr + for samples_prev, samples_curr in zip(down_block_res_samples, down_samples) + ] + mid_block_res_sample += mid_sample + + return down_block_res_samples, mid_block_res_sample + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + is_main_process: bool = True, + save_function: Callable = None, + safe_serialization: bool = True, + variant: Optional[str] = None, + ): + """ + Save a model and its configuration file to a directory, so that it can be re-loaded using the + `[`~pipelines.controlnet.MultiControlNetModel.from_pretrained`]` class method. 
+ + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to which to save. Will be created if it doesn't exist. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful when in distributed training like + TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on + the main process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful on distributed training like TPUs when one + need to replace `torch.save` by another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + variant (`str`, *optional*): + If specified, weights are saved in the format pytorch_model..bin. + """ + idx = 0 + model_path_to_save = save_directory + for controlnet in self.nets: + controlnet.save_pretrained( + model_path_to_save, + is_main_process=is_main_process, + save_function=save_function, + safe_serialization=safe_serialization, + variant=variant, + ) + + idx += 1 + model_path_to_save = model_path_to_save + f"_{idx}" + + @classmethod + def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs): + r""" + Instantiate a pretrained MultiControlNet model from multiple pre-trained controlnet models. + + The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train + the model, you should first set it back in training mode with `model.train()`. + + The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come + pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning + task. + + The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those + weights are discarded. + + Parameters: + pretrained_model_path (`os.PathLike`): + A path to a *directory* containing model weights saved using + [`~diffusers.pipelines.controlnet.MultiControlNetModel.save_pretrained`], e.g., + `./my_model_directory/controlnet`. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype + will be automatically derived from the model's weights. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each + parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the + same device. + + To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available for each + GPU and the available CPU RAM if unset. 
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading by not initializing the weights and only loading the pre-trained weights. This + also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the + model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch, + setting this argument to `True` will raise an error. + variant (`str`, *optional*): + If specified load weights from `variant` filename, *e.g.* pytorch_model..bin. `variant` is + ignored when using `from_flax`. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the + `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from + `safetensors` weights. If set to `False`, loading will *not* use `safetensors`. + """ + idx = 0 + controlnets = [] + + # load controlnet and append to list until no controlnet directory exists anymore + # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` + # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... + model_path_to_load = pretrained_model_path + while os.path.isdir(model_path_to_load): + controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs) + controlnets.append(controlnet) + + idx += 1 + model_path_to_load = pretrained_model_path + f"_{idx}" + + logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.") + + if len(controlnets) == 0: + raise ValueError( + f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}." + ) + + return cls(controlnets) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet.py new file mode 100644 index 000000000..8f31dfc26 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet.py @@ -0,0 +1,1318 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
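+
+# Illustrative usage sketch (not part of the upstream diffusers sources): how this
+# pipeline is typically driven with more than one ControlNet. Passing a list of
+# `ControlNetModel`s to `controlnet=` makes `__init__` wrap them in the
+# `MultiControlNetModel` defined in `multicontrolnet.py`. The conditioning images
+# (`canny_image`, `pose_image`) and the openpose checkpoint name below are
+# assumptions used only for illustration.
+#
+#   import torch
+#   from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+#
+#   controlnets = [
+#       ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16),
+#       ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16),  # assumed checkpoint
+#   ]
+#   pipe = StableDiffusionControlNetPipeline.from_pretrained(
+#       "runwayml/stable-diffusion-v1-5", controlnet=controlnets, torch_dtype=torch.float16
+#   ).to("cuda")
+#
+#   image = pipe(
+#       "futuristic-looking woman",
+#       image=[canny_image, pose_image],            # one conditioning image per ControlNet
+#       controlnet_conditioning_scale=[1.0, 0.8],   # per-ControlNet scales
+#       num_inference_steps=20,
+#   ).images[0]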
+ + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install opencv-python transformers accelerate + >>> from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> import cv2 + >>> from PIL import Image + + >>> # download an image + >>> image = load_image( + ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" + ... ) + >>> image = np.array(image) + + >>> # get canny image + >>> image = cv2.Canny(image, 100, 200) + >>> image = image[:, :, None] + >>> image = np.concatenate([image, image, image], axis=2) + >>> canny_image = Image.fromarray(image) + + >>> # load control net and stable diffusion v1-5 + >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) + >>> pipe = StableDiffusionControlNetPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 + ... ) + + >>> # speed up diffusion process with faster scheduler and memory optimization + >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) + >>> # remove following line if xformers is not installed + >>> pipe.enable_xformers_memory_efficient_attention() + + >>> pipe.enable_model_cpu_offload() + + >>> # generate image + >>> generator = torch.manual_seed(0) + >>> image = pipe( + ... "futuristic-looking woman", num_inference_steps=20, generator=generator, image=canny_image + ... ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. 
If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionControlNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + LoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. 
+ feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, 
clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + transposed_image = [list(t) for t in zip(*image)] + if len(transposed_image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: if you pass`image` as a list of list, each sublist must have the same length as the number of controlnets, but the sublists in `image` got {len(transposed_image)} images and {len(self.controlnet.nets)} ControlNets." + ) + for image_ in transposed_image: + self.check_image(image_, prompt, prompt_embeds) + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError( + "A single batch of varying conditioning scale settings (e.g. [[1.0, 0.5], [0.2, 0.8]]) is not supported at the moment. " + "The conditioning scale must be fixed across the batch." + ) + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. 
Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be + accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height + and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in + `init`, images must be passed as a list such that each element of the list can be correctly batched for + input to a single ControlNet. When `prompt` is a list, and if a list of images is passed for a single ControlNet, + each will be paired with each prompt in the `prompt` list. This also applies to multiple ControlNets, + where a list of image lists can be passed to batch for each prompt and each ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. 
If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). 
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeine class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. 
+ """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + image, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. 
Prepare image + if isinstance(controlnet, ControlNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = image.shape[-2:] + elif isinstance(controlnet, MultiControlNetModel): + images = [] + + # Nested lists as ControlNet condition + if isinstance(image[0], list): + # Transpose the nested image list + image = [list(t) for t in zip(*image)] + + for image_ in image: + image_ = self.prepare_image( + image=image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + images.append(image_) + + image = images + height, width = image[0].shape[-2:] + else: + assert False + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7.2 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + is_unet_compiled = is_compiled_module(self.unet) + is_controlnet_compiled = is_compiled_module(self.controlnet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged.
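+ # The UNet batch below is ordered [unconditional, conditional] (prompt_embeds was concatenated as [negative, positive] above), so the zero tensors come first and leave the unconditional half without ControlNet guidance.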
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py new file mode 100644 index 000000000..b983a3f8d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py @@ -0,0 +1,413 @@ +# Copyright 2024 Salesforce.com, inc. +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPTokenizer + +from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel +from ...schedulers import PNDMScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..blip_diffusion.blip_image_processing import BlipImageProcessor +from ..blip_diffusion.modeling_blip2 import Blip2QFormerModel +from ..blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers.pipelines import BlipDiffusionControlNetPipeline + >>> from diffusers.utils import load_image + >>> from controlnet_aux import CannyDetector + >>> import torch + + >>> blip_diffusion_pipe = BlipDiffusionControlNetPipeline.from_pretrained( + ... "Salesforce/blipdiffusion-controlnet", torch_dtype=torch.float16 + ... ).to("cuda") + + >>> style_subject = "flower" + >>> tgt_subject = "teapot" + >>> text_prompt = "on a marble table" + + >>> cldm_cond_image = load_image( + ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/kettle.jpg" + ... ).resize((512, 512)) + >>> canny = CannyDetector() + >>> cldm_cond_image = canny(cldm_cond_image, 30, 70, output_type="pil") + >>> style_image = load_image( + ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/flower.jpg" + ... ) + >>> guidance_scale = 7.5 + >>> num_inference_steps = 50 + >>> negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate" + + + >>> output = blip_diffusion_pipe( + ... text_prompt, + ... style_image, + ... cldm_cond_image, + ... style_subject, + ... tgt_subject, + ... guidance_scale=guidance_scale, + ... num_inference_steps=num_inference_steps, + ... neg_prompt=negative_prompt, + ... height=512, + ... width=512, + ... ).images + >>> output[0].save("image.png") + ``` +""" + + +class BlipDiffusionControlNetPipeline(DiffusionPipeline): + """ + Pipeline for Canny Edge based Controlled subject-driven generation using Blip Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + tokenizer ([`CLIPTokenizer`]): + Tokenizer for the text encoder + text_encoder ([`ContextCLIPTextModel`]): + Text encoder to encode the text prompt + vae ([`AutoencoderKL`]): + VAE model to map the latents to the image + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + scheduler ([`PNDMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. 
+ qformer ([`Blip2QFormerModel`]): + QFormer model to get multi-modal embeddings from the text and image. + controlnet ([`ControlNetModel`]): + ControlNet model to get the conditioning image embedding. + image_processor ([`BlipImageProcessor`]): + Image Processor to preprocess and postprocess the image. + ctx_begin_pos (int, `optional`, defaults to 2): + Position of the context token in the text encoder. + """ + + model_cpu_offload_seq = "qformer->text_encoder->unet->vae" + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: ContextCLIPTextModel, + vae: AutoencoderKL, + unet: UNet2DConditionModel, + scheduler: PNDMScheduler, + qformer: Blip2QFormerModel, + controlnet: ControlNetModel, + image_processor: BlipImageProcessor, + ctx_begin_pos: int = 2, + mean: List[float] = None, + std: List[float] = None, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + vae=vae, + unet=unet, + scheduler=scheduler, + qformer=qformer, + controlnet=controlnet, + image_processor=image_processor, + ) + self.register_to_config(ctx_begin_pos=ctx_begin_pos, mean=mean, std=std) + + def get_query_embeddings(self, input_image, src_subject): + return self.qformer(image_input=input_image, text_input=src_subject, return_dict=False) + + # from the original Blip Diffusion code, specifies the target subject and augments the prompt by repeating it + def _build_prompt(self, prompts, tgt_subjects, prompt_strength=1.0, prompt_reps=20): + rv = [] + for prompt, tgt_subject in zip(prompts, tgt_subjects): + prompt = f"a {tgt_subject} {prompt.strip()}" + # a trick to amplify the prompt + rv.append(", ".join([prompt] * int(prompt_strength * prompt_reps))) + + return rv + + # Copied from diffusers.pipelines.consistency_models.pipeline_consistency_models.ConsistencyModelPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels, height, width) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def encode_prompt(self, query_embeds, prompt, device=None): + device = device or self._execution_device + + # embeddings for prompt, with query_embeds as context + max_len = self.text_encoder.text_model.config.max_position_embeddings + max_len -= self.qformer.config.num_query_tokens + + tokenized_prompt = self.tokenizer( + prompt, + padding="max_length", + truncation=True, + max_length=max_len, + return_tensors="pt", + ).to(device) + + batch_size = query_embeds.shape[0] + ctx_begin_pos = [self.config.ctx_begin_pos] * batch_size + + text_embeddings = self.text_encoder( + input_ids=tokenized_prompt.input_ids, + ctx_embeddings=query_embeds, + ctx_begin_pos=ctx_begin_pos, + )[0] + + return text_embeddings + + # Adapted from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + ): + image = self.image_processor.preprocess( + image, + size={"width": width, "height": height}, + do_rescale=True, + do_center_crop=False, + do_normalize=False, + return_tensors="pt", + )["pixel_values"].to(device) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance: + image = torch.cat([image] * 2) + + return image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: List[str], + reference_image: PIL.Image.Image, + condtioning_image: PIL.Image.Image, + source_subject_category: List[str], + target_subject_category: List[str], + latents: Optional[torch.FloatTensor] = None, + guidance_scale: float = 7.5, + height: int = 512, + width: int = 512, + num_inference_steps: int = 50, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + neg_prompt: Optional[str] = "", + prompt_strength: float = 1.0, + prompt_reps: int = 20, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`List[str]`): + The prompt or prompts to guide the image generation. + reference_image (`PIL.Image.Image`): + The reference image to condition the generation on. + condtioning_image (`PIL.Image.Image`): + The conditioning canny edge image to condition the generation on. + source_subject_category (`List[str]`): + The source subject category. + target_subject_category (`List[str]`): + The target subject category. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by random sampling. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + height (`int`, *optional*, defaults to 512): + The height of the generated image. + width (`int`, *optional*, defaults to 512): + The width of the generated image. + seed (`int`, *optional*, defaults to 42): + The seed to use for random generation. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + neg_prompt (`str`, *optional*, defaults to ""): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_strength (`float`, *optional*, defaults to 1.0): + The strength of the prompt. Specifies the number of times the prompt is repeated along with prompt_reps + to amplify the prompt. + prompt_reps (`int`, *optional*, defaults to 20): + The number of times the prompt is repeated along with prompt_strength to amplify the prompt. + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + device = self._execution_device + + reference_image = self.image_processor.preprocess( + reference_image, image_mean=self.config.mean, image_std=self.config.std, return_tensors="pt" + )["pixel_values"] + reference_image = reference_image.to(device) + + if isinstance(prompt, str): + prompt = [prompt] + if isinstance(source_subject_category, str): + source_subject_category = [source_subject_category] + if isinstance(target_subject_category, str): + target_subject_category = [target_subject_category] + + batch_size = len(prompt) + + prompt = self._build_prompt( + prompts=prompt, + tgt_subjects=target_subject_category, + prompt_strength=prompt_strength, + prompt_reps=prompt_reps, + ) + query_embeds = self.get_query_embeddings(reference_image, source_subject_category) + text_embeddings = self.encode_prompt(query_embeds, prompt, device) + # 3. unconditional embedding + do_classifier_free_guidance = guidance_scale > 1.0 + if do_classifier_free_guidance: + max_length = self.text_encoder.text_model.config.max_position_embeddings + + uncond_input = self.tokenizer( + [neg_prompt] * batch_size, + padding="max_length", + max_length=max_length, + return_tensors="pt", + ) + uncond_embeddings = self.text_encoder( + input_ids=uncond_input.input_ids.to(device), + ctx_embeddings=None, + )[0] + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) + scale_down_factor = 2 ** (len(self.unet.config.block_out_channels) - 1) + latents = self.prepare_latents( + batch_size=batch_size, + num_channels=self.unet.config.in_channels, + height=height // scale_down_factor, + width=width // scale_down_factor, + generator=generator, + latents=latents, + dtype=self.unet.dtype, + device=device, + ) + # set timesteps + extra_set_kwargs = {} + self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) + + cond_image = self.prepare_control_image( + image=condtioning_image, + width=width, + height=height, + batch_size=batch_size, + num_images_per_prompt=1, + device=device, + dtype=self.controlnet.dtype, + do_classifier_free_guidance=do_classifier_free_guidance, + ) + + for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + # expand the latents if we are doing classifier free guidance + do_classifier_free_guidance = guidance_scale > 1.0 + + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + down_block_res_samples, mid_block_res_sample = self.controlnet( + latent_model_input, + t, + encoder_hidden_states=text_embeddings, + controlnet_cond=cond_image, + return_dict=False, + ) + + noise_pred = self.unet( + latent_model_input, + timestep=t, + encoder_hidden_states=text_embeddings, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + )["sample"] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + latents = self.scheduler.step( + noise_pred, + t, + latents, + )["prev_sample"] + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py new file mode 100644 index 000000000..9d2c76fd7 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py @@ -0,0 +1,1310 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
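+ # Note: this img2img variant encodes the input `image` to latents and adds noise according to `strength` (see `get_timesteps` / `prepare_latents` below) before denoising with ControlNet residual guidance.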
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install opencv-python transformers accelerate + >>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> import cv2 + >>> from PIL import Image + + >>> # download an image + >>> image = load_image( + ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" + ... ) + >>> np_image = np.array(image) + + >>> # get canny image + >>> np_image = cv2.Canny(np_image, 100, 200) + >>> np_image = np_image[:, :, None] + >>> np_image = np.concatenate([np_image, np_image, np_image], axis=2) + >>> canny_image = Image.fromarray(np_image) + + >>> # load control net and stable diffusion v1-5 + >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) + >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 + ... ) + + >>> # speed up diffusion process with faster scheduler and memory optimization + >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) + >>> pipe.enable_model_cpu_offload() + + >>> # generate image + >>> generator = torch.manual_seed(0) + >>> image = pipe( + ... "futuristic-looking woman", + ... num_inference_steps=20, + ... generator=generator, + ... image=image, + ... control_image=canny_image, + ... 
).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +def prepare_image(image): + if isinstance(image, torch.Tensor): + # Batch single image + if image.ndim == 3: + image = image.unsqueeze(0) + + image = image.to(dtype=torch.float32) + else: + # preprocess image + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + return image + + +class StableDiffusionControlNetImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + LoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for image-to-image generation using Stable Diffusion with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. 
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, 
clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. 
Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." 
+ ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + control_image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 0.8, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 0.8, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The initial image to be used as the starting point for the image generation process. Can also accept + image latents as `image`, and if passing latents directly they are not encoded again. 
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be + accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height + and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in + `init`, images must be passed as a list such that each element of the list can be correctly batched for + input to a single ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). 
If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the number of IP-Adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that is called at the end of each denoising step during inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content.
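The `strength` argument described above is consumed by the `get_timesteps` helper defined earlier in this file: it simply truncates the front of the timestep schedule. A minimal, illustrative sketch of that arithmetic (pure Python, assuming `scheduler.order == 1`; the function name here is hypothetical):

```py
# Mirrors the truncation in get_timesteps: strength decides how many of the
# scheduler's timesteps are actually run (scheduler order assumed to be 1).
def effective_steps(num_inference_steps: int, strength: float):
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return t_start, num_inference_steps - t_start

# strength=0.8 with 50 steps skips the first 10 timesteps and denoises for 40;
# strength=1.0 runs the full schedule, strength=0.0 performs no denoising at all.
for s in (0.0, 0.5, 0.8, 1.0):
    print(s, effective_steps(50, s))
```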
+ """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + control_image, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. 
Prepare image + image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + + # 5. Prepare controlnet_conditioning_image + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + control_images.append(control_image_) + + control_image = control_images + else: + assert False + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7.2 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. 
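The `controlnet_keep` list built in step 7.2 above is what implements `control_guidance_start` / `control_guidance_end`: it is a per-step 0/1 multiplier that later scales `controlnet_conditioning_scale` inside the denoising loop. A small standalone sketch of the same arithmetic (single-ControlNet case; illustrative only):

```py
# Same keep/drop rule as step 7.2 above, for a single ControlNet.
def controlnet_keep_schedule(num_steps: int, start: float, end: float):
    keeps = []
    for i in range(num_steps):
        # 1.0 while the step's position in the schedule lies between start and end, else 0.0.
        keeps.append(1.0 - float(i / num_steps < start or (i + 1) / num_steps > end))
    return keeps

# With 10 steps and control_guidance_end=0.5 the ControlNet residuals
# are applied only during the first half of the schedule.
print(controlnet_keep_schedule(10, 0.0, 0.5))
# -> [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
```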
+ control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Infered ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. + down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = 
self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py new file mode 100644 index 000000000..c4f1bff5e --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py @@ -0,0 +1,1620 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install transformers accelerate + >>> from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, DDIMScheduler + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> init_image = load_image( + ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png" + ... ) + >>> init_image = init_image.resize((512, 512)) + + >>> generator = torch.Generator(device="cpu").manual_seed(1) + + >>> mask_image = load_image( + ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png" + ... ) + >>> mask_image = mask_image.resize((512, 512)) + + + >>> def make_canny_condition(image): + ... 
image = np.array(image) + ... image = cv2.Canny(image, 100, 200) + ... image = image[:, :, None] + ... image = np.concatenate([image, image, image], axis=2) + ... image = Image.fromarray(image) + ... return image + + + >>> control_image = make_canny_condition(init_image) + + >>> controlnet = ControlNetModel.from_pretrained( + ... "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16 + ... ) + >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 + ... ) + + >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) + >>> pipe.enable_model_cpu_offload() + + >>> # generate image + >>> image = pipe( + ... "a handsome man with ray-ban sunglasses", + ... num_inference_steps=20, + ... generator=generator, + ... eta=1.0, + ... image=init_image, + ... mask_image=mask_image, + ... control_image=control_image, + ... ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.prepare_mask_and_masked_image +def prepare_mask_and_masked_image(image, mask, height, width, return_image=False): + """ + Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the + ``image`` and ``1`` for the ``mask``. + + The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be + binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. + It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` + ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4 + dimensions: ``batch x channels x height x width``. + """ + deprecation_message = "The prepare_mask_and_masked_image method is deprecated and will be removed in a future version. 
Please use VaeImageProcessor.preprocess instead" + deprecate( + "prepare_mask_and_masked_image", + "0.30.0", + deprecation_message, + ) + if image is None: + raise ValueError("`image` input cannot be undefined.") + + if mask is None: + raise ValueError("`mask_image` input cannot be undefined.") + + if isinstance(image, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") + + # Batch single image + if image.ndim == 3: + assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" + image = image.unsqueeze(0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" + assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" + assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" + + # Check image is in [-1, 1] + if image.min() < -1 or image.max() > 1: + raise ValueError("Image should be in [-1, 1] range") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # Image as float32 + image = image.to(dtype=torch.float32) + elif isinstance(mask, torch.Tensor): + raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") + else: + # preprocess image + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + # resize all images w.r.t passed height an width + image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image] + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] + mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + masked_image = image * (mask < 0.5) + + # n.b. ensure backwards compatibility as old function does not return image + if return_image: + return mask, masked_image, image + + return mask, masked_image + + +class StableDiffusionControlNetInpaintPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + LoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for image inpainting using Stable Diffusion with ControlNet guidance. 
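The (deprecated) `prepare_mask_and_masked_image` helper above encodes the pipeline's mask convention: the mask is binarized at 0.5, `1` marks the region to inpaint, and the "masked image" keeps only the pixels outside that region. A tensor-level sketch of that convention (illustrative only; new code should go through `VaeImageProcessor` as the deprecation message says):

```py
import torch

# Illustrative mask convention: 1 = inpaint this pixel, 0 = keep it.
image = torch.rand(1, 3, 8, 8) * 2 - 1   # image normalized to [-1, 1]
mask = torch.rand(1, 1, 8, 8)            # soft mask in [0, 1]

# Binarize at 0.5, exactly as in prepare_mask_and_masked_image above.
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1

# The masked image zeroes out the region that will be inpainted.
masked_image = image * (mask < 0.5)

print(mask.unique())       # e.g. tensor([0., 1.])
print(masked_image.shape)  # torch.Size([1, 3, 8, 8])
```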
+ + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + + + This pipeline can be used with checkpoints that have been specifically fine-tuned for inpainting + ([runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting)) as well as + default text-to-image Stable Diffusion checkpoints + ([runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)). Default text-to-image + Stable Diffusion checkpoints might be preferable for ControlNets that have been fine-tuned on those, such as + [lllyasviel/control_v11p_sd15_inpaint](https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint). + + + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
+ """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True + ) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, 
clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def check_inputs( + self, + prompt, + image, + mask_image, + height, + width, + callback_steps, + output_type, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + padding_mask_crop=None, + ): + if height is not None and height % 8 != 0 or width is not None and width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if padding_mask_crop is not None: + if not isinstance(image, PIL.Image.Image): + raise ValueError( + f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}." + ) + if not isinstance(mask_image, PIL.Image.Image): + raise ValueError( + f"The mask image should be a PIL image when inpainting mask crop, but is of type" + f" {type(mask_image)}." + ) + if output_type != "pil": + raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.") + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." 
+ ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + crops_coords, + resize_mode, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_latents + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + return_noise=False, + return_image_latents=False, + ): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." + "However, either the image or the noise timestep has not been provided." + ) + + if return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + + if image.shape[1] == 4: + image_latents = image + else: + image_latents = self._encode_vae_image(image=image, generator=generator) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + + if latents is None: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. then initialise the latents to noise, else initial to image + noise + latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) + # if pure noise then scale the initial latents by the Scheduler's init sigma + latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents + else: + noise = latents.to(device) + latents = noise * self.scheduler.init_noise_sigma + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_mask_latents + def prepare_mask_latents( + self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + masked_image = masked_image.to(device=device, dtype=dtype) + + if masked_image.shape[1] == 4: + masked_image_latents = masked_image + else: + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." 
+ ) + masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline._encode_vae_image + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + control_image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + padding_mask_crop: Optional[int] = None, + strength: float = 1.0, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 0.5, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. 
+            image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`,
+                `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+                `Image`, NumPy array or tensor representing an image batch to be used as the starting point. For both
+                NumPy array and PyTorch tensor, the expected value range is between `[0, 1]`. If it's a tensor or a
+                list of tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a NumPy array or
+                a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`. It can also accept image
+                latents as `image`, but if passing latents directly it is not encoded again.
+            mask_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`,
+                `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+                `Image`, NumPy array or tensor representing an image batch to mask `image`. White pixels in the mask
+                are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a
+                single channel (luminance) before use. If it's a NumPy array or PyTorch tensor, it should contain one
+                color channel (L) instead of 3, so the expected shape for a PyTorch tensor would be `(B, 1, H, W)`,
+                `(B, H, W)`, `(1, H, W)`, or `(H, W)`, and for a NumPy array `(B, H, W, 1)`, `(B, H, W)`, `(H, W, 1)`,
+                or `(H, W)`.
+            control_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`,
+                `List[List[torch.FloatTensor]]`, or `List[List[PIL.Image.Image]]`):
+                The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
+                specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
+                accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height
+                and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
+                `init`, images must be passed as a list such that each element of the list can be correctly batched for
+                input to a single ControlNet.
+            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+                The height in pixels of the generated image.
+            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+                The width in pixels of the generated image.
+            padding_mask_crop (`int`, *optional*, defaults to `None`):
+                The size of the margin in the crop to be applied to the image and mask. If `None`, no crop is applied
+                to `image` and `mask_image`. If `padding_mask_crop` is not `None`, it will first find a rectangular
+                region with the same aspect ratio as the image that contains all masked areas, and then expand that
+                area based on `padding_mask_crop`. The image and `mask_image` are then cropped based on the expanded
+                area before being resized to the original image size for inpainting. This is useful when the masked
+                area is small while the image is large and contains information irrelevant to inpainting, such as
+                background.
+            strength (`float`, *optional*, defaults to 1.0):
+                Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
+                starting point and more noise is added the higher the `strength`. The number of denoising steps depends
+                on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
+                process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
+                essentially ignores `image`.
+ num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.5): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. 
+            guess_mode (`bool`, *optional*, defaults to `False`):
+                The ControlNet encoder tries to recognize the content of the input image even if you remove all
+                prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
+            control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
+                The percentage of total steps at which the ControlNet starts applying.
+            control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
+                The percentage of total steps at which the ControlNet stops applying.
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+            callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference. The function is called
+                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
+                otherwise a `tuple` is returned where the first element is a list with the generated images and the
+                second element is a list of `bool`s indicating whether the corresponding generated image contains
+                "not-safe-for-work" (nsfw) content.
+        """
+
+        callback = kwargs.pop("callback", None)
+        callback_steps = kwargs.pop("callback_steps", None)
+
+        if callback is not None:
+            deprecate(
+                "callback",
+                "1.0.0",
+                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+            )
+        if callback_steps is not None:
+            deprecate(
+                "callback_steps",
+                "1.0.0",
+                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+            )
+
+        controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
+
+        # align format for control guidance
+        if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
+            control_guidance_start = len(control_guidance_end) * [control_guidance_start]
+        elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
+            control_guidance_end = len(control_guidance_start) * [control_guidance_end]
+        elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
+            mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
+            control_guidance_start, control_guidance_end = (
+                mult * [control_guidance_start],
+                mult * [control_guidance_end],
+            )
+
+        # 1. Check inputs.
Raise error if not correct + self.check_inputs( + prompt, + control_image, + mask_image, + height, + width, + callback_steps, + output_type, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + padding_mask_crop, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if padding_mask_crop is not None: + height, width = self.image_processor.get_default_height_width(image, height, width) + crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) + resize_mode = "fill" + else: + crops_coords = None + resize_mode = "default" + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. 
Prepare image + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + crops_coords=crops_coords, + resize_mode=resize_mode, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + crops_coords=crops_coords, + resize_mode=resize_mode, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + control_images.append(control_image_) + + control_image = control_images + else: + assert False + + # 4.1 Preprocess mask and image - resizes image and mask w.r.t height and width + original_image = image + init_image = self.image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ) + init_image = init_image.to(dtype=torch.float32) + + mask = self.mask_processor.preprocess( + mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + + masked_image = init_image * (mask < 0.5) + _, _, height, width = init_image.shape + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps=num_inference_steps, strength=strength, device=device + ) + # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + self.do_classifier_free_guidance, + ) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7.2 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Infered ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
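+                    # e.g. a conditional-only residual `d` of shape (B, C, H, W) becomes
+                    # `torch.cat([torch.zeros_like(d), d])` of shape (2B, C, H, W): the unconditional
+                    # half of the CFG batch receives zero residuals while the conditional half keeps
+                    # the ControlNet output.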
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + # predict the noise residual + if num_channels_unet == 9: + latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) + + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if num_channels_unet == 4: + init_latents_proper = image_latents + if self.do_classifier_free_guidance: + init_mask, _ = mask.chunk(2) + else: + init_mask = mask + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + if padding_mask_crop is not None: + image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py new file mode 100644 index 000000000..52ffe5a3f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py @@ -0,0 +1,1818 @@ +# Copyright 2024 Harutatsu Akiyama, Jinbin Bai, and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + is_invisible_watermark_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput +from .multicontrolnet import MultiControlNetModel + + +if is_invisible_watermark_available(): + from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install transformers accelerate + >>> from diffusers import StableDiffusionXLControlNetInpaintPipeline, ControlNetModel, DDIMScheduler + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> init_image = load_image( + ... 
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png" + ... ) + >>> init_image = init_image.resize((1024, 1024)) + + >>> generator = torch.Generator(device="cpu").manual_seed(1) + + >>> mask_image = load_image( + ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png" + ... ) + >>> mask_image = mask_image.resize((1024, 1024)) + + + >>> def make_canny_condition(image): + ... image = np.array(image) + ... image = cv2.Canny(image, 100, 200) + ... image = image[:, :, None] + ... image = np.concatenate([image, image, image], axis=2) + ... image = Image.fromarray(image) + ... return image + + + >>> control_image = make_canny_condition(init_image) + + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16 + ... ) + >>> pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16 + ... ) + + >>> pipe.enable_model_cpu_offload() + + >>> # generate image + >>> image = pipe( + ... "a handsome man with ray-ban sunglasses", + ... num_inference_steps=20, + ... generator=generator, + ... eta=1.0, + ... image=init_image, + ... mask_image=mask_image, + ... control_image=control_image, + ... ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +class StableDiffusionXLControlNetInpaintPipeline( + DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin, IPAdapterMixin +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. 
Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" + _optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: ControlNetModel, + scheduler: KarrasDiffusionSchedulers, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: Optional[CLIPImageProcessor] = None, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True + ) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + 
num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
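+                    # e.g. `clip_skip=1` selects `hidden_states[-3]`, one layer earlier than the
+                    # default `hidden_states[-2]` used when `clip_skip` is None.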
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + def check_inputs( + self, + prompt, + prompt_2, + image, + mask_image, + strength, + num_inference_steps, + callback_steps, + output_type, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + padding_mask_crop=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + if num_inference_steps is None: + raise ValueError("`num_inference_steps` cannot be None.") + elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." + ) + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if padding_mask_crop is not None: + if not isinstance(image, PIL.Image.Image): + raise ValueError( + f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}." + ) + if not isinstance(mask_image, PIL.Image.Image): + raise ValueError( + f"The mask image should be a PIL image when inpainting mask crop, but is of type" + f" {type(mask_image)}." + ) + if output_type != "pil": + raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.") + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. 
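+        # e.g. a typical multi-ControlNet call might pass one prompt (or one batch of prompts) together
+        # with one control image per ControlNet, such as
+        #     pipe(prompt="a cat", image=init_image, mask_image=mask, control_image=[canny_image, pose_image])
+        # Per-prompt sets of conditionings are not supported; the same conditionings apply to every prompt.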
+ if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." 
+ ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + crops_coords, + resize_mode, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + add_noise=True, + return_noise=False, + return_image_latents=False, + ): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." + "However, either the image or the noise timestep has not been provided." + ) + + if return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + + if image.shape[1] == 4: + image_latents = image + else: + image_latents = self._encode_vae_image(image=image, generator=generator) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + + if latents is None and add_noise: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. 
then initialise the latents to noise, else initial to image + noise + latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) + # if pure noise then scale the initial latents by the Scheduler's init sigma + latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents + elif add_noise: + noise = latents.to(device) + latents = noise * self.scheduler.init_noise_sigma + else: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = image_latents.to(device) + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + dtype = image.dtype + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + image_latents = image_latents.to(dtype) + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + def prepare_mask_latents( + self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + + masked_image_latents = None + if masked_image is not None: + masked_image = masked_image.to(device=device, dtype=dtype) + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." 
+ ) + masked_image_latents = masked_image_latents.repeat( + batch_size // masked_image_latents.shape[0], 1, 1, 1 + ) + + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): + # get the original timestep using init_timestep + if denoising_start is None: + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + t_start = max(num_inference_steps - init_timestep, 0) + else: + t_start = 0 + + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + + # Strength is irrelevant if we directly request a timestep to start at; + # that is, strength is determined by the denoising_start instead. + if denoising_start is not None: + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_start * self.scheduler.config.num_train_timesteps) + ) + ) + + num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item() + if self.scheduler.order == 2 and num_inference_steps % 2 == 0: + # if the scheduler is a 2nd order scheduler we might have to do +1 + # because `num_inference_steps` might be even given that every timestep + # (except the highest one) is duplicated. If `num_inference_steps` is even it would + # mean that we cut the timesteps in the middle of the denoising step + # (between 1st and 2nd devirative) which leads to incorrect results. By adding 1 + # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler + num_inference_steps = num_inference_steps + 1 + + # because t_n+1 >= t_n, we slice the timesteps starting from the end + timesteps = timesteps[-num_inference_steps:] + return timesteps, num_inference_steps + + return timesteps, num_inference_steps - t_start + + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,)) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. 
Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." + ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
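+    # Concretely, the denoising loop below combines the two noise predictions as
+    #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+    # so any `guidance_scale` <= 1 disables that mixing (see `do_classifier_free_guidance`).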
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + control_image: Union[ + PipelineImageInput, + List[PipelineImageInput], + ] = None, + height: Optional[int] = None, + width: Optional[int] = None, + padding_mask_crop: Optional[int] = None, + strength: float = 0.9999, + num_inference_steps: int = 50, + denoising_start: Optional[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + guidance_rescale: float = 0.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will + be masked out with `mask_image` and repainted according to `prompt`. + mask_image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted + to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) + instead of 3, so the expected shape would be `(B, H, W, 1)`. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. 
+            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
+                The width in pixels of the generated image.
+            padding_mask_crop (`int`, *optional*, defaults to `None`):
+                The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to image and mask_image. If
+                `padding_mask_crop` is not `None`, it will first find a rectangular region with the same aspect ratio as the image that
+                contains all of the masked area, and then expand that area based on `padding_mask_crop`. The image and mask_image will then be cropped based on
+                the expanded area before resizing to the original image size for inpainting. This is useful when the masked area is small while the image is large
+                and contains information irrelevant for inpainting, such as background.
+            strength (`float`, *optional*, defaults to 0.9999):
+                Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be
+                between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the
+                `strength`. The number of denoising steps depends on the amount of noise initially added. When
+                `strength` is 1, added noise will be maximum and the denoising process will run for the full number of
+                iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked
+                portion of the reference `image`. Note that in the case of `denoising_start` being declared as an
+                integer, the value of `strength` will be ignored.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            denoising_start (`float`, *optional*):
+                When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
+                bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
+                it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
+                strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
+                is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
+                Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
+            denoising_end (`float`, *optional*):
+                When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
+                completed before it is intentionally prematurely terminated. As a result, the returned sample will
+                still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
+                denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
+                final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
+                forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
+                Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
+            guidance_scale (`float`, *optional*, defaults to 5.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
+                usually at the expense of lower image quality.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            negative_prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
+            ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
+                Pre-generated image embeddings for IP-Adapter. It should be a list whose length equals the number of IP-adapters.
+                Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
+                if `do_classifier_free_guidance` is set to `True`.
+                If not provided, embeddings are computed from the `ip_adapter_image` input argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+                input argument.
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+                [`schedulers.DDIMScheduler`], will be ignored for others.
+            generator (`torch.Generator`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.FloatTensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                plain tuple.
+            cross_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
+                `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
+                explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
+                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
+                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                For most cases, `target_size` should be set to the desired height and width of the generated image. If
+                not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
+                section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            aesthetic_score (`float`, *optional*, defaults to 6.0):
+                Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
+                Part of SDXL's micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
+                Part of SDXL's micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
+                simulate an aesthetic score of the generated image by influencing the negative text condition.
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+            callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference. The function is called
+                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
+                [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
+                `tuple`. When returning a tuple, the first element is a list with the generated images.
+ """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # # 0.0 Default height and width to unet + # height = height or self.unet.config.sample_size * self.vae_scale_factor + # width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 0.1 align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs + self.check_inputs( + prompt, + prompt_2, + control_image, + mask_image, + strength, + num_inference_steps, + callback_steps, + output_type, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + padding_mask_crop, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.1 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. set timesteps + def denoising_value_valid(dnv): + return isinstance(dnv, float) and 0 < dnv < 1 + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps, + strength, + device, + denoising_start=denoising_start if denoising_value_valid(denoising_start) else None, + ) + # check that number of inference steps is not < 1 - as this doesn't make sense + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + self._num_timesteps = len(timesteps) + + # 5. 
Preprocess mask and image - resizes image and mask w.r.t height and width + # 5.1 Prepare init image + if padding_mask_crop is not None: + height, width = self.image_processor.get_default_height_width(image, height, width) + crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) + resize_mode = "fill" + else: + crops_coords = None + resize_mode = "default" + + original_image = image + init_image = self.image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ) + init_image = init_image.to(dtype=torch.float32) + + # 5.2 Prepare control images + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + crops_coords=crops_coords, + resize_mode=resize_mode, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + crops_coords=crops_coords, + resize_mode=resize_mode, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + control_images.append(control_image_) + + control_image = control_images + else: + raise ValueError(f"{controlnet.__class__} is not supported.") + + # 5.3 Prepare mask + mask = self.mask_processor.preprocess( + mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + + masked_image = init_image * (mask < 0.5) + _, _, height, width = init_image.shape + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + + add_noise = True if denoising_start is None else False + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + add_noise=add_noise, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + self.do_classifier_free_guidance, + ) + + # 8. Check that sizes of mask, masked image and latents match + if num_channels_unet == 9: + # default case for runwayml/stable-diffusion-inpainting + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! 
The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + elif num_channels_unet != 4: + raise ValueError( + f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." + ) + # 8.1 Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8.2 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + if isinstance(self.controlnet, MultiControlNetModel): + controlnet_keep.append(keeps) + else: + controlnet_keep.append(keeps[0]) + + # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + height, width = latents.shape[-2:] + height = height * self.vae_scale_factor + width = width * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 10. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + # 11. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + if ( + denoising_end is not None + and denoising_start is not None + and denoising_value_valid(denoising_end) + and denoising_value_valid(denoising_start) + and denoising_start >= denoising_end + ): + raise ValueError( + f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: " + + f" {denoising_end} when using type float." 
+ ) + elif denoising_end is not None and denoising_value_valid(denoising_end): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + controlnet_added_cond_kwargs = { + "text_embeds": add_text_embeds.chunk(2)[1], + "time_ids": add_time_ids.chunk(2)[1], + } + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + # # Resize control_image to match the size of the input to the controlnet + # if control_image.shape[-2:] != control_model_input.shape[-2:]: + # control_image = F.interpolate(control_image, size=control_model_input.shape[-2:], mode="bilinear", align_corners=False) + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Infered ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
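+                    # Each residual of shape (B, C, H, W) becomes (2B, C, H, W) here, so it lines up
+                    # with the CFG-doubled `latent_model_input` that is passed to the UNet below.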
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + if ip_adapter_image is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + if num_channels_unet == 9: + latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if num_channels_unet == 4: + init_latents_proper = image_latents + if self.do_classifier_free_guidance: + init_mask, _ = mask.chunk(2) + else: + init_mask = mask + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + return StableDiffusionXLPipelineOutput(images=latents) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + if padding_mask_crop is not None: + image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in 
image] + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py new file mode 100644 index 000000000..eca81083b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py @@ -0,0 +1,1499 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from diffusers.utils.import_utils import is_invisible_watermark_available + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +from .multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install opencv-python transformers accelerate + >>> from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> import cv2 + >>> from PIL import Image + + >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting" + >>> negative_prompt = "low quality, bad quality, sketches" + + >>> # download an image + >>> image = load_image( + ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" + ... 
) + + >>> # initialize the models and pipeline + >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16 + ... ) + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) + >>> pipe = StableDiffusionXLControlNetPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> # get canny image + >>> image = np.array(image) + >>> image = cv2.Canny(image, 100, 200) + >>> image = image[:, :, None] + >>> image = np.concatenate([image, image, image], axis=2) + >>> canny_image = Image.fromarray(image) + + >>> # generate image + >>> image = pipe( + ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image + ... ).images[0] + ``` +""" + + +class StableDiffusionXLControlNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): + Second frozen text-encoder + ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + tokenizer_2 ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings should always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. 
+ add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to + watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no + watermarker is used. + """ + + # leave controlnet out on purpose because it iterates with unet + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + negative_pooled_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." 
+ ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
+ ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ )
+
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
+ return add_time_ids
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
+ def upcast_vae(self):
+ dtype = self.vae.dtype
+ self.vae.to(dtype=torch.float32)
+ use_torch_2_0_or_xformers = isinstance(
+ self.vae.decoder.mid_block.attentions[0].processor,
+ (
+ AttnProcessor2_0,
+ XFormersAttnProcessor,
+ LoRAXFormersAttnProcessor,
+ LoRAAttnProcessor2_0,
+ ),
+ )
+ # if xformers or torch_2_0 is used attention block does not need
+ # to be in float32 which can save lots of memory
+ if use_torch_2_0_or_xformers:
+ self.vae.post_quant_conv.to(dtype)
+ self.vae.decoder.conv_in.to(dtype)
+ self.vae.decoder.mid_block.to(dtype)
+
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
+ """
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
+
+ Args:
+ w (`torch.Tensor`):
+ guidance scale values for which to generate embedding vectors
+ embedding_dim (`int`, *optional*, defaults to 512):
+ dimension of the embeddings to generate
+ dtype:
+ data type of the generated embeddings
+
+ Returns:
+ `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
+ """
+ assert len(w.shape) == 1
+ w = w * 1000.0
+
+ half_dim = embedding_dim // 2
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
+ emb = w.to(dtype)[:, None] * emb[None, :]
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
+ if embedding_dim % 2 == 1: # zero pad
+ emb = torch.nn.functional.pad(emb, (0, 1))
+ assert emb.shape == (w.shape[0], embedding_dim)
+ return emb
+
+ @property
+ def guidance_scale(self):
+ return self._guidance_scale
+
+ @property
+ def clip_skip(self):
+ return self._clip_skip
+
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
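+ # In the denoising loop the two noise predictions are combined as
+ # noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond).
+ # CFG is skipped when the UNet takes a guidance embedding (`time_cond_proj_dim` is set); in that case
+ # `guidance_scale - 1` is embedded via `get_guidance_scale_embedding` above and fed in as `timestep_cond`.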
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be + accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height + and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in + `init`, images must be passed as a list such that each element of the list can be correctly batched for + input to a single ControlNet. 
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` + and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. 
Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, pooled text embeddings are generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt + weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input + argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). 
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
+ micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
+ micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ To negatively condition the generation process based on a target image resolution. It should be the same
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ clip_skip (`int`, *optional*):
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+ the output of the pre-final layer will be used for computing the prompt embeddings.
+ callback_on_step_end (`Callable`, *optional*):
+ A function that is called at the end of each denoising step during inference. The function is called
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+ `callback_on_step_end_tensor_inputs`.
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+ `._callback_tensor_inputs` attribute of your pipeline class.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
+ otherwise a `tuple` is returned containing the output images.
+ """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + image, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + negative_pooled_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3.1 Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image + if isinstance(controlnet, ControlNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = image.shape[-2:] + elif isinstance(controlnet, MultiControlNetModel): + images = [] + + for image_ in image: + image_ = self.prepare_image( + image=image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + images.append(image_) + + image = images + height, width = image[0].shape[-2:] + else: + assert False + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(image, list): + original_size = original_size or image[0].shape[-2:] + else: + original_size = original_size or image.shape[-2:] + target_size = target_size or (height, width) + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + is_unet_compiled = is_compiled_module(self.unet) + is_controlnet_compiled = is_compiled_module(self.controlnet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. 
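+ # The unconditional half is handled further below by concatenating zero-filled residuals,
+ # so in guess mode the ControlNet hint only steers the text-conditioned prediction.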
+ control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + controlnet_added_cond_kwargs = { + "text_embeds": add_text_embeds.chunk(2)[1], + "time_ids": add_time_ids.chunk(2)[1], + } + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Infered ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. + down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the 
mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py new file mode 100644 index 000000000..86a0e2c57 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py @@ -0,0 +1,1626 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
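+# Note: this pipeline starts from an encoded input image rather than pure noise; `strength` (see the
+# example below) controls how much of the noise schedule is re-run on top of that image.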
+ + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from diffusers.utils.import_utils import is_invisible_watermark_available + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +from .multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # pip install accelerate transformers safetensors diffusers + + >>> import torch + >>> import numpy as np + >>> from PIL import Image + + >>> from transformers import DPTFeatureExtractor, DPTForDepthEstimation + >>> from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL + >>> from diffusers.utils import load_image + + + >>> depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda") + >>> feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-hybrid-midas") + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-depth-sdxl-1.0-small", + ... variant="fp16", + ... use_safetensors=True, + ... torch_dtype=torch.float16, + ... ).to("cuda") + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16).to("cuda") + >>> pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", + ... controlnet=controlnet, + ... vae=vae, + ... variant="fp16", + ... use_safetensors=True, + ... torch_dtype=torch.float16, + ... ).to("cuda") + >>> pipe.enable_model_cpu_offload() + + + >>> def get_depth_map(image): + ... image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda") + ... with torch.no_grad(), torch.autocast("cuda"): + ... depth_map = depth_estimator(image).predicted_depth + + ... depth_map = torch.nn.functional.interpolate( + ... depth_map.unsqueeze(1), + ... size=(1024, 1024), + ... mode="bicubic", + ... align_corners=False, + ... ) + ... depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) + ... depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) + ... depth_map = (depth_map - depth_min) / (depth_max - depth_min) + ... image = torch.cat([depth_map] * 3, dim=1) + ... image = image.permute(0, 2, 3, 1).cpu().numpy()[0] + ... 
image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8)) + ... return image + + + >>> prompt = "A robot, 4k photo" + >>> image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ).resize((1024, 1024)) + >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization + >>> depth_image = get_depth_map(image) + + >>> images = pipe( + ... prompt, + ... image=image, + ... control_image=depth_image, + ... strength=0.99, + ... num_inference_steps=50, + ... controlnet_conditioning_scale=controlnet_conditioning_scale, + ... ).images + >>> images[0].save(f"robot_cat.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +class StableDiffusionXLControlNetImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, +): + r""" + Pipeline for image-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets + as a list, the outputs from each ControlNet are added together to create one combined additional + conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the + config of `stabilityai/stable-diffusion-xl-refiner-1-0`. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + + # Copied from 
diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
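                    # Illustrative note on the indexing below: with clip_skip=1 the selected
                    # hidden state is hidden_states[-(1 + 2)] == hidden_states[-3], i.e. one
                    # layer earlier than the default hidden_states[-2] used when clip_skip is
                    # None; clip_skip=2 would select hidden_states[-4], and so on.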
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + strength, + num_inference_steps, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + if num_inference_steps is None: + raise ValueError("`num_inference_steps` cannot be None.") + elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." 
+ ) + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." 
+ ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
+ ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents + def prepare_latents( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + # Offload text encoder if `enable_model_cpu_offload` was enabled + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.text_encoder_2.to("cpu") + torch.cuda.empty_cache() + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + if add_noise: + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list( + negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) + ) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." + ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." 
+ ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + control_image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 0.8, + num_inference_steps: int = 50, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 0.8, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + 
aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The initial image will be used as the starting point for the image generation process. Can also accept + image latents as `image`, if passing latents directly, it will not be encoded again. + control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If + the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can + also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If + height and/or width are passed, `image` is resized according to them. If multiple ControlNets are + specified in init, images must be passed as a list such that each element of the list can be correctly + batched for input to a single controlnet. + height (`int`, *optional*, defaults to the size of control_image): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to the size of control_image): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). 
Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. 
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original unet. If multiple ControlNets are specified in init, you can set the + corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + In this mode, the ControlNet encoder will try best to recognize the content of the input image even if + you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the controlnet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the controlnet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. 
Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeine class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple` + containing the output images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. 
Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + control_image, + strength, + num_inference_steps, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3.1. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image and controlnet_conditioning_image + image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = control_image.shape[-2:] + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + control_images.append(control_image_) + + control_image = control_images + height, width = control_image[0].shape[-2:] + else: + assert False + + # 5. 
Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + True, + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(control_image, list): + original_size = original_size or control_image[0].shape[-2:] + else: + original_size = original_size or control_image.shape[-2:] + target_size = target_size or (height, width) + + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + add_text_embeds = pooled_prompt_embeds + + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. 
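+                        # Only the conditional half of the prompt/time embeddings is fed to the
+                        # ControlNet here; zeros are concatenated for the unconditional half further
+                        # below, so the residuals only steer the text-conditioned noise prediction.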
+ control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + controlnet_added_cond_kwargs = { + "text_embeds": add_text_embeds.chunk(2)[1], + "time_ids": add_time_ids.chunk(2)[1], + } + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Infered ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. + down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = 
self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + return StableDiffusionXLPipelineOutput(images=image) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py new file mode 100644 index 000000000..5b6fc2b39 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py @@ -0,0 +1,532 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
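+# Flax/JAX port of the Stable Diffusion ControlNet pipeline: model weights are passed in
+# explicitly via `params`, and generation can be wrapped in `jax.pmap` (see `_p_generate`
+# near the end of this file) to run one sharded batch per device.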
+ +import warnings +from functools import partial +from typing import Dict, List, Optional, Union + +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict +from flax.jax_utils import unreplicate +from flax.training.common_utils import shard +from PIL import Image +from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel + +from ...models import FlaxAutoencoderKL, FlaxControlNetModel, FlaxUNet2DConditionModel +from ...schedulers import ( + FlaxDDIMScheduler, + FlaxDPMSolverMultistepScheduler, + FlaxLMSDiscreteScheduler, + FlaxPNDMScheduler, +) +from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring +from ..pipeline_flax_utils import FlaxDiffusionPipeline +from ..stable_diffusion import FlaxStableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker_flax import FlaxStableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +# Set to True to use python for loop instead of jax.fori_loop for easier debugging +DEBUG = False + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import jax + >>> import numpy as np + >>> import jax.numpy as jnp + >>> from flax.jax_utils import replicate + >>> from flax.training.common_utils import shard + >>> from diffusers.utils import load_image, make_image_grid + >>> from PIL import Image + >>> from diffusers import FlaxStableDiffusionControlNetPipeline, FlaxControlNetModel + + + >>> def create_key(seed=0): + ... return jax.random.PRNGKey(seed) + + + >>> rng = create_key(0) + + >>> # get canny image + >>> canny_image = load_image( + ... "https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_10_output_0.jpeg" + ... ) + + >>> prompts = "best quality, extremely detailed" + >>> negative_prompts = "monochrome, lowres, bad anatomy, worst quality, low quality" + + >>> # load control net and stable diffusion v1-5 + >>> controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( + ... "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32 + ... ) + >>> pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32 + ... ) + >>> params["controlnet"] = controlnet_params + + >>> num_samples = jax.device_count() + >>> rng = jax.random.split(rng, jax.device_count()) + + >>> prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) + >>> negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples) + >>> processed_image = pipe.prepare_image_inputs([canny_image] * num_samples) + + >>> p_params = replicate(params) + >>> prompt_ids = shard(prompt_ids) + >>> negative_prompt_ids = shard(negative_prompt_ids) + >>> processed_image = shard(processed_image) + + >>> output = pipe( + ... prompt_ids=prompt_ids, + ... image=processed_image, + ... params=p_params, + ... prng_seed=rng, + ... num_inference_steps=50, + ... neg_prompt_ids=negative_prompt_ids, + ... jit=True, + ... ).images + + >>> output_images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:]))) + >>> output_images = make_image_grid(output_images, num_samples // 4, 4) + >>> output_images.save("generated_image.png") + ``` +""" + + +class FlaxStableDiffusionControlNetPipeline(FlaxDiffusionPipeline): + r""" + Flax-based pipeline for text-to-image generation using Stable Diffusion with ControlNet Guidance. + + This model inherits from [`FlaxDiffusionPipeline`]. 
Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`FlaxAutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.FlaxCLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`FlaxUNet2DConditionModel`]): + A `FlaxUNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`FlaxControlNetModel`]: + Provides additional conditioning to the `unet` during the denoising process. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or + [`FlaxDPMSolverMultistepScheduler`]. + safety_checker ([`FlaxStableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + def __init__( + self, + vae: FlaxAutoencoderKL, + text_encoder: FlaxCLIPTextModel, + tokenizer: CLIPTokenizer, + unet: FlaxUNet2DConditionModel, + controlnet: FlaxControlNetModel, + scheduler: Union[ + FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler + ], + safety_checker: FlaxStableDiffusionSafetyChecker, + feature_extractor: CLIPFeatureExtractor, + dtype: jnp.dtype = jnp.float32, + ): + super().__init__() + self.dtype = dtype + + if safety_checker is None: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def prepare_text_inputs(self, prompt: Union[str, List[str]]): + if not isinstance(prompt, (str, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + text_input = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + + return text_input.input_ids + + def prepare_image_inputs(self, image: Union[Image.Image, List[Image.Image]]): + if not isinstance(image, (Image.Image, list)): + raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}") + + if isinstance(image, Image.Image): + image = [image] + + processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image]) + + return processed_images + + def _get_has_nsfw_concepts(self, features, params): + has_nsfw_concepts = self.safety_checker(features, params) + return has_nsfw_concepts + + def _run_safety_checker(self, images, safety_model_params, jit=False): + # safety_model_params should already be replicated when jit is True + pil_images = [Image.fromarray(image) for image in images] + features = self.feature_extractor(pil_images, return_tensors="np").pixel_values + + if jit: + features = shard(features) + has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) + has_nsfw_concepts = unshard(has_nsfw_concepts) + safety_model_params = unreplicate(safety_model_params) + else: + has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) + + images_was_copied = False + for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): + if has_nsfw_concept: + if not images_was_copied: + images_was_copied = True + images = images.copy() + + images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image + + if any(has_nsfw_concepts): + warnings.warn( + "Potential NSFW content was detected in one or more images. A black image will be returned" + " instead. Try again with a different prompt and/or seed." 
+ ) + + return images, has_nsfw_concepts + + def _generate( + self, + prompt_ids: jnp.ndarray, + image: jnp.ndarray, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int, + guidance_scale: float, + latents: Optional[jnp.ndarray] = None, + neg_prompt_ids: Optional[jnp.ndarray] = None, + controlnet_conditioning_scale: float = 1.0, + ): + height, width = image.shape[-2:] + if height % 64 != 0 or width % 64 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 64 but are {height} and {width}.") + + # get prompt text embeddings + prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0] + + # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0` + # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0` + batch_size = prompt_ids.shape[0] + + max_length = prompt_ids.shape[-1] + + if neg_prompt_ids is None: + uncond_input = self.tokenizer( + [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np" + ).input_ids + else: + uncond_input = neg_prompt_ids + negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0] + context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) + + image = jnp.concatenate([image] * 2) + + latents_shape = ( + batch_size, + self.unet.config.in_channels, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if latents is None: + latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + + def loop_body(step, args): + latents, scheduler_state = args + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + latents_input = jnp.concatenate([latents] * 2) + + t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] + timestep = jnp.broadcast_to(t, latents_input.shape[0]) + + latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) + + down_block_res_samples, mid_block_res_sample = self.controlnet.apply( + {"params": params["controlnet"]}, + jnp.array(latents_input), + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=context, + controlnet_cond=image, + conditioning_scale=controlnet_conditioning_scale, + return_dict=False, + ) + + # predict the noise residual + noise_pred = self.unet.apply( + {"params": params["unet"]}, + jnp.array(latents_input), + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=context, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + ).sample + + # perform guidance + noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) + noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() + return latents, scheduler_state + + scheduler_state = self.scheduler.set_timesteps( + params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape + ) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * params["scheduler"].init_noise_sigma + + if DEBUG: + # run with python for loop + for i in range(num_inference_steps): + latents, scheduler_state = loop_body(i, (latents, scheduler_state)) + else: + latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state)) + + # scale and decode the image latents with vae + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample + + image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) + return image + + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt_ids: jnp.ndarray, + image: jnp.ndarray, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int = 50, + guidance_scale: Union[float, jnp.ndarray] = 7.5, + latents: jnp.ndarray = None, + neg_prompt_ids: jnp.ndarray = None, + controlnet_conditioning_scale: Union[float, jnp.ndarray] = 1.0, + return_dict: bool = True, + jit: bool = False, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt_ids (`jnp.ndarray`): + The prompt or prompts to guide the image generation. + image (`jnp.ndarray`): + Array representing the ControlNet input condition to provide guidance to the `unet` for generation. + params (`Dict` or `FrozenDict`): + Dictionary containing the model parameters/weights. + prng_seed (`jax.Array`): + Array containing random number generator key. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. 
Guidance scale is enabled when `guidance_scale > 1`. + latents (`jnp.ndarray`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + array is generated by sampling using the supplied random `generator`. + controlnet_conditioning_scale (`float` or `jnp.ndarray`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of + a plain tuple. + jit (`bool`, defaults to `False`): + Whether to run `pmap` versions of the generation and safety scoring functions. + + + + This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a + future release. + + + + Examples: + + Returns: + [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated images + and the second element is a list of `bool`s indicating whether the corresponding generated image + contains "not-safe-for-work" (nsfw) content. + """ + + height, width = image.shape[-2:] + + if isinstance(guidance_scale, float): + # Convert to a tensor so each device gets a copy. Follow the prompt_ids for + # shape information, as they may be sharded (when `jit` is `True`), or not. + guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) + if len(prompt_ids.shape) > 2: + # Assume sharded + guidance_scale = guidance_scale[:, None] + + if isinstance(controlnet_conditioning_scale, float): + # Convert to a tensor so each device gets a copy. Follow the prompt_ids for + # shape information, as they may be sharded (when `jit` is `True`), or not. 
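+            # Under `jit=True` the inputs carry a leading device axis, so the trailing axis added
+            # below leaves each device with a (1,)-shaped scale that broadcasts against its batch.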
+ controlnet_conditioning_scale = jnp.array([controlnet_conditioning_scale] * prompt_ids.shape[0]) + if len(prompt_ids.shape) > 2: + # Assume sharded + controlnet_conditioning_scale = controlnet_conditioning_scale[:, None] + + if jit: + images = _p_generate( + self, + prompt_ids, + image, + params, + prng_seed, + num_inference_steps, + guidance_scale, + latents, + neg_prompt_ids, + controlnet_conditioning_scale, + ) + else: + images = self._generate( + prompt_ids, + image, + params, + prng_seed, + num_inference_steps, + guidance_scale, + latents, + neg_prompt_ids, + controlnet_conditioning_scale, + ) + + if self.safety_checker is not None: + safety_params = params["safety_checker"] + images_uint8_casted = (images * 255).round().astype("uint8") + num_devices, batch_size = images.shape[:2] + + images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) + images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit) + images = np.array(images) + + # block images + if any(has_nsfw_concept): + for i, is_nsfw in enumerate(has_nsfw_concept): + if is_nsfw: + images[i] = np.asarray(images_uint8_casted[i]) + + images = images.reshape(num_devices, batch_size, height, width, 3) + else: + images = np.asarray(images) + has_nsfw_concept = False + + if not return_dict: + return (images, has_nsfw_concept) + + return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) + + +# Static argnums are pipe, num_inference_steps. A change would trigger recompilation. +# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). +@partial( + jax.pmap, + in_axes=(None, 0, 0, 0, 0, None, 0, 0, 0, 0), + static_broadcasted_argnums=(0, 5), +) +def _p_generate( + pipe, + prompt_ids, + image, + params, + prng_seed, + num_inference_steps, + guidance_scale, + latents, + neg_prompt_ids, + controlnet_conditioning_scale, +): + return pipe._generate( + prompt_ids, + image, + params, + prng_seed, + num_inference_steps, + guidance_scale, + latents, + neg_prompt_ids, + controlnet_conditioning_scale, + ) + + +@partial(jax.pmap, static_broadcasted_argnums=(0,)) +def _p_get_has_nsfw_concepts(pipe, features, params): + return pipe._get_has_nsfw_concepts(features, params) + + +def unshard(x: jnp.ndarray): + # einops.rearrange(x, 'd b ... 
-> (d b) ...') + num_devices, batch_size = x.shape[:2] + rest = x.shape[2:] + return x.reshape(num_devices * batch_size, *rest) + + +def preprocess(image, dtype): + image = image.convert("RGB") + w, h = image.size + w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64 + image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) + image = jnp.array(image).astype(dtype) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + return image diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dance_diffusion/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dance_diffusion/__init__.py new file mode 100644 index 000000000..0d3e466df --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dance_diffusion/__init__.py @@ -0,0 +1,18 @@ +from typing import TYPE_CHECKING + +from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_dance_diffusion": ["DanceDiffusionPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_dance_diffusion import DanceDiffusionPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py new file mode 100644 index 000000000..bcd36c412 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py @@ -0,0 +1,156 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List, Optional, Tuple, Union + +import torch + +from ...utils import logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class DanceDiffusionPipeline(DiffusionPipeline): + r""" + Pipeline for audio generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + unet ([`UNet1DModel`]): + A `UNet1DModel` to denoise the encoded audio. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of + [`IPNDMScheduler`]. 
+ """ + + model_cpu_offload_seq = "unet" + + def __init__(self, unet, scheduler): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + num_inference_steps: int = 100, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + audio_length_in_s: Optional[float] = None, + return_dict: bool = True, + ) -> Union[AudioPipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of audio samples to generate. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at + the expense of slower inference. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`): + The length of the generated audio sample in seconds. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. + + Example: + + ```py + from diffusers import DiffusionPipeline + from scipy.io.wavfile import write + + model_id = "harmonai/maestro-150k" + pipe = DiffusionPipeline.from_pretrained(model_id) + pipe = pipe.to("cuda") + + audios = pipe(audio_length_in_s=4.0).audios + + # To save locally + for i, audio in enumerate(audios): + write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose()) + + # To dislay in google colab + import IPython.display as ipd + + for audio in audios: + display(ipd.Audio(audio, rate=pipe.unet.sample_rate)) + ``` + + Returns: + [`~pipelines.AudioPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated audio. + """ + + if audio_length_in_s is None: + audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate + + sample_size = audio_length_in_s * self.unet.config.sample_rate + + down_scale_factor = 2 ** len(self.unet.up_blocks) + if sample_size < 3 * down_scale_factor: + raise ValueError( + f"{audio_length_in_s} is too small. Make sure it's bigger or equal to" + f" {3 * down_scale_factor / self.unet.config.sample_rate}." + ) + + original_sample_size = int(sample_size) + if sample_size % down_scale_factor != 0: + sample_size = ( + (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 + ) * down_scale_factor + logger.info( + f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" + f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" + " process." + ) + sample_size = int(sample_size) + + dtype = next(self.unet.parameters()).dtype + shape = (batch_size, self.unet.config.in_channels, sample_size) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype) + + # set step values + self.scheduler.set_timesteps(num_inference_steps, device=audio.device) + self.scheduler.timesteps = self.scheduler.timesteps.to(dtype) + + for t in self.progress_bar(self.scheduler.timesteps): + # 1. predict noise model_output + model_output = self.unet(audio, t).sample + + # 2. compute previous audio sample: x_t -> t_t-1 + audio = self.scheduler.step(model_output, t, audio).prev_sample + + audio = audio.clamp(-1, 1).float().cpu().numpy() + + audio = audio[:, :, :original_sample_size] + + if not return_dict: + return (audio,) + + return AudioPipelineOutput(audios=audio) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddim/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddim/__init__.py new file mode 100644 index 000000000..d9eede47c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddim/__init__.py @@ -0,0 +1,18 @@ +from typing import TYPE_CHECKING + +from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_ddim": ["DDIMPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_ddim import DDIMPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddim/pipeline_ddim.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddim/pipeline_ddim.py new file mode 100644 index 000000000..a3b967ed3 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddim/pipeline_ddim.py @@ -0,0 +1,154 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Tuple, Union + +import torch + +from ...schedulers import DDIMScheduler +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class DDIMPipeline(DiffusionPipeline): + r""" + Pipeline for image generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of + [`DDPMScheduler`], or [`DDIMScheduler`]. 
+ """ + + model_cpu_offload_seq = "unet" + + def __init__(self, unet, scheduler): + super().__init__() + + # make sure scheduler can always be converted to DDIM + scheduler = DDIMScheduler.from_config(scheduler.config) + + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + eta: float = 0.0, + num_inference_steps: int = 50, + use_clipped_model_output: Optional[bool] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of images to generate. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. A value of `0` corresponds to + DDIM and `1` corresponds to DDPM. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + use_clipped_model_output (`bool`, *optional*, defaults to `None`): + If `True` or `False`, see documentation for [`DDIMScheduler.step`]. If `None`, nothing is passed + downstream to the scheduler (use `None` for schedulers which don't support this argument). + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Example: + + ```py + >>> from diffusers import DDIMPipeline + >>> import PIL.Image + >>> import numpy as np + + >>> # load model and scheduler + >>> pipe = DDIMPipeline.from_pretrained("fusing/ddim-lsun-bedroom") + + >>> # run pipeline in inference (sample random noise and denoise) + >>> image = pipe(eta=0.0, num_inference_steps=50) + + >>> # process image to PIL + >>> image_processed = image.cpu().permute(0, 2, 3, 1) + >>> image_processed = (image_processed + 1.0) * 127.5 + >>> image_processed = image_processed.numpy().astype(np.uint8) + >>> image_pil = PIL.Image.fromarray(image_processed[0]) + + >>> # save image + >>> image_pil.save("test.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + + # Sample gaussian noise to begin loop + if isinstance(self.unet.config.sample_size, int): + image_shape = ( + batch_size, + self.unet.config.in_channels, + self.unet.config.sample_size, + self.unet.config.sample_size, + ) + else: + image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype) + + # set step values + self.scheduler.set_timesteps(num_inference_steps) + + for t in self.progress_bar(self.scheduler.timesteps): + # 1. predict noise model_output + model_output = self.unet(image, t).sample + + # 2. predict previous mean of image x_t-1 and add variance depending on eta + # eta corresponds to η in paper and should be between [0, 1] + # do x_t -> x_t-1 + image = self.scheduler.step( + model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator + ).prev_sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddpm/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddpm/__init__.py new file mode 100644 index 000000000..eb41dd1dc --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddpm/__init__.py @@ -0,0 +1,22 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + _LazyModule, +) + + +_import_structure = {"pipeline_ddpm": ["DDPMPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_ddpm import DDPMPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddpm/pipeline_ddpm.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddpm/pipeline_ddpm.py new file mode 100644 index 000000000..093a3cdfe --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ddpm/pipeline_ddpm.py @@ -0,0 +1,127 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List, Optional, Tuple, Union + +import torch + +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class DDPMPipeline(DiffusionPipeline): + r""" + Pipeline for image generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of + [`DDPMScheduler`], or [`DDIMScheduler`]. 
+ """ + + model_cpu_offload_seq = "unet" + + def __init__(self, unet, scheduler): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + num_inference_steps: int = 1000, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of images to generate. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + num_inference_steps (`int`, *optional*, defaults to 1000): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Example: + + ```py + >>> from diffusers import DDPMPipeline + + >>> # load model and scheduler + >>> pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256") + + >>> # run pipeline in inference (sample random noise and denoise) + >>> image = pipe().images[0] + + >>> # save image + >>> image.save("ddpm_generated_image.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + # Sample gaussian noise to begin loop + if isinstance(self.unet.config.sample_size, int): + image_shape = ( + batch_size, + self.unet.config.in_channels, + self.unet.config.sample_size, + self.unet.config.sample_size, + ) + else: + image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) + + if self.device.type == "mps": + # randn does not work reproducibly on mps + image = randn_tensor(image_shape, generator=generator) + image = image.to(self.device) + else: + image = randn_tensor(image_shape, generator=generator, device=self.device) + + # set step values + self.scheduler.set_timesteps(num_inference_steps) + + for t in self.progress_bar(self.scheduler.timesteps): + # 1. predict noise model_output + model_output = self.unet(image, t).sample + + # 2. 
compute previous image: x_t -> x_t-1 + image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/__init__.py new file mode 100644 index 000000000..79aab1fb1 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/__init__.py @@ -0,0 +1,85 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = { + "timesteps": [ + "fast27_timesteps", + "smart100_timesteps", + "smart185_timesteps", + "smart27_timesteps", + "smart50_timesteps", + "super100_timesteps", + "super27_timesteps", + "super40_timesteps", + ] +} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_if"] = ["IFPipeline"] + _import_structure["pipeline_if_img2img"] = ["IFImg2ImgPipeline"] + _import_structure["pipeline_if_img2img_superresolution"] = ["IFImg2ImgSuperResolutionPipeline"] + _import_structure["pipeline_if_inpainting"] = ["IFInpaintingPipeline"] + _import_structure["pipeline_if_inpainting_superresolution"] = ["IFInpaintingSuperResolutionPipeline"] + _import_structure["pipeline_if_superresolution"] = ["IFSuperResolutionPipeline"] + _import_structure["pipeline_output"] = ["IFPipelineOutput"] + _import_structure["safety_checker"] = ["IFSafetyChecker"] + _import_structure["watermark"] = ["IFWatermarker"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_if import IFPipeline + from .pipeline_if_img2img import IFImg2ImgPipeline + from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline + from .pipeline_if_inpainting import IFInpaintingPipeline + from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline + from .pipeline_if_superresolution import IFSuperResolutionPipeline + from .pipeline_output import IFPipelineOutput + from .safety_checker import IFSafetyChecker + from .timesteps import ( + fast27_timesteps, + smart27_timesteps, + smart50_timesteps, + smart100_timesteps, + smart185_timesteps, + super27_timesteps, + super40_timesteps, + super100_timesteps, + ) + from .watermark import IFWatermarker + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git 
a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py new file mode 100644 index 000000000..7adf9e9c4 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py @@ -0,0 +1,788 @@ +import html +import inspect +import re +import urllib.parse as ul +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer + +from ...loaders import LoraLoaderMixin +from ...models import UNet2DConditionModel +from ...schedulers import DDPMScheduler +from ...utils import ( + BACKENDS_MAPPING, + is_accelerate_available, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import IFPipelineOutput +from .safety_checker import IFSafetyChecker +from .watermark import IFWatermarker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline + >>> from diffusers.utils import pt_to_pil + >>> import torch + + >>> pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"' + >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) + + >>> image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained( + ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16 + ... ) + >>> super_res_1_pipe.enable_model_cpu_offload() + + >>> image = super_res_1_pipe( + ... image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt" + ... ).images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> safety_modules = { + ... "feature_extractor": pipe.feature_extractor, + ... "safety_checker": pipe.safety_checker, + ... "watermarker": pipe.watermarker, + ... } + >>> super_res_2_pipe = DiffusionPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16 + ... ) + >>> super_res_2_pipe.enable_model_cpu_offload() + + >>> image = super_res_2_pipe( + ... prompt=prompt, + ... image=image, + ... 
).images + >>> image[0].save("./if_stage_II.png") + ``` +""" + + +class IFPipeline(DiffusionPipeline, LoraLoaderMixin): + tokenizer: T5Tokenizer + text_encoder: T5EncoderModel + + unet: UNet2DConditionModel + scheduler: DDPMScheduler + + feature_extractor: Optional[CLIPImageProcessor] + safety_checker: Optional[IFSafetyChecker] + + watermarker: Optional[IFWatermarker] + + bad_punct_regex = re.compile( + r"[" + + "#®•©™&@·º½¾¿¡§~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"] + model_cpu_offload_seq = "text_encoder->unet" + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + safety_checker: Optional[IFSafetyChecker], + feature_extractor: Optional[CLIPImageProcessor], + watermarker: Optional[IFWatermarker], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the IF license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + watermarker=watermarker, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def remove_all_hooks(self): + if is_accelerate_available(): + from accelerate.hooks import remove_hook_from_module + else: + raise ImportError("Please install accelerate via `pip install accelerate`") + + for model in [self.text_encoder, self.unet, self.safety_checker]: + if model is not None: + remove_hook_from_module(model, recurse=True) + + self.unet_offload_hook = None + self.text_encoder_offload_hook = None + self.final_offload_hook = None + + @torch.no_grad() + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + clean_caption: bool = False, + ): + r""" + Encodes the prompt into text encoder hidden states. 
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF + max_length = 77 + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.unet is not None: + dtype = self.unet.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free 
guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + return prompt_embeds, negative_prompt_embeds + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
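+ # Note: IF's default scheduler is DDPMScheduler, whose `step()` accepts `generator` but not `eta`,
+ # so in practice only `generator` is forwarded here unless a DDIM-style scheduler has been swapped in.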
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def prepare_intermediate_images(self, batch_size, num_channels, height, width, dtype, device, generator): + shape = (batch_size, num_channels, height, width) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + intermediate_images = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + intermediate_images = intermediate_images * self.scheduler.init_noise_sigma + return intermediate_images + + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0—31EF CJK Strokes + # 31F0—31FF Katakana Phonetic Extensions + # 3200—32FF Enclosed CJK Letters and Months + # 3300—33FF CJK Compatibility + # 3400—4DBF CJK Unified Ideographs Extension A + # 4DC0—4DFF Yijing Hexagram Symbols + # 4E00—9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # все виды тире / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # кавычки к одному стандарту + caption = re.sub(r"[`´«»“”¨]", '"', caption) + caption = re.sub(r"[‘’]", "'", caption) + + # " + caption = re.sub(r""?", "", caption) + # & + caption = re.sub(r"&", "", caption) + + # ip adresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." 
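+ # i.e. standalone runs of six or more digits are dropped entirely by the next substitution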
+ caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + num_inference_steps: int = 100, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + height: Optional[int] = None, + width: Optional[int] = None, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + clean_caption: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. 
of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + height (`int`, *optional*, defaults to self.unet.config.sample_size): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size): + The width in pixels of the generated image. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + Returns: + [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. 
When + returning a tuple, the first element is a list with the generated images, and the second element is a list + of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) + or watermarked content, according to the `safety_checker`. + """ + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # 2. Define call parameters + height = height or self.unet.config.sample_size + width = width or self.unet.config.sample_size + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare intermediate images + intermediate_images = self.prepare_intermediate_images( + batch_size * num_images_per_prompt, + self.unet.config.in_channels, + height, + width, + prompt_embeds.dtype, + device, + generator, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # HACK: see comment in `enable_model_cpu_offload` + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + model_input = ( + torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images + ) + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet( + model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if self.scheduler.config.variance_type not in ["learned", "learned_range"]: + noise_pred, _ = noise_pred.split(model_input.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + intermediate_images = self.scheduler.step( + noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False + )[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, intermediate_images) + + image = intermediate_images + + if output_type == "pil": + # 8. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 9. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 10. Convert to PIL + image = self.numpy_to_pil(image) + + # 11. Apply watermark + if self.watermarker is not None: + image = self.watermarker.apply_watermark(image, self.unet.config.sample_size) + elif output_type == "pt": + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + else: + # 8. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 9. 
Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, nsfw_detected, watermark_detected) + + return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py new file mode 100644 index 000000000..ccc7b1d15 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py @@ -0,0 +1,910 @@ +import html +import inspect +import re +import urllib.parse as ul +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer + +from ...loaders import LoraLoaderMixin +from ...models import UNet2DConditionModel +from ...schedulers import DDPMScheduler +from ...utils import ( + BACKENDS_MAPPING, + PIL_INTERPOLATION, + is_accelerate_available, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import IFPipelineOutput +from .safety_checker import IFSafetyChecker +from .watermark import IFWatermarker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: + w, h = images.size + + coef = w / h + + w, h = img_size, img_size + + if coef >= 1: + w = int(round(img_size / 8 * coef) * 8) + else: + h = int(round(img_size / 8 / coef) * 8) + + images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None) + + return images + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline + >>> from diffusers.utils import pt_to_pil + >>> import torch + >>> from PIL import Image + >>> import requests + >>> from io import BytesIO + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + >>> response = requests.get(url) + >>> original_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> original_image = original_image.resize((768, 512)) + + >>> pipe = IFImg2ImgPipeline.from_pretrained( + ... "DeepFloyd/IF-I-XL-v1.0", + ... variant="fp16", + ... torch_dtype=torch.float16, + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "A fantasy landscape in style minecraft" + >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) + + >>> image = pipe( + ... image=original_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... output_type="pt", + ... ).images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained( + ... "DeepFloyd/IF-II-L-v1.0", + ... text_encoder=None, + ... variant="fp16", + ... torch_dtype=torch.float16, + ... 
) + >>> super_res_1_pipe.enable_model_cpu_offload() + + >>> image = super_res_1_pipe( + ... image=image, + ... original_image=original_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... ).images + >>> image[0].save("./if_stage_II.png") + ``` +""" + + +class IFImg2ImgPipeline(DiffusionPipeline, LoraLoaderMixin): + tokenizer: T5Tokenizer + text_encoder: T5EncoderModel + + unet: UNet2DConditionModel + scheduler: DDPMScheduler + + feature_extractor: Optional[CLIPImageProcessor] + safety_checker: Optional[IFSafetyChecker] + + watermarker: Optional[IFWatermarker] + + bad_punct_regex = re.compile( + r"[" + + "#®•©™&@·º½¾¿¡§~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"] + model_cpu_offload_seq = "text_encoder->unet" + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + safety_checker: Optional[IFSafetyChecker], + feature_extractor: Optional[CLIPImageProcessor], + watermarker: Optional[IFWatermarker], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the IF license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + watermarker=watermarker, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks + def remove_all_hooks(self): + if is_accelerate_available(): + from accelerate.hooks import remove_hook_from_module + else: + raise ImportError("Please install accelerate via `pip install accelerate`") + + for model in [self.text_encoder, self.unet, self.safety_checker]: + if model is not None: + remove_hook_from_module(model, recurse=True) + + self.unet_offload_hook = None + self.text_encoder_offload_hook = None + self.final_offload_hook = None + + @torch.no_grad() + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + clean_caption: bool = False, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." 
+ ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF + max_length = 77 + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.unet is not None: + dtype = self.unet.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + batch_size, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if isinstance(image, list): + check_image_type = image[0] + else: + check_image_type = image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" + f" {type(check_image_type)}" + ) + + if isinstance(image, list): + image_batch_size = len(image) + elif isinstance(image, torch.Tensor): + image_batch_size = image.shape[0] + elif isinstance(image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(image, np.ndarray): + image_batch_size = image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}") + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0—31EF CJK Strokes + # 31F0—31FF Katakana Phonetic 
Extensions + # 3200—32FF Enclosed CJK Letters and Months + # 3300—33FF CJK Compatibility + # 3400—4DBF CJK Unified Ideographs Extension A + # 4DC0—4DFF Yijing Hexagram Symbols + # 4E00—9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # все виды тире / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # кавычки к одному стандарту + caption = re.sub(r"[`´«»“”¨]", '"', caption) + caption = re.sub(r"[‘’]", "'", caption) + + # " + caption = re.sub(r""?", "", caption) + # & + caption = re.sub(r"&", "", caption) + + # ip adresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." + caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... 
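+ # the following substitution drops resolution-style tokens such as "1280x720" (the class [xх×] also matches the Cyrillic х and the × sign)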
+ + caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + def preprocess_image(self, image: PIL.Image.Image) -> torch.Tensor: + if not isinstance(image, list): + image = [image] + + def numpy_to_pt(images): + if images.ndim == 3: + images = images[..., None] + + images = torch.from_numpy(images.transpose(0, 3, 1, 2)) + return images + + if isinstance(image[0], PIL.Image.Image): + new_image = [] + + for image_ in image: + image_ = image_.convert("RGB") + image_ = resize(image_, self.unet.sample_size) + image_ = np.array(image_) + image_ = image_.astype(np.float32) + image_ = image_ / 127.5 - 1 + new_image.append(image_) + + image = new_image + + image = np.stack(image, axis=0) # to np + image = numpy_to_pt(image) # to pt + + elif isinstance(image[0], np.ndarray): + image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) + image = numpy_to_pt(image) + + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) + + return image + + def get_timesteps(self, num_inference_steps, strength): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + def prepare_intermediate_images( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None + ): + _, channels, height, width = image.shape + + batch_size = batch_size * num_images_per_prompt + + shape = (batch_size, channels, height, width) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + image = self.scheduler.add_noise(image, noise, timestep) + + return image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: Union[ + PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] + ] = None, + strength: float = 0.7, + num_inference_steps: int = 80, + timesteps: List[int] = None, + guidance_scale: float = 10.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + clean_caption: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.FloatTensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + strength (`float`, *optional*, defaults to 0.7): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 80): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 10.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. 
Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + Returns: + [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When + returning a tuple, the first element is a list with the generated images, and the second element is a list + of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) + or watermarked content, according to the `safety_checker`. + """ + # 1. Check inputs. Raise error if not correct + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + self.check_inputs( + prompt, image, batch_size, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # 2. Define call parameters + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. 
Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + dtype = prompt_embeds.dtype + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength) + + # 5. Prepare intermediate images + image = self.preprocess_image(image) + image = image.to(device=device, dtype=dtype) + + noise_timestep = timesteps[0:1] + noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) + + intermediate_images = self.prepare_intermediate_images( + image, noise_timestep, batch_size, num_images_per_prompt, dtype, device, generator + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # HACK: see comment in `enable_model_cpu_offload` + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + model_input = ( + torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images + ) + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet( + model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if self.scheduler.config.variance_type not in ["learned", "learned_range"]: + noise_pred, _ = noise_pred.split(model_input.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + intermediate_images = self.scheduler.step( + noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False + )[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, intermediate_images) + + image = intermediate_images + + if output_type == "pil": + # 8. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 9. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 10. 
Convert to PIL + image = self.numpy_to_pil(image) + + # 11. Apply watermark + if self.watermarker is not None: + self.watermarker.apply_watermark(image, self.unet.config.sample_size) + elif output_type == "pt": + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + else: + # 8. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 9. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, nsfw_detected, watermark_detected) + + return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py new file mode 100644 index 000000000..b4ce5831a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py @@ -0,0 +1,1029 @@ +import html +import inspect +import re +import urllib.parse as ul +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer + +from ...loaders import LoraLoaderMixin +from ...models import UNet2DConditionModel +from ...schedulers import DDPMScheduler +from ...utils import ( + BACKENDS_MAPPING, + PIL_INTERPOLATION, + is_accelerate_available, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import IFPipelineOutput +from .safety_checker import IFSafetyChecker +from .watermark import IFWatermarker + + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.resize +def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: + w, h = images.size + + coef = w / h + + w, h = img_size, img_size + + if coef >= 1: + w = int(round(img_size / 8 * coef) * 8) + else: + h = int(round(img_size / 8 / coef) * 8) + + images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None) + + return images + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline + >>> from diffusers.utils import pt_to_pil + >>> import torch + >>> from PIL import Image + >>> import requests + >>> from io import BytesIO + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + >>> response = requests.get(url) + >>> original_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> original_image = original_image.resize((768, 512)) + + >>> pipe = IFImg2ImgPipeline.from_pretrained( + ... "DeepFloyd/IF-I-XL-v1.0", + ... variant="fp16", + ... 
torch_dtype=torch.float16, + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "A fantasy landscape in style minecraft" + >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) + + >>> image = pipe( + ... image=original_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... output_type="pt", + ... ).images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained( + ... "DeepFloyd/IF-II-L-v1.0", + ... text_encoder=None, + ... variant="fp16", + ... torch_dtype=torch.float16, + ... ) + >>> super_res_1_pipe.enable_model_cpu_offload() + + >>> image = super_res_1_pipe( + ... image=image, + ... original_image=original_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... ).images + >>> image[0].save("./if_stage_II.png") + ``` +""" + + +class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin): + tokenizer: T5Tokenizer + text_encoder: T5EncoderModel + + unet: UNet2DConditionModel + scheduler: DDPMScheduler + image_noising_scheduler: DDPMScheduler + + feature_extractor: Optional[CLIPImageProcessor] + safety_checker: Optional[IFSafetyChecker] + + watermarker: Optional[IFWatermarker] + + bad_punct_regex = re.compile( + r"[" + + "#®•©™&@·º½¾¿¡§~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor"] + model_cpu_offload_seq = "text_encoder->unet" + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + image_noising_scheduler: DDPMScheduler, + safety_checker: Optional[IFSafetyChecker], + feature_extractor: Optional[CLIPImageProcessor], + watermarker: Optional[IFWatermarker], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the IF license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if unet.config.in_channels != 6: + logger.warning( + "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`." 
+ ) + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + image_noising_scheduler=image_noising_scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + watermarker=watermarker, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks + def remove_all_hooks(self): + if is_accelerate_available(): + from accelerate.hooks import remove_hook_from_module + else: + raise ImportError("Please install accelerate via `pip install accelerate`") + + for model in [self.text_encoder, self.unet, self.safety_checker]: + if model is not None: + remove_hook_from_module(model, recurse=True) + + self.unet_offload_hook = None + self.text_encoder_offload_hook = None + self.final_offload_hook = None + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("<person>", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @<nickname> + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0—31EF CJK Strokes + # 31F0—31FF Katakana Phonetic Extensions + # 3200—32FF Enclosed CJK Letters and Months + # 3300—33FF CJK Compatibility + # 3400—4DBF CJK Unified Ideographs Extension A + # 4DC0—4DFF Yijing Hexagram Symbols + # 4E00—9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # все виды тире / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # кавычки к одному стандарту + caption = re.sub(r"[`´«»“”¨]", '"', caption) + caption = re.sub(r"[‘’]", "'", caption) +
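As an aside, here is a minimal runnable sketch of the quote and HTML-entity normalization that `_clean_caption` performs around this point; the sample caption and the helper name `clean_quotes_and_entities` are illustrative assumptions, not part of the diffusers API:
```py
import html
import re

def clean_quotes_and_entities(caption: str) -> str:
    # Unify curly/angled quotes to plain ASCII quotes, as in the substitutions above.
    caption = re.sub(r"[`´«»“”¨]", '"', caption)
    caption = re.sub(r"[‘’]", "'", caption)
    # Drop leftover HTML entities such as &quot; / &amp, mirroring the next two substitutions.
    caption = re.sub(r"&quot;?", "", caption)
    caption = re.sub(r"&amp", "", caption)
    # Decode anything that survived escaping and trim whitespace.
    return html.unescape(caption).strip()

print(clean_quotes_and_entities("«Hello» &quot;world&quot;, it’s fine"))  # -> "Hello" world, it's fine
```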
+ # " + caption = re.sub(r""?", "", caption) + # & + caption = re.sub(r"&", "", caption) + + # ip adresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." + caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + @torch.no_grad() + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + clean_caption: bool = False, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
+ prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF + max_length = 77 + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.unet is not None: + dtype = self.unet.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + original_image, + batch_size, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # image + + if isinstance(image, list): + check_image_type = image[0] + else: + check_image_type = image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" + f" {type(check_image_type)}" + ) + + if isinstance(image, list): + image_batch_size = len(image) + elif isinstance(image, torch.Tensor): + image_batch_size = image.shape[0] + elif isinstance(image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(image, np.ndarray): + image_batch_size = image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}") + + # original_image + + if isinstance(original_image, list): + check_image_type = original_image[0] + else: + check_image_type = original_image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`original_image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] 
but is" + f" {type(check_image_type)}" + ) + + if isinstance(original_image, list): + image_batch_size = len(original_image) + elif isinstance(original_image, torch.Tensor): + image_batch_size = original_image.shape[0] + elif isinstance(original_image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(original_image, np.ndarray): + image_batch_size = original_image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError( + f"original_image batch size: {image_batch_size} must be same as prompt batch size {batch_size}" + ) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image with preprocess_image -> preprocess_original_image + def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor: + if not isinstance(image, list): + image = [image] + + def numpy_to_pt(images): + if images.ndim == 3: + images = images[..., None] + + images = torch.from_numpy(images.transpose(0, 3, 1, 2)) + return images + + if isinstance(image[0], PIL.Image.Image): + new_image = [] + + for image_ in image: + image_ = image_.convert("RGB") + image_ = resize(image_, self.unet.sample_size) + image_ = np.array(image_) + image_ = image_.astype(np.float32) + image_ = image_ / 127.5 - 1 + new_image.append(image_) + + image = new_image + + image = np.stack(image, axis=0) # to np + image = numpy_to_pt(image) # to pt + + elif isinstance(image[0], np.ndarray): + image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) + image = numpy_to_pt(image) + + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) + + return image + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_superresolution.IFSuperResolutionPipeline.preprocess_image + def preprocess_image(self, image: PIL.Image.Image, num_images_per_prompt, device) -> torch.Tensor: + if not isinstance(image, torch.Tensor) and not isinstance(image, list): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image] + + image = np.stack(image, axis=0) # to np + image = torch.from_numpy(image.transpose(0, 3, 1, 2)) + elif isinstance(image[0], np.ndarray): + image = np.stack(image, axis=0) # to np + if image.ndim == 5: + image = image[0] + + image = torch.from_numpy(image.transpose(0, 3, 1, 2)) + elif isinstance(image, list) and isinstance(image[0], torch.Tensor): + dims = image[0].ndim + + if dims == 3: + image = torch.stack(image, dim=0) + elif dims == 4: + image = torch.concat(image, dim=0) + else: + raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}") + + image = image.to(device=device, dtype=self.unet.dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + + return image + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.prepare_intermediate_images + def prepare_intermediate_images( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, 
generator=None + ): + _, channels, height, width = image.shape + + batch_size = batch_size * num_images_per_prompt + + shape = (batch_size, channels, height, width) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + image = self.scheduler.add_noise(image, noise, timestep) + + return image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: Union[PIL.Image.Image, np.ndarray, torch.FloatTensor], + original_image: Union[ + PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] + ] = None, + strength: float = 0.8, + prompt: Union[str, List[str]] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 4.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + noise_level: int = 250, + clean_caption: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image (`torch.FloatTensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + original_image (`torch.FloatTensor` or `PIL.Image.Image`): + The original image that `image` was varied from. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + noise_level (`int`, *optional*, defaults to 250): + The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)` + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When + returning a tuple, the first element is a list with the generated images, and the second element is a list + of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) + or watermarked content, according to the `safety_checker`. + """ + # 1. Check inputs. 
Raise error if not correct + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + self.check_inputs( + prompt, + image, + original_image, + batch_size, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + device = self._execution_device + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + dtype = prompt_embeds.dtype + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength) + + # 5. prepare original image + original_image = self.preprocess_original_image(original_image) + original_image = original_image.to(device=device, dtype=dtype) + + # 6. Prepare intermediate images + noise_timestep = timesteps[0:1] + noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) + + intermediate_images = self.prepare_intermediate_images( + original_image, + noise_timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + generator, + ) + + # 7. Prepare upscaled image and noise level + _, _, height, width = original_image.shape + + image = self.preprocess_image(image, num_images_per_prompt, device) + + upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True) + + noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device) + noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype) + upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level) + + if do_classifier_free_guidance: + noise_level = torch.cat([noise_level] * 2) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # HACK: see comment in `enable_model_cpu_offload` + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 9. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + model_input = torch.cat([intermediate_images, upscaled], dim=1) + + model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet( + model_input, + t, + encoder_hidden_states=prompt_embeds, + class_labels=noise_level, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if self.scheduler.config.variance_type not in ["learned", "learned_range"]: + noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + intermediate_images = self.scheduler.step( + noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False + )[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, intermediate_images) + + image = intermediate_images + + if output_type == "pil": + # 10. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 11. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 12. Convert to PIL + image = self.numpy_to_pil(image) + + # 13. Apply watermark + if self.watermarker is not None: + self.watermarker.apply_watermark(image, self.unet.config.sample_size) + elif output_type == "pt": + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + else: + # 10. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 11. 
Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, nsfw_detected, watermark_detected) + + return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py new file mode 100644 index 000000000..180e5309c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py @@ -0,0 +1,1030 @@ +import html +import inspect +import re +import urllib.parse as ul +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer + +from ...loaders import LoraLoaderMixin +from ...models import UNet2DConditionModel +from ...schedulers import DDPMScheduler +from ...utils import ( + BACKENDS_MAPPING, + PIL_INTERPOLATION, + is_accelerate_available, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import IFPipelineOutput +from .safety_checker import IFSafetyChecker +from .watermark import IFWatermarker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.resize +def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: + w, h = images.size + + coef = w / h + + w, h = img_size, img_size + + if coef >= 1: + w = int(round(img_size / 8 * coef) * 8) + else: + h = int(round(img_size / 8 / coef) * 8) + + images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None) + + return images + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline + >>> from diffusers.utils import pt_to_pil + >>> import torch + >>> from PIL import Image + >>> import requests + >>> from io import BytesIO + + >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png" + >>> response = requests.get(url) + >>> original_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> original_image = original_image + + >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png" + >>> response = requests.get(url) + >>> mask_image = Image.open(BytesIO(response.content)) + >>> mask_image = mask_image + + >>> pipe = IFInpaintingPipeline.from_pretrained( + ... "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "blue sunglasses" + >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) + + >>> image = pipe( + ... image=original_image, + ... mask_image=mask_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... output_type="pt", + ... 
).images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained( + ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16 + ... ) + >>> super_res_1_pipe.enable_model_cpu_offload() + + >>> image = super_res_1_pipe( + ... image=image, + ... mask_image=mask_image, + ... original_image=original_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... ).images + >>> image[0].save("./if_stage_II.png") + ``` +""" + + +class IFInpaintingPipeline(DiffusionPipeline, LoraLoaderMixin): + tokenizer: T5Tokenizer + text_encoder: T5EncoderModel + + unet: UNet2DConditionModel + scheduler: DDPMScheduler + + feature_extractor: Optional[CLIPImageProcessor] + safety_checker: Optional[IFSafetyChecker] + + watermarker: Optional[IFWatermarker] + + bad_punct_regex = re.compile( + r"[" + + "#®•©™&@·º½¾¿¡§~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"] + model_cpu_offload_seq = "text_encoder->unet" + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + safety_checker: Optional[IFSafetyChecker], + feature_extractor: Optional[CLIPImageProcessor], + watermarker: Optional[IFWatermarker], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the IF license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + watermarker=watermarker, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks + def remove_all_hooks(self): + if is_accelerate_available(): + from accelerate.hooks import remove_hook_from_module + else: + raise ImportError("Please install accelerate via `pip install accelerate`") + + for model in [self.text_encoder, self.unet, self.safety_checker]: + if model is not None: + remove_hook_from_module(model, recurse=True) + + self.unet_offload_hook = None + self.text_encoder_offload_hook = None + self.final_offload_hook = None + + @torch.no_grad() + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + clean_caption: bool = False, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." 
+ ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF + max_length = 77 + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.unet is not None: + dtype = self.unet.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + mask_image, + batch_size, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # image + + if isinstance(image, list): + check_image_type = image[0] + else: + check_image_type = image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" + f" {type(check_image_type)}" + ) + + if isinstance(image, list): + image_batch_size = len(image) + elif isinstance(image, torch.Tensor): + image_batch_size = image.shape[0] + elif isinstance(image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(image, np.ndarray): + image_batch_size = image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}") + + # mask_image + + if isinstance(mask_image, list): + check_image_type = mask_image[0] + else: + check_image_type = mask_image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`mask_image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] 
but is" + f" {type(check_image_type)}" + ) + + if isinstance(mask_image, list): + image_batch_size = len(mask_image) + elif isinstance(mask_image, torch.Tensor): + image_batch_size = mask_image.shape[0] + elif isinstance(mask_image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(mask_image, np.ndarray): + image_batch_size = mask_image.shape[0] + else: + assert False + + if image_batch_size != 1 and batch_size != image_batch_size: + raise ValueError( + f"mask_image batch size: {image_batch_size} must be `1` or the same as prompt batch size {batch_size}" + ) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0—31EF CJK Strokes + # 31F0—31FF Katakana Phonetic Extensions + # 3200—32FF Enclosed CJK Letters and Months + # 3300—33FF CJK Compatibility + # 3400—4DBF CJK Unified Ideographs Extension A + # 4DC0—4DFF Yijing Hexagram Symbols + # 4E00—9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # все виды тире / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # кавычки к одному стандарту + caption = re.sub(r"[`´«»“”¨]", '"', caption) + caption = re.sub(r"[‘’]", "'", caption) + + # " + caption = re.sub(r""?", "", caption) + # & + caption = re.sub(r"&", "", caption) + + # ip adresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + 
caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." + caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image + def preprocess_image(self, image: PIL.Image.Image) -> torch.Tensor: + if not isinstance(image, list): + image = [image] + + def numpy_to_pt(images): + if images.ndim == 3: + images = images[..., None] + + images = torch.from_numpy(images.transpose(0, 3, 1, 2)) + return images + + if isinstance(image[0], PIL.Image.Image): + new_image = [] + + for image_ in image: + image_ = image_.convert("RGB") + image_ = resize(image_, self.unet.sample_size) + image_ = np.array(image_) + image_ = image_.astype(np.float32) + image_ = image_ / 127.5 - 1 + new_image.append(image_) + + image = new_image + + image = np.stack(image, axis=0) # to np + image = numpy_to_pt(image) # to pt + + elif isinstance(image[0], np.ndarray): + image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) + image = numpy_to_pt(image) + + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) + + return image + + def preprocess_mask_image(self, mask_image) -> torch.Tensor: + if not isinstance(mask_image, list): + mask_image = [mask_image] + + if isinstance(mask_image[0], torch.Tensor): + mask_image = torch.cat(mask_image, axis=0) if mask_image[0].ndim == 4 else torch.stack(mask_image, axis=0) + + if mask_image.ndim == 2: + # Batch and add channel dim for single mask + mask_image = mask_image.unsqueeze(0).unsqueeze(0) + elif mask_image.ndim == 3 and mask_image.shape[0] == 1: + # Single mask, the 0'th dimension is 
considered to be + # the existing batch size of 1 + mask_image = mask_image.unsqueeze(0) + elif mask_image.ndim == 3 and mask_image.shape[0] != 1: + # Batch of mask, the 0'th dimension is considered to be + # the batching dimension + mask_image = mask_image.unsqueeze(1) + + mask_image[mask_image < 0.5] = 0 + mask_image[mask_image >= 0.5] = 1 + + elif isinstance(mask_image[0], PIL.Image.Image): + new_mask_image = [] + + for mask_image_ in mask_image: + mask_image_ = mask_image_.convert("L") + mask_image_ = resize(mask_image_, self.unet.sample_size) + mask_image_ = np.array(mask_image_) + mask_image_ = mask_image_[None, None, :] + new_mask_image.append(mask_image_) + + mask_image = new_mask_image + + mask_image = np.concatenate(mask_image, axis=0) + mask_image = mask_image.astype(np.float32) / 255.0 + mask_image[mask_image < 0.5] = 0 + mask_image[mask_image >= 0.5] = 1 + mask_image = torch.from_numpy(mask_image) + + elif isinstance(mask_image[0], np.ndarray): + mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0) + + mask_image[mask_image < 0.5] = 0 + mask_image[mask_image >= 0.5] = 1 + mask_image = torch.from_numpy(mask_image) + + return mask_image + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + def prepare_intermediate_images( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator=None + ): + image_batch_size, channels, height, width = image.shape + + batch_size = batch_size * num_images_per_prompt + + shape = (batch_size, channels, height, width) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
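As context for this helper and `get_timesteps` above: `strength` keeps only the tail of the timestep schedule, and the mask-gated blend just below adds noise only where `mask_image` is 1. A minimal self-contained sketch of both ideas; the linear stand-in for `scheduler.add_noise` and the tensor sizes are assumptions for illustration only:

```py
# Toy illustration of strength-truncated timesteps and mask-gated noising.
import torch

def toy_get_timesteps(num_inference_steps: int, strength: float):
    timesteps = torch.arange(num_inference_steps - 1, -1, -1)  # descending schedule
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return timesteps[t_start:], num_inference_steps - t_start

timesteps, remaining = toy_get_timesteps(50, strength=0.7)
print(remaining, timesteps[:3].tolist())  # 35 [34, 33, 32]

image = torch.zeros(1, 3, 64, 64)                     # clean reference image
mask = torch.zeros(1, 1, 64, 64)
mask[..., 16:48, 16:48] = 1                           # 1 marks the region to repaint
noised = 0.5 * image + 0.5 * torch.randn_like(image)  # stand-in for scheduler.add_noise
start = (1 - mask) * image + mask * noised            # only the masked region starts from noise
```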
+ ) + + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + noised_image = self.scheduler.add_noise(image, noise, timestep) + + image = (1 - mask_image) * image + mask_image * noised_image + + return image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: Union[ + PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] + ] = None, + mask_image: Union[ + PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] + ] = None, + strength: float = 1.0, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + clean_caption: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.FloatTensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + mask_image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted + to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) + instead of 3, so the expected shape would be `(B, H, W, 1)`. + strength (`float`, *optional*, defaults to 1.0): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + Returns: + [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When + returning a tuple, the first element is a list with the generated images, and the second element is a list + of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) + or watermarked content, according to the `safety_checker`. + """ + # 1. Check inputs. 
Raise error if not correct + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + self.check_inputs( + prompt, + image, + mask_image, + batch_size, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + dtype = prompt_embeds.dtype + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength) + + # 5. Prepare intermediate images + image = self.preprocess_image(image) + image = image.to(device=device, dtype=dtype) + + mask_image = self.preprocess_mask_image(mask_image) + mask_image = mask_image.to(device=device, dtype=dtype) + + if mask_image.shape[0] == 1: + mask_image = mask_image.repeat_interleave(batch_size * num_images_per_prompt, dim=0) + else: + mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) + + noise_timestep = timesteps[0:1] + noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) + + intermediate_images = self.prepare_intermediate_images( + image, noise_timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # HACK: see comment in `enable_model_cpu_offload` + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + model_input = ( + torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images + ) + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet( + model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if self.scheduler.config.variance_type not in ["learned", "learned_range"]: + noise_pred, _ = noise_pred.split(model_input.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + prev_intermediate_images = intermediate_images + + intermediate_images = self.scheduler.step( + noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False + )[0] + + intermediate_images = (1 - mask_image) * prev_intermediate_images + mask_image * intermediate_images + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, intermediate_images) + + image = intermediate_images + + if output_type == "pil": + # 8. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 9. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 10. Convert to PIL + image = self.numpy_to_pil(image) + + # 11. Apply watermark + if self.watermarker is not None: + self.watermarker.apply_watermark(image, self.unet.config.sample_size) + elif output_type == "pt": + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + else: + # 8. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 9. 
Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, nsfw_detected, watermark_detected) + + return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py new file mode 100644 index 000000000..b67907c1c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py @@ -0,0 +1,1137 @@ +import html +import inspect +import re +import urllib.parse as ul +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer + +from ...loaders import LoraLoaderMixin +from ...models import UNet2DConditionModel +from ...schedulers import DDPMScheduler +from ...utils import ( + BACKENDS_MAPPING, + PIL_INTERPOLATION, + is_accelerate_available, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import IFPipelineOutput +from .safety_checker import IFSafetyChecker +from .watermark import IFWatermarker + + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.resize +def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: + w, h = images.size + + coef = w / h + + w, h = img_size, img_size + + if coef >= 1: + w = int(round(img_size / 8 * coef) * 8) + else: + h = int(round(img_size / 8 / coef) * 8) + + images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None) + + return images + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline + >>> from diffusers.utils import pt_to_pil + >>> import torch + >>> from PIL import Image + >>> import requests + >>> from io import BytesIO + + >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png" + >>> response = requests.get(url) + >>> original_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> original_image = original_image + + >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png" + >>> response = requests.get(url) + >>> mask_image = Image.open(BytesIO(response.content)) + >>> mask_image = mask_image + + >>> pipe = IFInpaintingPipeline.from_pretrained( + ... "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "blue sunglasses" + + >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) + >>> image = pipe( + ... image=original_image, + ... mask_image=mask_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... 
output_type="pt", + ... ).images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained( + ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16 + ... ) + >>> super_res_1_pipe.enable_model_cpu_offload() + + >>> image = super_res_1_pipe( + ... image=image, + ... mask_image=mask_image, + ... original_image=original_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... ).images + >>> image[0].save("./if_stage_II.png") + ``` + """ + + +class IFInpaintingSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin): + tokenizer: T5Tokenizer + text_encoder: T5EncoderModel + + unet: UNet2DConditionModel + scheduler: DDPMScheduler + image_noising_scheduler: DDPMScheduler + + feature_extractor: Optional[CLIPImageProcessor] + safety_checker: Optional[IFSafetyChecker] + + watermarker: Optional[IFWatermarker] + + bad_punct_regex = re.compile( + r"[" + + "#®•©™&@·º½¾¿¡§~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + model_cpu_offload_seq = "text_encoder->unet" + _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + image_noising_scheduler: DDPMScheduler, + safety_checker: Optional[IFSafetyChecker], + feature_extractor: Optional[CLIPImageProcessor], + watermarker: Optional[IFWatermarker], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the IF license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if unet.config.in_channels != 6: + logger.warning( + "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`." 
+ ) + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + image_noising_scheduler=image_noising_scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + watermarker=watermarker, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks + def remove_all_hooks(self): + if is_accelerate_available(): + from accelerate.hooks import remove_hook_from_module + else: + raise ImportError("Please install accelerate via `pip install accelerate`") + + for model in [self.text_encoder, self.unet, self.safety_checker]: + if model is not None: + remove_hook_from_module(model, recurse=True) + + self.unet_offload_hook = None + self.text_encoder_offload_hook = None + self.final_offload_hook = None + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0—31EF CJK Strokes + # 31F0—31FF Katakana Phonetic Extensions + # 3200—32FF Enclosed CJK Letters and Months + # 3300—33FF CJK Compatibility + # 3400—4DBF CJK Unified Ideographs Extension A + # 4DC0—4DFF Yijing Hexagram Symbols + # 4E00—9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # все виды тире / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # кавычки к одному стандарту + caption = re.sub(r"[`´«»“”¨]", '"', caption) + caption = re.sub(r"[‘’]", "'", caption) + 
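The statements above and below normalize punctuation, strip markup and URL remnants, and collapse noisy tokens in the raw caption. A reduced, self-contained sketch of this style of cleaning, using only a handful of simplified patterns rather than the full routine:

```py
# Reduced caption-cleaning sketch; not the full _clean_caption implementation.
import re

caption = "“Cute cat” — photo   http://example.com/cat.jpg"
caption = re.sub(r"\bhttps?:\S+", "", caption)      # drop URLs (simplified pattern)
caption = re.sub(r"[“”«»]", '"', caption)           # unify double quotes
caption = re.sub(r"[\u2010-\u2015]", "-", caption)  # unify dashes
caption = re.sub(r"\s+", " ", caption).strip()      # collapse whitespace
print(caption)  # "Cute cat" - photo
```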
+ # " + caption = re.sub(r""?", "", caption) + # & + caption = re.sub(r"&", "", caption) + + # ip adresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." + caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + @torch.no_grad() + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + clean_caption: bool = False, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
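Both the prompt and negative-prompt embeddings returned here are tiled once per requested image; a small sketch of the repeat/view step used further down in this method, with toy sequence length and hidden size:

```py
# Duplicating prompt embeddings for num_images_per_prompt (toy sizes).
import torch

num_images_per_prompt = 2
prompt_embeds = torch.randn(3, 77, 1024)  # (batch, seq_len, hidden)

bs_embed, seq_len, _ = prompt_embeds.shape
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
print(prompt_embeds.shape)  # torch.Size([6, 77, 1024])
```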
+ prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF + max_length = 77 + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.unet is not None: + dtype = self.unet.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
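Once both sets of embeddings exist, `__call__` (further below) concatenates them into a single batch so the unconditional and text-conditioned noise predictions come from one UNet forward pass, then splits and recombines them with the guidance weight. A compact sketch of that bookkeeping with stand-in tensors:

```py
# Batched classifier-free guidance: one forward pass, then split and recombine.
import torch

guidance_scale = 7.0
prompt_embeds = torch.randn(1, 77, 1024)           # text-conditioned embeddings (toy sizes)
negative_prompt_embeds = torch.randn(1, 77, 1024)  # unconditional embeddings

embeds = torch.cat([negative_prompt_embeds, prompt_embeds])  # shape (2, 77, 1024)
noise_pred = torch.randn(2, 3, 64, 64)  # stand-in for unet(model_input, t, encoder_hidden_states=embeds)

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred.shape)  # torch.Size([1, 3, 64, 64])
```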
+ ) + else: + uncond_tokens = negative_prompt + + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + original_image, + mask_image, + batch_size, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # image + + if isinstance(image, list): + check_image_type = image[0] + else: + check_image_type = image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" + f" {type(check_image_type)}" + ) + + if isinstance(image, list): + image_batch_size = len(image) + elif isinstance(image, torch.Tensor): + image_batch_size = image.shape[0] + elif isinstance(image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(image, np.ndarray): + image_batch_size = image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}") + + # original_image + + if isinstance(original_image, list): + check_image_type = original_image[0] + else: + check_image_type = original_image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`original_image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" + f" {type(check_image_type)}" + ) + + if isinstance(original_image, list): + image_batch_size = len(original_image) + elif isinstance(original_image, torch.Tensor): + image_batch_size = original_image.shape[0] + elif isinstance(original_image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(original_image, np.ndarray): + image_batch_size = original_image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError( + f"original_image batch size: {image_batch_size} must be same as prompt batch size {batch_size}" + ) + + # mask_image + + if isinstance(mask_image, list): + check_image_type = mask_image[0] + else: + check_image_type = mask_image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`mask_image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] 
but is" + f" {type(check_image_type)}" + ) + + if isinstance(mask_image, list): + image_batch_size = len(mask_image) + elif isinstance(mask_image, torch.Tensor): + image_batch_size = mask_image.shape[0] + elif isinstance(mask_image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(mask_image, np.ndarray): + image_batch_size = mask_image.shape[0] + else: + assert False + + if image_batch_size != 1 and batch_size != image_batch_size: + raise ValueError( + f"mask_image batch size: {image_batch_size} must be `1` or the same as prompt batch size {batch_size}" + ) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image with preprocess_image -> preprocess_original_image + def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor: + if not isinstance(image, list): + image = [image] + + def numpy_to_pt(images): + if images.ndim == 3: + images = images[..., None] + + images = torch.from_numpy(images.transpose(0, 3, 1, 2)) + return images + + if isinstance(image[0], PIL.Image.Image): + new_image = [] + + for image_ in image: + image_ = image_.convert("RGB") + image_ = resize(image_, self.unet.sample_size) + image_ = np.array(image_) + image_ = image_.astype(np.float32) + image_ = image_ / 127.5 - 1 + new_image.append(image_) + + image = new_image + + image = np.stack(image, axis=0) # to np + image = numpy_to_pt(image) # to pt + + elif isinstance(image[0], np.ndarray): + image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) + image = numpy_to_pt(image) + + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) + + return image + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_superresolution.IFSuperResolutionPipeline.preprocess_image + def preprocess_image(self, image: PIL.Image.Image, num_images_per_prompt, device) -> torch.Tensor: + if not isinstance(image, torch.Tensor) and not isinstance(image, list): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image] + + image = np.stack(image, axis=0) # to np + image = torch.from_numpy(image.transpose(0, 3, 1, 2)) + elif isinstance(image[0], np.ndarray): + image = np.stack(image, axis=0) # to np + if image.ndim == 5: + image = image[0] + + image = torch.from_numpy(image.transpose(0, 3, 1, 2)) + elif isinstance(image, list) and isinstance(image[0], torch.Tensor): + dims = image[0].ndim + + if dims == 3: + image = torch.stack(image, dim=0) + elif dims == 4: + image = torch.concat(image, dim=0) + else: + raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}") + + image = image.to(device=device, dtype=self.unet.dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + + return image + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_inpainting.IFInpaintingPipeline.preprocess_mask_image + def preprocess_mask_image(self, mask_image) -> torch.Tensor: + if not isinstance(mask_image, list): + mask_image = [mask_image] + + if isinstance(mask_image[0], torch.Tensor): + mask_image = torch.cat(mask_image, axis=0) if mask_image[0].ndim == 4 else torch.stack(mask_image, axis=0) + + if mask_image.ndim == 2: + # Batch and add channel dim for single mask + mask_image = mask_image.unsqueeze(0).unsqueeze(0) + elif mask_image.ndim == 3 and mask_image.shape[0] == 1: + # Single mask, the 0'th dimension is considered to be + # the existing batch size of 1 + 
mask_image = mask_image.unsqueeze(0) + elif mask_image.ndim == 3 and mask_image.shape[0] != 1: + # Batch of mask, the 0'th dimension is considered to be + # the batching dimension + mask_image = mask_image.unsqueeze(1) + + mask_image[mask_image < 0.5] = 0 + mask_image[mask_image >= 0.5] = 1 + + elif isinstance(mask_image[0], PIL.Image.Image): + new_mask_image = [] + + for mask_image_ in mask_image: + mask_image_ = mask_image_.convert("L") + mask_image_ = resize(mask_image_, self.unet.sample_size) + mask_image_ = np.array(mask_image_) + mask_image_ = mask_image_[None, None, :] + new_mask_image.append(mask_image_) + + mask_image = new_mask_image + + mask_image = np.concatenate(mask_image, axis=0) + mask_image = mask_image.astype(np.float32) / 255.0 + mask_image[mask_image < 0.5] = 0 + mask_image[mask_image >= 0.5] = 1 + mask_image = torch.from_numpy(mask_image) + + elif isinstance(mask_image[0], np.ndarray): + mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0) + + mask_image[mask_image < 0.5] = 0 + mask_image[mask_image >= 0.5] = 1 + mask_image = torch.from_numpy(mask_image) + + return mask_image + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_inpainting.IFInpaintingPipeline.prepare_intermediate_images + def prepare_intermediate_images( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator=None + ): + image_batch_size, channels, height, width = image.shape + + batch_size = batch_size * num_images_per_prompt + + shape = (batch_size, channels, height, width) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + noised_image = self.scheduler.add_noise(image, noise, timestep) + + image = (1 - mask_image) * image + mask_image * noised_image + + return image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: Union[PIL.Image.Image, np.ndarray, torch.FloatTensor], + original_image: Union[ + PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] + ] = None, + mask_image: Union[ + PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] + ] = None, + strength: float = 0.8, + prompt: Union[str, List[str]] = None, + num_inference_steps: int = 100, + timesteps: List[int] = None, + guidance_scale: float = 4.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + noise_level: int = 0, + clean_caption: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image (`torch.FloatTensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + original_image (`torch.FloatTensor` or `PIL.Image.Image`): + The original image that `image` was varied from. + mask_image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted + to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) + instead of 3, so the expected shape would be `(B, H, W, 1)`. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. 
of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + noise_level (`int`, *optional*, defaults to 0): + The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)` + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When + returning a tuple, the first element is a list with the generated images, and the second element is a list + of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) + or watermarked content, according to the `safety_checker`. + """ + # 1. 
Check inputs. Raise error if not correct + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + self.check_inputs( + prompt, + image, + original_image, + mask_image, + batch_size, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + device = self._execution_device + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + dtype = prompt_embeds.dtype + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength) + + # 5. prepare original image + original_image = self.preprocess_original_image(original_image) + original_image = original_image.to(device=device, dtype=dtype) + + # 6. prepare mask image + mask_image = self.preprocess_mask_image(mask_image) + mask_image = mask_image.to(device=device, dtype=dtype) + + if mask_image.shape[0] == 1: + mask_image = mask_image.repeat_interleave(batch_size * num_images_per_prompt, dim=0) + else: + mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) + + # 6. Prepare intermediate images + noise_timestep = timesteps[0:1] + noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) + + intermediate_images = self.prepare_intermediate_images( + original_image, + noise_timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + mask_image, + generator, + ) + + # 7. Prepare upscaled image and noise level + _, _, height, width = original_image.shape + + image = self.preprocess_image(image, num_images_per_prompt, device) + + upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True) + + noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device) + noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype) + upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level) + + if do_classifier_free_guidance: + noise_level = torch.cat([noise_level] * 2) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # HACK: see comment in `enable_model_cpu_offload` + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 9. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + model_input = torch.cat([intermediate_images, upscaled], dim=1) + + model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet( + model_input, + t, + encoder_hidden_states=prompt_embeds, + class_labels=noise_level, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if self.scheduler.config.variance_type not in ["learned", "learned_range"]: + noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + prev_intermediate_images = intermediate_images + + intermediate_images = self.scheduler.step( + noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False + )[0] + + intermediate_images = (1 - mask_image) * prev_intermediate_images + mask_image * intermediate_images + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, intermediate_images) + + image = intermediate_images + + if output_type == "pil": + # 10. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 11. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 12. Convert to PIL + image = self.numpy_to_pil(image) + + # 13. Apply watermark + if self.watermarker is not None: + self.watermarker.apply_watermark(image, self.unet.config.sample_size) + elif output_type == "pt": + nsfw_detected = None + watermark_detected = None + + else: + # 10. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 11. 
Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + self.maybe_free_model_hooks() + + if not return_dict: + return (image, nsfw_detected, watermark_detected) + + return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py new file mode 100644 index 000000000..a293343eb --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py @@ -0,0 +1,885 @@ +import html +import inspect +import re +import urllib.parse as ul +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer + +from ...loaders import LoraLoaderMixin +from ...models import UNet2DConditionModel +from ...schedulers import DDPMScheduler +from ...utils import ( + BACKENDS_MAPPING, + is_accelerate_available, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import IFPipelineOutput +from .safety_checker import IFSafetyChecker +from .watermark import IFWatermarker + + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline + >>> from diffusers.utils import pt_to_pil + >>> import torch + + >>> pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"' + >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) + + >>> image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained( + ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16 + ... ) + >>> super_res_1_pipe.enable_model_cpu_offload() + + >>> image = super_res_1_pipe( + ... image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds + ... 
).images + >>> image[0].save("./if_stage_II.png") + ``` +""" + + +class IFSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin): + tokenizer: T5Tokenizer + text_encoder: T5EncoderModel + + unet: UNet2DConditionModel + scheduler: DDPMScheduler + image_noising_scheduler: DDPMScheduler + + feature_extractor: Optional[CLIPImageProcessor] + safety_checker: Optional[IFSafetyChecker] + + watermarker: Optional[IFWatermarker] + + bad_punct_regex = re.compile( + r"[" + + "#®•©™&@·º½¾¿¡§~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"] + model_cpu_offload_seq = "text_encoder->unet" + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + image_noising_scheduler: DDPMScheduler, + safety_checker: Optional[IFSafetyChecker], + feature_extractor: Optional[CLIPImageProcessor], + watermarker: Optional[IFWatermarker], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the IF license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if unet.config.in_channels != 6: + logger.warning( + "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`." 
+            )
+
+        self.register_modules(
+            tokenizer=tokenizer,
+            text_encoder=text_encoder,
+            unet=unet,
+            scheduler=scheduler,
+            image_noising_scheduler=image_noising_scheduler,
+            safety_checker=safety_checker,
+            feature_extractor=feature_extractor,
+            watermarker=watermarker,
+        )
+        self.register_to_config(requires_safety_checker=requires_safety_checker)
+
+    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks
+    def remove_all_hooks(self):
+        if is_accelerate_available():
+            from accelerate.hooks import remove_hook_from_module
+        else:
+            raise ImportError("Please install accelerate via `pip install accelerate`")
+
+        for model in [self.text_encoder, self.unet, self.safety_checker]:
+            if model is not None:
+                remove_hook_from_module(model, recurse=True)
+
+        self.unet_offload_hook = None
+        self.text_encoder_offload_hook = None
+        self.final_offload_hook = None
+
+    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
+    def _text_preprocessing(self, text, clean_caption=False):
+        if clean_caption and not is_bs4_available():
+            logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
+            logger.warning("Setting `clean_caption` to False...")
+            clean_caption = False
+
+        if clean_caption and not is_ftfy_available():
+            logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
+            logger.warning("Setting `clean_caption` to False...")
+            clean_caption = False
+
+        if not isinstance(text, (tuple, list)):
+            text = [text]
+
+        def process(text: str):
+            if clean_caption:
+                text = self._clean_caption(text)
+                text = self._clean_caption(text)
+            else:
+                text = text.lower().strip()
+            return text
+
+        return [process(t) for t in text]
+
+    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
+    def _clean_caption(self, caption):
+        caption = str(caption)
+        caption = ul.unquote_plus(caption)
+        caption = caption.strip().lower()
+        caption = re.sub("<person>", "person", caption)
+        # urls:
+        caption = re.sub(
+            r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
+            "",
+            caption,
+        )  # regex for urls
+        caption = re.sub(
+            r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
+            "",
+            caption,
+        )  # regex for urls
+        # html:
+        caption = BeautifulSoup(caption, features="html.parser").text
+
+        # @<nickname>
+        caption = re.sub(r"@[\w\d]+\b", "", caption)
+
+        # 31C0—31EF CJK Strokes
+        # 31F0—31FF Katakana Phonetic Extensions
+        # 3200—32FF Enclosed CJK Letters and Months
+        # 3300—33FF CJK Compatibility
+        # 3400—4DBF CJK Unified Ideographs Extension A
+        # 4DC0—4DFF Yijing Hexagram Symbols
+        # 4E00—9FFF CJK Unified Ideographs
+        caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
+        caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
+        caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
+        caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
+        caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
+        caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
+        caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
+        #######################################################
+
+        # все виды тире / all types of dash --> "-"
+        caption = re.sub(
+            r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",  # noqa
+            "-",
+            caption,
+        )
+
+        # кавычки к одному стандарту / normalize quotes to a single style
+        caption = re.sub(r"[`´«»“”¨]", '"', caption)
+        caption = re.sub(r"[‘’]", "'", caption)
+
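The dash- and quote-normalization step above is easiest to see on a concrete string. The snippet below is an editor's illustration only (it is not part of the patched file); it applies the same three substitutions from `_clean_caption` to a standalone example, using nothing but the standard-library `re` module:

```py
import re

# Unify exotic dashes, double quotes and apostrophes, exactly as the three
# substitutions above do, before the remaining cleanup regexes run.
caption = "«Smart» quotes, “curly” ones and don’t-style apostrophes – all become plain ASCII"
caption = re.sub(
    r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",
    "-",
    caption,
)
caption = re.sub(r"[`´«»“”¨]", '"', caption)
caption = re.sub(r"[‘’]", "'", caption)
print(caption)
# "Smart" quotes, "curly" ones and don't-style apostrophes - all become plain ASCII
```

Normalizing punctuation first keeps the later, simpler regexes (quote stripping, repeated-punctuation collapsing) from having to enumerate every Unicode variant.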
+        # &quot;
+        caption = re.sub(r"&quot;?", "", caption)
+        # &amp
+        caption = re.sub(r"&amp", "", caption)
+
+        # ip addresses:
+        caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
+
+        # article ids:
+        caption = re.sub(r"\d:\d\d\s+$", "", caption)
+
+        # \n
+        caption = re.sub(r"\\n", " ", caption)
+
+        # "#123"
+        caption = re.sub(r"#\d{1,3}\b", "", caption)
+        # "#12345.."
+        caption = re.sub(r"#\d{5,}\b", "", caption)
+        # "123456.."
+        caption = re.sub(r"\b\d{6,}\b", "", caption)
+        # filenames:
+        caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
+
+        #
+        caption = re.sub(r"[\"\']{2,}", r'"', caption)  # """AUSVERKAUFT"""
+        caption = re.sub(r"[\.]{2,}", r" ", caption)  # """AUSVERKAUFT"""
+
+        caption = re.sub(self.bad_punct_regex, r" ", caption)  # ***AUSVERKAUFT***, #AUSVERKAUFT
+        caption = re.sub(r"\s+\.\s+", r" ", caption)  # " . "
+
+        # this-is-my-cute-cat / this_is_my_cute_cat
+        regex2 = re.compile(r"(?:\-|\_)")
+        if len(re.findall(regex2, caption)) > 3:
+            caption = re.sub(regex2, " ", caption)
+
+        caption = ftfy.fix_text(caption)
+        caption = html.unescape(html.unescape(caption))
+
+        caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption)  # jc6640
+        caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption)  # jc6640vc
+        caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption)  # 6640vc231
+
+        caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
+        caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
+        caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
+        caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
+        caption = re.sub(r"\bpage\s+\d+\b", "", caption)
+
+        caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption)  # j2d1a2a...
+
+        caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
+
+        caption = re.sub(r"\b\s+\:\s+", r": ", caption)
+        caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
+        caption = re.sub(r"\s+", " ", caption)
+
+        caption.strip()
+
+        caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
+        caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
+        caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
+        caption = re.sub(r"^\.\S+$", "", caption)
+
+        return caption.strip()
+
+    @torch.no_grad()
+    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt
+    def encode_prompt(
+        self,
+        prompt: Union[str, List[str]],
+        do_classifier_free_guidance: bool = True,
+        num_images_per_prompt: int = 1,
+        device: Optional[torch.device] = None,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        prompt_embeds: Optional[torch.FloatTensor] = None,
+        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        clean_caption: bool = False,
+    ):
+        r"""
+        Encodes the prompt into text encoder hidden states.
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                prompt to be encoded
+            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
+                whether to use classifier free guidance or not
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                number of images that should be generated per prompt
+            device (`torch.device`, *optional*):
+                torch device to place the resulting embeddings on
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if
+                `guidance_scale` is less than `1`).
+ prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF + max_length = 77 + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.unet is not None: + dtype = self.unet.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + batch_size, + noise_level, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: + raise ValueError( + f"`noise_level`: {noise_level} must be a valid timestep in `self.noising_scheduler`, [0, {self.image_noising_scheduler.config.num_train_timesteps})" + ) + + if isinstance(image, list): + check_image_type = image[0] + else: + check_image_type = image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" + f" {type(check_image_type)}" + ) + + if isinstance(image, list): + image_batch_size = len(image) + elif isinstance(image, torch.Tensor): + image_batch_size = image.shape[0] + elif isinstance(image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(image, np.ndarray): + image_batch_size = image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}") + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_intermediate_images + def prepare_intermediate_images(self, batch_size, num_channels, height, width, dtype, device, generator): + shape = (batch_size, num_channels, height, width) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + intermediate_images = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + intermediate_images = intermediate_images * self.scheduler.init_noise_sigma + return intermediate_images + + def preprocess_image(self, image, num_images_per_prompt, device): + if not isinstance(image, torch.Tensor) and not isinstance(image, list): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image] + + image = np.stack(image, axis=0) # to np + image = torch.from_numpy(image.transpose(0, 3, 1, 2)) + elif isinstance(image[0], np.ndarray): + image = np.stack(image, axis=0) # to np + if image.ndim == 5: + image = image[0] + + image = torch.from_numpy(image.transpose(0, 3, 1, 2)) + elif isinstance(image, list) and isinstance(image[0], torch.Tensor): + dims = image[0].ndim + + if dims == 3: + image = torch.stack(image, dim=0) + elif dims == 4: + image = torch.concat(image, dim=0) + else: + raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}") + + image = image.to(device=device, dtype=self.unet.dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + + return image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: int = None, + width: int = None, + image: Union[PIL.Image.Image, np.ndarray, torch.FloatTensor] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 4.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + noise_level: int = 250, + clean_caption: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + height (`int`, *optional*, defaults to None): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to None): + The width in pixels of the generated image. + image (`PIL.Image.Image`, `np.ndarray`, `torch.FloatTensor`): + The image to be upscaled. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*, defaults to None): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + noise_level (`int`, *optional*, defaults to 250): + The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)` + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When + returning a tuple, the first element is a list with the generated images, and the second element is a list + of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) + or watermarked content, according to the `safety_checker`. + """ + # 1. Check inputs. 
Raise error if not correct + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + self.check_inputs( + prompt, + image, + batch_size, + noise_level, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + + height = height or self.unet.config.sample_size + width = width or self.unet.config.sample_size + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare intermediate images + num_channels = self.unet.config.in_channels // 2 + intermediate_images = self.prepare_intermediate_images( + batch_size * num_images_per_prompt, + num_channels, + height, + width, + prompt_embeds.dtype, + device, + generator, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Prepare upscaled image and noise level + image = self.preprocess_image(image, num_images_per_prompt, device) + upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True) + + noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device) + noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype) + upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level) + + if do_classifier_free_guidance: + noise_level = torch.cat([noise_level] * 2) + + # HACK: see comment in `enable_model_cpu_offload` + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + model_input = torch.cat([intermediate_images, upscaled], dim=1) + + model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet( + model_input, + t, + encoder_hidden_states=prompt_embeds, + class_labels=noise_level, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if self.scheduler.config.variance_type not in ["learned", "learned_range"]: + noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + intermediate_images = self.scheduler.step( + noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False + )[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, intermediate_images) + + image = intermediate_images + + if output_type == "pil": + # 9. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 10. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 11. Convert to PIL + image = self.numpy_to_pil(image) + + # 12. Apply watermark + if self.watermarker is not None: + self.watermarker.apply_watermark(image, self.unet.config.sample_size) + elif output_type == "pt": + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + else: + # 9. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 10. 
Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, nsfw_detected, watermark_detected) + + return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_output.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_output.py new file mode 100644 index 000000000..7f39ab5ba --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/pipeline_output.py @@ -0,0 +1,28 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput + + +@dataclass +class IFPipelineOutput(BaseOutput): + """ + Args: + Output class for Stable Diffusion pipelines. + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. + nsfw_detected (`List[bool]`) + List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content or a watermark. `None` if safety checking could not be performed. + watermark_detected (`List[bool]`) + List of flags denoting whether the corresponding generated image likely has a watermark. `None` if safety + checking could not be performed. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_detected: Optional[List[bool]] + watermark_detected: Optional[List[bool]] diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/safety_checker.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/safety_checker.py new file mode 100644 index 000000000..8ffeed580 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/safety_checker.py @@ -0,0 +1,59 @@ +import numpy as np +import torch +import torch.nn as nn +from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel + +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +class IFSafetyChecker(PreTrainedModel): + config_class = CLIPConfig + + _no_split_modules = ["CLIPEncoderLayer"] + + def __init__(self, config: CLIPConfig): + super().__init__(config) + + self.vision_model = CLIPVisionModelWithProjection(config.vision_config) + + self.p_head = nn.Linear(config.vision_config.projection_dim, 1) + self.w_head = nn.Linear(config.vision_config.projection_dim, 1) + + @torch.no_grad() + def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5): + image_embeds = self.vision_model(clip_input)[0] + + nsfw_detected = self.p_head(image_embeds) + nsfw_detected = nsfw_detected.flatten() + nsfw_detected = nsfw_detected > p_threshold + nsfw_detected = nsfw_detected.tolist() + + if any(nsfw_detected): + logger.warning( + "Potential NSFW content was detected in one or more images. A black image will be returned instead." + " Try again with a different prompt and/or seed." 
+ ) + + for idx, nsfw_detected_ in enumerate(nsfw_detected): + if nsfw_detected_: + images[idx] = np.zeros(images[idx].shape) + + watermark_detected = self.w_head(image_embeds) + watermark_detected = watermark_detected.flatten() + watermark_detected = watermark_detected > w_threshold + watermark_detected = watermark_detected.tolist() + + if any(watermark_detected): + logger.warning( + "Potential watermarked content was detected in one or more images. A black image will be returned instead." + " Try again with a different prompt and/or seed." + ) + + for idx, watermark_detected_ in enumerate(watermark_detected): + if watermark_detected_: + images[idx] = np.zeros(images[idx].shape) + + return images, nsfw_detected, watermark_detected diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/timesteps.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/timesteps.py new file mode 100644 index 000000000..d44285c01 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/timesteps.py @@ -0,0 +1,579 @@ +fast27_timesteps = [ + 999, + 800, + 799, + 600, + 599, + 500, + 400, + 399, + 377, + 355, + 333, + 311, + 288, + 266, + 244, + 222, + 200, + 199, + 177, + 155, + 133, + 111, + 88, + 66, + 44, + 22, + 0, +] + +smart27_timesteps = [ + 999, + 976, + 952, + 928, + 905, + 882, + 858, + 857, + 810, + 762, + 715, + 714, + 572, + 429, + 428, + 286, + 285, + 238, + 190, + 143, + 142, + 118, + 95, + 71, + 47, + 24, + 0, +] + +smart50_timesteps = [ + 999, + 988, + 977, + 966, + 955, + 944, + 933, + 922, + 911, + 900, + 899, + 879, + 859, + 840, + 820, + 800, + 799, + 766, + 733, + 700, + 699, + 650, + 600, + 599, + 500, + 499, + 400, + 399, + 350, + 300, + 299, + 266, + 233, + 200, + 199, + 179, + 159, + 140, + 120, + 100, + 99, + 88, + 77, + 66, + 55, + 44, + 33, + 22, + 11, + 0, +] + +smart100_timesteps = [ + 999, + 995, + 992, + 989, + 985, + 981, + 978, + 975, + 971, + 967, + 964, + 961, + 957, + 956, + 951, + 947, + 942, + 937, + 933, + 928, + 923, + 919, + 914, + 913, + 908, + 903, + 897, + 892, + 887, + 881, + 876, + 871, + 870, + 864, + 858, + 852, + 846, + 840, + 834, + 828, + 827, + 820, + 813, + 806, + 799, + 792, + 785, + 784, + 777, + 770, + 763, + 756, + 749, + 742, + 741, + 733, + 724, + 716, + 707, + 699, + 698, + 688, + 677, + 666, + 656, + 655, + 645, + 634, + 623, + 613, + 612, + 598, + 584, + 570, + 569, + 555, + 541, + 527, + 526, + 505, + 484, + 483, + 462, + 440, + 439, + 396, + 395, + 352, + 351, + 308, + 307, + 264, + 263, + 220, + 219, + 176, + 132, + 88, + 44, + 0, +] + +smart185_timesteps = [ + 999, + 997, + 995, + 992, + 990, + 988, + 986, + 984, + 981, + 979, + 977, + 975, + 972, + 970, + 968, + 966, + 964, + 961, + 959, + 957, + 956, + 954, + 951, + 949, + 946, + 944, + 941, + 939, + 936, + 934, + 931, + 929, + 926, + 924, + 921, + 919, + 916, + 914, + 913, + 910, + 907, + 905, + 902, + 899, + 896, + 893, + 891, + 888, + 885, + 882, + 879, + 877, + 874, + 871, + 870, + 867, + 864, + 861, + 858, + 855, + 852, + 849, + 846, + 843, + 840, + 837, + 834, + 831, + 828, + 827, + 824, + 821, + 817, + 814, + 811, + 808, + 804, + 801, + 798, + 795, + 791, + 788, + 785, + 784, + 780, + 777, + 774, + 770, + 766, + 763, + 760, + 756, + 752, + 749, + 746, + 742, + 741, + 737, + 733, + 730, + 726, + 722, + 718, + 714, + 710, + 707, + 703, + 699, + 698, + 694, + 690, + 685, + 681, + 677, + 673, + 669, + 664, + 660, + 656, 
+ 655, + 650, + 646, + 641, + 636, + 632, + 627, + 622, + 618, + 613, + 612, + 607, + 602, + 596, + 591, + 586, + 580, + 575, + 570, + 569, + 563, + 557, + 551, + 545, + 539, + 533, + 527, + 526, + 519, + 512, + 505, + 498, + 491, + 484, + 483, + 474, + 466, + 457, + 449, + 440, + 439, + 428, + 418, + 407, + 396, + 395, + 381, + 366, + 352, + 351, + 330, + 308, + 307, + 286, + 264, + 263, + 242, + 220, + 219, + 176, + 175, + 132, + 131, + 88, + 44, + 0, +] + +super27_timesteps = [ + 999, + 991, + 982, + 974, + 966, + 958, + 950, + 941, + 933, + 925, + 916, + 908, + 900, + 899, + 874, + 850, + 825, + 800, + 799, + 700, + 600, + 500, + 400, + 300, + 200, + 100, + 0, +] + +super40_timesteps = [ + 999, + 992, + 985, + 978, + 971, + 964, + 957, + 949, + 942, + 935, + 928, + 921, + 914, + 907, + 900, + 899, + 879, + 859, + 840, + 820, + 800, + 799, + 766, + 733, + 700, + 699, + 650, + 600, + 599, + 500, + 499, + 400, + 399, + 300, + 299, + 200, + 199, + 100, + 99, + 0, +] + +super100_timesteps = [ + 999, + 996, + 992, + 989, + 985, + 982, + 979, + 975, + 972, + 968, + 965, + 961, + 958, + 955, + 951, + 948, + 944, + 941, + 938, + 934, + 931, + 927, + 924, + 920, + 917, + 914, + 910, + 907, + 903, + 900, + 899, + 891, + 884, + 876, + 869, + 861, + 853, + 846, + 838, + 830, + 823, + 815, + 808, + 800, + 799, + 788, + 777, + 766, + 755, + 744, + 733, + 722, + 711, + 700, + 699, + 688, + 677, + 666, + 655, + 644, + 633, + 622, + 611, + 600, + 599, + 585, + 571, + 557, + 542, + 528, + 514, + 500, + 499, + 485, + 471, + 457, + 442, + 428, + 414, + 400, + 399, + 379, + 359, + 340, + 320, + 300, + 299, + 279, + 259, + 240, + 220, + 200, + 199, + 166, + 133, + 100, + 99, + 66, + 33, + 0, +] diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/watermark.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/watermark.py new file mode 100644 index 000000000..ca10413de --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deepfloyd_if/watermark.py @@ -0,0 +1,46 @@ +from typing import List + +import PIL.Image +import torch +from PIL import Image + +from ...configuration_utils import ConfigMixin +from ...models.modeling_utils import ModelMixin +from ...utils import PIL_INTERPOLATION + + +class IFWatermarker(ModelMixin, ConfigMixin): + def __init__(self): + super().__init__() + + self.register_buffer("watermark_image", torch.zeros((62, 62, 4))) + self.watermark_image_as_pil = None + + def apply_watermark(self, images: List[PIL.Image.Image], sample_size=None): + # copied from https://github.com/deep-floyd/IF/blob/b77482e36ca2031cb94dbca1001fc1e6400bf4ab/deepfloyd_if/modules/base.py#L287 + + h = images[0].height + w = images[0].width + + sample_size = sample_size or h + + coef = min(h / sample_size, w / sample_size) + img_h, img_w = (int(h / coef), int(w / coef)) if coef < 1 else (h, w) + + S1, S2 = 1024**2, img_w * img_h + K = (S2 / S1) ** 0.5 + wm_size, wm_x, wm_y = int(K * 62), img_w - int(14 * K), img_h - int(14 * K) + + if self.watermark_image_as_pil is None: + watermark_image = self.watermark_image.to(torch.uint8).cpu().numpy() + watermark_image = Image.fromarray(watermark_image, mode="RGBA") + self.watermark_image_as_pil = watermark_image + + wm_img = self.watermark_image_as_pil.resize( + (wm_size, wm_size), PIL_INTERPOLATION["bicubic"], reducing_gap=None + ) + + for pil_img in images: + pil_img.paste(wm_img, box=(wm_x - wm_size, wm_y - 
wm_size, wm_x, wm_y), mask=wm_img.split()[-1]) + + return images diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/README.md b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/README.md new file mode 100644 index 000000000..1e21dbbbd --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/README.md @@ -0,0 +1,3 @@ +# Deprecated Pipelines + +This folder contains pipelines that have very low usage as measured by model downloads, issues and PRs. While you can still use the pipelines just as before, we will stop testing the pipelines and will not accept any changes to existing files. \ No newline at end of file diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/__init__.py new file mode 100644 index 000000000..993632317 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/__init__.py @@ -0,0 +1,153 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_librosa_available, + is_note_seq_available, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_pt_objects + + _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) +else: + _import_structure["latent_diffusion_uncond"] = ["LDMPipeline"] + _import_structure["pndm"] = ["PNDMPipeline"] + _import_structure["repaint"] = ["RePaintPipeline"] + _import_structure["score_sde_ve"] = ["ScoreSdeVePipeline"] + _import_structure["stochastic_karras_ve"] = ["KarrasVePipeline"] + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["alt_diffusion"] = [ + "AltDiffusionImg2ImgPipeline", + "AltDiffusionPipeline", + "AltDiffusionPipelineOutput", + ] + _import_structure["versatile_diffusion"] = [ + "VersatileDiffusionDualGuidedPipeline", + "VersatileDiffusionImageVariationPipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionTextToImagePipeline", + ] + _import_structure["vq_diffusion"] = ["VQDiffusionPipeline"] + _import_structure["stable_diffusion_variants"] = [ + "CycleDiffusionPipeline", + "StableDiffusionInpaintPipelineLegacy", + "StableDiffusionPix2PixZeroPipeline", + "StableDiffusionParadigmsPipeline", + "StableDiffusionModelEditingPipeline", + ] + +try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_librosa_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_librosa_objects)) + +else: + _import_structure["audio_diffusion"] = ["AudioDiffusionPipeline", "Mel"] + +try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise 
OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) + +else: + _import_structure["spectrogram_diffusion"] = ["MidiProcessor", "SpectrogramDiffusionPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_pt_objects import * + + else: + from .latent_diffusion_uncond import LDMPipeline + from .pndm import PNDMPipeline + from .repaint import RePaintPipeline + from .score_sde_ve import ScoreSdeVePipeline + from .stochastic_karras_ve import KarrasVePipeline + + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .alt_diffusion import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline, AltDiffusionPipelineOutput + from .audio_diffusion import AudioDiffusionPipeline, Mel + from .spectrogram_diffusion import SpectrogramDiffusionPipeline + from .stable_diffusion_variants import ( + CycleDiffusionPipeline, + StableDiffusionInpaintPipelineLegacy, + StableDiffusionModelEditingPipeline, + StableDiffusionParadigmsPipeline, + StableDiffusionPix2PixZeroPipeline, + ) + from .stochastic_karras_ve import KarrasVePipeline + from .versatile_diffusion import ( + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + ) + from .vq_diffusion import VQDiffusionPipeline + + try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_librosa_objects import * + else: + from .audio_diffusion import AudioDiffusionPipeline, Mel + + try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 + else: + from .spectrogram_diffusion import ( + MidiProcessor, + SpectrogramDiffusionPipeline, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/__init__.py new file mode 100644 index 000000000..71fa15b3f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/__init__.py @@ -0,0 +1,53 @@ +from typing import TYPE_CHECKING + +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except 
OptionalDependencyNotAvailable: + from ....utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["modeling_roberta_series"] = ["RobertaSeriesModelWithTransformation"] + _import_structure["pipeline_alt_diffusion"] = ["AltDiffusionPipeline"] + _import_structure["pipeline_alt_diffusion_img2img"] = ["AltDiffusionImg2ImgPipeline"] + + _import_structure["pipeline_output"] = ["AltDiffusionPipelineOutput"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import * + + else: + from .modeling_roberta_series import RobertaSeriesModelWithTransformation + from .pipeline_alt_diffusion import AltDiffusionPipeline + from .pipeline_alt_diffusion_img2img import AltDiffusionImg2ImgPipeline + from .pipeline_output import AltDiffusionPipelineOutput + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py new file mode 100644 index 000000000..f73ef15d7 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py @@ -0,0 +1,124 @@ +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +from torch import nn +from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel +from transformers.utils import ModelOutput + + +@dataclass +class TransformationModelOutput(ModelOutput): + """ + Base class for text model's outputs that also contains a pooling of the last hidden states. + + Args: + text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): + The text embeddings obtained by applying the projection layer to the pooler_output. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. 
+ """ + + projection_state: Optional[torch.FloatTensor] = None + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +class RobertaSeriesConfig(XLMRobertaConfig): + def __init__( + self, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + project_dim=512, + pooler_fn="cls", + learn_encoder=False, + use_attention_mask=True, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + self.project_dim = project_dim + self.pooler_fn = pooler_fn + self.learn_encoder = learn_encoder + self.use_attention_mask = use_attention_mask + + +class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + base_model_prefix = "roberta" + config_class = RobertaSeriesConfig + + def __init__(self, config): + super().__init__(config) + self.roberta = XLMRobertaModel(config) + self.transformation = nn.Linear(config.hidden_size, config.project_dim) + self.has_pre_transformation = getattr(config, "has_pre_transformation", False) + if self.has_pre_transformation: + self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim) + self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.post_init() + + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + ): + r""" """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=True if self.has_pre_transformation else output_hidden_states, + return_dict=return_dict, + ) + + if self.has_pre_transformation: + sequence_output2 = outputs["hidden_states"][-2] + sequence_output2 = self.pre_LN(sequence_output2) + projection_state2 = self.transformation_pre(sequence_output2) + + return TransformationModelOutput( + projection_state=projection_state2, + last_hidden_state=outputs.last_hidden_state, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + else: + projection_state = self.transformation(outputs.last_hidden_state) + return TransformationModelOutput( + projection_state=projection_state, + last_hidden_state=outputs.last_hidden_state, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py new file mode 
100644 index 000000000..e4583699e --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py @@ -0,0 +1,946 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, XLMRobertaTokenizer + +from ....configuration_utils import FrozenDict +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .modeling_roberta_series import RobertaSeriesModelWithTransformation +from .pipeline_output import AltDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AltDiffusionPipeline + + >>> pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion-m9", torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> # "dark elf princess, highly detailed, d & d, fantasy, highly detailed, digital painting, trending on artstation, concept art, sharp focus, illustration, art by artgerm and greg rutkowski and fuji choko and viktoria gavrilenko and hoang lap" + >>> prompt = "黑暗精灵公主,非常详细,幻想,非常详细,数字绘画,概念艺术,敏锐的焦点,插图" + >>> image = pipe(prompt).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class AltDiffusionPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + LoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Alt Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.RobertaSeriesModelWithTransformation`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.XLMRobertaTokenizer`]): + A `XLMRobertaTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: RobertaSeriesModelWithTransformation, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. 
Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Alt Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + 
torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
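As a quick illustration of how the guidance weight `w` (`guidance_scale`) and the `rescale_noise_cfg` helper defined earlier in this file interact during a single denoising step, here is a minimal, self-contained sketch. The tensors below are random placeholders standing in for real UNet outputs, and the helper is re-declared with the same formula only so the snippet runs on its own.

```py
import torch


def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    # Same formula as the helper above: match the std of the text-conditioned branch
    # to counter the overexposure introduced by large guidance scales.
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * rescaled + (1 - guidance_rescale) * noise_cfg


guidance_scale = 7.5      # w > 1 enables classifier-free guidance
guidance_rescale = 0.7    # 0.0 disables the std rescaling entirely

# Placeholder UNet outputs for the unconditional and text-conditioned passes.
noise_pred_uncond = torch.randn(1, 4, 64, 64)
noise_pred_text = torch.randn(1, 4, 64, 64)

# Classifier-free guidance: eps_uncond + w * (eps_text - eps_uncond).
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if guidance_rescale > 0.0:
    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)

print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])
```

With `guidance_scale = 1` the correction term vanishes, which is why `do_classifier_free_guidance` below only returns `True` for values greater than 1.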
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when + using zero terminal SNR. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + # 0. 
Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + # to deal with lora scaling and other possible forward hooks + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None: + output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True + image_embeds, negative_image_embeds = self.encode_image( + ip_adapter_image, device, num_images_per_prompt, output_hidden_state + ) + if self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds]) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Add image embeds for IP-Adapter + added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None + + # 6.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return AltDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py new file mode 100644 index 000000000..156e52c24 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py @@ -0,0 +1,1018 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, XLMRobertaTokenizer + +from ....configuration_utils import FrozenDict +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( + PIL_INTERPOLATION, + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .modeling_roberta_series import RobertaSeriesModelWithTransformation +from .pipeline_output import AltDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import requests + >>> import torch + >>> from PIL import Image + >>> from io import BytesIO + + >>> from diffusers import AltDiffusionImg2ImgPipeline + + >>> device = "cuda" + >>> model_id_or_path = "BAAI/AltDiffusion-m9" + >>> pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) + >>> pipe = pipe.to(device) + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + + >>> response = requests.get(url) + >>> init_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> init_image = init_image.resize((768, 512)) + + >>> # "A fantasy landscape, trending on artstation" + >>> prompt = "幻想风景, artstation" + + >>> images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images + >>> images[0].save("幻想风景.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + deprecation_message = "The preprocess method 
is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class AltDiffusionImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + LoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-guided image-to-image generation using Alt Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.RobertaSeriesModelWithTransformation`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.XLMRobertaTokenizer`]): + A `XLMRobertaTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: RobertaSeriesModelWithTransformation, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. 
Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Alt Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + 
torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + timesteps: List[int] = None, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: int = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. 
For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] instead of a + plain tuple. 
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + Examples: + + Returns: + [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + strength, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. 
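
As a reading aid (not part of the patch), here is a minimal sketch of how the `strength` argument documented above trims the schedule; it mirrors the `get_timesteps` arithmetic defined earlier in this file and assumes the default 50-step schedule.

```py
# Sketch only: mirrors the get_timesteps arithmetic defined earlier in this file.
num_inference_steps, strength = 50, 0.8

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10

# 40 of the 50 timesteps are actually run; strength=1.0 would run all 50 and
# effectively ignore the reference image, as the docstring above notes.
print(t_start, init_timestep)
```

The classifier-free-guidance comment above is the standard concat-and-chunk trick: the unconditional and conditional embeddings are concatenated once, the latents are duplicated per step, and the two noise predictions are split and recombined. A self-contained sketch with placeholder tensors (shapes are illustrative, and `torch.randn_like` stands in for the UNet call):

```py
import torch

guidance_scale = 7.5
prompt_embeds = torch.randn(1, 77, 768)           # placeholder conditional embeddings
negative_prompt_embeds = torch.randn(1, 77, 768)  # placeholder unconditional embeddings
latents = torch.randn(1, 4, 64, 64)

# one concatenation up front instead of two UNet forward passes per step
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

latent_model_input = torch.cat([latents] * 2)      # duplicate latents to match the batch
noise_pred = torch.randn_like(latent_model_input)  # stand-in for unet(latent_model_input, t, ...)

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
```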
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None: + output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True + image_embeds, negative_image_embeds = self.encode_image( + ip_adapter_image, device, num_images_per_prompt, output_hidden_state + ) + if self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds]) + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + + # 5. set timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 6. Prepare latent variables + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None + + # 7.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return AltDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_output.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_output.py new file mode 100644 index 000000000..dd174ae3c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_output.py @@ -0,0 +1,28 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image + +from ....utils import ( + BaseOutput, +) + + +@dataclass +# Copied from diffusers.pipelines.stable_diffusion.pipeline_output.StableDiffusionPipelineOutput with Stable->Alt +class AltDiffusionPipelineOutput(BaseOutput): + """ + Output class for Alt Diffusion pipelines. 
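
For orientation, a hedged end-to-end usage sketch of the img2img pipeline added above. The `AltDiffusionImg2ImgPipeline` class name, the `BAAI/AltDiffusion-m9` checkpoint, the input file name, and the CUDA device are assumptions, not taken from this patch.

```py
import torch
from PIL import Image
from diffusers import AltDiffusionImg2ImgPipeline  # assumed export name

pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
    "BAAI/AltDiffusion-m9", torch_dtype=torch.float16  # assumed checkpoint
).to("cuda")

init_image = Image.open("sketch.png").convert("RGB").resize((512, 512))  # placeholder file
result = pipe(
    prompt="a watercolor painting of a mountain lake",
    image=init_image,
    strength=0.8,           # how far the reference image is pushed toward pure noise
    guidance_scale=7.5,     # classifier-free guidance weight
    num_inference_steps=50,
)
result.images[0].save("out.png")
```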
+ + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + nsfw_content_detected (`List[bool]`) + List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or + `None` if safety checking could not be performed. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py new file mode 100644 index 000000000..312795186 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING + +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = { + "mel": ["Mel"], + "pipeline_audio_diffusion": ["AudioDiffusionPipeline"], +} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .mel import Mel + from .pipeline_audio_diffusion import AudioDiffusionPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/audio_diffusion/mel.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/audio_diffusion/mel.py new file mode 100644 index 000000000..3426c3ad0 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/audio_diffusion/mel.py @@ -0,0 +1,179 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import numpy as np # noqa: E402 + +from ....configuration_utils import ConfigMixin, register_to_config +from ....schedulers.scheduling_utils import SchedulerMixin + + +try: + import librosa # noqa: E402 + + _librosa_can_be_imported = True + _import_error = "" +except Exception as e: + _librosa_can_be_imported = False + _import_error = ( + f"Cannot import librosa because {e}. Make sure to correctly install librosa to be able to install it." + ) + + +from PIL import Image # noqa: E402 + + +class Mel(ConfigMixin, SchedulerMixin): + """ + Parameters: + x_res (`int`): + x resolution of spectrogram (time). + y_res (`int`): + y resolution of spectrogram (frequency bins). + sample_rate (`int`): + Sample rate of audio. + n_fft (`int`): + Number of Fast Fourier Transforms. + hop_length (`int`): + Hop length (a higher number is recommended if `y_res` < 256). + top_db (`int`): + Loudest decibel value. + n_iter (`int`): + Number of iterations for Griffin-Lim Mel inversion. 
+ """ + + config_name = "mel_config.json" + + @register_to_config + def __init__( + self, + x_res: int = 256, + y_res: int = 256, + sample_rate: int = 22050, + n_fft: int = 2048, + hop_length: int = 512, + top_db: int = 80, + n_iter: int = 32, + ): + self.hop_length = hop_length + self.sr = sample_rate + self.n_fft = n_fft + self.top_db = top_db + self.n_iter = n_iter + self.set_resolution(x_res, y_res) + self.audio = None + + if not _librosa_can_be_imported: + raise ValueError(_import_error) + + def set_resolution(self, x_res: int, y_res: int): + """Set resolution. + + Args: + x_res (`int`): + x resolution of spectrogram (time). + y_res (`int`): + y resolution of spectrogram (frequency bins). + """ + self.x_res = x_res + self.y_res = y_res + self.n_mels = self.y_res + self.slice_size = self.x_res * self.hop_length - 1 + + def load_audio(self, audio_file: str = None, raw_audio: np.ndarray = None): + """Load audio. + + Args: + audio_file (`str`): + An audio file that must be on disk due to [Librosa](https://librosa.org/) limitation. + raw_audio (`np.ndarray`): + The raw audio file as a NumPy array. + """ + if audio_file is not None: + self.audio, _ = librosa.load(audio_file, mono=True, sr=self.sr) + else: + self.audio = raw_audio + + # Pad with silence if necessary. + if len(self.audio) < self.x_res * self.hop_length: + self.audio = np.concatenate([self.audio, np.zeros((self.x_res * self.hop_length - len(self.audio),))]) + + def get_number_of_slices(self) -> int: + """Get number of slices in audio. + + Returns: + `int`: + Number of spectograms audio can be sliced into. + """ + return len(self.audio) // self.slice_size + + def get_audio_slice(self, slice: int = 0) -> np.ndarray: + """Get slice of audio. + + Args: + slice (`int`): + Slice number of audio (out of `get_number_of_slices()`). + + Returns: + `np.ndarray`: + The audio slice as a NumPy array. + """ + return self.audio[self.slice_size * slice : self.slice_size * (slice + 1)] + + def get_sample_rate(self) -> int: + """Get sample rate. + + Returns: + `int`: + Sample rate of audio. + """ + return self.sr + + def audio_slice_to_image(self, slice: int) -> Image.Image: + """Convert slice of audio to spectrogram. + + Args: + slice (`int`): + Slice number of audio to convert (out of `get_number_of_slices()`). + + Returns: + `PIL Image`: + A grayscale image of `x_res x y_res`. + """ + S = librosa.feature.melspectrogram( + y=self.get_audio_slice(slice), sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_mels=self.n_mels + ) + log_S = librosa.power_to_db(S, ref=np.max, top_db=self.top_db) + bytedata = (((log_S + self.top_db) * 255 / self.top_db).clip(0, 255) + 0.5).astype(np.uint8) + image = Image.fromarray(bytedata) + return image + + def image_to_audio(self, image: Image.Image) -> np.ndarray: + """Converts spectrogram to audio. + + Args: + image (`PIL Image`): + An grayscale image of `x_res x y_res`. + + Returns: + audio (`np.ndarray`): + The audio as a NumPy array. 
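
A hedged round-trip sketch using only the `Mel` methods defined above; the audio path is a placeholder and `librosa` must be installed. The import path follows the file location added in this patch.

```py
from diffusers.pipelines.deprecated.audio_diffusion.mel import Mel  # path as added in this patch

mel = Mel(x_res=256, y_res=256, sample_rate=22050, n_fft=2048, hop_length=512)
mel.load_audio(audio_file="example.wav")  # placeholder path; raw_audio=np.ndarray also works

print("slices:", mel.get_number_of_slices())
image = mel.audio_slice_to_image(0)  # 256x256 grayscale PIL image of one slice
audio = mel.image_to_audio(image)    # lossy Griffin-Lim reconstruction of that slice
```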
+ """ + bytedata = np.frombuffer(image.tobytes(), dtype="uint8").reshape((image.height, image.width)) + log_S = bytedata.astype("float") * self.top_db / 255 - self.top_db + S = librosa.db_to_power(log_S) + audio = librosa.feature.inverse.mel_to_audio( + S, sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_iter=self.n_iter + ) + return audio diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py new file mode 100644 index 000000000..47044e050 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py @@ -0,0 +1,329 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from math import acos, sin +from typing import List, Tuple, Union + +import numpy as np +import torch +from PIL import Image + +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import DDIMScheduler, DDPMScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput +from .mel import Mel + + +class AudioDiffusionPipeline(DiffusionPipeline): + """ + Pipeline for audio diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + vqae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + mel ([`Mel`]): + Transform audio into a spectrogram. + scheduler ([`DDIMScheduler`] or [`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`] or [`DDPMScheduler`]. + """ + + _optional_components = ["vqvae"] + + def __init__( + self, + vqvae: AutoencoderKL, + unet: UNet2DConditionModel, + mel: Mel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + ): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae) + + def get_default_steps(self) -> int: + """Returns default number of steps recommended for inference. + + Returns: + `int`: + The number of steps. 
+ """ + return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000 + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + audio_file: str = None, + raw_audio: np.ndarray = None, + slice: int = 0, + start_step: int = 0, + steps: int = None, + generator: torch.Generator = None, + mask_start_secs: float = 0, + mask_end_secs: float = 0, + step_generator: torch.Generator = None, + eta: float = 0, + noise: torch.Tensor = None, + encoding: torch.Tensor = None, + return_dict=True, + ) -> Union[ + Union[AudioPipelineOutput, ImagePipelineOutput], + Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], + ]: + """ + The call function to the pipeline for generation. + + Args: + batch_size (`int`): + Number of samples to generate. + audio_file (`str`): + An audio file that must be on disk due to [Librosa](https://librosa.org/) limitation. + raw_audio (`np.ndarray`): + The raw audio file as a NumPy array. + slice (`int`): + Slice number of audio to convert. + start_step (int): + Step to start diffusion from. + steps (`int`): + Number of denoising steps (defaults to `50` for DDIM and `1000` for DDPM). + generator (`torch.Generator`): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + mask_start_secs (`float`): + Number of seconds of audio to mask (not generate) at start. + mask_end_secs (`float`): + Number of seconds of audio to mask (not generate) at end. + step_generator (`torch.Generator`): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) used to denoise. + None + eta (`float`): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + noise (`torch.Tensor`): + A noise tensor of shape `(batch_size, 1, height, width)` or `None`. + encoding (`torch.Tensor`): + A tensor for [`UNet2DConditionModel`] of shape `(batch_size, seq_length, cross_attention_dim)`. + return_dict (`bool`): + Whether or not to return a [`AudioPipelineOutput`], [`ImagePipelineOutput`] or a plain tuple. + + Examples: + + For audio diffusion: + + ```py + import torch + from IPython.display import Audio + from diffusers import DiffusionPipeline + + device = "cuda" if torch.cuda.is_available() else "cpu" + pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256").to(device) + + output = pipe() + display(output.images[0]) + display(Audio(output.audios[0], rate=mel.get_sample_rate())) + ``` + + For latent audio diffusion: + + ```py + import torch + from IPython.display import Audio + from diffusers import DiffusionPipeline + + device = "cuda" if torch.cuda.is_available() else "cpu" + pipe = DiffusionPipeline.from_pretrained("teticio/latent-audio-diffusion-256").to(device) + + output = pipe() + display(output.images[0]) + display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate())) + ``` + + For other tasks like variation, inpainting, outpainting, etc: + + ```py + output = pipe( + raw_audio=output.audios[0, 0], + start_step=int(pipe.get_default_steps() / 2), + mask_start_secs=1, + mask_end_secs=1, + ) + display(output.images[0]) + display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate())) + ``` + + Returns: + `List[PIL Image]`: + A list of Mel spectrograms (`float`, `List[np.ndarray]`) with the sample rate and raw audio. 
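
The `mask_start_secs` / `mask_end_secs` arguments are converted to spectrogram columns inside the denoising loop below; a small sketch of that conversion, assuming the default `Mel` settings and a 256x256 UNet:

```py
# Assumes unet.config.sample_size == (256, 256) and the default Mel settings.
sample_size_x, x_res, hop_length, sample_rate = 256, 256, 512, 22050

pixels_per_second = sample_size_x * sample_rate / x_res / hop_length  # ~43 columns per second
mask_start = int(1.0 * pixels_per_second)  # mask_start_secs=1.0 keeps ~43 columns of the input
mask_end = int(0.5 * pixels_per_second)    # mask_end_secs=0.5 keeps ~21 columns at the end
print(pixels_per_second, mask_start, mask_end)
```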
+ """ + + steps = steps or self.get_default_steps() + self.scheduler.set_timesteps(steps) + step_generator = step_generator or generator + # For backwards compatibility + if isinstance(self.unet.config.sample_size, int): + self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size) + if noise is None: + noise = randn_tensor( + ( + batch_size, + self.unet.config.in_channels, + self.unet.config.sample_size[0], + self.unet.config.sample_size[1], + ), + generator=generator, + device=self.device, + ) + images = noise + mask = None + + if audio_file is not None or raw_audio is not None: + self.mel.load_audio(audio_file, raw_audio) + input_image = self.mel.audio_slice_to_image(slice) + input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape( + (input_image.height, input_image.width) + ) + input_image = (input_image / 255) * 2 - 1 + input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device) + + if self.vqvae is not None: + input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample( + generator=generator + )[0] + input_images = self.vqvae.config.scaling_factor * input_images + + if start_step > 0: + images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1]) + + pixels_per_second = ( + self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length + ) + mask_start = int(mask_start_secs * pixels_per_second) + mask_end = int(mask_end_secs * pixels_per_second) + mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])) + + for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])): + if isinstance(self.unet, UNet2DConditionModel): + model_output = self.unet(images, t, encoding)["sample"] + else: + model_output = self.unet(images, t)["sample"] + + if isinstance(self.scheduler, DDIMScheduler): + images = self.scheduler.step( + model_output=model_output, + timestep=t, + sample=images, + eta=eta, + generator=step_generator, + )["prev_sample"] + else: + images = self.scheduler.step( + model_output=model_output, + timestep=t, + sample=images, + generator=step_generator, + )["prev_sample"] + + if mask is not None: + if mask_start > 0: + images[:, :, :, :mask_start] = mask[:, step, :, :mask_start] + if mask_end > 0: + images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:] + + if self.vqvae is not None: + # 0.18215 was scaling factor used in training to ensure unit variance + images = 1 / self.vqvae.config.scaling_factor * images + images = self.vqvae.decode(images)["sample"] + + images = (images / 2 + 0.5).clamp(0, 1) + images = images.cpu().permute(0, 2, 3, 1).numpy() + images = (images * 255).round().astype("uint8") + images = list( + (Image.fromarray(_[:, :, 0]) for _ in images) + if images.shape[3] == 1 + else (Image.fromarray(_, mode="RGB").convert("L") for _ in images) + ) + + audios = [self.mel.image_to_audio(_) for _ in images] + if not return_dict: + return images, (self.mel.get_sample_rate(), audios) + + return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images)) + + @torch.no_grad() + def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray: + """ + Reverse the denoising step process to recover a noisy image from the generated image. + + Args: + images (`List[PIL Image]`): + List of images to encode. 
+ steps (`int`): + Number of encoding steps to perform (defaults to `50`). + + Returns: + `np.ndarray`: + A noise tensor of shape `(batch_size, 1, height, width)`. + """ + + # Only works with DDIM as this method is deterministic + assert isinstance(self.scheduler, DDIMScheduler) + self.scheduler.set_timesteps(steps) + sample = np.array( + [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images] + ) + sample = (sample / 255) * 2 - 1 + sample = torch.Tensor(sample).to(self.device) + + for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))): + prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps + alpha_prod_t = self.scheduler.alphas_cumprod[t] + alpha_prod_t_prev = ( + self.scheduler.alphas_cumprod[prev_timestep] + if prev_timestep >= 0 + else self.scheduler.final_alpha_cumprod + ) + beta_prod_t = 1 - alpha_prod_t + model_output = self.unet(sample, t)["sample"] + pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * model_output + sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) + sample = sample * alpha_prod_t ** (0.5) + beta_prod_t ** (0.5) * model_output + + return sample + + @staticmethod + def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor: + """Spherical Linear intERPolation. + + Args: + x0 (`torch.Tensor`): + The first tensor to interpolate between. + x1 (`torch.Tensor`): + Second tensor to interpolate between. + alpha (`float`): + Interpolation between 0 and 1 + + Returns: + `torch.Tensor`: + The interpolated tensor. + """ + + theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1)) + return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/__init__.py new file mode 100644 index 000000000..214f5bbca --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/__init__.py @@ -0,0 +1,18 @@ +from typing import TYPE_CHECKING + +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_latent_diffusion_uncond": ["LDMPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_latent_diffusion_uncond import LDMPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py new file mode 100644 index 000000000..7fe5d59f7 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py @@ -0,0 +1,130 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import List, Optional, Tuple, Union + +import torch + +from ....models import UNet2DModel, VQModel +from ....schedulers import DDIMScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class LDMPipeline(DiffusionPipeline): + r""" + Pipeline for unconditional image generation using latent diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + vqvae ([`VQModel`]): + Vector-quantized (VQ) model to encode and decode images to and from latent representations. + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + [`DDIMScheduler`] is used in combination with `unet` to denoise the encoded image latents. + """ + + def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler): + super().__init__() + self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + eta: float = 0.0, + num_inference_steps: int = 50, + output_type: Optional[str] = "pil", + return_dict: bool = True, + **kwargs, + ) -> Union[Tuple, ImagePipelineOutput]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, *optional*, defaults to 1): + Number of images to generate. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
+ + Example: + + ```py + >>> from diffusers import LDMPipeline + + >>> # load model and scheduler + >>> pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256") + + >>> # run pipeline in inference (sample random noise and denoise) + >>> image = pipe().images[0] + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + + latents = randn_tensor( + (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), + generator=generator, + ) + latents = latents.to(self.device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + + self.scheduler.set_timesteps(num_inference_steps) + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + + extra_kwargs = {} + if accepts_eta: + extra_kwargs["eta"] = eta + + for t in self.progress_bar(self.scheduler.timesteps): + latent_model_input = self.scheduler.scale_model_input(latents, t) + # predict the noise residual + noise_prediction = self.unet(latent_model_input, t).sample + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample + + # adjust latents with inverse of vae scale + latents = latents / self.vqvae.config.scaling_factor + # decode the image latents with the VAE + image = self.vqvae.decode(latents).sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/pndm/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/pndm/__init__.py new file mode 100644 index 000000000..5e3bdba74 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/pndm/__init__.py @@ -0,0 +1,18 @@ +from typing import TYPE_CHECKING + +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_pndm": ["PNDMPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_pndm import PNDMPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py new file mode 100644 index 000000000..ef78af194 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py @@ -0,0 +1,121 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List, Optional, Tuple, Union + +import torch + +from ....models import UNet2DModel +from ....schedulers import PNDMScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class PNDMPipeline(DiffusionPipeline): + r""" + Pipeline for unconditional image generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image latents. + scheduler ([`PNDMScheduler`]): + A `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image. + """ + + unet: UNet2DModel + scheduler: PNDMScheduler + + def __init__(self, unet: UNet2DModel, scheduler: PNDMScheduler): + super().__init__() + + scheduler = PNDMScheduler.from_config(scheduler.config) + + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + num_inference_steps: int = 50, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + **kwargs, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, `optional`, defaults to 1): + The number of images to generate. + num_inference_steps (`int`, `optional`, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator`, `optional`): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + output_type (`str`, `optional`, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. + + Example: + + ```py + >>> from diffusers import PNDMPipeline + + >>> # load model and scheduler + >>> pndm = PNDMPipeline.from_pretrained("google/ddpm-cifar10-32") + + >>> # run pipeline in inference (sample random noise and denoise) + >>> image = pndm().images[0] + + >>> # save image + >>> image.save("pndm_generated_image.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. 
+ """ + # For more information on the sampling method you can take a look at Algorithm 2 of + # the official paper: https://arxiv.org/pdf/2202.09778.pdf + + # Sample gaussian noise to begin loop + image = randn_tensor( + (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), + generator=generator, + device=self.device, + ) + + self.scheduler.set_timesteps(num_inference_steps) + for t in self.progress_bar(self.scheduler.timesteps): + model_output = self.unet(image, t).sample + + image = self.scheduler.step(model_output, t, image).prev_sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/repaint/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/repaint/__init__.py new file mode 100644 index 000000000..2c6b04af5 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/repaint/__init__.py @@ -0,0 +1,19 @@ +from typing import TYPE_CHECKING + +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_repaint": ["RePaintPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_repaint import RePaintPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py new file mode 100644 index 000000000..c03a3d8fc --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py @@ -0,0 +1,230 @@ +# Copyright 2024 ETH Zurich Computer Vision Lab and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch + +from ....models import UNet2DModel +from ....schedulers import RePaintScheduler +from ....utils import PIL_INTERPOLATION, deprecate, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. 
Please use VaeImageProcessor.preprocess(...) instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]): + if isinstance(mask, torch.Tensor): + return mask + elif isinstance(mask, PIL.Image.Image): + mask = [mask] + + if isinstance(mask[0], PIL.Image.Image): + w, h = mask[0].size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask] + mask = np.concatenate(mask, axis=0) + mask = mask.astype(np.float32) / 255.0 + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + elif isinstance(mask[0], torch.Tensor): + mask = torch.cat(mask, dim=0) + return mask + + +class RePaintPipeline(DiffusionPipeline): + r""" + Pipeline for image inpainting using RePaint. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image latents. + scheduler ([`RePaintScheduler`]): + A `RePaintScheduler` to be used in combination with `unet` to denoise the encoded image. + """ + + unet: UNet2DModel + scheduler: RePaintScheduler + model_cpu_offload_seq = "unet" + + def __init__(self, unet, scheduler): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + image: Union[torch.Tensor, PIL.Image.Image], + mask_image: Union[torch.Tensor, PIL.Image.Image], + num_inference_steps: int = 250, + eta: float = 0.0, + jump_length: int = 10, + jump_n_sample: int = 10, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + image (`torch.FloatTensor` or `PIL.Image.Image`): + The original image to inpaint on. + mask_image (`torch.FloatTensor` or `PIL.Image.Image`): + The mask_image where 0.0 define which part of the original image to inpaint. + num_inference_steps (`int`, *optional*, defaults to 1000): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + eta (`float`): + The weight of the added noise in a diffusion step. Its value is between 0.0 and 1.0; 0.0 corresponds to + DDIM and 1.0 is the DDPM scheduler. + jump_length (`int`, *optional*, defaults to 10): + The number of steps taken forward in time before going backward in time for a single jump ("j" in + RePaint paper). Take a look at Figure 9 and 10 in the [paper](https://arxiv.org/pdf/2201.09865.pdf). 
+ jump_n_sample (`int`, *optional*, defaults to 10): + The number of times to make a forward time jump for a given chosen time sample. Take a look at Figure 9 + and 10 in the [paper](https://arxiv.org/pdf/2201.09865.pdf). + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + output_type (`str`, `optional`, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. + + Example: + + ```py + >>> from io import BytesIO + >>> import torch + >>> import PIL + >>> import requests + >>> from diffusers import RePaintPipeline, RePaintScheduler + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/celeba_hq_256.png" + >>> mask_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png" + + >>> # Load the original image and the mask as PIL images + >>> original_image = download_image(img_url).resize((256, 256)) + >>> mask_image = download_image(mask_url).resize((256, 256)) + + >>> # Load the RePaint scheduler and pipeline based on a pretrained DDPM model + >>> scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256") + >>> pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler) + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> output = pipe( + ... image=original_image, + ... mask_image=mask_image, + ... num_inference_steps=250, + ... eta=0.0, + ... jump_length=10, + ... jump_n_sample=10, + ... generator=generator, + ... ) + >>> inpainted_image = output.images[0] + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + + original_image = image + + original_image = _preprocess_image(original_image) + original_image = original_image.to(device=self._execution_device, dtype=self.unet.dtype) + mask_image = _preprocess_mask(mask_image) + mask_image = mask_image.to(device=self._execution_device, dtype=self.unet.dtype) + + batch_size = original_image.shape[0] + + # sample gaussian noise to begin the loop + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + image_shape = original_image.shape + image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype) + + # set step values + self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self._execution_device) + self.scheduler.eta = eta + + t_last = self.scheduler.timesteps[0] + 1 + generator = generator[0] if isinstance(generator, list) else generator + for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + if t < t_last: + # predict the noise residual + model_output = self.unet(image, t).sample + # compute previous image: x_t -> x_t-1 + image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample + + else: + # compute the reverse: x_t-1 -> x_t + image = self.scheduler.undo_step(image, t_last, generator) + t_last = t + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py new file mode 100644 index 000000000..87c167c3d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py @@ -0,0 +1,19 @@ +from typing import TYPE_CHECKING + +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_score_sde_ve": ["ScoreSdeVePipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_score_sde_ve import ScoreSdeVePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py new file mode 100644 index 000000000..b0bb114a8 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py @@ -0,0 +1,109 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Tuple, Union + +import torch + +from ....models import UNet2DModel +from ....schedulers import ScoreSdeVeScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class ScoreSdeVePipeline(DiffusionPipeline): + r""" + Pipeline for unconditional image generation. + + This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image. + scheduler ([`ScoreSdeVeScheduler`]): + A `ScoreSdeVeScheduler` to be used in combination with `unet` to denoise the encoded image. + """ + + unet: UNet2DModel + scheduler: ScoreSdeVeScheduler + + def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + num_inference_steps: int = 2000, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + **kwargs, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of images to generate. + generator (`torch.Generator`, `optional`): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + output_type (`str`, `optional`, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + + img_size = self.unet.config.sample_size + shape = (batch_size, 3, img_size, img_size) + + model = self.unet + + sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma + sample = sample.to(self.device) + + self.scheduler.set_timesteps(num_inference_steps) + self.scheduler.set_sigmas(num_inference_steps) + + for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device) + + # correction step + for _ in range(self.scheduler.config.correct_steps): + model_output = self.unet(sample, sigma_t).sample + sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample + + # prediction step + model_output = model(sample, sigma_t).sample + output = self.scheduler.step_pred(model_output, t, sample, generator=generator) + + sample, sample_mean = output.prev_sample, output.prev_sample_mean + + sample = sample_mean.clamp(0, 1) + sample = sample.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + sample = self.numpy_to_pil(sample) + + if not return_dict: + return (sample,) + + return ImagePipelineOutput(images=sample) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/__init__.py new file mode 100644 index 000000000..150954baa --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/__init__.py @@ -0,0 +1,75 @@ +# flake8: noqa +from typing import TYPE_CHECKING +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + _LazyModule, + is_note_seq_available, + 
OptionalDependencyNotAvailable,
+    is_torch_available,
+    is_transformers_available,
+    get_objects_from_module,
+)
+
+_dummy_objects = {}
+_import_structure = {}
+
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ....utils import dummy_torch_and_transformers_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["continuous_encoder"] = ["SpectrogramContEncoder"]
+    _import_structure["notes_encoder"] = ["SpectrogramNotesEncoder"]
+    _import_structure["pipeline_spectrogram_diffusion"] = [
+        "SpectrogramContEncoder",
+        "SpectrogramDiffusionPipeline",
+        "T5FilmDecoder",
+    ]
+try:
+    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ....utils import dummy_transformers_and_torch_and_note_seq_objects
+
+    _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects))
+else:
+    _import_structure["midi_utils"] = ["MidiProcessor"]
+
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from ....utils.dummy_torch_and_transformers_objects import *
+    else:
+        from .pipeline_spectrogram_diffusion import SpectrogramDiffusionPipeline
+        from .pipeline_spectrogram_diffusion import SpectrogramContEncoder
+        from .pipeline_spectrogram_diffusion import SpectrogramNotesEncoder
+        from .pipeline_spectrogram_diffusion import T5FilmDecoder
+
+    try:
+        if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from ....utils.dummy_transformers_and_torch_and_note_seq_objects import *
+
+    else:
+        from .midi_utils import MidiProcessor
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py
new file mode 100644
index 000000000..8664c2fb6
--- /dev/null
+++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py
@@ -0,0 +1,92 @@
+# Copyright 2022 The Music Spectrogram Diffusion Authors.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
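+
+# SpectrogramContEncoder below wraps a stack of T5 encoder blocks behind
+# ModelMixin/ConfigMixin so it can be saved and loaded with the pipeline. A
+# minimal construction sketch, with assumed hyperparameters that are not taken
+# from any released checkpoint:
+#
+#     encoder = SpectrogramContEncoder(
+#         input_dims=128,
+#         targets_context_length=256,
+#         d_model=768,
+#         dropout_rate=0.1,
+#         num_layers=12,
+#         num_heads=12,
+#         d_kv=64,
+#         d_ff=2048,
+#         feed_forward_proj="gated-gelu",
+#     )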
+ +import torch +import torch.nn as nn +from transformers.modeling_utils import ModuleUtilsMixin +from transformers.models.t5.modeling_t5 import ( + T5Block, + T5Config, + T5LayerNorm, +) + +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin + + +class SpectrogramContEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): + @register_to_config + def __init__( + self, + input_dims: int, + targets_context_length: int, + d_model: int, + dropout_rate: float, + num_layers: int, + num_heads: int, + d_kv: int, + d_ff: int, + feed_forward_proj: str, + is_decoder: bool = False, + ): + super().__init__() + + self.input_proj = nn.Linear(input_dims, d_model, bias=False) + + self.position_encoding = nn.Embedding(targets_context_length, d_model) + self.position_encoding.weight.requires_grad = False + + self.dropout_pre = nn.Dropout(p=dropout_rate) + + t5config = T5Config( + d_model=d_model, + num_heads=num_heads, + d_kv=d_kv, + d_ff=d_ff, + feed_forward_proj=feed_forward_proj, + dropout_rate=dropout_rate, + is_decoder=is_decoder, + is_encoder_decoder=False, + ) + self.encoders = nn.ModuleList() + for lyr_num in range(num_layers): + lyr = T5Block(t5config) + self.encoders.append(lyr) + + self.layer_norm = T5LayerNorm(d_model) + self.dropout_post = nn.Dropout(p=dropout_rate) + + def forward(self, encoder_inputs, encoder_inputs_mask): + x = self.input_proj(encoder_inputs) + + # terminal relative positional encodings + max_positions = encoder_inputs.shape[1] + input_positions = torch.arange(max_positions, device=encoder_inputs.device) + + seq_lens = encoder_inputs_mask.sum(-1) + input_positions = torch.roll(input_positions.unsqueeze(0), tuple(seq_lens.tolist()), dims=0) + x += self.position_encoding(input_positions) + + x = self.dropout_pre(x) + + # inverted the attention mask + input_shape = encoder_inputs.size() + extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape) + + for lyr in self.encoders: + x = lyr(x, extended_attention_mask)[0] + x = self.layer_norm(x) + + return self.dropout_post(x), encoder_inputs_mask diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py new file mode 100644 index 000000000..e777e8449 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py @@ -0,0 +1,667 @@ +# Copyright 2022 The Music Spectrogram Diffusion Authors. +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
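+
+# Overview: this module converts a MIDI file into the token segments consumed by
+# SpectrogramDiffusionPipeline. Notes are turned into timed events, encoded with
+# the Codec defined below, and split into fixed-length chunks aligned to audio
+# frames. With SAMPLE_RATE = 16000 and HOP_SIZE = 320 (see the constants below),
+# FRAME_RATE evaluates to 16000 // 320 = 50 frames per second.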
+ +import dataclasses +import math +import os +from typing import Any, Callable, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F + +from ....utils import is_note_seq_available +from .pipeline_spectrogram_diffusion import TARGET_FEATURE_LENGTH + + +if is_note_seq_available(): + import note_seq +else: + raise ImportError("Please install note-seq via `pip install note-seq`") + + +INPUT_FEATURE_LENGTH = 2048 + +SAMPLE_RATE = 16000 +HOP_SIZE = 320 +FRAME_RATE = int(SAMPLE_RATE // HOP_SIZE) + +DEFAULT_STEPS_PER_SECOND = 100 +DEFAULT_MAX_SHIFT_SECONDS = 10 +DEFAULT_NUM_VELOCITY_BINS = 1 + +SLAKH_CLASS_PROGRAMS = { + "Acoustic Piano": 0, + "Electric Piano": 4, + "Chromatic Percussion": 8, + "Organ": 16, + "Acoustic Guitar": 24, + "Clean Electric Guitar": 26, + "Distorted Electric Guitar": 29, + "Acoustic Bass": 32, + "Electric Bass": 33, + "Violin": 40, + "Viola": 41, + "Cello": 42, + "Contrabass": 43, + "Orchestral Harp": 46, + "Timpani": 47, + "String Ensemble": 48, + "Synth Strings": 50, + "Choir and Voice": 52, + "Orchestral Hit": 55, + "Trumpet": 56, + "Trombone": 57, + "Tuba": 58, + "French Horn": 60, + "Brass Section": 61, + "Soprano/Alto Sax": 64, + "Tenor Sax": 66, + "Baritone Sax": 67, + "Oboe": 68, + "English Horn": 69, + "Bassoon": 70, + "Clarinet": 71, + "Pipe": 73, + "Synth Lead": 80, + "Synth Pad": 88, +} + + +@dataclasses.dataclass +class NoteRepresentationConfig: + """Configuration note representations.""" + + onsets_only: bool + include_ties: bool + + +@dataclasses.dataclass +class NoteEventData: + pitch: int + velocity: Optional[int] = None + program: Optional[int] = None + is_drum: Optional[bool] = None + instrument: Optional[int] = None + + +@dataclasses.dataclass +class NoteEncodingState: + """Encoding state for note transcription, keeping track of active pitches.""" + + # velocity bin for active pitches and programs + active_pitches: MutableMapping[Tuple[int, int], int] = dataclasses.field(default_factory=dict) + + +@dataclasses.dataclass +class EventRange: + type: str + min_value: int + max_value: int + + +@dataclasses.dataclass +class Event: + type: str + value: int + + +class Tokenizer: + def __init__(self, regular_ids: int): + # The special tokens: 0=PAD, 1=EOS, and 2=UNK + self._num_special_tokens = 3 + self._num_regular_tokens = regular_ids + + def encode(self, token_ids): + encoded = [] + for token_id in token_ids: + if not 0 <= token_id < self._num_regular_tokens: + raise ValueError( + f"token_id {token_id} does not fall within valid range of [0, {self._num_regular_tokens})" + ) + encoded.append(token_id + self._num_special_tokens) + + # Add EOS token + encoded.append(1) + + # Pad to till INPUT_FEATURE_LENGTH + encoded = encoded + [0] * (INPUT_FEATURE_LENGTH - len(encoded)) + + return encoded + + +class Codec: + """Encode and decode events. + + Useful for declaring what certain ranges of a vocabulary should be used for. This is intended to be used from + Python before encoding or after decoding with GenericTokenVocabulary. This class is more lightweight and does not + include things like EOS or UNK token handling. + + To ensure that 'shift' events are always the first block of the vocab and start at 0, that event type is required + and specified separately. + """ + + def __init__(self, max_shift_steps: int, steps_per_second: float, event_ranges: List[EventRange]): + """Define Codec. + + Args: + max_shift_steps: Maximum number of shift steps that can be encoded. 
+ steps_per_second: Shift steps will be interpreted as having a duration of + 1 / steps_per_second. + event_ranges: Other supported event types and their ranges. + """ + self.steps_per_second = steps_per_second + self._shift_range = EventRange(type="shift", min_value=0, max_value=max_shift_steps) + self._event_ranges = [self._shift_range] + event_ranges + # Ensure all event types have unique names. + assert len(self._event_ranges) == len({er.type for er in self._event_ranges}) + + @property + def num_classes(self) -> int: + return sum(er.max_value - er.min_value + 1 for er in self._event_ranges) + + # The next couple methods are simplified special case methods just for shift + # events that are intended to be used from within autograph functions. + + def is_shift_event_index(self, index: int) -> bool: + return (self._shift_range.min_value <= index) and (index <= self._shift_range.max_value) + + @property + def max_shift_steps(self) -> int: + return self._shift_range.max_value + + def encode_event(self, event: Event) -> int: + """Encode an event to an index.""" + offset = 0 + for er in self._event_ranges: + if event.type == er.type: + if not er.min_value <= event.value <= er.max_value: + raise ValueError( + f"Event value {event.value} is not within valid range " + f"[{er.min_value}, {er.max_value}] for type {event.type}" + ) + return offset + event.value - er.min_value + offset += er.max_value - er.min_value + 1 + + raise ValueError(f"Unknown event type: {event.type}") + + def event_type_range(self, event_type: str) -> Tuple[int, int]: + """Return [min_id, max_id] for an event type.""" + offset = 0 + for er in self._event_ranges: + if event_type == er.type: + return offset, offset + (er.max_value - er.min_value) + offset += er.max_value - er.min_value + 1 + + raise ValueError(f"Unknown event type: {event_type}") + + def decode_event_index(self, index: int) -> Event: + """Decode an event index to an Event.""" + offset = 0 + for er in self._event_ranges: + if offset <= index <= offset + er.max_value - er.min_value: + return Event(type=er.type, value=er.min_value + index - offset) + offset += er.max_value - er.min_value + 1 + + raise ValueError(f"Unknown event index: {index}") + + +@dataclasses.dataclass +class ProgramGranularity: + # both tokens_map_fn and program_map_fn should be idempotent + tokens_map_fn: Callable[[Sequence[int], Codec], Sequence[int]] + program_map_fn: Callable[[int], int] + + +def drop_programs(tokens, codec: Codec): + """Drops program change events from a token sequence.""" + min_program_id, max_program_id = codec.event_type_range("program") + return tokens[(tokens < min_program_id) | (tokens > max_program_id)] + + +def programs_to_midi_classes(tokens, codec): + """Modifies program events to be the first program in the MIDI class.""" + min_program_id, max_program_id = codec.event_type_range("program") + is_program = (tokens >= min_program_id) & (tokens <= max_program_id) + return np.where(is_program, min_program_id + 8 * ((tokens - min_program_id) // 8), tokens) + + +PROGRAM_GRANULARITIES = { + # "flat" granularity; drop program change tokens and set NoteSequence + # programs to zero + "flat": ProgramGranularity(tokens_map_fn=drop_programs, program_map_fn=lambda program: 0), + # map each program to the first program in its MIDI class + "midi_class": ProgramGranularity( + tokens_map_fn=programs_to_midi_classes, program_map_fn=lambda program: 8 * (program // 8) + ), + # leave programs as is + "full": ProgramGranularity(tokens_map_fn=lambda tokens, codec: tokens, 
program_map_fn=lambda program: program), +} + + +def frame(signal, frame_length, frame_step, pad_end=False, pad_value=0, axis=-1): + """ + equivalent of tf.signal.frame + """ + signal_length = signal.shape[axis] + if pad_end: + frames_overlap = frame_length - frame_step + rest_samples = np.abs(signal_length - frames_overlap) % np.abs(frame_length - frames_overlap) + pad_size = int(frame_length - rest_samples) + + if pad_size != 0: + pad_axis = [0] * signal.ndim + pad_axis[axis] = pad_size + signal = F.pad(signal, pad_axis, "constant", pad_value) + frames = signal.unfold(axis, frame_length, frame_step) + return frames + + +def program_to_slakh_program(program): + # this is done very hackily, probably should use a custom mapping + for slakh_program in sorted(SLAKH_CLASS_PROGRAMS.values(), reverse=True): + if program >= slakh_program: + return slakh_program + + +def audio_to_frames( + samples, + hop_size: int, + frame_rate: int, +) -> Tuple[Sequence[Sequence[int]], torch.Tensor]: + """Convert audio samples to non-overlapping frames and frame times.""" + frame_size = hop_size + samples = np.pad(samples, [0, frame_size - len(samples) % frame_size], mode="constant") + + # Split audio into frames. + frames = frame( + torch.Tensor(samples).unsqueeze(0), + frame_length=frame_size, + frame_step=frame_size, + pad_end=False, # TODO check why its off by 1 here when True + ) + + num_frames = len(samples) // frame_size + + times = np.arange(num_frames) / frame_rate + return frames, times + + +def note_sequence_to_onsets_and_offsets_and_programs( + ns: note_seq.NoteSequence, +) -> Tuple[Sequence[float], Sequence[NoteEventData]]: + """Extract onset & offset times and pitches & programs from a NoteSequence. + + The onset & offset times will not necessarily be in sorted order. + + Args: + ns: NoteSequence from which to extract onsets and offsets. + + Returns: + times: A list of note onset and offset times. values: A list of NoteEventData objects where velocity is zero for + note + offsets. + """ + # Sort by program and pitch and put offsets before onsets as a tiebreaker for + # subsequent stable sort. 
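+    # Concretely, `times` below lists every end_time before any start_time, so the
+    # stable argsort in encode_and_index_events keeps an offset ahead of an onset
+    # that falls on the same time step.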
+ notes = sorted(ns.notes, key=lambda note: (note.is_drum, note.program, note.pitch)) + times = [note.end_time for note in notes if not note.is_drum] + [note.start_time for note in notes] + values = [ + NoteEventData(pitch=note.pitch, velocity=0, program=note.program, is_drum=False) + for note in notes + if not note.is_drum + ] + [ + NoteEventData(pitch=note.pitch, velocity=note.velocity, program=note.program, is_drum=note.is_drum) + for note in notes + ] + return times, values + + +def num_velocity_bins_from_codec(codec: Codec): + """Get number of velocity bins from event codec.""" + lo, hi = codec.event_type_range("velocity") + return hi - lo + + +# segment an array into segments of length n +def segment(a, n): + return [a[i : i + n] for i in range(0, len(a), n)] + + +def velocity_to_bin(velocity, num_velocity_bins): + if velocity == 0: + return 0 + else: + return math.ceil(num_velocity_bins * velocity / note_seq.MAX_MIDI_VELOCITY) + + +def note_event_data_to_events( + state: Optional[NoteEncodingState], + value: NoteEventData, + codec: Codec, +) -> Sequence[Event]: + """Convert note event data to a sequence of events.""" + if value.velocity is None: + # onsets only, no program or velocity + return [Event("pitch", value.pitch)] + else: + num_velocity_bins = num_velocity_bins_from_codec(codec) + velocity_bin = velocity_to_bin(value.velocity, num_velocity_bins) + if value.program is None: + # onsets + offsets + velocities only, no programs + if state is not None: + state.active_pitches[(value.pitch, 0)] = velocity_bin + return [Event("velocity", velocity_bin), Event("pitch", value.pitch)] + else: + if value.is_drum: + # drum events use a separate vocabulary + return [Event("velocity", velocity_bin), Event("drum", value.pitch)] + else: + # program + velocity + pitch + if state is not None: + state.active_pitches[(value.pitch, value.program)] = velocity_bin + return [ + Event("program", value.program), + Event("velocity", velocity_bin), + Event("pitch", value.pitch), + ] + + +def note_encoding_state_to_events(state: NoteEncodingState) -> Sequence[Event]: + """Output program and pitch events for active notes plus a final tie event.""" + events = [] + for pitch, program in sorted(state.active_pitches.keys(), key=lambda k: k[::-1]): + if state.active_pitches[(pitch, program)]: + events += [Event("program", program), Event("pitch", pitch)] + events.append(Event("tie", 0)) + return events + + +def encode_and_index_events( + state, event_times, event_values, codec, frame_times, encode_event_fn, encoding_state_to_events_fn=None +): + """Encode a sequence of timed events and index to audio frame times. + + Encodes time shifts as repeated single step shifts for later run length encoding. + + Optionally, also encodes a sequence of "state events", keeping track of the current encoding state at each audio + frame. This can be used e.g. to prepend events representing the current state to a targets segment. + + Args: + state: Initial event encoding state. + event_times: Sequence of event times. + event_values: Sequence of event values. + encode_event_fn: Function that transforms event value into a sequence of one + or more Event objects. + codec: An Codec object that maps Event objects to indices. + frame_times: Time for every audio frame. + encoding_state_to_events_fn: Function that transforms encoding state into a + sequence of one or more Event objects. + + Returns: + events: Encoded events and shifts. event_start_indices: Corresponding start event index for every audio frame. 
+ Note: one event can correspond to multiple audio indices due to sampling rate differences. This makes + splitting sequences tricky because the same event can appear at the end of one sequence and the beginning of + another. + event_end_indices: Corresponding end event index for every audio frame. Used + to ensure when slicing that one chunk ends where the next begins. Should always be true that + event_end_indices[i] = event_start_indices[i + 1]. + state_events: Encoded "state" events representing the encoding state before + each event. + state_event_indices: Corresponding state event index for every audio frame. + """ + indices = np.argsort(event_times, kind="stable") + event_steps = [round(event_times[i] * codec.steps_per_second) for i in indices] + event_values = [event_values[i] for i in indices] + + events = [] + state_events = [] + event_start_indices = [] + state_event_indices = [] + + cur_step = 0 + cur_event_idx = 0 + cur_state_event_idx = 0 + + def fill_event_start_indices_to_cur_step(): + while ( + len(event_start_indices) < len(frame_times) + and frame_times[len(event_start_indices)] < cur_step / codec.steps_per_second + ): + event_start_indices.append(cur_event_idx) + state_event_indices.append(cur_state_event_idx) + + for event_step, event_value in zip(event_steps, event_values): + while event_step > cur_step: + events.append(codec.encode_event(Event(type="shift", value=1))) + cur_step += 1 + fill_event_start_indices_to_cur_step() + cur_event_idx = len(events) + cur_state_event_idx = len(state_events) + if encoding_state_to_events_fn: + # Dump state to state events *before* processing the next event, because + # we want to capture the state prior to the occurrence of the event. + for e in encoding_state_to_events_fn(state): + state_events.append(codec.encode_event(e)) + + for e in encode_event_fn(state, event_value, codec): + events.append(codec.encode_event(e)) + + # After the last event, continue filling out the event_start_indices array. + # The inequality is not strict because if our current step lines up exactly + # with (the start of) an audio frame, we need to add an additional shift event + # to "cover" that frame. + while cur_step / codec.steps_per_second <= frame_times[-1]: + events.append(codec.encode_event(Event(type="shift", value=1))) + cur_step += 1 + fill_event_start_indices_to_cur_step() + cur_event_idx = len(events) + + # Now fill in event_end_indices. We need this extra array to make sure that + # when we slice events, each slice ends exactly where the subsequent slice + # begins. 
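+    # Illustrative example: if event_start_indices == [0, 3, 5] and len(events) == 7,
+    # then event_end_indices == [3, 5, 7], so each frame's slice ends exactly where
+    # the next frame's slice begins.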
+ event_end_indices = event_start_indices[1:] + [len(events)] + + events = np.array(events).astype(np.int32) + state_events = np.array(state_events).astype(np.int32) + event_start_indices = segment(np.array(event_start_indices).astype(np.int32), TARGET_FEATURE_LENGTH) + event_end_indices = segment(np.array(event_end_indices).astype(np.int32), TARGET_FEATURE_LENGTH) + state_event_indices = segment(np.array(state_event_indices).astype(np.int32), TARGET_FEATURE_LENGTH) + + outputs = [] + for start_indices, end_indices, event_indices in zip(event_start_indices, event_end_indices, state_event_indices): + outputs.append( + { + "inputs": events, + "event_start_indices": start_indices, + "event_end_indices": end_indices, + "state_events": state_events, + "state_event_indices": event_indices, + } + ) + + return outputs + + +def extract_sequence_with_indices(features, state_events_end_token=None, feature_key="inputs"): + """Extract target sequence corresponding to audio token segment.""" + features = features.copy() + start_idx = features["event_start_indices"][0] + end_idx = features["event_end_indices"][-1] + + features[feature_key] = features[feature_key][start_idx:end_idx] + + if state_events_end_token is not None: + # Extract the state events corresponding to the audio start token, and + # prepend them to the targets array. + state_event_start_idx = features["state_event_indices"][0] + state_event_end_idx = state_event_start_idx + 1 + while features["state_events"][state_event_end_idx - 1] != state_events_end_token: + state_event_end_idx += 1 + features[feature_key] = np.concatenate( + [ + features["state_events"][state_event_start_idx:state_event_end_idx], + features[feature_key], + ], + axis=0, + ) + + return features + + +def map_midi_programs( + feature, codec: Codec, granularity_type: str = "full", feature_key: str = "inputs" +) -> Mapping[str, Any]: + """Apply MIDI program map to token sequences.""" + granularity = PROGRAM_GRANULARITIES[granularity_type] + + feature[feature_key] = granularity.tokens_map_fn(feature[feature_key], codec) + return feature + + +def run_length_encode_shifts_fn( + features, + codec: Codec, + feature_key: str = "inputs", + state_change_event_types: Sequence[str] = (), +) -> Callable[[Mapping[str, Any]], Mapping[str, Any]]: + """Return a function that run-length encodes shifts for a given codec. + + Args: + codec: The Codec to use for shift events. + feature_key: The feature key for which to run-length encode shifts. + state_change_event_types: A list of event types that represent state + changes; tokens corresponding to these event types will be interpreted as state changes and redundant ones + will be removed. + + Returns: + A preprocessing function that run-length encodes single-step shifts. + """ + state_change_event_ranges = [codec.event_type_range(event_type) for event_type in state_change_event_types] + + def run_length_encode_shifts(features: MutableMapping[str, Any]) -> Mapping[str, Any]: + """Combine leading/interior shifts, trim trailing shifts. + + Args: + features: Dict of features to process. + + Returns: + A dict of features. + """ + events = features[feature_key] + + shift_steps = 0 + total_shift_steps = 0 + output = np.array([], dtype=np.int32) + + current_state = np.zeros(len(state_change_event_ranges), dtype=np.int32) + + for event in events: + if codec.is_shift_event_index(event): + shift_steps += 1 + total_shift_steps += 1 + + else: + # If this event is a state change and has the same value as the current + # state, we can skip it entirely. 
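+                # For example, a repeated "velocity" token carrying the same bin as the
+                # value already stored in `current_state` adds no information and is
+                # dropped from the output.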
+ is_redundant = False + for i, (min_index, max_index) in enumerate(state_change_event_ranges): + if (min_index <= event) and (event <= max_index): + if current_state[i] == event: + is_redundant = True + current_state[i] = event + if is_redundant: + continue + + # Once we've reached a non-shift event, RLE all previous shift events + # before outputting the non-shift event. + if shift_steps > 0: + shift_steps = total_shift_steps + while shift_steps > 0: + output_steps = np.minimum(codec.max_shift_steps, shift_steps) + output = np.concatenate([output, [output_steps]], axis=0) + shift_steps -= output_steps + output = np.concatenate([output, [event]], axis=0) + + features[feature_key] = output + return features + + return run_length_encode_shifts(features) + + +def note_representation_processor_chain(features, codec: Codec, note_representation_config: NoteRepresentationConfig): + tie_token = codec.encode_event(Event("tie", 0)) + state_events_end_token = tie_token if note_representation_config.include_ties else None + + features = extract_sequence_with_indices( + features, state_events_end_token=state_events_end_token, feature_key="inputs" + ) + + features = map_midi_programs(features, codec) + + features = run_length_encode_shifts_fn(features, codec, state_change_event_types=["velocity", "program"]) + + return features + + +class MidiProcessor: + def __init__(self): + self.codec = Codec( + max_shift_steps=DEFAULT_MAX_SHIFT_SECONDS * DEFAULT_STEPS_PER_SECOND, + steps_per_second=DEFAULT_STEPS_PER_SECOND, + event_ranges=[ + EventRange("pitch", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH), + EventRange("velocity", 0, DEFAULT_NUM_VELOCITY_BINS), + EventRange("tie", 0, 0), + EventRange("program", note_seq.MIN_MIDI_PROGRAM, note_seq.MAX_MIDI_PROGRAM), + EventRange("drum", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH), + ], + ) + self.tokenizer = Tokenizer(self.codec.num_classes) + self.note_representation_config = NoteRepresentationConfig(onsets_only=False, include_ties=True) + + def __call__(self, midi: Union[bytes, os.PathLike, str]): + if not isinstance(midi, bytes): + with open(midi, "rb") as f: + midi = f.read() + + ns = note_seq.midi_to_note_sequence(midi) + ns_sus = note_seq.apply_sustain_control_changes(ns) + + for note in ns_sus.notes: + if not note.is_drum: + note.program = program_to_slakh_program(note.program) + + samples = np.zeros(int(ns_sus.total_time * SAMPLE_RATE)) + + _, frame_times = audio_to_frames(samples, HOP_SIZE, FRAME_RATE) + times, values = note_sequence_to_onsets_and_offsets_and_programs(ns_sus) + + events = encode_and_index_events( + state=NoteEncodingState(), + event_times=times, + event_values=values, + frame_times=frame_times, + codec=self.codec, + encode_event_fn=note_event_data_to_events, + encoding_state_to_events_fn=note_encoding_state_to_events, + ) + + events = [ + note_representation_processor_chain(event, self.codec, self.note_representation_config) for event in events + ] + input_tokens = [self.tokenizer.encode(event["inputs"]) for event in events] + + return input_tokens diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py new file mode 100644 index 000000000..1259f0bf0 --- /dev/null +++ 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py @@ -0,0 +1,86 @@ +# Copyright 2022 The Music Spectrogram Diffusion Authors. +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +from transformers.modeling_utils import ModuleUtilsMixin +from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm + +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin + + +class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): + @register_to_config + def __init__( + self, + max_length: int, + vocab_size: int, + d_model: int, + dropout_rate: float, + num_layers: int, + num_heads: int, + d_kv: int, + d_ff: int, + feed_forward_proj: str, + is_decoder: bool = False, + ): + super().__init__() + + self.token_embedder = nn.Embedding(vocab_size, d_model) + + self.position_encoding = nn.Embedding(max_length, d_model) + self.position_encoding.weight.requires_grad = False + + self.dropout_pre = nn.Dropout(p=dropout_rate) + + t5config = T5Config( + vocab_size=vocab_size, + d_model=d_model, + num_heads=num_heads, + d_kv=d_kv, + d_ff=d_ff, + dropout_rate=dropout_rate, + feed_forward_proj=feed_forward_proj, + is_decoder=is_decoder, + is_encoder_decoder=False, + ) + + self.encoders = nn.ModuleList() + for lyr_num in range(num_layers): + lyr = T5Block(t5config) + self.encoders.append(lyr) + + self.layer_norm = T5LayerNorm(d_model) + self.dropout_post = nn.Dropout(p=dropout_rate) + + def forward(self, encoder_input_tokens, encoder_inputs_mask): + x = self.token_embedder(encoder_input_tokens) + + seq_length = encoder_input_tokens.shape[1] + inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device) + x += self.position_encoding(inputs_positions) + + x = self.dropout_pre(x) + + # inverted the attention mask + input_shape = encoder_input_tokens.size() + extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape) + + for lyr in self.encoders: + x = lyr(x, extended_attention_mask)[0] + x = self.layer_norm(x) + + return self.dropout_post(x), encoder_inputs_mask diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py new file mode 100644 index 000000000..496a1f765 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py @@ -0,0 +1,269 @@ +# Copyright 2022 The Music Spectrogram Diffusion Authors. +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import Any, Callable, List, Optional, Tuple, Union + +import numpy as np +import torch + +from ....models import T5FilmDecoder +from ....schedulers import DDPMScheduler +from ....utils import is_onnx_available, logging +from ....utils.torch_utils import randn_tensor + + +if is_onnx_available(): + from ...onnx_utils import OnnxRuntimeModel + +from ...pipeline_utils import AudioPipelineOutput, DiffusionPipeline +from .continuous_encoder import SpectrogramContEncoder +from .notes_encoder import SpectrogramNotesEncoder + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +TARGET_FEATURE_LENGTH = 256 + + +class SpectrogramDiffusionPipeline(DiffusionPipeline): + r""" + Pipeline for unconditional audio generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + notes_encoder ([`SpectrogramNotesEncoder`]): + continuous_encoder ([`SpectrogramContEncoder`]): + decoder ([`T5FilmDecoder`]): + A [`T5FilmDecoder`] to denoise the encoded audio latents. + scheduler ([`DDPMScheduler`]): + A scheduler to be used in combination with `decoder` to denoise the encoded audio latents. + melgan ([`OnnxRuntimeModel`]): + """ + + _optional_components = ["melgan"] + + def __init__( + self, + notes_encoder: SpectrogramNotesEncoder, + continuous_encoder: SpectrogramContEncoder, + decoder: T5FilmDecoder, + scheduler: DDPMScheduler, + melgan: OnnxRuntimeModel if is_onnx_available() else Any, + ) -> None: + super().__init__() + + # From MELGAN + self.min_value = math.log(1e-5) # Matches MelGAN training. + self.max_value = 4.0 # Largest value for most examples + self.n_dims = 128 + + self.register_modules( + notes_encoder=notes_encoder, + continuous_encoder=continuous_encoder, + decoder=decoder, + scheduler=scheduler, + melgan=melgan, + ) + + def scale_features(self, features, output_range=(-1.0, 1.0), clip=False): + """Linearly scale features to network outputs range.""" + min_out, max_out = output_range + if clip: + features = torch.clip(features, self.min_value, self.max_value) + # Scale to [0, 1]. + zero_one = (features - self.min_value) / (self.max_value - self.min_value) + # Scale to [min_out, max_out]. + return zero_one * (max_out - min_out) + min_out + + def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False): + """Invert by linearly scaling network outputs to features range.""" + min_out, max_out = input_range + outputs = torch.clip(outputs, min_out, max_out) if clip else outputs + # Scale to [0, 1]. + zero_one = (outputs - min_out) / (max_out - min_out) + # Scale to [self.min_value, self.max_value]. 
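+        # e.g. with input_range=(-1.0, 1.0), an output of 1.0 maps back to
+        # self.max_value (4.0) and -1.0 maps back to self.min_value (log(1e-5)),
+        # inverting scale_features above.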
+ return zero_one * (self.max_value - self.min_value) + self.min_value + + def encode(self, input_tokens, continuous_inputs, continuous_mask): + tokens_mask = input_tokens > 0 + tokens_encoded, tokens_mask = self.notes_encoder( + encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask + ) + + continuous_encoded, continuous_mask = self.continuous_encoder( + encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask + ) + + return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] + + def decode(self, encodings_and_masks, input_tokens, noise_time): + timesteps = noise_time + if not torch.is_tensor(timesteps): + timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device) + elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: + timesteps = timesteps[None].to(input_tokens.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device) + + logits = self.decoder( + encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps + ) + return logits + + @torch.no_grad() + def __call__( + self, + input_tokens: List[List[int]], + generator: Optional[torch.Generator] = None, + num_inference_steps: int = 100, + return_dict: bool = True, + output_type: str = "numpy", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + ) -> Union[AudioPipelineOutput, Tuple]: + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + r""" + The call function to the pipeline for generation. + + Args: + input_tokens (`List[List[int]]`): + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality audio at the + expense of slower inference. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. + output_type (`str`, *optional*, defaults to `"numpy"`): + The output format of the generated audio. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. 
+ + Example: + + ```py + >>> from diffusers import SpectrogramDiffusionPipeline, MidiProcessor + + >>> pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion") + >>> pipe = pipe.to("cuda") + >>> processor = MidiProcessor() + + >>> # Download MIDI from: wget http://www.piano-midi.de/midis/beethoven/beethoven_hammerklavier_2.mid + >>> output = pipe(processor("beethoven_hammerklavier_2.mid")) + + >>> audio = output.audios[0] + ``` + + Returns: + [`pipelines.AudioPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated audio. + """ + + pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32) + full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32) + ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device) + + for i, encoder_input_tokens in enumerate(input_tokens): + if i == 0: + encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to( + device=self.device, dtype=self.decoder.dtype + ) + # The first chunk has no previous context. + encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device) + else: + # The full song pipeline does not feed in a context feature, so the mask + # will be all 0s after the feature converter. Because we know we're + # feeding in a full context chunk from the previous prediction, set it + # to all 1s. + encoder_continuous_mask = ones + + encoder_continuous_inputs = self.scale_features( + encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True + ) + + encodings_and_masks = self.encode( + input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device), + continuous_inputs=encoder_continuous_inputs, + continuous_mask=encoder_continuous_mask, + ) + + # Sample encoder_continuous_inputs shaped gaussian noise to begin loop + x = randn_tensor( + shape=encoder_continuous_inputs.shape, + generator=generator, + device=self.device, + dtype=self.decoder.dtype, + ) + + # set step values + self.scheduler.set_timesteps(num_inference_steps) + + # Denoising diffusion loop + for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + output = self.decode( + encodings_and_masks=encodings_and_masks, + input_tokens=x, + noise_time=t / self.scheduler.config.num_train_timesteps, # rescale to [0, 1) + ) + + # Compute previous output: x_t -> x_t-1 + x = self.scheduler.step(output, t, x, generator=generator).prev_sample + + mel = self.scale_to_features(x, input_range=[-1.0, 1.0]) + encoder_continuous_inputs = mel[:1] + pred_mel = mel.cpu().float().numpy() + + full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1) + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + callback(i, full_pred_mel) + + logger.info("Generated segment", i) + + if output_type == "numpy" and not is_onnx_available(): + raise ValueError( + "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." + ) + elif output_type == "numpy" and self.melgan is None: + raise ValueError( + "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." 
+ ) + + if output_type == "numpy": + output = self.melgan(input_features=full_pred_mel.astype(np.float32)) + else: + output = full_pred_mel + + if not return_dict: + return (output,) + + return AudioPipelineOutput(audios=output) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py new file mode 100644 index 000000000..36cf1a33c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py @@ -0,0 +1,55 @@ +from typing import TYPE_CHECKING + +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ....utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_cycle_diffusion"] = ["CycleDiffusionPipeline"] + _import_structure["pipeline_stable_diffusion_inpaint_legacy"] = ["StableDiffusionInpaintPipelineLegacy"] + _import_structure["pipeline_stable_diffusion_model_editing"] = ["StableDiffusionModelEditingPipeline"] + + _import_structure["pipeline_stable_diffusion_paradigms"] = ["StableDiffusionParadigmsPipeline"] + _import_structure["pipeline_stable_diffusion_pix2pix_zero"] = ["StableDiffusionPix2PixZeroPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import * + + else: + from .pipeline_cycle_diffusion import CycleDiffusionPipeline + from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy + from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline + from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline + from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py new file mode 100644 index 000000000..0581effef --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py @@ -0,0 +1,948 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ....configuration_utils import FrozenDict +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import DDIMScheduler +from ....utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +def posterior_sample(scheduler, latents, timestep, clean_latents, generator, eta): + # 1. get previous step value (=t-1) + prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps + + if prev_timestep <= 0: + return clean_latents + + # 2. 
compute alphas, betas + alpha_prod_t = scheduler.alphas_cumprod[timestep] + alpha_prod_t_prev = ( + scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod + ) + + variance = scheduler._get_variance(timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + # direction pointing to x_t + e_t = (latents - alpha_prod_t ** (0.5) * clean_latents) / (1 - alpha_prod_t) ** (0.5) + dir_xt = (1.0 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * e_t + noise = std_dev_t * randn_tensor( + clean_latents.shape, dtype=clean_latents.dtype, device=clean_latents.device, generator=generator + ) + prev_latents = alpha_prod_t_prev ** (0.5) * clean_latents + dir_xt + noise + + return prev_latents + + +def compute_noise(scheduler, prev_latents, latents, timestep, noise_pred, eta): + # 1. get previous step value (=t-1) + prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps + + # 2. compute alphas, betas + alpha_prod_t = scheduler.alphas_cumprod[timestep] + alpha_prod_t_prev = ( + scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod + ) + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) + + # 4. Clip "predicted x_0" + if scheduler.config.clip_sample: + pred_original_sample = torch.clamp(pred_original_sample, -1, 1) + + # 5. compute variance: "sigma_t(η)" -> see formula (16) + # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) + variance = scheduler._get_variance(timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred + + noise = (prev_latents - (alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction)) / ( + variance ** (0.5) * eta + ) + return noise + + +class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin): + r""" + Pipeline for text-guided image to image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can only be an + instance of [`DDIMScheduler`]. 
+ safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: DDIMScheduler, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
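+
+        The method returns a `(prompt_embeds, negative_prompt_embeds)` tuple; when classifier-free
+        guidance is enabled, `__call__` concatenates the two halves as
+        `torch.cat([negative_prompt_embeds, prompt_embeds])`. A minimal usage sketch (illustrative
+        only, assuming an already-loaded pipeline `pipe`):
+
+        ```py
+        >>> cond_embeds, uncond_embeds = pipe.encode_prompt(
+        ...     "a photo of an astronaut", pipe.device, 1, do_classifier_free_guidance=True
+        ... )
+        >>> embeds = torch.cat([uncond_embeds, cond_embeds])  # unconditional half first
+        ```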
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. 
Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. + prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + image = image.to(device=device, dtype=dtype) + + batch_size = image.shape[0] + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt * num_images_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0) + + # add noise to latents using the timestep + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + clean_latents = init_latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents, clean_latents + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + source_prompt: Union[str, List[str]], + image: PipelineImageInput = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + source_guidance_scale: Optional[float] = 1, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.FloatTensor` `np.ndarray`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image` or tensor representing an image batch to be used as the starting point. Can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + source_guidance_scale (`float`, *optional*, defaults to 1): + Guidance scale for the source prompt. This is useful to control the amount of influence the source + prompt has for encoding. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). 
If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Example: + + ```py + import requests + import torch + from PIL import Image + from io import BytesIO + + from diffusers import CycleDiffusionPipeline, DDIMScheduler + + # load the pipeline + # make sure you're logged in with `huggingface-cli login` + model_id_or_path = "CompVis/stable-diffusion-v1-4" + scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler") + pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda") + + # let's download an initial image + url = "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/An%20astronaut%20riding%20a%20horse.png" + response = requests.get(url) + init_image = Image.open(BytesIO(response.content)).convert("RGB") + init_image = init_image.resize((512, 512)) + init_image.save("horse.png") + + # let's specify a prompt + source_prompt = "An astronaut riding a horse" + prompt = "An astronaut riding an elephant" + + # call the pipeline + image = pipe( + prompt=prompt, + source_prompt=source_prompt, + image=init_image, + num_inference_steps=100, + eta=0.1, + strength=0.8, + guidance_scale=2, + source_guidance_scale=1, + ).images[0] + + image.save("horse_to_elephant.png") + + # let's try another example + # See more samples at the original repo: https://github.com/ChenWu98/cycle-diffusion + url = ( + "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/A%20black%20colored%20car.png" + ) + response = requests.get(url) + init_image = Image.open(BytesIO(response.content)).convert("RGB") + init_image = init_image.resize((512, 512)) + init_image.save("black.png") + + source_prompt = "A black colored car" + prompt = "A blue colored car" + + # call the pipeline + torch.manual_seed(0) + image = pipe( + prompt=prompt, + source_prompt=source_prompt, + image=init_image, + num_inference_steps=100, + eta=0.1, + strength=0.85, + guidance_scale=3, + source_guidance_scale=1, + ).images[0] + + image.save("black_to_blue.png") + ``` + + 
Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 1. Check inputs + self.check_inputs(prompt, strength, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds_tuple = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + prompt_embeds=prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + source_prompt_embeds_tuple = self.encode_prompt( + source_prompt, device, num_images_per_prompt, do_classifier_free_guidance, None, clip_skip=clip_skip + ) + if prompt_embeds_tuple[1] is not None: + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + else: + prompt_embeds = prompt_embeds_tuple[0] + if source_prompt_embeds_tuple[1] is not None: + source_prompt_embeds = torch.cat([source_prompt_embeds_tuple[1], source_prompt_embeds_tuple[0]]) + else: + source_prompt_embeds = source_prompt_embeds_tuple[0] + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 6. Prepare latent variables + latents, clean_latents = self.prepare_latents( + image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator + ) + source_latents = latents + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + generator = extra_step_kwargs.pop("generator", None) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + source_latent_model_input = ( + torch.cat([source_latents] * 2) if do_classifier_free_guidance else source_latents + ) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + source_latent_model_input = self.scheduler.scale_model_input(source_latent_model_input, t) + + # predict the noise residual + if do_classifier_free_guidance: + concat_latent_model_input = torch.stack( + [ + source_latent_model_input[0], + latent_model_input[0], + source_latent_model_input[1], + latent_model_input[1], + ], + dim=0, + ) + concat_prompt_embeds = torch.stack( + [ + source_prompt_embeds[0], + prompt_embeds[0], + source_prompt_embeds[1], + prompt_embeds[1], + ], + dim=0, + ) + else: + concat_latent_model_input = torch.cat( + [ + source_latent_model_input, + latent_model_input, + ], + dim=0, + ) + concat_prompt_embeds = torch.cat( + [ + source_prompt_embeds, + prompt_embeds, + ], + dim=0, + ) + + concat_noise_pred = self.unet( + concat_latent_model_input, + t, + cross_attention_kwargs=cross_attention_kwargs, + encoder_hidden_states=concat_prompt_embeds, + ).sample + + # perform guidance + if do_classifier_free_guidance: + ( + source_noise_pred_uncond, + noise_pred_uncond, + source_noise_pred_text, + noise_pred_text, + ) = concat_noise_pred.chunk(4, dim=0) + + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + source_noise_pred = source_noise_pred_uncond + source_guidance_scale * ( + source_noise_pred_text - source_noise_pred_uncond + ) + + else: + (source_noise_pred, noise_pred) = concat_noise_pred.chunk(2, dim=0) + + # Sample source_latents from the posterior distribution. + prev_source_latents = posterior_sample( + self.scheduler, source_latents, t, clean_latents, generator=generator, **extra_step_kwargs + ) + # Compute noise. + noise = compute_noise( + self.scheduler, prev_source_latents, source_latents, t, source_noise_pred, **extra_step_kwargs + ) + source_latents = prev_source_latents + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, t, latents, variance_noise=noise, **extra_step_kwargs + ).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 9. 
Post-processing + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py new file mode 100644 index 000000000..0aa5e68bf --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py @@ -0,0 +1,542 @@ +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTokenizer + +from ....configuration_utils import FrozenDict +from ....schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ....utils import deprecate, logging +from ...onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def preprocess(image): + w, h = image.size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + image = image.resize((w, h), resample=PIL.Image.LANCZOS) + image = np.array(image).astype(np.float32) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + return 2.0 * image - 1.0 + + +def preprocess_mask(mask, scale_factor=8): + mask = mask.convert("L") + w, h = mask.size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL.Image.NEAREST) + mask = np.array(mask).astype(np.float32) / 255.0 + mask = np.tile(mask, (4, 1, 1)) + mask = mask[None].transpose(0, 1, 2, 3) # what does this step do? + mask = 1 - mask # repaint white, keep black + return mask + + +class OnnxStableDiffusionInpaintPipelineLegacy(DiffusionPipeline): + r""" + Pipeline for text-guided image inpainting using Stable Diffusion. This is a *legacy feature* for Onnx pipelines to + provide compatibility with StableDiffusionInpaintPipelineLegacy and may be removed in the future. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. 
Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + + _optional_components = ["safety_checker", "feature_extractor"] + _is_onnx = True + + vae_encoder: OnnxRuntimeModel + vae_decoder: OnnxRuntimeModel + text_encoder: OnnxRuntimeModel + tokenizer: CLIPTokenizer + unet: OnnxRuntimeModel + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] + safety_checker: OnnxRuntimeModel + feature_extractor: CLIPImageProcessor + + def __init__( + self, + vae_encoder: OnnxRuntimeModel, + vae_decoder: OnnxRuntimeModel, + text_encoder: OnnxRuntimeModel, + tokenizer: CLIPTokenizer, + unet: OnnxRuntimeModel, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + safety_checker: OnnxRuntimeModel, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae_encoder=vae_encoder, + vae_decoder=vae_decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: Optional[int], + do_classifier_free_guidance: bool, + negative_prompt: Optional[str], + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
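+
+        When classifier-free guidance is enabled this helper returns a single `np.ndarray` with the
+        unconditional embeddings concatenated in front of the text embeddings, so both halves can be
+        run through the ONNX UNet in one forward pass. A minimal sketch (illustrative only, assuming
+        an already-loaded `pipe` and `import numpy as np`):
+
+        ```py
+        >>> embeds = pipe._encode_prompt(
+        ...     "a photo of an astronaut", num_images_per_prompt=1,
+        ...     do_classifier_free_guidance=True, negative_prompt=None,
+        ... )
+        >>> uncond_embeds, text_embeds = np.split(embeds, 2)  # unconditional half first
+        ```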
+ """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids + + if not np.array_equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] + + prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] * batch_size + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="np", + ) + negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] + + if do_classifier_free_guidance: + negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def check_inputs( + self, + prompt, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[np.ndarray, PIL.Image.Image] = None, + mask_image: Union[np.ndarray, PIL.Image.Image] = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[np.random.RandomState] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, np.ndarray], None]] = None, + callback_steps: int = 1, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`np.ndarray` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. This is the image whose masked region will be inpainted. + mask_image (`np.ndarray` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a + PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should + contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter will be modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`.
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`np.random.RandomState`, *optional*): + A np.random.RandomState to make generation deterministic. + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + # check inputs. Raise error if not correct + self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}") + + if generator is None: + generator = np.random + + # set timesteps + self.scheduler.set_timesteps(num_inference_steps) + + if isinstance(image, PIL.Image.Image): + image = preprocess(image) + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance.
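# --- Illustrative aside: a minimal NumPy sketch of the classifier-free guidance combination that
# --- the code below performs. The array shapes and guidance_scale value are placeholder assumptions;
# --- only the formula noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# --- is taken from the pipeline itself.
import numpy as np

def apply_cfg(noise_pred_uncond: np.ndarray, noise_pred_text: np.ndarray, guidance_scale: float) -> np.ndarray:
    # With guidance_scale == 1.0 this returns noise_pred_text unchanged, matching the
    # "no classifier free guidance" note above.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# Example with dummy latent-shaped arrays (1 sample, 4 latent channels, 64x64).
uncond = np.zeros((1, 4, 64, 64), dtype=np.float32)
text = np.ones((1, 4, 64, 64), dtype=np.float32)
guided = apply_cfg(uncond, text, guidance_scale=7.5)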
+ do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds = self._encode_prompt( + prompt, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + latents_dtype = prompt_embeds.dtype + image = image.astype(latents_dtype) + + # encode the init image into latents and scale the latents + init_latents = self.vae_encoder(sample=image)[0] + init_latents = 0.18215 * init_latents + + # Expand init_latents for batch_size and num_images_per_prompt + init_latents = np.concatenate([init_latents] * num_images_per_prompt, axis=0) + init_latents_orig = init_latents + + # preprocess mask + if not isinstance(mask_image, np.ndarray): + mask_image = preprocess_mask(mask_image, 8) + mask_image = mask_image.astype(latents_dtype) + mask = np.concatenate([mask_image] * num_images_per_prompt, axis=0) + + # check sizes + if not mask.shape == init_latents.shape: + raise ValueError("The mask and image should be the same size!") + + # get the original timestep using init_timestep + offset = self.scheduler.config.get("steps_offset", 0) + init_timestep = int(num_inference_steps * strength) + offset + init_timestep = min(init_timestep, num_inference_steps) + + timesteps = self.scheduler.timesteps.numpy()[-init_timestep] + timesteps = np.array([timesteps] * batch_size * num_images_per_prompt) + + # add noise to latents using the timesteps + noise = generator.randn(*init_latents.shape).astype(latents_dtype) + init_latents = self.scheduler.add_noise( + torch.from_numpy(init_latents), torch.from_numpy(noise), torch.from_numpy(timesteps) + ) + init_latents = init_latents.numpy() + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η
in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + latents = init_latents + + t_start = max(num_inference_steps - init_timestep + offset, 0) + timesteps = self.scheduler.timesteps[t_start:].numpy() + timestep_dtype = next( + (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" + ) + timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] + + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + timestep = np.array([t], dtype=timestep_dtype) + noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[ + 0 + ] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs + ).prev_sample + + latents = latents.numpy() + + init_latents_proper = self.scheduler.add_noise( + torch.from_numpy(init_latents_orig), torch.from_numpy(noise), torch.from_numpy(np.array([t])) + ) + + init_latents_proper = init_latents_proper.numpy() + + latents = (init_latents_proper * mask) + (latents * (1 - mask)) + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + latents = 1 / 0.18215 * latents + # image = self.vae_decoder(latent_sample=latents)[0] + # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1 + image = np.concatenate( + [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] + ) + + image = np.clip(image / 2 + 0.5, 0, 1) + image = image.transpose((0, 2, 3, 1)) + + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor( + self.numpy_to_pil(image), return_tensors="np" + ).pixel_values.astype(image.dtype) + # There will throw an error if use safety_checker batchsize>1 + images, has_nsfw_concept = [], [] + for i in range(image.shape[0]): + image_i, has_nsfw_concept_i = self.safety_checker( + clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] + ) + images.append(image_i) + has_nsfw_concept.append(has_nsfw_concept_i[0]) + image = np.concatenate(images) + else: + has_nsfw_concept = None + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py new file mode 100644 index 000000000..980adf273 --- /dev/null +++ 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py @@ -0,0 +1,786 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ....configuration_utils import FrozenDict +from ....image_processor import VaeImageProcessor +from ....loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) + + +def preprocess_image(image, batch_size): + w, h = image.size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) + image = np.array(image).astype(np.float32) / 255.0 + image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) + image = torch.from_numpy(image) + return 2.0 * image - 1.0 + + +def preprocess_mask(mask, batch_size, scale_factor=8): + if not isinstance(mask, torch.FloatTensor): + mask = mask.convert("L") + w, h = mask.size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"]) + mask = np.array(mask).astype(np.float32) / 255.0 + mask = np.tile(mask, (4, 1, 1)) + mask = np.vstack([mask[None]] * batch_size) + mask = 1 - mask # repaint white, keep black + mask = torch.from_numpy(mask) + return mask + + else: + valid_mask_channel_sizes = [1, 3] + # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W) + if mask.shape[3] in valid_mask_channel_sizes: + mask = mask.permute(0, 3, 1, 2) + elif mask.shape[1] not in valid_mask_channel_sizes: + raise ValueError( + f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension," + f" but received mask of shape {tuple(mask.shape)}" + ) + # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape + mask = mask.mean(dim=1, keepdim=True) + h, w = mask.shape[-2:] + h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8 + mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor)) + return mask + + +class 
StableDiffusionInpaintPipelineLegacy( + DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin +): + r""" + Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + In addition the pipeline inherits the following loading methods: + - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`] + - *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`] + - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`] + + as well as the following saving methods: + - *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`] + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + deprecation_message = ( + f"The class {self.__class__} is deprecated and will be removed in v1.0.0. You can achieve exactly the same functionality" + "by loading your model into `StableDiffusionInpaintPipeline` instead. See https://github.com/huggingface/diffusers/pull/3533" + "for more information." + ) + deprecate("legacy is outdated", "1.0.0", deprecation_message, standard_warn=False) + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + 
images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, image, timestep, num_images_per_prompt, dtype, device, generator): + image = image.to(device=device, dtype=dtype) + init_latent_dist = self.vae.encode(image).latent_dist + init_latents = init_latent_dist.sample(generator=generator) + init_latents = self.vae.config.scaling_factor * init_latents + + # Expand init_latents for batch_size and num_images_per_prompt + init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0) + init_latents_orig = init_latents + + # add noise to latents using the timesteps + noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + return latents, init_latents_orig, noise + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: Union[torch.FloatTensor, PIL.Image.Image] = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + add_predicted_noise: Optional[bool] = False, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.FloatTensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. This is the image whose masked region will be inpainted. + mask_image (`torch.FloatTensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a + PIL image, it will be converted to a single channel (luminance) before use. 
If mask is a tensor, the + expected shape should be either `(B, H, W, C)` or `(B, C, H, W)`, where C is 1 or 3. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength` + is 1, the denoising process will be run on the masked area for the full number of iterations specified + in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more noise to + that region the larger the `strength`. If `strength` is 0, no inpainting will occur. + num_inference_steps (`int`, *optional*, defaults to 50): + The reference number of denoising steps. More denoising steps usually lead to a higher quality image at + the expense of slower inference. This parameter will be modulated by `strength`, as explained above. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` + is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + add_predicted_noise (`bool`, *optional*, defaults to True): + Use predicted noise instead of random noise when constructing noisy versions of the original image in + the reverse diffusion process + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. 
If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + # 1. Check inputs + self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Preprocess image and mask + if not isinstance(image, torch.FloatTensor): + image = preprocess_image(image, batch_size) + + mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor) + + # 5. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 6. Prepare latent variables + # encode the init image into latents and scale the latents + latents, init_latents_orig, noise = self.prepare_latents( + image, latent_timestep, num_images_per_prompt, prompt_embeds.dtype, device, generator + ) + + # 7. Prepare mask latent + mask = mask_image.to(device=device, dtype=latents.dtype) + mask = torch.cat([mask] * num_images_per_prompt) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + # masking + if add_predicted_noise: + init_latents_proper = self.scheduler.add_noise( + init_latents_orig, noise_pred_uncond, torch.tensor([t]) + ) + else: + init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t])) + + latents = (init_latents_proper * mask) + (latents * (1 - mask)) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # use original latents corresponding to unmasked portions of the image + latents = (init_latents_orig * mask) + (latents * (1 - mask)) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py new file mode 100644 index 000000000..dee93fc2e --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py @@ -0,0 +1,824 @@ +# Copyright 2024 TIME Authors and The HuggingFace Team. All rights reserved." +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer + +from ....image_processor import VaeImageProcessor +from ....loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import PNDMScheduler +from ....schedulers.scheduling_utils import SchedulerMixin +from ....utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +AUGS_CONST = ["A photo of ", "An image of ", "A picture of "] + + +class StableDiffusionModelEditingPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin +): + r""" + Pipeline for text-to-image model editing. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPFeatureExtractor`]): + A `CLIPFeatureExtractor` to extract features from generated images; used as inputs to the `safety_checker`. + with_to_k ([`bool`]): + Whether to edit the key projection matrices along with the value projection matrices. + with_augs ([`list`]): + Textual augmentations to apply while editing the text-to-image model. 
Set to `[]` for no augmentations. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: SchedulerMixin, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPFeatureExtractor, + requires_safety_checker: bool = True, + with_to_k: bool = True, + with_augs: list = AUGS_CONST, + ): + super().__init__() + + if isinstance(scheduler, PNDMScheduler): + logger.error("PNDMScheduler for this pipeline is currently not supported.") + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + self.with_to_k = with_to_k + self.with_augs = with_augs + + # get cross-attention layers + ca_layers = [] + + def append_ca(net_): + if net_.__class__.__name__ == "CrossAttention": + ca_layers.append(net_) + elif hasattr(net_, "children"): + for net__ in net_.children(): + append_ca(net__) + + # recursively find all cross-attention layers in unet + for net in self.unet.named_children(): + if "down" in net[0]: + append_ca(net[1]) + elif "up" in net[0]: + append_ca(net[1]) + elif "mid" in net[0]: + append_ca(net[1]) + + # get projection matrices + self.ca_clip_layers = [l for l in ca_layers if l.to_v.in_features == 768] + self.projection_matrices = [l.to_v for l in self.ca_clip_layers] + self.og_matrices = [copy.deepcopy(l.to_v) for l in self.ca_clip_layers] + if self.with_to_k: + self.projection_matrices = self.projection_matrices + [l.to_k for l in self.ca_clip_layers] + self.og_matrices = self.og_matrices + [copy.deepcopy(l.to_k) for l in self.ca_clip_layers] + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = 
"`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + 
images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def edit_model( + self, + source_prompt: str, + destination_prompt: str, + lamb: float = 0.1, + restart_params: bool = True, + ): + r""" + Apply model editing via closed-form solution (see Eq. 5 in the TIME [paper](https://arxiv.org/abs/2303.08084)). + + Args: + source_prompt (`str`): + The source prompt containing the concept to be edited. + destination_prompt (`str`): + The destination prompt. Must contain all words from `source_prompt` with additional ones to specify the + target edit. + lamb (`float`, *optional*, defaults to 0.1): + The lambda parameter specifying the regularization intensity. Smaller values increase the editing power. + restart_params (`bool`, *optional*, defaults to True): + Restart the model parameters to their pre-trained version before editing. This is done to avoid edit + compounding. When it is `False`, edits accumulate.
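The body of `edit_model` that follows builds, for every projection matrix, mat1 = lambda * W + sum(v k^T) and mat2 = lambda * I + sum(k k^T), and replaces the weight with mat1 @ inverse(mat2). A standalone sketch of that closed-form update with hypothetical toy shapes (the real in/out features depend on the cross-attention layer):

```py
import torch

d_out, d_in, n_tokens = 320, 768, 77          # hypothetical toy sizes
W = torch.randn(d_out, d_in)                  # stand-in for one to_k / to_v projection weight
old_emb = torch.randn(n_tokens, d_in)         # k*: token embeddings of the source prompt
new_values = torch.randn(n_tokens, d_out)     # v*: target outputs for those tokens
lamb = 0.1

# mat1 = lambda * W + sum_i v_i k_i^T,  mat2 = lambda * I + sum_i k_i k_i^T
mat1 = lamb * W + new_values.T @ old_emb
mat2 = lamb * torch.eye(d_in) + old_emb.T @ old_emb

W_edited = mat1 @ torch.inverse(mat2)         # closed-form solution, same shape as W
print(W_edited.shape)                         # torch.Size([320, 768])
```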
+ """ + + # restart LDM parameters + if restart_params: + num_ca_clip_layers = len(self.ca_clip_layers) + for idx_, l in enumerate(self.ca_clip_layers): + l.to_v = copy.deepcopy(self.og_matrices[idx_]) + self.projection_matrices[idx_] = l.to_v + if self.with_to_k: + l.to_k = copy.deepcopy(self.og_matrices[num_ca_clip_layers + idx_]) + self.projection_matrices[num_ca_clip_layers + idx_] = l.to_k + + # set up sentences + old_texts = [source_prompt] + new_texts = [destination_prompt] + # add augmentations + base = old_texts[0] if old_texts[0][0:1] != "A" else "a" + old_texts[0][1:] + for aug in self.with_augs: + old_texts.append(aug + base) + base = new_texts[0] if new_texts[0][0:1] != "A" else "a" + new_texts[0][1:] + for aug in self.with_augs: + new_texts.append(aug + base) + + # prepare input k* and v* + old_embs, new_embs = [], [] + for old_text, new_text in zip(old_texts, new_texts): + text_input = self.tokenizer( + [old_text, new_text], + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] + old_emb, new_emb = text_embeddings + old_embs.append(old_emb) + new_embs.append(new_emb) + + # identify corresponding destinations for each token in old_emb + idxs_replaces = [] + for old_text, new_text in zip(old_texts, new_texts): + tokens_a = self.tokenizer(old_text).input_ids + tokens_b = self.tokenizer(new_text).input_ids + tokens_a = [self.tokenizer.encode("a ")[1] if self.tokenizer.decode(t) == "an" else t for t in tokens_a] + tokens_b = [self.tokenizer.encode("a ")[1] if self.tokenizer.decode(t) == "an" else t for t in tokens_b] + num_orig_tokens = len(tokens_a) + idxs_replace = [] + j = 0 + for i in range(num_orig_tokens): + curr_token = tokens_a[i] + while tokens_b[j] != curr_token: + j += 1 + idxs_replace.append(j) + j += 1 + while j < 77: + idxs_replace.append(j) + j += 1 + while len(idxs_replace) < 77: + idxs_replace.append(76) + idxs_replaces.append(idxs_replace) + + # prepare batch: for each pair of setences, old context and new values + contexts, valuess = [], [] + for old_emb, new_emb, idxs_replace in zip(old_embs, new_embs, idxs_replaces): + context = old_emb.detach() + values = [] + with torch.no_grad(): + for layer in self.projection_matrices: + values.append(layer(new_emb[idxs_replace]).detach()) + contexts.append(context) + valuess.append(values) + + # edit the model + for layer_num in range(len(self.projection_matrices)): + # mat1 = \lambda W + \sum{v k^T} + mat1 = lamb * self.projection_matrices[layer_num].weight + + # mat2 = \lambda I + \sum{k k^T} + mat2 = lamb * torch.eye( + self.projection_matrices[layer_num].weight.shape[1], + device=self.projection_matrices[layer_num].weight.device, + ) + + # aggregate sums for mat1, mat2 + for context, values in zip(contexts, valuess): + context_vector = context.reshape(context.shape[0], context.shape[1], 1) + context_vector_T = context.reshape(context.shape[0], 1, context.shape[1]) + value_vector = values[layer_num].reshape(values[layer_num].shape[0], values[layer_num].shape[1], 1) + for_mat1 = (value_vector @ context_vector_T).sum(dim=0) + for_mat2 = (context_vector @ context_vector_T).sum(dim=0) + mat1 += for_mat1 + mat2 += for_mat2 + + # update projection matrix + self.projection_matrices[layer_num].weight = torch.nn.Parameter(mat1 @ torch.inverse(mat2)) + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = 
None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. 
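Since `__call__` accepts `prompt_embeds` and `negative_prompt_embeds` directly, the text encoding can be done once with `encode_prompt` and reused across calls. A usage sketch along the lines of the docstring example; the checkpoint and argument values are only illustrative:

```py
import torch
from diffusers import StableDiffusionModelEditingPipeline

pipe = StableDiffusionModelEditingPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe = pipe.to("cuda")

# encode once ...
prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
    prompt="A field of roses",
    device="cuda",
    num_images_per_prompt=1,
    do_classifier_free_guidance=True,
    negative_prompt=None,
)

# ... then reuse the cached embeddings instead of passing `prompt`
image = pipe(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    generator=torch.Generator("cuda").manual_seed(0),
).images[0]
```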
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Examples: + + ```py + >>> import torch + >>> from diffusers import StableDiffusionModelEditingPipeline + + >>> model_ckpt = "CompVis/stable-diffusion-v1-4" + >>> pipe = StableDiffusionModelEditingPipeline.from_pretrained(model_ckpt) + + >>> pipe = pipe.to("cuda") + + >>> source_prompt = "A pack of roses" + >>> destination_prompt = "A pack of blue roses" + >>> pipe.edit_model(source_prompt, destination_prompt) + + >>> prompt = "A field of roses" + >>> image = pipe(prompt).images[0] + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. 
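The comment above refers to the usual classifier-free guidance trick: one forward pass on a doubled batch, then split and recombine. A toy sketch of the recombination performed inside the denoising loop below, with made-up tensor shapes:

```py
import torch

guidance_scale = 7.5
# stand-in for the UNet output on the doubled (unconditional + text) batch
noise_pred = torch.randn(2, 4, 64, 64)
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 4, 64, 64])
```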
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py new file mode 100644 index 000000000..ddc866ef9 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py @@ -0,0 +1,786 @@ +# Copyright 2024 ParaDiGMS authors and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ....image_processor import VaeImageProcessor +from ....loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import DDPMParallelScheduler + >>> from diffusers import StableDiffusionParadigmsPipeline + + >>> scheduler = DDPMParallelScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler") + + >>> pipe = StableDiffusionParadigmsPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", scheduler=scheduler, torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> ngpu, batch_per_device = torch.cuda.device_count(), 5 + >>> pipe.wrapped_unet = torch.nn.DataParallel(pipe.unet, device_ids=[d for d in range(ngpu)]) + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt, parallel=ngpu * batch_per_device, num_inference_steps=1000).images[0] + ``` +""" + + +class StableDiffusionParadigmsPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin +): + r""" + Pipeline for text-to-image generation using a parallelized version of Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). 
+ tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # attribute to wrap the unet with torch.nn.DataParallel when running multiple denoising steps on multiple GPUs + self.wrapped_unet = self.unet + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. 
Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + 
images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def _cumsum(self, input, dim, debug=False): + if debug: + # cumsum_cuda_kernel does not have a deterministic implementation + # so perform cumsum on cpu for debugging purposes + return torch.cumsum(input.cpu().float(), dim=dim).to(input.device) + else: + return torch.cumsum(input, dim=dim) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + parallel: int = 10, + tolerance: float = 0.1, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + debug: bool = False, + clip_skip: int = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + parallel (`int`, *optional*, defaults to 10): + The batch size to use when doing parallel sampling. More parallelism may lead to faster inference but + requires higher memory usage and can also require more total FLOPs. + tolerance (`float`, *optional*, defaults to 0.1): + The error tolerance for determining when to slide the batch window forward for parallel sampling. Lower + tolerance usually leads to less or no degradation. Higher tolerance is faster but can risk degradation + of sample quality. The tolerance is specified as a ratio of the scheduler's noise magnitude. 
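The `_cumsum` helper above routes the reduction through the CPU in debug mode because the CUDA cumsum kernel is not deterministic. A minimal standalone sketch of that behavior (the function name here is a stand-in):

```py
import torch

def cumsum_maybe_on_cpu(x: torch.Tensor, dim: int, debug: bool = False) -> torch.Tensor:
    # mirrors `_cumsum`: in debug mode, do the reduction in float32 on the CPU and move back
    if debug:
        return torch.cumsum(x.cpu().float(), dim=dim).to(x.device)
    return torch.cumsum(x, dim=dim)

x = torch.randn(4, 2, 3)
assert torch.allclose(cumsum_maybe_on_cpu(x, 0, debug=True), torch.cumsum(x, 0), atol=1e-6)
```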
+ guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + debug (`bool`, *optional*, defaults to `False`): + Whether or not to run in debug mode. In debug mode, `torch.cumsum` is evaluated using the CPU. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + extra_step_kwargs.pop("generator", None) + + # # 7. Denoising loop + scheduler = self.scheduler + parallel = min(parallel, len(scheduler.timesteps)) + + begin_idx = 0 + end_idx = parallel + latents_time_evolution_buffer = torch.stack([latents] * (len(scheduler.timesteps) + 1)) + + # We must make sure the noise of stochastic schedulers such as DDPM is sampled only once per timestep. + # Sampling inside the parallel denoising loop will mess this up, so we pre-sample the noise vectors outside the denoising loop. + noise_array = torch.zeros_like(latents_time_evolution_buffer) + for j in range(len(scheduler.timesteps)): + base_noise = randn_tensor( + shape=latents.shape, generator=generator, device=latents.device, dtype=prompt_embeds.dtype + ) + noise = (self.scheduler._get_variance(scheduler.timesteps[j]) ** 0.5) * base_noise + noise_array[j] = noise.clone() + + # We specify the error tolerance as a ratio of the scheduler's noise magnitude. We similarly compute the error tolerance + # outside of the denoising loop to avoid recomputing it at every step. 
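The pre-sampling described in the comments above draws one noise tensor per timestep before the loop, so stochastic schedulers see the same noise regardless of how the parallel window later slides. A sketch with a hypothetical variance schedule standing in for `scheduler._get_variance`:

```py
import torch

latents_shape = (1, 4, 64, 64)
# hypothetical per-timestep variances standing in for `scheduler._get_variance(t)`
variances = torch.linspace(1e-4, 2e-2, steps=10)

generator = torch.Generator().manual_seed(0)
noise_array = torch.zeros(len(variances), *latents_shape)
for j, var in enumerate(variances):
    base_noise = torch.randn(latents_shape, generator=generator)
    # one fixed noise tensor per timestep, reused however the parallel window slides
    noise_array[j] = (var ** 0.5) * base_noise

print(noise_array.shape)  # torch.Size([10, 1, 4, 64, 64])
```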
+ # We will be dividing the norm of the noise, so we store its inverse here to avoid a division at every step. + inverse_variance_norm = 1.0 / torch.tensor( + [scheduler._get_variance(scheduler.timesteps[j]) for j in range(len(scheduler.timesteps))] + [0] + ).to(noise_array.device) + latent_dim = noise_array[0, 0].numel() + inverse_variance_norm = inverse_variance_norm[:, None] / latent_dim + + scaled_tolerance = tolerance**2 + + with self.progress_bar(total=num_inference_steps) as progress_bar: + steps = 0 + while begin_idx < len(scheduler.timesteps): + # these have shape (parallel_dim, 2*batch_size, ...) + # parallel_len is at most parallel, but could be less if we are at the end of the timesteps + # we are processing batch window of timesteps spanning [begin_idx, end_idx) + parallel_len = end_idx - begin_idx + + block_prompt_embeds = torch.stack([prompt_embeds] * parallel_len) + block_latents = latents_time_evolution_buffer[begin_idx:end_idx] + block_t = scheduler.timesteps[begin_idx:end_idx, None].repeat(1, batch_size * num_images_per_prompt) + t_vec = block_t + if do_classifier_free_guidance: + t_vec = t_vec.repeat(1, 2) + + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + torch.cat([block_latents] * 2, dim=1) if do_classifier_free_guidance else block_latents + ) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t_vec) + + # if parallel_len is small, no need to use multiple GPUs + net = self.wrapped_unet if parallel_len > 3 else self.unet + # predict the noise residual, shape is now [parallel_len * 2 * batch_size * num_images_per_prompt, ...] + model_output = net( + latent_model_input.flatten(0, 1), + t_vec.flatten(0, 1), + encoder_hidden_states=block_prompt_embeds.flatten(0, 1), + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + per_latent_shape = model_output.shape[1:] + if do_classifier_free_guidance: + model_output = model_output.reshape( + parallel_len, 2, batch_size * num_images_per_prompt, *per_latent_shape + ) + noise_pred_uncond, noise_pred_text = model_output[:, 0], model_output[:, 1] + model_output = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + model_output = model_output.reshape( + parallel_len * batch_size * num_images_per_prompt, *per_latent_shape + ) + + block_latents_denoise = scheduler.batch_step_no_noise( + model_output=model_output, + timesteps=block_t.flatten(0, 1), + sample=block_latents.flatten(0, 1), + **extra_step_kwargs, + ).reshape(block_latents.shape) + + # back to shape (parallel_dim, batch_size, ...) + # now we want to add the pre-sampled noise + # parallel sampling algorithm requires computing the cumulative drift from the beginning + # of the window, so we need to compute cumulative sum of the deltas and the pre-sampled noises. 
+ delta = block_latents_denoise - block_latents + cumulative_delta = self._cumsum(delta, dim=0, debug=debug) + cumulative_noise = self._cumsum(noise_array[begin_idx:end_idx], dim=0, debug=debug) + + # if we are using an ODE-like scheduler (like DDIM), we don't want to add noise + if scheduler._is_ode_scheduler: + cumulative_noise = 0 + + block_latents_new = ( + latents_time_evolution_buffer[begin_idx][None,] + cumulative_delta + cumulative_noise + ) + cur_error = torch.linalg.norm( + (block_latents_new - latents_time_evolution_buffer[begin_idx + 1 : end_idx + 1]).reshape( + parallel_len, batch_size * num_images_per_prompt, -1 + ), + dim=-1, + ).pow(2) + error_ratio = cur_error * inverse_variance_norm[begin_idx + 1 : end_idx + 1] + + # find the first index of the vector error_ratio that is greater than error tolerance + # we can shift the window for the next iteration up to this index + error_ratio = torch.nn.functional.pad( + error_ratio, (0, 0, 0, 1), value=1e9 + ) # handle the case when everything is below ratio, by padding the end of parallel_len dimension + any_error_at_time = torch.max(error_ratio > scaled_tolerance, dim=1).values.int() + ind = torch.argmax(any_error_at_time).item() + + # compute the new begin and end idxs for the window + new_begin_idx = begin_idx + min(1 + ind, parallel) + new_end_idx = min(new_begin_idx + parallel, len(scheduler.timesteps)) + + # store the computed latents for the current window in the global buffer + latents_time_evolution_buffer[begin_idx + 1 : end_idx + 1] = block_latents_new + # initialize the new sliding window latents with the end of the current window, + # should be better than random initialization + latents_time_evolution_buffer[end_idx : new_end_idx + 1] = latents_time_evolution_buffer[end_idx][ + None, + ] + + steps += 1 + + progress_bar.update(new_begin_idx - begin_idx) + if callback is not None and steps % callback_steps == 0: + callback(begin_idx, block_t[begin_idx], latents_time_evolution_buffer[begin_idx]) + + begin_idx = new_begin_idx + end_idx = new_end_idx + + latents = latents_time_evolution_buffer[-1] + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py new file mode 100644 index 000000000..c819e5728 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py @@ -0,0 +1,1304 @@ +# Copyright 2024 Pix2Pix Zero Authors and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + BlipForConditionalGeneration, + BlipProcessor, + CLIPImageProcessor, + CLIPTextModel, + CLIPTokenizer, +) + +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.attention_processor import Attention +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import DDIMScheduler, DDPMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler +from ....schedulers.scheduling_ddim_inverse import DDIMInverseScheduler +from ....utils import ( + PIL_INTERPOLATION, + USE_PEFT_BACKEND, + BaseOutput, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class Pix2PixInversionPipelineOutput(BaseOutput, TextualInversionLoaderMixin): + """ + Output class for Stable Diffusion pipelines. + + Args: + latents (`torch.FloatTensor`) + inverted latents tensor + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. + """ + + latents: torch.FloatTensor + images: Union[List[PIL.Image.Image], np.ndarray] + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import requests + >>> import torch + + >>> from diffusers import DDIMScheduler, StableDiffusionPix2PixZeroPipeline + + + >>> def download(embedding_url, local_filepath): + ... r = requests.get(embedding_url) + ... with open(local_filepath, "wb") as f: + ... f.write(r.content) + + + >>> model_ckpt = "CompVis/stable-diffusion-v1-4" + >>> pipeline = StableDiffusionPix2PixZeroPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16) + >>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.to("cuda") + + >>> prompt = "a high resolution painting of a cat in the style of van gough" + >>> source_emb_url = "https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/cat.pt" + >>> target_emb_url = "https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/dog.pt" + + >>> for url in [source_emb_url, target_emb_url]: + ... 
download(url, url.split("/")[-1]) + + >>> src_embeds = torch.load(source_emb_url.split("/")[-1]) + >>> target_embeds = torch.load(target_emb_url.split("/")[-1]) + >>> images = pipeline( + ... prompt, + ... source_embeds=src_embeds, + ... target_embeds=target_embeds, + ... num_inference_steps=50, + ... cross_attention_guidance_amount=0.15, + ... ).images + + >>> images[0].save("edited_image_dog.png") + ``` +""" + +EXAMPLE_INVERT_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from transformers import BlipForConditionalGeneration, BlipProcessor + >>> from diffusers import DDIMScheduler, DDIMInverseScheduler, StableDiffusionPix2PixZeroPipeline + + >>> import requests + >>> from PIL import Image + + >>> captioner_id = "Salesforce/blip-image-captioning-base" + >>> processor = BlipProcessor.from_pretrained(captioner_id) + >>> model = BlipForConditionalGeneration.from_pretrained( + ... captioner_id, torch_dtype=torch.float16, low_cpu_mem_usage=True + ... ) + + >>> sd_model_ckpt = "CompVis/stable-diffusion-v1-4" + >>> pipeline = StableDiffusionPix2PixZeroPipeline.from_pretrained( + ... sd_model_ckpt, + ... caption_generator=model, + ... caption_processor=processor, + ... torch_dtype=torch.float16, + ... safety_checker=None, + ... ) + + >>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.enable_model_cpu_offload() + + >>> img_url = "https://github.com/pix2pixzero/pix2pix-zero/raw/main/assets/test_images/cats/cat_6.png" + + >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB").resize((512, 512)) + >>> # generate caption + >>> caption = pipeline.generate_caption(raw_image) + + >>> # "a photography of a cat with flowers and dai dai daie - daie - daie kasaii" + >>> inv_latents = pipeline.invert(caption, image=raw_image).latents + >>> # we need to generate source and target embeds + + >>> source_prompts = ["a cat sitting on the street", "a cat playing in the field", "a face of a cat"] + + >>> target_prompts = ["a dog sitting on the street", "a dog playing in the field", "a face of a dog"] + + >>> source_embeds = pipeline.get_embeds(source_prompts) + >>> target_embeds = pipeline.get_embeds(target_prompts) + >>> # the latents can then be used to edit a real image + >>> # when using Stable Diffusion 2 or other models that use v-prediction + >>> # set `cross_attention_guidance_amount` to 0.01 or less to avoid input latent gradient explosion + + >>> image = pipeline( + ... caption, + ... source_embeds=source_embeds, + ... target_embeds=target_embeds, + ... num_inference_steps=50, + ... cross_attention_guidance_amount=0.15, + ... generator=generator, + ... latents=inv_latents, + ... negative_prompt=caption, + ... ).images[0] + >>> image.save("edited_image.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +def prepare_unet(unet: UNet2DConditionModel): + """Modifies the UNet (`unet`) to perform Pix2Pix Zero optimizations.""" + pix2pix_zero_attn_procs = {} + for name in unet.attn_processors.keys(): + module_name = name.replace(".processor", "") + module = unet.get_submodule(module_name) + if "attn2" in name: + pix2pix_zero_attn_procs[name] = Pix2PixZeroAttnProcessor(is_pix2pix_zero=True) + module.requires_grad_(True) + else: + pix2pix_zero_attn_procs[name] = Pix2PixZeroAttnProcessor(is_pix2pix_zero=False) + module.requires_grad_(False) + + unet.set_attn_processor(pix2pix_zero_attn_procs) + return unet + + +class Pix2PixZeroL2Loss: + def __init__(self): + self.loss = 0.0 + + def compute_loss(self, predictions, targets): + self.loss += ((predictions - targets) ** 2).sum((1, 2)).mean(0) + + +class Pix2PixZeroAttnProcessor: + """An attention processor class to store the attention weights. + In Pix2Pix Zero, it happens during computations in the cross-attention blocks.""" + + def __init__(self, is_pix2pix_zero=False): + self.is_pix2pix_zero = is_pix2pix_zero + if self.is_pix2pix_zero: + self.reference_cross_attn_map = {} + + def __call__( + self, + attn: Attention, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + timestep=None, + loss=None, + ): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + if self.is_pix2pix_zero and timestep is not None: + # new bookkeeping to save the attention weights. + if loss is None: + self.reference_cross_attn_map[timestep.item()] = attention_probs.detach().cpu() + # compute loss + elif loss is not None: + prev_attn_probs = self.reference_cross_attn_map.pop(timestep.item()) + loss.compute_loss(attention_probs, prev_attn_probs.to(attention_probs.device)) + + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +class StableDiffusionPix2PixZeroPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for pixel-level image editing using Pix2Pix Zero. Based on Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`EulerAncestralDiscreteScheduler`], or [`DDPMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + requires_safety_checker (bool): + Whether the pipeline requires a safety checker. We recommend setting it to True if you're using the + pipeline publicly. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = [ + "safety_checker", + "feature_extractor", + "caption_generator", + "caption_processor", + "inverse_scheduler", + ] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDPMScheduler, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler], + feature_extractor: CLIPImageProcessor, + safety_checker: StableDiffusionSafetyChecker, + inverse_scheduler: DDIMInverseScheduler, + caption_generator: BlipForConditionalGeneration, + caption_processor: BlipProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + caption_processor=caption_processor, + caption_generator=caption_generator, + inverse_scheduler=inverse_scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + 
images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + source_embeds, + target_embeds, + callback_steps, + prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if source_embeds is None and target_embeds is None: + raise ValueError("`source_embeds` and `target_embeds` cannot be undefined.") + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def generate_caption(self, images): + """Generates caption for a given image.""" + text = "a photography of" + + prev_device = self.caption_generator.device + + device = self._execution_device + inputs = self.caption_processor(images, text, return_tensors="pt").to( + device=device, dtype=self.caption_generator.dtype + ) + self.caption_generator.to(device) + outputs = self.caption_generator.generate(**inputs, max_new_tokens=128) + + # offload caption generator + self.caption_generator.to(prev_device) + + caption = self.caption_processor.batch_decode(outputs, skip_special_tokens=True)[0] + return caption + + def construct_direction(self, embs_source: torch.Tensor, embs_target: torch.Tensor): + """Constructs the edit direction to steer the image generation process semantically.""" + return (embs_target.mean(0) - embs_source.mean(0)).unsqueeze(0) + + @torch.no_grad() + def get_embeds(self, prompt: List[str], batch_size: int = 16) -> torch.FloatTensor: + num_prompts = len(prompt) + embeds = [] + for i in range(0, num_prompts, batch_size): + prompt_slice = prompt[i : i + batch_size] + + input_ids = self.tokenizer( + prompt_slice, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ).input_ids + + input_ids = input_ids.to(self.text_encoder.device) + embeds.append(self.text_encoder(input_ids)[0]) + + return torch.cat(embeds, dim=0).mean(0)[None] + + def prepare_image_latents(self, image, batch_size, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + if image.shape[1] == 4: + latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if isinstance(generator, list): + latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) + ] + latents = torch.cat(latents, dim=0) + else: + latents = self.vae.encode(image).latent_dist.sample(generator) + + latents = self.vae.config.scaling_factor * latents + + if batch_size != latents.shape[0]: + if batch_size % latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." 
+ ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_latents_per_image = batch_size // latents.shape[0] + latents = torch.cat([latents] * additional_latents_per_image, dim=0) + else: + raise ValueError( + f"Cannot duplicate `image` of batch size {latents.shape[0]} to {batch_size} text prompts." + ) + else: + latents = torch.cat([latents], dim=0) + + return latents + + def get_epsilon(self, model_output: torch.Tensor, sample: torch.Tensor, timestep: int): + pred_type = self.inverse_scheduler.config.prediction_type + alpha_prod_t = self.inverse_scheduler.alphas_cumprod[timestep] + + beta_prod_t = 1 - alpha_prod_t + + if pred_type == "epsilon": + return model_output + elif pred_type == "sample": + return (sample - alpha_prod_t ** (0.5) * model_output) / beta_prod_t ** (0.5) + elif pred_type == "v_prediction": + return (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {pred_type} must be one of `epsilon`, `sample`, or `v_prediction`" + ) + + def auto_corr_loss(self, hidden_states, generator=None): + reg_loss = 0.0 + for i in range(hidden_states.shape[0]): + for j in range(hidden_states.shape[1]): + noise = hidden_states[i : i + 1, j : j + 1, :, :] + while True: + roll_amount = torch.randint(noise.shape[2] // 2, (1,), generator=generator).item() + reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=2)).mean() ** 2 + reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=3)).mean() ** 2 + + if noise.shape[2] <= 8: + break + noise = F.avg_pool2d(noise, kernel_size=2) + return reg_loss + + def kl_divergence(self, hidden_states): + mean = hidden_states.mean() + var = hidden_states.var() + return var + mean**2 - 1 - torch.log(var + 1e-7) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + source_embeds: torch.Tensor = None, + target_embeds: torch.Tensor = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + cross_attention_guidance_amount: float = 0.1, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + source_embeds (`torch.Tensor`): + Source concept embeddings. Generation of the embeddings as per the [original + paper](https://arxiv.org/abs/2302.03027). Used in discovering the edit direction. + target_embeds (`torch.Tensor`): + Target concept embeddings. Generation of the embeddings as per the [original + paper](https://arxiv.org/abs/2302.03027). Used in discovering the edit direction. 
+            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
+                The height in pixels of the generated image.
+            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
+                The width in pixels of the generated image.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            guidance_scale (`float`, *optional*, defaults to 7.5):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+                [`schedulers.DDIMScheduler`] and is ignored in other schedulers.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.FloatTensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            cross_attention_guidance_amount (`float`, defaults to 0.1):
+                Amount of guidance needed from the reference cross-attention maps.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                plain tuple.
+            callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference. The function will be
+                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function will be called. If not specified, the callback will be
+                called at every step.
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+        Examples:
+
+        Returns:
+            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
+            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
+            element is a list of `bool`s denoting whether the corresponding generated image likely represents
+            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
+        """
+        # 0. Define the spatial resolutions.
+        height = height or self.unet.config.sample_size * self.vae_scale_factor
+        width = width or self.unet.config.sample_size * self.vae_scale_factor
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            source_embeds,
+            target_embeds,
+            callback_steps,
+            prompt_embeds,
+        )
+
+        # 2. Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+        if cross_attention_kwargs is None:
+            cross_attention_kwargs = {}
+
+        device = self._execution_device
+        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+        # corresponds to doing no classifier free guidance.
+        do_classifier_free_guidance = guidance_scale > 1.0
+
+        # 3. Encode input prompt
+        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
+            prompt,
+            device,
+            num_images_per_prompt,
+            do_classifier_free_guidance,
+            negative_prompt,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            clip_skip=clip_skip,
+        )
+        # For classifier free guidance, we need to do two forward passes.
+        # Here we concatenate the unconditional and text embeddings into a single batch
+        # to avoid doing two forward passes
+        if do_classifier_free_guidance:
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+        # 4. Prepare timesteps
+        self.scheduler.set_timesteps(num_inference_steps, device=device)
+        timesteps = self.scheduler.timesteps
+
+        # 5. Generate the inverted noise from the input image or any other image
+        # generated from the input prompt.
+        num_channels_latents = self.unet.config.in_channels
+        latents = self.prepare_latents(
+            batch_size * num_images_per_prompt,
+            num_channels_latents,
+            height,
+            width,
+            prompt_embeds.dtype,
+            device,
+            generator,
+            latents,
+        )
+        latents_init = latents.clone()
+
+        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+        # 7. Rejig the UNet so that we can obtain the cross-attention maps and
+        # use them for guiding the subsequent image generation.
+        self.unet = prepare_unet(self.unet)
+
+        # 8. Denoising loop where we obtain the cross-attention maps.
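The loop that follows is the recording pass: `prepare_unet` has swapped in `Pix2PixZeroAttnProcessor`, which stores reference cross-attention maps keyed by timestep when called with `cross_attention_kwargs={"timestep": t}` and accumulates an L2 loss against them when a `loss` object is also passed (as the second loop further down does). A toy sketch of that store-then-compare bookkeeping, using stand-in tensors that are not part of the patch:

```py
import torch

reference_maps = {}  # plays the role of Pix2PixZeroAttnProcessor.reference_cross_attn_map
t = 981              # stand-in timestep key
attn_probs = torch.rand(8, 4096, 77)  # stand-in cross-attention probabilities

# Recording pass: cross_attention_kwargs={"timestep": t}, no loss object supplied.
reference_maps[t] = attn_probs.detach().cpu()

# Guided pass: cross_attention_kwargs={"timestep": t, "loss": loss}; mirrors
# Pix2PixZeroL2Loss.compute_loss, which sums over the map dimensions and averages over heads.
new_attn_probs = torch.rand(8, 4096, 77)
prev = reference_maps.pop(t)
loss = ((new_attn_probs - prev.to(new_attn_probs.device)) ** 2).sum((1, 2)).mean(0)
print(float(loss))
```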
+        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+        with self.progress_bar(total=num_inference_steps) as progress_bar:
+            for i, t in enumerate(timesteps):
+                # expand the latents if we are doing classifier free guidance
+                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+                # predict the noise residual
+                noise_pred = self.unet(
+                    latent_model_input,
+                    t,
+                    encoder_hidden_states=prompt_embeds,
+                    cross_attention_kwargs={"timestep": t},
+                ).sample
+
+                # perform guidance
+                if do_classifier_free_guidance:
+                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                # compute the previous noisy sample x_t -> x_t-1
+                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
+
+                # call the callback, if provided
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+                    if callback is not None and i % callback_steps == 0:
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)
+
+        # 9. Compute the edit directions.
+        edit_direction = self.construct_direction(source_embeds, target_embeds).to(prompt_embeds.device)
+
+        # 10. Edit the prompt embeddings as per the edit directions discovered.
+        prompt_embeds_edit = prompt_embeds.clone()
+        prompt_embeds_edit[1:2] += edit_direction
+
+        # 11. Second denoising loop to generate the edited image.
+        self.scheduler.set_timesteps(num_inference_steps, device=device)
+        timesteps = self.scheduler.timesteps
+
+        latents = latents_init
+        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+        with self.progress_bar(total=num_inference_steps) as progress_bar:
+            for i, t in enumerate(timesteps):
+                # expand the latents if we are doing classifier free guidance
+                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+                # we want to learn the latent such that it steers the generation
+                # process towards the edited direction, so make the initial
+                # noise learnable
+                x_in = latent_model_input.detach().clone()
+                x_in.requires_grad = True
+
+                # optimizer
+                opt = torch.optim.SGD([x_in], lr=cross_attention_guidance_amount)
+
+                with torch.enable_grad():
+                    # initialize loss
+                    loss = Pix2PixZeroL2Loss()
+
+                    # predict the noise residual
+                    noise_pred = self.unet(
+                        x_in,
+                        t,
+                        encoder_hidden_states=prompt_embeds_edit.detach(),
+                        cross_attention_kwargs={"timestep": t, "loss": loss},
+                    ).sample
+
+                    loss.loss.backward(retain_graph=False)
+                    opt.step()
+
+                # recompute the noise
+                noise_pred = self.unet(
+                    x_in.detach(),
+                    t,
+                    encoder_hidden_states=prompt_embeds_edit,
+                    cross_attention_kwargs={"timestep": None},
+                ).sample
+
+                latents = x_in.detach().chunk(2)[0]
+
+                # perform guidance
+                if do_classifier_free_guidance:
+                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                # compute the previous noisy sample x_t -> x_t-1
+                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
+
+                # call the callback, if provided
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+
+        if not
output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_INVERT_DOC_STRING) + def invert( + self, + prompt: Optional[str] = None, + image: PipelineImageInput = None, + num_inference_steps: int = 50, + guidance_scale: float = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + cross_attention_guidance_amount: float = 0.1, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + lambda_auto_corr: float = 20.0, + lambda_kl: float = 20.0, + num_reg_steps: int = 5, + num_auto_corr_rolls: int = 5, + ): + r""" + Function used to generate inverted latents given a prompt and image. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.FloatTensor` `np.ndarray`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch which will be used for conditioning. Can also accept + image latents as `image`, if passing latents directly, it will not be encoded again. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 1): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            cross_attention_guidance_amount (`float`, defaults to 0.1):
+                Amount of guidance needed from the reference cross-attention maps.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                plain tuple.
+            callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference. The function will be
+                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function will be called. If not specified, the callback will be
+                called at every step.
+            lambda_auto_corr (`float`, *optional*, defaults to 20.0):
+                Lambda parameter to control the auto correlation regularization.
+            lambda_kl (`float`, *optional*, defaults to 20.0):
+                Lambda parameter to control the Kullback–Leibler divergence regularization.
+            num_reg_steps (`int`, *optional*, defaults to 5):
+                Number of regularization loss steps.
+            num_auto_corr_rolls (`int`, *optional*, defaults to 5):
+                Number of auto correlation roll steps.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.stable_diffusion.pipeline_stable_diffusion_pix2pix_zero.Pix2PixInversionPipelineOutput`] or
+            `tuple`:
+            [`~pipelines.stable_diffusion.pipeline_stable_diffusion_pix2pix_zero.Pix2PixInversionPipelineOutput`] if
+            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the inverted
+            latents tensor and the second is the corresponding decoded image.
+        """
+        # 1. Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+        if cross_attention_kwargs is None:
+            cross_attention_kwargs = {}
+
+        device = self._execution_device
+        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+        # corresponds to doing no classifier free guidance.
+        do_classifier_free_guidance = guidance_scale > 1.0
+
+        # 2. Preprocess image
+        image = self.image_processor.preprocess(image)
+
+        # 3. Prepare latent variables
+        latents = self.prepare_image_latents(image, batch_size, self.vae.dtype, device, generator)
+
+        # 4. Encode input prompt
+        num_images_per_prompt = 1
+        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
+            prompt,
+            device,
+            num_images_per_prompt,
+            do_classifier_free_guidance,
+            prompt_embeds=prompt_embeds,
+        )
+        # For classifier free guidance, we need to do two forward passes.
+        # Here we concatenate the unconditional and text embeddings into a single batch
+        # to avoid doing two forward passes
+        if do_classifier_free_guidance:
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+        # 5. Prepare timesteps
+        self.inverse_scheduler.set_timesteps(num_inference_steps, device=device)
+        timesteps = self.inverse_scheduler.timesteps
+
+        # 6. Rejig the UNet so that we can obtain the cross-attention maps and
+        # use them for guiding the subsequent image generation.
+        self.unet = prepare_unet(self.unet)
+
+        # 7.
Denoising loop where we obtain the cross-attention maps. + num_warmup_steps = len(timesteps) - num_inference_steps * self.inverse_scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.inverse_scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs={"timestep": t}, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # regularization of the noise prediction + with torch.enable_grad(): + for _ in range(num_reg_steps): + if lambda_auto_corr > 0: + for _ in range(num_auto_corr_rolls): + var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True) + + # Derive epsilon from model output before regularizing to IID standard normal + var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t) + + l_ac = self.auto_corr_loss(var_epsilon, generator=generator) + l_ac.backward() + + grad = var.grad.detach() / num_auto_corr_rolls + noise_pred = noise_pred - lambda_auto_corr * grad + + if lambda_kl > 0: + var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True) + + # Derive epsilon from model output before regularizing to IID standard normal + var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t) + + l_kld = self.kl_divergence(var_epsilon) + l_kld.backward() + + grad = var.grad.detach() + noise_pred = noise_pred - lambda_kl * grad + + noise_pred = noise_pred.detach() + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.inverse_scheduler.step(noise_pred, t, latents).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ( + (i + 1) > num_warmup_steps and (i + 1) % self.inverse_scheduler.order == 0 + ): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + inverted_latents = latents.detach().clone() + + # 8. 
Post-processing + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (inverted_latents, image) + + return Pix2PixInversionPipelineOutput(latents=inverted_latents, images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stochastic_karras_ve/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stochastic_karras_ve/__init__.py new file mode 100644 index 000000000..15c9a8c27 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stochastic_karras_ve/__init__.py @@ -0,0 +1,19 @@ +from typing import TYPE_CHECKING + +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_stochastic_karras_ve": ["KarrasVePipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_stochastic_karras_ve import KarrasVePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py new file mode 100644 index 000000000..023edb4ce --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py @@ -0,0 +1,128 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Tuple, Union + +import torch + +from ....models import UNet2DModel +from ....schedulers import KarrasVeScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class KarrasVePipeline(DiffusionPipeline): + r""" + Pipeline for unconditional image generation. + + Parameters: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image. + scheduler ([`KarrasVeScheduler`]): + A scheduler to be used in combination with `unet` to denoise the encoded image. 
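+
+    Example (an illustrative sketch added for documentation; it is not part of the upstream file, and the
+    checkpoint path below is a placeholder rather than a verified Hub repository):
+
+        ```py
+        >>> from diffusers import KarrasVePipeline
+
+        >>> pipe = KarrasVePipeline.from_pretrained("path/to/karras-ve-checkpoint")  # placeholder id
+        >>> image = pipe(batch_size=1, num_inference_steps=50).images[0]
+        >>> image.save("karras_ve_sample.png")
+        ```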
+ """ + + # add type hints for linting + unet: UNet2DModel + scheduler: KarrasVeScheduler + + def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + num_inference_steps: int = 50, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + **kwargs, + ) -> Union[Tuple, ImagePipelineOutput]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of images to generate. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. + + Example: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + + img_size = self.unet.config.sample_size + shape = (batch_size, 3, img_size, img_size) + + model = self.unet + + # sample x_0 ~ N(0, sigma_0^2 * I) + sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma + + self.scheduler.set_timesteps(num_inference_steps) + + for t in self.progress_bar(self.scheduler.timesteps): + # here sigma_t == t_i from the paper + sigma = self.scheduler.schedule[t] + sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0 + + # 1. Select temporarily increased noise level sigma_hat + # 2. Add new noise to move from sample_i to sample_hat + sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator) + + # 3. Predict the noise residual given the noise magnitude `sigma_hat` + # The model inputs and output are adjusted by following eq. (213) in [1]. + model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample + + # 4. Evaluate dx/dt at sigma_hat + # 5. Take Euler step from sigma to sigma_prev + step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat) + + if sigma_prev != 0: + # 6. Apply 2nd order correction + # The model inputs and output are adjusted by following eq. (213) in [1]. 
+ model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample + step_output = self.scheduler.step_correct( + model_output, + sigma_hat, + sigma_prev, + sample_hat, + step_output.prev_sample, + step_output["derivative"], + ) + sample = step_output.prev_sample + + sample = (sample / 2 + 0.5).clamp(0, 1) + image = sample.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py new file mode 100644 index 000000000..8ea6ef6e2 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py @@ -0,0 +1,71 @@ +from typing import TYPE_CHECKING + +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import ( + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + ) + + _dummy_objects.update( + { + "VersatileDiffusionDualGuidedPipeline": VersatileDiffusionDualGuidedPipeline, + "VersatileDiffusionImageVariationPipeline": VersatileDiffusionImageVariationPipeline, + "VersatileDiffusionPipeline": VersatileDiffusionPipeline, + "VersatileDiffusionTextToImagePipeline": VersatileDiffusionTextToImagePipeline, + } + ) +else: + _import_structure["modeling_text_unet"] = ["UNetFlatConditionModel"] + _import_structure["pipeline_versatile_diffusion"] = ["VersatileDiffusionPipeline"] + _import_structure["pipeline_versatile_diffusion_dual_guided"] = ["VersatileDiffusionDualGuidedPipeline"] + _import_structure["pipeline_versatile_diffusion_image_variation"] = ["VersatileDiffusionImageVariationPipeline"] + _import_structure["pipeline_versatile_diffusion_text_to_image"] = ["VersatileDiffusionTextToImagePipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import ( + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + ) + else: + from .pipeline_versatile_diffusion import VersatileDiffusionPipeline + from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline + from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline + from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + 
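+    # Descriptive note (added commentary): re-attach any dummy placeholder classes collected above so that the
+    # pipeline names still resolve, and raise an informative dependency error when used, if the required
+    # torch/transformers versions are unavailable.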
for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py new file mode 100644 index 000000000..62a3a8728 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py @@ -0,0 +1,2508 @@ +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from diffusers.utils import deprecate + +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin +from ....models.activations import get_activation +from ....models.attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + Attention, + AttentionProcessor, + AttnAddedKVProcessor, + AttnAddedKVProcessor2_0, + AttnProcessor, +) +from ....models.embeddings import ( + GaussianFourierProjection, + ImageHintTimeEmbedding, + ImageProjection, + ImageTimeEmbedding, + TextImageProjection, + TextImageTimeEmbedding, + TextTimeEmbedding, + TimestepEmbedding, + Timesteps, +) +from ....models.resnet import ResnetBlockCondNorm2D +from ....models.transformers.dual_transformer_2d import DualTransformer2DModel +from ....models.transformers.transformer_2d import Transformer2DModel +from ....models.unets.unet_2d_condition import UNet2DConditionOutput +from ....utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers +from ....utils.torch_utils import apply_freeu + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def get_down_block( + down_block_type, + num_layers, + in_channels, + out_channels, + temb_channels, + add_downsample, + resnet_eps, + resnet_act_fn, + num_attention_heads, + transformer_layers_per_block, + attention_type, + attention_head_dim, + resnet_groups=None, + cross_attention_dim=None, + downsample_padding=None, + dual_cross_attention=False, + use_linear_projection=False, + only_cross_attention=False, + upcast_attention=False, + resnet_time_scale_shift="default", + resnet_skip_time_act=False, + resnet_out_scale_factor=1.0, + cross_attention_norm=None, + dropout=0.0, +): + down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type + if down_block_type == "DownBlockFlat": + return DownBlockFlat( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif down_block_type == "CrossAttnDownBlockFlat": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockFlat") + return CrossAttnDownBlockFlat( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + 
cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + raise ValueError(f"{down_block_type} is not supported.") + + +def get_up_block( + up_block_type, + num_layers, + in_channels, + out_channels, + prev_output_channel, + temb_channels, + add_upsample, + resnet_eps, + resnet_act_fn, + num_attention_heads, + transformer_layers_per_block, + resolution_idx, + attention_type, + attention_head_dim, + resnet_groups=None, + cross_attention_dim=None, + dual_cross_attention=False, + use_linear_projection=False, + only_cross_attention=False, + upcast_attention=False, + resnet_time_scale_shift="default", + resnet_skip_time_act=False, + resnet_out_scale_factor=1.0, + cross_attention_norm=None, + dropout=0.0, +): + up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type + if up_block_type == "UpBlockFlat": + return UpBlockFlat( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif up_block_type == "CrossAttnUpBlockFlat": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockFlat") + return CrossAttnUpBlockFlat( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + raise ValueError(f"{up_block_type} is not supported.") + + +class FourierEmbedder(nn.Module): + def __init__(self, num_freqs=64, temperature=100): + super().__init__() + + self.num_freqs = num_freqs + self.temperature = temperature + + freq_bands = temperature ** (torch.arange(num_freqs) / num_freqs) + freq_bands = freq_bands[None, None, None] + self.register_buffer("freq_bands", freq_bands, persistent=False) + + def __call__(self, x): + x = self.freq_bands * x.unsqueeze(-1) + return torch.stack((x.sin(), x.cos()), dim=-1).permute(0, 1, 3, 4, 2).reshape(*x.shape[:2], -1) + + +class GLIGENTextBoundingboxProjection(nn.Module): + def __init__(self, positive_len, out_dim, feature_type, fourier_freqs=8): + super().__init__() + self.positive_len = positive_len + self.out_dim = out_dim + + self.fourier_embedder = FourierEmbedder(num_freqs=fourier_freqs) + self.position_dim = fourier_freqs * 2 * 4 # 2: sin/cos, 4: xyxy + + if isinstance(out_dim, tuple): + out_dim = out_dim[0] + + if feature_type == "text-only": + self.linears = nn.Sequential( + nn.Linear(self.positive_len + self.position_dim, 512), + nn.SiLU(), + nn.Linear(512, 512), + nn.SiLU(), + nn.Linear(512, out_dim), + ) + self.null_positive_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) + + elif feature_type == "text-image": + self.linears_text = nn.Sequential( + 
nn.Linear(self.positive_len + self.position_dim, 512), + nn.SiLU(), + nn.Linear(512, 512), + nn.SiLU(), + nn.Linear(512, out_dim), + ) + self.linears_image = nn.Sequential( + nn.Linear(self.positive_len + self.position_dim, 512), + nn.SiLU(), + nn.Linear(512, 512), + nn.SiLU(), + nn.Linear(512, out_dim), + ) + self.null_text_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) + self.null_image_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) + + self.null_position_feature = torch.nn.Parameter(torch.zeros([self.position_dim])) + + def forward( + self, + boxes, + masks, + positive_embeddings=None, + phrases_masks=None, + image_masks=None, + phrases_embeddings=None, + image_embeddings=None, + ): + masks = masks.unsqueeze(-1) + + xyxy_embedding = self.fourier_embedder(boxes) + xyxy_null = self.null_position_feature.view(1, 1, -1) + xyxy_embedding = xyxy_embedding * masks + (1 - masks) * xyxy_null + + if positive_embeddings: + positive_null = self.null_positive_feature.view(1, 1, -1) + positive_embeddings = positive_embeddings * masks + (1 - masks) * positive_null + + objs = self.linears(torch.cat([positive_embeddings, xyxy_embedding], dim=-1)) + else: + phrases_masks = phrases_masks.unsqueeze(-1) + image_masks = image_masks.unsqueeze(-1) + + text_null = self.null_text_feature.view(1, 1, -1) + image_null = self.null_image_feature.view(1, 1, -1) + + phrases_embeddings = phrases_embeddings * phrases_masks + (1 - phrases_masks) * text_null + image_embeddings = image_embeddings * image_masks + (1 - image_masks) * image_null + + objs_text = self.linears_text(torch.cat([phrases_embeddings, xyxy_embedding], dim=-1)) + objs_image = self.linears_image(torch.cat([image_embeddings, xyxy_embedding], dim=-1)) + objs = torch.cat([objs_text, objs_image], dim=1) + + return objs + + +class UNetFlatConditionModel(ModelMixin, ConfigMixin): + r""" + A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample + shaped output. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): + Height and width of input/output sample. + in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. + out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. + center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. + flip_sin_to_cos (`bool`, *optional*, defaults to `False`): + Whether to flip the sin to cos in the time embedding. + freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. + down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "DownBlockFlat")`): + The tuple of downsample blocks to use. + mid_block_type (`str`, *optional*, defaults to `"UNetMidBlockFlatCrossAttn"`): + Block type for middle of UNet, it can be one of `UNetMidBlockFlatCrossAttn`, `UNetMidBlockFlat`, or + `UNetMidBlockFlatSimpleCrossAttn`. If `None`, the mid block layer is skipped. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat")`): + The tuple of upsample blocks to use. 
+ only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`): + Whether to include self-attention in the basic transformer blocks, see + [`~models.attention.BasicTransformerBlock`]. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. + downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. + mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. + norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. + If `None`, normalization and activation layers is skipped in post-processing. + norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. + cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): + The dimension of the cross attention features. + transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1): + The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for + [`~models.unet_2d_blocks.CrossAttnDownBlockFlat`], [`~models.unet_2d_blocks.CrossAttnUpBlockFlat`], + [`~models.unet_2d_blocks.UNetMidBlockFlatCrossAttn`]. + reverse_transformer_layers_per_block : (`Tuple[Tuple]`, *optional*, defaults to None): + The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling + blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for + [`~models.unet_2d_blocks.CrossAttnDownBlockFlat`], [`~models.unet_2d_blocks.CrossAttnUpBlockFlat`], + [`~models.unet_2d_blocks.UNetMidBlockFlatCrossAttn`]. + encoder_hid_dim (`int`, *optional*, defaults to None): + If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` + dimension to `cross_attention_dim`. + encoder_hid_dim_type (`str`, *optional*, defaults to `None`): + If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text + embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. + attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. + num_attention_heads (`int`, *optional*): + The number of attention heads. If not defined, defaults to `attention_head_dim` + resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config + for ResNet blocks (see [`~models.resnet.ResnetBlockFlat`]). Choose from `default` or `scale_shift`. + class_embed_type (`str`, *optional*, defaults to `None`): + The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, + `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. + addition_embed_type (`str`, *optional*, defaults to `None`): + Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or + "text". "text" will use the `TextTimeEmbedding` layer. + addition_time_embed_dim: (`int`, *optional*, defaults to `None`): + Dimension for the timestep embeddings. 
+ num_class_embeds (`int`, *optional*, defaults to `None`): + Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing + class conditioning with `class_embed_type` equal to `None`. + time_embedding_type (`str`, *optional*, defaults to `positional`): + The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. + time_embedding_dim (`int`, *optional*, defaults to `None`): + An optional override for the dimension of the projected time embedding. + time_embedding_act_fn (`str`, *optional*, defaults to `None`): + Optional activation function to use only once on the time embeddings before they are passed to the rest of + the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. + timestep_post_act (`str`, *optional*, defaults to `None`): + The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. + time_cond_proj_dim (`int`, *optional*, defaults to `None`): + The dimension of `cond_proj` layer in the timestep embedding. + conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. conv_out_kernel (`int`, + *optional*, default to `3`): The kernel size of `conv_out` layer. projection_class_embeddings_input_dim (`int`, + *optional*): The dimension of the `class_labels` input when + `class_embed_type="projection"`. Required when `class_embed_type="projection"`. + class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time + embeddings with the class embeddings. + mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`): + Whether to use cross attention with the mid block when using the `UNetMidBlockFlatSimpleCrossAttn`. If + `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the + `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False` + otherwise. 
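+
+    Example (an illustrative sketch added for documentation, not part of the upstream file; the repository id and
+    subfolder are assumptions based on the public Versatile Diffusion release):
+
+        ```py
+        >>> unet = UNetFlatConditionModel.from_pretrained("shi-labs/versatile-diffusion", subfolder="text_unet")
+        ```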
+ """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + sample_size: Optional[int] = None, + in_channels: int = 4, + out_channels: int = 4, + center_input_sample: bool = False, + flip_sin_to_cos: bool = True, + freq_shift: int = 0, + down_block_types: Tuple[str] = ( + "CrossAttnDownBlockFlat", + "CrossAttnDownBlockFlat", + "CrossAttnDownBlockFlat", + "DownBlockFlat", + ), + mid_block_type: Optional[str] = "UNetMidBlockFlatCrossAttn", + up_block_types: Tuple[str] = ( + "UpBlockFlat", + "CrossAttnUpBlockFlat", + "CrossAttnUpBlockFlat", + "CrossAttnUpBlockFlat", + ), + only_cross_attention: Union[bool, Tuple[bool]] = False, + block_out_channels: Tuple[int] = (320, 640, 1280, 1280), + layers_per_block: Union[int, Tuple[int]] = 2, + downsample_padding: int = 1, + mid_block_scale_factor: float = 1, + dropout: float = 0.0, + act_fn: str = "silu", + norm_num_groups: Optional[int] = 32, + norm_eps: float = 1e-5, + cross_attention_dim: Union[int, Tuple[int]] = 1280, + transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, + reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None, + encoder_hid_dim: Optional[int] = None, + encoder_hid_dim_type: Optional[str] = None, + attention_head_dim: Union[int, Tuple[int]] = 8, + num_attention_heads: Optional[Union[int, Tuple[int]]] = None, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + class_embed_type: Optional[str] = None, + addition_embed_type: Optional[str] = None, + addition_time_embed_dim: Optional[int] = None, + num_class_embeds: Optional[int] = None, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + resnet_skip_time_act: bool = False, + resnet_out_scale_factor: int = 1.0, + time_embedding_type: str = "positional", + time_embedding_dim: Optional[int] = None, + time_embedding_act_fn: Optional[str] = None, + timestep_post_act: Optional[str] = None, + time_cond_proj_dim: Optional[int] = None, + conv_in_kernel: int = 3, + conv_out_kernel: int = 3, + projection_class_embeddings_input_dim: Optional[int] = None, + attention_type: str = "default", + class_embeddings_concat: bool = False, + mid_block_only_cross_attention: Optional[bool] = None, + cross_attention_norm: Optional[str] = None, + addition_embed_type_num_heads=64, + ): + super().__init__() + + self.sample_size = sample_size + + if num_attention_heads is not None: + raise ValueError( + "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." + ) + + # If `num_attention_heads` is not defined (which is the case for most models) + # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. + # The reason for this behavior is to correct for incorrectly named variables that were introduced + # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 + # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking + # which is why we correct for the naming here. 
+ num_attention_heads = num_attention_heads or attention_head_dim + + # Check inputs + if len(down_block_types) != len(up_block_types): + raise ValueError( + f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." + ) + + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." + ) + + if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." + ) + if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None: + for layer_number_per_block in transformer_layers_per_block: + if isinstance(layer_number_per_block, list): + raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.") + + # input + conv_in_padding = (conv_in_kernel - 1) // 2 + self.conv_in = LinearMultiDim( + in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding + ) + + # time + if time_embedding_type == "fourier": + time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 + if time_embed_dim % 2 != 0: + raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") + self.time_proj = GaussianFourierProjection( + time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos + ) + timestep_input_dim = time_embed_dim + elif time_embedding_type == "positional": + time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 + + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) + timestep_input_dim = block_out_channels[0] + else: + raise ValueError( + f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." 
+ ) + + self.time_embedding = TimestepEmbedding( + timestep_input_dim, + time_embed_dim, + act_fn=act_fn, + post_act_fn=timestep_post_act, + cond_proj_dim=time_cond_proj_dim, + ) + + if encoder_hid_dim_type is None and encoder_hid_dim is not None: + encoder_hid_dim_type = "text_proj" + self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) + logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") + + if encoder_hid_dim is None and encoder_hid_dim_type is not None: + raise ValueError( + f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." + ) + + if encoder_hid_dim_type == "text_proj": + self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) + elif encoder_hid_dim_type == "text_image_proj": + # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much + # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use + # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` + self.encoder_hid_proj = TextImageProjection( + text_embed_dim=encoder_hid_dim, + image_embed_dim=cross_attention_dim, + cross_attention_dim=cross_attention_dim, + ) + elif encoder_hid_dim_type == "image_proj": + # Kandinsky 2.2 + self.encoder_hid_proj = ImageProjection( + image_embed_dim=encoder_hid_dim, + cross_attention_dim=cross_attention_dim, + ) + elif encoder_hid_dim_type is not None: + raise ValueError( + f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." + ) + else: + self.encoder_hid_proj = None + + # class embedding + if class_embed_type is None and num_class_embeds is not None: + self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) + elif class_embed_type == "timestep": + self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) + elif class_embed_type == "identity": + self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) + elif class_embed_type == "projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" + ) + # The projection `class_embed_type` is the same as the timestep `class_embed_type` except + # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings + # 2. it projects from an arbitrary input dimension. + # + # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. + # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. + # As a result, `TimestepEmbedding` can be passed arbitrary vectors. 
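+            # Descriptive note (added commentary): `TimestepEmbedding` here acts as a small MLP,
+            # Linear(projection_class_embeddings_input_dim -> time_embed_dim) -> SiLU -> Linear(time_embed_dim -> time_embed_dim).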
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + elif class_embed_type == "simple_projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" + ) + self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) + else: + self.class_embedding = None + + if addition_embed_type == "text": + if encoder_hid_dim is not None: + text_time_embedding_from_dim = encoder_hid_dim + else: + text_time_embedding_from_dim = cross_attention_dim + + self.add_embedding = TextTimeEmbedding( + text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads + ) + elif addition_embed_type == "text_image": + # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much + # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use + # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` + self.add_embedding = TextImageTimeEmbedding( + text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim + ) + elif addition_embed_type == "text_time": + self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) + self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + elif addition_embed_type == "image": + # Kandinsky 2.2 + self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) + elif addition_embed_type == "image_hint": + # Kandinsky 2.2 ControlNet + self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) + elif addition_embed_type is not None: + raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") + + if time_embedding_act_fn is None: + self.time_embed_act = None + else: + self.time_embed_act = get_activation(time_embedding_act_fn) + + self.down_blocks = nn.ModuleList([]) + self.up_blocks = nn.ModuleList([]) + + if isinstance(only_cross_attention, bool): + if mid_block_only_cross_attention is None: + mid_block_only_cross_attention = only_cross_attention + + only_cross_attention = [only_cross_attention] * len(down_block_types) + + if mid_block_only_cross_attention is None: + mid_block_only_cross_attention = False + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(down_block_types) + + if isinstance(attention_head_dim, int): + attention_head_dim = (attention_head_dim,) * len(down_block_types) + + if isinstance(cross_attention_dim, int): + cross_attention_dim = (cross_attention_dim,) * len(down_block_types) + + if isinstance(layers_per_block, int): + layers_per_block = [layers_per_block] * len(down_block_types) + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) + + if class_embeddings_concat: + # The time embeddings are concatenated with the class embeddings. 
The dimension of the + # time embeddings passed to the down, middle, and up blocks is twice the dimension of the + # regular time embeddings + blocks_time_embed_dim = time_embed_dim * 2 + else: + blocks_time_embed_dim = time_embed_dim + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block[i], + transformer_layers_per_block=transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + temb_channels=blocks_time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim[i], + num_attention_heads=num_attention_heads[i], + downsample_padding=downsample_padding, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_type=attention_type, + resnet_skip_time_act=resnet_skip_time_act, + resnet_out_scale_factor=resnet_out_scale_factor, + cross_attention_norm=cross_attention_norm, + attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, + dropout=dropout, + ) + self.down_blocks.append(down_block) + + # mid + if mid_block_type == "UNetMidBlockFlatCrossAttn": + self.mid_block = UNetMidBlockFlatCrossAttn( + transformer_layers_per_block=transformer_layers_per_block[-1], + in_channels=block_out_channels[-1], + temb_channels=blocks_time_embed_dim, + dropout=dropout, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_time_scale_shift=resnet_time_scale_shift, + cross_attention_dim=cross_attention_dim[-1], + num_attention_heads=num_attention_heads[-1], + resnet_groups=norm_num_groups, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + elif mid_block_type == "UNetMidBlockFlatSimpleCrossAttn": + self.mid_block = UNetMidBlockFlatSimpleCrossAttn( + in_channels=block_out_channels[-1], + temb_channels=blocks_time_embed_dim, + dropout=dropout, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + cross_attention_dim=cross_attention_dim[-1], + attention_head_dim=attention_head_dim[-1], + resnet_groups=norm_num_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + skip_time_act=resnet_skip_time_act, + only_cross_attention=mid_block_only_cross_attention, + cross_attention_norm=cross_attention_norm, + ) + elif mid_block_type == "UNetMidBlockFlat": + self.mid_block = UNetMidBlockFlat( + in_channels=block_out_channels[-1], + temb_channels=blocks_time_embed_dim, + dropout=dropout, + num_layers=0, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_groups=norm_num_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + add_attention=False, + ) + elif mid_block_type is None: + self.mid_block = None + else: + raise ValueError(f"unknown mid_block_type : {mid_block_type}") + + # count how many layers upsample the images + self.num_upsamplers = 0 + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + 
reversed_num_attention_heads = list(reversed(num_attention_heads)) + reversed_layers_per_block = list(reversed(layers_per_block)) + reversed_cross_attention_dim = list(reversed(cross_attention_dim)) + reversed_transformer_layers_per_block = ( + list(reversed(transformer_layers_per_block)) + if reverse_transformer_layers_per_block is None + else reverse_transformer_layers_per_block + ) + only_cross_attention = list(reversed(only_cross_attention)) + + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + is_final_block = i == len(block_out_channels) - 1 + + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + # add upsample block for all BUT final layer + if not is_final_block: + add_upsample = True + self.num_upsamplers += 1 + else: + add_upsample = False + + up_block = get_up_block( + up_block_type, + num_layers=reversed_layers_per_block[i] + 1, + transformer_layers_per_block=reversed_transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=blocks_time_embed_dim, + add_upsample=add_upsample, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resolution_idx=i, + resnet_groups=norm_num_groups, + cross_attention_dim=reversed_cross_attention_dim[i], + num_attention_heads=reversed_num_attention_heads[i], + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_type=attention_type, + resnet_skip_time_act=resnet_skip_time_act, + resnet_out_scale_factor=resnet_out_scale_factor, + cross_attention_norm=cross_attention_norm, + attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, + dropout=dropout, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + if norm_num_groups is not None: + self.conv_norm_out = nn.GroupNorm( + num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps + ) + + self.conv_act = get_activation(act_fn) + + else: + self.conv_norm_out = None + self.conv_act = None + + conv_out_padding = (conv_out_kernel - 1) // 2 + self.conv_out = LinearMultiDim( + block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding + ) + + if attention_type in ["gated", "gated-text-image"]: + positive_len = 768 + if isinstance(cross_attention_dim, int): + positive_len = cross_attention_dim + elif isinstance(cross_attention_dim, tuple) or isinstance(cross_attention_dim, list): + positive_len = cross_attention_dim[0] + + feature_type = "text-only" if attention_type == "gated" else "text-image" + self.position_net = GLIGENTextBoundingboxProjection( + positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type + ) + + @property + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. 
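+            For example (added note), keys typically look like
+            `"down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor"`.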
+ """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + def set_attention_slice(self, slice_size): + r""" + Enable sliced attention computation. + + When this option is enabled, the attention module splits the input tensor in slices to compute attention in + several steps. This is useful for saving some memory in exchange for a small decrease in speed. + + Args: + slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): + When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If + `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is + provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` + must be a multiple of `slice_size`. 
+ """ + sliceable_head_dims = [] + + def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): + if hasattr(module, "set_attention_slice"): + sliceable_head_dims.append(module.sliceable_head_dim) + + for child in module.children(): + fn_recursive_retrieve_sliceable_dims(child) + + # retrieve number of attention layers + for module in self.children(): + fn_recursive_retrieve_sliceable_dims(module) + + num_sliceable_layers = len(sliceable_head_dims) + + if slice_size == "auto": + # half the attention head size is usually a good trade-off between + # speed and memory + slice_size = [dim // 2 for dim in sliceable_head_dims] + elif slice_size == "max": + # make smallest slice possible + slice_size = num_sliceable_layers * [1] + + slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size + + if len(slice_size) != len(sliceable_head_dims): + raise ValueError( + f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" + f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." + ) + + for i in range(len(slice_size)): + size = slice_size[i] + dim = sliceable_head_dims[i] + if size is not None and size > dim: + raise ValueError(f"size {size} has to be smaller or equal to {dim}.") + + # Recursively walk through all the children. + # Any children which exposes the set_attention_slice method + # gets the message + def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): + if hasattr(module, "set_attention_slice"): + module.set_attention_slice(slice_size.pop()) + + for child in module.children(): + fn_recursive_set_attention_slice(child, slice_size) + + reversed_slice_size = list(reversed(slice_size)) + for module in self.children(): + fn_recursive_set_attention_slice(module, reversed_slice_size) + + def _set_gradient_checkpointing(self, module, value=False): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = value + + def enable_freeu(self, s1, s2, b1, b2): + r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497. + + The suffixes after the scaling factors represent the stage blocks where they are being applied. + + Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that + are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. + + Args: + s1 (`float`): + Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to + mitigate the "oversmoothing effect" in the enhanced denoising process. + s2 (`float`): + Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to + mitigate the "oversmoothing effect" in the enhanced denoising process. + b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. + b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. 
+ """ + for i, upsample_block in enumerate(self.up_blocks): + setattr(upsample_block, "s1", s1) + setattr(upsample_block, "s2", s2) + setattr(upsample_block, "b1", b1) + setattr(upsample_block, "b2", b2) + + def disable_freeu(self): + """Disables the FreeU mechanism.""" + freeu_keys = {"s1", "s2", "b1", "b2"} + for i, upsample_block in enumerate(self.up_blocks): + for k in freeu_keys: + if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: + setattr(upsample_block, k, None) + + def fuse_qkv_projections(self): + """ + Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, + key, value) are fused. For cross-attention modules, key and value projection matrices are fused. + + + + This API is 🧪 experimental. + + + """ + self.original_attn_processors = None + + for _, attn_processor in self.attn_processors.items(): + if "Added" in str(attn_processor.__class__.__name__): + raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") + + self.original_attn_processors = self.attn_processors + + for module in self.modules(): + if isinstance(module, Attention): + module.fuse_projections(fuse=True) + + def unfuse_qkv_projections(self): + """Disables the fused QKV projection if enabled. + + + + This API is 🧪 experimental. + + + + """ + if self.original_attn_processors is not None: + self.set_attn_processor(self.original_attn_processors) + + def unload_lora(self): + """Unloads LoRA weights.""" + deprecate( + "unload_lora", + "0.28.0", + "Calling `unload_lora()` is deprecated and will be removed in a future version. Please install `peft` and then call `disable_adapters().", + ) + for module in self.modules(): + if hasattr(module, "set_lora_layer"): + module.set_lora_layer(None) + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, + mid_block_additional_residual: Optional[torch.Tensor] = None, + down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + return_dict: bool = True, + ) -> Union[UNet2DConditionOutput, Tuple]: + r""" + The [`UNetFlatConditionModel`] forward method. + + Args: + sample (`torch.FloatTensor`): + The noisy input tensor with the following shape `(batch, channel, height, width)`. + timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. + encoder_hidden_states (`torch.FloatTensor`): + The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. + class_labels (`torch.Tensor`, *optional*, defaults to `None`): + Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. + timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`): + Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed + through the `self.time_embedding` layer to obtain the timestep embeddings. + attention_mask (`torch.Tensor`, *optional*, defaults to `None`): + An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. 
If `1` the mask + is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large + negative values to the attention scores corresponding to "discard" tokens. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + added_cond_kwargs (`dict`, *optional*): + A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that + are passed along to the UNet blocks. + down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*): + A tuple of tensors that if specified are added to the residuals of the down UNet blocks (the long skip + connections from down blocks to up blocks), for example from ControlNet side model(s). + mid_block_additional_residual (`torch.Tensor`, *optional*): + A tensor that if specified is added to the residual of the middle UNet block, for example from a + ControlNet side model. + down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*): + A tuple of tensors that if specified are added within the UNet down blocks, for example from T2I-Adapter + side model(s). + encoder_attention_mask (`torch.Tensor`): + A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If + `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, + which adds large negative values to the attention scores corresponding to "discard" tokens. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain + tuple. + + Returns: + [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: + If `return_dict` is `True`, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise + a `tuple` is returned where the first element is the sample tensor. + """ + # By default samples have to be at least a multiple of the overall upsampling factor. + # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). + # However, the upsampling interpolation output size can be forced to fit any upsampling size + # on the fly if necessary. + default_overall_up_factor = 2**self.num_upsamplers + + # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` + forward_upsample_size = False + upsample_size = None + + for dim in sample.shape[-2:]: + if dim % default_overall_up_factor != 0: + # Forward upsample size to force interpolation output size.
+ forward_upsample_size = True + break + + # ensure attention_mask is a bias, and give it a singleton query_tokens dimension + # expects mask of shape: + # [batch, key_tokens] + # adds singleton query_tokens dimension: + # [batch, 1, key_tokens] + # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: + # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) + # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) + if attention_mask is not None: + # assume that mask is expressed as: + # (1 = keep, 0 = discard) + # convert mask into a bias that can be added to attention scores: + # (keep = +0, discard = -10000.0) + attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1) + + # convert encoder_attention_mask to a bias the same way we do for attention_mask + if encoder_attention_mask is not None: + encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 + encoder_attention_mask = encoder_attention_mask.unsqueeze(1) + + # 0. center input if necessary + if self.config.center_input_sample: + sample = 2 * sample - 1.0 + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = sample.device.type == "mps" + if isinstance(timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(sample.shape[0]) + + t_emb = self.time_proj(timesteps) + + # `Timesteps` does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. + t_emb = t_emb.to(dtype=sample.dtype) + + emb = self.time_embedding(t_emb, timestep_cond) + aug_emb = None + + if self.class_embedding is not None: + if class_labels is None: + raise ValueError("class_labels should be provided when num_class_embeds > 0") + + if self.config.class_embed_type == "timestep": + class_labels = self.time_proj(class_labels) + + # `Timesteps` does not contain any weights and will always return f32 tensors + # there might be better ways to encapsulate this. 
+ class_labels = class_labels.to(dtype=sample.dtype) + + class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) + + if self.config.class_embeddings_concat: + emb = torch.cat([emb, class_emb], dim=-1) + else: + emb = emb + class_emb + + if self.config.addition_embed_type == "text": + aug_emb = self.add_embedding(encoder_hidden_states) + elif self.config.addition_embed_type == "text_image": + # Kandinsky 2.1 - style + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" + ) + + image_embs = added_cond_kwargs.get("image_embeds") + text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states) + aug_emb = self.add_embedding(text_embs, image_embs) + elif self.config.addition_embed_type == "text_time": + # SDXL - style + if "text_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" + ) + text_embeds = added_cond_kwargs.get("text_embeds") + if "time_ids" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" + ) + time_ids = added_cond_kwargs.get("time_ids") + time_embeds = self.add_time_proj(time_ids.flatten()) + time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) + add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) + add_embeds = add_embeds.to(emb.dtype) + aug_emb = self.add_embedding(add_embeds) + elif self.config.addition_embed_type == "image": + # Kandinsky 2.2 - style + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" + ) + image_embs = added_cond_kwargs.get("image_embeds") + aug_emb = self.add_embedding(image_embs) + elif self.config.addition_embed_type == "image_hint": + # Kandinsky 2.2 - style + if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`" + ) + image_embs = added_cond_kwargs.get("image_embeds") + hint = added_cond_kwargs.get("hint") + aug_emb, hint = self.add_embedding(image_embs, hint) + sample = torch.cat([sample, hint], dim=1) + + emb = emb + aug_emb if aug_emb is not None else emb + + if self.time_embed_act is not None: + emb = self.time_embed_act(emb) + + if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj": + encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) + elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj": + # Kadinsky 2.1 - style + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" + ) + + image_embeds = added_cond_kwargs.get("image_embeds") + encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds) + elif 
self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj": + # Kandinsky 2.2 - style + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" + ) + image_embeds = added_cond_kwargs.get("image_embeds") + encoder_hidden_states = self.encoder_hid_proj(image_embeds) + elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj": + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" + ) + image_embeds = added_cond_kwargs.get("image_embeds") + image_embeds = self.encoder_hid_proj(image_embeds) + encoder_hidden_states = (encoder_hidden_states, image_embeds) + + # 2. pre-process + sample = self.conv_in(sample) + + # 2.5 GLIGEN position net + if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None: + cross_attention_kwargs = cross_attention_kwargs.copy() + gligen_args = cross_attention_kwargs.pop("gligen") + cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)} + + # 3. down + lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0 + if USE_PEFT_BACKEND: + # weight the lora layers by setting `lora_scale` for each PEFT layer + scale_lora_layers(self, lora_scale) + + is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None + # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets + is_adapter = down_intrablock_additional_residuals is not None + # maintain backward compatibility for legacy usage, where + # T2I-Adapter and ControlNet both use down_block_additional_residuals arg + # but can only use one or the other + if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None: + deprecate( + "T2I should not use down_block_additional_residuals", + "1.3.0", + "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \ + and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \ + for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. 
", + standard_warn=False, + ) + down_intrablock_additional_residuals = down_block_additional_residuals + is_adapter = True + + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: + # For t2i-adapter CrossAttnDownBlockFlat + additional_residuals = {} + if is_adapter and len(down_intrablock_additional_residuals) > 0: + additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0) + + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + **additional_residuals, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb) + if is_adapter and len(down_intrablock_additional_residuals) > 0: + sample += down_intrablock_additional_residuals.pop(0) + + down_block_res_samples += res_samples + + if is_controlnet: + new_down_block_res_samples = () + + for down_block_res_sample, down_block_additional_residual in zip( + down_block_res_samples, down_block_additional_residuals + ): + down_block_res_sample = down_block_res_sample + down_block_additional_residual + new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) + + down_block_res_samples = new_down_block_res_samples + + # 4. mid + if self.mid_block is not None: + if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + ) + else: + sample = self.mid_block(sample, emb) + + # To support T2I-Adapter-XL + if ( + is_adapter + and len(down_intrablock_additional_residuals) > 0 + and sample.shape == down_intrablock_additional_residuals[0].shape + ): + sample += down_intrablock_additional_residuals.pop(0) + + if is_controlnet: + sample = sample + mid_block_additional_residual + + # 5. up + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block and forward_upsample_size: + upsample_size = down_block_res_samples[-1].shape[2:] + + if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + upsample_size=upsample_size, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + ) + else: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + upsample_size=upsample_size, + scale=lora_scale, + ) + + # 6. 
post-process + if self.conv_norm_out: + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + if USE_PEFT_BACKEND: + # remove `lora_scale` from each PEFT layer + unscale_lora_layers(self, lora_scale) + + if not return_dict: + return (sample,) + + return UNet2DConditionOutput(sample=sample) + + +class LinearMultiDim(nn.Linear): + def __init__(self, in_features, out_features=None, second_dim=4, *args, **kwargs): + in_features = [in_features, second_dim, 1] if isinstance(in_features, int) else list(in_features) + if out_features is None: + out_features = in_features + out_features = [out_features, second_dim, 1] if isinstance(out_features, int) else list(out_features) + self.in_features_multidim = in_features + self.out_features_multidim = out_features + super().__init__(np.array(in_features).prod(), np.array(out_features).prod()) + + def forward(self, input_tensor, *args, **kwargs): + shape = input_tensor.shape + n_dim = len(self.in_features_multidim) + input_tensor = input_tensor.reshape(*shape[0:-n_dim], self.in_features) + output_tensor = super().forward(input_tensor) + output_tensor = output_tensor.view(*shape[0:-n_dim], *self.out_features_multidim) + return output_tensor + + +class ResnetBlockFlat(nn.Module): + def __init__( + self, + *, + in_channels, + out_channels=None, + dropout=0.0, + temb_channels=512, + groups=32, + groups_out=None, + pre_norm=True, + eps=1e-6, + time_embedding_norm="default", + use_in_shortcut=None, + second_dim=4, + **kwargs, + ): + super().__init__() + self.pre_norm = pre_norm + self.pre_norm = True + + in_channels = [in_channels, second_dim, 1] if isinstance(in_channels, int) else list(in_channels) + self.in_channels_prod = np.array(in_channels).prod() + self.channels_multidim = in_channels + + if out_channels is not None: + out_channels = [out_channels, second_dim, 1] if isinstance(out_channels, int) else list(out_channels) + out_channels_prod = np.array(out_channels).prod() + self.out_channels_multidim = out_channels + else: + out_channels_prod = self.in_channels_prod + self.out_channels_multidim = self.channels_multidim + self.time_embedding_norm = time_embedding_norm + + if groups_out is None: + groups_out = groups + + self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=self.in_channels_prod, eps=eps, affine=True) + self.conv1 = torch.nn.Conv2d(self.in_channels_prod, out_channels_prod, kernel_size=1, padding=0) + + if temb_channels is not None: + self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels_prod) + else: + self.time_emb_proj = None + + self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels_prod, eps=eps, affine=True) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = torch.nn.Conv2d(out_channels_prod, out_channels_prod, kernel_size=1, padding=0) + + self.nonlinearity = nn.SiLU() + + self.use_in_shortcut = ( + self.in_channels_prod != out_channels_prod if use_in_shortcut is None else use_in_shortcut + ) + + self.conv_shortcut = None + if self.use_in_shortcut: + self.conv_shortcut = torch.nn.Conv2d( + self.in_channels_prod, out_channels_prod, kernel_size=1, stride=1, padding=0 + ) + + def forward(self, input_tensor, temb): + shape = input_tensor.shape + n_dim = len(self.channels_multidim) + input_tensor = input_tensor.reshape(*shape[0:-n_dim], self.in_channels_prod, 1, 1) + input_tensor = input_tensor.view(-1, self.in_channels_prod, 1, 1) + + hidden_states = input_tensor + + hidden_states = self.norm1(hidden_states) + hidden_states = 
self.nonlinearity(hidden_states) + hidden_states = self.conv1(hidden_states) + + if temb is not None: + temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None] + hidden_states = hidden_states + temb + + hidden_states = self.norm2(hidden_states) + hidden_states = self.nonlinearity(hidden_states) + + hidden_states = self.dropout(hidden_states) + hidden_states = self.conv2(hidden_states) + + if self.conv_shortcut is not None: + input_tensor = self.conv_shortcut(input_tensor) + + output_tensor = input_tensor + hidden_states + + output_tensor = output_tensor.view(*shape[0:-n_dim], -1) + output_tensor = output_tensor.view(*shape[0:-n_dim], *self.out_channels_multidim) + + return output_tensor + + +class DownBlockFlat(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_downsample: bool = True, + downsample_padding: int = 1, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlockFlat( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + LinearMultiDim( + out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + output_states = () + + for resnet in self.resnets: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb, use_reentrant=False + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb) + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +class CrossAttnDownBlockFlat(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + output_scale_factor: float = 1.0, + downsample_padding: int = 1, + add_downsample: bool = True, + dual_cross_attention: bool = False, + use_linear_projection: 
bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + attention_type: str = "default", + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * num_layers + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlockFlat( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + if not dual_cross_attention: + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block[i], + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + ) + else: + attentions.append( + DualTransformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + LinearMultiDim( + out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + additional_residuals: Optional[torch.FloatTensor] = None, + ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: + output_states = () + + blocks = list(zip(self.resnets, self.attentions)) + + for i, (resnet, attn) in enumerate(blocks): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + else: + hidden_states = resnet(hidden_states, temb) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + + # apply additional 
residuals to the output of the last pair of resnet and attention blocks + if i == len(blocks) - 1 and additional_residuals is not None: + hidden_states = hidden_states + additional_residuals + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +# Copied from diffusers.models.unets.unet_2d_blocks.UpBlock2D with UpBlock2D->UpBlockFlat, ResnetBlock2D->ResnetBlockFlat, Upsample2D->LinearMultiDim +class UpBlockFlat(nn.Module): + def __init__( + self, + in_channels: int, + prev_output_channel: int, + out_channels: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlockFlat( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + upsample_size: Optional[int] = None, + *args, + **kwargs, + ) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
+ deprecate("scale", "1.0.0", deprecation_message) + + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + + for resnet in self.resnets: + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb, use_reentrant=False + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size) + + return hidden_states + + +# Copied from diffusers.models.unets.unet_2d_blocks.CrossAttnUpBlock2D with CrossAttnUpBlock2D->CrossAttnUpBlockFlat, ResnetBlock2D->ResnetBlockFlat, Upsample2D->LinearMultiDim +class CrossAttnUpBlockFlat(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + attention_type: str = "default", + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * num_layers + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlockFlat( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + if not dual_cross_attention: + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block[i], + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + 
upcast_attention=upcast_attention, + attention_type=attention_type, + ) + ) + else: + attentions.append( + DualTransformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.FloatTensor, + res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + upsample_size: Optional[int] = None, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + if cross_attention_kwargs is not None: + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. `scale` will be ignored.") + + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + + for resnet, attn in zip(self.resnets, self.attentions): + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + else: + hidden_states = resnet(hidden_states, temb) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size) + + return hidden_states + + +# Copied from diffusers.models.unets.unet_2d_blocks.UNetMidBlock2D with UNetMidBlock2D->UNetMidBlockFlat, ResnetBlock2D->ResnetBlockFlat +class UNetMidBlockFlat(nn.Module): + """ + A 2D UNet mid-block [`UNetMidBlockFlat`] with multiple residual blocks and optional attention blocks. + + Args: + in_channels (`int`): The number of input channels. 
+ temb_channels (`int`): The number of temporal embedding channels. + dropout (`float`, *optional*, defaults to 0.0): The dropout rate. + num_layers (`int`, *optional*, defaults to 1): The number of residual blocks. + resnet_eps (`float`, *optional*, 1e-6 ): The epsilon value for the resnet blocks. + resnet_time_scale_shift (`str`, *optional*, defaults to `default`): + The type of normalization to apply to the time embeddings. This can help to improve the performance of the + model on tasks with long-range temporal dependencies. + resnet_act_fn (`str`, *optional*, defaults to `swish`): The activation function for the resnet blocks. + resnet_groups (`int`, *optional*, defaults to 32): + The number of groups to use in the group normalization layers of the resnet blocks. + attn_groups (`Optional[int]`, *optional*, defaults to None): The number of groups for the attention blocks. + resnet_pre_norm (`bool`, *optional*, defaults to `True`): + Whether to use pre-normalization for the resnet blocks. + add_attention (`bool`, *optional*, defaults to `True`): Whether to add attention blocks. + attention_head_dim (`int`, *optional*, defaults to 1): + Dimension of a single attention head. The number of attention heads is determined based on this value and + the number of input channels. + output_scale_factor (`float`, *optional*, defaults to 1.0): The output scale factor. + + Returns: + `torch.FloatTensor`: The output of the last residual block, which is a tensor of shape `(batch_size, + in_channels, height, width)`. + + """ + + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", # default, spatial + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + attn_groups: Optional[int] = None, + resnet_pre_norm: bool = True, + add_attention: bool = True, + attention_head_dim: int = 1, + output_scale_factor: float = 1.0, + ): + super().__init__() + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + self.add_attention = add_attention + + if attn_groups is None: + attn_groups = resnet_groups if resnet_time_scale_shift == "default" else None + + # there is always at least one resnet + if resnet_time_scale_shift == "spatial": + resnets = [ + ResnetBlockCondNorm2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm="spatial", + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + ) + ] + else: + resnets = [ + ResnetBlockFlat( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ] + attentions = [] + + if attention_head_dim is None: + logger.warning( + f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}." 
+ ) + attention_head_dim = in_channels + + for _ in range(num_layers): + if self.add_attention: + attentions.append( + Attention( + in_channels, + heads=in_channels // attention_head_dim, + dim_head=attention_head_dim, + rescale_output_factor=output_scale_factor, + eps=resnet_eps, + norm_num_groups=attn_groups, + spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None, + residual_connection=True, + bias=True, + upcast_softmax=True, + _from_deprecated_attn_block=True, + ) + ) + else: + attentions.append(None) + + if resnet_time_scale_shift == "spatial": + resnets.append( + ResnetBlockCondNorm2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm="spatial", + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + ) + ) + else: + resnets.append( + ResnetBlockFlat( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + hidden_states = self.resnets[0](hidden_states, temb) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + if attn is not None: + hidden_states = attn(hidden_states, temb=temb) + hidden_states = resnet(hidden_states, temb) + + return hidden_states + + +# Copied from diffusers.models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn with UNetMidBlock2DCrossAttn->UNetMidBlockFlatCrossAttn, ResnetBlock2D->ResnetBlockFlat +class UNetMidBlockFlatCrossAttn(nn.Module): + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + output_scale_factor: float = 1.0, + cross_attention_dim: int = 1280, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + upcast_attention: bool = False, + attention_type: str = "default", + ): + super().__init__() + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + + # support for variable transformer layers per block + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * num_layers + + # there is always at least one resnet + resnets = [ + ResnetBlockFlat( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ] + attentions = [] + + for i in range(num_layers): + if not dual_cross_attention: + attentions.append( + Transformer2DModel( + num_attention_heads, + in_channels // num_attention_heads, + in_channels=in_channels, + num_layers=transformer_layers_per_block[i], + 
cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + ) + else: + attentions.append( + DualTransformer2DModel( + num_attention_heads, + in_channels // num_attention_heads, + in_channels=in_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + resnets.append( + ResnetBlockFlat( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + if cross_attention_kwargs is not None: + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. `scale` will be ignored.") + + hidden_states = self.resnets[0](hidden_states, temb) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + else: + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + hidden_states = resnet(hidden_states, temb) + + return hidden_states + + +# Copied from diffusers.models.unets.unet_2d_blocks.UNetMidBlock2DSimpleCrossAttn with UNetMidBlock2DSimpleCrossAttn->UNetMidBlockFlatSimpleCrossAttn, ResnetBlock2D->ResnetBlockFlat +class UNetMidBlockFlatSimpleCrossAttn(nn.Module): + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + attention_head_dim: int = 1, + output_scale_factor: float = 1.0, + cross_attention_dim: int = 1280, + skip_time_act: bool = False, + only_cross_attention: bool = False, + cross_attention_norm: Optional[str] = None, + ): + super().__init__() + + self.has_cross_attention = True + + self.attention_head_dim = attention_head_dim + resnet_groups = 
resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + + self.num_heads = in_channels // self.attention_head_dim + + # there is always at least one resnet + resnets = [ + ResnetBlockFlat( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + ) + ] + attentions = [] + + for _ in range(num_layers): + processor = ( + AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor() + ) + + attentions.append( + Attention( + query_dim=in_channels, + cross_attention_dim=in_channels, + heads=self.num_heads, + dim_head=self.attention_head_dim, + added_kv_proj_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + bias=True, + upcast_softmax=True, + only_cross_attention=only_cross_attention, + cross_attention_norm=cross_attention_norm, + processor=processor, + ) + ) + resnets.append( + ResnetBlockFlat( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + def forward( + self, + hidden_states: torch.FloatTensor, + temb: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. `scale` will be ignored.") + + if attention_mask is None: + # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask. + mask = None if encoder_hidden_states is None else encoder_attention_mask + else: + # when attention_mask is defined: we don't even check for encoder_attention_mask. + # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks. + # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. 
+ # then we can simplify this whole if/else block to: + # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask + mask = attention_mask + + hidden_states = self.resnets[0](hidden_states, temb) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + # attn + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=mask, + **cross_attention_kwargs, + ) + + # resnet + hidden_states = resnet(hidden_states, temb) + + return hidden_states diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py new file mode 100644 index 000000000..4455d20df --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py @@ -0,0 +1,421 @@ +import inspect +from typing import Callable, List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModel + +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import logging +from ...pipeline_utils import DiffusionPipeline +from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline +from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline +from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class VersatileDiffusionPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
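+
+    Examples:
+
+        A minimal sketch of how the combined pipeline dispatches to one of its modes; it assumes the
+        `shi-labs/versatile-diffusion` checkpoint and a CUDA device are available (the `image_variation`
+        and `dual_guided` methods are invoked analogously):
+
+        ```py
+        >>> import torch
+        >>> from diffusers import VersatileDiffusionPipeline
+
+        >>> pipe = VersatileDiffusionPipeline.from_pretrained(
+        ...     "shi-labs/versatile-diffusion", torch_dtype=torch.float16
+        ... )
+        >>> pipe = pipe.to("cuda")
+        >>> image = pipe.text_to_image("an astronaut riding on a horse on mars").images[0]
+        ```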
+ """ + + tokenizer: CLIPTokenizer + image_feature_extractor: CLIPImageProcessor + text_encoder: CLIPTextModel + image_encoder: CLIPVisionModel + image_unet: UNet2DConditionModel + text_unet: UNet2DConditionModel + vae: AutoencoderKL + scheduler: KarrasDiffusionSchedulers + + def __init__( + self, + tokenizer: CLIPTokenizer, + image_feature_extractor: CLIPImageProcessor, + text_encoder: CLIPTextModel, + image_encoder: CLIPVisionModel, + image_unet: UNet2DConditionModel, + text_unet: UNet2DConditionModel, + vae: AutoencoderKL, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, + image_feature_extractor=image_feature_extractor, + text_encoder=text_encoder, + image_encoder=image_encoder, + image_unet=image_unet, + text_unet=text_unet, + vae=vae, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + @torch.no_grad() + def image_variation( + self, + image: Union[torch.FloatTensor, PIL.Image.Image], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + image (`PIL.Image.Image`, `List[PIL.Image.Image]` or `torch.Tensor`): + The image prompt or prompts to guide the image generation. + height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + ```py + >>> from diffusers import VersatileDiffusionPipeline + >>> import torch + >>> import requests + >>> from io import BytesIO + >>> from PIL import Image + + >>> # let's download an initial image + >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" + + >>> response = requests.get(url) + >>> image = Image.open(BytesIO(response.content)).convert("RGB") + + >>> pipe = VersatileDiffusionPipeline.from_pretrained( + ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> image = pipe.image_variation(image, generator=generator).images[0] + >>> image.save("./car_variation.png") + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + expected_components = inspect.signature(VersatileDiffusionImageVariationPipeline.__init__).parameters.keys() + components = {name: component for name, component in self.components.items() if name in expected_components} + return VersatileDiffusionImageVariationPipeline(**components)( + image=image, + height=height, + width=width, + num_inference_steps=num_inference_steps, + guidance_scale=guidance_scale, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + eta=eta, + generator=generator, + latents=latents, + output_type=output_type, + return_dict=return_dict, + callback=callback, + callback_steps=callback_steps, + ) + + @torch.no_grad() + def text_to_image( + self, + prompt: Union[str, List[str]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. 
+ height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + ```py + >>> from diffusers import VersatileDiffusionPipeline + >>> import torch + + >>> pipe = VersatileDiffusionPipeline.from_pretrained( + ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> image = pipe.text_to_image("an astronaut riding on a horse on mars", generator=generator).images[0] + >>> image.save("./astronaut.png") + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. 
+ """ + expected_components = inspect.signature(VersatileDiffusionTextToImagePipeline.__init__).parameters.keys() + components = {name: component for name, component in self.components.items() if name in expected_components} + temp_pipeline = VersatileDiffusionTextToImagePipeline(**components) + output = temp_pipeline( + prompt=prompt, + height=height, + width=width, + num_inference_steps=num_inference_steps, + guidance_scale=guidance_scale, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + eta=eta, + generator=generator, + latents=latents, + output_type=output_type, + return_dict=return_dict, + callback=callback, + callback_steps=callback_steps, + ) + # swap the attention blocks back to the original state + temp_pipeline._swap_unet_attention_blocks() + + return output + + @torch.no_grad() + def dual_guided( + self, + prompt: Union[PIL.Image.Image, List[PIL.Image.Image]], + image: Union[str, List[str]], + text_to_image_strength: float = 0.5, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. + height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. 
+ output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + ```py + >>> from diffusers import VersatileDiffusionPipeline + >>> import torch + >>> import requests + >>> from io import BytesIO + >>> from PIL import Image + + >>> # let's download an initial image + >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" + + >>> response = requests.get(url) + >>> image = Image.open(BytesIO(response.content)).convert("RGB") + >>> text = "a red car in the sun" + + >>> pipe = VersatileDiffusionPipeline.from_pretrained( + ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> text_to_image_strength = 0.75 + + >>> image = pipe.dual_guided( + ... prompt=text, image=image, text_to_image_strength=text_to_image_strength, generator=generator + ... ).images[0] + >>> image.save("./car_variation.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + + expected_components = inspect.signature(VersatileDiffusionDualGuidedPipeline.__init__).parameters.keys() + components = {name: component for name, component in self.components.items() if name in expected_components} + temp_pipeline = VersatileDiffusionDualGuidedPipeline(**components) + output = temp_pipeline( + prompt=prompt, + image=image, + text_to_image_strength=text_to_image_strength, + height=height, + width=width, + num_inference_steps=num_inference_steps, + guidance_scale=guidance_scale, + num_images_per_prompt=num_images_per_prompt, + eta=eta, + generator=generator, + latents=latents, + output_type=output_type, + return_dict=return_dict, + callback=callback, + callback_steps=callback_steps, + ) + temp_pipeline._revert_dual_attention() + + return output diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py new file mode 100644 index 000000000..8af739bbe --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py @@ -0,0 +1,556 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import Callable, List, Optional, Tuple, Union
+
+import numpy as np
+import PIL.Image
+import torch
+import torch.utils.checkpoint
+from transformers import (
+    CLIPImageProcessor,
+    CLIPTextModelWithProjection,
+    CLIPTokenizer,
+    CLIPVisionModelWithProjection,
+)
+
+from ....image_processor import VaeImageProcessor
+from ....models import AutoencoderKL, DualTransformer2DModel, Transformer2DModel, UNet2DConditionModel
+from ....schedulers import KarrasDiffusionSchedulers
+from ....utils import deprecate, logging
+from ....utils.torch_utils import randn_tensor
+from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
+from .modeling_text_unet import UNetFlatConditionModel
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+class VersatileDiffusionDualGuidedPipeline(DiffusionPipeline):
+    r"""
+    Pipeline for image-text dual-guided generation using Versatile Diffusion.
+
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
+    implemented for all pipelines (downloading, saving, running on a particular device, etc.).
+
+    Parameters:
+        tokenizer ([`~transformers.CLIPTokenizer`]):
+            A `CLIPTokenizer` to tokenize the text prompt.
+        image_feature_extractor ([`~transformers.CLIPImageProcessor`]):
+            A `CLIPImageProcessor` to preprocess the image prompt.
+        text_encoder ([`~transformers.CLIPTextModelWithProjection`]):
+            Frozen text encoder used to condition generation on the text prompt.
+        image_encoder ([`~transformers.CLIPVisionModelWithProjection`]):
+            Frozen vision encoder used to condition generation on the image prompt.
+        image_unet ([`UNet2DConditionModel`]):
+            A `UNet2DConditionModel` to denoise the encoded image latents.
+        text_unet ([`UNetFlatConditionModel`]):
+            A flat UNet whose transformer blocks are merged with those of `image_unet` for dual-guided generation.
+        vae ([`AutoencoderKL`]):
+            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
+        scheduler ([`SchedulerMixin`]):
+            A scheduler to be used in combination with `image_unet` to denoise the encoded image latents. Can be one
+            of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
+ """ + + model_cpu_offload_seq = "bert->unet->vqvae" + + tokenizer: CLIPTokenizer + image_feature_extractor: CLIPImageProcessor + text_encoder: CLIPTextModelWithProjection + image_encoder: CLIPVisionModelWithProjection + image_unet: UNet2DConditionModel + text_unet: UNetFlatConditionModel + vae: AutoencoderKL + scheduler: KarrasDiffusionSchedulers + + _optional_components = ["text_unet"] + + def __init__( + self, + tokenizer: CLIPTokenizer, + image_feature_extractor: CLIPImageProcessor, + text_encoder: CLIPTextModelWithProjection, + image_encoder: CLIPVisionModelWithProjection, + image_unet: UNet2DConditionModel, + text_unet: UNetFlatConditionModel, + vae: AutoencoderKL, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + self.register_modules( + tokenizer=tokenizer, + image_feature_extractor=image_feature_extractor, + text_encoder=text_encoder, + image_encoder=image_encoder, + image_unet=image_unet, + text_unet=text_unet, + vae=vae, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + if self.text_unet is not None and ( + "dual_cross_attention" not in self.image_unet.config or not self.image_unet.config.dual_cross_attention + ): + # if loading from a universal checkpoint rather than a saved dual-guided pipeline + self._convert_to_dual_attention() + + def remove_unused_weights(self): + self.register_modules(text_unet=None) + + def _convert_to_dual_attention(self): + """ + Replace image_unet's `Transformer2DModel` blocks with `DualTransformer2DModel` that contains transformer blocks + from both `image_unet` and `text_unet` + """ + for name, module in self.image_unet.named_modules(): + if isinstance(module, Transformer2DModel): + parent_name, index = name.rsplit(".", 1) + index = int(index) + + image_transformer = self.image_unet.get_submodule(parent_name)[index] + text_transformer = self.text_unet.get_submodule(parent_name)[index] + + config = image_transformer.config + dual_transformer = DualTransformer2DModel( + num_attention_heads=config.num_attention_heads, + attention_head_dim=config.attention_head_dim, + in_channels=config.in_channels, + num_layers=config.num_layers, + dropout=config.dropout, + norm_num_groups=config.norm_num_groups, + cross_attention_dim=config.cross_attention_dim, + attention_bias=config.attention_bias, + sample_size=config.sample_size, + num_vector_embeds=config.num_vector_embeds, + activation_fn=config.activation_fn, + num_embeds_ada_norm=config.num_embeds_ada_norm, + ) + dual_transformer.transformers[0] = image_transformer + dual_transformer.transformers[1] = text_transformer + + self.image_unet.get_submodule(parent_name)[index] = dual_transformer + self.image_unet.register_to_config(dual_cross_attention=True) + + def _revert_dual_attention(self): + """ + Revert the image_unet `DualTransformer2DModel` blocks back to `Transformer2DModel` with image_unet weights Call + this function if you reuse `image_unet` in another pipeline, e.g. 
`VersatileDiffusionPipeline` + """ + for name, module in self.image_unet.named_modules(): + if isinstance(module, DualTransformer2DModel): + parent_name, index = name.rsplit(".", 1) + index = int(index) + self.image_unet.get_submodule(parent_name)[index] = module.transformers[0] + + self.image_unet.register_to_config(dual_cross_attention=False) + + def _encode_text_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + """ + + def normalize_embeddings(encoder_output): + embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state) + embeds_pooled = encoder_output.text_embeds + embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True) + return embeds + + batch_size = len(prompt) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids + + if not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = normalize_embeddings(prompt_embeds) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_tokens = [""] * batch_size + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. 
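+        # (one pass on the unconditional embeddings and one on the text embeddings).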
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def _encode_image_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + """ + + def normalize_embeddings(encoder_output): + embeds = self.image_encoder.vision_model.post_layernorm(encoder_output.last_hidden_state) + embeds = self.image_encoder.visual_projection(embeds) + embeds_pooled = embeds[:, 0:1] + embeds = embeds / torch.norm(embeds_pooled, dim=-1, keepdim=True) + return embeds + + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + # get prompt text embeddings + image_input = self.image_feature_extractor(images=prompt, return_tensors="pt") + pixel_values = image_input.pixel_values.to(device).to(self.image_encoder.dtype) + image_embeddings = self.image_encoder(pixel_values) + image_embeddings = normalize_embeddings(image_embeddings) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = image_embeddings.shape + image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) + image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_images = [np.zeros((512, 512, 3)) + 0.5] * batch_size + uncond_images = self.image_feature_extractor(images=uncond_images, return_tensors="pt") + pixel_values = uncond_images.pixel_values.to(device).to(self.image_encoder.dtype) + negative_prompt_embeds = self.image_encoder(pixel_values) + negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and conditional embeddings into a single batch + # to avoid doing two forward passes + image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) + + return image_embeddings + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs(self, prompt, image, height, width, callback_steps): + if not isinstance(prompt, str) and not isinstance(prompt, PIL.Image.Image) and not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` `PIL.Image` or `list` but is {type(prompt)}") + if not isinstance(image, str) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list): + raise ValueError(f"`image` has to be of type `str` `PIL.Image` or `list` but is {type(image)}") + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def set_transformer_params(self, mix_ratio: float = 0.5, condition_types: Tuple = ("text", "image")): + for name, module in self.image_unet.named_modules(): + if isinstance(module, DualTransformer2DModel): + module.mix_ratio = mix_ratio + + for i, type in enumerate(condition_types): + if type == "text": + module.condition_lengths[i] = self.text_encoder.config.max_position_embeddings + module.transformer_index_for_condition[i] = 1 # use the second (text) transformer + else: + module.condition_lengths[i] = 257 + module.transformer_index_for_condition[i] = 0 # use the first (image) transformer + + @torch.no_grad() + def __call__( + self, + prompt: Union[PIL.Image.Image, List[PIL.Image.Image]], + image: Union[str, List[str]], + text_to_image_strength: float = 0.5, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. + height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + ```py + >>> from diffusers import VersatileDiffusionDualGuidedPipeline + >>> import torch + >>> import requests + >>> from io import BytesIO + >>> from PIL import Image + + >>> # let's download an initial image + >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" + + >>> response = requests.get(url) + >>> image = Image.open(BytesIO(response.content)).convert("RGB") + >>> text = "a red car in the sun" + + >>> pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained( + ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 + ... ) + >>> pipe.remove_unused_weights() + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> text_to_image_strength = 0.75 + + >>> image = pipe( + ... prompt=text, image=image, text_to_image_strength=text_to_image_strength, generator=generator + ... ).images[0] + >>> image.save("./car_variation.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.image_unet.config.sample_size * self.vae_scale_factor + width = width or self.image_unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, image, height, width, callback_steps) + + # 2. Define call parameters + prompt = [prompt] if not isinstance(prompt, list) else prompt + image = [image] if not isinstance(image, list) else image + batch_size = len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompts + prompt_embeds = self._encode_text_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance) + image_embeddings = self._encode_image_prompt(image, device, num_images_per_prompt, do_classifier_free_guidance) + dual_prompt_embeddings = torch.cat([prompt_embeds, image_embeddings], dim=1) + prompt_types = ("text", "image") + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. 
Prepare latent variables + num_channels_latents = self.image_unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + dual_prompt_embeddings.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Combine the attention blocks of the image and text UNets + self.set_transformer_params(text_to_image_strength, prompt_types) + + # 8. Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=dual_prompt_embeddings).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py new file mode 100644 index 000000000..345c15f18 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py @@ -0,0 +1,397 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
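The three Versatile Diffusion pipelines vendored in this patch share the same classifier-free guidance step in their denoising loops: the UNet is run on a doubled batch (unconditional embeddings first, conditional embeddings second) and the two predictions are recombined with `guidance_scale`. The following minimal sketch restates that arithmetic on dummy tensors; the shapes and values are illustrative only and are not part of the vendored files.

```py
import torch

# Stand-in for the UNet output on the doubled batch: row 0 corresponds to the
# unconditional (negative) embeddings, row 1 to the text/image-conditioned ones.
noise_pred = torch.randn(2, 4, 64, 64)
guidance_scale = 7.5  # the pipelines' default

# Split the doubled batch back apart and push the prediction away from the
# unconditional branch, as each denoising loop above and below does.
noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
```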
+
+import inspect
+from typing import Callable, List, Optional, Union
+
+import numpy as np
+import PIL.Image
+import torch
+import torch.utils.checkpoint
+from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
+
+from ....image_processor import VaeImageProcessor
+from ....models import AutoencoderKL, UNet2DConditionModel
+from ....schedulers import KarrasDiffusionSchedulers
+from ....utils import deprecate, logging
+from ....utils.torch_utils import randn_tensor
+from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+class VersatileDiffusionImageVariationPipeline(DiffusionPipeline):
+    r"""
+    Pipeline for image variation using Versatile Diffusion.
+
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
+    implemented for all pipelines (downloading, saving, running on a particular device, etc.).
+
+    Parameters:
+        image_feature_extractor ([`~transformers.CLIPImageProcessor`]):
+            A `CLIPImageProcessor` to preprocess the image prompt.
+        image_encoder ([`~transformers.CLIPVisionModelWithProjection`]):
+            Frozen vision encoder used to condition generation on the image prompt.
+        image_unet ([`UNet2DConditionModel`]):
+            A `UNet2DConditionModel` to denoise the encoded image latents.
+        vae ([`AutoencoderKL`]):
+            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
+        scheduler ([`SchedulerMixin`]):
+            A scheduler to be used in combination with `image_unet` to denoise the encoded image latents. Can be one
+            of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
+    """
+
+    model_cpu_offload_seq = "bert->unet->vqvae"
+
+    image_feature_extractor: CLIPImageProcessor
+    image_encoder: CLIPVisionModelWithProjection
+    image_unet: UNet2DConditionModel
+    vae: AutoencoderKL
+    scheduler: KarrasDiffusionSchedulers
+
+    def __init__(
+        self,
+        image_feature_extractor: CLIPImageProcessor,
+        image_encoder: CLIPVisionModelWithProjection,
+        image_unet: UNet2DConditionModel,
+        vae: AutoencoderKL,
+        scheduler: KarrasDiffusionSchedulers,
+    ):
+        super().__init__()
+        self.register_modules(
+            image_feature_extractor=image_feature_extractor,
+            image_encoder=image_encoder,
+            image_unet=image_unet,
+            vae=vae,
+            scheduler=scheduler,
+        )
+        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+
+    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
+        r"""
+        Encodes the prompt into text encoder hidden states.
+
+        Args:
+            prompt (`str` or `List[str]`):
+                prompt to be encoded
+            device: (`torch.device`):
+                torch device
+            num_images_per_prompt (`int`):
+                number of images that should be generated per prompt
+            do_classifier_free_guidance (`bool`):
+                whether to use classifier free guidance or not
+            negative_prompt (`str` or `List[str]`):
+                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+                if `guidance_scale` is less than `1`).
+ """ + + def normalize_embeddings(encoder_output): + embeds = self.image_encoder.vision_model.post_layernorm(encoder_output.last_hidden_state) + embeds = self.image_encoder.visual_projection(embeds) + embeds_pooled = embeds[:, 0:1] + embeds = embeds / torch.norm(embeds_pooled, dim=-1, keepdim=True) + return embeds + + if isinstance(prompt, torch.Tensor) and len(prompt.shape) == 4: + prompt = list(prompt) + + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + # get prompt text embeddings + image_input = self.image_feature_extractor(images=prompt, return_tensors="pt") + pixel_values = image_input.pixel_values.to(device).to(self.image_encoder.dtype) + image_embeddings = self.image_encoder(pixel_values) + image_embeddings = normalize_embeddings(image_embeddings) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = image_embeddings.shape + image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) + image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_images: List[str] + if negative_prompt is None: + uncond_images = [np.zeros((512, 512, 3)) + 0.5] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, PIL.Image.Image): + uncond_images = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_images = negative_prompt + + uncond_images = self.image_feature_extractor(images=uncond_images, return_tensors="pt") + pixel_values = uncond_images.pixel_values.to(device).to(self.image_encoder.dtype) + negative_prompt_embeds = self.image_encoder(pixel_values) + negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and conditional embeddings into a single batch + # to avoid doing two forward passes + image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) + + return image_embeddings + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline.check_inputs + def check_inputs(self, image, height, width, callback_steps): + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" + f" {type(image)}" + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + image (`PIL.Image.Image`, `List[PIL.Image.Image]` or `torch.Tensor`): + The image prompt or prompts to guide the image generation. + height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. 
+ callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + ```py + >>> from diffusers import VersatileDiffusionImageVariationPipeline + >>> import torch + >>> import requests + >>> from io import BytesIO + >>> from PIL import Image + + >>> # let's download an initial image + >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" + + >>> response = requests.get(url) + >>> image = Image.open(BytesIO(response.content)).convert("RGB") + + >>> pipe = VersatileDiffusionImageVariationPipeline.from_pretrained( + ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> image = pipe(image, generator=generator).images[0] + >>> image.save("./car_variation.png") + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.image_unet.config.sample_size * self.vae_scale_factor + width = width or self.image_unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(image, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(image, PIL.Image.Image) else len(image) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + image_embeddings = self._encode_prompt( + image, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.image_unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + image_embeddings.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py new file mode 100644 index 000000000..0b2518f7e --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py @@ -0,0 +1,475 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, List, Optional, Union + +import torch +import torch.utils.checkpoint +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer + +from ....image_processor import VaeImageProcessor +from ....models import AutoencoderKL, Transformer2DModel, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import deprecate, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .modeling_text_unet import UNetFlatConditionModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using Versatile Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
+
+    Parameters:
+        tokenizer ([`~transformers.CLIPTokenizer`]):
+            A `CLIPTokenizer` to tokenize the text prompt.
+        text_encoder ([`~transformers.CLIPTextModelWithProjection`]):
+            Frozen text encoder used to condition generation on the text prompt.
+        image_unet ([`UNet2DConditionModel`]):
+            A `UNet2DConditionModel` to denoise the encoded image latents.
+        text_unet ([`UNetFlatConditionModel`]):
+            A flat UNet whose transformer blocks are swapped with those of `image_unet` for text-to-image generation.
+        vae ([`AutoencoderKL`]):
+            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
+        scheduler ([`SchedulerMixin`]):
+            A scheduler to be used in combination with `image_unet` to denoise the encoded image latents. Can be one
+            of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
+    """
+
+    model_cpu_offload_seq = "bert->unet->vqvae"
+
+    tokenizer: CLIPTokenizer
+    image_feature_extractor: CLIPImageProcessor
+    text_encoder: CLIPTextModelWithProjection
+    image_unet: UNet2DConditionModel
+    text_unet: UNetFlatConditionModel
+    vae: AutoencoderKL
+    scheduler: KarrasDiffusionSchedulers
+
+    _optional_components = ["text_unet"]
+
+    def __init__(
+        self,
+        tokenizer: CLIPTokenizer,
+        text_encoder: CLIPTextModelWithProjection,
+        image_unet: UNet2DConditionModel,
+        text_unet: UNetFlatConditionModel,
+        vae: AutoencoderKL,
+        scheduler: KarrasDiffusionSchedulers,
+    ):
+        super().__init__()
+        self.register_modules(
+            tokenizer=tokenizer,
+            text_encoder=text_encoder,
+            image_unet=image_unet,
+            text_unet=text_unet,
+            vae=vae,
+            scheduler=scheduler,
+        )
+        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+
+        if self.text_unet is not None:
+            self._swap_unet_attention_blocks()
+
+    def _swap_unet_attention_blocks(self):
+        """
+        Swap the `Transformer2DModel` blocks between the image and text UNets
+        """
+        for name, module in self.image_unet.named_modules():
+            if isinstance(module, Transformer2DModel):
+                parent_name, index = name.rsplit(".", 1)
+                index = int(index)
+                self.image_unet.get_submodule(parent_name)[index], self.text_unet.get_submodule(parent_name)[index] = (
+                    self.text_unet.get_submodule(parent_name)[index],
+                    self.image_unet.get_submodule(parent_name)[index],
+                )
+
+    def remove_unused_weights(self):
+        self.register_modules(text_unet=None)
+
+    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
+        r"""
+        Encodes the prompt into text encoder hidden states.
+
+        Args:
+            prompt (`str` or `List[str]`):
+                prompt to be encoded
+            device: (`torch.device`):
+                torch device
+            num_images_per_prompt (`int`):
+                number of images that should be generated per prompt
+            do_classifier_free_guidance (`bool`):
+                whether to use classifier free guidance or not
+            negative_prompt (`str` or `List[str]`):
+                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+                if `guidance_scale` is less than `1`).
+ """ + + def normalize_embeddings(encoder_output): + embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state) + embeds_pooled = encoder_output.text_embeds + embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True) + return embeds + + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids + + if not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = normalize_embeddings(prompt_embeds) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. + height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). 
+ num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + ```py + >>> from diffusers import VersatileDiffusionTextToImagePipeline + >>> import torch + + >>> pipe = VersatileDiffusionTextToImagePipeline.from_pretrained( + ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 + ... ) + >>> pipe.remove_unused_weights() + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> image = pipe("an astronaut riding on a horse on mars", generator=generator).images[0] + >>> image.save("./astronaut.png") + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.image_unet.config.sample_size * self.vae_scale_factor + width = width or self.image_unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. 
Prepare latent variables + num_channels_latents = self.image_unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/vq_diffusion/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/vq_diffusion/__init__.py new file mode 100644 index 000000000..070903377 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/vq_diffusion/__init__.py @@ -0,0 +1,57 @@ +from typing import TYPE_CHECKING + +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import ( + LearnedClassifierFreeSamplingEmbeddings, + VQDiffusionPipeline, + ) + + _dummy_objects.update( + { + "LearnedClassifierFreeSamplingEmbeddings": LearnedClassifierFreeSamplingEmbeddings, + "VQDiffusionPipeline": VQDiffusionPipeline, + } + ) +else: + _import_structure["pipeline_vq_diffusion"] = ["LearnedClassifierFreeSamplingEmbeddings", "VQDiffusionPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import ( + LearnedClassifierFreeSamplingEmbeddings, + VQDiffusionPipeline, + ) + else: + from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value 
in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py new file mode 100644 index 000000000..0c55d04e6 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py @@ -0,0 +1,325 @@ +# Copyright 2024 Microsoft and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, List, Optional, Tuple, Union + +import torch +from transformers import CLIPTextModel, CLIPTokenizer + +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin, Transformer2DModel, VQModel +from ....schedulers import VQDiffusionScheduler +from ....utils import logging +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin): + """ + Utility class for storing learned text embeddings for classifier free sampling + """ + + @register_to_config + def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None): + super().__init__() + + self.learnable = learnable + + if self.learnable: + assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" + assert length is not None, "learnable=True requires `length` to be set" + + embeddings = torch.zeros(length, hidden_size) + else: + embeddings = None + + self.embeddings = torch.nn.Parameter(embeddings) + + +class VQDiffusionPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using VQ Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vqvae ([`VQModel`]): + Vector Quantized Variational Auto-Encoder (VAE) model to encode and decode images to and from latent + representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + transformer ([`Transformer2DModel`]): + A conditional `Transformer2DModel` to denoise the encoded image latents. + scheduler ([`VQDiffusionScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. 
+ """ + + vqvae: VQModel + text_encoder: CLIPTextModel + tokenizer: CLIPTokenizer + transformer: Transformer2DModel + learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings + scheduler: VQDiffusionScheduler + + def __init__( + self, + vqvae: VQModel, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + transformer: Transformer2DModel, + scheduler: VQDiffusionScheduler, + learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings, + ): + super().__init__() + + self.register_modules( + vqvae=vqvae, + transformer=transformer, + text_encoder=text_encoder, + tokenizer=tokenizer, + scheduler=scheduler, + learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, + ) + + def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + + if text_input_ids.shape[-1] > self.tokenizer.model_max_length: + removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0] + + # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. + # While CLIP does normalize the pooled output of the text transformer when combining + # the image and text embeddings, CLIP does not directly normalize the last hidden state. + # + # CLIP normalizing the pooled output. + # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 + prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True) + + # duplicate text embeddings for each generation per prompt + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + if self.learned_classifier_free_sampling_embeddings.learnable: + negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings + negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1) + else: + uncond_tokens = [""] * batch_size + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0] + # See comment for normalizing text embeddings + negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + num_inference_steps: int = 100, + guidance_scale: float = 5.0, + truncation_rate: float = 1.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + ) -> Union[ImagePipelineOutput, Tuple]: + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + truncation_rate (`float`, *optional*, defaults to 1.0 (equivalent to no truncation)): + Used to "truncate" the predicted classes for x_0 such that the cumulative probability for a pixel is at + most `truncation_rate`. The lowest probabilities that would increase the cumulative probability above + `truncation_rate` are set to zero. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor` of shape (batch), *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Must be valid embedding indices.If not provided, a latents tensor will be generated of + completely masked latent pixels. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. 
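+
+        Examples:
+
+        A minimal usage sketch; the checkpoint name below is assumed to be a VQ Diffusion checkpoint on
+        the Hub and is illustrative only:
+
+        ```py
+        >>> import torch
+        >>> from diffusers import VQDiffusionPipeline
+
+        >>> pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
+        >>> pipe = pipe.to("cuda")
+
+        >>> image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]
+        >>> image.save("./teddy_bear.png")
+        ```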
+ """ + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + # get the initial completely masked latents unless the user supplied it + + latents_shape = (batch_size, self.transformer.num_latent_pixels) + if latents is None: + mask_class = self.transformer.num_vector_embeds - 1 + latents = torch.full(latents_shape, mask_class).to(self.device) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): + raise ValueError( + "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0," + f" {self.transformer.num_vector_embeds - 1} (inclusive)." + ) + latents = latents.to(self.device) + + # set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=self.device) + + timesteps_tensor = self.scheduler.timesteps.to(self.device) + + sample = latents + + for i, t in enumerate(self.progress_bar(timesteps_tensor)): + # expand the sample if we are doing classifier free guidance + latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample + + # predict the un-noised image + # model_output == `log_p_x_0` + model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample + + if do_classifier_free_guidance: + model_output_uncond, model_output_text = model_output.chunk(2) + model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) + model_output -= torch.logsumexp(model_output, dim=1, keepdim=True) + + model_output = self.truncate(model_output, truncation_rate) + + # remove `log(0)`'s (`-inf`s) + model_output = model_output.clamp(-70) + + # compute the previous noisy sample x_t -> x_t-1 + sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + callback(i, t, sample) + + embedding_channels = self.vqvae.config.vq_embed_dim + embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) + embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape) + image = self.vqvae.decode(embeddings, force_not_quantize=True).sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) + + def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor: + """ + Truncates `log_p_x_0` such that for each column vector, the total cumulative probability is `truncation_rate` + The lowest probabilities that would increase the cumulative probability above `truncation_rate` are set to + 
zero. + """ + sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True) + sorted_p_x_0 = torch.exp(sorted_log_p_x_0) + keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate + + # Ensure that at least the largest probability is not zeroed out + all_true = torch.full_like(keep_mask[:, 0:1, :], True) + keep_mask = torch.cat((all_true, keep_mask), dim=1) + keep_mask = keep_mask[:, :-1, :] + + keep_mask = keep_mask.gather(1, indices.argsort(1)) + + rv = log_p_x_0.clone() + + rv[~keep_mask] = -torch.inf # -inf = log(0) + + return rv diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dit/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dit/__init__.py new file mode 100644 index 000000000..fe2a94f3c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dit/__init__.py @@ -0,0 +1,19 @@ +from typing import TYPE_CHECKING + +from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_dit": ["DiTPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_dit import DiTPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dit/pipeline_dit.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dit/pipeline_dit.py new file mode 100644 index 000000000..289ea4960 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/dit/pipeline_dit.py @@ -0,0 +1,233 @@ +# Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) +# William Peebles and Saining Xie +# +# Copyright (c) 2021 OpenAI +# MIT License +# +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, List, Optional, Tuple, Union + +import torch + +from ...models import AutoencoderKL, Transformer2DModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class DiTPipeline(DiffusionPipeline): + r""" + Pipeline for image generation based on a Transformer backbone instead of a UNet. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + transformer ([`Transformer2DModel`]): + A class conditioned `Transformer2DModel` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. 
+        scheduler ([`DDIMScheduler`]):
+            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
+    """
+
+    model_cpu_offload_seq = "transformer->vae"
+
+    def __init__(
+        self,
+        transformer: Transformer2DModel,
+        vae: AutoencoderKL,
+        scheduler: KarrasDiffusionSchedulers,
+        id2label: Optional[Dict[int, str]] = None,
+    ):
+        super().__init__()
+        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
+
+        # create an ImageNet label -> id dictionary for easier use
+        self.labels = {}
+        if id2label is not None:
+            for key, value in id2label.items():
+                for label in value.split(","):
+                    self.labels[label.lstrip().rstrip()] = int(key)
+            self.labels = dict(sorted(self.labels.items()))
+
+    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
+        r"""
+        Map label strings from ImageNet to corresponding class ids.
+
+        Parameters:
+            label (`str` or `List[str]`):
+                Label strings to be mapped to class ids.
+
+        Returns:
+            `list` of `int`:
+                Class ids to be processed by the pipeline.
+        """
+
+        if not isinstance(label, list):
+            label = list(label)
+
+        for l in label:
+            if l not in self.labels:
+                raise ValueError(
+                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
+                )
+
+        return [self.labels[l] for l in label]
+
+    @torch.no_grad()
+    def __call__(
+        self,
+        class_labels: List[int],
+        guidance_scale: float = 4.0,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        num_inference_steps: int = 50,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+    ) -> Union[ImagePipelineOutput, Tuple]:
+        r"""
+        The call function to the pipeline for generation.
+
+        Args:
+            class_labels (List[int]):
+                List of ImageNet class labels for the images to be generated.
+            guidance_scale (`float`, *optional*, defaults to 4.0):
+                A higher guidance scale value encourages the model to generate images closely linked to the
+                provided `class_labels` at the expense of lower image quality. Guidance scale is enabled when
+                `guidance_scale > 1`.
+            generator (`torch.Generator`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return an [`ImagePipelineOutput`] instead of a plain tuple.
+ + Examples: + + ```py + >>> from diffusers import DiTPipeline, DPMSolverMultistepScheduler + >>> import torch + + >>> pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16) + >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + >>> pipe = pipe.to("cuda") + + >>> # pick words from Imagenet class labels + >>> pipe.labels # to print all available words + + >>> # pick words that exist in ImageNet + >>> words = ["white shark", "umbrella"] + + >>> class_ids = pipe.get_label_ids(words) + + >>> generator = torch.manual_seed(33) + >>> output = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator) + + >>> image = output.images[0] # label 'white shark' + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + + batch_size = len(class_labels) + latent_size = self.transformer.config.sample_size + latent_channels = self.transformer.config.in_channels + + latents = randn_tensor( + shape=(batch_size, latent_channels, latent_size, latent_size), + generator=generator, + device=self._execution_device, + dtype=self.transformer.dtype, + ) + latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents + + class_labels = torch.tensor(class_labels, device=self._execution_device).reshape(-1) + class_null = torch.tensor([1000] * batch_size, device=self._execution_device) + class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels + + # set step values + self.scheduler.set_timesteps(num_inference_steps) + for t in self.progress_bar(self.scheduler.timesteps): + if guidance_scale > 1: + half = latent_model_input[: len(latent_model_input) // 2] + latent_model_input = torch.cat([half, half], dim=0) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + timesteps = t + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = latent_model_input.device.type == "mps" + if isinstance(timesteps, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(latent_model_input.device) + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(latent_model_input.shape[0]) + # predict noise model_output + noise_pred = self.transformer( + latent_model_input, timestep=timesteps, class_labels=class_labels_input + ).sample + + # perform guidance + if guidance_scale > 1: + eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] + cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0) + + half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps) + eps = torch.cat([half_eps, half_eps], dim=0) + + noise_pred = torch.cat([eps, rest], dim=1) + + # learned sigma + if self.transformer.config.out_channels // 2 == latent_channels: + model_output, _ = torch.split(noise_pred, latent_channels, dim=1) + else: + model_output = noise_pred + + # compute previous image: x_t -> x_t-1 + latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample + + if guidance_scale > 1: + latents, _ = latent_model_input.chunk(2, dim=0) + else: + latents = latent_model_input + + latents = 1 / self.vae.config.scaling_factor * latents + samples = self.vae.decode(latents).sample + + samples = (samples / 2 + 0.5).clamp(0, 1) + + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + samples = samples.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + samples = self.numpy_to_pil(samples) + + if not return_dict: + return (samples,) + + return ImagePipelineOutput(images=samples) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/free_init_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/free_init_utils.py new file mode 100644 index 000000000..50c28cc69 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/free_init_utils.py @@ -0,0 +1,184 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
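+
+# FreeInit (https://arxiv.org/abs/2312.07537) periodically re-initializes the low-frequency component of the
+# initial video latents: the current latents are re-noised back to the last training timestep, low-pass
+# filtered in the frequency domain, and recombined with the high-frequency component of fresh Gaussian
+# noise before sampling runs again, i.e. roughly
+#     x_mixed = IFFT(FFT(z_t) * low_pass + FFT(z_rand) * (1 - low_pass))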
+ +import math +from typing import Tuple, Union + +import torch +import torch.fft as fft + +from ..utils.torch_utils import randn_tensor + + +class FreeInitMixin: + r"""Mixin class for FreeInit.""" + + def enable_free_init( + self, + num_iters: int = 3, + use_fast_sampling: bool = False, + method: str = "butterworth", + order: int = 4, + spatial_stop_frequency: float = 0.25, + temporal_stop_frequency: float = 0.25, + ): + """Enables the FreeInit mechanism as in https://arxiv.org/abs/2312.07537. + + This implementation has been adapted from the [official repository](https://github.com/TianxingWu/FreeInit). + + Args: + num_iters (`int`, *optional*, defaults to `3`): + Number of FreeInit noise re-initialization iterations. + use_fast_sampling (`bool`, *optional*, defaults to `False`): + Whether or not to speedup sampling procedure at the cost of probably lower quality results. Enables + the "Coarse-to-Fine Sampling" strategy, as mentioned in the paper, if set to `True`. + method (`str`, *optional*, defaults to `butterworth`): + Must be one of `butterworth`, `ideal` or `gaussian` to use as the filtering method for the + FreeInit low pass filter. + order (`int`, *optional*, defaults to `4`): + Order of the filter used in `butterworth` method. Larger values lead to `ideal` method behaviour + whereas lower values lead to `gaussian` method behaviour. + spatial_stop_frequency (`float`, *optional*, defaults to `0.25`): + Normalized stop frequency for spatial dimensions. Must be between 0 to 1. Referred to as `d_s` in + the original implementation. + temporal_stop_frequency (`float`, *optional*, defaults to `0.25`): + Normalized stop frequency for temporal dimensions. Must be between 0 to 1. Referred to as `d_t` in + the original implementation. + """ + self._free_init_num_iters = num_iters + self._free_init_use_fast_sampling = use_fast_sampling + self._free_init_method = method + self._free_init_order = order + self._free_init_spatial_stop_frequency = spatial_stop_frequency + self._free_init_temporal_stop_frequency = temporal_stop_frequency + + def disable_free_init(self): + """Disables the FreeInit mechanism if enabled.""" + self._free_init_num_iters = None + + @property + def free_init_enabled(self): + return hasattr(self, "_free_init_num_iters") and self._free_init_num_iters is not None + + def _get_free_init_freq_filter( + self, + shape: Tuple[int, ...], + device: Union[str, torch.dtype], + filter_type: str, + order: float, + spatial_stop_frequency: float, + temporal_stop_frequency: float, + ) -> torch.Tensor: + r"""Returns the FreeInit filter based on filter type and other input conditions.""" + + time, height, width = shape[-3], shape[-2], shape[-1] + mask = torch.zeros(shape) + + if spatial_stop_frequency == 0 or temporal_stop_frequency == 0: + return mask + + if filter_type == "butterworth": + + def retrieve_mask(x): + return 1 / (1 + (x / spatial_stop_frequency**2) ** order) + elif filter_type == "gaussian": + + def retrieve_mask(x): + return math.exp(-1 / (2 * spatial_stop_frequency**2) * x) + elif filter_type == "ideal": + + def retrieve_mask(x): + return 1 if x <= spatial_stop_frequency * 2 else 0 + else: + raise NotImplementedError("`filter_type` must be one of gaussian, butterworth or ideal") + + for t in range(time): + for h in range(height): + for w in range(width): + d_square = ( + ((spatial_stop_frequency / temporal_stop_frequency) * (2 * t / time - 1)) ** 2 + + (2 * h / height - 1) ** 2 + + (2 * w / width - 1) ** 2 + ) + mask[..., t, h, w] = retrieve_mask(d_square) + + return 
mask.to(device) + + def _apply_freq_filter(self, x: torch.Tensor, noise: torch.Tensor, low_pass_filter: torch.Tensor) -> torch.Tensor: + r"""Noise reinitialization.""" + # FFT + x_freq = fft.fftn(x, dim=(-3, -2, -1)) + x_freq = fft.fftshift(x_freq, dim=(-3, -2, -1)) + noise_freq = fft.fftn(noise, dim=(-3, -2, -1)) + noise_freq = fft.fftshift(noise_freq, dim=(-3, -2, -1)) + + # frequency mix + high_pass_filter = 1 - low_pass_filter + x_freq_low = x_freq * low_pass_filter + noise_freq_high = noise_freq * high_pass_filter + x_freq_mixed = x_freq_low + noise_freq_high # mix in freq domain + + # IFFT + x_freq_mixed = fft.ifftshift(x_freq_mixed, dim=(-3, -2, -1)) + x_mixed = fft.ifftn(x_freq_mixed, dim=(-3, -2, -1)).real + + return x_mixed + + def _apply_free_init( + self, + latents: torch.Tensor, + free_init_iteration: int, + num_inference_steps: int, + device: torch.device, + dtype: torch.dtype, + generator: torch.Generator, + ): + if free_init_iteration == 0: + self._free_init_initial_noise = latents.detach().clone() + return latents, self.scheduler.timesteps + + latent_shape = latents.shape + + free_init_filter_shape = (1, *latent_shape[1:]) + free_init_freq_filter = self._get_free_init_freq_filter( + shape=free_init_filter_shape, + device=device, + filter_type=self._free_init_method, + order=self._free_init_order, + spatial_stop_frequency=self._free_init_spatial_stop_frequency, + temporal_stop_frequency=self._free_init_temporal_stop_frequency, + ) + + current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 + diffuse_timesteps = torch.full((latent_shape[0],), current_diffuse_timestep).long() + + z_t = self.scheduler.add_noise( + original_samples=latents, noise=self._free_init_initial_noise, timesteps=diffuse_timesteps.to(device) + ).to(dtype=torch.float32) + + z_rand = randn_tensor( + shape=latent_shape, + generator=generator, + device=device, + dtype=torch.float32, + ) + latents = self._apply_freq_filter(z_t, z_rand, low_pass_filter=free_init_freq_filter) + latents = latents.to(dtype) + + # Coarse-to-Fine Sampling for faster inference (can lead to lower quality) + if self._free_init_use_fast_sampling: + num_inference_steps = int(num_inference_steps / self._free_init_num_iters * (free_init_iteration + 1)) + self.scheduler.set_timesteps(num_inference_steps, device=device) + + return latents, self.scheduler.timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/i2vgen_xl/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/i2vgen_xl/__init__.py new file mode 100644 index 000000000..b24a7e4ce --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/i2vgen_xl/__init__.py @@ -0,0 +1,46 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_i2vgen_xl"] = ["I2VGenXLPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and 
is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_i2vgen_xl import I2VGenXLPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py new file mode 100644 index 000000000..cb6f3e300 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py @@ -0,0 +1,798 @@ +# Copyright 2024 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...models import AutoencoderKL +from ...models.unets.unet_i2vgen_xl import I2VGenXLUNet +from ...schedulers import DDIMScheduler +from ...utils import ( + BaseOutput, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import I2VGenXLPipeline + >>> from diffusers.utils import export_to_gif, load_image + + >>> pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16") + >>> pipeline.enable_model_cpu_offload() + + >>> image_url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/i2vgen_xl_images/img_0009.png" + >>> image = load_image(image_url).convert("RGB") + + >>> prompt = "Papers were floating in the air on a table in the library" + >>> negative_prompt = "Distorted, discontinuous, Ugly, blurry, low resolution, motionless, static, disfigured, disconnected limbs, Ugly faces, incomplete arms" + >>> generator = torch.manual_seed(8888) + + >>> frames = pipeline( + ... prompt=prompt, + ... image=image, + ... num_inference_steps=50, + ... negative_prompt=negative_prompt, + ... guidance_scale=9.0, + ... generator=generator + ... 
).frames[0] + >>> video_path = export_to_gif(frames, "i2v.gif") + ``` +""" + + +# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid +def tensor2vid(video: torch.Tensor, processor: "VaeImageProcessor", output_type: str = "np"): + batch_size, channels, num_frames, height, width = video.shape + outputs = [] + for batch_idx in range(batch_size): + batch_vid = video[batch_idx].permute(1, 0, 2, 3) + batch_output = processor.postprocess(batch_vid, output_type) + + outputs.append(batch_output) + + if output_type == "np": + outputs = np.stack(outputs) + + elif output_type == "pt": + outputs = torch.stack(outputs) + + elif not output_type == "pil": + raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']") + + return outputs + + +@dataclass +class I2VGenXLPipelineOutput(BaseOutput): + r""" + Output class for image-to-video pipeline. + + Args: + frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): + List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing denoised + PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape + `(batch_size, num_frames, channels, height, width)` + """ + + frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]] + + +class I2VGenXLPipeline( + DiffusionPipeline, + StableDiffusionMixin, +): + r""" + Pipeline for image-to-video generation as proposed in [I2VGenXL](https://i2vgen-xl.github.io/). + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`I2VGenXLUNet`]): + A [`I2VGenXLUNet`] to denoise the encoded video latents. + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + image_encoder: CLIPVisionModelWithProjection, + feature_extractor: CLIPImageProcessor, + unet: I2VGenXLUNet, + scheduler: DDIMScheduler, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + unet=unet, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + # `do_resize=False` as we do custom resizing. + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_resize=False) + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
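+    # In the denoising loop this corresponds to the usual classifier-free-guidance update,
+    #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond),
+    # so guidance is only reported as enabled when `guidance_scale > 1`.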
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + def encode_prompt( + self, + prompt, + device, + num_videos_per_prompt, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_videos_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. 
The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. + prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if self.do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + # Apply clip_skip to negative prompt embeds + if clip_skip is None: + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + else: + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + negative_prompt_embeds = negative_prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ negative_prompt_embeds = self.text_encoder.text_model.final_layer_norm(negative_prompt_embeds) + + if self.do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) + + return prompt_embeds, negative_prompt_embeds + + def _encode_image(self, image, device, num_videos_per_prompt): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.image_processor.pil_to_numpy(image) + image = self.image_processor.numpy_to_pt(image) + + # Normalize the image with CLIP training stats. + image = self.feature_extractor( + images=image, + do_normalize=True, + do_center_crop=False, + do_resize=False, + do_rescale=False, + return_tensors="pt", + ).pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeddings = self.image_encoder(image).image_embeds + image_embeddings = image_embeddings.unsqueeze(1) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = image_embeddings.shape + image_embeddings = image_embeddings.repeat(1, num_videos_per_prompt, 1) + image_embeddings = image_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) + + if self.do_classifier_free_guidance: + negative_image_embeddings = torch.zeros_like(image_embeddings) + image_embeddings = torch.cat([negative_image_embeddings, image_embeddings]) + + return image_embeddings + + def decode_latents(self, latents, decode_chunk_size=None): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + if decode_chunk_size is not None: + frames = [] + for i in range(0, latents.shape[0], decode_chunk_size): + frame = self.vae.decode(latents[i : i + decode_chunk_size]).sample + frames.append(frame) + image = torch.cat(frames, dim=0) + else: + image = self.vae.decode(latents).sample + + decode_shape = (batch_size, num_frames, -1) + image.shape[2:] + video = image[None, :].reshape(decode_shape).permute(0, 2, 1, 3, 4) + + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" + f" {type(image)}" + ) + + def prepare_image_latents( + self, + image, + device, + num_frames, + num_videos_per_prompt, + ): + image = image.to(device=device) + image_latents = self.vae.encode(image).latent_dist.sample() + image_latents = image_latents * self.vae.config.scaling_factor + + # Add frames dimension to image latents + image_latents = image_latents.unsqueeze(2) + + # Append a position mask for each subsequent frame + # after the intial image latent frame + frame_position_mask = [] + for frame_idx in range(num_frames - 1): + scale = (frame_idx + 1) / (num_frames - 1) + frame_position_mask.append(torch.ones_like(image_latents[:, :, :1]) * scale) + if frame_position_mask: + frame_position_mask = torch.cat(frame_position_mask, dim=2) + image_latents = torch.cat([image_latents, frame_position_mask], dim=2) + + # duplicate image_latents for each generation per prompt, using mps friendly method + image_latents = image_latents.repeat(num_videos_per_prompt, 1, 1, 1, 1) + + if self.do_classifier_free_guidance: + image_latents = torch.cat([image_latents] * 2) + + return image_latents + + # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + height: Optional[int] = 704, + width: Optional[int] = 1280, + target_fps: Optional[int] = 16, + num_frames: int = 16, + num_inference_steps: int = 50, + guidance_scale: float = 9.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + eta: float = 0.0, + num_videos_per_prompt: Optional[int] = 1, + decode_chunk_size: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = 1, + ): + r""" + The call function to the pipeline for image-to-video generation with [`I2VGenXLPipeline`]. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. 
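The frame-position mask built in `prepare_image_latents` above conditions every generated frame on the first-frame latent plus a linear ramp `(frame_idx + 1) / (num_frames - 1)`. A minimal standalone sketch of that ramp, using toy tensor shapes rather than real VAE latents:

```py
import torch

# Toy stand-in for the encoded first frame: (batch, channels, 1 frame, h, w).
image_latents = torch.randn(1, 4, 1, 8, 8)
num_frames = 5

# One mask frame per subsequent frame, scaled linearly up to 1.0.
frame_position_mask = [
    torch.ones_like(image_latents[:, :, :1]) * (idx + 1) / (num_frames - 1)
    for idx in range(num_frames - 1)
]
conditioning = torch.cat([image_latents] + frame_position_mask, dim=2)

print(conditioning.shape)                                    # torch.Size([1, 4, 5, 8, 8])
print([m.flatten()[0].item() for m in frame_position_mask])  # [0.25, 0.5, 0.75, 1.0]
```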
+ image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`): + Image or images to guide image generation. If you provide a tensor, it needs to be compatible with + [`CLIPImageProcessor`](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json). + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + target_fps (`int`, *optional*): + Frames per second. The rate at which the generated images shall be exported to a video after generation. This is also used as a "micro-condition" while generation. + num_frames (`int`, *optional*): + The number of video frames to generate. + num_inference_steps (`int`, *optional*): + The number of denoising steps. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + num_videos_per_prompt (`int`, *optional*): + The number of images to generate per prompt. + decode_chunk_size (`int`, *optional*): + The number of frames to decode at a time. The higher the chunk size, the higher the temporal consistency + between frames, but also the higher the memory consumption. By default, the decoder will decode all frames at once + for maximal quality. Reduce `decode_chunk_size` to reduce memory usage. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. 
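To make the `decode_chunk_size` trade-off described above concrete, the sketch below reuses the same `range(0, N, chunk)` loop as `decode_latents`; `fake_decode` is a hypothetical stand-in for `self.vae.decode(...).sample`:

```py
import torch
import torch.nn.functional as F

def fake_decode(latents: torch.Tensor) -> torch.Tensor:
    # Hypothetical stand-in for vae.decode(latents).sample: 8x spatial upsample.
    return F.interpolate(latents, scale_factor=8)

latents = torch.randn(16, 4, 32, 32)  # 16 frames already flattened into the batch dim
decode_chunk_size = 4

frames = []
for i in range(0, latents.shape[0], decode_chunk_size):
    # Only decode_chunk_size frames sit in the decoder at a time,
    # trading peak memory for a few extra forward passes.
    frames.append(fake_decode(latents[i : i + decode_chunk_size]))
image = torch.cat(frames, dim=0)

print(image.shape)  # torch.Size([16, 4, 256, 256])
```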
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Examples: + + Returns: + [`pipelines.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`pipelines.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, image, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + self._guidance_scale = guidance_scale + + # 3.1 Encode input text prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 3.2 Encode image prompt + # 3.2.1 Image encodings. + # https://github.com/ali-vilab/i2vgen-xl/blob/2539c9262ff8a2a22fa9daecbfd13f0a2dbc32d0/tools/inferences/inference_i2vgen_entrance.py#L114 + cropped_image = _center_crop_wide(image, (width, width)) + cropped_image = _resize_bilinear( + cropped_image, (self.feature_extractor.crop_size["width"], self.feature_extractor.crop_size["height"]) + ) + image_embeddings = self._encode_image(cropped_image, device, num_videos_per_prompt) + + # 3.2.2 Image latents. + resized_image = _center_crop_wide(image, (width, height)) + image = self.image_processor.preprocess(resized_image).to(device=device, dtype=image_embeddings.dtype) + image_latents = self.prepare_image_latents( + image, + device=device, + num_frames=num_frames, + num_videos_per_prompt=num_videos_per_prompt, + ) + + # 3.3 Prepare additional conditions for the UNet. + if self.do_classifier_free_guidance: + fps_tensor = torch.tensor([target_fps, target_fps]).to(device) + else: + fps_tensor = torch.tensor([target_fps]).to(device) + fps_tensor = fps_tensor.repeat(batch_size * num_videos_per_prompt, 1).ravel() + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. 
Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_channels_latents, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + fps=fps_tensor, + image_latents=image_latents, + image_embeddings=image_embeddings, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # reshape latents + batch_size, channel, frames, width, height = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * frames, channel, width, height) + noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(batch_size * frames, channel, width, height) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # reshape latents back + latents = latents[None, :].reshape(batch_size, frames, channel, width, height).permute(0, 2, 1, 3, 4) + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # 8. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents, decode_chunk_size=decode_chunk_size) + video = tensor2vid(video_tensor, self.image_processor, output_type=output_type) + + # 9. Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return I2VGenXLPipelineOutput(frames=video) + + +# The following utilities are taken and adapted from +# https://github.com/ali-vilab/i2vgen-xl/blob/main/utils/transforms.py. + + +def _convert_pt_to_pil(image: Union[torch.Tensor, List[torch.Tensor]]): + if isinstance(image, list) and isinstance(image[0], torch.Tensor): + image = torch.cat(image, 0) + + if isinstance(image, torch.Tensor): + if image.ndim == 3: + image = image.unsqueeze(0) + + image_numpy = VaeImageProcessor.pt_to_numpy(image) + image_pil = VaeImageProcessor.numpy_to_pil(image_numpy) + image = image_pil + + return image + + +def _resize_bilinear( + image: Union[torch.Tensor, List[torch.Tensor], PIL.Image.Image, List[PIL.Image.Image]], resolution: Tuple[int, int] +): + # First convert the images to PIL in case they are float tensors (only relevant for tests now). 
+ image = _convert_pt_to_pil(image) + + if isinstance(image, list): + image = [u.resize(resolution, PIL.Image.BILINEAR) for u in image] + else: + image = image.resize(resolution, PIL.Image.BILINEAR) + return image + + +def _center_crop_wide( + image: Union[torch.Tensor, List[torch.Tensor], PIL.Image.Image, List[PIL.Image.Image]], resolution: Tuple[int, int] +): + # First convert the images to PIL in case they are float tensors (only relevant for tests now). + image = _convert_pt_to_pil(image) + + if isinstance(image, list): + scale = min(image[0].size[0] / resolution[0], image[0].size[1] / resolution[1]) + image = [u.resize((round(u.width // scale), round(u.height // scale)), resample=PIL.Image.BOX) for u in image] + + # center crop + x1 = (image[0].width - resolution[0]) // 2 + y1 = (image[0].height - resolution[1]) // 2 + image = [u.crop((x1, y1, x1 + resolution[0], y1 + resolution[1])) for u in image] + return image + else: + scale = min(image.size[0] / resolution[0], image.size[1] / resolution[1]) + image = image.resize((round(image.width // scale), round(image.height // scale)), resample=PIL.Image.BOX) + x1 = (image.width - resolution[0]) // 2 + y1 = (image.height - resolution[1]) // 2 + image = image.crop((x1, y1, x1 + resolution[0], y1 + resolution[1])) + return image diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/__init__.py new file mode 100644 index 000000000..606f7b378 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/__init__.py @@ -0,0 +1,66 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_kandinsky"] = ["KandinskyPipeline"] + _import_structure["pipeline_kandinsky_combined"] = [ + "KandinskyCombinedPipeline", + "KandinskyImg2ImgCombinedPipeline", + "KandinskyInpaintCombinedPipeline", + ] + _import_structure["pipeline_kandinsky_img2img"] = ["KandinskyImg2ImgPipeline"] + _import_structure["pipeline_kandinsky_inpaint"] = ["KandinskyInpaintPipeline"] + _import_structure["pipeline_kandinsky_prior"] = ["KandinskyPriorPipeline", "KandinskyPriorPipelineOutput"] + _import_structure["text_encoder"] = ["MultilingualCLIP"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .pipeline_kandinsky import KandinskyPipeline + from .pipeline_kandinsky_combined import ( + KandinskyCombinedPipeline, + KandinskyImg2ImgCombinedPipeline, + KandinskyInpaintCombinedPipeline, + ) + from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline + from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline + from .pipeline_kandinsky_prior import KandinskyPriorPipeline, 
KandinskyPriorPipelineOutput + from .text_encoder import MultilingualCLIP + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py new file mode 100644 index 000000000..34b5a47c2 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py @@ -0,0 +1,407 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, List, Optional, Union + +import torch +from transformers import ( + XLMRobertaTokenizer, +) + +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDIMScheduler, DDPMScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .text_encoder import MultilingualCLIP + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline + >>> import torch + + >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior") + >>> pipe_prior.to("cuda") + + >>> prompt = "red cat, 4k photo" + >>> out = pipe_prior(prompt) + >>> image_emb = out.image_embeds + >>> negative_image_emb = out.negative_image_embeds + + >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1") + >>> pipe.to("cuda") + + >>> image = pipe( + ... prompt, + ... image_embeds=image_emb, + ... negative_image_embeds=negative_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=100, + ... ).images + + >>> image[0].save("cat.png") + ``` +""" + + +def get_new_h_w(h, w, scale_factor=8): + new_h = h // scale_factor**2 + if h % scale_factor**2 != 0: + new_h += 1 + new_w = w // scale_factor**2 + if w % scale_factor**2 != 0: + new_w += 1 + return new_h * scale_factor, new_w * scale_factor + + +class KandinskyPipeline(DiffusionPipeline): + """ + Pipeline for text-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. 
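A quick worked check of `get_new_h_w` defined above: it returns the latent-space height/width that `movq` later upsamples, so requested pixel sizes are effectively rounded up to a multiple of `scale_factor**2`. The decoded sizes in the comments assume the usual `movq_scale_factor == 8`:

```py
def get_new_h_w(h, w, scale_factor=8):
    # Same rounding logic as in pipeline_kandinsky.py, written compactly.
    new_h = h // scale_factor**2 + (h % scale_factor**2 != 0)
    new_w = w // scale_factor**2 + (w % scale_factor**2 != 0)
    return new_h * scale_factor, new_w * scale_factor

print(get_new_h_w(512, 512))  # (64, 64)  -> decoded to 512 x 512
print(get_new_h_w(768, 768))  # (96, 96)  -> decoded to 768 x 768
print(get_new_h_w(500, 500))  # (64, 64)  -> 500 is rounded up to 512
```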
+ unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + """ + + model_cpu_offload_seq = "text_encoder->unet->movq" + + def __init__( + self, + text_encoder: MultilingualCLIP, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + movq: VQModel, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + truncation=True, + max_length=77, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_input_ids = text_input_ids.to(device) + text_mask = text_inputs.attention_mask.to(device) + + prompt_embeds, text_encoder_hidden_states = self.text_encoder( + input_ids=text_input_ids, attention_mask=text_mask + ) + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=77, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + uncond_text_input_ids = uncond_input.input_ids.to(device) + uncond_text_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder( + input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask + ) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], + negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], + negative_prompt: Optional[Union[str, List[str]]] = None, + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
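To illustrate the `guidance_scale` (`w`) definition above with numbers: during denoising the batched prediction is split into an unconditional and a text-conditioned half and recombined as `uncond + w * (text - uncond)` (the learned-variance channels are handled separately in the real loop). A toy sketch of that combination:

```py
import torch

guidance_scale = 4.0

# Toy batched prediction: first half unconditional, second half text-conditioned,
# mirroring the noise_pred.chunk(2) split in the denoising loop.
noise_pred = torch.cat([torch.full((1, 4, 8, 8), 0.1), torch.full((1, 4, 8, 8), 0.3)])
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)

guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(round(guided.flatten()[0].item(), 4))  # 0.9  (0.1 + 4.0 * (0.3 - 0.1))
```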
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + + if do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=prompt_embeds.dtype, device=device + ) + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps_tensor = self.scheduler.timesteps + + num_channels_latents = self.unet.config.in_channels + + height, width = get_new_h_w(height, width, self.movq_scale_factor) + + # create initial latent + latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + text_encoder_hidden_states.dtype, + device, + generator, + latents, + self.scheduler, + ) + + for i, t in enumerate(self.progress_bar(timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=text_encoder_hidden_states, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + ).prev_sample + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np", "pil"]: + raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git 
a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py new file mode 100644 index 000000000..da5ff52ed --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py @@ -0,0 +1,814 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Callable, List, Optional, Union + +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, + XLMRobertaTokenizer, +) + +from ...models import PriorTransformer, UNet2DConditionModel, VQModel +from ...schedulers import DDIMScheduler, DDPMScheduler, UnCLIPScheduler +from ...utils import ( + replace_example_docstring, +) +from ..pipeline_utils import DiffusionPipeline +from .pipeline_kandinsky import KandinskyPipeline +from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline +from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline +from .pipeline_kandinsky_prior import KandinskyPriorPipeline +from .text_encoder import MultilingualCLIP + + +TEXT2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipe = AutoPipelineForText2Image.from_pretrained( + "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" + + image = pipe(prompt=prompt, num_inference_steps=25).images[0] + ``` +""" + +IMAGE2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForImage2Image + import torch + import requests + from io import BytesIO + from PIL import Image + import os + + pipe = AutoPipelineForImage2Image.from_pretrained( + "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A fantasy landscape, Cinematic lighting" + negative_prompt = "low quality, bad quality" + + url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + + response = requests.get(url) + image = Image.open(BytesIO(response.content)).convert("RGB") + image.thumbnail((768, 768)) + + image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0] + ``` +""" + +INPAINT_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForInpainting + from diffusers.utils import load_image + import torch + import numpy as np + + pipe = AutoPipelineForInpainting.from_pretrained( + "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A fantasy landscape, 
Cinematic lighting" + negative_prompt = "low quality, bad quality" + + original_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" + ) + + mask = np.zeros((768, 768), dtype=np.float32) + # Let's mask out an area above the cat's head + mask[:250, 250:-250] = 1 + + image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0] + ``` +""" + + +class KandinskyCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for text-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + prior_prior ([`PriorTransformer`]): + The canonincal unCLIP prior to approximate the image embedding from the text embedding. + prior_image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + prior_tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + prior_scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. 
+ """ + + _load_connected_pipes = True + model_cpu_offload_seq = "text_encoder->unet->movq->prior_prior->prior_image_encoder->prior_text_encoder" + + def __init__( + self, + text_encoder: MultilingualCLIP, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyPriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using 🤗 + Accelerate, significantly reducing memory usage. Models are moved to a `torch.device('meta')` and loaded on a + GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis. + Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower. + """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. 
Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
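One detail worth spelling out for `num_images_per_prompt` in the combined pipeline: the prior returns `num_images_per_prompt` embeddings per prompt, so the prompt list is tiled to match before it is handed to the decoder. A toy sketch of that list-tiling step, with made-up values:

```py
# Toy values; in the pipeline, image_embeds.shape[0] is what the prior actually returns.
prompt = ["red cat, 4k photo"]
num_images_per_prompt = 2
embed_batch = len(prompt) * num_images_per_prompt

if len(prompt) < embed_batch and embed_batch % len(prompt) == 0:
    prompt = (embed_batch // len(prompt)) * prompt

print(prompt)  # ['red cat, 4k photo', 'red cat, 4k photo']
```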
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + outputs = self.decoder_pipe( + prompt=prompt, + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, + width=width, + height=height, + num_inference_steps=num_inference_steps, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + callback=callback, + callback_steps=callback_steps, + return_dict=return_dict, + ) + + self.maybe_free_model_hooks() + + return outputs + + +class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + prior_prior ([`PriorTransformer`]): + The canonincal unCLIP prior to approximate the image embedding from the text embedding. + prior_image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + prior_tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + prior_scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. 
+ """ + + _load_connected_pipes = True + model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->" "text_encoder->unet->movq" + + def __init__( + self, + text_encoder: MultilingualCLIP, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyPriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyImg2ImgPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + Note that offloading happens on a submodule basis. Memory savings are higher than with + `enable_model_cpu_offload`, but performance is lower. + """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + strength: float = 0.3, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. 
+ + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded + again. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + strength (`float`, *optional*, defaults to 0.3): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. 
Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + image = [image] if isinstance(prompt, PIL.Image.Image) else image + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + if ( + isinstance(image, (list, tuple)) + and len(image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(image) == 0 + ): + image = (image_embeds.shape[0] // len(image)) * image + + outputs = self.decoder_pipe( + prompt=prompt, + image=image, + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, + strength=strength, + width=width, + height=height, + num_inference_steps=num_inference_steps, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + callback=callback, + callback_steps=callback_steps, + return_dict=return_dict, + ) + + self.maybe_free_model_hooks() + + return outputs + + +class KandinskyInpaintCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + prior_prior ([`PriorTransformer`]): + The canonincal unCLIP prior to approximate the image embedding from the text embedding. + prior_image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + prior_tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + prior_scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. 
+ """ + + _load_connected_pipes = True + model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->text_encoder->unet->movq" + + def __init__( + self, + text_encoder: MultilingualCLIP, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyPriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyInpaintPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + Note that offloading happens on a submodule basis. Memory savings are higher than with + `enable_model_cpu_offload`, but performance is lower. 
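A minimal sketch of enabling this on the combined pipeline, assuming the upstream `AutoPipelineForInpainting` entry point and the `kandinsky-community/kandinsky-2-1-inpaint` checkpoint used in this file's examples are available in this vendored copy:

```py
import torch
from diffusers import AutoPipelineForInpainting

# Load the combined prior + decoder inpaint pipeline (checkpoint name is illustrative).
pipe = AutoPipelineForInpainting.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
)

# Keep submodules on the CPU and move each one to the GPU only for its forward pass;
# saves more memory than `enable_model_cpu_offload`, at the cost of speed.
pipe.enable_sequential_cpu_offload()
```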
+ """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + mask_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded + again. + mask_image (`np.array`): + Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while + black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single + channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, + so the expected shape would be `(B, H, W, 1)`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. 
+ prior_num_inference_steps (`int`, *optional*, defaults to 25):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ guidance_scale (`float`, *optional*, defaults to 4.0):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
+ (`np.array`) or `"pt"` (`torch.Tensor`).
+ callback (`Callable`, *optional*):
+ A function that is called every `callback_steps` steps during inference. The function is called with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
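An illustrative end-to-end call of this combined wrapper; the checkpoint name, source image, and mask layout are borrowed from the standalone inpaint example later in this patch, so treat this as a sketch rather than reference usage:

```py
import numpy as np
import torch
from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image

pipe = AutoPipelineForInpainting.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
)
pipe.to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/kandinsky/cat.png"
)

# White (1.0) pixels are repainted, black (0.0) pixels are preserved.
mask = np.zeros((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 1.0

# The prior and decoder run back to back inside this single call.
image = pipe(
    prompt="a hat",
    image=init_image,
    mask_image=mask,
    height=768,
    width=768,
    num_inference_steps=50,
).images[0]
image.save("cat_with_hat.png")
```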
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + image = [image] if isinstance(prompt, PIL.Image.Image) else image + mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + if ( + isinstance(image, (list, tuple)) + and len(image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(image) == 0 + ): + image = (image_embeds.shape[0] // len(image)) * image + + if ( + isinstance(mask_image, (list, tuple)) + and len(mask_image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(mask_image) == 0 + ): + mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image + + outputs = self.decoder_pipe( + prompt=prompt, + image=image, + mask_image=mask_image, + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, + width=width, + height=height, + num_inference_steps=num_inference_steps, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + callback=callback, + callback_steps=callback_steps, + return_dict=return_dict, + ) + + self.maybe_free_model_hooks() + + return outputs diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py new file mode 100644 index 000000000..4d091e7d7 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py @@ -0,0 +1,500 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
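For reference, the image-to-image combined wrapper defined above chains the prior and the decoder behind a single call. A minimal sketch, assuming the upstream `AutoPipelineForImage2Image` entry point is exposed by this vendored copy and reusing the prompt, image, and settings from the example docstrings:

```py
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image

pipe = AutoPipelineForImage2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
)
pipe.to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/kandinsky/frog.png"
)

# The prompt goes through the prior to produce an image embedding, which then
# conditions the decoder; `strength` controls how much of the input image is kept.
image = pipe(
    prompt="A red cartoon frog, 4k",
    image=init_image,
    strength=0.2,
    height=768,
    width=768,
    num_inference_steps=100,
).images[0]
image.save("red_frog.png")
```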
+ +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from PIL import Image +from transformers import ( + XLMRobertaTokenizer, +) + +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDIMScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .text_encoder import MultilingualCLIP + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline + >>> from diffusers.utils import load_image + >>> import torch + + >>> pipe_prior = KandinskyPriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> prompt = "A red cartoon frog, 4k" + >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) + + >>> pipe = KandinskyImg2ImgPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + + >>> init_image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/frog.png" + ... ) + + >>> image = pipe( + ... prompt, + ... image=init_image, + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=100, + ... strength=0.2, + ... ).images + + >>> image[0].save("red_frog.png") + ``` +""" + + +def get_new_h_w(h, w, scale_factor=8): + new_h = h // scale_factor**2 + if h % scale_factor**2 != 0: + new_h += 1 + new_w = w // scale_factor**2 + if w % scale_factor**2 != 0: + new_w += 1 + return new_h * scale_factor, new_w * scale_factor + + +def prepare_image(pil_image, w=512, h=512): + pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) + arr = np.array(pil_image.convert("RGB")) + arr = arr.astype(np.float32) / 127.5 - 1 + arr = np.transpose(arr, [2, 0, 1]) + image = torch.from_numpy(arr).unsqueeze(0) + return image + + +class KandinskyImg2ImgPipeline(DiffusionPipeline): + """ + Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. 
+ movq ([`VQModel`]): + MoVQ image encoder and decoder + """ + + model_cpu_offload_seq = "text_encoder->unet->movq" + + def __init__( + self, + text_encoder: MultilingualCLIP, + movq: VQModel, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: DDIMScheduler, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, latents, latent_timestep, shape, dtype, device, generator, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + + shape = latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + latents = self.add_noise(latents, noise, latent_timestep) + return latents + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=77, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_input_ids = text_input_ids.to(device) + text_mask = text_inputs.attention_mask.to(device) + + prompt_embeds, text_encoder_hidden_states = self.text_encoder( + input_ids=text_input_ids, attention_mask=text_mask + ) + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=77, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + uncond_text_input_ids = uncond_input.input_ids.to(device) + uncond_text_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder( + input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask + ) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + # add_noise method to overwrite the one in schedule because it use a different beta schedule for adding noise vs sampling + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + betas = torch.linspace(0.0001, 0.02, 1000, dtype=torch.float32) + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_cumprod = alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + + return noisy_samples + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + image_embeds: torch.FloatTensor, + negative_image_embeds: torch.FloatTensor, + negative_prompt: Optional[Union[str, List[str]]] = None, + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + strength: float = 0.3, + guidance_scale: float = 7.0, + num_images_per_prompt: int = 1, + generator: 
Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.FloatTensor`, `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + strength (`float`, *optional*, defaults to 0.3): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + # 1. Define call parameters + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = guidance_scale > 1.0 + + # 2. get text and image embeddings + prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + + if do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=prompt_embeds.dtype, device=device + ) + + # 3. pre-processing initial image + if not isinstance(image, list): + image = [image] + if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image): + raise ValueError( + f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor" + ) + + image = torch.cat([prepare_image(i, width, height) for i in image], dim=0) + image = image.to(dtype=prompt_embeds.dtype, device=device) + + latents = self.movq.encode(image)["latents"] + latents = latents.repeat_interleave(num_images_per_prompt, dim=0) + + # 4. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + + timesteps_tensor, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + + # the formular to calculate timestep for add_noise is taken from the original kandinsky repo + latent_timestep = int(self.scheduler.config.num_train_timesteps * strength) - 2 + + latent_timestep = torch.tensor([latent_timestep] * batch_size, dtype=timesteps_tensor.dtype, device=device) + + num_channels_latents = self.unet.config.in_channels + + height, width = get_new_h_w(height, width, self.movq_scale_factor) + + # 5. Create initial latent + latents = self.prepare_latents( + latents, + latent_timestep, + (batch_size, num_channels_latents, height, width), + text_encoder_hidden_states.dtype, + device, + generator, + self.scheduler, + ) + + # 6. 
Denoising loop + for i, t in enumerate(self.progress_bar(timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=text_encoder_hidden_states, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + ).prev_sample + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 7. post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np", "pil"]: + raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py new file mode 100644 index 000000000..d8d9e96e6 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py @@ -0,0 +1,635 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from copy import deepcopy +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from packaging import version +from PIL import Image +from transformers import ( + XLMRobertaTokenizer, +) + +from ... 
import __version__ +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDIMScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .text_encoder import MultilingualCLIP + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline + >>> from diffusers.utils import load_image + >>> import torch + >>> import numpy as np + + >>> pipe_prior = KandinskyPriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> prompt = "a hat" + >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) + + >>> pipe = KandinskyInpaintPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + + >>> init_image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ) + + >>> mask = np.zeros((768, 768), dtype=np.float32) + >>> mask[:250, 250:-250] = 1 + + >>> out = pipe( + ... prompt, + ... image=init_image, + ... mask_image=mask, + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=50, + ... ) + + >>> image = out.images[0] + >>> image.save("cat_with_hat.png") + ``` +""" + + +def get_new_h_w(h, w, scale_factor=8): + new_h = h // scale_factor**2 + if h % scale_factor**2 != 0: + new_h += 1 + new_w = w // scale_factor**2 + if w % scale_factor**2 != 0: + new_w += 1 + return new_h * scale_factor, new_w * scale_factor + + +def prepare_mask(masks): + prepared_masks = [] + for mask in masks: + old_mask = deepcopy(mask) + for i in range(mask.shape[1]): + for j in range(mask.shape[2]): + if old_mask[0][i][j] == 1: + continue + if i != 0: + mask[:, i - 1, j] = 0 + if j != 0: + mask[:, i, j - 1] = 0 + if i != 0 and j != 0: + mask[:, i - 1, j - 1] = 0 + if i != mask.shape[1] - 1: + mask[:, i + 1, j] = 0 + if j != mask.shape[2] - 1: + mask[:, i, j + 1] = 0 + if i != mask.shape[1] - 1 and j != mask.shape[2] - 1: + mask[:, i + 1, j + 1] = 0 + prepared_masks.append(mask) + return torch.stack(prepared_masks, dim=0) + + +def prepare_mask_and_masked_image(image, mask, height, width): + r""" + Prepares a pair (mask, image) to be consumed by the Kandinsky inpaint pipeline. This means that those inputs will + be converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for + the ``image`` and ``1`` for the ``mask``. + + The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be + binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. + It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` + ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. 
+ height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + tuple[torch.Tensor]: The pair (mask, image) as ``torch.Tensor`` with 4 + dimensions: ``batch x channels x height x width``. + """ + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + if mask is None: + raise ValueError("`mask_image` input cannot be undefined.") + + if isinstance(image, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") + + # Batch single image + if image.ndim == 3: + assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" + image = image.unsqueeze(0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" + assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" + assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" + + # Check image is in [-1, 1] + if image.min() < -1 or image.max() > 1: + raise ValueError("Image should be in [-1, 1] range") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # Image as float32 + image = image.to(dtype=torch.float32) + elif isinstance(mask, torch.Tensor): + raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") + else: + # preprocess image + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + # resize all images w.r.t passed height an width + image = [i.resize((width, height), resample=Image.BICUBIC, reducing_gap=1) for i in image] + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] + mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = 
torch.from_numpy(mask) + + mask = 1 - mask + + return mask, image + + +class KandinskyInpaintPipeline(DiffusionPipeline): + """ + Pipeline for text-guided image inpainting using Kandinsky2.1 + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ image encoder and decoder + """ + + model_cpu_offload_seq = "text_encoder->unet->movq" + + def __init__( + self, + text_encoder: MultilingualCLIP, + movq: VQModel, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: DDIMScheduler, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + movq=movq, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + self._warn_has_been_called = False + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=77, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_input_ids = text_input_ids.to(device) + text_mask = text_inputs.attention_mask.to(device) + + prompt_embeds, text_encoder_hidden_states = self.text_encoder( + input_ids=text_input_ids, attention_mask=text_mask + ) + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + 
f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=77, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + uncond_text_input_ids = uncond_input.input_ids.to(device) + uncond_text_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder( + input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask + ) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.FloatTensor, PIL.Image.Image], + mask_image: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray], + image_embeds: torch.FloatTensor, + negative_image_embeds: torch.FloatTensor, + negative_prompt: Optional[Union[str, List[str]]] = None, + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.FloatTensor`, `PIL.Image.Image` or `np.ndarray`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + mask_image (`PIL.Image.Image`,`torch.FloatTensor` or `np.ndarray`): + `Image`, or a tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. 
You can pass a pytorch tensor as mask only if the
+ image you passed is a pytorch tensor, and it should contain one color channel (L) instead of 3, so the
+ expected shape would be either `(B, 1, H, W,)`, `(B, H, W)`, `(1, H, W)` or `(H, W)`. If image is a PIL
+ image or numpy array, mask should also be either a PIL image or numpy array. If it is a PIL image, it
+ will be converted to a single channel (luminance) before use. If it is a numpy array, the expected
+ shape is `(H, W)`.
+ image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
+ The CLIP image embeddings for the text prompt, which will be used to condition the image generation.
+ negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
+ The CLIP image embeddings for the negative text prompt, which will be used to condition the image generation.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ height (`int`, *optional*, defaults to 512):
+ The height in pixels of the generated image.
+ width (`int`, *optional*, defaults to 512):
+ The width in pixels of the generated image.
+ num_inference_steps (`int`, *optional*, defaults to 100):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ guidance_scale (`float`, *optional*, defaults to 4.0):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
+ (`np.array`) or `"pt"` (`torch.Tensor`).
+ callback (`Callable`, *optional*):
+ A function that is called every `callback_steps` steps during inference. The function is called with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
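Given the mask-convention change flagged by the runtime warning in this method, a mask written for the pre-0.19 behaviour (black pixels repainted) can be adapted by inverting it before the call; `old_mask` below is a hypothetical example array:

```py
import numpy as np

# Hypothetical mask in the pre-0.19 convention: 0.0 marked the region to repaint.
old_mask = np.ones((768, 768), dtype=np.float32)
old_mask[:250, 250:-250] = 0.0

# Current convention: white (1.0) pixels are repainted, black (0.0) pixels are kept.
mask_image = 1.0 - old_mask
```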
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse( + "0.23.0.dev0" + ): + logger.warning( + "Please note that the expected format of `mask_image` has recently been changed. " + "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved black pixels. " + "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. " + "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. " + "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. " + "This warning will be surpressed after the first inference call and will be removed in diffusers>0.23.0" + ) + self._warn_has_been_called = True + + # Define call parameters + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + + if do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=prompt_embeds.dtype, device=device + ) + + # preprocess image and mask + mask_image, image = prepare_mask_and_masked_image(image, mask_image, height, width) + + image = image.to(dtype=prompt_embeds.dtype, device=device) + image = self.movq.encode(image)["latents"] + + mask_image = mask_image.to(dtype=prompt_embeds.dtype, device=device) + + image_shape = tuple(image.shape[-2:]) + mask_image = F.interpolate( + mask_image, + image_shape, + mode="nearest", + ) + mask_image = prepare_mask(mask_image) + masked_image = image * mask_image + + mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) + masked_image = masked_image.repeat_interleave(num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + mask_image = mask_image.repeat(2, 1, 1, 1) + masked_image = masked_image.repeat(2, 1, 1, 1) + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps_tensor = self.scheduler.timesteps + + num_channels_latents = self.movq.config.latent_channels + + # get h, w for latents + sample_height, sample_width = get_new_h_w(height, width, self.movq_scale_factor) + + # create initial latent + latents = self.prepare_latents( + (batch_size, num_channels_latents, sample_height, sample_width), + text_encoder_hidden_states.dtype, + device, + generator, + latents, + self.scheduler, + ) + + # Check that sizes of mask, masked image and latents match with expected + num_channels_mask = mask_image.shape[1] + num_channels_masked_image = masked_image.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != 
self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + + for i, t in enumerate(self.progress_bar(timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = torch.cat([latent_model_input, masked_image, mask_image], dim=1) + + added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=text_encoder_hidden_states, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + ).prev_sample + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np", "pil"]: + raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py new file mode 100644 index 000000000..0d9f54349 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py @@ -0,0 +1,547 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...models import PriorTransformer +from ...schedulers import UnCLIPScheduler +from ...utils import ( + BaseOutput, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline + >>> import torch + + >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior") + >>> pipe_prior.to("cuda") + + >>> prompt = "red cat, 4k photo" + >>> out = pipe_prior(prompt) + >>> image_emb = out.image_embeds + >>> negative_image_emb = out.negative_image_embeds + + >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1") + >>> pipe.to("cuda") + + >>> image = pipe( + ... prompt, + ... image_embeds=image_emb, + ... negative_image_embeds=negative_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=100, + ... ).images + + >>> image[0].save("cat.png") + ``` +""" + +EXAMPLE_INTERPOLATE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyPriorPipeline, KandinskyPipeline + >>> from diffusers.utils import load_image + >>> import PIL + + >>> import torch + >>> from torchvision import transforms + + >>> pipe_prior = KandinskyPriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> img1 = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ) + + >>> img2 = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/starry_night.jpeg" + ... ) + + >>> images_texts = ["a cat", img1, img2] + >>> weights = [0.3, 0.3, 0.4] + >>> image_emb, zero_image_emb = pipe_prior.interpolate(images_texts, weights) + + >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16) + >>> pipe.to("cuda") + + >>> image = pipe( + ... "", + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=150, + ... ).images[0] + + >>> image.save("starry_cat.png") + ``` +""" + + +@dataclass +class KandinskyPriorPipelineOutput(BaseOutput): + """ + Output class for KandinskyPriorPipeline. + + Args: + image_embeds (`torch.FloatTensor`) + clip image embeddings for text prompt + negative_image_embeds (`List[PIL.Image.Image]` or `np.ndarray`) + clip image embeddings for unconditional tokens + """ + + image_embeds: Union[torch.FloatTensor, np.ndarray] + negative_image_embeds: Union[torch.FloatTensor, np.ndarray] + + +class KandinskyPriorPipeline(DiffusionPipeline): + """ + Pipeline for generating image prior for Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
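The `interpolate` method defined further down in this class mixes text prompts and images by encoding each condition to a CLIP image embedding and summing the embeddings with user-supplied weights. A minimal sketch of just that arithmetic with toy tensors; the embedding dimension below is illustrative, not taken from a real checkpoint:

```py
import torch

# stand-ins for the CLIP image embeddings of ["a cat", img1, img2]
emb_dim = 768  # illustrative; the real value comes from the image encoder config
embeddings = [torch.randn(1, emb_dim) for _ in range(3)]
weights = [0.3, 0.3, 0.4]

# weight each condition, then collapse them into a single conditioning embedding
weighted = [emb * w for emb, w in zip(embeddings, weights)]
image_emb = torch.cat(weighted).sum(dim=0, keepdim=True)
print(image_emb.shape)  # torch.Size([1, 768])
```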
+ + Args: + prior ([`PriorTransformer`]): + The canonincal unCLIP prior to approximate the image embedding from the text embedding. + image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + """ + + _exclude_from_cpu_offload = ["prior"] + model_cpu_offload_seq = "text_encoder->prior" + + def __init__( + self, + prior: PriorTransformer, + image_encoder: CLIPVisionModelWithProjection, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + scheduler: UnCLIPScheduler, + image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + prior=prior, + text_encoder=text_encoder, + tokenizer=tokenizer, + scheduler=scheduler, + image_encoder=image_encoder, + image_processor=image_processor, + ) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING) + def interpolate( + self, + images_and_prompts: List[Union[str, PIL.Image.Image, torch.FloatTensor]], + weights: List[float], + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + negative_prior_prompt: Optional[str] = None, + negative_prompt: str = "", + guidance_scale: float = 4.0, + device=None, + ): + """ + Function invoked when using the prior pipeline for interpolation. + + Args: + images_and_prompts (`List[Union[str, PIL.Image.Image, torch.FloatTensor]]`): + list of prompts and images to guide the image generation. + weights: (`List[float]`): + list of weights for each condition in `images_and_prompts` + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + negative_prior_prompt (`str`, *optional*): + The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if + `guidance_scale` is less than `1`). + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. Ignored when not using guidance (i.e., ignored if + `guidance_scale` is less than `1`). + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + + Examples: + + Returns: + [`KandinskyPriorPipelineOutput`] or `tuple` + """ + + device = device or self.device + + if len(images_and_prompts) != len(weights): + raise ValueError( + f"`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length" + ) + + image_embeddings = [] + for cond, weight in zip(images_and_prompts, weights): + if isinstance(cond, str): + image_emb = self( + cond, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + negative_prompt=negative_prior_prompt, + guidance_scale=guidance_scale, + ).image_embeds + + elif isinstance(cond, (PIL.Image.Image, torch.Tensor)): + if isinstance(cond, PIL.Image.Image): + cond = ( + self.image_processor(cond, return_tensors="pt") + .pixel_values[0] + .unsqueeze(0) + .to(dtype=self.image_encoder.dtype, device=device) + ) + + image_emb = self.image_encoder(cond)["image_embeds"] + + else: + raise ValueError( + f"`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}" + ) + + image_embeddings.append(image_emb * weight) + + image_emb = torch.cat(image_embeddings).sum(dim=0, keepdim=True) + + out_zero = self( + negative_prompt, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + negative_prompt=negative_prior_prompt, + guidance_scale=guidance_scale, + ) + zero_image_emb = out_zero.negative_image_embeds if negative_prompt == "" else out_zero.image_embeds + + return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=zero_image_emb) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def get_zero_embed(self, batch_size=1, device=None): + device = device or self.device + zero_img = torch.zeros(1, 3, self.image_encoder.config.image_size, self.image_encoder.config.image_size).to( + device=device, dtype=self.image_encoder.dtype + ) + zero_image_emb = self.image_encoder(zero_img)["image_embeds"] + zero_image_emb = zero_image_emb.repeat(batch_size, 1) + return zero_image_emb + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, 
self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + + prompt_embeds = text_encoder_output.text_embeds + text_encoder_hidden_states = text_encoder_output.last_hidden_state + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + uncond_text_mask = uncond_input.attention_mask.bool().to(device) + negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds + uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + guidance_scale: float = 4.0, + output_type: Optional[str] = "pt", + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + output_type (`str`, *optional*, defaults to `"pt"`): + The output format of the generate image. Choose between: `"np"` (`np.array`) or `"pt"` + (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
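Classifier-free guidance in this pipeline is implemented by batching the unconditional and conditional embeddings through the prior in a single forward pass (see `_encode_prompt` above), then splitting the prediction and recombining it with `guidance_scale` in the denoising loop below. A minimal standalone sketch of that pattern with dummy tensors; the shapes and the 4.0 scale are illustrative only:

```py
import torch

# toy stand-ins for the text-encoder outputs
batch, dim = 2, 1280
prompt_embeds = torch.randn(batch, dim)           # conditional embeddings
negative_prompt_embeds = torch.randn(batch, dim)  # unconditional embeddings
guidance_scale = 4.0

# one batched forward pass instead of two separate ones
model_input = torch.cat([negative_prompt_embeds, prompt_embeds])  # (2 * batch, dim)
prediction = model_input * 0.5  # placeholder for the model's forward pass

# split back into unconditional/conditional halves and apply the guidance update
pred_uncond, pred_text = prediction.chunk(2)
guided = pred_uncond + guidance_scale * (pred_text - pred_uncond)
print(guided.shape)  # torch.Size([2, 1280])
```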
+ + Examples: + + Returns: + [`KandinskyPriorPipelineOutput`] or `tuple` + """ + + if isinstance(prompt, str): + prompt = [prompt] + elif not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + elif not isinstance(negative_prompt, list) and negative_prompt is not None: + raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") + + # if the negative prompt is defined we double the batch size to + # directly retrieve the negative prompt embedding + if negative_prompt is not None: + prompt = prompt + negative_prompt + negative_prompt = 2 * negative_prompt + + device = self._execution_device + + batch_size = len(prompt) + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = guidance_scale > 1.0 + prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + # prior + self.scheduler.set_timesteps(num_inference_steps, device=device) + prior_timesteps_tensor = self.scheduler.timesteps + + embedding_dim = self.prior.config.embedding_dim + + latents = self.prepare_latents( + (batch_size, embedding_dim), + prompt_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + + for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + predicted_image_embedding = self.prior( + latent_model_input, + timestep=t, + proj_embedding=prompt_embeds, + encoder_hidden_states=text_encoder_hidden_states, + attention_mask=text_mask, + ).predicted_image_embedding + + if do_classifier_free_guidance: + predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) + predicted_image_embedding = predicted_image_embedding_uncond + guidance_scale * ( + predicted_image_embedding_text - predicted_image_embedding_uncond + ) + + if i + 1 == prior_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = prior_timesteps_tensor[i + 1] + + latents = self.scheduler.step( + predicted_image_embedding, + timestep=t, + sample=latents, + generator=generator, + prev_timestep=prev_timestep, + ).prev_sample + + latents = self.prior.post_process_latents(latents) + + image_embeddings = latents + + # if negative prompt has been defined, we retrieve split the image embedding into two + if negative_prompt is None: + zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device) + + self.maybe_free_model_hooks() + else: + image_embeddings, zero_embeds = image_embeddings.chunk(2) + + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.prior_hook.offload() + + if output_type not in ["pt", "np"]: + raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}") + + if output_type == "np": + image_embeddings = image_embeddings.cpu().numpy() + zero_embeds = zero_embeds.cpu().numpy() + + if not return_dict: + return (image_embeddings, zero_embeds) + + return KandinskyPriorPipelineOutput(image_embeds=image_embeddings, negative_image_embeds=zero_embeds) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/text_encoder.py 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/text_encoder.py new file mode 100644 index 000000000..caa0029f0 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky/text_encoder.py @@ -0,0 +1,27 @@ +import torch +from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel + + +class MCLIPConfig(XLMRobertaConfig): + model_type = "M-CLIP" + + def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs): + self.transformerDimensions = transformerDimSize + self.numDims = imageDimSize + super().__init__(**kwargs) + + +class MultilingualCLIP(PreTrainedModel): + config_class = MCLIPConfig + + def __init__(self, config, *args, **kwargs): + super().__init__(config, *args, **kwargs) + self.transformer = XLMRobertaModel(config) + self.LinearTransformation = torch.nn.Linear( + in_features=config.transformerDimensions, out_features=config.numDims + ) + + def forward(self, input_ids, attention_mask): + embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0] + embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] + return self.LinearTransformation(embs2), embs diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/__init__.py new file mode 100644 index 000000000..67e97f161 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/__init__.py @@ -0,0 +1,70 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_kandinsky2_2"] = ["KandinskyV22Pipeline"] + _import_structure["pipeline_kandinsky2_2_combined"] = [ + "KandinskyV22CombinedPipeline", + "KandinskyV22Img2ImgCombinedPipeline", + "KandinskyV22InpaintCombinedPipeline", + ] + _import_structure["pipeline_kandinsky2_2_controlnet"] = ["KandinskyV22ControlnetPipeline"] + _import_structure["pipeline_kandinsky2_2_controlnet_img2img"] = ["KandinskyV22ControlnetImg2ImgPipeline"] + _import_structure["pipeline_kandinsky2_2_img2img"] = ["KandinskyV22Img2ImgPipeline"] + _import_structure["pipeline_kandinsky2_2_inpainting"] = ["KandinskyV22InpaintPipeline"] + _import_structure["pipeline_kandinsky2_2_prior"] = ["KandinskyV22PriorPipeline"] + _import_structure["pipeline_kandinsky2_2_prior_emb2emb"] = ["KandinskyV22PriorEmb2EmbPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_kandinsky2_2 import KandinskyV22Pipeline + from .pipeline_kandinsky2_2_combined import ( + KandinskyV22CombinedPipeline, + KandinskyV22Img2ImgCombinedPipeline, + 
KandinskyV22InpaintCombinedPipeline, + ) + from .pipeline_kandinsky2_2_controlnet import KandinskyV22ControlnetPipeline + from .pipeline_kandinsky2_2_controlnet_img2img import KandinskyV22ControlnetImg2ImgPipeline + from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline + from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline + from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline + from .pipeline_kandinsky2_2_prior_emb2emb import KandinskyV22PriorEmb2EmbPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py new file mode 100644 index 000000000..4b977af0d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py @@ -0,0 +1,320 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, Dict, List, Optional, Union + +import torch + +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDPMScheduler +from ...utils import deprecate, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline + >>> import torch + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") + >>> pipe_prior.to("cuda") + >>> prompt = "red cat, 4k photo" + >>> out = pipe_prior(prompt) + >>> image_emb = out.image_embeds + >>> zero_image_emb = out.negative_image_embeds + >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") + >>> pipe.to("cuda") + >>> image = pipe( + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=50, + ... ).images + >>> image[0].save("cat.png") + ``` +""" + + +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +class KandinskyV22Pipeline(DiffusionPipeline): + """ + Pipeline for text-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. 
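For reference, the `downscale_height_and_width` helper above can be exercised on its own: it maps a requested pixel resolution to the latent resolution the UNet runs at, rounding up when the request is not an exact multiple of `scale_factor**2`. A small check with the definition copied verbatim and illustrative inputs:

```py
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor

print(downscale_height_and_width(512, 512))  # (64, 64): 512 px maps to a 64x64 latent grid
print(downscale_height_and_width(768, 760))  # (96, 96): 760 is not a multiple of 64, so it rounds up
```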
Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + """ + + model_cpu_offload_seq = "unet->movq" + _callback_tensor_inputs = ["latents", "image_embeds", "negative_image_embeds"] + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], + negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
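As a usage note for the `callback_on_step_end` hook documented above: the callback receives the pipeline, the step index, the timestep, and a dict containing the tensors requested via `callback_on_step_end_tensor_inputs`, and should return a dict whose entries replace the pipeline's copies. A hedged sketch; the function name and the logging it performs are made up for illustration:

```py
import torch
from diffusers import KandinskyV22Pipeline

def log_latent_stats(pipe, step, timestep, callback_kwargs):
    # inspect (or modify) the running latents; returned entries overwrite the pipeline's tensors
    latents = callback_kwargs["latents"]
    print(f"step {step} (t={int(timestep)}): latent std {latents.std().item():.3f}")
    return {"latents": latents}

pipe = KandinskyV22Pipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

# image_emb / zero_image_emb would come from the prior pipeline, as in the example docstring above
# image = pipe(
#     image_embeds=image_emb,
#     negative_image_embeds=zero_image_emb,
#     callback_on_step_end=log_latent_stats,
#     callback_on_step_end_tensor_inputs=["latents"],
# ).images[0]
```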
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + device = self._execution_device + + self._guidance_scale = guidance_scale + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + batch_size = image_embeds.shape[0] * num_images_per_prompt + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + + if self.do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=self.unet.dtype, device=device + ) + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + num_channels_latents = self.unet.config.in_channels + + height, width = downscale_height_and_width(height, width, self.movq_scale_factor) + + # create initial latent + latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + image_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + + self._num_timesteps = len(timesteps) + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + added_cond_kwargs = {"image_embeds": image_embeds} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=None, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if self.do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + )[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + image_embeds = 
callback_outputs.pop("image_embeds", image_embeds) + negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds) + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if output_type not in ["pt", "np", "pil", "latent"]: + raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") + + if not output_type == "latent": + # post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + else: + image = latents + + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py new file mode 100644 index 000000000..65ba22cd6 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -0,0 +1,851 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
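The combined pipeline assembled in this file is, in effect, a convenience wrapper that feeds a `KandinskyV22PriorPipeline` into a `KandinskyV22Pipeline`. A rough sketch of the same two-stage flow done by hand; the checkpoint names are taken from the example docstrings in this patch, and the dtype and device choices are illustrative:

```py
import torch
from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyV22Pipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

prompt = "red cat, 4k photo"

# 1) prior: text -> CLIP image embedding (plus an unconditional embedding for guidance)
image_emb, negative_image_emb = prior(prompt, guidance_scale=4.0, return_dict=False)

# 2) decoder: image embedding -> pixels via the UNet and the MoVQ decoder
image = decoder(
    image_embeds=image_emb,
    negative_image_embeds=negative_image_emb,
    height=768,
    width=768,
    num_inference_steps=50,
).images[0]
image.save("cat.png")
```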
+ +from typing import Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...models import PriorTransformer, UNet2DConditionModel, VQModel +from ...schedulers import DDPMScheduler, UnCLIPScheduler +from ...utils import deprecate, logging, replace_example_docstring +from ..pipeline_utils import DiffusionPipeline +from .pipeline_kandinsky2_2 import KandinskyV22Pipeline +from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline +from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline +from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +TEXT2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipe = AutoPipelineForText2Image.from_pretrained( + "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" + + image = pipe(prompt=prompt, num_inference_steps=25).images[0] + ``` +""" + +IMAGE2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForImage2Image + import torch + import requests + from io import BytesIO + from PIL import Image + import os + + pipe = AutoPipelineForImage2Image.from_pretrained( + "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A fantasy landscape, Cinematic lighting" + negative_prompt = "low quality, bad quality" + + url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + + response = requests.get(url) + image = Image.open(BytesIO(response.content)).convert("RGB") + image.thumbnail((768, 768)) + + image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0] + ``` +""" + +INPAINT_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForInpainting + from diffusers.utils import load_image + import torch + import numpy as np + + pipe = AutoPipelineForInpainting.from_pretrained( + "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A fantasy landscape, Cinematic lighting" + negative_prompt = "low quality, bad quality" + + original_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" + ) + + mask = np.zeros((768, 768), dtype=np.float32) + # Let's mask out an area above the cat's head + mask[:250, 250:-250] = 1 + + image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0] + ``` +""" + + +class KandinskyV22CombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for text-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. 
+ unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + prior_prior ([`PriorTransformer`]): + The canonincal unCLIP prior to approximate the image embedding from the text embedding. + prior_image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + prior_tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + prior_scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + prior_image_processor ([`CLIPImageProcessor`]): + A image_processor to be used to preprocess image from clip. + """ + + model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq" + _load_connected_pipes = True + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyV22PriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyV22Pipeline( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + Note that offloading happens on a submodule basis. Memory savings are higher than with + `enable_model_cpu_offload`, but performance is lower. 
+ """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + prior_callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference of the prior pipeline. + The function is called with the following arguments: `prior_callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. + prior_callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the + list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in + the `._callback_tensor_inputs` attribute of your prior pipeline class. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference of the decoder pipeline. + The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, + step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors + as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
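Because the combined `__call__` exposes the prior and decoder settings side by side, the two stages can be tuned independently. A hedged example of what that looks like from the user side, loaded through `AutoPipelineForText2Image` as in the example docstring above; all parameter values are illustrative:

```py
import torch
from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

image = pipe(
    prompt="A lion in galaxies, spirals, nebulae, stars",
    negative_prompt="low quality, bad quality",
    prior_guidance_scale=4.0,      # guidance for the text -> image-embedding prior
    prior_num_inference_steps=25,  # denoising steps for the prior
    guidance_scale=4.0,            # guidance for the embedding -> pixels decoder
    num_inference_steps=50,        # denoising steps for the decoder
    height=768,
    width=768,
).images[0]
image.save("lion.png")
```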
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + callback_on_step_end=prior_callback_on_step_end, + callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, + ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + outputs = self.decoder_pipe( + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, + width=width, + height=height, + num_inference_steps=num_inference_steps, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + callback=callback, + callback_steps=callback_steps, + return_dict=return_dict, + callback_on_step_end=callback_on_step_end, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + self.maybe_free_model_hooks() + + return outputs + + +class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + prior_prior ([`PriorTransformer`]): + The canonincal unCLIP prior to approximate the image embedding from the text embedding. + prior_image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + prior_tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + prior_scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + prior_image_processor ([`CLIPImageProcessor`]): + A image_processor to be used to preprocess image from clip. 
+ """ + + model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq" + _load_connected_pipes = True + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyV22PriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyV22Img2ImgPipeline( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + self.prior_pipe.enable_model_cpu_offload() + self.decoder_pipe.enable_model_cpu_offload() + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + Note that offloading happens on a submodule basis. Memory savings are higher than with + `enable_model_cpu_offload`, but performance is lower. 
+ """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + strength: float = 0.3, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded + again. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + strength (`float`, *optional*, defaults to 0.3): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. 
+            num_inference_steps (`int`, *optional*, defaults to 100):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            height (`int`, *optional*, defaults to 512):
+                The height in pixels of the generated image.
+            width (`int`, *optional*, defaults to 512):
+                The width in pixels of the generated image.
+            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2 of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
+                usually at the expense of lower image quality.
+            prior_num_inference_steps (`int`, *optional*, defaults to 25):
+                The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
+                image at the expense of slower inference.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.FloatTensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
+                (`np.array`) or `"pt"` (`torch.Tensor`).
+            callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference. The function is called
+                with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function is called. If not specified, the callback is called at
+                every step.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
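For orientation, a minimal usage sketch of the combined image-to-image pipeline documented above. Loading through `AutoPipelineForImage2Image` and the `kandinsky-community/kandinsky-2-2-decoder` checkpoint follows the library's usual conventions but is an assumption here; the prompt and strength value are illustrative, and the cat image URL is the one used by the doc examples elsewhere in this patch.

```py
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image

# Load prior + decoder as one combined img2img pipeline (assumed public checkpoint).
pipe = AutoPipelineForImage2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
).resize((768, 768))

# strength=0.3 keeps most of the original image; higher values transform it more.
image = pipe(
    prompt="A painting of a cat, 4k photo",
    image=init_image,
    strength=0.3,
    num_inference_steps=100,
    height=768,
    width=768,
).images[0]
image.save("cat_img2img.png")
```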
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + callback_on_step_end=prior_callback_on_step_end, + callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, + ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + image = [image] if isinstance(prompt, PIL.Image.Image) else image + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + if ( + isinstance(image, (list, tuple)) + and len(image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(image) == 0 + ): + image = (image_embeds.shape[0] // len(image)) * image + + outputs = self.decoder_pipe( + image=image, + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, + width=width, + height=height, + strength=strength, + num_inference_steps=num_inference_steps, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + callback=callback, + callback_steps=callback_steps, + return_dict=return_dict, + callback_on_step_end=callback_on_step_end, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + + self.maybe_free_model_hooks() + return outputs + + +class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for inpainting generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + prior_prior ([`PriorTransformer`]): + The canonincal unCLIP prior to approximate the image embedding from the text embedding. + prior_image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + prior_tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + prior_scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + prior_image_processor ([`CLIPImageProcessor`]): + A image_processor to be used to preprocess image from clip. 
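Likewise, a minimal sketch of the combined inpainting pipeline described by this docstring. The `AutoPipelineForInpainting` entry point and the `kandinsky-community/kandinsky-2-2-decoder-inpaint` checkpoint are assumptions based on the library's conventions; the mask convention (white pixels are repainted) matches the inpaint example later in this patch.

```py
import numpy as np
import torch
from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image

# Combined prior + inpaint decoder pipeline (assumed public checkpoint).
pipe = AutoPipelineForInpainting.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)

# White (1.0) regions of the mask are repainted, black (0.0) regions are preserved.
mask = np.zeros((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 1

image = pipe(
    prompt="a hat",
    image=init_image,
    mask_image=mask,
    height=768,
    width=768,
    num_inference_steps=50,
).images[0]
image.save("cat_with_hat.png")
```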
+ """ + + model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq" + _load_connected_pipes = True + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyV22PriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyV22InpaintPipeline( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + Note that offloading happens on a submodule basis. Memory savings are higher than with + `enable_model_cpu_offload`, but performance is lower. + """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + mask_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. 
+ + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded + again. + mask_image (`np.array`): + Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while + black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single + channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, + so the expected shape would be `(B, H, W, 1)`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + prior_callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `prior_callback_on_step_end(self: DiffusionPipeline, step: int, timestep: + int, callback_kwargs: Dict)`. + prior_callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the + list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in + the `._callback_tensor_inputs` attribute of your pipeline class. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_kwargs = {} + if kwargs.get("prior_callback", None) is not None: + prior_kwargs["callback"] = kwargs.pop("prior_callback") + deprecate( + "prior_callback", + "1.0.0", + "Passing `prior_callback` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`", + ) + if kwargs.get("prior_callback_steps", None) is not None: + deprecate( + "prior_callback_steps", + "1.0.0", + "Passing `prior_callback_steps` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`", + ) + prior_kwargs["callback_steps"] = kwargs.pop("prior_callback_steps") + + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + callback_on_step_end=prior_callback_on_step_end, + callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, + **prior_kwargs, + ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + image = [image] if isinstance(prompt, PIL.Image.Image) else image + mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + if ( + isinstance(image, (list, tuple)) + and len(image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(image) == 0 + ): + image = (image_embeds.shape[0] // len(image)) * image + + if ( + isinstance(mask_image, (list, tuple)) + and len(mask_image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(mask_image) == 0 + ): + mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image + + outputs = 
self.decoder_pipe( + image=image, + mask_image=mask_image, + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, + width=width, + height=height, + num_inference_steps=num_inference_steps, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + return_dict=return_dict, + callback_on_step_end=callback_on_step_end, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + **kwargs, + ) + self.maybe_free_model_hooks() + + return outputs diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py new file mode 100644 index 000000000..de87dd3c3 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py @@ -0,0 +1,320 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, List, Optional, Union + +import torch + +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDPMScheduler +from ...utils import ( + logging, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> import numpy as np + + >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline + >>> from transformers import pipeline + >>> from diffusers.utils import load_image + + + >>> def make_hint(image, depth_estimator): + ... image = depth_estimator(image)["depth"] + ... image = np.array(image) + ... image = image[:, :, None] + ... image = np.concatenate([image, image, image], axis=2) + ... detected_map = torch.from_numpy(image).float() / 255.0 + ... hint = detected_map.permute(2, 0, 1) + ... return hint + + + >>> depth_estimator = pipeline("depth-estimation") + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior = pipe_prior.to("cuda") + + >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + + >>> img = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... 
).resize((768, 768)) + + >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") + + >>> prompt = "A robot, 4k photo" + >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" + + >>> generator = torch.Generator(device="cuda").manual_seed(43) + + >>> image_emb, zero_image_emb = pipe_prior( + ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator + ... ).to_tuple() + + >>> images = pipe( + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... hint=hint, + ... num_inference_steps=50, + ... generator=generator, + ... height=768, + ... width=768, + ... ).images + + >>> images[0].save("robot_cat.png") + ``` +""" + + +# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +class KandinskyV22ControlnetPipeline(DiffusionPipeline): + """ + Pipeline for text-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. 
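As a quick sanity check of the `downscale_height_and_width` helper defined above (with the default `scale_factor=8`): the requested pixel size is rounded up to a multiple of `scale_factor**2` and then divided by `scale_factor`, giving the latent height and width the pipeline actually allocates. The values below are illustrative.

```py
# 768 is already a multiple of 64, so 768 -> 768 / 8 = 96 latent pixels per side.
assert downscale_height_and_width(768, 768) == (96, 96)
assert downscale_height_and_width(512, 512) == (64, 64)
# 500 is rounded up to 512 before dividing, so the result is the same as for 512.
assert downscale_height_and_width(500, 500) == (64, 64)
```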
+ """ + + model_cpu_offload_seq = "unet->movq" + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], + negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], + hint: torch.FloatTensor, + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + hint (`torch.FloatTensor`): + The controlnet condition. + image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. 
+ latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + device = self._execution_device + + do_classifier_free_guidance = guidance_scale > 1.0 + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + if isinstance(hint, list): + hint = torch.cat(hint, dim=0) + + batch_size = image_embeds.shape[0] * num_images_per_prompt + + if do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + hint = hint.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=self.unet.dtype, device=device + ) + hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device) + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps_tensor = self.scheduler.timesteps + + num_channels_latents = self.movq.config.latent_channels + + height, width = downscale_height_and_width(height, width, self.movq_scale_factor) + + # create initial latent + latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + image_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + + for i, t in enumerate(self.progress_bar(timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=None, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy 
sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + )[0] + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + # post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + # Offload all models + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np", "pil"]: + raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py new file mode 100644 index 000000000..c3ac7bcf6 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py @@ -0,0 +1,381 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from PIL import Image + +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDPMScheduler +from ...utils import ( + logging, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> import numpy as np + + >>> from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22ControlnetImg2ImgPipeline + >>> from transformers import pipeline + >>> from diffusers.utils import load_image + + + >>> def make_hint(image, depth_estimator): + ... image = depth_estimator(image)["depth"] + ... image = np.array(image) + ... image = image[:, :, None] + ... image = np.concatenate([image, image, image], axis=2) + ... detected_map = torch.from_numpy(image).float() / 255.0 + ... hint = detected_map.permute(2, 0, 1) + ... return hint + + + >>> depth_estimator = pipeline("depth-estimation") + + >>> pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior = pipe_prior.to("cuda") + + >>> pipe = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 + ... 
) + >>> pipe = pipe.to("cuda") + + >>> img = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ).resize((768, 768)) + + + >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") + + >>> prompt = "A robot, 4k photo" + >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" + + >>> generator = torch.Generator(device="cuda").manual_seed(43) + + >>> img_emb = pipe_prior(prompt=prompt, image=img, strength=0.85, generator=generator) + >>> negative_emb = pipe_prior(prompt=negative_prior_prompt, image=img, strength=1, generator=generator) + + >>> images = pipe( + ... image=img, + ... strength=0.5, + ... image_embeds=img_emb.image_embeds, + ... negative_image_embeds=negative_emb.image_embeds, + ... hint=hint, + ... num_inference_steps=50, + ... generator=generator, + ... height=768, + ... width=768, + ... ).images + + >>> images[0].save("robot_cat.png") + ``` +""" + + +# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +# Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.prepare_image +def prepare_image(pil_image, w=512, h=512): + pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) + arr = np.array(pil_image.convert("RGB")) + arr = arr.astype(np.float32) / 127.5 - 1 + arr = np.transpose(arr, [2, 0, 1]) + image = torch.from_numpy(arr).unsqueeze(0) + return image + + +class KandinskyV22ControlnetImg2ImgPipeline(DiffusionPipeline): + """ + Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. 
+ """ + + model_cpu_offload_seq = "unet->movq" + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + + # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.KandinskyImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2_img2img.KandinskyV22Img2ImgPipeline.prepare_latents + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = self.movq.encode(image).latent_dist.sample(generator) + + init_latents = self.movq.config.scaling_factor * init_latents + + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + @torch.no_grad() + def __call__( + self, + image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], + image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], + hint: torch.FloatTensor, + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + strength: float = 0.3, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. 
Can also accept image latents as `image`, if passing latents directly, it will not be encoded + again. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + hint (`torch.FloatTensor`): + The controlnet condition. + negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
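To make `strength` concrete, this is the arithmetic performed by the `get_timesteps` method above; the specific values are illustrative.

```py
# Roughly int(num_inference_steps * strength) denoising steps actually run.
num_inference_steps = 50
strength = 0.3
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 15
t_start = max(num_inference_steps - init_timestep, 0)                          # 35
# The denoising loop runs over the last 15 of the 50 scheduler timesteps,
# so a small strength stays close to the input image.
```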
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + device = self._execution_device + + do_classifier_free_guidance = guidance_scale > 1.0 + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + if isinstance(hint, list): + hint = torch.cat(hint, dim=0) + + batch_size = image_embeds.shape[0] + + if do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + hint = hint.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=self.unet.dtype, device=device + ) + hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device) + + if not isinstance(image, list): + image = [image] + if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image): + raise ValueError( + f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor" + ) + + image = torch.cat([prepare_image(i, width, height) for i in image], dim=0) + image = image.to(dtype=image_embeds.dtype, device=device) + + latents = self.movq.encode(image)["latents"] + latents = latents.repeat_interleave(num_images_per_prompt, dim=0) + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + height, width = downscale_height_and_width(height, width, self.movq_scale_factor) + latents = self.prepare_latents( + latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator + ) + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=None, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + )[0] + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + # Offload all models + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np", "pil"]: + raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") + 
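+        # Map decoded pixels from [-1, 1] to [0, 1], move to CPU in NHWC order, and convert
+        # to NumPy (and to PIL images when output_type == "pil").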
+ if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py new file mode 100644 index 000000000..3fdae934a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py @@ -0,0 +1,399 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from PIL import Image + +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDPMScheduler +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline + >>> from diffusers.utils import load_image + >>> import torch + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> prompt = "A red cartoon frog, 4k" + >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) + + >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + + >>> init_image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/frog.png" + ... ) + + >>> image = pipe( + ... image=init_image, + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=100, + ... strength=0.2, + ... 
).images + + >>> image[0].save("red_frog.png") + ``` +""" + + +# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +# Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.prepare_image +def prepare_image(pil_image, w=512, h=512): + pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) + arr = np.array(pil_image.convert("RGB")) + arr = arr.astype(np.float32) / 127.5 - 1 + arr = np.transpose(arr, [2, 0, 1]) + image = torch.from_numpy(arr).unsqueeze(0) + return image + + +class KandinskyV22Img2ImgPipeline(DiffusionPipeline): + """ + Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + """ + + model_cpu_offload_seq = "unet->movq" + _callback_tensor_inputs = ["latents", "image_embeds", "negative_image_embeds"] + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + + # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.KandinskyImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + init_latents = [ + self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = self.movq.encode(image).latent_dist.sample(generator) + + init_latents = self.movq.config.scaling_factor * init_latents + + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + def __call__( + self, + image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], + image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], + negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + strength: float = 0.3, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded + again. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. 
of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + device = self._execution_device + + self._guidance_scale = guidance_scale + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + batch_size = image_embeds.shape[0] + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + + if self.do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=self.unet.dtype, device=device + ) + + if not isinstance(image, list): + image = [image] + if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image): + raise ValueError( + f"Input is in incorrect format: {[type(i) for i in image]}. 
Currently, we only support PIL image and pytorch tensor" + ) + + image = torch.cat([prepare_image(i, width, height) for i in image], dim=0) + image = image.to(dtype=image_embeds.dtype, device=device) + + latents = self.movq.encode(image)["latents"] + latents = latents.repeat_interleave(num_images_per_prompt, dim=0) + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + height, width = downscale_height_and_width(height, width, self.movq_scale_factor) + latents = self.prepare_latents( + latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator + ) + self._num_timesteps = len(timesteps) + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + added_cond_kwargs = {"image_embeds": image_embeds} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=None, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if self.do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + )[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + image_embeds = callback_outputs.pop("image_embeds", image_embeds) + negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds) + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if output_type not in ["pt", "np", "pil", "latent"]: + raise ValueError( + f"Only the output types `pt`, `pil` ,`np` and `latent` are supported not output_type={output_type}" + ) + + if not output_type == "latent": + # post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + else: + image = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py new file 
mode 100644 index 000000000..2fb8731f8 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py @@ -0,0 +1,556 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from copy import deepcopy +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from packaging import version +from PIL import Image + +from ... import __version__ +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDPMScheduler +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline + >>> from diffusers.utils import load_image + >>> import torch + >>> import numpy as np + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> prompt = "a hat" + >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) + + >>> pipe = KandinskyV22InpaintPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + + >>> init_image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ) + + >>> mask = np.zeros((768, 768), dtype=np.float32) + >>> mask[:250, 250:-250] = 1 + + >>> out = pipe( + ... image=init_image, + ... mask_image=mask, + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=50, + ... 
) + + >>> image = out.images[0] + >>> image.save("cat_with_hat.png") + ``` +""" + + +# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +# Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_inpaint.prepare_mask +def prepare_mask(masks): + prepared_masks = [] + for mask in masks: + old_mask = deepcopy(mask) + for i in range(mask.shape[1]): + for j in range(mask.shape[2]): + if old_mask[0][i][j] == 1: + continue + if i != 0: + mask[:, i - 1, j] = 0 + if j != 0: + mask[:, i, j - 1] = 0 + if i != 0 and j != 0: + mask[:, i - 1, j - 1] = 0 + if i != mask.shape[1] - 1: + mask[:, i + 1, j] = 0 + if j != mask.shape[2] - 1: + mask[:, i, j + 1] = 0 + if i != mask.shape[1] - 1 and j != mask.shape[2] - 1: + mask[:, i + 1, j + 1] = 0 + prepared_masks.append(mask) + return torch.stack(prepared_masks, dim=0) + + +# Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_inpaint.prepare_mask_and_masked_image +def prepare_mask_and_masked_image(image, mask, height, width): + r""" + Prepares a pair (mask, image) to be consumed by the Kandinsky inpaint pipeline. This means that those inputs will + be converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for + the ``image`` and ``1`` for the ``mask``. + + The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be + binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. + It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` + ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + tuple[torch.Tensor]: The pair (mask, image) as ``torch.Tensor`` with 4 + dimensions: ``batch x channels x height x width``. 
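A minimal illustrative sketch (not part of the patch) of what `prepare_mask_and_masked_image` above returns for a plain PIL image and a NumPy mask, assuming the helper defined above is importable from this module; because of the final `1 - mask` inversion, the returned mask is 1 where pixels are kept and 0 where they will be repainted:

```py
import numpy as np
import PIL.Image

# A gray 768x768 image and a mask whose white strip marks the region to repaint.
init_image = PIL.Image.new("RGB", (768, 768), "gray")
mask = np.zeros((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 1

mask_t, image_t = prepare_mask_and_masked_image(init_image, mask, height=768, width=768)
print(image_t.shape)                   # torch.Size([1, 3, 768, 768]), float32, values in [-1, 1]
print(mask_t.shape, mask_t.unique())   # torch.Size([1, 1, 768, 768]) tensor([0., 1.])
```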
+ """ + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + if mask is None: + raise ValueError("`mask_image` input cannot be undefined.") + + if isinstance(image, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") + + # Batch single image + if image.ndim == 3: + assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" + image = image.unsqueeze(0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" + assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" + assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" + + # Check image is in [-1, 1] + if image.min() < -1 or image.max() > 1: + raise ValueError("Image should be in [-1, 1] range") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # Image as float32 + image = image.to(dtype=torch.float32) + elif isinstance(mask, torch.Tensor): + raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") + else: + # preprocess image + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + # resize all images w.r.t passed height an width + image = [i.resize((width, height), resample=Image.BICUBIC, reducing_gap=1) for i in image] + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] + mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + mask = 1 - mask + + return mask, image + + +class KandinskyV22InpaintPipeline(DiffusionPipeline): + """ + Pipeline for text-guided image inpainting using Kandinsky2.1 + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. 
+ movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + """ + + model_cpu_offload_seq = "unet->movq" + _callback_tensor_inputs = ["latents", "image_embeds", "negative_image_embeds", "masked_image", "mask_image"] + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + self._warn_has_been_called = False + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + def __call__( + self, + image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], + image: Union[torch.FloatTensor, PIL.Image.Image], + mask_image: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray], + negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will + be masked out with `mask_image` and repainted according to `prompt`. + mask_image (`np.array`): + Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while + black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single + channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, + so the expected shape would be `(B, H, W, 1)`. + negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. 
+ guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2 of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that is called at the end of each denoising step during inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse( + "0.23.0.dev0" + ): + logger.warning( + "Please note that the expected format of `mask_image` has recently been changed. " + "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved white pixels. " + "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. " + "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. " + "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. 
" + "This warning will be surpressed after the first inference call and will be removed in diffusers>0.23.0" + ) + self._warn_has_been_called = True + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + self._guidance_scale = guidance_scale + + device = self._execution_device + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + batch_size = image_embeds.shape[0] * num_images_per_prompt + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + + if self.do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=self.unet.dtype, device=device + ) + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # preprocess image and mask + mask_image, image = prepare_mask_and_masked_image(image, mask_image, height, width) + + image = image.to(dtype=image_embeds.dtype, device=device) + image = self.movq.encode(image)["latents"] + + mask_image = mask_image.to(dtype=image_embeds.dtype, device=device) + + image_shape = tuple(image.shape[-2:]) + mask_image = F.interpolate( + mask_image, + image_shape, + mode="nearest", + ) + mask_image = prepare_mask(mask_image) + masked_image = image * mask_image + + mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) + masked_image = masked_image.repeat_interleave(num_images_per_prompt, dim=0) + if self.do_classifier_free_guidance: + mask_image = mask_image.repeat(2, 1, 1, 1) + masked_image = masked_image.repeat(2, 1, 1, 1) + + num_channels_latents = self.movq.config.latent_channels + + height, width = downscale_height_and_width(height, width, self.movq_scale_factor) + + # create initial latent + latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + image_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + noise = torch.clone(latents) + + self._num_timesteps = len(timesteps) + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = torch.cat([latent_model_input, masked_image, mask_image], dim=1) + + added_cond_kwargs = {"image_embeds": image_embeds} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=None, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if self.do_classifier_free_guidance: + noise_pred, 
variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + )[0] + init_latents_proper = image[:1] + init_mask = mask_image[:1] + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = init_mask * init_latents_proper + (1 - init_mask) * latents + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + image_embeds = callback_outputs.pop("image_embeds", image_embeds) + negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds) + masked_image = callback_outputs.pop("masked_image", masked_image) + mask_image = callback_outputs.pop("mask_image", mask_image) + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # post-processing + latents = mask_image[:1] * image[:1] + (1 - mask_image[:1]) * latents + + if output_type not in ["pt", "np", "pil", "latent"]: + raise ValueError( + f"Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}" + ) + + if not output_type == "latent": + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + else: + image = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py new file mode 100644 index 000000000..83427c68f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py @@ -0,0 +1,549 @@ +from typing import Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...models import PriorTransformer +from ...schedulers import UnCLIPScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..kandinsky import KandinskyPriorPipelineOutput +from ..pipeline_utils import DiffusionPipeline + + +logger = logging.get_logger(__name__) # pylint: 
disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline + >>> import torch + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") + >>> pipe_prior.to("cuda") + >>> prompt = "red cat, 4k photo" + >>> image_emb, negative_image_emb = pipe_prior(prompt).to_tuple() + + >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") + >>> pipe.to("cuda") + >>> image = pipe( + ... image_embeds=image_emb, + ... negative_image_embeds=negative_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=50, + ... ).images + >>> image[0].save("cat.png") + ``` +""" + +EXAMPLE_INTERPOLATE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline + >>> from diffusers.utils import load_image + >>> import PIL + >>> import torch + >>> from torchvision import transforms + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + >>> img1 = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ) + >>> img2 = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/starry_night.jpeg" + ... ) + >>> images_texts = ["a cat", img1, img2] + >>> weights = [0.3, 0.3, 0.4] + >>> out = pipe_prior.interpolate(images_texts, weights) + >>> pipe = KandinskyV22Pipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + >>> image = pipe( + ... image_embeds=out.image_embeds, + ... negative_image_embeds=out.negative_image_embeds, + ... height=768, + ... width=768, + ... num_inference_steps=50, + ... ).images[0] + >>> image.save("starry_cat.png") + ``` +""" + + +class KandinskyV22PriorPipeline(DiffusionPipeline): + """ + Pipeline for generating image prior for Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + prior ([`PriorTransformer`]): + The canonincal unCLIP prior to approximate the image embedding from the text embedding. + image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + image_processor ([`CLIPImageProcessor`]): + A image_processor to be used to preprocess image from clip. 
+ """ + + model_cpu_offload_seq = "text_encoder->image_encoder->prior" + _exclude_from_cpu_offload = ["prior"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "text_encoder_hidden_states", "text_mask"] + + def __init__( + self, + prior: PriorTransformer, + image_encoder: CLIPVisionModelWithProjection, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + scheduler: UnCLIPScheduler, + image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + prior=prior, + text_encoder=text_encoder, + tokenizer=tokenizer, + scheduler=scheduler, + image_encoder=image_encoder, + image_processor=image_processor, + ) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING) + def interpolate( + self, + images_and_prompts: List[Union[str, PIL.Image.Image, torch.FloatTensor]], + weights: List[float], + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + negative_prior_prompt: Optional[str] = None, + negative_prompt: str = "", + guidance_scale: float = 4.0, + device=None, + ): + """ + Function invoked when using the prior pipeline for interpolation. + + Args: + images_and_prompts (`List[Union[str, PIL.Image.Image, torch.FloatTensor]]`): + list of prompts and images to guide the image generation. + weights: (`List[float]`): + list of weights for each condition in `images_and_prompts` + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + negative_prior_prompt (`str`, *optional*): + The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if + `guidance_scale` is less than `1`). + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. Ignored when not using guidance (i.e., ignored if + `guidance_scale` is less than `1`). + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. 
+ + Examples: + + Returns: + [`KandinskyPriorPipelineOutput`] or `tuple` + """ + + device = device or self.device + + if len(images_and_prompts) != len(weights): + raise ValueError( + f"`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length" + ) + + image_embeddings = [] + for cond, weight in zip(images_and_prompts, weights): + if isinstance(cond, str): + image_emb = self( + cond, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + negative_prompt=negative_prior_prompt, + guidance_scale=guidance_scale, + ).image_embeds.unsqueeze(0) + + elif isinstance(cond, (PIL.Image.Image, torch.Tensor)): + if isinstance(cond, PIL.Image.Image): + cond = ( + self.image_processor(cond, return_tensors="pt") + .pixel_values[0] + .unsqueeze(0) + .to(dtype=self.image_encoder.dtype, device=device) + ) + + image_emb = self.image_encoder(cond)["image_embeds"].repeat(num_images_per_prompt, 1).unsqueeze(0) + + else: + raise ValueError( + f"`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}" + ) + + image_embeddings.append(image_emb * weight) + + image_emb = torch.cat(image_embeddings).sum(dim=0) + + out_zero = self( + negative_prompt, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + negative_prompt=negative_prior_prompt, + guidance_scale=guidance_scale, + ) + zero_image_emb = out_zero.negative_image_embeds if negative_prompt == "" else out_zero.image_embeds + + return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=zero_image_emb) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_prior.KandinskyPriorPipeline.get_zero_embed + def get_zero_embed(self, batch_size=1, device=None): + device = device or self.device + zero_img = torch.zeros(1, 3, self.image_encoder.config.image_size, self.image_encoder.config.image_size).to( + device=device, dtype=self.image_encoder.dtype + ) + zero_image_emb = self.image_encoder(zero_img)["image_embeds"] + zero_image_emb = zero_image_emb.repeat(batch_size, 1) + return zero_image_emb + + # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_prior.KandinskyPriorPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not 
torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + + prompt_embeds = text_encoder_output.text_embeds + text_encoder_hidden_states = text_encoder_output.last_hidden_state + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + uncond_text_mask = uncond_input.attention_mask.bool().to(device) + negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds + uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + guidance_scale: float = 4.0, + output_type: Optional[str] = "pt", # pt only + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + output_type (`str`, *optional*, defaults to `"pt"`): + The output format of the generate image. Choose between: `"np"` (`np.array`) or `"pt"` + (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. 
`callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`KandinskyPriorPipelineOutput`] or `tuple` + """ + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if isinstance(prompt, str): + prompt = [prompt] + elif not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + elif not isinstance(negative_prompt, list) and negative_prompt is not None: + raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") + + # if the negative prompt is defined we double the batch size to + # directly retrieve the negative prompt embedding + if negative_prompt is not None: + prompt = prompt + negative_prompt + negative_prompt = 2 * negative_prompt + + device = self._execution_device + + batch_size = len(prompt) + batch_size = batch_size * num_images_per_prompt + + self._guidance_scale = guidance_scale + + prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( + prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt + ) + + # prior + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + embedding_dim = self.prior.config.embedding_dim + + latents = self.prepare_latents( + (batch_size, embedding_dim), + prompt_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + self._num_timesteps = len(timesteps) + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + predicted_image_embedding = self.prior( + latent_model_input, + timestep=t, + proj_embedding=prompt_embeds, + encoder_hidden_states=text_encoder_hidden_states, + attention_mask=text_mask, + ).predicted_image_embedding + + if self.do_classifier_free_guidance: + predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) + predicted_image_embedding = predicted_image_embedding_uncond + self.guidance_scale * ( + predicted_image_embedding_text - predicted_image_embedding_uncond + ) + + if i + 1 == timesteps.shape[0]: + prev_timestep = None + else: + prev_timestep = timesteps[i + 1] + + latents = self.scheduler.step( + predicted_image_embedding, + timestep=t, + sample=latents, + generator=generator, + prev_timestep=prev_timestep, + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = 
callback_outputs.pop("prompt_embeds", prompt_embeds) + text_encoder_hidden_states = callback_outputs.pop( + "text_encoder_hidden_states", text_encoder_hidden_states + ) + text_mask = callback_outputs.pop("text_mask", text_mask) + + latents = self.prior.post_process_latents(latents) + + image_embeddings = latents + + # if a negative prompt has been defined, we split the image embedding into two + if negative_prompt is None: + zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device) + else: + image_embeddings, zero_embeds = image_embeddings.chunk(2) + + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np"]: + raise ValueError(f"Only the output types `pt` and `np` are supported, not output_type={output_type}") + + if output_type == "np": + image_embeddings = image_embeddings.cpu().numpy() + zero_embeds = zero_embeds.cpu().numpy() + + if not return_dict: + return (image_embeddings, zero_embeds) + + return KandinskyPriorPipelineOutput(image_embeds=image_embeddings, negative_image_embeds=zero_embeds) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py new file mode 100644 index 000000000..bef70821c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py @@ -0,0 +1,563 @@ +from typing import List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...models import PriorTransformer +from ...schedulers import UnCLIPScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..kandinsky import KandinskyPriorPipelineOutput +from ..pipeline_utils import DiffusionPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline + >>> from diffusers.utils import load_image + >>> import torch + + >>> pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> prompt = "red cat, 4k photo" + >>> img = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ) + >>> image_emb, negative_image_emb = pipe_prior(prompt, image=img, strength=0.2).to_tuple() + + >>> pipe = KandinskyV22Pipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + + >>> image = pipe( + ... image_embeds=image_emb, + ... negative_image_embeds=negative_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=100, + ... ).images + + >>> image[0].save("cat.png") + ``` +""" + +EXAMPLE_INTERPOLATE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22Pipeline + >>> from diffusers.utils import load_image + >>> import PIL + + >>> import torch + >>> from torchvision import transforms + + >>> pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ...
) + >>> pipe_prior.to("cuda") + + >>> img1 = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ) + + >>> img2 = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/starry_night.jpeg" + ... ) + + >>> images_texts = ["a cat", img1, img2] + >>> weights = [0.3, 0.3, 0.4] + >>> image_emb, zero_image_emb = pipe_prior.interpolate(images_texts, weights) + + >>> pipe = KandinskyV22Pipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + + >>> image = pipe( + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=150, + ... ).images[0] + + >>> image.save("starry_cat.png") + ``` +""" + + +class KandinskyV22PriorEmb2EmbPipeline(DiffusionPipeline): + """ + Pipeline for generating image prior for Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + prior ([`PriorTransformer`]): + The canonincal unCLIP prior to approximate the image embedding from the text embedding. + image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->prior" + _exclude_from_cpu_offload = ["prior"] + + def __init__( + self, + prior: PriorTransformer, + image_encoder: CLIPVisionModelWithProjection, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + scheduler: UnCLIPScheduler, + image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + prior=prior, + text_encoder=text_encoder, + tokenizer=tokenizer, + scheduler=scheduler, + image_encoder=image_encoder, + image_processor=image_processor, + ) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING) + def interpolate( + self, + images_and_prompts: List[Union[str, PIL.Image.Image, torch.FloatTensor]], + weights: List[float], + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + negative_prior_prompt: Optional[str] = None, + negative_prompt: str = "", + guidance_scale: float = 4.0, + device=None, + ): + """ + Function invoked when using the prior pipeline for interpolation. + + Args: + images_and_prompts (`List[Union[str, PIL.Image.Image, torch.FloatTensor]]`): + list of prompts and images to guide the image generation. 
+ weights: (`List[float]`): + list of weights for each condition in `images_and_prompts` + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + negative_prior_prompt (`str`, *optional*): + The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if + `guidance_scale` is less than `1`). + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. Ignored when not using guidance (i.e., ignored if + `guidance_scale` is less than `1`). + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2 of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality.
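A minimal sketch (not part of the patch) of the classifier-free guidance blend that `guidance_scale` (the `w` above) controls in these pipelines' denoising loops; the tensors here are stand-ins, not real model outputs:

```py
import torch

# One batched forward pass yields an unconditional and a text-conditioned prediction;
# the pipeline then blends them as: guided = uncond + w * (text - uncond).
w = 4.0                      # guidance_scale
uncond = torch.zeros(1, 4)   # stand-in for the unconditional prediction
text = torch.ones(1, 4)      # stand-in for the text-conditioned prediction
guided = uncond + w * (text - uncond)
print(guided)                # tensor([[4., 4., 4., 4.]]); w <= 1 disables guidance
```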
+ + Examples: + + Returns: + [`KandinskyPriorPipelineOutput`] or `tuple` + """ + + device = device or self.device + + if len(images_and_prompts) != len(weights): + raise ValueError( + f"`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length" + ) + + image_embeddings = [] + for cond, weight in zip(images_and_prompts, weights): + if isinstance(cond, str): + image_emb = self( + cond, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + negative_prompt=negative_prior_prompt, + guidance_scale=guidance_scale, + ).image_embeds.unsqueeze(0) + + elif isinstance(cond, (PIL.Image.Image, torch.Tensor)): + image_emb = self._encode_image( + cond, device=device, num_images_per_prompt=num_images_per_prompt + ).unsqueeze(0) + + else: + raise ValueError( + f"`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}" + ) + + image_embeddings.append(image_emb * weight) + + image_emb = torch.cat(image_embeddings).sum(dim=0) + + return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=torch.randn_like(image_emb)) + + def _encode_image( + self, + image: Union[torch.Tensor, List[PIL.Image.Image]], + device, + num_images_per_prompt, + ): + if not isinstance(image, torch.Tensor): + image = self.image_processor(image, return_tensors="pt").pixel_values.to( + dtype=self.image_encoder.dtype, device=device + ) + + image_emb = self.image_encoder(image)["image_embeds"] # B, D + image_emb = image_emb.repeat_interleave(num_images_per_prompt, dim=0) + image_emb.to(device=device) + + return image_emb + + def prepare_latents(self, emb, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + emb = emb.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + init_latents = emb + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_prior.KandinskyPriorPipeline.get_zero_embed + def get_zero_embed(self, batch_size=1, device=None): + device = device or self.device + zero_img = torch.zeros(1, 3, self.image_encoder.config.image_size, self.image_encoder.config.image_size).to( + device=device, dtype=self.image_encoder.dtype + ) + zero_image_emb = self.image_encoder(zero_img)["image_embeds"] + zero_image_emb = zero_image_emb.repeat(batch_size, 1) + return zero_image_emb + + # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_prior.KandinskyPriorPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + + prompt_embeds = text_encoder_output.text_embeds + text_encoder_hidden_states = text_encoder_output.last_hidden_state + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+                )
+            else:
+                uncond_tokens = negative_prompt
+
+            uncond_input = self.tokenizer(
+                uncond_tokens,
+                padding="max_length",
+                max_length=self.tokenizer.model_max_length,
+                truncation=True,
+                return_tensors="pt",
+            )
+            uncond_text_mask = uncond_input.attention_mask.bool().to(device)
+            negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device))
+
+            negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds
+            uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state
+
+            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+
+            seq_len = negative_prompt_embeds.shape[1]
+            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
+            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
+
+            seq_len = uncond_text_encoder_hidden_states.shape[1]
+            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
+            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
+                batch_size * num_images_per_prompt, seq_len, -1
+            )
+            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
+
+            # done duplicates
+
+            # For classifier free guidance, we need to do two forward passes.
+            # Here we concatenate the unconditional and text embeddings into a single batch
+            # to avoid doing two forward passes
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
+
+            text_mask = torch.cat([uncond_text_mask, text_mask])
+
+        return prompt_embeds, text_encoder_hidden_states, text_mask
+
+    @torch.no_grad()
+    @replace_example_docstring(EXAMPLE_DOC_STRING)
+    def __call__(
+        self,
+        prompt: Union[str, List[str]],
+        image: Union[torch.Tensor, List[torch.Tensor], PIL.Image.Image, List[PIL.Image.Image]],
+        strength: float = 0.3,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        num_images_per_prompt: int = 1,
+        num_inference_steps: int = 25,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        guidance_scale: float = 4.0,
+        output_type: Optional[str] = "pt",  # pt only
+        return_dict: bool = True,
+    ):
+        """
+        Function invoked when calling the pipeline for generation.
+
+        Args:
+            prompt (`str` or `List[str]`):
+                The prompt or prompts to guide the image generation.
+            strength (`float`, *optional*, defaults to 0.3):
+                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
+                will be used as a starting point, adding more noise to it the larger the `strength`. The number of
+                denoising steps depends on the amount of noise initially added.
+            image (`torch.Tensor`, `List[torch.Tensor]`, `PIL.Image.Image`, or `List[PIL.Image.Image]`):
+                The image or images to condition on. A 2-D tensor is treated as a pre-computed image embedding and is
+                used directly.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+                if `guidance_scale` is less than `1`).
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            num_inference_steps (`int`, *optional*, defaults to 25):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + output_type (`str`, *optional*, defaults to `"pt"`): + The output format of the generate image. Choose between: `"np"` (`np.array`) or `"pt"` + (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`KandinskyPriorPipelineOutput`] or `tuple` + """ + + if isinstance(prompt, str): + prompt = [prompt] + elif not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + elif not isinstance(negative_prompt, list) and negative_prompt is not None: + raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") + + # if the negative prompt is defined we double the batch size to + # directly retrieve the negative prompt embedding + if negative_prompt is not None: + prompt = prompt + negative_prompt + negative_prompt = 2 * negative_prompt + + device = self._execution_device + + batch_size = len(prompt) + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = guidance_scale > 1.0 + prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + if not isinstance(image, List): + image = [image] + + if isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + + if isinstance(image, torch.Tensor) and image.ndim == 2: + # allow user to pass image_embeds directly + image_embeds = image.repeat_interleave(num_images_per_prompt, dim=0) + elif isinstance(image, torch.Tensor) and image.ndim != 4: + raise ValueError( + f" if pass `image` as pytorch tensor, or a list of pytorch tensor, please make sure each tensor has shape [batch_size, channels, height, width], currently {image[0].unsqueeze(0).shape}" + ) + else: + image_embeds = self._encode_image(image, device, num_images_per_prompt) + + # prior + self.scheduler.set_timesteps(num_inference_steps, device=device) + + latents = image_embeds + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size) + latents = self.prepare_latents( + latents, + latent_timestep, + batch_size // num_images_per_prompt, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + predicted_image_embedding = self.prior( + latent_model_input, + timestep=t, + proj_embedding=prompt_embeds, + 
encoder_hidden_states=text_encoder_hidden_states, + attention_mask=text_mask, + ).predicted_image_embedding + + if do_classifier_free_guidance: + predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) + predicted_image_embedding = predicted_image_embedding_uncond + guidance_scale * ( + predicted_image_embedding_text - predicted_image_embedding_uncond + ) + + if i + 1 == timesteps.shape[0]: + prev_timestep = None + else: + prev_timestep = timesteps[i + 1] + + latents = self.scheduler.step( + predicted_image_embedding, + timestep=t, + sample=latents, + generator=generator, + prev_timestep=prev_timestep, + ).prev_sample + + latents = self.prior.post_process_latents(latents) + + image_embeddings = latents + + # if negative prompt has been defined, we retrieve split the image embedding into two + if negative_prompt is None: + zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device) + else: + image_embeddings, zero_embeds = image_embeddings.chunk(2) + + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np"]: + raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}") + + if output_type == "np": + image_embeddings = image_embeddings.cpu().numpy() + zero_embeds = zero_embeds.cpu().numpy() + + if not return_dict: + return (image_embeddings, zero_embeds) + + return KandinskyPriorPipelineOutput(image_embeds=image_embeddings, negative_image_embeds=zero_embeds) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/__init__.py new file mode 100644 index 000000000..e8a306314 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/__init__.py @@ -0,0 +1,49 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_kandinsky3"] = ["Kandinsky3Pipeline"] + _import_structure["pipeline_kandinsky3_img2img"] = ["Kandinsky3Img2ImgPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_kandinsky3 import Kandinsky3Pipeline + from .pipeline_kandinsky3_img2img import Kandinsky3Img2ImgPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py new file mode 100644 index 000000000..4fe8c54eb --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 +import argparse +import fnmatch + +from safetensors.torch import load_file + +from diffusers import Kandinsky3UNet + + +MAPPING = { + "to_time_embed.1": "time_embedding.linear_1", + "to_time_embed.3": "time_embedding.linear_2", + "in_layer": "conv_in", + "out_layer.0": "conv_norm_out", + "out_layer.2": "conv_out", + "down_samples": "down_blocks", + "up_samples": "up_blocks", + "projection_lin": "encoder_hid_proj.projection_linear", + "projection_ln": "encoder_hid_proj.projection_norm", + "feature_pooling": "add_time_condition", + "to_query": "to_q", + "to_key": "to_k", + "to_value": "to_v", + "output_layer": "to_out.0", + "self_attention_block": "attentions.0", +} + +DYNAMIC_MAP = { + "resnet_attn_blocks.*.0": "resnets_in.*", + "resnet_attn_blocks.*.1": ("attentions.*", 1), + "resnet_attn_blocks.*.2": "resnets_out.*", +} +# MAPPING = {} + + +def convert_state_dict(unet_state_dict): + """ + Convert the state dict of a U-Net model to match the key format expected by Kandinsky3UNet model. + Args: + unet_model (torch.nn.Module): The original U-Net model. + unet_kandi3_model (torch.nn.Module): The Kandinsky3UNet model to match keys with. + + Returns: + OrderedDict: The converted state dictionary. + """ + # Example of renaming logic (this will vary based on your model's architecture) + converted_state_dict = {} + for key in unet_state_dict: + new_key = key + for pattern, new_pattern in MAPPING.items(): + new_key = new_key.replace(pattern, new_pattern) + + for dyn_pattern, dyn_new_pattern in DYNAMIC_MAP.items(): + has_matched = False + if fnmatch.fnmatch(new_key, f"*.{dyn_pattern}.*") and not has_matched: + star = int(new_key.split(dyn_pattern.split(".")[0])[-1].split(".")[1]) + + if isinstance(dyn_new_pattern, tuple): + new_star = star + dyn_new_pattern[-1] + dyn_new_pattern = dyn_new_pattern[0] + else: + new_star = star + + pattern = dyn_pattern.replace("*", str(star)) + new_pattern = dyn_new_pattern.replace("*", str(new_star)) + + new_key = new_key.replace(pattern, new_pattern) + has_matched = True + + converted_state_dict[new_key] = unet_state_dict[key] + + return converted_state_dict + + +def main(model_path, output_path): + # Load your original U-Net model + unet_state_dict = load_file(model_path) + + # Initialize your Kandinsky3UNet model + config = {} + + # Convert the state dict + converted_state_dict = convert_state_dict(unet_state_dict) + + unet = Kandinsky3UNet(config) + unet.load_state_dict(converted_state_dict) + + unet.save_pretrained(output_path) + print(f"Converted model saved to {output_path}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Convert U-Net PyTorch model to Kandinsky3UNet format") + parser.add_argument("--model_path", type=str, required=True, help="Path to the original U-Net PyTorch model") + parser.add_argument("--output_path", type=str, required=True, help="Path to save the converted model") + + args = parser.parse_args() + main(args.model_path, args.output_path) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py new file mode 100644 index 000000000..fcf7ddcb9 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py @@ -0,0 +1,589 @@ +from typing import Callable, Dict, List, Optional, Union + +import torch +from transformers import T5EncoderModel, T5Tokenizer + +from ...loaders import LoraLoaderMixin +from ...models import Kandinsky3UNet, VQModel +from ...schedulers import DDPMScheduler +from ...utils import ( + deprecate, + is_accelerate_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import AutoPipelineForText2Image + >>> import torch + + >>> pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." + + >>> generator = torch.Generator(device="cpu").manual_seed(0) + >>> image = pipe(prompt, num_inference_steps=25, generator=generator).images[0] + ``` + +""" + + +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +class Kandinsky3Pipeline(DiffusionPipeline, LoraLoaderMixin): + model_cpu_offload_seq = "text_encoder->unet->movq" + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "negative_attention_mask", + "attention_mask", + ] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: Kandinsky3UNet, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, movq=movq + ) + + def remove_all_hooks(self): + if is_accelerate_available(): + from accelerate.hooks import remove_hook_from_module + else: + raise ImportError("Please install accelerate via `pip install accelerate`") + + for model in [self.text_encoder, self.unet, self.movq]: + if model is not None: + remove_hook_from_module(model, recurse=True) + + self.unet_offload_hook = None + self.text_encoder_offload_hook = None + self.final_offload_hook = None + + def process_embeds(self, embeddings, attention_mask, cut_context): + if cut_context: + embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0]) + max_seq_length = attention_mask.sum(-1).max() + 1 + embeddings = embeddings[:, :max_seq_length] + attention_mask = attention_mask[:, :max_seq_length] + return embeddings, attention_mask + + @torch.no_grad() + def encode_prompt( + self, + prompt, + do_classifier_free_guidance=True, + num_images_per_prompt=1, + device=None, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + _cut_context=False, + attention_mask: 
Optional[torch.FloatTensor] = None,
+        negative_attention_mask: Optional[torch.FloatTensor] = None,
+    ):
+        r"""
+        Encodes the prompt into text encoder hidden states.
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                prompt to be encoded
+            device (`torch.device`, *optional*):
+                torch device to place the resulting embeddings on
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                number of images that should be generated per prompt
+            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
+                whether to use classifier free guidance or not
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            attention_mask (`torch.FloatTensor`, *optional*):
+                Pre-generated attention mask. Must provide if passing `prompt_embeds` directly.
+            negative_attention_mask (`torch.FloatTensor`, *optional*):
+                Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly.
+        """
+        if prompt is not None and negative_prompt is not None:
+            if type(prompt) is not type(negative_prompt):
+                raise TypeError(
+                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
+                    f" {type(prompt)}."
+ ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + max_length = 128 + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids.to(device) + attention_mask = text_inputs.attention_mask.to(device) + prompt_embeds = self.text_encoder( + text_input_ids, + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + prompt_embeds, attention_mask = self.process_embeds(prompt_embeds, attention_mask, _cut_context) + prompt_embeds = prompt_embeds * attention_mask.unsqueeze(2) + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + attention_mask = attention_mask.repeat(num_images_per_prompt, 1) + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + if negative_prompt is not None: + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=128, + truncation=True, + return_attention_mask=True, + return_tensors="pt", + ) + text_input_ids = uncond_input.input_ids.to(device) + negative_attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + text_input_ids, + attention_mask=negative_attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds[:, : prompt_embeds.shape[1]] + negative_attention_mask = negative_attention_mask[:, : prompt_embeds.shape[1]] + negative_prompt_embeds = negative_prompt_embeds * negative_attention_mask.unsqueeze(2) + + else: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_attention_mask = torch.zeros_like(attention_mask) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + if negative_prompt_embeds.shape != prompt_embeds.shape: + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + negative_attention_mask = negative_attention_mask.repeat(num_images_per_prompt, 1) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + negative_attention_mask = None + return prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask + + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def check_inputs( + self, + prompt, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + attention_mask=None, + negative_attention_mask=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if negative_prompt_embeds is not None and negative_attention_mask is None: + raise ValueError("Please provide `negative_attention_mask` along with `negative_prompt_embeds`") + + if negative_prompt_embeds is not None and negative_attention_mask is not None: + if negative_prompt_embeds.shape[:2] != negative_attention_mask.shape: + raise ValueError( + "`negative_prompt_embeds` and `negative_attention_mask` must have the same batch_size and token length when passed directly, but" + f" got: `negative_prompt_embeds` {negative_prompt_embeds.shape[:2]} != `negative_attention_mask`" + f" {negative_attention_mask.shape}." 
+ ) + + if prompt_embeds is not None and attention_mask is None: + raise ValueError("Please provide `attention_mask` along with `prompt_embeds`") + + if prompt_embeds is not None and attention_mask is not None: + if prompt_embeds.shape[:2] != attention_mask.shape: + raise ValueError( + "`prompt_embeds` and `attention_mask` must have the same batch_size and token length when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape[:2]} != `attention_mask`" + f" {attention_mask.shape}." + ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + num_inference_steps: int = 25, + guidance_scale: float = 3.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + height: Optional[int] = 1024, + width: Optional[int] = 1024, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + negative_attention_mask: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + latents=None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 3.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + height (`int`, *optional*, defaults to self.unet.config.sample_size): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size): + The width in pixels of the generated image. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. 
Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + attention_mask (`torch.FloatTensor`, *optional*): + Pre-generated attention mask. Must provide if passing `prompt_embeds` directly. + negative_attention_mask (`torch.FloatTensor`, *optional*): + Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + cut_context = True + device = self._execution_device + + # 1. Check inputs. 
Raise error if not correct + self.check_inputs( + prompt, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + attention_mask, + negative_attention_mask, + ) + + self._guidance_scale = guidance_scale + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask = self.encode_prompt( + prompt, + self.do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + _cut_context=cut_context, + attention_mask=attention_mask, + negative_attention_mask=negative_attention_mask, + ) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + attention_mask = torch.cat([negative_attention_mask, attention_mask]).bool() + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latents + height, width = downscale_height_and_width(height, width, 8) + + latents = self.prepare_latents( + (batch_size * num_images_per_prompt, 4, height, width), + prompt_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + encoder_attention_mask=attention_mask, + return_dict=False, + )[0] + + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + + noise_pred = (guidance_scale + 1.0) * noise_pred_text - guidance_scale * noise_pred_uncond + # noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + attention_mask = callback_outputs.pop("attention_mask", attention_mask) + negative_attention_mask = callback_outputs.pop("negative_attention_mask", negative_attention_mask) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # post-processing + 
if output_type not in ["pt", "np", "pil", "latent"]: + raise ValueError( + f"Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}" + ) + + if not output_type == "latent": + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + else: + image = latents + + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py new file mode 100644 index 000000000..7f4164a04 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py @@ -0,0 +1,654 @@ +import inspect +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL +import PIL.Image +import torch +from transformers import T5EncoderModel, T5Tokenizer + +from ...loaders import LoraLoaderMixin +from ...models import Kandinsky3UNet, VQModel +from ...schedulers import DDPMScheduler +from ...utils import ( + deprecate, + is_accelerate_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import AutoPipelineForImage2Image + >>> from diffusers.utils import load_image + >>> import torch + + >>> pipe = AutoPipelineForImage2Image.from_pretrained("kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "A painting of the inside of a subway train with tiny raccoons." 
+ >>> image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png") + + >>> generator = torch.Generator(device="cpu").manual_seed(0) + >>> image = pipe(prompt, image=image, strength=0.75, num_inference_steps=25, generator=generator).images[0] + ``` +""" + + +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +def prepare_image(pil_image): + arr = np.array(pil_image.convert("RGB")) + arr = arr.astype(np.float32) / 127.5 - 1 + arr = np.transpose(arr, [2, 0, 1]) + image = torch.from_numpy(arr).unsqueeze(0) + return image + + +class Kandinsky3Img2ImgPipeline(DiffusionPipeline, LoraLoaderMixin): + model_cpu_offload_seq = "text_encoder->movq->unet->movq" + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "negative_attention_mask", + "attention_mask", + ] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: Kandinsky3UNet, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, movq=movq + ) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + def remove_all_hooks(self): + if is_accelerate_available(): + from accelerate.hooks import remove_hook_from_module + else: + raise ImportError("Please install accelerate via `pip install accelerate`") + + for model in [self.text_encoder, self.unet]: + if model is not None: + remove_hook_from_module(model, recurse=True) + + self.unet_offload_hook = None + self.text_encoder_offload_hook = None + self.final_offload_hook = None + + def _process_embeds(self, embeddings, attention_mask, cut_context): + # return embeddings, attention_mask + if cut_context: + embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0]) + max_seq_length = attention_mask.sum(-1).max() + 1 + embeddings = embeddings[:, :max_seq_length] + attention_mask = attention_mask[:, :max_seq_length] + return embeddings, attention_mask + + @torch.no_grad() + def encode_prompt( + self, + prompt, + do_classifier_free_guidance=True, + num_images_per_prompt=1, + device=None, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + _cut_context=False, + attention_mask: Optional[torch.FloatTensor] = None, + negative_attention_mask: Optional[torch.FloatTensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
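+        It returns the prompt embeddings together with the negative prompt embeddings and their attention masks; when
+        classifier-free guidance is enabled, `__call__` concatenates the negative and positive batches before the
+        denoising loop.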
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                prompt to be encoded
+            device (`torch.device`, *optional*):
+                torch device to place the resulting embeddings on
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                number of images that should be generated per prompt
+            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
+                whether to use classifier free guidance or not
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            attention_mask (`torch.FloatTensor`, *optional*):
+                Pre-generated attention mask. Must provide if passing `prompt_embeds` directly.
+            negative_attention_mask (`torch.FloatTensor`, *optional*):
+                Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly.
+        """
+        if prompt is not None and negative_prompt is not None:
+            if type(prompt) is not type(negative_prompt):
+                raise TypeError(
+                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
+                    f" {type(prompt)}."
+ ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + max_length = 128 + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids.to(device) + attention_mask = text_inputs.attention_mask.to(device) + prompt_embeds = self.text_encoder( + text_input_ids, + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + prompt_embeds, attention_mask = self._process_embeds(prompt_embeds, attention_mask, _cut_context) + prompt_embeds = prompt_embeds * attention_mask.unsqueeze(2) + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + attention_mask = attention_mask.repeat(num_images_per_prompt, 1) + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + if negative_prompt is not None: + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=128, + truncation=True, + return_attention_mask=True, + return_tensors="pt", + ) + text_input_ids = uncond_input.input_ids.to(device) + negative_attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + text_input_ids, + attention_mask=negative_attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds[:, : prompt_embeds.shape[1]] + negative_attention_mask = negative_attention_mask[:, : prompt_embeds.shape[1]] + negative_prompt_embeds = negative_prompt_embeds * negative_attention_mask.unsqueeze(2) + + else: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_attention_mask = torch.zeros_like(attention_mask) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + if negative_prompt_embeds.shape != prompt_embeds.shape: + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + negative_attention_mask = negative_attention_mask.repeat(num_images_per_prompt, 1) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + negative_attention_mask = None + return prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask + + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = self.movq.encode(image).latent_dist.sample(generator) + + init_latents = self.movq.config.scaling_factor * init_latents + + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + attention_mask=None, + negative_attention_mask=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if negative_prompt_embeds is not None and negative_attention_mask is None: + raise ValueError("Please provide `negative_attention_mask` along with `negative_prompt_embeds`") + + if negative_prompt_embeds is not None and negative_attention_mask is not None: + if negative_prompt_embeds.shape[:2] != negative_attention_mask.shape: + raise ValueError( + "`negative_prompt_embeds` and `negative_attention_mask` must have the same batch_size and token length when passed directly, but" + f" got: `negative_prompt_embeds` {negative_prompt_embeds.shape[:2]} != `negative_attention_mask`" + f" {negative_attention_mask.shape}." + ) + + if prompt_embeds is not None and attention_mask is None: + raise ValueError("Please provide `attention_mask` along with `prompt_embeds`") + + if prompt_embeds is not None and attention_mask is not None: + if prompt_embeds.shape[:2] != attention_mask.shape: + raise ValueError( + "`prompt_embeds` and `attention_mask` must have the same batch_size and token length when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape[:2]} != `attention_mask`" + f" {attention_mask.shape}." 
+                )
+
+    @property
+    def guidance_scale(self):
+        return self._guidance_scale
+
+    @property
+    def do_classifier_free_guidance(self):
+        return self._guidance_scale > 1
+
+    @property
+    def num_timesteps(self):
+        return self._num_timesteps
+
+    @torch.no_grad()
+    @replace_example_docstring(EXAMPLE_DOC_STRING)
+    def __call__(
+        self,
+        prompt: Union[str, List[str]] = None,
+        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] = None,
+        strength: float = 0.3,
+        num_inference_steps: int = 25,
+        guidance_scale: float = 3.0,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        num_images_per_prompt: Optional[int] = 1,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        prompt_embeds: Optional[torch.FloatTensor] = None,
+        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        negative_attention_mask: Optional[torch.FloatTensor] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        **kwargs,
+    ):
+        """
+        Function invoked when calling the pipeline for generation.
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
+                instead.
+            image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]`, or `List[PIL.Image.Image]`):
+                `Image`, or tensor representing an image batch, that will be used as the starting point for the
+                process.
+            strength (`float`, *optional*, defaults to 0.3):
+                Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
+                starting point and more noise is added the higher the `strength`. The number of denoising steps depends
+                on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
+                process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
+                essentially ignores `image`.
+            num_inference_steps (`int`, *optional*, defaults to 25):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            guidance_scale (`float`, *optional*, defaults to 3.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
+                usually at the expense of lower image quality.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings.
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + attention_mask (`torch.FloatTensor`, *optional*): + Pre-generated attention mask. Must provide if passing `prompt_embeds` directly. + negative_attention_mask (`torch.FloatTensor`, *optional*): + Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + + """ + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + cut_context = True + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + attention_mask, + negative_attention_mask, + ) + + self._guidance_scale = guidance_scale + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. 
Encode input prompt + prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask = self.encode_prompt( + prompt, + self.do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + _cut_context=cut_context, + attention_mask=attention_mask, + negative_attention_mask=negative_attention_mask, + ) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + attention_mask = torch.cat([negative_attention_mask, attention_mask]).bool() + if not isinstance(image, list): + image = [image] + if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image): + raise ValueError( + f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor" + ) + + image = torch.cat([prepare_image(i) for i in image], dim=0) + image = image.to(dtype=prompt_embeds.dtype, device=device) + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + # 5. Prepare latents + latents = self.movq.encode(image)["latents"] + latents = latents.repeat_interleave(num_images_per_prompt, dim=0) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + latents = self.prepare_latents( + latents, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator + ) + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + encoder_attention_mask=attention_mask, + )[0] + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + + noise_pred = (guidance_scale + 1.0) * noise_pred_text - guidance_scale * noise_pred_uncond + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + attention_mask = callback_outputs.pop("attention_mask", attention_mask) + negative_attention_mask = callback_outputs.pop("negative_attention_mask", negative_attention_mask) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # post-processing + if output_type not in ["pt", "np", "pil", 
"latent"]: + raise ValueError( + f"Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}" + ) + if not output_type == "latent": + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + else: + image = latents + + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_consistency_models/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_consistency_models/__init__.py new file mode 100644 index 000000000..8f79d3c47 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_consistency_models/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_latent_consistency_img2img"] = ["LatentConsistencyModelImg2ImgPipeline"] + _import_structure["pipeline_latent_consistency_text2img"] = ["LatentConsistencyModelPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_latent_consistency_img2img import LatentConsistencyModelImg2ImgPipeline + from .pipeline_latent_consistency_text2img import LatentConsistencyModelPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py new file mode 100644 index 000000000..f64854ea9 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py @@ -0,0 +1,956 @@ +# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import LCMScheduler +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. 
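Editorial aside: `retrieve_timesteps` (whose body follows) only dispatches between the two `scheduler.set_timesteps` call styles, using `inspect.signature` to decide whether custom timesteps are supported. An illustrative sketch of the two paths, assuming the vendored `diffusers` package is importable:

```py
import inspect

from diffusers import LCMScheduler

scheduler = LCMScheduler()

# Path 1: derive the schedule from a step count.
scheduler.set_timesteps(num_inference_steps=4)
print(scheduler.timesteps)  # descending tensor of 4 training timesteps

# Path 2 is only taken when set_timesteps accepts a `timesteps` argument,
# which is exactly the check retrieve_timesteps performs.
print("timesteps" in inspect.signature(scheduler.set_timesteps).parameters)
```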
+ """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import AutoPipelineForImage2Image + >>> import torch + >>> import PIL + + >>> pipe = AutoPipelineForImage2Image.from_pretrained("SimianLuo/LCM_Dreamshaper_v7") + >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality. + >>> pipe.to(torch_device="cuda", torch_dtype=torch.float32) + + >>> prompt = "High altitude snowy mountains" + >>> image = PIL.Image.open("./snowy_mountains.png") + + >>> # Can be set to 1~50 steps. LCM support fast inference even <= 4 steps. Recommend: 1~8 steps. + >>> num_inference_steps = 4 + >>> images = pipe( + ... prompt=prompt, image=image, num_inference_steps=num_inference_steps, guidance_scale=8.0 + ... ).images + + >>> images[0].save("image.png") + ``` + +""" + + +class LatentConsistencyModelImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + LoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for image-to-image generation using a latent consistency model. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only + supports [`LCMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
+ requires_safety_checker (`bool`, *optional*, defaults to `True`): + Whether the pipeline requires a safety checker component. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "denoised", "prompt_embeds", "w_embedding"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: LCMScheduler, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
+ lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
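+                # Illustrative note: `hidden_states` holds the embedding output plus one entry per
+                # encoder layer, so clip_skip=1 selects index -2 (the pre-final layer's output),
+                # which is then passed through `final_layer_norm` on the next line.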
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, 
clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
+            )
+        else:
+            init_latents = torch.cat([init_latents], dim=0)
+
+        shape = init_latents.shape
+        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+
+        # get latents
+        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
+        latents = init_latents
+
+        return latents
+
+    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
+    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
+        """
+        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
+
+        Args:
+            w (`torch.Tensor`):
+                Guidance scale values (one per generated image) to turn into embedding vectors.
+            embedding_dim (`int`, *optional*, defaults to 512):
+                dimension of the embeddings to generate
+            dtype:
+                data type of the generated embeddings
+
+        Returns:
+            `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
+        """
+        assert len(w.shape) == 1
+        w = w * 1000.0
+
+        half_dim = embedding_dim // 2
+        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
+        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
+        emb = w.to(dtype)[:, None] * emb[None, :]
+        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
+        if embedding_dim % 2 == 1:  # zero pad
+            emb = torch.nn.functional.pad(emb, (0, 1))
+        assert emb.shape == (w.shape[0], embedding_dim)
+        return emb
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+    def prepare_extra_step_kwargs(self, generator, eta):
+        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
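+        # Illustrative note: LCMScheduler.step has no `eta` parameter, so only `generator`
+        # survives the signature filtering below; with a DDIMScheduler both would be kept.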
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def check_inputs( + self, + prompt: Union[str, List[str]], + strength: float, + callback_steps: int, + prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def clip_skip(self): + return self._clip_skip + + @property + def do_classifier_free_guidance(self): + return False + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + num_inference_steps: int = 4, + strength: float = 0.8, + original_inference_steps: int = None, + timesteps: List[int] = None, + guidance_scale: float = 8.5, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + original_inference_steps (`int`, *optional*): + The original number of inference steps use to generate a linearly-spaced timestep schedule, from which + we will draw `num_inference_steps` evenly spaced timesteps from as our final timestep schedule, + following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the + scheduler's `original_inference_steps` attribute. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps on the original LCM training/distillation timestep schedule are used. Must be in descending + order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
+ Note that the original latent consistency models paper uses a different CFG formulation where the + guidance scales are decreased by 1 (so in the paper formulation CFG is enabled when `guidance_scale > + 0`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
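Editorial aside: because `_callback_tensor_inputs` for this pipeline also exposes `denoised` and `w_embedding`, a step-end callback can observe (or overwrite) those tensors. A hedged sketch, reusing the checkpoint and image path from the docstring example above; the callback must return the kwargs dict:

```py
import PIL.Image
import torch
from diffusers import AutoPipelineForImage2Image

pipe = AutoPipelineForImage2Image.from_pretrained("SimianLuo/LCM_Dreamshaper_v7")
pipe.to(torch_device="cuda", torch_dtype=torch.float32)

init_image = PIL.Image.open("./snowy_mountains.png")

def log_denoised(pipeline, step, timestep, callback_kwargs):
    # `denoised` is the scheduler's running estimate of the clean latents.
    print(step, callback_kwargs["denoised"].abs().mean().item())
    return callback_kwargs  # must return the (possibly modified) dict

images = pipe(
    prompt="High altitude snowy mountains",
    image=init_image,
    num_inference_steps=4,
    guidance_scale=8.0,
    callback_on_step_end=log_denoised,
    callback_on_step_end_tensor_inputs=["denoised"],
).images
images[0].save("image.png")
```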
+ + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + strength, + callback_steps, + prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + # NOTE: when a LCM is distilled from an LDM via latent consistency distillation (Algorithm 1) with guided + # distillation, the forward pass of the LCM learns to approximate sampling from the LDM using CFG with the + # unconditional prompt "" (the empty string). Due to this, LCMs currently do not support negative prompts. + prompt_embeds, _ = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=None, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Encode image + image = self.image_processor.preprocess(image) + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + timesteps, + original_inference_steps=original_inference_steps, + strength=strength, + ) + + # 6. Prepare latent variables + original_inference_steps = ( + original_inference_steps + if original_inference_steps is not None + else self.scheduler.config.original_inference_steps + ) + latent_timestep = timesteps[:1] + latents = self.prepare_latents( + image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator + ) + bs = batch_size * num_images_per_prompt + + # 6. Get Guidance Scale Embedding + # NOTE: We use the Imagen CFG formulation that StableDiffusionPipeline uses rather than the original LCM paper + # CFG formulation, so we need to subtract 1 from the input guidance_scale. 
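+        # Illustrative note: with the default guidance_scale=8.5 and a single image, w becomes
+        # tensor([7.5]); it is embedded sinusoidally below and fed to the UNet via `timestep_cond`,
+        # so guidance is baked into one conditional forward pass rather than a second,
+        # unconditional pass (do_classifier_free_guidance is False for this pipeline).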
+ # LCM CFG formulation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond), (cfg_scale > 0.0 using CFG) + w = torch.tensor(self.guidance_scale - 1).repeat(bs) + w_embedding = self.get_guidance_scale_embedding(w, embedding_dim=self.unet.config.time_cond_proj_dim).to( + device=device, dtype=latents.dtype + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 8. LCM Multistep Sampling Loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + latents = latents.to(prompt_embeds.dtype) + + # model prediction (v-prediction, eps, x) + model_pred = self.unet( + latents, + t, + timestep_cond=w_embedding, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # compute the previous noisy sample x_t -> x_t-1 + latents, denoised = self.scheduler.step(model_pred, t, latents, **extra_step_kwargs, return_dict=False) + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + w_embedding = callback_outputs.pop("w_embedding", w_embedding) + denoised = callback_outputs.pop("denoised", denoised) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + denoised = denoised.to(prompt_embeds.dtype) + if not output_type == "latent": + image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = denoised + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py new file mode 100644 index 000000000..e9bacaa89 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py @@ -0,0 +1,888 @@ +# Copyright 2024 
Stanford University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import LCMScheduler +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import DiffusionPipeline + >>> import torch + + >>> pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7") + >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality. + >>> pipe.to(torch_device="cuda", torch_dtype=torch.float32) + + >>> prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k" + + >>> # Can be set to 1~50 steps. LCM support fast inference even <= 4 steps. Recommend: 1~8 steps. + >>> num_inference_steps = 4 + >>> images = pipe(prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=8.0).images + >>> images[0].save("image.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. 
If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class LatentConsistencyModelPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + LoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using a latent consistency model. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only + supports [`LCMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + requires_safety_checker (`bool`, *optional*, defaults to `True`): + Whether the pipeline requires a safety checker component. 
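Editorial aside: a slightly expanded, hedged variant of the docstring example above, showing seeded generation with the same checkpoint (LCM bakes guidance into a `w`-embedding, so there is no negative prompt or unconditional pass; only `guidance_scale` is varied):

```py
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7")
pipe.to(torch_device="cuda", torch_dtype=torch.float32)

generator = torch.Generator("cuda").manual_seed(0)
images = pipe(
    prompt="Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
    num_inference_steps=4,
    guidance_scale=8.0,
    generator=generator,
    num_images_per_prompt=2,
).images
for i, image in enumerate(images):
    image.save(f"lcm_{i}.png")
```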
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "denoised", "prompt_embeds", "w_embedding"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: LCMScheduler, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
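+                # Illustrative note (not in the upstream source): `clip_skip=1` picks
+                # `hidden_states[-2]`, i.e. the pre-final encoder layer, and the final
+                # LayerNorm applied below is still run on that intermediate representation.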
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, 
clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Currently StableDiffusionPipeline.check_inputs with negative prompt stuff removed + def check_inputs( + self, + prompt: Union[str, List[str]], + height: int, + width: int, + callback_steps: int, + prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def clip_skip(self): + return self._clip_skip + + @property + def do_classifier_free_guidance(self): + return False + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 4, + original_inference_steps: int = None, + timesteps: List[int] = None, + guidance_scale: float = 8.5, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + original_inference_steps (`int`, *optional*): + The original number of inference steps use to generate a linearly-spaced timestep schedule, from which + we will draw `num_inference_steps` evenly spaced timesteps from as our final timestep schedule, + following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the + scheduler's `original_inference_steps` attribute. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps on the original LCM training/distillation timestep schedule are used. Must be in descending + order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
+ Note that the original latent consistency models paper uses a different CFG formulation where the + guidance scales are decreased by 1 (so in the paper formulation CFG is enabled when `guidance_scale > + 0`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
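+
+        As an illustrative sketch (not part of the upstream docstring), a minimal `callback_on_step_end`
+        that reads the tensors requested via `callback_on_step_end_tensor_inputs` could look like:
+
+        ```py
+        >>> def on_step_end(pipe, step, timestep, callback_kwargs):
+        ...     # inspect the current latents; returned entries overwrite the pipeline's tensors
+        ...     latents = callback_kwargs["latents"]
+        ...     return {"latents": latents}
+
+        >>> images = pipe(prompt, callback_on_step_end=on_step_end).images  # `pipe`, `prompt` as in Examples
+        ```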
+ + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + # NOTE: when a LCM is distilled from an LDM via latent consistency distillation (Algorithm 1) with guided + # distillation, the forward pass of the LCM learns to approximate sampling from the LDM using CFG with the + # unconditional prompt "" (the empty string). Due to this, LCMs currently do not support negative prompts. + prompt_embeds, _ = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=None, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, original_inference_steps=original_inference_steps + ) + + # 5. Prepare latent variable + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + bs = batch_size * num_images_per_prompt + + # 6. Get Guidance Scale Embedding + # NOTE: We use the Imagen CFG formulation that StableDiffusionPipeline uses rather than the original LCM paper + # CFG formulation, so we need to subtract 1 from the input guidance_scale. 
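+        # Illustrative arithmetic (not in the upstream source): with the default guidance_scale=8.5,
+        # each sample gets w = 7.5, which get_guidance_scale_embedding maps to a sinusoidal vector of
+        # length `unet.config.time_cond_proj_dim`, passed to the UNet below as `timestep_cond`.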
+ # LCM CFG formulation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond), (cfg_scale > 0.0 using CFG) + w = torch.tensor(self.guidance_scale - 1).repeat(bs) + w_embedding = self.get_guidance_scale_embedding(w, embedding_dim=self.unet.config.time_cond_proj_dim).to( + device=device, dtype=latents.dtype + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 8. LCM MultiStep Sampling Loop: + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + latents = latents.to(prompt_embeds.dtype) + + # model prediction (v-prediction, eps, x) + model_pred = self.unet( + latents, + t, + timestep_cond=w_embedding, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # compute the previous noisy sample x_t -> x_t-1 + latents, denoised = self.scheduler.step(model_pred, t, latents, **extra_step_kwargs, return_dict=False) + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + w_embedding = callback_outputs.pop("w_embedding", w_embedding) + denoised = callback_outputs.pop("denoised", denoised) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + denoised = denoised.to(prompt_embeds.dtype) + if not output_type == "latent": + image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = denoised + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_diffusion/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_diffusion/__init__.py new file mode 100644 index 000000000..561f96fc7 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_diffusion/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + 
_LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_latent_diffusion"] = ["LDMBertModel", "LDMTextToImagePipeline"] + _import_structure["pipeline_latent_diffusion_superresolution"] = ["LDMSuperResolutionPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_latent_diffusion import LDMBertModel, LDMTextToImagePipeline + from .pipeline_latent_diffusion_superresolution import LDMSuperResolutionPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py new file mode 100644 index 000000000..f39cbc839 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py @@ -0,0 +1,746 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint +from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer +from transformers.activations import ACT2FN +from transformers.modeling_outputs import BaseModelOutput +from transformers.utils import logging + +from ...models import AutoencoderKL, UNet2DConditionModel, UNet2DModel, VQModel +from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class LDMTextToImagePipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using latent diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + vqvae ([`VQModel`]): + Vector-quantized (VQ) model to encode and decode images to and from latent representations. 
+ bert ([`LDMBertModel`]): + Text-encoder model based on [`~transformers.BERT`]. + tokenizer ([`~transformers.BertTokenizer`]): + A `BertTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "bert->unet->vqvae" + + def __init__( + self, + vqvae: Union[VQModel, AutoencoderKL], + bert: PreTrainedModel, + tokenizer: PreTrainedTokenizer, + unet: Union[UNet2DModel, UNet2DConditionModel], + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + ): + super().__init__() + self.register_modules(vqvae=vqvae, bert=bert, tokenizer=tokenizer, unet=unet, scheduler=scheduler) + self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 1.0, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + **kwargs, + ) -> Union[Tuple, ImagePipelineOutput]: + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 1.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. 
+ + Example: + + ```py + >>> from diffusers import DiffusionPipeline + + >>> # load model and scheduler + >>> ldm = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") + + >>> # run pipeline in inference (sample random noise and denoise) + >>> prompt = "A painting of a squirrel eating a burger" + >>> images = ldm([prompt], num_inference_steps=50, eta=0.3, guidance_scale=6).images + + >>> # save images + >>> for idx, image in enumerate(images): + ... image.save(f"squirrel-{idx}.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + # get unconditional embeddings for classifier free guidance + if guidance_scale != 1.0: + uncond_input = self.tokenizer( + [""] * batch_size, padding="max_length", max_length=77, truncation=True, return_tensors="pt" + ) + negative_prompt_embeds = self.bert(uncond_input.input_ids.to(self._execution_device))[0] + + # get prompt text embeddings + text_input = self.tokenizer(prompt, padding="max_length", max_length=77, truncation=True, return_tensors="pt") + prompt_embeds = self.bert(text_input.input_ids.to(self._execution_device))[0] + + # get the initial random noise unless the user supplied it + latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor( + latents_shape, generator=generator, device=self._execution_device, dtype=prompt_embeds.dtype + ) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + latents = latents.to(self._execution_device) + + self.scheduler.set_timesteps(num_inference_steps) + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + + extra_kwargs = {} + if accepts_eta: + extra_kwargs["eta"] = eta + + for t in self.progress_bar(self.scheduler.timesteps): + if guidance_scale == 1.0: + # guidance_scale of 1 means no guidance + latents_input = latents + context = prompt_embeds + else: + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + latents_input = torch.cat([latents] * 2) + context = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # predict the noise residual + noise_pred = self.unet(latents_input, t, encoder_hidden_states=context).sample + # perform guidance + if guidance_scale != 1.0: + noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample + + # scale and decode the image latents with vae + latents = 1 / self.vqvae.config.scaling_factor * latents + image = self.vqvae.decode(latents).sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) + + +################################################################################ +# Code for the text transformer model +################################################################################ +""" PyTorch LDMBERT model.""" + + +logger = logging.get_logger(__name__) + +LDMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "ldm-bert", + # See all LDMBert models at https://huggingface.co/models?filter=ldmbert +] + + +LDMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "ldm-bert": "https://huggingface.co/valhalla/ldm-bert/blob/main/config.json", +} + + +""" LDMBERT model configuration""" + + +class LDMBertConfig(PretrainedConfig): + model_type = "ldmbert" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} + + def __init__( + self, + vocab_size=30522, + max_position_embeddings=77, + encoder_layers=32, + encoder_ffn_dim=5120, + encoder_attention_heads=8, + head_dim=64, + encoder_layerdrop=0.0, + activation_function="gelu", + d_model=1280, + dropout=0.1, + attention_dropout=0.0, + activation_dropout=0.0, + init_std=0.02, + classifier_dropout=0.0, + scale_embedding=False, + use_cache=True, + pad_token_id=0, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.d_model = d_model + self.encoder_ffn_dim = encoder_ffn_dim + self.encoder_layers = encoder_layers + self.encoder_attention_heads = encoder_attention_heads + self.head_dim = head_dim + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.activation_function = activation_function + self.init_std = init_std + self.encoder_layerdrop = encoder_layerdrop + self.classifier_dropout = classifier_dropout + self.use_cache = use_cache + self.num_hidden_layers = encoder_layers + self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True + + super().__init__(pad_token_id=pad_token_id, **kwargs) + + +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
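+
+    Illustrative example (not part of the upstream docstring): a padding mask `[[1, 1, 0]]` becomes a
+    `(1, 1, tgt_len, 3)` additive mask holding `0.0` at attended positions and `torch.finfo(dtype).min`
+    at the padded position, so it can be added directly to the attention scores.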
+ """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->LDMBert +class LDMBertAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + head_dim: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = False, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = head_dim + self.inner_dim = head_dim * num_heads + + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) + self.out_proj = nn.Linear(self.inner_dim, embed_dim) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. 
Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned across GPUs when using tensor-parallelism. 
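+        # Illustrative shape walk-through (assumed example sizes, not in the upstream source): with
+        # bsz=2, num_heads=8, head_dim=64, tgt_len=77, `attn_output` goes
+        # (16, 77, 64) -> view (2, 8, 77, 64) -> transpose (2, 77, 8, 64) -> reshape (2, 77, 512).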
+ attn_output = attn_output.reshape(bsz, tgt_len, self.inner_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +class LDMBertEncoderLayer(nn.Module): + def __init__(self, config: LDMBertConfig): + super().__init__() + self.embed_dim = config.d_model + self.self_attn = LDMBertAttention( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + head_dim=config.head_dim, + dropout=config.attention_dropout, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.FloatTensor, + attention_mask: torch.FloatTensor, + layer_head_mask: torch.FloatTensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + hidden_states, attn_weights, _ = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.final_layer_norm(hidden_states) + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + if hidden_states.dtype == torch.float16 and ( + torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() + ): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.bart.modeling_bart.BartPretrainedModel with Bart->LDMBert +class LDMBertPreTrainedModel(PreTrainedModel): + config_class = LDMBertConfig + base_model_prefix = "model" + _supports_gradient_checkpointing = True + _keys_to_ignore_on_load_unexpected = [r"encoder\.version", r"decoder\.version"] + + def _init_weights(self, module): + std = self.config.init_std + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if 
module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (LDMBertEncoder,)): + module.gradient_checkpointing = value + + @property + def dummy_inputs(self): + pad_token = self.config.pad_token_id + input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) + dummy_inputs = { + "attention_mask": input_ids.ne(pad_token), + "input_ids": input_ids, + } + return dummy_inputs + + +class LDMBertEncoder(LDMBertPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`LDMBertEncoderLayer`]. + + Args: + config: LDMBertConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: LDMBertConfig): + super().__init__(config) + + self.dropout = config.dropout + + embed_dim = config.d_model + self.padding_idx = config.pad_token_id + self.max_source_positions = config.max_position_embeddings + + self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim) + self.embed_positions = nn.Embedding(config.max_position_embeddings, embed_dim) + self.layers = nn.ModuleList([LDMBertEncoderLayer(config) for _ in range(config.encoder_layers)]) + self.layer_norm = nn.LayerNorm(embed_dim) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`BartTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
+ output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.BaseModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + seq_len = input_shape[1] + if position_ids is None: + position_ids = torch.arange(seq_len, dtype=torch.long, device=inputs_embeds.device).expand((1, -1)) + embed_pos = self.embed_positions(position_ids) + + hidden_states = inputs_embeds + embed_pos + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + if head_mask.size()[0] != (len(self.layers)): + raise ValueError( + f"The head_mask should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." 
+ ) + + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(encoder_layer), + hidden_states, + attention_mask, + (head_mask[idx] if head_mask is not None else None), + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + hidden_states = self.layer_norm(hidden_states) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class LDMBertModel(LDMBertPreTrainedModel): + _no_split_modules = [] + + def __init__(self, config: LDMBertConfig): + super().__init__(config) + self.model = LDMBertEncoder(config) + self.to_logits = nn.Linear(config.hidden_size, config.vocab_size) + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + return outputs diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py new file mode 100644 index 000000000..bb72b4d4e --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py @@ -0,0 +1,189 @@ +import inspect +from typing import List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.utils.checkpoint + +from ...models import UNet2DModel, VQModel +from ...schedulers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from ...utils import PIL_INTERPOLATION +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +def preprocess(image): + w, h = image.size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) + image = np.array(image).astype(np.float32) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + return 2.0 * image - 1.0 + + +class LDMSuperResolutionPipeline(DiffusionPipeline): + r""" + A pipeline for image super-resolution using latent diffusion. 
+ + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + vqvae ([`VQModel`]): + Vector-quantized (VQ) model to encode and decode images to and from latent representations. + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`EulerDiscreteScheduler`], + [`EulerAncestralDiscreteScheduler`], [`DPMSolverMultistepScheduler`], or [`PNDMScheduler`]. + """ + + def __init__( + self, + vqvae: VQModel, + unet: UNet2DModel, + scheduler: Union[ + DDIMScheduler, + PNDMScheduler, + LMSDiscreteScheduler, + EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + DPMSolverMultistepScheduler, + ], + ): + super().__init__() + self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + image: Union[torch.Tensor, PIL.Image.Image] = None, + batch_size: Optional[int] = 1, + num_inference_steps: Optional[int] = 100, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ) -> Union[Tuple, ImagePipelineOutput]: + r""" + The call function to the pipeline for generation. + + Args: + image (`torch.Tensor` or `PIL.Image.Image`): + `Image` or tensor representing an image batch to be used as the starting point for the process. + batch_size (`int`, *optional*, defaults to 1): + Number of images to generate. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. + + Example: + + ```py + >>> import requests + >>> from PIL import Image + >>> from io import BytesIO + >>> from diffusers import LDMSuperResolutionPipeline + >>> import torch + + >>> # load model and scheduler + >>> pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages") + >>> pipeline = pipeline.to("cuda") + + >>> # let's download an image + >>> url = ( + ... "https://user-images.githubusercontent.com/38061659/199705896-b48e17b8-b231-47cd-a270-4ffa5a93fa3e.png" + ... 
) + >>> response = requests.get(url) + >>> low_res_img = Image.open(BytesIO(response.content)).convert("RGB") + >>> low_res_img = low_res_img.resize((128, 128)) + + >>> # run pipeline in inference (sample random noise and denoise) + >>> upscaled_image = pipeline(low_res_img, num_inference_steps=100, eta=1).images[0] + >>> # save image + >>> upscaled_image.save("ldm_generated_image.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + if isinstance(image, PIL.Image.Image): + batch_size = 1 + elif isinstance(image, torch.Tensor): + batch_size = image.shape[0] + else: + raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}") + + if isinstance(image, PIL.Image.Image): + image = preprocess(image) + + height, width = image.shape[-2:] + + # in_channels should be 6: 3 for latents, 3 for low resolution image + latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width) + latents_dtype = next(self.unet.parameters()).dtype + + latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) + + image = image.to(device=self.device, dtype=latents_dtype) + + # set timesteps and move to the correct device + self.scheduler.set_timesteps(num_inference_steps, device=self.device) + timesteps_tensor = self.scheduler.timesteps + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_kwargs = {} + if accepts_eta: + extra_kwargs["eta"] = eta + + for t in self.progress_bar(timesteps_tensor): + # concat latents and low resolution image in the channel dimension. 
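+                # The preprocessed low-resolution image stays in pixel space (it is never passed through the VQ-VAE encoder);
+                # concatenating its 3 channels with the 3 noisy latent channels yields the 6-channel input the UNet expects
+                # (see the `in_channels // 2` latent shape above), so every denoising step is conditioned on the image being upscaled.
+                # `scale_model_input` only rescales the input for schedulers that require it and is a no-op for the others.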
+ latents_input = torch.cat([latents, image], dim=1) + latents_input = self.scheduler.scale_model_input(latents_input, t) + # predict the noise residual + noise_pred = self.unet(latents_input, t).sample + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample + + # decode the image latents with the VQVAE + image = self.vqvae.decode(latents).sample + image = torch.clamp(image, -1.0, 1.0) + image = image / 2 + 0.5 + image = image.cpu().permute(0, 2, 3, 1).numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/__init__.py new file mode 100644 index 000000000..aae3b1cb1 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/__init__.py @@ -0,0 +1,55 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_leditspp_stable_diffusion"] = ["LEditsPPPipelineStableDiffusion"] + _import_structure["pipeline_leditspp_stable_diffusion_xl"] = ["LEditsPPPipelineStableDiffusionXL"] + + _import_structure["pipeline_output"] = ["LEditsPPDiffusionPipelineOutput", "LEditsPPDiffusionPipelineOutput"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_leditspp_stable_diffusion import ( + LEditsPPDiffusionPipelineOutput, + LEditsPPInversionPipelineOutput, + LEditsPPPipelineStableDiffusion, + ) + from .pipeline_leditspp_stable_diffusion_xl import LEditsPPPipelineStableDiffusionXL + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py new file mode 100644 index 000000000..a6357c4cd --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py @@ -0,0 +1,1505 @@ +import inspect +import math +from itertools import repeat +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from packaging import version +from transformers import CLIPImageProcessor, 
CLIPTextModel, CLIPTokenizer + +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import Attention, AttnProcessor +from ...models.lora import adjust_lora_scale_text_encoder +from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import LEditsPPPipelineStableDiffusion + + >>> pipe = LEditsPPPipelineStableDiffusion.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + >>> img_url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/cherry_blossom.png" + >>> image = download_image(img_url) + + >>> _ = pipe.invert( + ... image = image, + ... num_inversion_steps=50, + ... skip=0.1 + ... ) + + >>> edited_image = pipe( + ... editing_prompt=["cherry blossom"], + ... edit_guidance_scale=10.0, + ... 
edit_threshold=0.75, + ).images[0] + ``` +""" + + +# Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionAttendAndExcitePipeline.AttentionStore +class LeditsAttentionStore: + @staticmethod + def get_empty_store(): + return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []} + + def __call__(self, attn, is_cross: bool, place_in_unet: str, editing_prompts, PnP=False): + # attn.shape = batch_size * head_size, seq_len query, seq_len_key + if attn.shape[1] <= self.max_size: + bs = 1 + int(PnP) + editing_prompts + skip = 2 if PnP else 1 # skip PnP & unconditional + attn = torch.stack(attn.split(self.batch_size)).permute(1, 0, 2, 3) + source_batch_size = int(attn.shape[1] // bs) + self.forward(attn[:, skip * source_batch_size :], is_cross, place_in_unet) + + def forward(self, attn, is_cross: bool, place_in_unet: str): + key = f"{place_in_unet}_{'cross' if is_cross else 'self'}" + + self.step_store[key].append(attn) + + def between_steps(self, store_step=True): + if store_step: + if self.average: + if len(self.attention_store) == 0: + self.attention_store = self.step_store + else: + for key in self.attention_store: + for i in range(len(self.attention_store[key])): + self.attention_store[key][i] += self.step_store[key][i] + else: + if len(self.attention_store) == 0: + self.attention_store = [self.step_store] + else: + self.attention_store.append(self.step_store) + + self.cur_step += 1 + self.step_store = self.get_empty_store() + + def get_attention(self, step: int): + if self.average: + attention = { + key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store + } + else: + assert step is not None + attention = self.attention_store[step] + return attention + + def aggregate_attention( + self, attention_maps, prompts, res: Union[int, Tuple[int]], from_where: List[str], is_cross: bool, select: int + ): + out = [[] for x in range(self.batch_size)] + if isinstance(res, int): + num_pixels = res**2 + resolution = (res, res) + else: + num_pixels = res[0] * res[1] + resolution = res[:2] + + for location in from_where: + for bs_item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]: + for batch, item in enumerate(bs_item): + if item.shape[1] == num_pixels: + cross_maps = item.reshape(len(prompts), -1, *resolution, item.shape[-1])[select] + out[batch].append(cross_maps) + + out = torch.stack([torch.cat(x, dim=0) for x in out]) + # average over heads + out = out.sum(1) / out.shape[1] + return out + + def __init__(self, average: bool, batch_size=1, max_resolution=16, max_size: int = None): + self.step_store = self.get_empty_store() + self.attention_store = [] + self.cur_step = 0 + self.average = average + self.batch_size = batch_size + if max_size is None: + self.max_size = max_resolution**2 + elif max_size is not None and max_resolution is None: + self.max_size = max_size + else: + raise ValueError("Only allowed to set one of max_resolution or max_size") + + +# Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionAttendAndExcitePipeline.GaussianSmoothing +class LeditsGaussianSmoothing: + def __init__(self, device): + kernel_size = [3, 3] + sigma = [0.5, 0.5] + + # The gaussian kernel is the product of the gaussian function of each dimension. 
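+        # With the fixed kernel_size=[3, 3] and sigma=[0.5, 0.5] above, each 1-D factor is roughly [0.37, 1.0, 0.37]
+        # (up to the constant that the normalization below cancels), and the resulting 3x3 weights come out to
+        # approximately 0.33 at the center, 0.12 at the edges and 0.045 at the corners.
+        # The kernel is stored as a depthwise conv2d weight and later used by the pipeline to smooth the
+        # aggregated cross-attention maps (after reflection padding) in `__call__`.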
+ kernel = 1 + meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size]) + for size, std, mgrid in zip(kernel_size, sigma, meshgrids): + mean = (size - 1) / 2 + kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2)) + + # Make sure sum of values in gaussian kernel equals 1. + kernel = kernel / torch.sum(kernel) + + # Reshape to depthwise convolutional weight + kernel = kernel.view(1, 1, *kernel.size()) + kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1)) + + self.weight = kernel.to(device) + + def __call__(self, input): + """ + Arguments: + Apply gaussian filter to input. + input (torch.Tensor): Input to apply gaussian filter on. + Returns: + filtered (torch.Tensor): Filtered output. + """ + return F.conv2d(input, weight=self.weight.to(input.dtype)) + + +class LEDITSCrossAttnProcessor: + def __init__(self, attention_store, place_in_unet, pnp, editing_prompts): + self.attnstore = attention_store + self.place_in_unet = place_in_unet + self.editing_prompts = editing_prompts + self.pnp = pnp + + def __call__( + self, + attn: Attention, + hidden_states, + encoder_hidden_states, + attention_mask=None, + temb=None, + ): + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + self.attnstore( + attention_probs, + is_cross=True, + place_in_unet=self.place_in_unet, + editing_prompts=self.editing_prompts, + PnP=self.pnp, + ) + + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + hidden_states = hidden_states / attn.rescale_output_factor + return hidden_states + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +class LEditsPPPipelineStableDiffusion( + DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin +): + """ + Pipeline for textual image editing using LEDits++ with Stable Diffusion. 
+ + This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionPipeline`]. Check the superclass + documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular + device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer ([`~transformers.CLIPTokenizer`]): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of + [`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]. If any other scheduler is passed it will automatically + be set to [`DPMSolverMultistepScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DPMSolverMultistepScheduler], + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if not isinstance(scheduler, DDIMScheduler) and not isinstance(scheduler, DPMSolverMultistepScheduler): + scheduler = DPMSolverMultistepScheduler.from_config( + scheduler.config, algorithm_type="sde-dpmsolver++", solver_order=2 + ) + logger.warning( + "This pipeline only supports DDIMScheduler and DPMSolverMultistepScheduler. " + "The scheduler has been changed to DPMSolverMultistepScheduler." + ) + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + self.inversion_steps = None + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, eta, generator=None): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
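+        # Rather than checking the scheduler class, the signature of `self.scheduler.step` is inspected below so that
+        # `eta` and `generator` are only forwarded to schedulers whose `step()` actually accepts them.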
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs + def check_inputs( + self, + negative_prompt=None, + editing_prompt_embeddings=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if editing_prompt_embeddings is not None and negative_prompt_embeds is not None: + if editing_prompt_embeddings.shape != negative_prompt_embeds.shape: + raise ValueError( + "`editing_prompt_embeddings` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `editing_prompt_embeddings` {editing_prompt_embeddings.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents): + # shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + + # if latents.shape != shape: + # raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_unet(self, attention_store, PnP: bool = False): + attn_procs = {} + for name in self.unet.attn_processors.keys(): + if name.startswith("mid_block"): + place_in_unet = "mid" + elif name.startswith("up_blocks"): + place_in_unet = "up" + elif name.startswith("down_blocks"): + place_in_unet = "down" + else: + continue + + if "attn2" in name and place_in_unet != "mid": + attn_procs[name] = LEDITSCrossAttnProcessor( + attention_store=attention_store, + place_in_unet=place_in_unet, + pnp=PnP, + editing_prompts=self.enabled_editing_prompts, + ) + else: + attn_procs[name] = AttnProcessor() + + self.unet.set_attn_processor(attn_procs) + + def encode_prompt( + self, + device, + num_images_per_prompt, + enable_edit_guidance, + negative_prompt=None, + editing_prompt=None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + editing_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
+ + Args: + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + enable_edit_guidance (`bool`): + whether to perform any editing or reconstruct the input image instead + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + editing_prompt (`str` or `List[str]`, *optional*): + Editing prompt(s) to be encoded. If not defined, one has to pass + `editing_prompt_embeds` instead. + editing_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + batch_size = self.batch_size + num_edit_tokens = None + + if negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but expected" + f" {batch_size} based on the input images. Please make sure that the passed `negative_prompt` matches" + " the batch size of `prompt`."
+ ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = negative_prompt_embeds.dtype + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + if enable_edit_guidance: + if editing_prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + # if isinstance(self, TextualInversionLoaderMixin): + # prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + if isinstance(editing_prompt, str): + editing_prompt = [editing_prompt] + + max_length = negative_prompt_embeds.shape[1] + text_inputs = self.tokenizer( + [x for item in editing_prompt for x in repeat(item, batch_size)], + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + return_length=True, + ) + + num_edit_tokens = text_inputs.length - 2 # not counting startoftext and endoftext + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer( + [x for item in editing_prompt for x in repeat(item, batch_size)], + padding="longest", + return_tensors="pt", + ).input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if ( + hasattr(self.text_encoder.config, "use_attention_mask") + and self.text_encoder.config.use_attention_mask + ): + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + editing_prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + editing_prompt_embeds = editing_prompt_embeds[0] + else: + editing_prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, which contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + editing_prompt_embeds = editing_prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer.
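+                    # `hidden_states[-(clip_skip + 1)]` selects that earlier encoder layer (clip_skip=1 -> the pre-final layer),
+                    # and the `final_layer_norm` call below keeps the embeddings in the representation space the rest of the
+                    # pipeline expects.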
+ editing_prompt_embeds = self.text_encoder.text_model.final_layer_norm(editing_prompt_embeds) + + editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device) + + bs_embed_edit, seq_len, _ = editing_prompt_embeds.shape + editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device) + editing_prompt_embeds = editing_prompt_embeds.repeat(1, num_images_per_prompt, 1) + editing_prompt_embeds = editing_prompt_embeds.view(bs_embed_edit * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return editing_prompt_embeds, negative_prompt_embeds, num_edit_tokens + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + negative_prompt: Optional[Union[str, List[str]]] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + editing_prompt: Optional[Union[str, List[str]]] = None, + editing_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + reverse_editing_direction: Optional[Union[bool, List[bool]]] = False, + edit_guidance_scale: Optional[Union[float, List[float]]] = 5, + edit_warmup_steps: Optional[Union[int, List[int]]] = 0, + edit_cooldown_steps: Optional[Union[int, List[int]]] = None, + edit_threshold: Optional[Union[float, List[float]]] = 0.9, + user_mask: Optional[torch.FloatTensor] = None, + sem_guidance: Optional[List[torch.Tensor]] = None, + use_cross_attn_mask: bool = False, + use_intersect_mask: bool = True, + attn_store_steps: Optional[List[int]] = [], + store_averaged_over_steps: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for editing. The [`~pipelines.ledits_pp.LEditsPPPipelineStableDiffusion.invert`] + method has to be called beforehand. Edits will always be performed for the last inverted image(s). + + Args: + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. 
+ output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] instead of a + plain tuple. + editing_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. The image is reconstructed by setting + `editing_prompt = None`. Guidance direction of prompt should be specified via `reverse_editing_direction`. + editing_prompt_embeds (`torch.Tensor>`, *optional*): + Pre-computed embeddings to use for guiding the image generation. Guidance direction of embedding should be + specified via `reverse_editing_direction`. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`): + Whether the corresponding prompt in `editing_prompt` should be increased or decreased. + edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5): + Guidance scale for guiding the image generation. If provided as list values should correspond to `editing_prompt`. + `edit_guidance_scale` is defined as `s_e` of equation 12 of + [LEDITS++ Paper](https://arxiv.org/abs/2301.12247). + edit_warmup_steps (`float` or `List[float]`, *optional*, defaults to 10): + Number of diffusion steps (for each prompt) for which guidance will not be applied. + edit_cooldown_steps (`float` or `List[float]`, *optional*, defaults to `None`): + Number of diffusion steps (for each prompt) after which guidance will no longer be applied. + edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9): + Masking threshold of guidance. Threshold should be proportional to the image region that is modified. + 'edit_threshold' is defined as 'λ' of equation 12 of [LEDITS++ Paper](https://arxiv.org/abs/2301.12247). + user_mask (`torch.FloatTensor`, *optional*): + User-provided mask for even better control over the editing process. This is helpful when LEDITS++'s implicit + masks do not meet user preferences. + sem_guidance (`List[torch.Tensor]`, *optional*): + List of pre-generated guidance vectors to be applied at generation. Length of the list has to + correspond to `num_inference_steps`. + use_cross_attn_mask (`bool`, defaults to `False`): + Whether cross-attention masks are used. Cross-attention masks are always used when use_intersect_mask + is set to true. Cross-attention masks are defined as 'M^1' of equation 12 of + [LEDITS++ paper](https://arxiv.org/pdf/2311.16711.pdf). + use_intersect_mask (`bool`, defaults to `True`): + Whether the masking term is calculated as intersection of cross-attention masks and masks derived + from the noise estimate. Cross-attention mask are defined as 'M^1' and masks derived from the noise + estimate are defined as 'M^2' of equation 12 of [LEDITS++ paper](https://arxiv.org/pdf/2311.16711.pdf). + attn_store_steps (`List[int]`, *optional*): + Steps for which the attention maps are stored in the AttentionStore. Just for visualization purposes. + store_averaged_over_steps (`bool`, defaults to `True`): + Whether the attention maps for the 'attn_store_steps' are stored averaged over the diffusion steps. 
+ If False, attention maps for each step are stores separately. Just for visualization purposes. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when + using zero terminal SNR. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] if `return_dict` is True, + otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the + second element is a list of `bool`s denoting whether the corresponding generated image likely represents + "not-safe-for-work" (nsfw) content, according to the `safety_checker`. + """ + + if self.inversion_steps is None: + raise ValueError( + "You need to invert an input image first before calling the pipeline. The `invert` method has to be called beforehand. Edits will always be performed for the last inverted image(s)." + ) + + eta = self.eta + num_images_per_prompt = 1 + latents = self.init_latents + + zs = self.zs + self.scheduler.set_timesteps(len(self.scheduler.timesteps)) + + if use_intersect_mask: + use_cross_attn_mask = True + + if use_cross_attn_mask: + self.smoothing = LeditsGaussianSmoothing(self.device) + + if user_mask is not None: + user_mask = user_mask.to(self.device) + + org_prompt = "" + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + negative_prompt, + editing_prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + batch_size = self.batch_size + + if editing_prompt: + enable_edit_guidance = True + if isinstance(editing_prompt, str): + editing_prompt = [editing_prompt] + self.enabled_editing_prompts = len(editing_prompt) + elif editing_prompt_embeds is not None: + enable_edit_guidance = True + self.enabled_editing_prompts = editing_prompt_embeds.shape[0] + else: + self.enabled_editing_prompts = 0 + enable_edit_guidance = False + + # 3. 
Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + edit_concepts, uncond_embeddings, num_edit_tokens = self.encode_prompt( + editing_prompt=editing_prompt, + device=self.device, + num_images_per_prompt=num_images_per_prompt, + enable_edit_guidance=enable_edit_guidance, + negative_prompt=negative_prompt, + editing_prompt_embeds=editing_prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if enable_edit_guidance: + text_embeddings = torch.cat([uncond_embeddings, edit_concepts]) + self.text_cross_attention_maps = [editing_prompt] if isinstance(editing_prompt, str) else editing_prompt + else: + text_embeddings = torch.cat([uncond_embeddings]) + + # 4. Prepare timesteps + # self.scheduler.set_timesteps(num_inference_steps, device=self.device) + timesteps = self.inversion_steps + t_to_idx = {int(v): k for k, v in enumerate(timesteps[-zs.shape[0] :])} + + if use_cross_attn_mask: + self.attention_store = LeditsAttentionStore( + average=store_averaged_over_steps, + batch_size=batch_size, + max_size=(latents.shape[-2] / 4.0) * (latents.shape[-1] / 4.0), + max_resolution=None, + ) + self.prepare_unet(self.attention_store, PnP=False) + resolution = latents.shape[-2:] + att_res = (int(resolution[0] / 4), int(resolution[1] / 4)) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + None, + None, + text_embeddings.dtype, + self.device, + latents, + ) + + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(eta) + + self.sem_guidance = None + self.activation_mask = None + + # 7. 
Denoising loop + num_warmup_steps = 0 + with self.progress_bar(total=len(timesteps)) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + + if enable_edit_guidance: + latent_model_input = torch.cat([latents] * (1 + self.enabled_editing_prompts)) + else: + latent_model_input = latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + text_embed_input = text_embeddings + + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embed_input).sample + + noise_pred_out = noise_pred.chunk(1 + self.enabled_editing_prompts) # [b,4, 64, 64] + noise_pred_uncond = noise_pred_out[0] + noise_pred_edit_concepts = noise_pred_out[1:] + + noise_guidance_edit = torch.zeros( + noise_pred_uncond.shape, + device=self.device, + dtype=noise_pred_uncond.dtype, + ) + + if sem_guidance is not None and len(sem_guidance) > i: + noise_guidance_edit += sem_guidance[i].to(self.device) + + elif enable_edit_guidance: + if self.activation_mask is None: + self.activation_mask = torch.zeros( + (len(timesteps), len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape) + ) + + if self.sem_guidance is None: + self.sem_guidance = torch.zeros((len(timesteps), *noise_pred_uncond.shape)) + + for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts): + if isinstance(edit_warmup_steps, list): + edit_warmup_steps_c = edit_warmup_steps[c] + else: + edit_warmup_steps_c = edit_warmup_steps + if i < edit_warmup_steps_c: + continue + + if isinstance(edit_guidance_scale, list): + edit_guidance_scale_c = edit_guidance_scale[c] + else: + edit_guidance_scale_c = edit_guidance_scale + + if isinstance(edit_threshold, list): + edit_threshold_c = edit_threshold[c] + else: + edit_threshold_c = edit_threshold + if isinstance(reverse_editing_direction, list): + reverse_editing_direction_c = reverse_editing_direction[c] + else: + reverse_editing_direction_c = reverse_editing_direction + + if isinstance(edit_cooldown_steps, list): + edit_cooldown_steps_c = edit_cooldown_steps[c] + elif edit_cooldown_steps is None: + edit_cooldown_steps_c = i + 1 + else: + edit_cooldown_steps_c = edit_cooldown_steps + + if i >= edit_cooldown_steps_c: + continue + + noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond + + if reverse_editing_direction_c: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 + + noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c + + if user_mask is not None: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * user_mask + + if use_cross_attn_mask: + out = self.attention_store.aggregate_attention( + attention_maps=self.attention_store.step_store, + prompts=self.text_cross_attention_maps, + res=att_res, + from_where=["up", "down"], + is_cross=True, + select=self.text_cross_attention_maps.index(editing_prompt[c]), + ) + attn_map = out[:, :, :, 1 : 1 + num_edit_tokens[c]] # 0 -> startoftext + + # average over all tokens + if attn_map.shape[3] != num_edit_tokens[c]: + raise ValueError( + f"Incorrect shape of attention_map. Expected size {num_edit_tokens[c]}, but found {attn_map.shape[3]}!" 
+ ) + + attn_map = torch.sum(attn_map, dim=3) + + # gaussian_smoothing + attn_map = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode="reflect") + attn_map = self.smoothing(attn_map).squeeze(1) + + # torch.quantile function expects float32 + if attn_map.dtype == torch.float32: + tmp = torch.quantile(attn_map.flatten(start_dim=1), edit_threshold_c, dim=1) + else: + tmp = torch.quantile( + attn_map.flatten(start_dim=1).to(torch.float32), edit_threshold_c, dim=1 + ).to(attn_map.dtype) + attn_mask = torch.where( + attn_map >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0 + ) + + # resolution must match latent space dimension + attn_mask = F.interpolate( + attn_mask.unsqueeze(1), + noise_guidance_edit_tmp.shape[-2:], # 64,64 + ).repeat(1, 4, 1, 1) + self.activation_mask[i, c] = attn_mask.detach().cpu() + if not use_intersect_mask: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask + + if use_intersect_mask: + if t <= 800: + noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) + noise_guidance_edit_tmp_quantile = torch.sum( + noise_guidance_edit_tmp_quantile, dim=1, keepdim=True + ) + noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat( + 1, self.unet.config.in_channels, 1, 1 + ) + + # torch.quantile function expects float32 + if noise_guidance_edit_tmp_quantile.dtype == torch.float32: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2), + edit_threshold_c, + dim=2, + keepdim=False, + ) + else: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), + edit_threshold_c, + dim=2, + keepdim=False, + ).to(noise_guidance_edit_tmp_quantile.dtype) + + intersect_mask = ( + torch.where( + noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], + torch.ones_like(noise_guidance_edit_tmp), + torch.zeros_like(noise_guidance_edit_tmp), + ) + * attn_mask + ) + + self.activation_mask[i, c] = intersect_mask.detach().cpu() + + noise_guidance_edit_tmp = noise_guidance_edit_tmp * intersect_mask + + else: + # print(f"only attention mask for step {i}") + noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask + + elif not use_cross_attn_mask: + # calculate quantile + noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) + noise_guidance_edit_tmp_quantile = torch.sum( + noise_guidance_edit_tmp_quantile, dim=1, keepdim=True + ) + noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, 4, 1, 1) + + # torch.quantile function expects float32 + if noise_guidance_edit_tmp_quantile.dtype == torch.float32: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2), + edit_threshold_c, + dim=2, + keepdim=False, + ) + else: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), + edit_threshold_c, + dim=2, + keepdim=False, + ).to(noise_guidance_edit_tmp_quantile.dtype) + + self.activation_mask[i, c] = ( + torch.where( + noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], + torch.ones_like(noise_guidance_edit_tmp), + torch.zeros_like(noise_guidance_edit_tmp), + ) + .detach() + .cpu() + ) + + noise_guidance_edit_tmp = torch.where( + noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], + noise_guidance_edit_tmp, + torch.zeros_like(noise_guidance_edit_tmp), + ) + + noise_guidance_edit += noise_guidance_edit_tmp + + self.sem_guidance[i] = noise_guidance_edit.detach().cpu() + + noise_pred = noise_pred_uncond + noise_guidance_edit + + if enable_edit_guidance and 
self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg( + noise_pred, + noise_pred_edit_concepts.mean(dim=0, keepdim=False), + guidance_rescale=self.guidance_rescale, + ) + + idx = t_to_idx[int(t)] + latents = self.scheduler.step( + noise_pred, t, latents, variance_noise=zs[idx], **extra_step_kwargs + ).prev_sample + + # step callback + if use_cross_attn_mask: + store_step = i in attn_store_steps + self.attention_store.between_steps(store_step) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + # prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # 8. Post-processing + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, self.device, text_embeddings.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return LEditsPPDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + @torch.no_grad() + def invert( + self, + image: PipelineImageInput, + source_prompt: str = "", + source_guidance_scale: float = 3.5, + num_inversion_steps: int = 30, + skip: float = 0.15, + generator: Optional[torch.Generator] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + height: Optional[int] = None, + width: Optional[int] = None, + resize_mode: Optional[str] = "default", + crops_coords: Optional[Tuple[int, int, int, int]] = None, + ): + r""" + The function to the pipeline for image inversion as described by the [LEDITS++ Paper](https://arxiv.org/abs/2301.12247). + If the scheduler is set to [`~schedulers.DDIMScheduler`] the inversion proposed by [edit-friendly DPDM](https://arxiv.org/abs/2304.06140) + will be performed instead. + + Args: + image (`PipelineImageInput`): + Input for the image(s) that are to be edited. Multiple input images have to default to the same aspect + ratio. + source_prompt (`str`, defaults to `""`): + Prompt describing the input image that will be used for guidance during inversion. Guidance is disabled + if the `source_prompt` is `""`. + source_guidance_scale (`float`, defaults to `3.5`): + Strength of guidance during inversion. + num_inversion_steps (`int`, defaults to `30`): + Number of total performed inversion steps after discarding the initial `skip` steps. + skip (`float`, defaults to `0.15`): + Portion of initial steps that will be ignored for inversion and subsequent generation. Lower values + will lead to stronger changes to the input image. `skip` has to be between `0` and `1`. 
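For reference, the intended workflow is the same two-step pattern shown in the SDXL example doc-string later in this patch: call `invert` once on the input image, then run the pipeline with one or more editing prompts. Below is a minimal, illustrative sketch for the Stable Diffusion variant; the checkpoint id, image URL and edit settings are placeholders borrowed from that example, not a prescribed configuration.

```py
import torch
import PIL.Image
import requests
from io import BytesIO

from diffusers import LEditsPPPipelineStableDiffusion

# Illustrative checkpoint; any Stable Diffusion checkpoint supported by the pipeline should work.
pipe = LEditsPPPipelineStableDiffusion.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/tennis.jpg"
image = PIL.Image.open(BytesIO(requests.get(url).content)).convert("RGB")

# 1) Invert the input image; `skip` discards the given portion of initial steps.
_ = pipe.invert(image=image, num_inversion_steps=50, skip=0.15)

# 2) Edit the last inverted image; per-concept lists control direction, strength and masking.
edited = pipe(
    editing_prompt=["tennis ball", "tomato"],
    reverse_editing_direction=[True, False],
    edit_guidance_scale=[5.0, 10.0],
    edit_threshold=[0.9, 0.85],
).images[0]
```

Because `invert` stores the inverted latents and noise maps on the pipeline itself, the editing call takes no image argument and always operates on the last inverted image(s).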
+ generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + inversion deterministic. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + height (`int`, *optional*, defaults to `None`): + The height in preprocessed image. If `None`, will use the `get_default_height_width()` to get default height. + width (`int`, *optional*`, defaults to `None`): + The width in preprocessed. If `None`, will use get_default_height_width()` to get the default width. + resize_mode (`str`, *optional*, defaults to `default`): + The resize mode, can be one of `default` or `fill`. If `default`, will resize the image to fit + within the specified width and height, and it may not maintaining the original aspect ratio. + If `fill`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image + within the dimensions, filling empty with data from image. + If `crop`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image + within the dimensions, cropping the excess. + Note that resize_mode `fill` and `crop` are only supported for PIL image input. + crops_coords (`List[Tuple[int, int, int, int]]`, *optional*, defaults to `None`): + The crop coordinates for each image in the batch. If `None`, will not crop the image. + + Returns: + [`~pipelines.ledits_pp.LEditsPPInversionPipelineOutput`]: + Output will contain the resized input image(s) and respective VAE reconstruction(s). + """ + # Reset attn processor, we do not want to store attn maps during inversion + self.unet.set_attn_processor(AttnProcessor()) + + self.eta = 1.0 + + self.scheduler.config.timestep_spacing = "leading" + self.scheduler.set_timesteps(int(num_inversion_steps * (1 + skip))) + self.inversion_steps = self.scheduler.timesteps[-num_inversion_steps:] + timesteps = self.inversion_steps + + # 1. encode image + x0, resized = self.encode_image( + image, + dtype=self.text_encoder.dtype, + height=height, + width=width, + resize_mode=resize_mode, + crops_coords=crops_coords, + ) + self.batch_size = x0.shape[0] + + # autoencoder reconstruction + image_rec = self.vae.decode(x0 / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] + image_rec = self.image_processor.postprocess(image_rec, output_type="pil") + + # 2. get embeddings + do_classifier_free_guidance = source_guidance_scale > 1.0 + + lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + + uncond_embedding, text_embeddings, _ = self.encode_prompt( + num_images_per_prompt=1, + device=self.device, + negative_prompt=None, + enable_edit_guidance=do_classifier_free_guidance, + editing_prompt=source_prompt, + lora_scale=lora_scale, + clip_skip=clip_skip, + ) + + # 3. 
find zs and xts + variance_noise_shape = (num_inversion_steps, *x0.shape) + + # intermediate latents + t_to_idx = {int(v): k for k, v in enumerate(timesteps)} + xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype) + + for t in reversed(timesteps): + idx = num_inversion_steps - t_to_idx[int(t)] - 1 + noise = randn_tensor(shape=x0.shape, generator=generator, device=self.device, dtype=x0.dtype) + xts[idx] = self.scheduler.add_noise(x0, noise, torch.Tensor([t])) + xts = torch.cat([x0.unsqueeze(0), xts], dim=0) + + self.scheduler.set_timesteps(len(self.scheduler.timesteps)) + # noise maps + zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype) + + with self.progress_bar(total=len(timesteps)) as progress_bar: + for t in timesteps: + idx = num_inversion_steps - t_to_idx[int(t)] - 1 + # 1. predict noise residual + xt = xts[idx + 1] + + noise_pred = self.unet(xt, timestep=t, encoder_hidden_states=uncond_embedding).sample + + if not source_prompt == "": + noise_pred_cond = self.unet(xt, timestep=t, encoder_hidden_states=text_embeddings).sample + noise_pred = noise_pred + source_guidance_scale * (noise_pred_cond - noise_pred) + + xtm1 = xts[idx] + z, xtm1_corrected = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, self.eta) + zs[idx] = z + + # correction to avoid error accumulation + xts[idx] = xtm1_corrected + + progress_bar.update() + + self.init_latents = xts[-1].expand(self.batch_size, -1, -1, -1) + zs = zs.flip(0) + self.zs = zs + + return LEditsPPInversionPipelineOutput(images=resized, vae_reconstruction_images=image_rec) + + @torch.no_grad() + def encode_image(self, image, dtype=None, height=None, width=None, resize_mode="default", crops_coords=None): + image = self.image_processor.preprocess( + image=image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + resized = self.image_processor.postprocess(image=image, output_type="pil") + + if max(image.shape[-2:]) > self.vae.config["sample_size"] * 1.5: + logger.warning( + "Your input images far exceed the default resolution of the underlying diffusion model. " + "The output images may contain severe artifacts! " + "Consider down-sampling the input using the `height` and `width` parameters" + ) + image = image.to(dtype) + + x0 = self.vae.encode(image.to(self.device)).latent_dist.mode() + x0 = x0.to(dtype) + x0 = self.vae.config.scaling_factor * x0 + return x0, resized + + +def compute_noise_ddim(scheduler, prev_latents, latents, timestep, noise_pred, eta): + # 1. get previous step value (=t-1) + prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps + + # 2. compute alphas, betas + alpha_prod_t = scheduler.alphas_cumprod[timestep] + alpha_prod_t_prev = ( + scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod + ) + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) + + # 4. Clip "predicted x_0" + if scheduler.config.clip_sample: + pred_original_sample = torch.clamp(pred_original_sample, -1, 1) + + # 5. 
compute variance: "sigma_t(η)" -> see formula (16) + # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) + variance = scheduler._get_variance(timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred + + # modifed so that updated xtm1 is returned as well (to avoid error accumulation) + mu_xt = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + if variance > 0.0: + noise = (prev_latents - mu_xt) / (variance ** (0.5) * eta) + else: + noise = torch.tensor([0.0]).to(latents.device) + + return noise, mu_xt + (eta * variance**0.5) * noise + + +def compute_noise_sde_dpm_pp_2nd(scheduler, prev_latents, latents, timestep, noise_pred, eta): + def first_order_update(model_output, sample): # timestep, prev_timestep, sample): + sigma_t, sigma_s = scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index] + alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = scheduler._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + + mu_xt = (sigma_t / sigma_s * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + mu_xt = scheduler.dpm_solver_first_order_update( + model_output=model_output, sample=sample, noise=torch.zeros_like(sample) + ) + + sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) + if sigma > 0.0: + noise = (prev_latents - mu_xt) / sigma + else: + noise = torch.tensor([0.0]).to(sample.device) + + prev_sample = mu_xt + sigma * noise + return noise, prev_sample + + def second_order_update(model_output_list, sample): # timestep_list, prev_timestep, sample): + sigma_t, sigma_s0, sigma_s1 = ( + scheduler.sigmas[scheduler.step_index + 1], + scheduler.sigmas[scheduler.step_index], + scheduler.sigmas[scheduler.step_index - 1], + ) + + alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = scheduler._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = scheduler._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + + mu_xt = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + ) + + sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) + if sigma > 0.0: + noise = (prev_latents - mu_xt) / sigma + else: + noise = torch.tensor([0.0]).to(sample.device) + + prev_sample = mu_xt + sigma * noise + + return noise, prev_sample + + if scheduler.step_index is None: + scheduler._init_step_index(timestep) + + model_output = scheduler.convert_model_output(model_output=noise_pred, sample=latents) + for i in range(scheduler.config.solver_order - 1): + scheduler.model_outputs[i] = scheduler.model_outputs[i + 1] + scheduler.model_outputs[-1] = model_output + + if scheduler.lower_order_nums < 1: + noise, prev_sample = first_order_update(model_output, latents) + else: + noise, prev_sample = second_order_update(scheduler.model_outputs, latents) + + if 
scheduler.lower_order_nums < scheduler.config.solver_order: + scheduler.lower_order_nums += 1 + + # upon completion increase step index by one + scheduler._step_index += 1 + + return noise, prev_sample + + +def compute_noise(scheduler, *args): + if isinstance(scheduler, DDIMScheduler): + return compute_noise_ddim(scheduler, *args) + elif ( + isinstance(scheduler, DPMSolverMultistepScheduler) + and scheduler.config.algorithm_type == "sde-dpmsolver++" + and scheduler.config.solver_order == 2 + ): + return compute_noise_sde_dpm_pp_2nd(scheduler, *args) + else: + raise NotImplementedError diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py new file mode 100644 index 000000000..874a10a7c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py @@ -0,0 +1,1797 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import math +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import ( + Attention, + AttnProcessor, + AttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> import PIL + >>> import requests + >>> from io import BytesIO + + >>> from diffusers import LEditsPPPipelineStableDiffusionXL + + >>> pipe = 
LEditsPPPipelineStableDiffusionXL.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + >>> img_url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/tennis.jpg" + >>> image = download_image(img_url) + + >>> _ = pipe.invert( + ... image = image, + ... num_inversion_steps=50, + ... skip=0.2 + ... ) + + >>> edited_image = pipe( + ... editing_prompt=["tennis ball","tomato"], + ... reverse_editing_direction=[True,False], + ... edit_guidance_scale=[5.0,10.0], + ... edit_threshold=[0.9,0.85], + ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LeditsAttentionStore +class LeditsAttentionStore: + @staticmethod + def get_empty_store(): + return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []} + + def __call__(self, attn, is_cross: bool, place_in_unet: str, editing_prompts, PnP=False): + # attn.shape = batch_size * head_size, seq_len query, seq_len_key + if attn.shape[1] <= self.max_size: + bs = 1 + int(PnP) + editing_prompts + skip = 2 if PnP else 1 # skip PnP & unconditional + attn = torch.stack(attn.split(self.batch_size)).permute(1, 0, 2, 3) + source_batch_size = int(attn.shape[1] // bs) + self.forward(attn[:, skip * source_batch_size :], is_cross, place_in_unet) + + def forward(self, attn, is_cross: bool, place_in_unet: str): + key = f"{place_in_unet}_{'cross' if is_cross else 'self'}" + + self.step_store[key].append(attn) + + def between_steps(self, store_step=True): + if store_step: + if self.average: + if len(self.attention_store) == 0: + self.attention_store = self.step_store + else: + for key in self.attention_store: + for i in range(len(self.attention_store[key])): + self.attention_store[key][i] += self.step_store[key][i] + else: + if len(self.attention_store) == 0: + self.attention_store = [self.step_store] + else: + self.attention_store.append(self.step_store) + + self.cur_step += 1 + self.step_store = self.get_empty_store() + + def get_attention(self, step: int): + if self.average: + attention = { + key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store + } + else: + assert step is not None + attention = self.attention_store[step] + return attention + + def aggregate_attention( + self, attention_maps, prompts, res: Union[int, Tuple[int]], from_where: List[str], is_cross: bool, select: int + ): + out = [[] for x in range(self.batch_size)] + if isinstance(res, int): + num_pixels = res**2 + resolution = (res, res) + else: + num_pixels = res[0] * res[1] + resolution = res[:2] + + for location in from_where: + for bs_item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]: + for batch, item in enumerate(bs_item): + if item.shape[1] == num_pixels: + cross_maps = item.reshape(len(prompts), -1, *resolution, item.shape[-1])[select] + out[batch].append(cross_maps) + + out = torch.stack([torch.cat(x, dim=0) for x in out]) + # average over heads + out = out.sum(1) / out.shape[1] + return out + + def __init__(self, average: bool, batch_size=1, max_resolution=16, max_size: int = None): + self.step_store = self.get_empty_store() + self.attention_store = [] + self.cur_step = 0 + self.average = average + self.batch_size = batch_size + if max_size is None: + self.max_size = max_resolution**2 + elif max_size is not 
None and max_resolution is None: + self.max_size = max_size + else: + raise ValueError("Only allowed to set one of max_resolution or max_size") + + +# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LeditsGaussianSmoothing +class LeditsGaussianSmoothing: + def __init__(self, device): + kernel_size = [3, 3] + sigma = [0.5, 0.5] + + # The gaussian kernel is the product of the gaussian function of each dimension. + kernel = 1 + meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size]) + for size, std, mgrid in zip(kernel_size, sigma, meshgrids): + mean = (size - 1) / 2 + kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2)) + + # Make sure sum of values in gaussian kernel equals 1. + kernel = kernel / torch.sum(kernel) + + # Reshape to depthwise convolutional weight + kernel = kernel.view(1, 1, *kernel.size()) + kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1)) + + self.weight = kernel.to(device) + + def __call__(self, input): + """ + Arguments: + Apply gaussian filter to input. + input (torch.Tensor): Input to apply gaussian filter on. + Returns: + filtered (torch.Tensor): Filtered output. + """ + return F.conv2d(input, weight=self.weight.to(input.dtype)) + + +# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEDITSCrossAttnProcessor +class LEDITSCrossAttnProcessor: + def __init__(self, attention_store, place_in_unet, pnp, editing_prompts): + self.attnstore = attention_store + self.place_in_unet = place_in_unet + self.editing_prompts = editing_prompts + self.pnp = pnp + + def __call__( + self, + attn: Attention, + hidden_states, + encoder_hidden_states, + attention_mask=None, + temb=None, + ): + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + self.attnstore( + attention_probs, + is_cross=True, + place_in_unet=self.place_in_unet, + editing_prompts=self.editing_prompts, + PnP=self.pnp, + ) + + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + hidden_states = hidden_states / attn.rescale_output_factor + return hidden_states + + +class LEditsPPPipelineStableDiffusionXL( + DiffusionPipeline, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, +): + """ + Pipeline for textual image editing using LEDits++ with Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionXLPipeline`]. Check the superclass + documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular + device, etc.). 
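The cross-attention masks are smoothed with the `LeditsGaussianSmoothing` helper defined above: a fixed 3x3 Gaussian kernel (sigma 0.5) applied as a single-channel convolution, with reflect padding done by the caller. The following self-contained sketch reproduces that construction for illustration only; the random `attn_map` stands in for an aggregated cross-attention map.

```py
import math

import torch
import torch.nn.functional as F

kernel_size, sigma = [3, 3], [0.5, 0.5]

# Product of per-dimension Gaussians, as in LeditsGaussianSmoothing.__init__.
kernel = 1
meshgrids = torch.meshgrid([torch.arange(s, dtype=torch.float32) for s in kernel_size], indexing="ij")
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
    mean = (size - 1) / 2
    kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2))
kernel = (kernel / kernel.sum()).view(1, 1, *kernel_size)  # normalized convolution weight

attn_map = torch.rand(2, 16, 16)  # stand-in for per-concept attention maps at the reduced resolution

# Reflect-pad first (as the pipeline does), then convolve; the spatial size is preserved.
smoothed = F.conv2d(F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode="reflect"), kernel).squeeze(1)

assert torch.isclose(kernel.sum(), torch.tensor(1.0))
assert smoothed.shape == attn_map.shape
```

Because the padding happens outside the helper, the smoothed map keeps the original spatial resolution before it is thresholded into a binary mask.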
+ + In addition the pipeline inherits the following loading methods: + - *LoRA*: [`LEditsPPPipelineStableDiffusionXL.load_lora_weights`] + - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`] + + as well as the following saving methods: + - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`] + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer ([`~transformers.CLIPTokenizer`]): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 ([`~transformers.CLIPTokenizer`]): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of + [`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]. If any other scheduler is passed it will automatically + be set to [`DPMSolverMultistepScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. 
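As the `compute_noise` dispatch earlier in this patch shows, the inversion math is only implemented for `DDIMScheduler` and for `DPMSolverMultistepScheduler` in its second-order SDE-DPM-Solver++ configuration; the constructor below silently swaps any other scheduler to that configuration. A small sketch of selecting it explicitly instead (checkpoint id taken from the example doc-string above):

```py
import torch

from diffusers import DPMSolverMultistepScheduler, LEditsPPPipelineStableDiffusionXL

pipe = LEditsPPPipelineStableDiffusionXL.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)

# Match the configuration the pipeline would otherwise fall back to:
# SDE-DPM-Solver++ with solver_order=2, which `compute_noise` supports.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, algorithm_type="sde-dpmsolver++", solver_order=2
)
pipe = pipe.to("cuda")
```

A `DDIMScheduler` created via `DDIMScheduler.from_config(pipe.scheduler.config)` is the other supported choice; configuring the scheduler up front avoids relying on the warning-and-replace behaviour in `__init__`.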
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DPMSolverMultistepScheduler, DDIMScheduler], + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + if not isinstance(scheduler, DDIMScheduler) and not isinstance(scheduler, DPMSolverMultistepScheduler): + self.scheduler = DPMSolverMultistepScheduler.from_config( + scheduler.config, algorithm_type="sde-dpmsolver++", solver_order=2 + ) + logger.warning( + "This pipeline only supports DDIMScheduler and DPMSolverMultistepScheduler. " + "The scheduler has been changed to DPMSolverMultistepScheduler." + ) + + self.default_sample_size = self.unet.config.sample_size + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + self.inversion_steps = None + + def encode_prompt( + self, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + enable_edit_guidance: bool = True, + editing_prompt: Optional[str] = None, + editing_prompt_embeds: Optional[torch.FloatTensor] = None, + editing_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + ) -> object: + r""" + Encodes the prompt into text encoder hidden states. + + Args: + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + enable_edit_guidance (`bool`): + Whether to guide towards an editing prompt or not. + editing_prompt (`str` or `List[str]`, *optional*): + Editing prompt(s) to be encoded. If not defined and 'enable_edit_guidance' is True, one has to pass + `editing_prompt_embeds` instead. + editing_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated edit text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided and 'enable_edit_guidance' is True, editing_prompt_embeds will be generated from `editing_prompt` input + argument. + editing_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated edit pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled editing_pooled_prompt_embeds will be generated from `editing_prompt` + input argument. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + batch_size = self.batch_size + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + num_edit_tokens = 0 + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + + if negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + + if batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but image inversion " + f" has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of the input images." 
+ ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(negative_prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(negative_pooled_prompt_embeds) + + if enable_edit_guidance and editing_prompt_embeds is None: + editing_prompt_2 = editing_prompt + + editing_prompts = [editing_prompt, editing_prompt_2] + edit_prompt_embeds_list = [] + + for editing_prompt, tokenizer, text_encoder in zip(editing_prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + editing_prompt = self.maybe_convert_prompt(editing_prompt, tokenizer) + + max_length = negative_prompt_embeds.shape[1] + edit_concepts_input = tokenizer( + # [x for item in editing_prompt for x in repeat(item, batch_size)], + editing_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + return_length=True, + ) + num_edit_tokens = edit_concepts_input.length - 2 + + edit_concepts_embeds = text_encoder( + edit_concepts_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + editing_pooled_prompt_embeds = edit_concepts_embeds[0] + if clip_skip is None: + edit_concepts_embeds = edit_concepts_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
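+                    # With a user-provided `clip_skip`, the index below becomes
+                    # hidden_states[-(clip_skip + 2)], e.g. clip_skip=1 selects hidden_states[-3];
+                    # the "+ 2" preserves the default penultimate-layer behaviour of the
+                    # `clip_skip is None` branch above.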
+ edit_concepts_embeds = edit_concepts_embeds.hidden_states[-(clip_skip + 2)] + + edit_prompt_embeds_list.append(edit_concepts_embeds) + + edit_concepts_embeds = torch.concat(edit_prompt_embeds_list, dim=-1) + elif not enable_edit_guidance: + edit_concepts_embeds = None + editing_pooled_prompt_embeds = None + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + bs_embed, seq_len, _ = negative_prompt_embeds.shape + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if enable_edit_guidance: + bs_embed_edit, seq_len, _ = edit_concepts_embeds.shape + edit_concepts_embeds = edit_concepts_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + edit_concepts_embeds = edit_concepts_embeds.repeat(1, num_images_per_prompt, 1) + edit_concepts_embeds = edit_concepts_embeds.view(bs_embed_edit * num_images_per_prompt, seq_len, -1) + + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if enable_edit_guidance: + editing_pooled_prompt_embeds = editing_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed_edit * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return ( + negative_prompt_embeds, + edit_concepts_embeds, + negative_pooled_prompt_embeds, + editing_pooled_prompt_embeds, + num_edit_tokens, + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, eta, generator=None): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + negative_prompt=None, + negative_prompt_2=None, + negative_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ): + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, device, latents): + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = 
torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + # Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEditsPPPipelineStableDiffusion.prepare_unet + def prepare_unet(self, attention_store, PnP: bool = False): + attn_procs = {} + for name in self.unet.attn_processors.keys(): + if name.startswith("mid_block"): + place_in_unet = "mid" + elif name.startswith("up_blocks"): + place_in_unet = "up" + elif name.startswith("down_blocks"): + place_in_unet = "down" + else: + continue + + if "attn2" in name and place_in_unet != "mid": + attn_procs[name] = LEDITSCrossAttnProcessor( + attention_store=attention_store, + place_in_unet=place_in_unet, + pnp=PnP, + editing_prompts=self.enabled_editing_prompts, + ) + else: + attn_procs[name] = AttnProcessor() + + self.unet.set_attn_processor(attn_procs) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + denoising_end: Optional[float] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + editing_prompt: Optional[Union[str, List[str]]] = None, + editing_prompt_embeddings: Optional[torch.Tensor] = None, + editing_pooled_prompt_embeds: Optional[torch.Tensor] = None, + reverse_editing_direction: Optional[Union[bool, List[bool]]] = False, + edit_guidance_scale: Optional[Union[float, List[float]]] = 5, + edit_warmup_steps: Optional[Union[int, List[int]]] = 0, + edit_cooldown_steps: Optional[Union[int, List[int]]] = None, + edit_threshold: Optional[Union[float, List[float]]] = 0.9, + sem_guidance: Optional[List[torch.Tensor]] = None, + use_cross_attn_mask: bool = False, + use_intersect_mask: bool = False, + user_mask: Optional[torch.FloatTensor] = None, + attn_store_steps: Optional[List[int]] = [], + store_averaged_over_steps: bool = True, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for editing. The [`~pipelines.ledits_pp.LEditsPPPipelineStableDiffusionXL.invert`] + method has to be called beforehand. 
Edits will always be performed for the last inverted image(s). + + Args: + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead + of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.7): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). + Guidance rescale factor should fix overexposure when using zero terminal SNR. + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. 
Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + editing_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. The image is reconstructed by setting + `editing_prompt = None`. Guidance direction of prompt should be specified via `reverse_editing_direction`. + editing_prompt_embeddings (`torch.Tensor`, *optional*): + Pre-generated edit text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, editing_prompt_embeddings will be generated from `editing_prompt` input + argument. + editing_pooled_prompt_embeddings (`torch.Tensor`, *optional*): + Pre-generated pooled edit text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, editing_prompt_embeddings will be generated from `editing_prompt` input + argument. + reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`): + Whether the corresponding prompt in `editing_prompt` should be increased or decreased. + edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5): + Guidance scale for guiding the image generation. If provided as list values should correspond to `editing_prompt`. + `edit_guidance_scale` is defined as `s_e` of equation 12 of + [LEDITS++ Paper](https://arxiv.org/abs/2301.12247). + edit_warmup_steps (`float` or `List[float]`, *optional*, defaults to 10): + Number of diffusion steps (for each prompt) for which guidance is not applied. + edit_cooldown_steps (`float` or `List[float]`, *optional*, defaults to `None`): + Number of diffusion steps (for each prompt) after which guidance is no longer applied. + edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9): + Masking threshold of guidance. Threshold should be proportional to the image region that is modified. + 'edit_threshold' is defined as 'λ' of equation 12 of [LEDITS++ Paper](https://arxiv.org/abs/2301.12247). + sem_guidance (`List[torch.Tensor]`, *optional*): + List of pre-generated guidance vectors to be applied at generation. Length of the list has to + correspond to `num_inference_steps`. + use_cross_attn_mask: + Whether cross-attention masks are used. Cross-attention masks are always used when use_intersect_mask + is set to true. Cross-attention masks are defined as 'M^1' of equation 12 of + [LEDITS++ paper](https://arxiv.org/pdf/2311.16711.pdf). + use_intersect_mask: + Whether the masking term is calculated as intersection of cross-attention masks and masks derived + from the noise estimate. Cross-attention mask are defined as 'M^1' and masks derived from the noise + estimate are defined as 'M^2' of equation 12 of [LEDITS++ paper](https://arxiv.org/pdf/2311.16711.pdf). + user_mask: + User-provided mask for even better control over the editing process. This is helpful when LEDITS++'s implicit + masks do not meet user preferences. 
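The implicit masks referenced here are computed inside the denoising loop (added earlier in this patch) by thresholding the per-pixel magnitude of each concept's guidance at the `edit_threshold` quantile, optionally intersected with the cross-attention mask. The sketch below mirrors the `use_cross_attn_mask=False` branch with illustrative shapes; it is not part of the pipeline itself.

```py
import torch

edit_threshold = 0.9
# Illustrative latent-space guidance for one edit concept: (batch, channels, height, width).
noise_guidance_edit_tmp = torch.randn(1, 4, 64, 64)

# Per-pixel magnitude, summed over latent channels and broadcast back to all channels.
magnitude = torch.abs(noise_guidance_edit_tmp).sum(dim=1, keepdim=True).repeat(1, 4, 1, 1)

# torch.quantile expects float32; keep only the (1 - edit_threshold) strongest pixels.
tmp = torch.quantile(magnitude.flatten(start_dim=2).to(torch.float32), edit_threshold, dim=2, keepdim=False)
mask = torch.where(
    magnitude >= tmp[:, :, None, None],
    torch.ones_like(noise_guidance_edit_tmp),
    torch.zeros_like(noise_guidance_edit_tmp),
)

masked_guidance = noise_guidance_edit_tmp * mask  # guidance outside the mask is zeroed
assert mask.shape == noise_guidance_edit_tmp.shape
```

Only the top `1 - edit_threshold` fraction of latent pixels keeps its guidance, which is why the threshold should be chosen in proportion to the size of the image region being edited.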
+ attn_store_steps: + Steps for which the attention maps are stored in the AttentionStore. Just for visualization purposes. + store_averaged_over_steps: + Whether the attention maps for the 'attn_store_steps' are stored averaged over the diffusion steps. + If False, attention maps for each step are stores separately. Just for visualization purposes. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] if `return_dict` is True, + otherwise a `tuple. When returning a tuple, the first element is a list with the generated images. + """ + if self.inversion_steps is None: + raise ValueError( + "You need to invert an input image first before calling the pipeline. The `invert` method has to be called beforehand. Edits will always be performed for the last inverted image(s)." + ) + + eta = self.eta + num_images_per_prompt = 1 + latents = self.init_latents + + zs = self.zs + self.scheduler.set_timesteps(len(self.scheduler.timesteps)) + + if use_intersect_mask: + use_cross_attn_mask = True + + if use_cross_attn_mask: + self.smoothing = LeditsGaussianSmoothing(self.device) + + if user_mask is not None: + user_mask = user_mask.to(self.device) + + # TODO: Check inputs + # 1. Check inputs. Raise error if not correct + # self.check_inputs( + # callback_steps, + # negative_prompt, + # negative_prompt_2, + # prompt_embeds, + # negative_prompt_embeds, + # pooled_prompt_embeds, + # negative_pooled_prompt_embeds, + # ) + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + + # 2. Define call parameters + batch_size = self.batch_size + + device = self._execution_device + + if editing_prompt: + enable_edit_guidance = True + if isinstance(editing_prompt, str): + editing_prompt = [editing_prompt] + self.enabled_editing_prompts = len(editing_prompt) + elif editing_prompt_embeddings is not None: + enable_edit_guidance = True + self.enabled_editing_prompts = editing_prompt_embeddings.shape[0] + else: + self.enabled_editing_prompts = 0 + enable_edit_guidance = False + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + edit_prompt_embeds, + negative_pooled_prompt_embeds, + pooled_edit_embeds, + num_edit_tokens, + ) = self.encode_prompt( + device=device, + num_images_per_prompt=num_images_per_prompt, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_embeds=negative_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + enable_edit_guidance=enable_edit_guidance, + editing_prompt=editing_prompt, + editing_prompt_embeds=editing_prompt_embeddings, + editing_pooled_prompt_embeds=editing_pooled_prompt_embeds, + ) + + # 4. Prepare timesteps + # self.scheduler.set_timesteps(num_inference_steps, device=device) + + timesteps = self.inversion_steps + t_to_idx = {int(v): k for k, v in enumerate(timesteps)} + + if use_cross_attn_mask: + self.attention_store = LeditsAttentionStore( + average=store_averaged_over_steps, + batch_size=batch_size, + max_size=(latents.shape[-2] / 4.0) * (latents.shape[-1] / 4.0), + max_resolution=None, + ) + self.prepare_unet(self.attention_store) + resolution = latents.shape[-2:] + att_res = (int(resolution[0] / 4), int(resolution[1] / 4)) + + # 5. Prepare latent variables + latents = self.prepare_latents(device=device, latents=latents) + + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(eta) + + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(negative_pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + # 7. Prepare added time ids & embeddings + add_text_embeds = negative_pooled_prompt_embeds + add_time_ids = self._get_add_time_ids( + self.size, + crops_coords_top_left, + self.size, + dtype=negative_pooled_prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if enable_edit_guidance: + prompt_embeds = torch.cat([prompt_embeds, edit_prompt_embeds], dim=0) + add_text_embeds = torch.cat([add_text_embeds, pooled_edit_embeds], dim=0) + edit_concepts_time_ids = add_time_ids.repeat(edit_prompt_embeds.shape[0], 1) + add_time_ids = torch.cat([add_time_ids, edit_concepts_time_ids], dim=0) + self.text_cross_attention_maps = [editing_prompt] if isinstance(editing_prompt, str) else editing_prompt + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + if ip_adapter_image is not None: + # TODO: fix image encoding + image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt) + if self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds]) + image_embeds = image_embeds.to(device) + + # 8. Denoising loop + self.sem_guidance = None + self.activation_mask = None + + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9. 
Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=self._num_timesteps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * (1 + self.enabled_editing_prompts)) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None: + added_cond_kwargs["image_embeds"] = image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + noise_pred_out = noise_pred.chunk(1 + self.enabled_editing_prompts) # [b,4, 64, 64] + noise_pred_uncond = noise_pred_out[0] + noise_pred_edit_concepts = noise_pred_out[1:] + + noise_guidance_edit = torch.zeros( + noise_pred_uncond.shape, + device=self.device, + dtype=noise_pred_uncond.dtype, + ) + + if sem_guidance is not None and len(sem_guidance) > i: + noise_guidance_edit += sem_guidance[i].to(self.device) + + elif enable_edit_guidance: + if self.activation_mask is None: + self.activation_mask = torch.zeros( + (len(timesteps), self.enabled_editing_prompts, *noise_pred_edit_concepts[0].shape) + ) + if self.sem_guidance is None: + self.sem_guidance = torch.zeros((len(timesteps), *noise_pred_uncond.shape)) + + # noise_guidance_edit = torch.zeros_like(noise_guidance) + for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts): + if isinstance(edit_warmup_steps, list): + edit_warmup_steps_c = edit_warmup_steps[c] + else: + edit_warmup_steps_c = edit_warmup_steps + if i < edit_warmup_steps_c: + continue + + if isinstance(edit_guidance_scale, list): + edit_guidance_scale_c = edit_guidance_scale[c] + else: + edit_guidance_scale_c = edit_guidance_scale + + if isinstance(edit_threshold, list): + edit_threshold_c = edit_threshold[c] + else: + edit_threshold_c = edit_threshold + if isinstance(reverse_editing_direction, list): + reverse_editing_direction_c = reverse_editing_direction[c] + else: + reverse_editing_direction_c = reverse_editing_direction + + if isinstance(edit_cooldown_steps, list): + edit_cooldown_steps_c = edit_cooldown_steps[c] + elif edit_cooldown_steps is None: + edit_cooldown_steps_c = i + 1 + else: + edit_cooldown_steps_c = edit_cooldown_steps + + if i >= edit_cooldown_steps_c: + continue + + noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond + + if reverse_editing_direction_c: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 + + noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c + + if user_mask is not None: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * user_mask + + if use_cross_attn_mask: + out = self.attention_store.aggregate_attention( + attention_maps=self.attention_store.step_store, + prompts=self.text_cross_attention_maps, + res=att_res, + from_where=["up", "down"], + is_cross=True, + select=self.text_cross_attention_maps.index(editing_prompt[c]), + 
) + attn_map = out[:, :, :, 1 : 1 + num_edit_tokens[c]] # 0 -> startoftext + + # average over all tokens + if attn_map.shape[3] != num_edit_tokens[c]: + raise ValueError( + f"Incorrect shape of attention_map. Expected size {num_edit_tokens[c]}, but found {attn_map.shape[3]}!" + ) + attn_map = torch.sum(attn_map, dim=3) + + # gaussian_smoothing + attn_map = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode="reflect") + attn_map = self.smoothing(attn_map).squeeze(1) + + # torch.quantile function expects float32 + if attn_map.dtype == torch.float32: + tmp = torch.quantile(attn_map.flatten(start_dim=1), edit_threshold_c, dim=1) + else: + tmp = torch.quantile( + attn_map.flatten(start_dim=1).to(torch.float32), edit_threshold_c, dim=1 + ).to(attn_map.dtype) + attn_mask = torch.where( + attn_map >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0 + ) + + # resolution must match latent space dimension + attn_mask = F.interpolate( + attn_mask.unsqueeze(1), + noise_guidance_edit_tmp.shape[-2:], # 64,64 + ).repeat(1, 4, 1, 1) + self.activation_mask[i, c] = attn_mask.detach().cpu() + if not use_intersect_mask: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask + + if use_intersect_mask: + noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) + noise_guidance_edit_tmp_quantile = torch.sum( + noise_guidance_edit_tmp_quantile, dim=1, keepdim=True + ) + noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat( + 1, self.unet.config.in_channels, 1, 1 + ) + + # torch.quantile function expects float32 + if noise_guidance_edit_tmp_quantile.dtype == torch.float32: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2), + edit_threshold_c, + dim=2, + keepdim=False, + ) + else: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), + edit_threshold_c, + dim=2, + keepdim=False, + ).to(noise_guidance_edit_tmp_quantile.dtype) + + intersect_mask = ( + torch.where( + noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], + torch.ones_like(noise_guidance_edit_tmp), + torch.zeros_like(noise_guidance_edit_tmp), + ) + * attn_mask + ) + + self.activation_mask[i, c] = intersect_mask.detach().cpu() + + noise_guidance_edit_tmp = noise_guidance_edit_tmp * intersect_mask + + elif not use_cross_attn_mask: + # calculate quantile + noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) + noise_guidance_edit_tmp_quantile = torch.sum( + noise_guidance_edit_tmp_quantile, dim=1, keepdim=True + ) + noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, 4, 1, 1) + + # torch.quantile function expects float32 + if noise_guidance_edit_tmp_quantile.dtype == torch.float32: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2), + edit_threshold_c, + dim=2, + keepdim=False, + ) + else: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), + edit_threshold_c, + dim=2, + keepdim=False, + ).to(noise_guidance_edit_tmp_quantile.dtype) + + self.activation_mask[i, c] = ( + torch.where( + noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], + torch.ones_like(noise_guidance_edit_tmp), + torch.zeros_like(noise_guidance_edit_tmp), + ) + .detach() + .cpu() + ) + + noise_guidance_edit_tmp = torch.where( + noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], + noise_guidance_edit_tmp, + torch.zeros_like(noise_guidance_edit_tmp), + ) + + noise_guidance_edit += noise_guidance_edit_tmp + + self.sem_guidance[i] = 
noise_guidance_edit.detach().cpu() + + noise_pred = noise_pred_uncond + noise_guidance_edit + + # compute the previous noisy sample x_t -> x_t-1 + if enable_edit_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg( + noise_pred, + noise_pred_edit_concepts.mean(dim=0, keepdim=False), + guidance_rescale=self.guidance_rescale, + ) + + idx = t_to_idx[int(t)] + latents = self.scheduler.step( + noise_pred, t, latents, variance_noise=zs[idx], **extra_step_kwargs, return_dict=False + )[0] + + # step callback + if use_cross_attn_mask: + store_step = i in attn_store_steps + self.attention_store.between_steps(store_step) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + # negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > 0 and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return LEditsPPDiffusionPipelineOutput(images=image, nsfw_content_detected=None) + + @torch.no_grad() + # Modified from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEditsPPPipelineStableDiffusion.encode_image + def encode_image(self, image, dtype=None, height=None, width=None, resize_mode="default", crops_coords=None): + image = self.image_processor.preprocess( + image=image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + resized = self.image_processor.postprocess(image=image, output_type="pil") + + if max(image.shape[-2:]) > self.vae.config["sample_size"] * 1.5: + logger.warning( + "Your input images far exceed the default resolution of the underlying diffusion model. " + "The output images may contain severe artifacts! 
" + "Consider down-sampling the input using the `height` and `width` parameters" + ) + image = image.to(self.device, dtype=dtype) + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + image = image.float() + self.upcast_vae() + image = image.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + x0 = self.vae.encode(image).latent_dist.mode() + x0 = x0.to(dtype) + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + + x0 = self.vae.config.scaling_factor * x0 + return x0, resized + + @torch.no_grad() + def invert( + self, + image: PipelineImageInput, + source_prompt: str = "", + source_guidance_scale=3.5, + negative_prompt: str = None, + negative_prompt_2: str = None, + num_inversion_steps: int = 50, + skip: float = 0.15, + generator: Optional[torch.Generator] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + num_zero_noise_steps: int = 3, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + ): + r""" + The function to the pipeline for image inversion as described by the [LEDITS++ Paper](https://arxiv.org/abs/2301.12247). + If the scheduler is set to [`~schedulers.DDIMScheduler`] the inversion proposed by [edit-friendly DPDM](https://arxiv.org/abs/2304.06140) + will be performed instead. + + Args: + image (`PipelineImageInput`): + Input for the image(s) that are to be edited. Multiple input images have to default to the same aspect + ratio. + source_prompt (`str`, defaults to `""`): + Prompt describing the input image that will be used for guidance during inversion. Guidance is disabled + if the `source_prompt` is `""`. + source_guidance_scale (`float`, defaults to `3.5`): + Strength of guidance during inversion. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_inversion_steps (`int`, defaults to `50`): + Number of total performed inversion steps after discarding the initial `skip` steps. + skip (`float`, defaults to `0.15`): + Portion of initial steps that will be ignored for inversion and subsequent generation. Lower values + will lead to stronger changes to the input image. `skip` has to be between `0` and `1`. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + inversion deterministic. + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + num_zero_noise_steps (`int`, defaults to `3`): + Number of final diffusion steps that will not renoise the current image. If no steps are set to zero + SD-XL in combination with [`DPMSolverMultistepScheduler`] will produce noise artifacts. 
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Returns: + [`~pipelines.ledits_pp.LEditsPPInversionPipelineOutput`]: + Output will contain the resized input image(s) and respective VAE reconstruction(s). + """ + + # Reset attn processor, we do not want to store attn maps during inversion + self.unet.set_attn_processor(AttnProcessor()) + + self.eta = 1.0 + + self.scheduler.config.timestep_spacing = "leading" + self.scheduler.set_timesteps(int(num_inversion_steps * (1 + skip))) + self.inversion_steps = self.scheduler.timesteps[-num_inversion_steps:] + timesteps = self.inversion_steps + + num_images_per_prompt = 1 + + device = self._execution_device + + # 0. Ensure that only uncond embedding is used if prompt = "" + if source_prompt == "": + # noise pred should only be noise_pred_uncond + source_guidance_scale = 0.0 + do_classifier_free_guidance = False + else: + do_classifier_free_guidance = source_guidance_scale > 1.0 + + # 1. prepare image + x0, resized = self.encode_image(image, dtype=self.text_encoder_2.dtype) + width = x0.shape[2] * self.vae_scale_factor + height = x0.shape[3] * self.vae_scale_factor + self.size = (height, width) + + self.batch_size = x0.shape[0] + + # 2. get embeddings + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + + if isinstance(source_prompt, str): + source_prompt = [source_prompt] * self.batch_size + + ( + negative_prompt_embeds, + prompt_embeds, + negative_pooled_prompt_embeds, + edit_pooled_prompt_embeds, + _, + ) = self.encode_prompt( + device=device, + num_images_per_prompt=num_images_per_prompt, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + editing_prompt=source_prompt, + lora_scale=text_encoder_lora_scale, + enable_edit_guidance=do_classifier_free_guidance, + ) + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(negative_pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + # 3. 
Prepare added time ids & embeddings + add_text_embeds = negative_pooled_prompt_embeds + add_time_ids = self._get_add_time_ids( + self.size, + crops_coords_top_left, + self.size, + dtype=negative_prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if do_classifier_free_guidance: + negative_prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([add_text_embeds, edit_pooled_prompt_embeds], dim=0) + add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) + + negative_prompt_embeds = negative_prompt_embeds.to(device) + + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(self.batch_size * num_images_per_prompt, 1) + + # autoencoder reconstruction + if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: + self.upcast_vae() + x0_tmp = x0.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + image_rec = self.vae.decode( + x0_tmp / self.vae.config.scaling_factor, return_dict=False, generator=generator + )[0] + elif self.vae.config.force_upcast: + x0_tmp = x0.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + image_rec = self.vae.decode( + x0_tmp / self.vae.config.scaling_factor, return_dict=False, generator=generator + )[0] + else: + image_rec = self.vae.decode(x0 / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] + + image_rec = self.image_processor.postprocess(image_rec, output_type="pil") + + # 5. find zs and xts + variance_noise_shape = (num_inversion_steps, *x0.shape) + + # intermediate latents + t_to_idx = {int(v): k for k, v in enumerate(timesteps)} + xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=negative_prompt_embeds.dtype) + + for t in reversed(timesteps): + idx = num_inversion_steps - t_to_idx[int(t)] - 1 + noise = randn_tensor(shape=x0.shape, generator=generator, device=self.device, dtype=x0.dtype) + xts[idx] = self.scheduler.add_noise(x0, noise, t.unsqueeze(0)) + xts = torch.cat([x0.unsqueeze(0), xts], dim=0) + + # noise maps + zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=negative_prompt_embeds.dtype) + + self.scheduler.set_timesteps(len(self.scheduler.timesteps)) + + for t in self.progress_bar(timesteps): + idx = num_inversion_steps - t_to_idx[int(t)] - 1 + # 1. predict noise residual + xt = xts[idx + 1] + + latent_model_input = torch.cat([xt] * 2) if do_classifier_free_guidance else xt + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=negative_prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # 2. 
perform guidance + if do_classifier_free_guidance: + noise_pred_out = noise_pred.chunk(2) + noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1] + noise_pred = noise_pred_uncond + source_guidance_scale * (noise_pred_text - noise_pred_uncond) + + xtm1 = xts[idx] + z, xtm1_corrected = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, self.eta) + zs[idx] = z + + # correction to avoid error accumulation + xts[idx] = xtm1_corrected + + self.init_latents = xts[-1] + zs = zs.flip(0) + + if num_zero_noise_steps > 0: + zs[-num_zero_noise_steps:] = torch.zeros_like(zs[-num_zero_noise_steps:]) + self.zs = zs + return LEditsPPInversionPipelineOutput(images=resized, vae_reconstruction_images=image_rec) + + +# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.compute_noise_ddim +def compute_noise_ddim(scheduler, prev_latents, latents, timestep, noise_pred, eta): + # 1. get previous step value (=t-1) + prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps + + # 2. compute alphas, betas + alpha_prod_t = scheduler.alphas_cumprod[timestep] + alpha_prod_t_prev = ( + scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod + ) + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) + + # 4. Clip "predicted x_0" + if scheduler.config.clip_sample: + pred_original_sample = torch.clamp(pred_original_sample, -1, 1) + + # 5. compute variance: "sigma_t(η)" -> see formula (16) + # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) + variance = scheduler._get_variance(timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + # 6. 
compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred + + # modifed so that updated xtm1 is returned as well (to avoid error accumulation) + mu_xt = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + if variance > 0.0: + noise = (prev_latents - mu_xt) / (variance ** (0.5) * eta) + else: + noise = torch.tensor([0.0]).to(latents.device) + + return noise, mu_xt + (eta * variance**0.5) * noise + + +# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.compute_noise_sde_dpm_pp_2nd +def compute_noise_sde_dpm_pp_2nd(scheduler, prev_latents, latents, timestep, noise_pred, eta): + def first_order_update(model_output, sample): # timestep, prev_timestep, sample): + sigma_t, sigma_s = scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index] + alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = scheduler._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + + mu_xt = (sigma_t / sigma_s * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + mu_xt = scheduler.dpm_solver_first_order_update( + model_output=model_output, sample=sample, noise=torch.zeros_like(sample) + ) + + sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) + if sigma > 0.0: + noise = (prev_latents - mu_xt) / sigma + else: + noise = torch.tensor([0.0]).to(sample.device) + + prev_sample = mu_xt + sigma * noise + return noise, prev_sample + + def second_order_update(model_output_list, sample): # timestep_list, prev_timestep, sample): + sigma_t, sigma_s0, sigma_s1 = ( + scheduler.sigmas[scheduler.step_index + 1], + scheduler.sigmas[scheduler.step_index], + scheduler.sigmas[scheduler.step_index - 1], + ) + + alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = scheduler._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = scheduler._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + + mu_xt = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + ) + + sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) + if sigma > 0.0: + noise = (prev_latents - mu_xt) / sigma + else: + noise = torch.tensor([0.0]).to(sample.device) + + prev_sample = mu_xt + sigma * noise + + return noise, prev_sample + + if scheduler.step_index is None: + scheduler._init_step_index(timestep) + + model_output = scheduler.convert_model_output(model_output=noise_pred, sample=latents) + for i in range(scheduler.config.solver_order - 1): + scheduler.model_outputs[i] = scheduler.model_outputs[i + 1] + scheduler.model_outputs[-1] = model_output + + if scheduler.lower_order_nums < 1: + noise, prev_sample = first_order_update(model_output, latents) + else: + noise, prev_sample = second_order_update(scheduler.model_outputs, latents) + + if scheduler.lower_order_nums < scheduler.config.solver_order: + scheduler.lower_order_nums += 1 + + # upon completion increase step 
index by one + scheduler._step_index += 1 + + return noise, prev_sample + + +# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.compute_noise +def compute_noise(scheduler, *args): + if isinstance(scheduler, DDIMScheduler): + return compute_noise_ddim(scheduler, *args) + elif ( + isinstance(scheduler, DPMSolverMultistepScheduler) + and scheduler.config.algorithm_type == "sde-dpmsolver++" + and scheduler.config.solver_order == 2 + ): + return compute_noise_sde_dpm_pp_2nd(scheduler, *args) + else: + raise NotImplementedError diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/pipeline_output.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/pipeline_output.py new file mode 100644 index 000000000..b90005c97 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/ledits_pp/pipeline_output.py @@ -0,0 +1,43 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput + + +@dataclass +class LEditsPPDiffusionPipelineOutput(BaseOutput): + """ + Output class for LEdits++ Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + nsfw_content_detected (`List[bool]`) + List indicating whether the corresponding generated image contains “not-safe-for-work” (nsfw) content or + `None` if safety checking could not be performed. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] + + +@dataclass +class LEditsPPInversionPipelineOutput(BaseOutput): + """ + Output class for LEdits++ Diffusion pipelines. + + Args: + input_images (`List[PIL.Image.Image]` or `np.ndarray`) + List of the cropped and resized input images as PIL images of length `batch_size` or NumPy array of shape ` + (batch_size, height, width, num_channels)`. + vae_reconstruction_images (`List[PIL.Image.Image]` or `np.ndarray`) + List of VAE reconstruction of all input images as PIL images of length `batch_size` or NumPy array of shape ` + (batch_size, height, width, num_channels)`. 
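Both output classes expose plain `images` lists, which makes the intended invert-then-edit workflow easy to sketch. The snippet below is a minimal usage sketch only: it assumes a diffusers build that exports this pipeline as `LEditsPPPipelineStableDiffusionXL`, an SDXL checkpoint such as `stabilityai/stable-diffusion-xl-base-1.0`, and a hypothetical local `input.jpg`; none of these names are dictated by the code in this patch.

```py
import torch
from PIL import Image
from diffusers import LEditsPPPipelineStableDiffusionXL  # assumed export name

# Assumed checkpoint id; any SDXL base checkpoint should behave the same way.
pipe = LEditsPPPipelineStableDiffusionXL.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

image = Image.open("input.jpg").convert("RGB")  # hypothetical local file

# 1. Invert first; edits always apply to the last inverted image(s).
pipe.invert(image=image, num_inversion_steps=50, skip=0.2)

# 2. Edit by steering towards (or away from) the listed concepts.
result = pipe(
    editing_prompt=["glasses"],
    reverse_editing_direction=[False],  # False: add the concept
    edit_guidance_scale=[5.0],
    edit_threshold=[0.9],
)
edited = result.images[0]
```

The `skip` argument mirrors the `invert` docstring above: larger values discard more of the early steps and allow stronger deviations from the input image.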
+ """ + + images: Union[List[PIL.Image.Image], np.ndarray] + vae_reconstruction_images: Union[List[PIL.Image.Image], np.ndarray] diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/musicldm/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/musicldm/__init__.py new file mode 100644 index 000000000..ed71eeb1d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/musicldm/__init__.py @@ -0,0 +1,49 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_musicldm"] = ["MusicLDMPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_musicldm import MusicLDMPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/musicldm/pipeline_musicldm.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/musicldm/pipeline_musicldm.py new file mode 100644 index 000000000..5fde3450b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/musicldm/pipeline_musicldm.py @@ -0,0 +1,635 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
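The `__init__.py` above defers importing `pipeline_musicldm` until `MusicLDMPipeline` is first accessed, via the `_LazyModule` helper. The same idea can be sketched with plain PEP 562 module-level `__getattr__`; the package and attribute names below are hypothetical and only illustrate the mechanism, they are not part of diffusers.

```py
# lazy_pkg/__init__.py -- hypothetical package, illustration only
import importlib
from typing import Any

_import_structure = {
    "pipeline_example": ["ExamplePipeline"],  # submodule -> exported names
}

# Reverse map: exported attribute -> submodule that defines it.
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name: str) -> Any:
    # Only called when `name` is not found normally (PEP 562), so the heavy
    # submodule import is deferred until e.g. `lazy_pkg.ExamplePipeline` is touched.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


def __dir__():
    return sorted(list(globals()) + list(_attr_to_module))
```

Deferring the import keeps importing the package cheap even when optional heavy dependencies are missing, which is also why the real module swaps in dummy objects when torch or transformers are unavailable.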
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import ( + ClapFeatureExtractor, + ClapModel, + ClapTextModelWithProjection, + RobertaTokenizer, + RobertaTokenizerFast, + SpeechT5HifiGan, +) + +from ...models import AutoencoderKL, UNet2DConditionModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + is_accelerate_available, + is_accelerate_version, + is_librosa_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline, StableDiffusionMixin + + +if is_librosa_available(): + import librosa + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import MusicLDMPipeline + >>> import torch + >>> import scipy + + >>> repo_id = "ucsd-reach/musicldm" + >>> pipe = MusicLDMPipeline.from_pretrained(repo_id, torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> prompt = "Techno music with a strong, upbeat tempo and high melodic riffs" + >>> audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0] + + >>> # save the audio sample as a .wav file + >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio) + ``` +""" + + +class MusicLDMPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-audio generation using MusicLDM. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.ClapModel`]): + Frozen text-audio embedding model (`ClapTextModel`), specifically the + [laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant. + tokenizer ([`PreTrainedTokenizer`]): + A [`~transformers.RobertaTokenizer`] to tokenize text. + feature_extractor ([`~transformers.ClapFeatureExtractor`]): + Feature extractor to compute mel-spectrograms from audio waveforms. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded audio latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + vocoder ([`~transformers.SpeechT5HifiGan`]): + Vocoder of class `SpeechT5HifiGan`. 
+ """ + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: Union[ClapTextModelWithProjection, ClapModel], + tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], + feature_extractor: Optional[ClapFeatureExtractor], + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + vocoder: SpeechT5HifiGan, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + feature_extractor=feature_extractor, + unet=unet, + scheduler=scheduler, + vocoder=vocoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def _encode_prompt( + self, + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device (`torch.device`): + torch device + num_waveforms_per_prompt (`int`): + number of waveforms that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the audio generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
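The negative/positive embedding pair documented above feeds the standard classifier-free guidance recipe used later in the denoising loop: run the model once on the batched embeddings, split the prediction, and blend. A minimal tensor-level sketch with dummy shapes (no real model is invoked):

```py
import torch

guidance_scale = 2.0

# Stand-ins for the pooled CLAP features of ["" (negative), prompt (positive)].
negative_prompt_embeds = torch.randn(1, 512)
prompt_embeds = torch.randn(1, 512)

# One batched pass: first half unconditional, second half conditional.
batched_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])  # (2, 512)

# Pretend this is the model's noise prediction for that batch.
fake_noise_pred = torch.randn(2, 4, 8, 8)
noise_pred_uncond, noise_pred_text = fake_noise_pred.chunk(2)

# Classifier-free guidance blend, matching the denoising loop further down.
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
```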
+ """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLAP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder.get_text_features( + text_input_ids.to(device), + attention_mask=attention_mask.to(device), + ) + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.text_model.dtype, device=device) + + ( + bs_embed, + seq_len, + ) = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt) + prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + uncond_input_ids = uncond_input.input_ids.to(device) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder.get_text_features( + uncond_input_ids, + attention_mask=attention_mask, + ) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.text_model.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.mel_spectrogram_to_waveform + def mel_spectrogram_to_waveform(self, mel_spectrogram): + if mel_spectrogram.dim() == 4: + mel_spectrogram = mel_spectrogram.squeeze(1) + + waveform = self.vocoder(mel_spectrogram) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + waveform = waveform.cpu().float() + return waveform + + # Copied from diffusers.pipelines.audioldm2.pipeline_audioldm2.AudioLDM2Pipeline.score_waveforms + def score_waveforms(self, text, audio, num_waveforms_per_prompt, device, dtype): + if not is_librosa_available(): + logger.info( + "Automatic scoring of the generated audio waveforms against the input prompt text requires the " + "`librosa` package to resample the generated waveforms. Returning the audios in the order they were " + "generated. To enable automatic scoring, install `librosa` with: `pip install librosa`." + ) + return audio + inputs = self.tokenizer(text, return_tensors="pt", padding=True) + resampled_audio = librosa.resample( + audio.numpy(), orig_sr=self.vocoder.config.sampling_rate, target_sr=self.feature_extractor.sampling_rate + ) + inputs["input_features"] = self.feature_extractor( + list(resampled_audio), return_tensors="pt", sampling_rate=self.feature_extractor.sampling_rate + ).input_features.type(dtype) + inputs = inputs.to(device) + + # compute the audio-text similarity score using the CLAP model + logits_per_text = self.text_encoder(**inputs).logits_per_text + # sort by the highest matching generations per prompt + indices = torch.argsort(logits_per_text, dim=1, descending=True)[:, :num_waveforms_per_prompt] + audio = torch.index_select(audio, 0, indices.reshape(-1).cpu()) + return audio + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.check_inputs + def check_inputs( + self, + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor + if audio_length_in_s < min_audio_length_in_s: + raise ValueError( + f"`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but " + f"is {audio_length_in_s}." 
+ ) + + if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: + raise ValueError( + f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the " + f"VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of " + f"{self.vae_scale_factor}." + ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + height // self.vae_scale_factor, + self.vocoder.config.model_in_dim // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. 
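A minimal usage sketch of this offloading path, assuming the `ucsd-reach/musicldm` checkpoint from the example docstring above and an `accelerate` installation that satisfies the version check below:

```py
import torch
from diffusers import MusicLDMPipeline

pipe = MusicLDMPipeline.from_pretrained("ucsd-reach/musicldm", torch_dtype=torch.float16)

# Instead of pipe.to("cuda"): each submodel is moved to the GPU only when its
# forward() runs, then handed back to the CPU by the accelerate hooks.
pipe.enable_model_cpu_offload(gpu_id=0)

audio = pipe(
    "gentle ambient piano", num_inference_steps=50, audio_length_in_s=5.0
).audios[0]
```

Compared with moving the whole pipeline to CUDA, peak GPU memory drops because only one submodel is resident at a time, at the cost of CPU-GPU transfers between stages.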
+ """ + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") + + device = torch.device(f"cuda:{gpu_id}") + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + model_sequence = [ + self.text_encoder.text_model, + self.text_encoder.text_projection, + self.unet, + self.vae, + self.vocoder, + self.text_encoder, + ] + + hook = None + for cpu_offloaded_model in model_sequence: + _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) + + # We'll offload the last model manually. + self.final_offload_hook = hook + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + audio_length_in_s: Optional[float] = None, + num_inference_steps: int = 200, + guidance_scale: float = 2.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_waveforms_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + output_type: Optional[str] = "np", + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`. + audio_length_in_s (`int`, *optional*, defaults to 10.24): + The length of the generated audio sample in seconds. + num_inference_steps (`int`, *optional*, defaults to 200): + The number of denoising steps. More denoising steps usually lead to a higher quality audio at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 2.0): + A higher guidance scale value encourages the model to generate audio that is closely linked to the text + `prompt` at the expense of lower sound quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in audio generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_waveforms_per_prompt (`int`, *optional*, defaults to 1): + The number of waveforms to generate per prompt. If `num_waveforms_per_prompt > 1`, the text encoding + model is a joint text-audio model ([`~transformers.ClapModel`]), and the tokenizer is a + `[~transformers.ClapProcessor]`, then automatic scoring will be performed between the generated outputs + and the input text. This scoring ranks the generated waveforms based on their cosine similarity to text + input in the joint text-audio embedding space. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + output_type (`str`, *optional*, defaults to `"np"`): + The output format of the generated audio. Choose between `"np"` to return a NumPy `np.ndarray` or + `"pt"` to return a PyTorch `torch.Tensor` object. Set to `"latent"` to return the latent diffusion + model (LDM) output. + + Examples: + + Returns: + [`~pipelines.AudioPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated audio. + """ + # 0. Convert audio input length from seconds to spectrogram height + vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate + + if audio_length_in_s is None: + audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor + + height = int(audio_length_in_s / vocoder_upsample_factor) + + original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) + if height % self.vae_scale_factor != 0: + height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor + logger.info( + f"Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} " + f"so that it can be handled by the model. It will be cut to {audio_length_in_s} after the " + f"denoising process." + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds = self._encode_prompt( + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_waveforms_per_prompt, + num_channels_latents, + height, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=None, + class_labels=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + self.maybe_free_model_hooks() + + # 8. Post-processing + if not output_type == "latent": + latents = 1 / self.vae.config.scaling_factor * latents + mel_spectrogram = self.vae.decode(latents).sample + else: + return AudioPipelineOutput(audios=latents) + + audio = self.mel_spectrogram_to_waveform(mel_spectrogram) + + audio = audio[:, :original_waveform_length] + + # 9. 
Automatic scoring + if num_waveforms_per_prompt > 1 and prompt is not None: + audio = self.score_waveforms( + text=prompt, + audio=audio, + num_waveforms_per_prompt=num_waveforms_per_prompt, + device=device, + dtype=prompt_embeds.dtype, + ) + + if output_type == "np": + audio = audio.numpy() + + if not return_dict: + return (audio,) + + return AudioPipelineOutput(audios=audio) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/onnx_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/onnx_utils.py new file mode 100644 index 000000000..11f2241c6 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/onnx_utils.py @@ -0,0 +1,215 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os +import shutil +from pathlib import Path +from typing import Optional, Union + +import numpy as np +from huggingface_hub import hf_hub_download +from huggingface_hub.utils import validate_hf_hub_args + +from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging + + +if is_onnx_available(): + import onnxruntime as ort + + +logger = logging.get_logger(__name__) + +ORT_TO_NP_TYPE = { + "tensor(bool)": np.bool_, + "tensor(int8)": np.int8, + "tensor(uint8)": np.uint8, + "tensor(int16)": np.int16, + "tensor(uint16)": np.uint16, + "tensor(int32)": np.int32, + "tensor(uint32)": np.uint32, + "tensor(int64)": np.int64, + "tensor(uint64)": np.uint64, + "tensor(float16)": np.float16, + "tensor(float)": np.float32, + "tensor(double)": np.float64, +} + + +class OnnxRuntimeModel: + def __init__(self, model=None, **kwargs): + logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.") + self.model = model + self.model_save_dir = kwargs.get("model_save_dir", None) + self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME) + + def __call__(self, **kwargs): + inputs = {k: np.array(v) for k, v in kwargs.items()} + return self.model.run(None, inputs) + + @staticmethod + def load_model(path: Union[str, Path], provider=None, sess_options=None): + """ + Loads an ONNX Inference session with an ExecutionProvider. 
Default provider is `CPUExecutionProvider` + + Arguments: + path (`str` or `Path`): + Directory from which to load + provider(`str`, *optional*): + Onnxruntime execution provider to use for loading the model, defaults to `CPUExecutionProvider` + """ + if provider is None: + logger.info("No onnxruntime provider specified, using CPUExecutionProvider") + provider = "CPUExecutionProvider" + + return ort.InferenceSession(path, providers=[provider], sess_options=sess_options) + + def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs): + """ + Save a model and its configuration file to a directory, so that it can be re-loaded using the + [`~optimum.onnxruntime.modeling_ort.ORTModel.from_pretrained`] class method. It will always save the + latest_model_name. + + Arguments: + save_directory (`str` or `Path`): + Directory where to save the model file. + file_name(`str`, *optional*): + Overwrites the default model file name from `"model.onnx"` to `file_name`. This allows you to save the + model with a different name. + """ + model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME + + src_path = self.model_save_dir.joinpath(self.latest_model_name) + dst_path = Path(save_directory).joinpath(model_file_name) + try: + shutil.copyfile(src_path, dst_path) + except shutil.SameFileError: + pass + + # copy external weights (for models >2GB) + src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME) + if src_path.exists(): + dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME) + try: + shutil.copyfile(src_path, dst_path) + except shutil.SameFileError: + pass + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + **kwargs, + ): + """ + Save a model to a directory, so that it can be re-loaded using the [`~OnnxModel.from_pretrained`] class + method.: + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to which to save. Will be created if it doesn't exist. + """ + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + os.makedirs(save_directory, exist_ok=True) + + # saving model weights/files + self._save_pretrained(save_directory, **kwargs) + + @classmethod + @validate_hf_hub_args + def _from_pretrained( + cls, + model_id: Union[str, Path], + token: Optional[Union[bool, str, None]] = None, + revision: Optional[Union[str, None]] = None, + force_download: bool = False, + cache_dir: Optional[str] = None, + file_name: Optional[str] = None, + provider: Optional[str] = None, + sess_options: Optional["ort.SessionOptions"] = None, + **kwargs, + ): + """ + Load a model from a directory or the HF Hub. + + Arguments: + model_id (`str` or `Path`): + Directory from which to load + token (`str` or `bool`): + Is needed to load models from a private or gated repository + revision (`str`): + Revision is the specific model version to use. It can be a branch name, a tag name, or a commit id + cache_dir (`Union[str, Path]`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + file_name(`str`): + Overwrites the default model file name from `"model.onnx"` to `file_name`. 
This allows you to load + different model files from the same repository or directory. + provider(`str`): + The ONNX runtime provider, e.g. `CPUExecutionProvider` or `CUDAExecutionProvider`. + kwargs (`Dict`, *optional*): + kwargs will be passed to the model during initialization + """ + model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME + # load model from local directory + if os.path.isdir(model_id): + model = OnnxRuntimeModel.load_model( + Path(model_id, model_file_name).as_posix(), provider=provider, sess_options=sess_options + ) + kwargs["model_save_dir"] = Path(model_id) + # load model from hub + else: + # download model + model_cache_path = hf_hub_download( + repo_id=model_id, + filename=model_file_name, + token=token, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + ) + kwargs["model_save_dir"] = Path(model_cache_path).parent + kwargs["latest_model_name"] = Path(model_cache_path).name + model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options) + return cls(model=model, **kwargs) + + @classmethod + @validate_hf_hub_args + def from_pretrained( + cls, + model_id: Union[str, Path], + force_download: bool = True, + token: Optional[str] = None, + cache_dir: Optional[str] = None, + **model_kwargs, + ): + revision = None + if len(str(model_id).split("@")) == 2: + model_id, revision = model_id.split("@") + + return cls._from_pretrained( + model_id=model_id, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + token=token, + **model_kwargs, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/paint_by_example/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/paint_by_example/__init__.py new file mode 100644 index 000000000..aaa775f69 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/paint_by_example/__init__.py @@ -0,0 +1,55 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, List, Optional, Union + +import numpy as np +import PIL +from PIL import Image + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["image_encoder"] = ["PaintByExampleImageEncoder"] + _import_structure["pipeline_paint_by_example"] = ["PaintByExamplePipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .image_encoder import PaintByExampleImageEncoder + from .pipeline_paint_by_example import PaintByExamplePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git 
a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/paint_by_example/image_encoder.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/paint_by_example/image_encoder.py new file mode 100644 index 000000000..2fd0338b1 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/paint_by_example/image_encoder.py @@ -0,0 +1,67 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch +from torch import nn +from transformers import CLIPPreTrainedModel, CLIPVisionModel + +from ...models.attention import BasicTransformerBlock +from ...utils import logging + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class PaintByExampleImageEncoder(CLIPPreTrainedModel): + def __init__(self, config, proj_size=None): + super().__init__(config) + self.proj_size = proj_size or getattr(config, "projection_dim", 768) + + self.model = CLIPVisionModel(config) + self.mapper = PaintByExampleMapper(config) + self.final_layer_norm = nn.LayerNorm(config.hidden_size) + self.proj_out = nn.Linear(config.hidden_size, self.proj_size) + + # uncondition for scaling + self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size))) + + def forward(self, pixel_values, return_uncond_vector=False): + clip_output = self.model(pixel_values=pixel_values) + latent_states = clip_output.pooler_output + latent_states = self.mapper(latent_states[:, None]) + latent_states = self.final_layer_norm(latent_states) + latent_states = self.proj_out(latent_states) + if return_uncond_vector: + return latent_states, self.uncond_vector + + return latent_states + + +class PaintByExampleMapper(nn.Module): + def __init__(self, config): + super().__init__() + num_layers = (config.num_hidden_layers + 1) // 5 + hid_size = config.hidden_size + num_heads = 1 + self.blocks = nn.ModuleList( + [ + BasicTransformerBlock(hid_size, num_heads, hid_size, activation_fn="gelu", attention_bias=True) + for _ in range(num_layers) + ] + ) + + def forward(self, hidden_states): + for block in self.blocks: + hidden_states = block(hidden_states) + + return hidden_states diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py new file mode 100644 index 000000000..8a24f134e --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py @@ -0,0 +1,621 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor + +from ...image_processor import VaeImageProcessor +from ...models import AutoencoderKL, UNet2DConditionModel +from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .image_encoder import PaintByExampleImageEncoder + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +def prepare_mask_and_masked_image(image, mask): + """ + Prepares a pair (image, mask) to be consumed by the Paint by Example pipeline. This means that those inputs will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the + ``image`` and ``1`` for the ``mask``. + + The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be + binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. + It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` + ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4 + dimensions: ``batch x channels x height x width``. 
+ """ + if isinstance(image, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") + + # Batch single image + if image.ndim == 3: + assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" + image = image.unsqueeze(0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Batched mask + if mask.shape[0] == image.shape[0]: + mask = mask.unsqueeze(1) + else: + mask = mask.unsqueeze(0) + + assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" + assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" + assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" + assert mask.shape[1] == 1, "Mask image must have a single channel" + + # Check image is in [-1, 1] + if image.min() < -1 or image.max() > 1: + raise ValueError("Image should be in [-1, 1] range") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # paint-by-example inverses the mask + mask = 1 - mask + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # Image as float32 + image = image.to(dtype=torch.float32) + elif isinstance(mask, torch.Tensor): + raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") + else: + if isinstance(image, PIL.Image.Image): + image = [image] + + image = np.concatenate([np.array(i.convert("RGB"))[None, :] for i in image], axis=0) + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + # preprocess mask + if isinstance(mask, PIL.Image.Image): + mask = [mask] + + mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) + mask = mask.astype(np.float32) / 255.0 + + # paint-by-example inverses the mask + mask = 1 - mask + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + masked_image = image * mask + + return mask, masked_image + + +class PaintByExamplePipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + + + 🧪 This is an experimental feature! + + + + Pipeline for image-guided image inpainting using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + image_encoder ([`PaintByExampleImageEncoder`]): + Encodes the example input image. The `unet` is conditioned on the example image instead of a text prompt. + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. 
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + + """ + + # TODO: feature_extractor is required to encode initial images (if they are in PIL format), + # we should give a descriptive message if the pipeline doesn't have one. + + model_cpu_offload_seq = "unet->vae" + _exclude_from_cpu_offload = ["image_encoder"] + _optional_components = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + image_encoder: PaintByExampleImageEncoder, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = False, + ): + super().__init__() + + self.register_modules( + vae=vae, + image_encoder=image_encoder, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline.check_inputs + def check_inputs(self, image, height, width, callback_steps): + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" + f" {type(image)}" + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_mask_latents + def prepare_mask_latents( + self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + masked_image = masked_image.to(device=device, dtype=dtype) + + if masked_image.shape[1] == 4: + masked_image_latents = masked_image + else: + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. 
Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." + ) + masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline._encode_vae_image + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(images=image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeddings, negative_prompt_embeds = self.image_encoder(image, return_uncond_vector=True) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = image_embeddings.shape + image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) + image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + negative_prompt_embeds = negative_prompt_embeds.repeat(1, image_embeddings.shape[0], 1) + negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, 1, -1) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) + + return image_embeddings + + @torch.no_grad() + def __call__( + self, + example_image: Union[torch.FloatTensor, PIL.Image.Image], + image: Union[torch.FloatTensor, PIL.Image.Image], + mask_image: Union[torch.FloatTensor, PIL.Image.Image], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + example_image (`torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`): + An example image to guide image generation. + image (`torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`): + `Image` or tensor representing an image batch to be inpainted (parts of the image are masked out with + `mask_image` and repainted according to `prompt`). + mask_image (`torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`): + `Image` or tensor representing an image batch to mask `image`. White pixels in the mask are repainted, + while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel + (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the + expected shape would be `(B, H, W, 1)`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Example: + + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + >>> from diffusers import PaintByExamplePipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = ( + ... "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/image/example_1.png" + ... ) + >>> mask_url = ( + ... "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/mask/example_1.png" + ... ) + >>> example_url = "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/reference/example_1.jpg" + + >>> init_image = download_image(img_url).resize((512, 512)) + >>> mask_image = download_image(mask_url).resize((512, 512)) + >>> example_image = download_image(example_url).resize((512, 512)) + + >>> pipe = PaintByExamplePipeline.from_pretrained( + ... "Fantasy-Studio/Paint-by-Example", + ... torch_dtype=torch.float16, + ... ) + >>> pipe = pipe.to("cuda") + + >>> image = pipe(image=init_image, mask_image=mask_image, example_image=example_image).images[0] + >>> image + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 1. Define call parameters + if isinstance(image, PIL.Image.Image): + batch_size = 1 + elif isinstance(image, list): + batch_size = len(image) + else: + batch_size = image.shape[0] + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 2. Preprocess mask and image + mask, masked_image = prepare_mask_and_masked_image(image, mask_image) + height, width = masked_image.shape[-2:] + + # 3. Check inputs + self.check_inputs(example_image, height, width, callback_steps) + + # 4. Encode input image + image_embeddings = self._encode_image( + example_image, device, num_images_per_prompt, do_classifier_free_guidance + ) + + # 5. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 6. 
Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + image_embeddings.dtype, + device, + generator, + latents, + ) + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + image_embeddings.dtype, + device, + generator, + do_classifier_free_guidance, + ) + + # 8. Check that sizes of mask, masked image and latents match + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + + # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 10. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + latent_model_input = torch.cat([latent_model_input, masked_image_latents, mask], dim=1) + + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + self.maybe_free_model_hooks() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, image_embeddings.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) 
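The helper `prepare_mask_and_masked_image` defined in this file inverts and binarizes the user-supplied mask, so the region to repaint ends up as 0 and preserved pixels as 1, and `__call__` later concatenates the mask and masked-image latents with the noise latents along the channel dimension. A minimal sketch of that preprocessing, assuming the vendored package above is importable as `diffusers.pipelines.paint_by_example.pipeline_paint_by_example` and that Pillow and NumPy are installed:

```py
import numpy as np
import PIL.Image

from diffusers.pipelines.paint_by_example.pipeline_paint_by_example import (
    prepare_mask_and_masked_image,
)

# A white square on a black background marks the region to repaint.
mask_np = np.zeros((64, 64), dtype=np.uint8)
mask_np[16:48, 16:48] = 255
mask_pil = PIL.Image.fromarray(mask_np, mode="L")
image_pil = PIL.Image.fromarray(np.full((64, 64, 3), 128, dtype=np.uint8))

mask, masked_image = prepare_mask_and_masked_image(image_pil, mask_pil)
assert mask.shape == (1, 1, 64, 64) and masked_image.shape == (1, 3, 64, 64)
assert mask[0, 0, 32, 32] == 0  # repaint region (white in the input mask) becomes 0
assert mask[0, 0, 0, 0] == 1    # preserved region (black in the input mask) stays 1
```

Note that the tensor convention is inverted relative to the input: white pixels in `mask_image` mark what gets repainted, but the mask handed to the UNet is 0 there, which is what the "paint-by-example inverses the mask" comments in the helper refer to.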
diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pia/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pia/__init__.py new file mode 100644 index 000000000..16e800496 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pia/__init__.py @@ -0,0 +1,46 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_pia"] = ["PIAPipeline", "PIAPipelineOutput"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .pipeline_pia import PIAPipeline, PIAPipelineOutput + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pia/pipeline_pia.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pia/pipeline_pia.py new file mode 100644 index 000000000..507088991 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pia/pipeline_pia.py @@ -0,0 +1,1034 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +import math +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL +import torch +import torch.fft as fft +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...models.unets.unet_motion_model import MotionAdapter +from ...schedulers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from ...utils import ( + USE_PEFT_BACKEND, + BaseOutput, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..free_init_utils import FreeInitMixin +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import ( + ... EulerDiscreteScheduler, + ... MotionAdapter, + ... PIAPipeline, + ... ) + >>> from diffusers.utils import export_to_gif, load_image + >>> adapter = MotionAdapter.from_pretrained("../checkpoints/pia-diffusers") + >>> pipe = PIAPipeline.from_pretrained("SG161222/Realistic_Vision_V6.0_B1_noVAE", motion_adapter=adapter) + >>> pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) + >>> image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png?download=true" + ... 
) + >>> image = image.resize((512, 512)) + >>> prompt = "cat in a hat" + >>> negative_prompt = "wrong white balance, dark, sketches,worst quality,low quality, deformed, distorted, disfigured, bad eyes, wrong lips,weird mouth, bad teeth, mutated hands and fingers, bad anatomy,wrong anatomy, amputation, extra limb, missing limb, floating,limbs, disconnected limbs, mutation, ugly, disgusting, bad_pictures, negative_hand-neg" + >>> generator = torch.Generator("cpu").manual_seed(0) + >>> output = pipe(image=image, prompt=prompt, negative_prompt=negative_prompt, generator=generator) + >>> frames = output.frames[0] + >>> export_to_gif(frames, "pia-animation.gif") + ``` +""" + +RANGE_LIST = [ + [1.0, 0.9, 0.85, 0.85, 0.85, 0.8], # 0 Small Motion + [1.0, 0.8, 0.8, 0.8, 0.79, 0.78, 0.75], # Moderate Motion + [1.0, 0.8, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.6, 0.5, 0.5], # Large Motion + [1.0, 0.9, 0.85, 0.85, 0.85, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.85, 0.85, 0.9, 1.0], # Loop + [1.0, 0.8, 0.8, 0.8, 0.79, 0.78, 0.75, 0.75, 0.75, 0.75, 0.75, 0.78, 0.79, 0.8, 0.8, 1.0], # Loop + [1.0, 0.8, 0.7, 0.7, 0.7, 0.7, 0.6, 0.5, 0.5, 0.6, 0.7, 0.7, 0.7, 0.7, 0.8, 1.0], # Loop + [0.5, 0.4, 0.4, 0.4, 0.35, 0.3], # Style Transfer Candidate Small Motion + [0.5, 0.4, 0.4, 0.4, 0.35, 0.35, 0.3, 0.25, 0.2], # Style Transfer Moderate Motion + [0.5, 0.2], # Style Transfer Large Motion +] + + +# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid +def tensor2vid(video: torch.Tensor, processor: "VaeImageProcessor", output_type: str = "np"): + batch_size, channels, num_frames, height, width = video.shape + outputs = [] + for batch_idx in range(batch_size): + batch_vid = video[batch_idx].permute(1, 0, 2, 3) + batch_output = processor.postprocess(batch_vid, output_type) + + outputs.append(batch_output) + + if output_type == "np": + outputs = np.stack(outputs) + + elif output_type == "pt": + outputs = torch.stack(outputs) + + elif not output_type == "pil": + raise ValueError(f"{output_type} does not exist. 
Please choose one of ['np', 'pt', 'pil']") + + return outputs + + +def prepare_mask_coef_by_statistics(num_frames: int, cond_frame: int, motion_scale: int): + assert num_frames > 0, "video_length should be greater than 0" + + assert num_frames > cond_frame, "video_length should be greater than cond_frame" + + range_list = RANGE_LIST + + assert motion_scale < len(range_list), f"motion_scale type{motion_scale} not implemented" + + coef = range_list[motion_scale] + coef = coef + ([coef[-1]] * (num_frames - len(coef))) + + order = [abs(i - cond_frame) for i in range(num_frames)] + coef = [coef[order[i]] for i in range(num_frames)] + + return coef + + +def _get_freeinit_freq_filter( + shape: Tuple[int, ...], + device: Union[str, torch.dtype], + filter_type: str, + order: float, + spatial_stop_frequency: float, + temporal_stop_frequency: float, +) -> torch.Tensor: + r"""Returns the FreeInit filter based on filter type and other input conditions.""" + + time, height, width = shape[-3], shape[-2], shape[-1] + mask = torch.zeros(shape) + + if spatial_stop_frequency == 0 or temporal_stop_frequency == 0: + return mask + + if filter_type == "butterworth": + + def retrieve_mask(x): + return 1 / (1 + (x / spatial_stop_frequency**2) ** order) + elif filter_type == "gaussian": + + def retrieve_mask(x): + return math.exp(-1 / (2 * spatial_stop_frequency**2) * x) + elif filter_type == "ideal": + + def retrieve_mask(x): + return 1 if x <= spatial_stop_frequency * 2 else 0 + else: + raise NotImplementedError("`filter_type` must be one of gaussian, butterworth or ideal") + + for t in range(time): + for h in range(height): + for w in range(width): + d_square = ( + ((spatial_stop_frequency / temporal_stop_frequency) * (2 * t / time - 1)) ** 2 + + (2 * h / height - 1) ** 2 + + (2 * w / width - 1) ** 2 + ) + mask[..., t, h, w] = retrieve_mask(d_square) + + return mask.to(device) + + +def _freq_mix_3d(x: torch.Tensor, noise: torch.Tensor, LPF: torch.Tensor) -> torch.Tensor: + r"""Noise reinitialization.""" + # FFT + x_freq = fft.fftn(x, dim=(-3, -2, -1)) + x_freq = fft.fftshift(x_freq, dim=(-3, -2, -1)) + noise_freq = fft.fftn(noise, dim=(-3, -2, -1)) + noise_freq = fft.fftshift(noise_freq, dim=(-3, -2, -1)) + + # frequency mix + HPF = 1 - LPF + x_freq_low = x_freq * LPF + noise_freq_high = noise_freq * HPF + x_freq_mixed = x_freq_low + noise_freq_high # mix in freq domain + + # IFFT + x_freq_mixed = fft.ifftshift(x_freq_mixed, dim=(-3, -2, -1)) + x_mixed = fft.ifftn(x_freq_mixed, dim=(-3, -2, -1)).real + + return x_mixed + + +@dataclass +class PIAPipelineOutput(BaseOutput): + r""" + Output class for PIAPipeline. + + Args: + frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): + Nested list of length `batch_size` with denoised PIL image sequences of length `num_frames`, + NumPy array of shape `(batch_size, num_frames, channels, height, width, + Torch tensor of shape `(batch_size, num_frames, channels, height, width)`. + """ + + frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]] + + +class PIAPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + LoraLoaderMixin, + FromSingleFileMixin, + FreeInitMixin, +): + r""" + Pipeline for text-to-video generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents. + motion_adapter ([`MotionAdapter`]): + A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["feature_extractor", "image_encoder", "motion_adapter"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: Union[UNet2DConditionModel, UNetMotionModel], + scheduler: Union[ + DDIMScheduler, + PNDMScheduler, + LMSDiscreteScheduler, + EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + DPMSolverMultistepScheduler, + ], + motion_adapter: Optional[MotionAdapter] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + if isinstance(unet, UNet2DConditionModel): + unet = UNetMotionModel.from_unet2d(unet, motion_adapter) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + motion_adapter=motion_adapter, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. 
Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + image = self.vae.decode(latents).sample + video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_masked_condition( + self, + image, + batch_size, + num_channels_latents, + num_frames, + height, + width, + dtype, + device, + generator, + motion_scale=0, + ): + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + _, _, _, scaled_height, scaled_width = shape + + image = self.image_processor.preprocess(image) + image = image.to(device, dtype) + + if isinstance(generator, list): + image_latent = [ + self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size) + ] + image_latent = torch.cat(image_latent, dim=0) + else: + image_latent = self.vae.encode(image).latent_dist.sample(generator) + + image_latent = image_latent.to(device=device, dtype=dtype) + image_latent = torch.nn.functional.interpolate(image_latent, size=[scaled_height, scaled_width]) + image_latent_padding = image_latent.clone() * self.vae.config.scaling_factor + + mask = torch.zeros((batch_size, 1, num_frames, scaled_height, scaled_width)).to(device=device, dtype=dtype) + mask_coef = prepare_mask_coef_by_statistics(num_frames, 0, motion_scale) + masked_image = torch.zeros(batch_size, 4, num_frames, scaled_height, scaled_width).to( + device=device, dtype=self.unet.dtype + ) + for f in range(num_frames): + mask[:, :, f, :, :] = mask_coef[f] + masked_image[:, :, f, :, :] = image_latent_padding.clone() + + mask = torch.cat([mask] * 2) if self.do_classifier_free_guidance else mask + masked_image = torch.cat([masked_image] * 2) if self.do_classifier_free_guidance else masked_image + + return mask, masked_image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
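As a concrete illustration of the `get_timesteps` truncation above (assumed values, scheduler order 1): with 50 inference steps and `strength=0.6`, only the last 30 timesteps of the schedule are run.

```py
# Worked example of the `strength` truncation in `get_timesteps`, using the
# assumed values 50 steps, strength 0.6, scheduler order 1.
num_inference_steps, strength, order = 50, 0.6, 1

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 30
t_start = max(num_inference_steps - init_timestep, 0)                          # 20
remaining_steps = num_inference_steps - t_start                                # 30

# The pipeline then uses scheduler.timesteps[t_start * order:], i.e. the last
# 30 timesteps of the 50-step schedule; strength=1.0 would keep all 50.
print(t_start, remaining_steps)  # 20 30
```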
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: PipelineImageInput, + prompt: Union[str, List[str]] = None, + strength: float = 1.0, + num_frames: Optional[int] = 16, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + motion_scale: int = 0, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + r""" + The call function to the pipeline for generation. + + Args: + image (`PipelineImageInput`): + The input image to be used for video generation. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + strength (`float`, *optional*, defaults to 1.0): Indicates extent to transform the reference `image`. Must be between 0 and 1. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated video. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated video. + num_frames (`int`, *optional*, defaults to 16): + The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds + amounts to 2 seconds of video. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality videos at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. 
+ latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. Latents should be of shape + `(batch_size, num_channel, num_frames, height, width)`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + motion_scale: (`int`, *optional*, defaults to 0): + Parameter that controls the amount and type of motion that is added to the image. Increasing the value increases the amount of motion, while specific + ranges of values control the type of motion that is added. Must be between 0 and 8. + Set between 0-2 to only increase the amount of motion. + Set between 3-5 to create looping motion. + Set between 6-8 to perform motion with image style transfer. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated video. Choose between `torch.FloatTensor`, `PIL.Image` or + `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead + of a plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeine class. 
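A hedged usage sketch for the call signature documented above. The checkpoint identifiers and the image URL are placeholders, not guaranteed repositories; substitute whichever SD 1.5-style base model and PIA motion adapter you actually use.

```py
# Illustrative only: repo ids and the image URL below are placeholders.
import torch
from diffusers import MotionAdapter, PIAPipeline
from diffusers.utils import export_to_gif, load_image

adapter = MotionAdapter.from_pretrained("your-org/pia-motion-adapter")   # placeholder
pipe = PIAPipeline.from_pretrained(
    "your-org/sd15-base-model",                                          # placeholder
    motion_adapter=adapter,
    torch_dtype=torch.float16,
).to("cuda")

image = load_image("https://example.com/condition.png")                  # placeholder
result = pipe(
    image=image,
    prompt="a field of flowers swaying in the wind",
    negative_prompt="low quality, blurry",
    num_frames=16,
    num_inference_steps=50,
    guidance_scale=7.5,
    motion_scale=1,   # 0-2: amount of motion, 3-5: looping, 6-8: style transfer
    generator=torch.Generator("cpu").manual_seed(0),
)
export_to_gif(result.frames[0], "pia_output.gif")
```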
+ + Examples: + + Returns: + [`~pipelines.pia.pipeline_pia.PIAPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.pia.pipeline_pia.PIAPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_videos_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) + self._num_timesteps = len(timesteps) + + # 5. Prepare latent variables + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + 4, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents=latents, + ) + mask, masked_image = self.prepare_masked_condition( + image, + batch_size * num_videos_per_prompt, + 4, + num_frames=num_frames, + height=height, + width=width, + dtype=self.unet.dtype, + device=device, + generator=generator, + motion_scale=motion_scale, + ) + if strength < 1.0: + noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype) + latents = self.scheduler.add_noise(masked_image[0], noise, latent_timestep) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 8. 
Denoising loop + num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 + for free_init_iter in range(num_free_init_iters): + if self.free_init_enabled: + latents, timesteps = self._apply_free_init( + latents, free_init_iter, num_inference_steps, device, latents.dtype, generator + ) + + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + latent_model_input = torch.cat([latent_model_input, mask, masked_image], dim=1) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + ).sample + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # 9. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents) + video = tensor2vid(video_tensor, self.image_processor, output_type=output_type) + + # 10. Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return PIAPipelineOutput(frames=video) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pipeline_flax_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pipeline_flax_utils.py new file mode 100644 index 000000000..b1035c1f2 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pipeline_flax_utils.py @@ -0,0 +1,616 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
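The guidance step in the PIA denoising loop above combines the unconditional and text-conditioned noise predictions. A minimal numeric sketch of that update, with toy tensors and an assumed guidance scale:

```py
# Toy illustration of the classifier-free guidance update used in the PIA
# denoising loop above; tensor shapes and the guidance scale are assumptions.
import torch

guidance_scale = 7.5
# Unconditional half stacked before the text-conditioned half, mirroring
# torch.cat([negative_prompt_embeds, prompt_embeds]) earlier in the pipeline.
noise_pred = torch.randn(2, 4, 16, 32, 32)
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 16, 32, 32)
```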
+ +import importlib +import inspect +import os +from typing import Any, Dict, List, Optional, Union + +import flax +import numpy as np +import PIL.Image +from flax.core.frozen_dict import FrozenDict +from huggingface_hub import create_repo, snapshot_download +from huggingface_hub.utils import validate_hf_hub_args +from PIL import Image +from tqdm.auto import tqdm + +from ..configuration_utils import ConfigMixin +from ..models.modeling_flax_utils import FLAX_WEIGHTS_NAME, FlaxModelMixin +from ..schedulers.scheduling_utils_flax import SCHEDULER_CONFIG_NAME, FlaxSchedulerMixin +from ..utils import ( + CONFIG_NAME, + BaseOutput, + PushToHubMixin, + http_user_agent, + is_transformers_available, + logging, +) + + +if is_transformers_available(): + from transformers import FlaxPreTrainedModel + +INDEX_FILE = "diffusion_flax_model.bin" + + +logger = logging.get_logger(__name__) + + +LOADABLE_CLASSES = { + "diffusers": { + "FlaxModelMixin": ["save_pretrained", "from_pretrained"], + "FlaxSchedulerMixin": ["save_pretrained", "from_pretrained"], + "FlaxDiffusionPipeline": ["save_pretrained", "from_pretrained"], + }, + "transformers": { + "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], + "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], + "FlaxPreTrainedModel": ["save_pretrained", "from_pretrained"], + "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], + "ProcessorMixin": ["save_pretrained", "from_pretrained"], + "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], + }, +} + +ALL_IMPORTABLE_CLASSES = {} +for library in LOADABLE_CLASSES: + ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) + + +def import_flax_or_no_model(module, class_name): + try: + # 1. First make sure that if a Flax object is present, import this one + class_obj = getattr(module, "Flax" + class_name) + except AttributeError: + # 2. If this doesn't work, it's not a model and we don't append "Flax" + class_obj = getattr(module, class_name) + except AttributeError: + raise ValueError(f"Neither Flax{class_name} nor {class_name} exist in {module}") + + return class_obj + + +@flax.struct.dataclass +class FlaxImagePipelineOutput(BaseOutput): + """ + Output class for image pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + + +class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin): + r""" + Base class for Flax-based pipelines. + + [`FlaxDiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and + provides methods for loading, downloading and saving models. It also includes methods to: + + - enable/disable the progress bar for the denoising iteration + + Class attributes: + + - **config_name** ([`str`]) -- The configuration filename that stores the class and module names of all the + diffusion pipeline's components. 
+ """ + + config_name = "model_index.json" + + def register_modules(self, **kwargs): + # import it here to avoid circular import + from diffusers import pipelines + + for name, module in kwargs.items(): + if module is None: + register_dict = {name: (None, None)} + else: + # retrieve library + library = module.__module__.split(".")[0] + + # check if the module is a pipeline module + pipeline_dir = module.__module__.split(".")[-2] + path = module.__module__.split(".") + is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir) + + # if library is not in LOADABLE_CLASSES, then it is a custom module. + # Or if it's a pipeline module, then the module is inside the pipeline + # folder so we set the library to module name. + if library not in LOADABLE_CLASSES or is_pipeline_module: + library = pipeline_dir + + # retrieve class_name + class_name = module.__class__.__name__ + + register_dict = {name: (library, class_name)} + + # save model index config + self.register_to_config(**register_dict) + + # set models + setattr(self, name, module) + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + params: Union[Dict, FrozenDict], + push_to_hub: bool = False, + **kwargs, + ): + # TODO: handle inference_state + """ + Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its + class implements both a save and loading method. The pipeline is easily reloaded using the + [`~FlaxDiffusionPipeline.from_pretrained`] class method. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to which to save. Will be created if it doesn't exist. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
+ """ + self.save_config(save_directory) + + model_index_dict = dict(self.config) + model_index_dict.pop("_class_name") + model_index_dict.pop("_diffusers_version") + model_index_dict.pop("_module", None) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + private = kwargs.pop("private", False) + create_pr = kwargs.pop("create_pr", False) + token = kwargs.pop("token", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id + + for pipeline_component_name in model_index_dict.keys(): + sub_model = getattr(self, pipeline_component_name) + if sub_model is None: + # edge case for saving a pipeline with safety_checker=None + continue + + model_cls = sub_model.__class__ + + save_method_name = None + # search for the model's base class in LOADABLE_CLASSES + for library_name, library_classes in LOADABLE_CLASSES.items(): + library = importlib.import_module(library_name) + for base_class, save_load_methods in library_classes.items(): + class_candidate = getattr(library, base_class, None) + if class_candidate is not None and issubclass(model_cls, class_candidate): + # if we found a suitable base class in LOADABLE_CLASSES then grab its save method + save_method_name = save_load_methods[0] + break + if save_method_name is not None: + break + + save_method = getattr(sub_model, save_method_name) + expects_params = "params" in set(inspect.signature(save_method).parameters.keys()) + + if expects_params: + save_method( + os.path.join(save_directory, pipeline_component_name), params=params[pipeline_component_name] + ) + else: + save_method(os.path.join(save_directory, pipeline_component_name)) + + if push_to_hub: + self._upload_folder( + save_directory, + repo_id, + token=token, + commit_message=commit_message, + create_pr=create_pr, + ) + + @classmethod + @validate_hf_hub_args + def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): + r""" + Instantiate a Flax-based diffusion pipeline from pretrained pipeline weights. + + The pipeline is set in evaluation mode (`model.eval()) by default and dropout modules are deactivated. + + If you get the error message below, you need to finetune the weights for your downstream task: + + ``` + Some weights of FlaxUNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: + ``` + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *repo id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained pipeline + hosted on the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved + using [`~FlaxDiffusionPipeline.save_pretrained`]. + dtype (`str` or `jnp.dtype`, *optional*): + Override the default `jnp.dtype` and load the model under this dtype. If `"auto"`, the dtype is + automatically derived from the model's weights. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. 
+ proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you're downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (the pipeline components) of the specific pipeline + class. The overwritten components are passed directly to the pipelines `__init__` method. + + + + To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with + `huggingface-cli login`. + + + + Examples: + + ```py + >>> from diffusers import FlaxDiffusionPipeline + + >>> # Download pipeline from huggingface.co and cache. + >>> # Requires to be logged in to Hugging Face hub, + >>> # see more in [the documentation](https://huggingface.co/docs/hub/security-tokens) + >>> pipeline, params = FlaxDiffusionPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", + ... revision="bf16", + ... dtype=jnp.bfloat16, + ... ) + + >>> # Download pipeline, but use a different scheduler + >>> from diffusers import FlaxDPMSolverMultistepScheduler + + >>> model_id = "runwayml/stable-diffusion-v1-5" + >>> dpmpp, dpmpp_state = FlaxDPMSolverMultistepScheduler.from_pretrained( + ... model_id, + ... subfolder="scheduler", + ... ) + + >>> dpm_pipe, dpm_params = FlaxStableDiffusionPipeline.from_pretrained( + ... model_id, revision="bf16", dtype=jnp.bfloat16, scheduler=dpmpp + ... ) + >>> dpm_params["scheduler"] = dpmpp_state + ``` + """ + cache_dir = kwargs.pop("cache_dir", None) + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", False) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + from_pt = kwargs.pop("from_pt", False) + use_memory_efficient_attention = kwargs.pop("use_memory_efficient_attention", False) + split_head_dim = kwargs.pop("split_head_dim", False) + dtype = kwargs.pop("dtype", None) + + # 1. 
Download the checkpoints and configs + # use snapshot download here to get it working from from_pretrained + if not os.path.isdir(pretrained_model_name_or_path): + config_dict = cls.load_config( + pretrained_model_name_or_path, + cache_dir=cache_dir, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + ) + # make sure we only download sub-folders and `diffusers` filenames + folder_names = [k for k in config_dict.keys() if not k.startswith("_")] + allow_patterns = [os.path.join(k, "*") for k in folder_names] + allow_patterns += [FLAX_WEIGHTS_NAME, SCHEDULER_CONFIG_NAME, CONFIG_NAME, cls.config_name] + + ignore_patterns = ["*.bin", "*.safetensors"] if not from_pt else [] + ignore_patterns += ["*.onnx", "*.onnx_data", "*.xml", "*.pb"] + + if cls != FlaxDiffusionPipeline: + requested_pipeline_class = cls.__name__ + else: + requested_pipeline_class = config_dict.get("_class_name", cls.__name__) + requested_pipeline_class = ( + requested_pipeline_class + if requested_pipeline_class.startswith("Flax") + else "Flax" + requested_pipeline_class + ) + + user_agent = {"pipeline_class": requested_pipeline_class} + user_agent = http_user_agent(user_agent) + + # download all allow_patterns + cached_folder = snapshot_download( + pretrained_model_name_or_path, + cache_dir=cache_dir, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + user_agent=user_agent, + ) + else: + cached_folder = pretrained_model_name_or_path + + config_dict = cls.load_config(cached_folder) + + # 2. Load the pipeline class, if using custom module then load it from the hub + # if we load from explicit class, let's use it + if cls != FlaxDiffusionPipeline: + pipeline_class = cls + else: + diffusers_module = importlib.import_module(cls.__module__.split(".")[0]) + class_name = ( + config_dict["_class_name"] + if config_dict["_class_name"].startswith("Flax") + else "Flax" + config_dict["_class_name"] + ) + pipeline_class = getattr(diffusers_module, class_name) + + # some modules can be passed directly to the init + # in this case they are already instantiated in `kwargs` + # extract them here + expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class) + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + + init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs) + + # define init kwargs + init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict} + init_kwargs = {**init_kwargs, **passed_pipe_kwargs} + + # remove `null` components + def load_module(name, value): + if value[0] is None: + return False + if name in passed_class_obj and passed_class_obj[name] is None: + return False + return True + + init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)} + + # Throw nice warnings / errors for fast accelerate loading + if len(unused_kwargs) > 0: + logger.warning( + f"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored." + ) + + # inference_params + params = {} + + # import it here to avoid circular import + from diffusers import pipelines + + # 3. 
Load each module in the pipeline + for name, (library_name, class_name) in init_dict.items(): + if class_name is None: + # edge case for when the pipeline was saved with safety_checker=None + init_kwargs[name] = None + continue + + is_pipeline_module = hasattr(pipelines, library_name) + loaded_sub_model = None + sub_model_should_be_defined = True + + # if the model is in a pipeline module, then we load it from the pipeline + if name in passed_class_obj: + # 1. check that passed_class_obj has correct parent class + if not is_pipeline_module: + library = importlib.import_module(library_name) + class_obj = getattr(library, class_name) + importable_classes = LOADABLE_CLASSES[library_name] + class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} + + expected_class_obj = None + for class_name, class_candidate in class_candidates.items(): + if class_candidate is not None and issubclass(class_obj, class_candidate): + expected_class_obj = class_candidate + + if not issubclass(passed_class_obj[name].__class__, expected_class_obj): + raise ValueError( + f"{passed_class_obj[name]} is of type: {type(passed_class_obj[name])}, but should be" + f" {expected_class_obj}" + ) + elif passed_class_obj[name] is None: + logger.warning( + f"You have passed `None` for {name} to disable its functionality in {pipeline_class}. Note" + f" that this might lead to problems when using {pipeline_class} and is not recommended." + ) + sub_model_should_be_defined = False + else: + logger.warning( + f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it" + " has the correct type" + ) + + # set passed class object + loaded_sub_model = passed_class_obj[name] + elif is_pipeline_module: + pipeline_module = getattr(pipelines, library_name) + class_obj = import_flax_or_no_model(pipeline_module, class_name) + + importable_classes = ALL_IMPORTABLE_CLASSES + class_candidates = {c: class_obj for c in importable_classes.keys()} + else: + # else we just import it from the library. + library = importlib.import_module(library_name) + class_obj = import_flax_or_no_model(library, class_name) + + importable_classes = LOADABLE_CLASSES[library_name] + class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} + + if loaded_sub_model is None and sub_model_should_be_defined: + load_method_name = None + for class_name, class_candidate in class_candidates.items(): + if class_candidate is not None and issubclass(class_obj, class_candidate): + load_method_name = importable_classes[class_name][1] + + load_method = getattr(class_obj, load_method_name) + + # check if the module is in a subdirectory + if os.path.isdir(os.path.join(cached_folder, name)): + loadable_folder = os.path.join(cached_folder, name) + else: + loaded_sub_model = cached_folder + + if issubclass(class_obj, FlaxModelMixin): + loaded_sub_model, loaded_params = load_method( + loadable_folder, + from_pt=from_pt, + use_memory_efficient_attention=use_memory_efficient_attention, + split_head_dim=split_head_dim, + dtype=dtype, + ) + params[name] = loaded_params + elif is_transformers_available() and issubclass(class_obj, FlaxPreTrainedModel): + if from_pt: + # TODO(Suraj): Fix this in Transformers. 
We should be able to use `_do_init=False` here + loaded_sub_model = load_method(loadable_folder, from_pt=from_pt) + loaded_params = loaded_sub_model.params + del loaded_sub_model._params + else: + loaded_sub_model, loaded_params = load_method(loadable_folder, _do_init=False) + params[name] = loaded_params + elif issubclass(class_obj, FlaxSchedulerMixin): + loaded_sub_model, scheduler_state = load_method(loadable_folder) + params[name] = scheduler_state + else: + loaded_sub_model = load_method(loadable_folder) + + init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...) + + # 4. Potentially add passed objects if expected + missing_modules = set(expected_modules) - set(init_kwargs.keys()) + passed_modules = list(passed_class_obj.keys()) + + if len(missing_modules) > 0 and missing_modules <= set(passed_modules): + for module in missing_modules: + init_kwargs[module] = passed_class_obj.get(module, None) + elif len(missing_modules) > 0: + passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs + raise ValueError( + f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed." + ) + + model = pipeline_class(**init_kwargs, dtype=dtype) + return model, params + + @classmethod + def _get_signature_keys(cls, obj): + parameters = inspect.signature(obj.__init__).parameters + required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty} + optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty}) + expected_modules = set(required_parameters.keys()) - {"self"} + + return expected_modules, optional_parameters + + @property + def components(self) -> Dict[str, Any]: + r""" + + The `self.components` property can be useful to run different pipelines with the same weights and + configurations to not have to re-allocate memory. + + Examples: + + ```py + >>> from diffusers import ( + ... FlaxStableDiffusionPipeline, + ... FlaxStableDiffusionImg2ImgPipeline, + ... ) + + >>> text2img = FlaxStableDiffusionPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", revision="bf16", dtype=jnp.bfloat16 + ... ) + >>> img2img = FlaxStableDiffusionImg2ImgPipeline(**text2img.components) + ``` + + Returns: + A dictionary containing all the modules needed to initialize the pipeline. + """ + expected_modules, optional_parameters = self._get_signature_keys(self) + components = { + k: getattr(self, k) for k in self.config.keys() if not k.startswith("_") and k not in optional_parameters + } + + if set(components.keys()) != expected_modules: + raise ValueError( + f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected" + f" {expected_modules} to be defined, but {components} are defined." + ) + + return components + + @staticmethod + def numpy_to_pil(images): + """ + Convert a NumPy image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] 
+ images = (images * 255).round().astype("uint8") + if images.shape[-1] == 1: + # special case for grayscale (single channel) images + pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] + else: + pil_images = [Image.fromarray(image) for image in images] + + return pil_images + + # TODO: make it compatible with jax.lax + def progress_bar(self, iterable): + if not hasattr(self, "_progress_bar_config"): + self._progress_bar_config = {} + elif not isinstance(self._progress_bar_config, dict): + raise ValueError( + f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}." + ) + + return tqdm(iterable, **self._progress_bar_config) + + def set_progress_bar_config(self, **kwargs): + self._progress_bar_config = kwargs diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pipeline_loading_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pipeline_loading_utils.py new file mode 100644 index 000000000..30c17eec1 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pipeline_loading_utils.py @@ -0,0 +1,508 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
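A quick, self-contained check of the `numpy_to_pil` conversion defined just above, using random data as a stand-in for decoded images:

```py
# Stand-alone check of the numpy -> PIL conversion above: float images in
# [0, 1] with shape (batch, height, width, channels) become uint8 PIL images.
import numpy as np
from PIL import Image

images = np.random.rand(2, 64, 64, 3)                 # fake decoded batch
uint8 = (images * 255).round().astype("uint8")
pil_images = [Image.fromarray(im) for im in uint8]
assert len(pil_images) == 2 and pil_images[0].size == (64, 64)
```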
+ + +import importlib +import os +import re +import warnings +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + +import torch +from huggingface_hub import ( + model_info, +) +from packaging import version + +from ..utils import ( + SAFETENSORS_WEIGHTS_NAME, + WEIGHTS_NAME, + get_class_from_dynamic_module, + is_peft_available, + is_transformers_available, + logging, +) +from ..utils.torch_utils import is_compiled_module + + +if is_transformers_available(): + import transformers + from transformers import PreTrainedModel + from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME + from transformers.utils import SAFE_WEIGHTS_NAME as TRANSFORMERS_SAFE_WEIGHTS_NAME + from transformers.utils import WEIGHTS_NAME as TRANSFORMERS_WEIGHTS_NAME +from huggingface_hub.utils import validate_hf_hub_args + +from ..utils import FLAX_WEIGHTS_NAME, ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME + + +INDEX_FILE = "diffusion_pytorch_model.bin" +CUSTOM_PIPELINE_FILE_NAME = "pipeline.py" +DUMMY_MODULES_FOLDER = "diffusers.utils" +TRANSFORMERS_DUMMY_MODULES_FOLDER = "transformers.utils" +CONNECTED_PIPES_KEYS = ["prior"] + +logger = logging.get_logger(__name__) + +LOADABLE_CLASSES = { + "diffusers": { + "ModelMixin": ["save_pretrained", "from_pretrained"], + "SchedulerMixin": ["save_pretrained", "from_pretrained"], + "DiffusionPipeline": ["save_pretrained", "from_pretrained"], + "OnnxRuntimeModel": ["save_pretrained", "from_pretrained"], + }, + "transformers": { + "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], + "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], + "PreTrainedModel": ["save_pretrained", "from_pretrained"], + "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], + "ProcessorMixin": ["save_pretrained", "from_pretrained"], + "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], + }, + "onnxruntime.training": { + "ORTModule": ["save_pretrained", "from_pretrained"], + }, +} + +ALL_IMPORTABLE_CLASSES = {} +for library in LOADABLE_CLASSES: + ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) + + +def is_safetensors_compatible(filenames, variant=None, passed_components=None) -> bool: + """ + Checking for safetensors compatibility: + - By default, all models are saved with the default pytorch serialization, so we use the list of default pytorch + files to know which safetensors files are needed. + - The model is safetensors compatible only if there is a matching safetensors file for every default pytorch file. 
+ + Converting default pytorch serialized filenames to safetensors serialized filenames: + - For models from the diffusers library, just replace the ".bin" extension with ".safetensors" + - For models from the transformers library, the filename changes from "pytorch_model" to "model", and the ".bin" + extension is replaced with ".safetensors" + """ + pt_filenames = [] + + sf_filenames = set() + + passed_components = passed_components or [] + + for filename in filenames: + _, extension = os.path.splitext(filename) + + if len(filename.split("/")) == 2 and filename.split("/")[0] in passed_components: + continue + + if extension == ".bin": + pt_filenames.append(os.path.normpath(filename)) + elif extension == ".safetensors": + sf_filenames.add(os.path.normpath(filename)) + + for filename in pt_filenames: + # filename = 'foo/bar/baz.bam' -> path = 'foo/bar', filename = 'baz', extension = '.bam' + path, filename = os.path.split(filename) + filename, extension = os.path.splitext(filename) + + if filename.startswith("pytorch_model"): + filename = filename.replace("pytorch_model", "model") + else: + filename = filename + + expected_sf_filename = os.path.normpath(os.path.join(path, filename)) + expected_sf_filename = f"{expected_sf_filename}.safetensors" + if expected_sf_filename not in sf_filenames: + logger.warning(f"{expected_sf_filename} not found") + return False + + return True + + +def variant_compatible_siblings(filenames, variant=None) -> Union[List[os.PathLike], str]: + weight_names = [ + WEIGHTS_NAME, + SAFETENSORS_WEIGHTS_NAME, + FLAX_WEIGHTS_NAME, + ONNX_WEIGHTS_NAME, + ONNX_EXTERNAL_WEIGHTS_NAME, + ] + + if is_transformers_available(): + weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME] + + # model_pytorch, diffusion_model_pytorch, ... + weight_prefixes = [w.split(".")[0] for w in weight_names] + # .bin, .safetensors, ... 
+ weight_suffixs = [w.split(".")[-1] for w in weight_names] + # -00001-of-00002 + transformers_index_format = r"\d{5}-of-\d{5}" + + if variant is not None: + # `diffusion_pytorch_model.fp16.bin` as well as `model.fp16-00001-of-00002.safetensors` + variant_file_re = re.compile( + rf"({'|'.join(weight_prefixes)})\.({variant}|{variant}-{transformers_index_format})\.({'|'.join(weight_suffixs)})$" + ) + # `text_encoder/pytorch_model.bin.index.fp16.json` + variant_index_re = re.compile( + rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.{variant}\.json$" + ) + + # `diffusion_pytorch_model.bin` as well as `model-00001-of-00002.safetensors` + non_variant_file_re = re.compile( + rf"({'|'.join(weight_prefixes)})(-{transformers_index_format})?\.({'|'.join(weight_suffixs)})$" + ) + # `text_encoder/pytorch_model.bin.index.json` + non_variant_index_re = re.compile(rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.json") + + if variant is not None: + variant_weights = {f for f in filenames if variant_file_re.match(f.split("/")[-1]) is not None} + variant_indexes = {f for f in filenames if variant_index_re.match(f.split("/")[-1]) is not None} + variant_filenames = variant_weights | variant_indexes + else: + variant_filenames = set() + + non_variant_weights = {f for f in filenames if non_variant_file_re.match(f.split("/")[-1]) is not None} + non_variant_indexes = {f for f in filenames if non_variant_index_re.match(f.split("/")[-1]) is not None} + non_variant_filenames = non_variant_weights | non_variant_indexes + + # all variant filenames will be used by default + usable_filenames = set(variant_filenames) + + def convert_to_variant(filename): + if "index" in filename: + variant_filename = filename.replace("index", f"index.{variant}") + elif re.compile(f"^(.*?){transformers_index_format}").match(filename) is not None: + variant_filename = f"{filename.split('-')[0]}.{variant}-{'-'.join(filename.split('-')[1:])}" + else: + variant_filename = f"{filename.split('.')[0]}.{variant}.{filename.split('.')[1]}" + return variant_filename + + for f in non_variant_filenames: + variant_filename = convert_to_variant(f) + if variant_filename not in usable_filenames: + usable_filenames.add(f) + + return usable_filenames, variant_filenames + + +@validate_hf_hub_args +def warn_deprecated_model_variant(pretrained_model_name_or_path, token, variant, revision, model_filenames): + info = model_info( + pretrained_model_name_or_path, + token=token, + revision=None, + ) + filenames = {sibling.rfilename for sibling in info.siblings} + comp_model_filenames, _ = variant_compatible_siblings(filenames, variant=revision) + comp_model_filenames = [".".join(f.split(".")[:1] + f.split(".")[2:]) for f in comp_model_filenames] + + if set(model_filenames).issubset(set(comp_model_filenames)): + warnings.warn( + f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` even though you can load it via `variant=`{revision}`. Loading model variants via `revision='{revision}'` is deprecated and will be removed in diffusers v1. Please use `variant='{revision}'` instead.", + FutureWarning, + ) + else: + warnings.warn( + f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. 
However, it appears that {pretrained_model_name_or_path} currently does not have the required variant filenames in the 'main' branch. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {revision} files' so that the correct variant file can be added.", + FutureWarning, + ) + + +def _unwrap_model(model): + """Unwraps a model.""" + if is_compiled_module(model): + model = model._orig_mod + + if is_peft_available(): + from peft import PeftModel + + if isinstance(model, PeftModel): + model = model.base_model.model + + return model + + +def maybe_raise_or_warn( + library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module +): + """Simple helper method to raise or warn in case incorrect module has been passed""" + if not is_pipeline_module: + library = importlib.import_module(library_name) + class_obj = getattr(library, class_name) + class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} + + expected_class_obj = None + for class_name, class_candidate in class_candidates.items(): + if class_candidate is not None and issubclass(class_obj, class_candidate): + expected_class_obj = class_candidate + + # Dynamo wraps the original model in a private class. + # I didn't find a public API to get the original class. + sub_model = passed_class_obj[name] + unwrapped_sub_model = _unwrap_model(sub_model) + model_cls = unwrapped_sub_model.__class__ + + if not issubclass(model_cls, expected_class_obj): + raise ValueError( + f"{passed_class_obj[name]} is of type: {model_cls}, but should be" f" {expected_class_obj}" + ) + else: + logger.warning( + f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it" + " has the correct type" + ) + + +def get_class_obj_and_candidates( + library_name, class_name, importable_classes, pipelines, is_pipeline_module, component_name=None, cache_dir=None +): + """Simple helper method to retrieve class object of module as well as potential parent class objects""" + component_folder = os.path.join(cache_dir, component_name) + + if is_pipeline_module: + pipeline_module = getattr(pipelines, library_name) + + class_obj = getattr(pipeline_module, class_name) + class_candidates = {c: class_obj for c in importable_classes.keys()} + elif os.path.isfile(os.path.join(component_folder, library_name + ".py")): + # load custom component + class_obj = get_class_from_dynamic_module( + component_folder, module_file=library_name + ".py", class_name=class_name + ) + class_candidates = {c: class_obj for c in importable_classes.keys()} + else: + # else we just import it from the library. 
+ library = importlib.import_module(library_name) + + class_obj = getattr(library, class_name) + class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} + + return class_obj, class_candidates + + +def _get_pipeline_class( + class_obj, + config=None, + load_connected_pipeline=False, + custom_pipeline=None, + repo_id=None, + hub_revision=None, + class_name=None, + cache_dir=None, + revision=None, +): + if custom_pipeline is not None: + if custom_pipeline.endswith(".py"): + path = Path(custom_pipeline) + # decompose into folder & file + file_name = path.name + custom_pipeline = path.parent.absolute() + elif repo_id is not None: + file_name = f"{custom_pipeline}.py" + custom_pipeline = repo_id + else: + file_name = CUSTOM_PIPELINE_FILE_NAME + + if repo_id is not None and hub_revision is not None: + # if we load the pipeline code from the Hub + # make sure to overwrite the `revision` + revision = hub_revision + + return get_class_from_dynamic_module( + custom_pipeline, + module_file=file_name, + class_name=class_name, + cache_dir=cache_dir, + revision=revision, + ) + + if class_obj.__name__ != "DiffusionPipeline": + return class_obj + + diffusers_module = importlib.import_module(class_obj.__module__.split(".")[0]) + class_name = class_name or config["_class_name"] + if not class_name: + raise ValueError( + "The class name could not be found in the configuration file. Please make sure to pass the correct `class_name`." + ) + + class_name = class_name[4:] if class_name.startswith("Flax") else class_name + + pipeline_cls = getattr(diffusers_module, class_name) + + if load_connected_pipeline: + from .auto_pipeline import _get_connected_pipeline + + connected_pipeline_cls = _get_connected_pipeline(pipeline_cls) + if connected_pipeline_cls is not None: + logger.info( + f"Loading connected pipeline {connected_pipeline_cls.__name__} instead of {pipeline_cls.__name__} as specified via `load_connected_pipeline=True`" + ) + else: + logger.info(f"{pipeline_cls.__name__} has no connected pipeline class. 
Loading {pipeline_cls.__name__}.") + + pipeline_cls = connected_pipeline_cls or pipeline_cls + + return pipeline_cls + + +def load_sub_model( + library_name: str, + class_name: str, + importable_classes: List[Any], + pipelines: Any, + is_pipeline_module: bool, + pipeline_class: Any, + torch_dtype: torch.dtype, + provider: Any, + sess_options: Any, + device_map: Optional[Union[Dict[str, torch.device], str]], + max_memory: Optional[Dict[Union[int, str], Union[int, str]]], + offload_folder: Optional[Union[str, os.PathLike]], + offload_state_dict: bool, + model_variants: Dict[str, str], + name: str, + from_flax: bool, + variant: str, + low_cpu_mem_usage: bool, + cached_folder: Union[str, os.PathLike], +): + """Helper method to load the module `name` from `library_name` and `class_name`""" + # retrieve class candidates + class_obj, class_candidates = get_class_obj_and_candidates( + library_name, + class_name, + importable_classes, + pipelines, + is_pipeline_module, + component_name=name, + cache_dir=cached_folder, + ) + + load_method_name = None + # retrieve load method name + for class_name, class_candidate in class_candidates.items(): + if class_candidate is not None and issubclass(class_obj, class_candidate): + load_method_name = importable_classes[class_name][1] + + # if load method name is None, then we have a dummy module -> raise Error + if load_method_name is None: + none_module = class_obj.__module__ + is_dummy_path = none_module.startswith(DUMMY_MODULES_FOLDER) or none_module.startswith( + TRANSFORMERS_DUMMY_MODULES_FOLDER + ) + if is_dummy_path and "dummy" in none_module: + # call class_obj for nice error message of missing requirements + class_obj() + + raise ValueError( + f"The component {class_obj} of {pipeline_class} cannot be loaded as it does not seem to have" + f" any of the loading methods defined in {ALL_IMPORTABLE_CLASSES}." + ) + + load_method = getattr(class_obj, load_method_name) + + # add kwargs to loading method + diffusers_module = importlib.import_module(__name__.split(".")[0]) + loading_kwargs = {} + if issubclass(class_obj, torch.nn.Module): + loading_kwargs["torch_dtype"] = torch_dtype + if issubclass(class_obj, diffusers_module.OnnxRuntimeModel): + loading_kwargs["provider"] = provider + loading_kwargs["sess_options"] = sess_options + + is_diffusers_model = issubclass(class_obj, diffusers_module.ModelMixin) + + if is_transformers_available(): + transformers_version = version.parse(version.parse(transformers.__version__).base_version) + else: + transformers_version = "N/A" + + is_transformers_model = ( + is_transformers_available() + and issubclass(class_obj, PreTrainedModel) + and transformers_version >= version.parse("4.20.0") + ) + + # When loading a transformers model, if the device_map is None, the weights will be initialized as opposed to diffusers. + # To make default loading faster we set the `low_cpu_mem_usage=low_cpu_mem_usage` flag which is `True` by default. + # This makes sure that the weights won't be initialized which significantly speeds up loading. 
+ if is_diffusers_model or is_transformers_model: + loading_kwargs["device_map"] = device_map + loading_kwargs["max_memory"] = max_memory + loading_kwargs["offload_folder"] = offload_folder + loading_kwargs["offload_state_dict"] = offload_state_dict + loading_kwargs["variant"] = model_variants.pop(name, None) + + if from_flax: + loading_kwargs["from_flax"] = True + + # the following can be deleted once the minimum required `transformers` version + # is higher than 4.27 + if ( + is_transformers_model + and loading_kwargs["variant"] is not None + and transformers_version < version.parse("4.27.0") + ): + raise ImportError( + f"When passing `variant='{variant}'`, please make sure to upgrade your `transformers` version to at least 4.27.0.dev0" + ) + elif is_transformers_model and loading_kwargs["variant"] is None: + loading_kwargs.pop("variant") + + # if `from_flax` and model is transformer model, can currently not load with `low_cpu_mem_usage` + if not (from_flax and is_transformers_model): + loading_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage + else: + loading_kwargs["low_cpu_mem_usage"] = False + + # check if the module is in a subdirectory + if os.path.isdir(os.path.join(cached_folder, name)): + loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs) + else: + # else load from the root directory + loaded_sub_model = load_method(cached_folder, **loading_kwargs) + + return loaded_sub_model + + +def _fetch_class_library_tuple(module): + # import it here to avoid circular import + diffusers_module = importlib.import_module(__name__.split(".")[0]) + pipelines = getattr(diffusers_module, "pipelines") + + # register the config from the original module, not the dynamo compiled one + not_compiled_module = _unwrap_model(module) + library = not_compiled_module.__module__.split(".")[0] + + # check if the module is a pipeline module + module_path_items = not_compiled_module.__module__.split(".") + pipeline_dir = module_path_items[-2] if len(module_path_items) > 2 else None + + path = not_compiled_module.__module__.split(".") + is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir) + + # if library is not in LOADABLE_CLASSES, then it is a custom module. + # Or if it's a pipeline module, then the module is inside the pipeline + # folder so we set the library to module name. + if is_pipeline_module: + library = pipeline_dir + elif library not in LOADABLE_CLASSES: + library = not_compiled_module.__module__ + + # retrieve class_name + class_name = not_compiled_module.__class__.__name__ + + return (library, class_name) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pipeline_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pipeline_utils.py new file mode 100644 index 000000000..341360d4f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pipeline_utils.py @@ -0,0 +1,1771 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import fnmatch +import importlib +import inspect +import os +import re +import sys +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import requests +import torch +from huggingface_hub import ( + ModelCard, + create_repo, + hf_hub_download, + model_info, + snapshot_download, +) +from huggingface_hub.utils import OfflineModeIsEnabled, validate_hf_hub_args +from packaging import version +from requests.exceptions import HTTPError +from tqdm.auto import tqdm + +from .. import __version__ +from ..configuration_utils import ConfigMixin +from ..models import AutoencoderKL +from ..models.attention_processor import FusedAttnProcessor2_0 +from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT +from ..schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME +from ..utils import ( + CONFIG_NAME, + DEPRECATED_REVISION_ARGS, + BaseOutput, + PushToHubMixin, + deprecate, + is_accelerate_available, + is_accelerate_version, + is_torch_npu_available, + is_torch_version, + logging, + numpy_to_pil, +) +from ..utils.hub_utils import load_or_create_model_card, populate_model_card +from ..utils.torch_utils import is_compiled_module + + +if is_torch_npu_available(): + import torch_npu # noqa: F401 + + +from .pipeline_loading_utils import ( + ALL_IMPORTABLE_CLASSES, + CONNECTED_PIPES_KEYS, + CUSTOM_PIPELINE_FILE_NAME, + LOADABLE_CLASSES, + _fetch_class_library_tuple, + _get_pipeline_class, + _unwrap_model, + is_safetensors_compatible, + load_sub_model, + maybe_raise_or_warn, + variant_compatible_siblings, + warn_deprecated_model_variant, +) + + +if is_accelerate_available(): + import accelerate + + +LIBRARIES = [] +for library in LOADABLE_CLASSES: + LIBRARIES.append(library) + +logger = logging.get_logger(__name__) + + +@dataclass +class ImagePipelineOutput(BaseOutput): + """ + Output class for image pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + + +@dataclass +class AudioPipelineOutput(BaseOutput): + """ + Output class for audio pipelines. + + Args: + audios (`np.ndarray`) + List of denoised audio samples of a NumPy array of shape `(batch_size, num_channels, sample_rate)`. + """ + + audios: np.ndarray + + +class DiffusionPipeline(ConfigMixin, PushToHubMixin): + r""" + Base class for all pipelines. + + [`DiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and + provides methods for loading, downloading and saving models. It also includes methods to: + + - move all PyTorch modules to the device of your choice + - enable/disable the progress bar for the denoising iteration + + Class attributes: + + - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the + diffusion pipeline's components. 
+ - **_optional_components** (`List[str]`) -- List of all optional components that don't have to be passed to the + pipeline to function (should be overridden by subclasses). + """ + + config_name = "model_index.json" + model_cpu_offload_seq = None + _optional_components = [] + _exclude_from_cpu_offload = [] + _load_connected_pipes = False + _is_onnx = False + + def register_modules(self, **kwargs): + for name, module in kwargs.items(): + # retrieve library + if module is None or isinstance(module, (tuple, list)) and module[0] is None: + register_dict = {name: (None, None)} + else: + library, class_name = _fetch_class_library_tuple(module) + register_dict = {name: (library, class_name)} + + # save model index config + self.register_to_config(**register_dict) + + # set models + setattr(self, name, module) + + def __setattr__(self, name: str, value: Any): + if name in self.__dict__ and hasattr(self.config, name): + # We need to overwrite the config if name exists in config + if isinstance(getattr(self.config, name), (tuple, list)): + if value is not None and self.config[name][0] is not None: + class_library_tuple = _fetch_class_library_tuple(value) + else: + class_library_tuple = (None, None) + + self.register_to_config(**{name: class_library_tuple}) + else: + self.register_to_config(**{name: value}) + + super().__setattr__(name, value) + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + safe_serialization: bool = True, + variant: Optional[str] = None, + push_to_hub: bool = False, + **kwargs, + ): + """ + Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its + class implements both a save and loading method. The pipeline is easily reloaded using the + [`~DiffusionPipeline.from_pretrained`] class method. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save a pipeline to. Will be created if it doesn't exist. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. + variant (`str`, *optional*): + If specified, weights are saved in the format `pytorch_model..bin`. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
+ """ + model_index_dict = dict(self.config) + model_index_dict.pop("_class_name", None) + model_index_dict.pop("_diffusers_version", None) + model_index_dict.pop("_module", None) + model_index_dict.pop("_name_or_path", None) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + private = kwargs.pop("private", False) + create_pr = kwargs.pop("create_pr", False) + token = kwargs.pop("token", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id + + expected_modules, optional_kwargs = self._get_signature_keys(self) + + def is_saveable_module(name, value): + if name not in expected_modules: + return False + if name in self._optional_components and value[0] is None: + return False + return True + + model_index_dict = {k: v for k, v in model_index_dict.items() if is_saveable_module(k, v)} + for pipeline_component_name in model_index_dict.keys(): + sub_model = getattr(self, pipeline_component_name) + model_cls = sub_model.__class__ + + # Dynamo wraps the original model in a private class. + # I didn't find a public API to get the original class. + if is_compiled_module(sub_model): + sub_model = _unwrap_model(sub_model) + model_cls = sub_model.__class__ + + save_method_name = None + # search for the model's base class in LOADABLE_CLASSES + for library_name, library_classes in LOADABLE_CLASSES.items(): + if library_name in sys.modules: + library = importlib.import_module(library_name) + else: + logger.info( + f"{library_name} is not installed. Cannot save {pipeline_component_name} as {library_classes} from {library_name}" + ) + + for base_class, save_load_methods in library_classes.items(): + class_candidate = getattr(library, base_class, None) + if class_candidate is not None and issubclass(model_cls, class_candidate): + # if we found a suitable base class in LOADABLE_CLASSES then grab its save method + save_method_name = save_load_methods[0] + break + if save_method_name is not None: + break + + if save_method_name is None: + logger.warning( + f"self.{pipeline_component_name}={sub_model} of type {type(sub_model)} cannot be saved." + ) + # make sure that unsaveable components are not tried to be loaded afterward + self.register_to_config(**{pipeline_component_name: (None, None)}) + continue + + save_method = getattr(sub_model, save_method_name) + + # Call the save method with the argument safe_serialization only if it's supported + save_method_signature = inspect.signature(save_method) + save_method_accept_safe = "safe_serialization" in save_method_signature.parameters + save_method_accept_variant = "variant" in save_method_signature.parameters + + save_kwargs = {} + if save_method_accept_safe: + save_kwargs["safe_serialization"] = safe_serialization + if save_method_accept_variant: + save_kwargs["variant"] = variant + + save_method(os.path.join(save_directory, pipeline_component_name), **save_kwargs) + + # finally save the config + self.save_config(save_directory) + + if push_to_hub: + # Create a new empty model card and eventually tag it + model_card = load_or_create_model_card(repo_id, token=token, is_pipeline=True) + model_card = populate_model_card(model_card) + model_card.save(os.path.join(save_directory, "README.md")) + + self._upload_folder( + save_directory, + repo_id, + token=token, + commit_message=commit_message, + create_pr=create_pr, + ) + + def to(self, *args, **kwargs): + r""" + Performs Pipeline dtype and/or device conversion. 
A torch.dtype and torch.device are inferred from the + arguments of `self.to(*args, **kwargs)`. + + + + If the pipeline already has the correct torch.dtype and torch.device, then it is returned as is. Otherwise, + the returned pipeline is a copy of self with the desired torch.dtype and torch.device. + + + + + Here are the ways to call `to`: + + - `to(dtype, silence_dtype_warnings=False) → DiffusionPipeline` to return a pipeline with the specified + [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype) + - `to(device, silence_dtype_warnings=False) → DiffusionPipeline` to return a pipeline with the specified + [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device) + - `to(device=None, dtype=None, silence_dtype_warnings=False) → DiffusionPipeline` to return a pipeline with the + specified [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device) and + [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype) + + Arguments: + dtype (`torch.dtype`, *optional*): + Returns a pipeline with the specified + [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype) + device (`torch.Device`, *optional*): + Returns a pipeline with the specified + [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device) + silence_dtype_warnings (`bool`, *optional*, defaults to `False`): + Whether to omit warnings if the target `dtype` is not compatible with the target `device`. + + Returns: + [`DiffusionPipeline`]: The pipeline converted to the specified `dtype` and/or `device`. + """ + dtype = kwargs.pop("dtype", None) + device = kwargs.pop("device", None) + silence_dtype_warnings = kwargs.pop("silence_dtype_warnings", False) + + dtype_arg = None + device_arg = None + if len(args) == 1: + if isinstance(args[0], torch.dtype): + dtype_arg = args[0] + else: + device_arg = torch.device(args[0]) if args[0] is not None else None + elif len(args) == 2: + if isinstance(args[0], torch.dtype): + raise ValueError( + "When passing two arguments, make sure the first corresponds to `device` and the second to `dtype`." + ) + device_arg = torch.device(args[0]) if args[0] is not None else None + dtype_arg = args[1] + elif len(args) > 2: + raise ValueError("Please make sure to pass at most two arguments (`device` and `dtype`) to `.to(...)`") + + if dtype is not None and dtype_arg is not None: + raise ValueError( + "You have passed `dtype` both as an argument and as a keyword argument. Please only pass one of the two." + ) + + dtype = dtype or dtype_arg + + if device is not None and device_arg is not None: + raise ValueError( + "You have passed `device` both as an argument and as a keyword argument. Please only pass one of the two." + ) + + device = device or device_arg + + # throw warning if pipeline is in "offloaded"-mode but user tries to manually set to GPU. 
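+        # The two helpers below inspect the `_hf_hook` attribute that accelerate attaches to offloaded
+        # modules: a manual `.to("cuda")` raises an error when the pipeline was sequentially offloaded
+        # (`enable_sequential_cpu_offload`), and only logs a warning when model offload
+        # (`enable_model_cpu_offload`) is active.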
+ def module_is_sequentially_offloaded(module): + if not is_accelerate_available() or is_accelerate_version("<", "0.14.0"): + return False + + return hasattr(module, "_hf_hook") and not isinstance( + module._hf_hook, (accelerate.hooks.CpuOffload, accelerate.hooks.AlignDevicesHook) + ) + + def module_is_offloaded(module): + if not is_accelerate_available() or is_accelerate_version("<", "0.17.0.dev0"): + return False + + return hasattr(module, "_hf_hook") and isinstance(module._hf_hook, accelerate.hooks.CpuOffload) + + # .to("cuda") would raise an error if the pipeline is sequentially offloaded, so we raise our own to make it clearer + pipeline_is_sequentially_offloaded = any( + module_is_sequentially_offloaded(module) for _, module in self.components.items() + ) + if pipeline_is_sequentially_offloaded and device and torch.device(device).type == "cuda": + raise ValueError( + "It seems like you have activated sequential model offloading by calling `enable_sequential_cpu_offload`, but are now attempting to move the pipeline to GPU. This is not compatible with offloading. Please, move your pipeline `.to('cpu')` or consider removing the move altogether if you use sequential offloading." + ) + + # Display a warning in this case (the operation succeeds but the benefits are lost) + pipeline_is_offloaded = any(module_is_offloaded(module) for _, module in self.components.items()) + if pipeline_is_offloaded and device and torch.device(device).type == "cuda": + logger.warning( + f"It seems like you have activated model offloading by calling `enable_model_cpu_offload`, but are now manually moving the pipeline to GPU. It is strongly recommended against doing so as memory gains from offloading are likely to be lost. Offloading automatically takes care of moving the individual components {', '.join(self.components.keys())} to GPU when needed. To make sure offloading works as expected, you should consider moving the pipeline back to CPU: `pipeline.to('cpu')` or removing the move altogether if you use offloading." + ) + + module_names, _ = self._get_signature_keys(self) + modules = [getattr(self, n, None) for n in module_names] + modules = [m for m in modules if isinstance(m, torch.nn.Module)] + + is_offloaded = pipeline_is_offloaded or pipeline_is_sequentially_offloaded + for module in modules: + is_loaded_in_8bit = hasattr(module, "is_loaded_in_8bit") and module.is_loaded_in_8bit + + if is_loaded_in_8bit and dtype is not None: + logger.warning( + f"The module '{module.__class__.__name__}' has been loaded in 8bit and conversion to {dtype} is not yet supported. Module is still in 8bit precision." + ) + + if is_loaded_in_8bit and device is not None: + logger.warning( + f"The module '{module.__class__.__name__}' has been loaded in 8bit and moving it to {dtype} via `.to()` is not yet supported. Module is still on {module.device}." + ) + else: + module.to(device, dtype) + + if ( + module.dtype == torch.float16 + and str(device) in ["cpu"] + and not silence_dtype_warnings + and not is_offloaded + ): + logger.warning( + "Pipelines loaded with `dtype=torch.float16` cannot run with `cpu` device. It" + " is not recommended to move them to `cpu` as running them will fail. Please make" + " sure to use an accelerator to run the pipeline in inference, due to the lack of" + " support for`float16` operations on this device in PyTorch. Please, remove the" + " `torch_dtype=torch.float16` argument, or use another device for inference." 
+ ) + return self + + @property + def device(self) -> torch.device: + r""" + Returns: + `torch.device`: The torch device on which the pipeline is located. + """ + module_names, _ = self._get_signature_keys(self) + modules = [getattr(self, n, None) for n in module_names] + modules = [m for m in modules if isinstance(m, torch.nn.Module)] + + for module in modules: + return module.device + + return torch.device("cpu") + + @property + def dtype(self) -> torch.dtype: + r""" + Returns: + `torch.dtype`: The torch dtype on which the pipeline is located. + """ + module_names, _ = self._get_signature_keys(self) + modules = [getattr(self, n, None) for n in module_names] + modules = [m for m in modules if isinstance(m, torch.nn.Module)] + + for module in modules: + return module.dtype + + return torch.float32 + + @classmethod + @validate_hf_hub_args + def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): + r""" + Instantiate a PyTorch diffusion pipeline from pretrained pipeline weights. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + If you get the error message below, you need to finetune the weights for your downstream task: + + ``` + Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: + - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated + You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. + ``` + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline + hosted on the Hub. + - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights + saved using + [`~DiffusionPipeline.save_pretrained`]. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the + dtype is automatically derived from the model's weights. + custom_pipeline (`str`, *optional*): + + + + 🧪 This is an experimental feature and may change in the future. + + + + Can be either: + + - A string, the *repo id* (for example `hf-internal-testing/diffusers-dummy-pipeline`) of a custom + pipeline hosted on the Hub. The repository must contain a file called pipeline.py that defines + the custom pipeline. + - A string, the *file name* of a community pipeline hosted on GitHub under + [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file + names must match the file name and not the pipeline script (`clip_guided_stable_diffusion` + instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the + current main branch of GitHub. + - A path to a directory (`./my_pipeline_directory/`) containing a custom pipeline. The directory + must contain a file called `pipeline.py` that defines the custom pipeline. 
+ + For more information on how to load and create custom pipelines, please have a look at [Loading and + Adding Custom + Pipelines](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview) + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + custom_revision (`str`, *optional*): + The specific model version to use. It can be a branch name, a tag name, or a commit id similar to + `revision` when loading a custom pipeline from the Hub. Defaults to the latest stable 🤗 Diffusers version. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn’t need to be defined for each + parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the + same device. + + Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier for the maximum memory. Will default to the maximum memory available for + each GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + The path to offload weights if device_map contains the value `"disk"`. + offload_state_dict (`bool`, *optional*): + If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if + the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` + when there is some disk offload. 
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the safetensors weights are downloaded if they're available **and** if the + safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors + weights. If set to `False`, safetensors weights are not loaded. + use_onnx (`bool`, *optional*, defaults to `None`): + If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights + will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is + `False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending + with `.onnx` and `.pb`. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline + class). The overwritten components are passed directly to the pipelines `__init__` method. See example + below for more information. + variant (`str`, *optional*): + Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when + loading `from_flax`. + + + + To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with + `huggingface-cli login`. + + + + Examples: + + ```py + >>> from diffusers import DiffusionPipeline + + >>> # Download pipeline from huggingface.co and cache. 
+ >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") + + >>> # Download pipeline that requires an authorization token + >>> # For more information on access tokens, please refer to this section + >>> # of the documentation](https://huggingface.co/docs/hub/security-tokens) + >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") + + >>> # Use a different scheduler + >>> from diffusers import LMSDiscreteScheduler + + >>> scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.scheduler = scheduler + ``` + """ + cache_dir = kwargs.pop("cache_dir", None) + resume_download = kwargs.pop("resume_download", False) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + from_flax = kwargs.pop("from_flax", False) + torch_dtype = kwargs.pop("torch_dtype", None) + custom_pipeline = kwargs.pop("custom_pipeline", None) + custom_revision = kwargs.pop("custom_revision", None) + provider = kwargs.pop("provider", None) + sess_options = kwargs.pop("sess_options", None) + device_map = kwargs.pop("device_map", None) + max_memory = kwargs.pop("max_memory", None) + offload_folder = kwargs.pop("offload_folder", None) + offload_state_dict = kwargs.pop("offload_state_dict", False) + low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) + variant = kwargs.pop("variant", None) + use_safetensors = kwargs.pop("use_safetensors", None) + use_onnx = kwargs.pop("use_onnx", None) + load_connected_pipeline = kwargs.pop("load_connected_pipeline", False) + + if low_cpu_mem_usage and not is_accelerate_available(): + low_cpu_mem_usage = False + logger.warning( + "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" + " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" + " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" + " install accelerate\n```\n." + ) + + if device_map is not None and not is_torch_version(">=", "1.9.0"): + raise NotImplementedError( + "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set" + " `device_map=None`." + ) + + if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): + raise NotImplementedError( + "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" + " `low_cpu_mem_usage=False`." + ) + + if low_cpu_mem_usage is False and device_map is not None: + raise ValueError( + f"You cannot set `low_cpu_mem_usage` to False while using device_map={device_map} for loading and" + " dispatching. Please make sure to set `low_cpu_mem_usage=True`." + ) + + # 1. Download the checkpoints and configs + # use snapshot download here to get it working from from_pretrained + if not os.path.isdir(pretrained_model_name_or_path): + if pretrained_model_name_or_path.count("/") > 1: + raise ValueError( + f'The provided pretrained_model_name_or_path "{pretrained_model_name_or_path}"' + " is neither a valid local path nor a valid repo id. Please check the parameter." 
+ ) + cached_folder = cls.download( + pretrained_model_name_or_path, + cache_dir=cache_dir, + resume_download=resume_download, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + from_flax=from_flax, + use_safetensors=use_safetensors, + use_onnx=use_onnx, + custom_pipeline=custom_pipeline, + custom_revision=custom_revision, + variant=variant, + load_connected_pipeline=load_connected_pipeline, + **kwargs, + ) + else: + cached_folder = pretrained_model_name_or_path + + config_dict = cls.load_config(cached_folder) + + # pop out "_ignore_files" as it is only needed for download + config_dict.pop("_ignore_files", None) + + # 2. Define which model components should load variants + # We retrieve the information by matching whether variant + # model checkpoints exist in the subfolders + model_variants = {} + if variant is not None: + for folder in os.listdir(cached_folder): + folder_path = os.path.join(cached_folder, folder) + is_folder = os.path.isdir(folder_path) and folder in config_dict + variant_exists = is_folder and any( + p.split(".")[1].startswith(variant) for p in os.listdir(folder_path) + ) + if variant_exists: + model_variants[folder] = variant + + # 3. Load the pipeline class, if using custom module then load it from the hub + # if we load from explicit class, let's use it + custom_class_name = None + if os.path.isfile(os.path.join(cached_folder, f"{custom_pipeline}.py")): + custom_pipeline = os.path.join(cached_folder, f"{custom_pipeline}.py") + elif isinstance(config_dict["_class_name"], (list, tuple)) and os.path.isfile( + os.path.join(cached_folder, f"{config_dict['_class_name'][0]}.py") + ): + custom_pipeline = os.path.join(cached_folder, f"{config_dict['_class_name'][0]}.py") + custom_class_name = config_dict["_class_name"][1] + + pipeline_class = _get_pipeline_class( + cls, + config_dict, + load_connected_pipeline=load_connected_pipeline, + custom_pipeline=custom_pipeline, + class_name=custom_class_name, + cache_dir=cache_dir, + revision=custom_revision, + ) + + # DEPRECATED: To be removed in 1.0.0 + if pipeline_class.__name__ == "StableDiffusionInpaintPipeline" and version.parse( + version.parse(config_dict["_diffusers_version"]).base_version + ) <= version.parse("0.5.1"): + from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy + + pipeline_class = StableDiffusionInpaintPipelineLegacy + + deprecation_message = ( + "You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the" + f" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For" + " better inpainting results, we strongly suggest using Stable Diffusion's official inpainting" + " checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your" + f" checkpoint {pretrained_model_name_or_path} to the format of" + " https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain" + " the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0." + ) + deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", deprecation_message, standard_warn=False) + + # 4. 
Define expected modules given pipeline signature + # and define non-None initialized modules (=`init_kwargs`) + + # some modules can be passed directly to the init + # in this case they are already instantiated in `kwargs` + # extract them here + expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class) + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + + init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs) + + # define init kwargs and make sure that optional component modules are filtered out + init_kwargs = { + k: init_dict.pop(k) + for k in optional_kwargs + if k in init_dict and k not in pipeline_class._optional_components + } + init_kwargs = {**init_kwargs, **passed_pipe_kwargs} + + # remove `null` components + def load_module(name, value): + if value[0] is None: + return False + if name in passed_class_obj and passed_class_obj[name] is None: + return False + return True + + init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)} + + # Special case: safety_checker must be loaded separately when using `from_flax` + if from_flax and "safety_checker" in init_dict and "safety_checker" not in passed_class_obj: + raise NotImplementedError( + "The safety checker cannot be automatically loaded when loading weights `from_flax`." + " Please, pass `safety_checker=None` to `from_pretrained`, and load the safety checker" + " separately if you need it." + ) + + # 5. Throw nice warnings / errors for fast accelerate loading + if len(unused_kwargs) > 0: + logger.warning( + f"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored." + ) + + # import it here to avoid circular import + from diffusers import pipelines + + # 6. Load each module in the pipeline + for name, (library_name, class_name) in logging.tqdm(init_dict.items(), desc="Loading pipeline components..."): + # 6.1 - now that JAX/Flax is an official framework of the library, we might load from Flax names + class_name = class_name[4:] if class_name.startswith("Flax") else class_name + + # 6.2 Define all importable classes + is_pipeline_module = hasattr(pipelines, library_name) + importable_classes = ALL_IMPORTABLE_CLASSES + loaded_sub_model = None + + # 6.3 Use passed sub model or load class_name from library_name + if name in passed_class_obj: + # if the model is in a pipeline module, then we load it from the pipeline + # check that passed_class_obj has correct parent class + maybe_raise_or_warn( + library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module + ) + + loaded_sub_model = passed_class_obj[name] + else: + # load sub model + loaded_sub_model = load_sub_model( + library_name=library_name, + class_name=class_name, + importable_classes=importable_classes, + pipelines=pipelines, + is_pipeline_module=is_pipeline_module, + pipeline_class=pipeline_class, + torch_dtype=torch_dtype, + provider=provider, + sess_options=sess_options, + device_map=device_map, + max_memory=max_memory, + offload_folder=offload_folder, + offload_state_dict=offload_state_dict, + model_variants=model_variants, + name=name, + from_flax=from_flax, + variant=variant, + low_cpu_mem_usage=low_cpu_mem_usage, + cached_folder=cached_folder, + ) + logger.info( + f"Loaded {name} as {class_name} from `{name}` subfolder of {pretrained_model_name_or_path}." 
+ ) + + init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...) + + if pipeline_class._load_connected_pipes and os.path.isfile(os.path.join(cached_folder, "README.md")): + modelcard = ModelCard.load(os.path.join(cached_folder, "README.md")) + connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS} + load_kwargs = { + "cache_dir": cache_dir, + "resume_download": resume_download, + "force_download": force_download, + "proxies": proxies, + "local_files_only": local_files_only, + "token": token, + "revision": revision, + "torch_dtype": torch_dtype, + "custom_pipeline": custom_pipeline, + "custom_revision": custom_revision, + "provider": provider, + "sess_options": sess_options, + "device_map": device_map, + "max_memory": max_memory, + "offload_folder": offload_folder, + "offload_state_dict": offload_state_dict, + "low_cpu_mem_usage": low_cpu_mem_usage, + "variant": variant, + "use_safetensors": use_safetensors, + } + + def get_connected_passed_kwargs(prefix): + connected_passed_class_obj = { + k.replace(f"{prefix}_", ""): w for k, w in passed_class_obj.items() if k.split("_")[0] == prefix + } + connected_passed_pipe_kwargs = { + k.replace(f"{prefix}_", ""): w for k, w in passed_pipe_kwargs.items() if k.split("_")[0] == prefix + } + + connected_passed_kwargs = {**connected_passed_class_obj, **connected_passed_pipe_kwargs} + return connected_passed_kwargs + + connected_pipes = { + prefix: DiffusionPipeline.from_pretrained( + repo_id, **load_kwargs.copy(), **get_connected_passed_kwargs(prefix) + ) + for prefix, repo_id in connected_pipes.items() + if repo_id is not None + } + + for prefix, connected_pipe in connected_pipes.items(): + # add connected pipes to `init_kwargs` with _, e.g. "prior_text_encoder" + init_kwargs.update( + {"_".join([prefix, name]): component for name, component in connected_pipe.components.items()} + ) + + # 7. Potentially add passed objects if expected + missing_modules = set(expected_modules) - set(init_kwargs.keys()) + passed_modules = list(passed_class_obj.keys()) + optional_modules = pipeline_class._optional_components + if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules): + for module in missing_modules: + init_kwargs[module] = passed_class_obj.get(module, None) + elif len(missing_modules) > 0: + passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs + raise ValueError( + f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed." + ) + + # 8. Instantiate the pipeline + model = pipeline_class(**init_kwargs) + + # 9. Save where the model was instantiated from + model.register_to_config(_name_or_path=pretrained_model_name_or_path) + return model + + @property + def name_or_path(self) -> str: + return getattr(self.config, "_name_or_path", None) + + @property + def _execution_device(self): + r""" + Returns the device on which the pipeline's models will be executed. After calling + [`~DiffusionPipeline.enable_sequential_cpu_offload`] the execution device can only be inferred from + Accelerate's module hooks. 
+ """ + for name, model in self.components.items(): + if not isinstance(model, torch.nn.Module) or name in self._exclude_from_cpu_offload: + continue + + if not hasattr(model, "_hf_hook"): + return self.device + for module in model.modules(): + if ( + hasattr(module, "_hf_hook") + and hasattr(module._hf_hook, "execution_device") + and module._hf_hook.execution_device is not None + ): + return torch.device(module._hf_hook.execution_device) + return self.device + + def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + + Arguments: + gpu_id (`int`, *optional*): + The ID of the accelerator that shall be used in inference. If not specified, it will default to 0. + device (`torch.Device` or `str`, *optional*, defaults to "cuda"): + The PyTorch device type of the accelerator that shall be used in inference. If not specified, it will + default to "cuda". + """ + if self.model_cpu_offload_seq is None: + raise ValueError( + "Model CPU offload cannot be enabled because no `model_cpu_offload_seq` class attribute is set." + ) + + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") + + torch_device = torch.device(device) + device_index = torch_device.index + + if gpu_id is not None and device_index is not None: + raise ValueError( + f"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}" + f"Cannot pass both. 
Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}" + ) + + # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0 + self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, "_offload_gpu_id", 0) + + device_type = torch_device.type + device = torch.device(f"{device_type}:{self._offload_gpu_id}") + self._offload_device = device + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + device_mod = getattr(torch, self.device.type, None) + if hasattr(device_mod, "empty_cache") and device_mod.is_available(): + device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + all_model_components = {k: v for k, v in self.components.items() if isinstance(v, torch.nn.Module)} + + self._all_hooks = [] + hook = None + for model_str in self.model_cpu_offload_seq.split("->"): + model = all_model_components.pop(model_str, None) + if not isinstance(model, torch.nn.Module): + continue + + _, hook = cpu_offload_with_hook(model, device, prev_module_hook=hook) + self._all_hooks.append(hook) + + # CPU offload models that are not in the seq chain unless they are explicitly excluded + # these models will stay on CPU until maybe_free_model_hooks is called + # some models cannot be in the seq chain because they are iteratively called, such as controlnet + for name, model in all_model_components.items(): + if not isinstance(model, torch.nn.Module): + continue + + if name in self._exclude_from_cpu_offload: + model.to(device) + else: + _, hook = cpu_offload_with_hook(model, device) + self._all_hooks.append(hook) + + def maybe_free_model_hooks(self): + r""" + Function that offloads all components, removes all model hooks that were added when using + `enable_model_cpu_offload` and then applies them again. In case the model has not been offloaded this function + is a no-op. Make sure to add this function to the end of the `__call__` function of your pipeline so that it + functions correctly when applying enable_model_cpu_offload. + """ + if not hasattr(self, "_all_hooks") or len(self._all_hooks) == 0: + # `enable_model_cpu_offload` has not be called, so silently do nothing + return + + for hook in self._all_hooks: + # offload model and remove hook from model + hook.offload() + hook.remove() + + # make sure the model is in the same state as before calling it + self.enable_model_cpu_offload(device=getattr(self, "_offload_device", "cuda")) + + def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): + r""" + Offloads all models to CPU using 🤗 Accelerate, significantly reducing memory usage. When called, the state + dicts of all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are saved to CPU + and then moved to `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` + method called. Offloading happens on a submodule basis. Memory savings are higher than with + `enable_model_cpu_offload`, but performance is lower. + + Arguments: + gpu_id (`int`, *optional*): + The ID of the accelerator that shall be used in inference. If not specified, it will default to 0. + device (`torch.Device` or `str`, *optional*, defaults to "cuda"): + The PyTorch device type of the accelerator that shall be used in inference. If not specified, it will + default to "cuda". 
+ """ + if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"): + from accelerate import cpu_offload + else: + raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher") + + torch_device = torch.device(device) + device_index = torch_device.index + + if gpu_id is not None and device_index is not None: + raise ValueError( + f"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}" + f"Cannot pass both. Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}" + ) + + # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0 + self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, "_offload_gpu_id", 0) + + device_type = torch_device.type + device = torch.device(f"{device_type}:{self._offload_gpu_id}") + self._offload_device = device + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + device_mod = getattr(torch, self.device.type, None) + if hasattr(device_mod, "empty_cache") and device_mod.is_available(): + device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + for name, model in self.components.items(): + if not isinstance(model, torch.nn.Module): + continue + + if name in self._exclude_from_cpu_offload: + model.to(device) + else: + # make sure to offload buffers if not all high level weights + # are of type nn.Module + offload_buffers = len(model._parameters) > 0 + cpu_offload(model, device, offload_buffers=offload_buffers) + + @classmethod + @validate_hf_hub_args + def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: + r""" + Download and cache a PyTorch diffusion pipeline from pretrained pipeline weights. + + Parameters: + pretrained_model_name (`str` or `os.PathLike`, *optional*): + A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline + hosted on the Hub. + custom_pipeline (`str`, *optional*): + Can be either: + + - A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained + pipeline hosted on the Hub. The repository must contain a file called `pipeline.py` that defines + the custom pipeline. + + - A string, the *file name* of a community pipeline hosted on GitHub under + [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file + names must match the file name and not the pipeline script (`clip_guided_stable_diffusion` + instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the + current `main` branch of GitHub. + + - A path to a *directory* (`./my_pipeline_directory/`) containing a custom pipeline. The directory + must contain a file called `pipeline.py` that defines the custom pipeline. + + + + 🧪 This is an experimental feature and may change in the future. + + + + For more information on how to load and create custom pipelines, take a look at [How to contribute a + community pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/contribute_pipeline). + + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. 
+ resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + custom_revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id similar to + `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a + custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you're downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + variant (`str`, *optional*): + Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when + loading `from_flax`. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the safetensors weights are downloaded if they're available **and** if the + safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors + weights. If set to `False`, safetensors weights are not loaded. + use_onnx (`bool`, *optional*, defaults to `False`): + If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights + will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is + `False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending + with `.onnx` and `.pb`. + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom pipelines and components defined on the Hub in their own files. This + option should only be set to `True` for repositories you trust and in which you have read the code, as + it will execute code present on the Hub on your local machine. + + Returns: + `os.PathLike`: + A path to the downloaded pipeline. + + + + To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with + `huggingface-cli login`. 
+ + + + """ + cache_dir = kwargs.pop("cache_dir", None) + resume_download = kwargs.pop("resume_download", False) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + from_flax = kwargs.pop("from_flax", False) + custom_pipeline = kwargs.pop("custom_pipeline", None) + custom_revision = kwargs.pop("custom_revision", None) + variant = kwargs.pop("variant", None) + use_safetensors = kwargs.pop("use_safetensors", None) + use_onnx = kwargs.pop("use_onnx", None) + load_connected_pipeline = kwargs.pop("load_connected_pipeline", False) + trust_remote_code = kwargs.pop("trust_remote_code", False) + + allow_pickle = False + if use_safetensors is None: + use_safetensors = True + allow_pickle = True + + allow_patterns = None + ignore_patterns = None + + model_info_call_error: Optional[Exception] = None + if not local_files_only: + try: + info = model_info(pretrained_model_name, token=token, revision=revision) + except (HTTPError, OfflineModeIsEnabled, requests.ConnectionError) as e: + logger.warning(f"Couldn't connect to the Hub: {e}.\nWill try to load from local cache.") + local_files_only = True + model_info_call_error = e # save error to reraise it if model is not cached locally + + if not local_files_only: + config_file = hf_hub_download( + pretrained_model_name, + cls.config_name, + cache_dir=cache_dir, + revision=revision, + proxies=proxies, + force_download=force_download, + resume_download=resume_download, + token=token, + ) + + config_dict = cls._dict_from_json_file(config_file) + ignore_filenames = config_dict.pop("_ignore_files", []) + + # retrieve all folder_names that contain relevant files + folder_names = [k for k, v in config_dict.items() if isinstance(v, list) and k != "_class_name"] + + filenames = {sibling.rfilename for sibling in info.siblings} + model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant) + + diffusers_module = importlib.import_module(__name__.split(".")[0]) + pipelines = getattr(diffusers_module, "pipelines") + + # optionally create a custom component <> custom file mapping + custom_components = {} + for component in folder_names: + module_candidate = config_dict[component][0] + + if module_candidate is None or not isinstance(module_candidate, str): + continue + + # We compute candidate file path on the Hub. Do not use `os.path.join`. + candidate_file = f"{component}/{module_candidate}.py" + + if candidate_file in filenames: + custom_components[component] = module_candidate + elif module_candidate not in LOADABLE_CLASSES and not hasattr(pipelines, module_candidate): + raise ValueError( + f"{candidate_file} as defined in `model_index.json` does not exist in {pretrained_model_name} and is not a module in 'diffusers/pipelines'." + ) + + if len(variant_filenames) == 0 and variant is not None: + deprecation_message = ( + f"You are trying to load the model files of the `variant={variant}`, but no such modeling files are available." + f"The default model files: {model_filenames} will be loaded instead. Make sure to not load from `variant={variant}`" + "if such variant modeling files are not available. Doing so will lead to an error in v0.24.0 as defaulting to non-variant" + "modeling files is deprecated." 
+ ) + deprecate("no variant default", "0.24.0", deprecation_message, standard_warn=False) + + # remove ignored filenames + model_filenames = set(model_filenames) - set(ignore_filenames) + variant_filenames = set(variant_filenames) - set(ignore_filenames) + + # if the whole pipeline is cached we don't have to ping the Hub + if revision in DEPRECATED_REVISION_ARGS and version.parse( + version.parse(__version__).base_version + ) >= version.parse("0.22.0"): + warn_deprecated_model_variant(pretrained_model_name, token, variant, revision, model_filenames) + + model_folder_names = {os.path.split(f)[0] for f in model_filenames if os.path.split(f)[0] in folder_names} + + custom_class_name = None + if custom_pipeline is None and isinstance(config_dict["_class_name"], (list, tuple)): + custom_pipeline = config_dict["_class_name"][0] + custom_class_name = config_dict["_class_name"][1] + + # all filenames compatible with variant will be added + allow_patterns = list(model_filenames) + + # allow all patterns from non-model folders + # this enables downloading schedulers, tokenizers, ... + allow_patterns += [f"{k}/*" for k in folder_names if k not in model_folder_names] + # add custom component files + allow_patterns += [f"{k}/{f}.py" for k, f in custom_components.items()] + # add custom pipeline file + allow_patterns += [f"{custom_pipeline}.py"] if f"{custom_pipeline}.py" in filenames else [] + # also allow downloading config.json files with the model + allow_patterns += [os.path.join(k, "config.json") for k in model_folder_names] + + allow_patterns += [ + SCHEDULER_CONFIG_NAME, + CONFIG_NAME, + cls.config_name, + CUSTOM_PIPELINE_FILE_NAME, + ] + + load_pipe_from_hub = custom_pipeline is not None and f"{custom_pipeline}.py" in filenames + load_components_from_hub = len(custom_components) > 0 + + if load_pipe_from_hub and not trust_remote_code: + raise ValueError( + f"The repository for {pretrained_model_name} contains custom code in {custom_pipeline}.py which must be executed to correctly " + f"load the model. You can inspect the repository content at https://hf.co/{pretrained_model_name}/blob/main/{custom_pipeline}.py.\n" + f"Please pass the argument `trust_remote_code=True` to allow custom code to be run." + ) + + if load_components_from_hub and not trust_remote_code: + raise ValueError( + f"The repository for {pretrained_model_name} contains custom code in {'.py, '.join([os.path.join(k, v) for k,v in custom_components.items()])} which must be executed to correctly " + f"load the model. You can inspect the repository content at {', '.join([f'https://hf.co/{pretrained_model_name}/{k}/{v}.py' for k,v in custom_components.items()])}.\n" + f"Please pass the argument `trust_remote_code=True` to allow custom code to be run." 
+ ) + + # retrieve passed components that should not be downloaded + pipeline_class = _get_pipeline_class( + cls, + config_dict, + load_connected_pipeline=load_connected_pipeline, + custom_pipeline=custom_pipeline, + repo_id=pretrained_model_name if load_pipe_from_hub else None, + hub_revision=revision, + class_name=custom_class_name, + cache_dir=cache_dir, + revision=custom_revision, + ) + expected_components, _ = cls._get_signature_keys(pipeline_class) + passed_components = [k for k in expected_components if k in kwargs] + + if ( + use_safetensors + and not allow_pickle + and not is_safetensors_compatible( + model_filenames, variant=variant, passed_components=passed_components + ) + ): + raise EnvironmentError( + f"Could not find the necessary `safetensors` weights in {model_filenames} (variant={variant})" + ) + if from_flax: + ignore_patterns = ["*.bin", "*.safetensors", "*.onnx", "*.pb"] + elif use_safetensors and is_safetensors_compatible( + model_filenames, variant=variant, passed_components=passed_components + ): + ignore_patterns = ["*.bin", "*.msgpack"] + + use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx + if not use_onnx: + ignore_patterns += ["*.onnx", "*.pb"] + + safetensors_variant_filenames = {f for f in variant_filenames if f.endswith(".safetensors")} + safetensors_model_filenames = {f for f in model_filenames if f.endswith(".safetensors")} + if ( + len(safetensors_variant_filenames) > 0 + and safetensors_model_filenames != safetensors_variant_filenames + ): + logger.warning( + f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(safetensors_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(safetensors_model_filenames - safetensors_variant_filenames)}\nIf this behavior is not expected, please check your folder structure." + ) + else: + ignore_patterns = ["*.safetensors", "*.msgpack"] + + use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx + if not use_onnx: + ignore_patterns += ["*.onnx", "*.pb"] + + bin_variant_filenames = {f for f in variant_filenames if f.endswith(".bin")} + bin_model_filenames = {f for f in model_filenames if f.endswith(".bin")} + if len(bin_variant_filenames) > 0 and bin_model_filenames != bin_variant_filenames: + logger.warning( + f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(bin_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(bin_model_filenames - bin_variant_filenames)}\nIf this behavior is not expected, please check your folder structure." 
+ ) + + # Don't download any objects that are passed + allow_patterns = [ + p for p in allow_patterns if not (len(p.split("/")) == 2 and p.split("/")[0] in passed_components) + ] + + if pipeline_class._load_connected_pipes: + allow_patterns.append("README.md") + + # Don't download index files of forbidden patterns either + ignore_patterns = ignore_patterns + [f"{i}.index.*json" for i in ignore_patterns] + + re_ignore_pattern = [re.compile(fnmatch.translate(p)) for p in ignore_patterns] + re_allow_pattern = [re.compile(fnmatch.translate(p)) for p in allow_patterns] + + expected_files = [f for f in filenames if not any(p.match(f) for p in re_ignore_pattern)] + expected_files = [f for f in expected_files if any(p.match(f) for p in re_allow_pattern)] + + snapshot_folder = Path(config_file).parent + pipeline_is_cached = all((snapshot_folder / f).is_file() for f in expected_files) + + if pipeline_is_cached and not force_download: + # if the pipeline is cached, we can directly return it + # else call snapshot_download + return snapshot_folder + + user_agent = {"pipeline_class": cls.__name__} + if custom_pipeline is not None and not custom_pipeline.endswith(".py"): + user_agent["custom_pipeline"] = custom_pipeline + + # download all allow_patterns - ignore_patterns + try: + cached_folder = snapshot_download( + pretrained_model_name, + cache_dir=cache_dir, + resume_download=resume_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + user_agent=user_agent, + ) + + # retrieve pipeline class from local file + cls_name = cls.load_config(os.path.join(cached_folder, "model_index.json")).get("_class_name", None) + cls_name = cls_name[4:] if isinstance(cls_name, str) and cls_name.startswith("Flax") else cls_name + + diffusers_module = importlib.import_module(__name__.split(".")[0]) + pipeline_class = getattr(diffusers_module, cls_name, None) if isinstance(cls_name, str) else None + + if pipeline_class is not None and pipeline_class._load_connected_pipes: + modelcard = ModelCard.load(os.path.join(cached_folder, "README.md")) + connected_pipes = sum([getattr(modelcard.data, k, []) for k in CONNECTED_PIPES_KEYS], []) + for connected_pipe_repo_id in connected_pipes: + download_kwargs = { + "cache_dir": cache_dir, + "resume_download": resume_download, + "force_download": force_download, + "proxies": proxies, + "local_files_only": local_files_only, + "token": token, + "variant": variant, + "use_safetensors": use_safetensors, + } + DiffusionPipeline.download(connected_pipe_repo_id, **download_kwargs) + + return cached_folder + + except FileNotFoundError: + # Means we tried to load pipeline with `local_files_only=True` but the files have not been found in local cache. + # This can happen in two cases: + # 1. If the user passed `local_files_only=True` => we raise the error directly + # 2. If we forced `local_files_only=True` when `model_info` failed => we raise the initial error + if model_info_call_error is None: + # 1. user passed `local_files_only=True` + raise + else: + # 2. we forced `local_files_only=True` when `model_info` failed + raise EnvironmentError( + f"Cannot load model {pretrained_model_name}: model is not cached locally and an error occurred" + " while trying to fetch metadata from the Hub. Please check out the root cause in the stacktrace" + " above." 
+ ) from model_info_call_error + + @classmethod + def _get_signature_keys(cls, obj): + parameters = inspect.signature(obj.__init__).parameters + required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty} + optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty}) + expected_modules = set(required_parameters.keys()) - {"self"} + + optional_names = list(optional_parameters) + for name in optional_names: + if name in cls._optional_components: + expected_modules.add(name) + optional_parameters.remove(name) + + return expected_modules, optional_parameters + + @property + def components(self) -> Dict[str, Any]: + r""" + The `self.components` property can be useful to run different pipelines with the same weights and + configurations without reallocating additional memory. + + Returns (`dict`): + A dictionary containing all the modules needed to initialize the pipeline. + + Examples: + + ```py + >>> from diffusers import ( + ... StableDiffusionPipeline, + ... StableDiffusionImg2ImgPipeline, + ... StableDiffusionInpaintPipeline, + ... ) + + >>> text2img = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components) + >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components) + ``` + """ + expected_modules, optional_parameters = self._get_signature_keys(self) + components = { + k: getattr(self, k) for k in self.config.keys() if not k.startswith("_") and k not in optional_parameters + } + + if set(components.keys()) != expected_modules: + raise ValueError( + f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected" + f" {expected_modules} to be defined, but {components.keys()} are defined." + ) + + return components + + @staticmethod + def numpy_to_pil(images): + """ + Convert a NumPy image or a batch of images to a PIL image. + """ + return numpy_to_pil(images) + + def progress_bar(self, iterable=None, total=None): + if not hasattr(self, "_progress_bar_config"): + self._progress_bar_config = {} + elif not isinstance(self._progress_bar_config, dict): + raise ValueError( + f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}." + ) + + if iterable is not None: + return tqdm(iterable, **self._progress_bar_config) + elif total is not None: + return tqdm(total=total, **self._progress_bar_config) + else: + raise ValueError("Either `total` or `iterable` has to be defined.") + + def set_progress_bar_config(self, **kwargs): + self._progress_bar_config = kwargs + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + r""" + Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). When this + option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed + up during training is not guaranteed. + + + + ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes + precedent. + + + + Parameters: + attention_op (`Callable`, *optional*): + Override the default `None` operator for use as `op` argument to the + [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention) + function of xFormers. 
+ + Examples: + + ```py + >>> import torch + >>> from diffusers import DiffusionPipeline + >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp + + >>> pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + >>> pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) + >>> # Workaround for not accepting attention shape using VAE for Flash Attention + >>> pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None) + ``` + """ + self.set_use_memory_efficient_attention_xformers(True, attention_op) + + def disable_xformers_memory_efficient_attention(self): + r""" + Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). + """ + self.set_use_memory_efficient_attention_xformers(False) + + def set_use_memory_efficient_attention_xformers( + self, valid: bool, attention_op: Optional[Callable] = None + ) -> None: + # Recursively walk through all the children. + # Any children which exposes the set_use_memory_efficient_attention_xformers method + # gets the message + def fn_recursive_set_mem_eff(module: torch.nn.Module): + if hasattr(module, "set_use_memory_efficient_attention_xformers"): + module.set_use_memory_efficient_attention_xformers(valid, attention_op) + + for child in module.children(): + fn_recursive_set_mem_eff(child) + + module_names, _ = self._get_signature_keys(self) + modules = [getattr(self, n, None) for n in module_names] + modules = [m for m in modules if isinstance(m, torch.nn.Module)] + + for module in modules: + fn_recursive_set_mem_eff(module) + + def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): + r""" + Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor + in slices to compute attention in several steps. For more than one attention head, the computation is performed + sequentially over each head. This is useful to save some memory in exchange for a small speed decrease. + + + + ⚠️ Don't enable attention slicing if you're already using `scaled_dot_product_attention` (SDPA) from PyTorch + 2.0 or xFormers. These attention computations are already very memory efficient so you won't need to enable + this function. If you enable attention slicing with SDPA or xFormers, it can lead to serious slow downs! + + + + Args: + slice_size (`str` or `int`, *optional*, defaults to `"auto"`): + When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If + `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is + provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` + must be a multiple of `slice_size`. + + Examples: + + ```py + >>> import torch + >>> from diffusers import StableDiffusionPipeline + + >>> pipe = StableDiffusionPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", + ... torch_dtype=torch.float16, + ... use_safetensors=True, + ... ) + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> pipe.enable_attention_slicing() + >>> image = pipe(prompt).images[0] + ``` + """ + self.set_attention_slice(slice_size) + + def disable_attention_slicing(self): + r""" + Disable sliced attention computation. If `enable_attention_slicing` was previously called, attention is + computed in one step. 
+ """ + # set slice_size = `None` to disable `attention slicing` + self.enable_attention_slicing(None) + + def set_attention_slice(self, slice_size: Optional[int]): + module_names, _ = self._get_signature_keys(self) + modules = [getattr(self, n, None) for n in module_names] + modules = [m for m in modules if isinstance(m, torch.nn.Module) and hasattr(m, "set_attention_slice")] + + for module in modules: + module.set_attention_slice(slice_size) + + +class StableDiffusionMixin: + r""" + Helper for DiffusionPipeline with vae and unet.(mainly for LDM such as stable diffusion) + """ + + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + def enable_vae_tiling(self): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.vae.enable_tiling() + + def disable_vae_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_tiling() + + def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): + r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497. + + The suffixes after the scaling factors represent the stages where they are being applied. + + Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values + that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. + + Args: + s1 (`float`): + Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to + mitigate "oversmoothing effect" in the enhanced denoising process. + s2 (`float`): + Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to + mitigate "oversmoothing effect" in the enhanced denoising process. + b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. + b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. + """ + if not hasattr(self, "unet"): + raise ValueError("The pipeline must have `unet` for using FreeU.") + self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2) + + def disable_freeu(self): + """Disables the FreeU mechanism if enabled.""" + self.unet.disable_freeu() + + def fuse_qkv_projections(self, unet: bool = True, vae: bool = True): + """ + Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, + key, value) are fused. For cross-attention modules, key and value projection matrices are fused. + + + + This API is 🧪 experimental. + + + + Args: + unet (`bool`, defaults to `True`): To apply fusion on the UNet. + vae (`bool`, defaults to `True`): To apply fusion on the VAE. 
+ """ + self.fusing_unet = False + self.fusing_vae = False + + if unet: + self.fusing_unet = True + self.unet.fuse_qkv_projections() + self.unet.set_attn_processor(FusedAttnProcessor2_0()) + + if vae: + if not isinstance(self.vae, AutoencoderKL): + raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.") + + self.fusing_vae = True + self.vae.fuse_qkv_projections() + self.vae.set_attn_processor(FusedAttnProcessor2_0()) + + def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True): + """Disable QKV projection fusion if enabled. + + + + This API is 🧪 experimental. + + + + Args: + unet (`bool`, defaults to `True`): To apply fusion on the UNet. + vae (`bool`, defaults to `True`): To apply fusion on the VAE. + + """ + if unet: + if not self.fusing_unet: + logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.") + else: + self.unet.unfuse_qkv_projections() + self.fusing_unet = False + + if vae: + if not self.fusing_vae: + logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.") + else: + self.vae.unfuse_qkv_projections() + self.fusing_vae = False diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pixart_alpha/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pixart_alpha/__init__.py new file mode 100644 index 000000000..0bfa28fcd --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pixart_alpha/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_pixart_alpha"] = ["PixArtAlphaPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_pixart_alpha import PixArtAlphaPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py new file mode 100644 index 000000000..e7213a38b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py @@ -0,0 +1,979 @@ +# Copyright 2024 PixArt-Alpha Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import html +import inspect +import re +import urllib.parse as ul +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from transformers import T5EncoderModel, T5Tokenizer + +from ...image_processor import VaeImageProcessor +from ...models import AutoencoderKL, Transformer2DModel +from ...schedulers import DPMSolverMultistepScheduler +from ...utils import ( + BACKENDS_MAPPING, + deprecate, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import PixArtAlphaPipeline + + >>> # You can replace the checkpoint id with "PixArt-alpha/PixArt-XL-2-512x512" too. + >>> pipe = PixArtAlphaPipeline.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16) + >>> # Enable memory optimizations. + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "A small cactus with a happy face in the Sahara desert." + >>> image = pipe(prompt).images[0] + ``` +""" + +ASPECT_RATIO_1024_BIN = { + "0.25": [512.0, 2048.0], + "0.28": [512.0, 1856.0], + "0.32": [576.0, 1792.0], + "0.33": [576.0, 1728.0], + "0.35": [576.0, 1664.0], + "0.4": [640.0, 1600.0], + "0.42": [640.0, 1536.0], + "0.48": [704.0, 1472.0], + "0.5": [704.0, 1408.0], + "0.52": [704.0, 1344.0], + "0.57": [768.0, 1344.0], + "0.6": [768.0, 1280.0], + "0.68": [832.0, 1216.0], + "0.72": [832.0, 1152.0], + "0.78": [896.0, 1152.0], + "0.82": [896.0, 1088.0], + "0.88": [960.0, 1088.0], + "0.94": [960.0, 1024.0], + "1.0": [1024.0, 1024.0], + "1.07": [1024.0, 960.0], + "1.13": [1088.0, 960.0], + "1.21": [1088.0, 896.0], + "1.29": [1152.0, 896.0], + "1.38": [1152.0, 832.0], + "1.46": [1216.0, 832.0], + "1.67": [1280.0, 768.0], + "1.75": [1344.0, 768.0], + "2.0": [1408.0, 704.0], + "2.09": [1472.0, 704.0], + "2.4": [1536.0, 640.0], + "2.5": [1600.0, 640.0], + "3.0": [1728.0, 576.0], + "4.0": [2048.0, 512.0], +} + +ASPECT_RATIO_512_BIN = { + "0.25": [256.0, 1024.0], + "0.28": [256.0, 928.0], + "0.32": [288.0, 896.0], + "0.33": [288.0, 864.0], + "0.35": [288.0, 832.0], + "0.4": [320.0, 800.0], + "0.42": [320.0, 768.0], + "0.48": [352.0, 736.0], + "0.5": [352.0, 704.0], + "0.52": [352.0, 672.0], + "0.57": [384.0, 672.0], + "0.6": [384.0, 640.0], + "0.68": [416.0, 608.0], + "0.72": [416.0, 576.0], + "0.78": [448.0, 576.0], + "0.82": [448.0, 544.0], + "0.88": [480.0, 544.0], + "0.94": [480.0, 512.0], + "1.0": [512.0, 512.0], + "1.07": [512.0, 480.0], + "1.13": [544.0, 480.0], + "1.21": [544.0, 448.0], + "1.29": [576.0, 448.0], + "1.38": [576.0, 416.0], + "1.46": [608.0, 416.0], + "1.67": [640.0, 384.0], + "1.75": [672.0, 384.0], + "2.0": [704.0, 352.0], + "2.09": [736.0, 352.0], + "2.4": [768.0, 320.0], + "2.5": [800.0, 320.0], + "3.0": [864.0, 288.0], + "4.0": [1024.0, 256.0], +} + 
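The aspect-ratio tables above, together with `ASPECT_RATIO_256_BIN` below, are consumed by `PixArtAlphaPipeline.classify_height_width_bin` later in this file: a requested resolution is snapped to the bucket whose ratio is numerically closest. A minimal standalone sketch of that lookup is shown here; the abridged dictionary and the helper name `closest_bin` are illustrative only and not part of the patch.

```py
# Minimal sketch of the bucket lookup performed by classify_height_width_bin.
# The table is abridged for illustration; the real tables are defined in the patch above.
ASPECT_RATIO_512_BIN_ABRIDGED = {"0.5": [352.0, 704.0], "1.0": [512.0, 512.0], "2.0": [704.0, 352.0]}


def closest_bin(height: int, width: int, ratios: dict) -> tuple:
    ar = float(height / width)
    closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - ar))
    default_hw = ratios[closest_ratio]
    return int(default_hw[0]), int(default_hw[1])


print(closest_bin(500, 960, ASPECT_RATIO_512_BIN_ABRIDGED))  # (352, 704): ratio ~0.52 falls into the 0.5 bucket
```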
+ASPECT_RATIO_256_BIN = { + "0.25": [128.0, 512.0], + "0.28": [128.0, 464.0], + "0.32": [144.0, 448.0], + "0.33": [144.0, 432.0], + "0.35": [144.0, 416.0], + "0.4": [160.0, 400.0], + "0.42": [160.0, 384.0], + "0.48": [176.0, 368.0], + "0.5": [176.0, 352.0], + "0.52": [176.0, 336.0], + "0.57": [192.0, 336.0], + "0.6": [192.0, 320.0], + "0.68": [208.0, 304.0], + "0.72": [208.0, 288.0], + "0.78": [224.0, 288.0], + "0.82": [224.0, 272.0], + "0.88": [240.0, 272.0], + "0.94": [240.0, 256.0], + "1.0": [256.0, 256.0], + "1.07": [256.0, 240.0], + "1.13": [272.0, 240.0], + "1.21": [272.0, 224.0], + "1.29": [288.0, 224.0], + "1.38": [288.0, 208.0], + "1.46": [304.0, 208.0], + "1.67": [320.0, 192.0], + "1.75": [336.0, 192.0], + "2.0": [352.0, 176.0], + "2.09": [368.0, 176.0], + "2.4": [384.0, 160.0], + "2.5": [400.0, 160.0], + "3.0": [432.0, 144.0], + "4.0": [512.0, 128.0], +} + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class PixArtAlphaPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using PixArt-Alpha. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`T5EncoderModel`]): + Frozen text-encoder. 
PixArt-Alpha uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant. + tokenizer (`T5Tokenizer`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). + transformer ([`Transformer2DModel`]): + A text conditioned `Transformer2DModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + """ + + bad_punct_regex = re.compile( + r"[" + + "#®•©™&@·º½¾¿¡§~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder"] + model_cpu_offload_seq = "text_encoder->transformer->vae" + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + vae: AutoencoderKL, + transformer: Transformer2DModel, + scheduler: DPMSolverMultistepScheduler, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/utils.py + def mask_text_embeddings(self, emb, mask): + if emb.shape[0] == 1: + keep_index = mask.sum().item() + return emb[:, :, :keep_index, :], keep_index + else: + masked_feature = emb * mask[:, None, :, None] + return masked_feature, emb.shape[2] + + # Adapted from diffusers.pipelines.deepfloyd_if.pipeline_if.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + negative_prompt: str = "", + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + prompt_attention_mask: Optional[torch.FloatTensor] = None, + negative_prompt_attention_mask: Optional[torch.FloatTensor] = None, + clean_caption: bool = False, + max_sequence_length: int = 120, + **kwargs, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` + instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For + PixArt-Alpha, this should be "". + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. 
For PixArt-Alpha, it's should be the embeddings of the "" + string. + clean_caption (`bool`, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + max_sequence_length (`int`, defaults to 120): Maximum sequence length to use for the prompt. + """ + + if "mask_feature" in kwargs: + deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." + deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # See Section 3.1. of the paper. + max_length = max_sequence_length + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + prompt_attention_mask = text_inputs.attention_mask + prompt_attention_mask = prompt_attention_mask.to(device) + + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.transformer is not None: + dtype = self.transformer.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1) + prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens = [negative_prompt] * batch_size + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + negative_prompt_attention_mask = uncond_input.attention_mask + negative_prompt_attention_mask = negative_prompt_attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings 
for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed, -1) + negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) + else: + negative_prompt_embeds = None + negative_prompt_attention_mask = None + + return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt, + callback_steps, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_attention_mask=None, + negative_prompt_attention_mask=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+            )
+
+        if prompt_embeds is not None and prompt_attention_mask is None:
+            raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")
+
+        if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
+            raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")
+
+        if prompt_embeds is not None and negative_prompt_embeds is not None:
+            if prompt_embeds.shape != negative_prompt_embeds.shape:
+                raise ValueError(
+                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
+                    f" {negative_prompt_embeds.shape}."
+                )
+            if prompt_attention_mask.shape != negative_prompt_attention_mask.shape:
+                raise ValueError(
+                    "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but"
+                    f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`"
+                    f" {negative_prompt_attention_mask.shape}."
+                )
+
+    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
+    def _text_preprocessing(self, text, clean_caption=False):
+        if clean_caption and not is_bs4_available():
+            logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
+            logger.warning("Setting `clean_caption` to False...")
+            clean_caption = False
+
+        if clean_caption and not is_ftfy_available():
+            logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
+            logger.warning("Setting `clean_caption` to False...")
+            clean_caption = False
+
+        if not isinstance(text, (tuple, list)):
+            text = [text]
+
+        def process(text: str):
+            if clean_caption:
+                text = self._clean_caption(text)
+                text = self._clean_caption(text)
+            else:
+                text = text.lower().strip()
+            return text
+
+        return [process(t) for t in text]
+
+    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
+    def _clean_caption(self, caption):
+        caption = str(caption)
+        caption = ul.unquote_plus(caption)
+        caption = caption.strip().lower()
+        caption = re.sub("<person>", "person", caption)
+        # urls:
+        caption = re.sub(
+            r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
+            "",
+            caption,
+        )  # regex for urls
+        caption = re.sub(
+            r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
+            "",
+            caption,
+        )  # regex for urls
+        # html:
+        caption = BeautifulSoup(caption, features="html.parser").text
+
+        # @
+        caption = re.sub(r"@[\w\d]+\b", "", caption)
+
+        # 31C0—31EF CJK Strokes
+        # 31F0—31FF Katakana Phonetic Extensions
+        # 3200—32FF Enclosed CJK Letters and Months
+        # 3300—33FF CJK Compatibility
+        # 3400—4DBF CJK Unified Ideographs Extension A
+        # 4DC0—4DFF Yijing Hexagram Symbols
+        # 4E00—9FFF CJK Unified Ideographs
+        caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
+        caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
+        caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
+        caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
+        caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
+        caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
+        caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
+        #######################################################
+
+        # все виды тире / all types of dash --> "-"
+        caption = re.sub(
+            r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",  # noqa
+            "-",
+            caption,
+        )
+
+        # кавычки к одному стандарту / normalize all quotes to one standard
+        caption = re.sub(r"[`´«»“”¨]", '"', caption)
+        caption = re.sub(r"[‘’]", "'", caption)
+
+        # &quot;
+        caption = re.sub(r"&quot;?", "", caption)
+        # &amp
+        caption = re.sub(r"&amp", "", caption)
+
+        # ip addresses:
+        caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
+
+        # article ids:
+        caption = re.sub(r"\d:\d\d\s+$", "", caption)
+
+        # \n
+        caption = re.sub(r"\\n", " ", caption)
+
+        # "#123"
+        caption = re.sub(r"#\d{1,3}\b", "", caption)
+        # "#12345.."
+        caption = re.sub(r"#\d{5,}\b", "", caption)
+        # "123456.."
+        caption = re.sub(r"\b\d{6,}\b", "", caption)
+        # filenames:
+        caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
+
+        #
+        caption = re.sub(r"[\"\']{2,}", r'"', caption)  # """AUSVERKAUFT"""
+        caption = re.sub(r"[\.]{2,}", r" ", caption)  # """AUSVERKAUFT"""
+
+        caption = re.sub(self.bad_punct_regex, r" ", caption)  # ***AUSVERKAUFT***, #AUSVERKAUFT
+        caption = re.sub(r"\s+\.\s+", r" ", caption)  # " . "
+
+        # this-is-my-cute-cat / this_is_my_cute_cat
+        regex2 = re.compile(r"(?:\-|\_)")
+        if len(re.findall(regex2, caption)) > 3:
+            caption = re.sub(regex2, " ", caption)
+
+        caption = ftfy.fix_text(caption)
+        caption = html.unescape(html.unescape(caption))
+
+        caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption)  # jc6640
+        caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption)  # jc6640vc
+        caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption)  # 6640vc231
+
+        caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
+        caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
+        caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
+        caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
+        caption = re.sub(r"\bpage\s+\d+\b", "", caption)
+
+        caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption)  # j2d1a2a...
+
+        caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
+
+        caption = re.sub(r"\b\s+\:\s+", r": ", caption)
+        caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
+        caption = re.sub(r"\s+", " ", caption)
+
+        caption.strip()
+
+        caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
+        caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
+        caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
+        caption = re.sub(r"^\.\S+$", "", caption)
+
+        return caption.strip()
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
+    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
+        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
+        if isinstance(generator, list) and len(generator) != batch_size:
+            raise ValueError(
+                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @staticmethod + def classify_height_width_bin(height: int, width: int, ratios: dict) -> Tuple[int, int]: + """Returns binned height and width.""" + ar = float(height / width) + closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - ar)) + default_hw = ratios[closest_ratio] + return int(default_hw[0]), int(default_hw[1]) + + @staticmethod + def resize_and_crop_tensor(samples: torch.Tensor, new_width: int, new_height: int) -> torch.Tensor: + orig_height, orig_width = samples.shape[2], samples.shape[3] + + # Check if resizing is needed + if orig_height != new_height or orig_width != new_width: + ratio = max(new_height / orig_height, new_width / orig_width) + resized_width = int(orig_width * ratio) + resized_height = int(orig_height * ratio) + + # Resize + samples = F.interpolate( + samples, size=(resized_height, resized_width), mode="bilinear", align_corners=False + ) + + # Center Crop + start_x = (resized_width - new_width) // 2 + end_x = start_x + new_width + start_y = (resized_height - new_height) // 2 + end_y = start_y + new_height + samples = samples[:, :, start_y:end_y, start_x:end_x] + + return samples + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + negative_prompt: str = "", + num_inference_steps: int = 20, + timesteps: List[int] = None, + guidance_scale: float = 4.5, + num_images_per_prompt: Optional[int] = 1, + height: Optional[int] = None, + width: Optional[int] = None, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + prompt_attention_mask: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_attention_mask: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + clean_caption: bool = True, + use_resolution_binning: bool = True, + max_sequence_length: int = 120, + **kwargs, + ) -> Union[ImagePipelineOutput, Tuple]: + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. 
+ guidance_scale (`float`, *optional*, defaults to 4.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + height (`int`, *optional*, defaults to self.unet.config.sample_size): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size): + The width in pixels of the generated image. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + prompt_attention_mask (`torch.FloatTensor`, *optional*): Pre-generated attention mask for text embeddings. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. For PixArt-Alpha this negative prompt should be "". If not + provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. + negative_prompt_attention_mask (`torch.FloatTensor`, *optional*): + Pre-generated attention mask for negative text embeddings. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + use_resolution_binning (`bool` defaults to `True`): + If set to `True`, the requested height and width are first mapped to the closest resolutions using + `ASPECT_RATIO_1024_BIN`. 
After the produced latents are decoded into images, they are resized back to + the requested resolution. Useful for generating non-square images. + max_sequence_length (`int` defaults to 120): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + if "mask_feature" in kwargs: + deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." + deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False) + # 1. Check inputs. Raise error if not correct + height = height or self.transformer.config.sample_size * self.vae_scale_factor + width = width or self.transformer.config.sample_size * self.vae_scale_factor + if use_resolution_binning: + if self.transformer.config.sample_size == 128: + aspect_ratio_bin = ASPECT_RATIO_1024_BIN + elif self.transformer.config.sample_size == 64: + aspect_ratio_bin = ASPECT_RATIO_512_BIN + elif self.transformer.config.sample_size == 32: + aspect_ratio_bin = ASPECT_RATIO_256_BIN + else: + raise ValueError("Invalid sample size") + orig_height, orig_width = height, width + height, width = self.classify_height_width_bin(height, width, ratios=aspect_ratio_bin) + + self.check_inputs( + prompt, + height, + width, + negative_prompt, + callback_steps, + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + ) + + # 2. Default height and width to transformer + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + ( + prompt_embeds, + prompt_attention_mask, + negative_prompt_embeds, + negative_prompt_attention_mask, + ) = self.encode_prompt( + prompt, + do_classifier_free_guidance, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_attention_mask=prompt_attention_mask, + negative_prompt_attention_mask=negative_prompt_attention_mask, + clean_caption=clean_caption, + max_sequence_length=max_sequence_length, + ) + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + + # 5. Prepare latents. + latent_channels = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + latent_channels, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Prepare micro-conditions. + added_cond_kwargs = {"resolution": None, "aspect_ratio": None} + if self.transformer.config.sample_size == 128: + resolution = torch.tensor([height, width]).repeat(batch_size * num_images_per_prompt, 1) + aspect_ratio = torch.tensor([float(height / width)]).repeat(batch_size * num_images_per_prompt, 1) + resolution = resolution.to(dtype=prompt_embeds.dtype, device=device) + aspect_ratio = aspect_ratio.to(dtype=prompt_embeds.dtype, device=device) + + if do_classifier_free_guidance: + resolution = torch.cat([resolution, resolution], dim=0) + aspect_ratio = torch.cat([aspect_ratio, aspect_ratio], dim=0) + + added_cond_kwargs = {"resolution": resolution, "aspect_ratio": aspect_ratio} + + # 7. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + current_timestep = t + if not torch.is_tensor(current_timestep): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = latent_model_input.device.type == "mps" + if isinstance(current_timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device) + elif len(current_timestep.shape) == 0: + current_timestep = current_timestep[None].to(latent_model_input.device) + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + current_timestep = current_timestep.expand(latent_model_input.shape[0]) + + # predict noise model_output + noise_pred = self.transformer( + latent_model_input, + encoder_hidden_states=prompt_embeds, + encoder_attention_mask=prompt_attention_mask, + timestep=current_timestep, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # learned sigma + if self.transformer.config.out_channels // 2 == latent_channels: + noise_pred = noise_pred.chunk(2, dim=1)[0] + else: + noise_pred = noise_pred + + # compute previous image: x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + if use_resolution_binning: + image = self.resize_and_crop_tensor(image, orig_width, orig_height) + else: + image = latents + + if not output_type == "latent": + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + 
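+        # Classifier-free guidance, as used in the denoising loop above, blends the
+        # unconditional and text-conditioned noise estimates:
+        #
+        #     noise_pred = eps_uncond + guidance_scale * (eps_text - eps_uncond)
+        #
+        # A minimal stand-alone sketch of the same update (illustrative only; the tensor
+        # shapes are assumed, not taken from this pipeline):
+        #
+        #     import torch
+        #     eps = torch.randn(2, 4, 128, 128)   # doubled batch: [uncond, text]
+        #     eps_uncond, eps_text = eps.chunk(2)
+        #     guided = eps_uncond + 4.5 * (eps_text - eps_uncond)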
self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py new file mode 100644 index 000000000..70f5b1a54 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py @@ -0,0 +1,49 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_output"] = ["SemanticStableDiffusionPipelineOutput"] + _import_structure["pipeline_semantic_stable_diffusion"] = ["SemanticStableDiffusionPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py new file mode 100644 index 000000000..349912993 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py @@ -0,0 +1,25 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput + + +@dataclass +class SemanticStableDiffusionPipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + nsfw_content_detected (`List[bool]`) + List indicating whether the corresponding generated image contains “not-safe-for-work” (nsfw) content or + `None` if safety checking could not be performed. 
+ """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py new file mode 100644 index 000000000..96873423f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py @@ -0,0 +1,718 @@ +import inspect +from itertools import repeat +from typing import Callable, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...models import AutoencoderKL, UNet2DConditionModel +from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import SemanticStableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class SemanticStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion with latent editing. + + This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionPipeline`]. Check the superclass + documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular + device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`Q16SafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + editing_prompt: Optional[Union[str, List[str]]] = None, + editing_prompt_embeddings: Optional[torch.Tensor] = None, + reverse_editing_direction: Optional[Union[bool, List[bool]]] = False, + edit_guidance_scale: Optional[Union[float, List[float]]] = 5, + edit_warmup_steps: Optional[Union[int, List[int]]] = 10, + edit_cooldown_steps: Optional[Union[int, List[int]]] = None, + edit_threshold: Optional[Union[float, List[float]]] = 0.9, + edit_momentum_scale: Optional[float] = 0.1, + edit_mom_beta: Optional[float] = 0.4, + edit_weights: Optional[List[float]] = None, + sem_guidance: Optional[List[torch.Tensor]] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
+ negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + editing_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to use for semantic guidance. Semantic guidance is disabled by setting + `editing_prompt = None`. Guidance direction of prompt should be specified via + `reverse_editing_direction`. + editing_prompt_embeddings (`torch.Tensor`, *optional*): + Pre-computed embeddings to use for semantic guidance. Guidance direction of embedding should be + specified via `reverse_editing_direction`. + reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`): + Whether the corresponding prompt in `editing_prompt` should be increased or decreased. + edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5): + Guidance scale for semantic guidance. If provided as a list, values should correspond to + `editing_prompt`. + edit_warmup_steps (`float` or `List[float]`, *optional*, defaults to 10): + Number of diffusion steps (for each prompt) for which semantic guidance is not applied. Momentum is + calculated for those steps and applied once all warmup periods are over. + edit_cooldown_steps (`float` or `List[float]`, *optional*, defaults to `None`): + Number of diffusion steps (for each prompt) after which semantic guidance is longer applied. + edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9): + Threshold of semantic guidance. + edit_momentum_scale (`float`, *optional*, defaults to 0.1): + Scale of the momentum to be added to the semantic guidance at each diffusion step. If set to 0.0, + momentum is disabled. Momentum is already built up during warmup (for diffusion steps smaller than + `sld_warmup_steps`). Momentum is only added to latent guidance once all warmup periods are finished. 
+ edit_mom_beta (`float`, *optional*, defaults to 0.4): + Defines how semantic guidance momentum builds up. `edit_mom_beta` indicates how much of the previous + momentum is kept. Momentum is already built up during warmup (for diffusion steps smaller than + `edit_warmup_steps`). + edit_weights (`List[float]`, *optional*, defaults to `None`): + Indicates how much each individual concept should influence the overall guidance. If no weights are + provided all concepts are applied equally. + sem_guidance (`List[torch.Tensor]`, *optional*): + List of pre-generated guidance vectors to be applied at generation. Length of the list has to + correspond to `num_inference_steps`. + + Examples: + + ```py + >>> import torch + >>> from diffusers import SemanticStableDiffusionPipeline + + >>> pipe = SemanticStableDiffusionPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> out = pipe( + ... prompt="a photo of the face of a woman", + ... num_images_per_prompt=1, + ... guidance_scale=7, + ... editing_prompt=[ + ... "smiling, smile", # Concepts to apply + ... "glasses, wearing glasses", + ... "curls, wavy hair, curly hair", + ... "beard, full beard, mustache", + ... ], + ... reverse_editing_direction=[ + ... False, + ... False, + ... False, + ... False, + ... ], # Direction of guidance i.e. increase all concepts + ... edit_warmup_steps=[10, 10, 10, 10], # Warmup period for each concept + ... edit_guidance_scale=[4, 5, 5, 5.4], # Guidance scale for each concept + ... edit_threshold=[ + ... 0.99, + ... 0.975, + ... 0.925, + ... 0.96, + ... ], # Threshold for each concept. Threshold equals the percentile of the latent space that will be discarded. I.e. threshold=0.99 uses 1% of the latent dimensions + ... edit_momentum_scale=0.3, # Momentum scale that will be added to the latent guidance + ... edit_mom_beta=0.6, # Momentum beta + ... edit_weights=[1, 1, 1, 1, 1], # Weights of the individual concepts against each other + ... ) + >>> image = out.images[0] + ``` + + Returns: + [`~pipelines.semantic_stable_diffusion.SemanticStableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, + [`~pipelines.semantic_stable_diffusion.SemanticStableDiffusionPipelineOutput`] is returned, otherwise a + `tuple` is returned where the first element is a list with the generated images and the second element + is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" + (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps) + + # 2. 
Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + + if editing_prompt: + enable_edit_guidance = True + if isinstance(editing_prompt, str): + editing_prompt = [editing_prompt] + enabled_editing_prompts = len(editing_prompt) + elif editing_prompt_embeddings is not None: + enable_edit_guidance = True + enabled_editing_prompts = editing_prompt_embeddings.shape[0] + else: + enabled_editing_prompts = 0 + enable_edit_guidance = False + + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + + if text_input_ids.shape[-1] > self.tokenizer.model_max_length: + removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0] + + # duplicate text embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = text_embeddings.shape + text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) + text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if enable_edit_guidance: + # get safety text embeddings + if editing_prompt_embeddings is None: + edit_concepts_input = self.tokenizer( + [x for item in editing_prompt for x in repeat(item, batch_size)], + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="pt", + ) + + edit_concepts_input_ids = edit_concepts_input.input_ids + + if edit_concepts_input_ids.shape[-1] > self.tokenizer.model_max_length: + removed_text = self.tokenizer.batch_decode( + edit_concepts_input_ids[:, self.tokenizer.model_max_length :] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + edit_concepts_input_ids = edit_concepts_input_ids[:, : self.tokenizer.model_max_length] + edit_concepts = self.text_encoder(edit_concepts_input_ids.to(self.device))[0] + else: + edit_concepts = editing_prompt_embeddings.to(self.device).repeat(batch_size, 1, 1) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + bs_embed_edit, seq_len_edit, _ = edit_concepts.shape + edit_concepts = edit_concepts.repeat(1, num_images_per_prompt, 1) + edit_concepts = edit_concepts.view(bs_embed_edit * num_images_per_prompt, seq_len_edit, -1) + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + # get unconditional embeddings for classifier free guidance + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." 
+ ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = uncond_embeddings.shape[1] + uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) + uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if enable_edit_guidance: + text_embeddings = torch.cat([uncond_embeddings, text_embeddings, edit_concepts]) + else: + text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) + # get the initial random noise unless the user supplied it + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=self.device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + text_embeddings.dtype, + self.device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. 
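+        # The denoising loop below implements semantic guidance (SEGA): for each editing
+        # concept, the gap between that concept's noise estimate and the unconditional one
+        # is optionally sign-flipped, scaled, sparsified by keeping only values above the
+        # per-image `edit_threshold` quantile, weighted, and accumulated with momentum.
+        # Schematically (an illustrative summary, not code that runs here):
+        #
+        #     edit_c   = edit_guidance_scale_c * (eps_edit_c - eps_uncond)
+        #     edit_c   = where(|edit_c| >= quantile(|edit_c|, edit_threshold_c), edit_c, 0)
+        #     combined = sum_c(weight_c * edit_c) + edit_momentum_scale * momentum
+        #     momentum = edit_mom_beta * momentum + (1 - edit_mom_beta) * combined
+        #     noise_guidance += combined   # applied once the warmup steps have passed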
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # Initialize edit_momentum to None + edit_momentum = None + + self.uncond_estimates = None + self.text_estimates = None + self.edit_estimates = None + self.sem_guidance = None + + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + torch.cat([latents] * (2 + enabled_editing_prompts)) if do_classifier_free_guidance else latents + ) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_out = noise_pred.chunk(2 + enabled_editing_prompts) # [b,4, 64, 64] + noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1] + noise_pred_edit_concepts = noise_pred_out[2:] + + # default text guidance + noise_guidance = guidance_scale * (noise_pred_text - noise_pred_uncond) + # noise_guidance = (noise_pred_text - noise_pred_edit_concepts[0]) + + if self.uncond_estimates is None: + self.uncond_estimates = torch.zeros((num_inference_steps + 1, *noise_pred_uncond.shape)) + self.uncond_estimates[i] = noise_pred_uncond.detach().cpu() + + if self.text_estimates is None: + self.text_estimates = torch.zeros((num_inference_steps + 1, *noise_pred_text.shape)) + self.text_estimates[i] = noise_pred_text.detach().cpu() + + if self.edit_estimates is None and enable_edit_guidance: + self.edit_estimates = torch.zeros( + (num_inference_steps + 1, len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape) + ) + + if self.sem_guidance is None: + self.sem_guidance = torch.zeros((num_inference_steps + 1, *noise_pred_text.shape)) + + if edit_momentum is None: + edit_momentum = torch.zeros_like(noise_guidance) + + if enable_edit_guidance: + concept_weights = torch.zeros( + (len(noise_pred_edit_concepts), noise_guidance.shape[0]), + device=self.device, + dtype=noise_guidance.dtype, + ) + noise_guidance_edit = torch.zeros( + (len(noise_pred_edit_concepts), *noise_guidance.shape), + device=self.device, + dtype=noise_guidance.dtype, + ) + # noise_guidance_edit = torch.zeros_like(noise_guidance) + warmup_inds = [] + for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts): + self.edit_estimates[i, c] = noise_pred_edit_concept + if isinstance(edit_guidance_scale, list): + edit_guidance_scale_c = edit_guidance_scale[c] + else: + edit_guidance_scale_c = edit_guidance_scale + + if isinstance(edit_threshold, list): + edit_threshold_c = edit_threshold[c] + else: + edit_threshold_c = edit_threshold + if isinstance(reverse_editing_direction, list): + reverse_editing_direction_c = reverse_editing_direction[c] + else: + reverse_editing_direction_c = reverse_editing_direction + if edit_weights: + edit_weight_c = edit_weights[c] + else: + edit_weight_c = 1.0 + if isinstance(edit_warmup_steps, list): + edit_warmup_steps_c = edit_warmup_steps[c] + else: + edit_warmup_steps_c = edit_warmup_steps + + if isinstance(edit_cooldown_steps, list): + edit_cooldown_steps_c = edit_cooldown_steps[c] + elif edit_cooldown_steps is None: + edit_cooldown_steps_c = i + 1 + else: + edit_cooldown_steps_c = edit_cooldown_steps + if i >= edit_warmup_steps_c: + warmup_inds.append(c) + if i >= edit_cooldown_steps_c: + noise_guidance_edit[c, :, :, :, :] = torch.zeros_like(noise_pred_edit_concept) + continue + + noise_guidance_edit_tmp = 
noise_pred_edit_concept - noise_pred_uncond + # tmp_weights = (noise_pred_text - noise_pred_edit_concept).sum(dim=(1, 2, 3)) + tmp_weights = (noise_guidance - noise_pred_edit_concept).sum(dim=(1, 2, 3)) + + tmp_weights = torch.full_like(tmp_weights, edit_weight_c) # * (1 / enabled_editing_prompts) + if reverse_editing_direction_c: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 + concept_weights[c, :] = tmp_weights + + noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c + + # torch.quantile function expects float32 + if noise_guidance_edit_tmp.dtype == torch.float32: + tmp = torch.quantile( + torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2), + edit_threshold_c, + dim=2, + keepdim=False, + ) + else: + tmp = torch.quantile( + torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2).to(torch.float32), + edit_threshold_c, + dim=2, + keepdim=False, + ).to(noise_guidance_edit_tmp.dtype) + + noise_guidance_edit_tmp = torch.where( + torch.abs(noise_guidance_edit_tmp) >= tmp[:, :, None, None], + noise_guidance_edit_tmp, + torch.zeros_like(noise_guidance_edit_tmp), + ) + noise_guidance_edit[c, :, :, :, :] = noise_guidance_edit_tmp + + # noise_guidance_edit = noise_guidance_edit + noise_guidance_edit_tmp + + warmup_inds = torch.tensor(warmup_inds).to(self.device) + if len(noise_pred_edit_concepts) > warmup_inds.shape[0] > 0: + concept_weights = concept_weights.to("cpu") # Offload to cpu + noise_guidance_edit = noise_guidance_edit.to("cpu") + + concept_weights_tmp = torch.index_select(concept_weights.to(self.device), 0, warmup_inds) + concept_weights_tmp = torch.where( + concept_weights_tmp < 0, torch.zeros_like(concept_weights_tmp), concept_weights_tmp + ) + concept_weights_tmp = concept_weights_tmp / concept_weights_tmp.sum(dim=0) + # concept_weights_tmp = torch.nan_to_num(concept_weights_tmp) + + noise_guidance_edit_tmp = torch.index_select( + noise_guidance_edit.to(self.device), 0, warmup_inds + ) + noise_guidance_edit_tmp = torch.einsum( + "cb,cbijk->bijk", concept_weights_tmp, noise_guidance_edit_tmp + ) + noise_guidance_edit_tmp = noise_guidance_edit_tmp + noise_guidance = noise_guidance + noise_guidance_edit_tmp + + self.sem_guidance[i] = noise_guidance_edit_tmp.detach().cpu() + + del noise_guidance_edit_tmp + del concept_weights_tmp + concept_weights = concept_weights.to(self.device) + noise_guidance_edit = noise_guidance_edit.to(self.device) + + concept_weights = torch.where( + concept_weights < 0, torch.zeros_like(concept_weights), concept_weights + ) + + concept_weights = torch.nan_to_num(concept_weights) + + noise_guidance_edit = torch.einsum("cb,cbijk->bijk", concept_weights, noise_guidance_edit) + + noise_guidance_edit = noise_guidance_edit + edit_momentum_scale * edit_momentum + + edit_momentum = edit_mom_beta * edit_momentum + (1 - edit_mom_beta) * noise_guidance_edit + + if warmup_inds.shape[0] == len(noise_pred_edit_concepts): + noise_guidance = noise_guidance + noise_guidance_edit + self.sem_guidance[i] = noise_guidance_edit.detach().cpu() + + if sem_guidance is not None: + edit_guidance = sem_guidance[i].to(self.device) + noise_guidance = noise_guidance + edit_guidance + + noise_pred = noise_pred_uncond + noise_guidance + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 8. 
Post-processing + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, self.device, text_embeddings.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + if not return_dict: + return (image, has_nsfw_concept) + + return SemanticStableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/__init__.py new file mode 100644 index 000000000..4ed563c4a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/__init__.py @@ -0,0 +1,71 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["camera"] = ["create_pan_cameras"] + _import_structure["pipeline_shap_e"] = ["ShapEPipeline"] + _import_structure["pipeline_shap_e_img2img"] = ["ShapEImg2ImgPipeline"] + _import_structure["renderer"] = [ + "BoundingBoxVolume", + "ImportanceRaySampler", + "MLPNeRFModelOutput", + "MLPNeRSTFModel", + "ShapEParamsProjModel", + "ShapERenderer", + "StratifiedRaySampler", + "VoidNeRFModel", + ] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .camera import create_pan_cameras + from .pipeline_shap_e import ShapEPipeline + from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline + from .renderer import ( + BoundingBoxVolume, + ImportanceRaySampler, + MLPNeRFModelOutput, + MLPNeRSTFModel, + ShapEParamsProjModel, + ShapERenderer, + StratifiedRaySampler, + VoidNeRFModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/camera.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/camera.py new file mode 100644 index 000000000..d4b94c300 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/camera.py @@ -0,0 +1,147 @@ +# Copyright 2024 Open AI and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Tuple + +import numpy as np +import torch + + +@dataclass +class DifferentiableProjectiveCamera: + """ + Implements a batch, differentiable, standard pinhole camera + """ + + origin: torch.Tensor # [batch_size x 3] + x: torch.Tensor # [batch_size x 3] + y: torch.Tensor # [batch_size x 3] + z: torch.Tensor # [batch_size x 3] + width: int + height: int + x_fov: float + y_fov: float + shape: Tuple[int] + + def __post_init__(self): + assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] + assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 + assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2 + + def resolution(self): + return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32)) + + def fov(self): + return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32)) + + def get_image_coords(self) -> torch.Tensor: + """ + :return: coords of shape (width * height, 2) + """ + pixel_indices = torch.arange(self.height * self.width) + coords = torch.stack( + [ + pixel_indices % self.width, + torch.div(pixel_indices, self.width, rounding_mode="trunc"), + ], + axis=1, + ) + return coords + + @property + def camera_rays(self): + batch_size, *inner_shape = self.shape + inner_batch_size = int(np.prod(inner_shape)) + + coords = self.get_image_coords() + coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape]) + rays = self.get_camera_rays(coords) + + rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3) + + return rays + + def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor: + batch_size, *shape, n_coords = coords.shape + assert n_coords == 2 + assert batch_size == self.origin.shape[0] + + flat = coords.view(batch_size, -1, 2) + + res = self.resolution() + fov = self.fov() + + fracs = (flat.float() / (res - 1)) * 2 - 1 + fracs = fracs * torch.tan(fov / 2) + + fracs = fracs.view(batch_size, -1, 2) + directions = ( + self.z.view(batch_size, 1, 3) + + self.x.view(batch_size, 1, 3) * fracs[:, :, :1] + + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:] + ) + directions = directions / directions.norm(dim=-1, keepdim=True) + rays = torch.stack( + [ + torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]), + directions, + ], + dim=2, + ) + return rays.view(batch_size, *shape, 2, 3) + + def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera": + """ + Creates a new camera for the resized view assuming the aspect ratio does not change. + """ + assert width * self.height == height * self.width, "The aspect ratio should not change." 
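+        # Note on the ray construction in `get_camera_rays` above: pixel indices are mapped
+        # to normalized coordinates in [-1, 1], scaled by tan(fov / 2), and combined with
+        # the camera basis vectors, roughly
+        #
+        #     direction = normalize(z + frac_x * x + frac_y * y)
+        #
+        # so every pixel yields an (origin, direction) pair of 3-vectors.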
+ return DifferentiableProjectiveCamera( + origin=self.origin, + x=self.x, + y=self.y, + z=self.z, + width=width, + height=height, + x_fov=self.x_fov, + y_fov=self.y_fov, + ) + + +def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera: + origins = [] + xs = [] + ys = [] + zs = [] + for theta in np.linspace(0, 2 * np.pi, num=20): + z = np.array([np.sin(theta), np.cos(theta), -0.5]) + z /= np.sqrt(np.sum(z**2)) + origin = -z * 4 + x = np.array([np.cos(theta), -np.sin(theta), 0.0]) + y = np.cross(z, x) + origins.append(origin) + xs.append(x) + ys.append(y) + zs.append(z) + return DifferentiableProjectiveCamera( + origin=torch.from_numpy(np.stack(origins, axis=0)).float(), + x=torch.from_numpy(np.stack(xs, axis=0)).float(), + y=torch.from_numpy(np.stack(ys, axis=0)).float(), + z=torch.from_numpy(np.stack(zs, axis=0)).float(), + width=size, + height=size, + x_fov=0.7, + y_fov=0.7, + shape=(1, len(xs)), + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/pipeline_shap_e.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/pipeline_shap_e.py new file mode 100644 index 000000000..1ef10e17c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/pipeline_shap_e.py @@ -0,0 +1,334 @@ +# Copyright 2024 Open AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPTextModelWithProjection, CLIPTokenizer + +from ...models import PriorTransformer +from ...schedulers import HeunDiscreteScheduler +from ...utils import ( + BaseOutput, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .renderer import ShapERenderer + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import DiffusionPipeline + >>> from diffusers.utils import export_to_gif + + >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + >>> repo = "openai/shap-e" + >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) + >>> pipe = pipe.to(device) + + >>> guidance_scale = 15.0 + >>> prompt = "a shark" + + >>> images = pipe( + ... prompt, + ... guidance_scale=guidance_scale, + ... num_inference_steps=64, + ... frame_size=256, + ... ).images + + >>> gif_path = export_to_gif(images[0], "shark_3d.gif") + ``` +""" + + +@dataclass +class ShapEPipelineOutput(BaseOutput): + """ + Output class for [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`]. + + Args: + images (`torch.FloatTensor`) + A list of images for 3D rendering. 
+ """ + + images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]] + + +class ShapEPipeline(DiffusionPipeline): + """ + Pipeline for generating latent representation of a 3D asset and rendering with the NeRF method. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + text_encoder ([`~transformers.CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + scheduler ([`HeunDiscreteScheduler`]): + A scheduler to be used in combination with the `prior` model to generate image embedding. + shap_e_renderer ([`ShapERenderer`]): + Shap-E renderer projects the generated latents into parameters of a MLP to create 3D objects with the NeRF + rendering method. + """ + + model_cpu_offload_seq = "text_encoder->prior" + _exclude_from_cpu_offload = ["shap_e_renderer"] + + def __init__( + self, + prior: PriorTransformer, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + scheduler: HeunDiscreteScheduler, + shap_e_renderer: ShapERenderer, + ): + super().__init__() + + self.register_modules( + prior=prior, + text_encoder=text_encoder, + tokenizer=tokenizer, + scheduler=scheduler, + shap_e_renderer=shap_e_renderer, + ) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + ): + len(prompt) if isinstance(prompt, list) else 1 + + # YiYi Notes: set pad_token_id to be 0, not sure why I can't set in the config file + self.tokenizer.pad_token_id = 0 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + prompt_embeds = text_encoder_output.text_embeds + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + # in Shap-E it normalize the prompt_embeds and then later rescale it + prompt_embeds = prompt_embeds / torch.linalg.norm(prompt_embeds, dim=-1, keepdim=True) + + if do_classifier_free_guidance: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + + # For classifier free guidance, we need to do two forward 
passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # Rescale the features to have unit variance + prompt_embeds = math.sqrt(prompt_embeds.shape[1]) * prompt_embeds + + return prompt_embeds + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: str, + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + guidance_scale: float = 4.0, + frame_size: int = 64, + output_type: Optional[str] = "pil", # pil, np, latent, mesh + return_dict: bool = True, + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + guidance_scale (`float`, *optional*, defaults to 4.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + frame_size (`int`, *optional*, default to 64): + The width and height of each image frame of the generated 3D output. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`), `"latent"` (`torch.Tensor`), or mesh ([`MeshDecoderOutput`]). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] instead of a plain + tuple. + + Examples: + + Returns: + [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images. 
+ """ + + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = guidance_scale > 1.0 + prompt_embeds = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance) + + # prior + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + num_embeddings = self.prior.config.num_embeddings + embedding_dim = self.prior.config.embedding_dim + + latents = self.prepare_latents( + (batch_size, num_embeddings * embedding_dim), + prompt_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + + # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim + latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim) + + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + noise_pred = self.prior( + scaled_model_input, + timestep=t, + proj_embedding=prompt_embeds, + ).predicted_image_embedding + + # remove the variance + noise_pred, _ = noise_pred.split( + scaled_model_input.shape[2], dim=2 + ) # batch_size, num_embeddings, embedding_dim + + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) + + latents = self.scheduler.step( + noise_pred, + timestep=t, + sample=latents, + ).prev_sample + + # Offload all models + self.maybe_free_model_hooks() + + if output_type not in ["np", "pil", "latent", "mesh"]: + raise ValueError( + f"Only the output types `pil`, `np`, `latent` and `mesh` are supported not output_type={output_type}" + ) + + if output_type == "latent": + return ShapEPipelineOutput(images=latents) + + images = [] + if output_type == "mesh": + for i, latent in enumerate(latents): + mesh = self.shap_e_renderer.decode_to_mesh( + latent[None, :], + device, + ) + images.append(mesh) + + else: + # np, pil + for i, latent in enumerate(latents): + image = self.shap_e_renderer.decode_to_image( + latent[None, :], + device, + size=frame_size, + ) + images.append(image) + + images = torch.stack(images) + + images = images.cpu().numpy() + + if output_type == "pil": + images = [self.numpy_to_pil(image) for image in images] + + if not return_dict: + return (images,) + + return ShapEPipelineOutput(images=images) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py new file mode 100644 index 000000000..641ec56a1 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py @@ -0,0 +1,321 @@ +# Copyright 2024 Open AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPVisionModel + +from ...models import PriorTransformer +from ...schedulers import HeunDiscreteScheduler +from ...utils import ( + BaseOutput, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .renderer import ShapERenderer + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from PIL import Image + >>> import torch + >>> from diffusers import DiffusionPipeline + >>> from diffusers.utils import export_to_gif, load_image + + >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + >>> repo = "openai/shap-e-img2img" + >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) + >>> pipe = pipe.to(device) + + >>> guidance_scale = 3.0 + >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png" + >>> image = load_image(image_url).convert("RGB") + + >>> images = pipe( + ... image, + ... guidance_scale=guidance_scale, + ... num_inference_steps=64, + ... frame_size=256, + ... ).images + + >>> gif_path = export_to_gif(images[0], "corgi_3d.gif") + ``` +""" + + +@dataclass +class ShapEPipelineOutput(BaseOutput): + """ + Output class for [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`]. + + Args: + images (`torch.FloatTensor`) + A list of images for 3D rendering. + """ + + images: Union[PIL.Image.Image, np.ndarray] + + +class ShapEImg2ImgPipeline(DiffusionPipeline): + """ + Pipeline for generating latent representation of a 3D asset and rendering with the NeRF method from an image. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + prior ([`PriorTransformer`]): + The canonincal unCLIP prior to approximate the image embedding from the text embedding. + image_encoder ([`~transformers.CLIPVisionModel`]): + Frozen image-encoder. + image_processor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to process images. + scheduler ([`HeunDiscreteScheduler`]): + A scheduler to be used in combination with the `prior` model to generate image embedding. + shap_e_renderer ([`ShapERenderer`]): + Shap-E renderer projects the generated latents into parameters of a MLP to create 3D objects with the NeRF + rendering method. 
+ """ + + model_cpu_offload_seq = "image_encoder->prior" + _exclude_from_cpu_offload = ["shap_e_renderer"] + + def __init__( + self, + prior: PriorTransformer, + image_encoder: CLIPVisionModel, + image_processor: CLIPImageProcessor, + scheduler: HeunDiscreteScheduler, + shap_e_renderer: ShapERenderer, + ): + super().__init__() + + self.register_modules( + prior=prior, + image_encoder=image_encoder, + image_processor=image_processor, + scheduler=scheduler, + shap_e_renderer=shap_e_renderer, + ) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_image( + self, + image, + device, + num_images_per_prompt, + do_classifier_free_guidance, + ): + if isinstance(image, List) and isinstance(image[0], torch.Tensor): + image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) + + if not isinstance(image, torch.Tensor): + image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0) + + image = image.to(dtype=self.image_encoder.dtype, device=device) + + image_embeds = self.image_encoder(image)["last_hidden_state"] + image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 + + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + negative_image_embeds = torch.zeros_like(image_embeds) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_embeds = torch.cat([negative_image_embeds, image_embeds]) + + return image_embeds + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: Union[PIL.Image.Image, List[PIL.Image.Image]], + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + guidance_scale: float = 4.0, + frame_size: int = 64, + output_type: Optional[str] = "pil", # pil, np, latent, mesh + return_dict: bool = True, + ): + """ + The call function to the pipeline for generation. + + Args: + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image` or tensor representing an image batch to be used as the starting point. Can also accept image + latents as image, but if passing latents directly it is not encoded again. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. 
+ latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + guidance_scale (`float`, *optional*, defaults to 4.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + frame_size (`int`, *optional*, default to 64): + The width and height of each image frame of the generated 3D output. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`), `"latent"` (`torch.Tensor`), or mesh ([`MeshDecoderOutput`]). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] instead of a plain + tuple. + + Examples: + + Returns: + [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images. + """ + + if isinstance(image, PIL.Image.Image): + batch_size = 1 + elif isinstance(image, torch.Tensor): + batch_size = image.shape[0] + elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)): + batch_size = len(image) + else: + raise ValueError( + f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}" + ) + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = guidance_scale > 1.0 + image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance) + + # prior + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + num_embeddings = self.prior.config.num_embeddings + embedding_dim = self.prior.config.embedding_dim + + latents = self.prepare_latents( + (batch_size, num_embeddings * embedding_dim), + image_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + + # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim + latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim) + + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + noise_pred = self.prior( + scaled_model_input, + timestep=t, + proj_embedding=image_embeds, + ).predicted_image_embedding + + # remove the variance + noise_pred, _ = noise_pred.split( + scaled_model_input.shape[2], dim=2 + ) # batch_size, num_embeddings, embedding_dim + + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) + + latents = self.scheduler.step( + noise_pred, + timestep=t, + sample=latents, + ).prev_sample + + if output_type not in ["np", "pil", 
"latent", "mesh"]: + raise ValueError( + f"Only the output types `pil`, `np`, `latent` and `mesh` are supported not output_type={output_type}" + ) + + # Offload all models + self.maybe_free_model_hooks() + + if output_type == "latent": + return ShapEPipelineOutput(images=latents) + + images = [] + if output_type == "mesh": + for i, latent in enumerate(latents): + mesh = self.shap_e_renderer.decode_to_mesh( + latent[None, :], + device, + ) + images.append(mesh) + + else: + # np, pil + for i, latent in enumerate(latents): + image = self.shap_e_renderer.decode_to_image( + latent[None, :], + device, + size=frame_size, + ) + images.append(image) + + images = torch.stack(images) + + images = images.cpu().numpy() + + if output_type == "pil": + images = [self.numpy_to_pil(image) for image in images] + + if not return_dict: + return (images,) + + return ShapEPipelineOutput(images=images) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/renderer.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/renderer.py new file mode 100644 index 000000000..047c6f7dd --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/shap_e/renderer.py @@ -0,0 +1,1050 @@ +# Copyright 2024 Open AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import Dict, Optional, Tuple + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models import ModelMixin +from ...utils import BaseOutput +from .camera import create_pan_cameras + + +def sample_pmf(pmf: torch.Tensor, n_samples: int) -> torch.Tensor: + r""" + Sample from the given discrete probability distribution with replacement. + + The i-th bin is assumed to have mass pmf[i]. + + Args: + pmf: [batch_size, *shape, n_samples, 1] where (pmf.sum(dim=-2) == 1).all() + n_samples: number of samples + + Return: + indices sampled with replacement + """ + + *shape, support_size, last_dim = pmf.shape + assert last_dim == 1 + + cdf = torch.cumsum(pmf.view(-1, support_size), dim=1) + inds = torch.searchsorted(cdf, torch.rand(cdf.shape[0], n_samples, device=cdf.device)) + + return inds.view(*shape, n_samples, 1).clamp(0, support_size - 1) + + +def posenc_nerf(x: torch.Tensor, min_deg: int = 0, max_deg: int = 15) -> torch.Tensor: + """ + Concatenate x and its positional encodings, following NeRF. 
+ + Reference: https://arxiv.org/pdf/2210.04628.pdf + """ + if min_deg == max_deg: + return x + + scales = 2.0 ** torch.arange(min_deg, max_deg, dtype=x.dtype, device=x.device) + *shape, dim = x.shape + xb = (x.reshape(-1, 1, dim) * scales.view(1, -1, 1)).reshape(*shape, -1) + assert xb.shape[-1] == dim * (max_deg - min_deg) + emb = torch.cat([xb, xb + math.pi / 2.0], axis=-1).sin() + return torch.cat([x, emb], dim=-1) + + +def encode_position(position): + return posenc_nerf(position, min_deg=0, max_deg=15) + + +def encode_direction(position, direction=None): + if direction is None: + return torch.zeros_like(posenc_nerf(position, min_deg=0, max_deg=8)) + else: + return posenc_nerf(direction, min_deg=0, max_deg=8) + + +def _sanitize_name(x: str) -> str: + return x.replace(".", "__") + + +def integrate_samples(volume_range, ts, density, channels): + r""" + Function integrating the model output. + + Args: + volume_range: Specifies the integral range [t0, t1] + ts: timesteps + density: torch.Tensor [batch_size, *shape, n_samples, 1] + channels: torch.Tensor [batch_size, *shape, n_samples, n_channels] + returns: + channels: integrated rgb output weights: torch.Tensor [batch_size, *shape, n_samples, 1] (density + *transmittance)[i] weight for each rgb output at [..., i, :]. transmittance: transmittance of this volume + ) + """ + + # 1. Calculate the weights + _, _, dt = volume_range.partition(ts) + ddensity = density * dt + + mass = torch.cumsum(ddensity, dim=-2) + transmittance = torch.exp(-mass[..., -1, :]) + + alphas = 1.0 - torch.exp(-ddensity) + Ts = torch.exp(torch.cat([torch.zeros_like(mass[..., :1, :]), -mass[..., :-1, :]], dim=-2)) + # This is the probability of light hitting and reflecting off of + # something at depth [..., i, :]. + weights = alphas * Ts + + # 2. Integrate channels + channels = torch.sum(channels * weights, dim=-2) + + return channels, weights, transmittance + + +def volume_query_points(volume, grid_size): + indices = torch.arange(grid_size**3, device=volume.bbox_min.device) + zs = indices % grid_size + ys = torch.div(indices, grid_size, rounding_mode="trunc") % grid_size + xs = torch.div(indices, grid_size**2, rounding_mode="trunc") % grid_size + combined = torch.stack([xs, ys, zs], dim=1) + return (combined.float() / (grid_size - 1)) * (volume.bbox_max - volume.bbox_min) + volume.bbox_min + + +def _convert_srgb_to_linear(u: torch.Tensor): + return torch.where(u <= 0.04045, u / 12.92, ((u + 0.055) / 1.055) ** 2.4) + + +def _create_flat_edge_indices( + flat_cube_indices: torch.Tensor, + grid_size: Tuple[int, int, int], +): + num_xs = (grid_size[0] - 1) * grid_size[1] * grid_size[2] + y_offset = num_xs + num_ys = grid_size[0] * (grid_size[1] - 1) * grid_size[2] + z_offset = num_xs + num_ys + return torch.stack( + [ + # Edges spanning x-axis. + flat_cube_indices[:, 0] * grid_size[1] * grid_size[2] + + flat_cube_indices[:, 1] * grid_size[2] + + flat_cube_indices[:, 2], + flat_cube_indices[:, 0] * grid_size[1] * grid_size[2] + + (flat_cube_indices[:, 1] + 1) * grid_size[2] + + flat_cube_indices[:, 2], + flat_cube_indices[:, 0] * grid_size[1] * grid_size[2] + + flat_cube_indices[:, 1] * grid_size[2] + + flat_cube_indices[:, 2] + + 1, + flat_cube_indices[:, 0] * grid_size[1] * grid_size[2] + + (flat_cube_indices[:, 1] + 1) * grid_size[2] + + flat_cube_indices[:, 2] + + 1, + # Edges spanning y-axis. 
+ ( + y_offset + + flat_cube_indices[:, 0] * (grid_size[1] - 1) * grid_size[2] + + flat_cube_indices[:, 1] * grid_size[2] + + flat_cube_indices[:, 2] + ), + ( + y_offset + + (flat_cube_indices[:, 0] + 1) * (grid_size[1] - 1) * grid_size[2] + + flat_cube_indices[:, 1] * grid_size[2] + + flat_cube_indices[:, 2] + ), + ( + y_offset + + flat_cube_indices[:, 0] * (grid_size[1] - 1) * grid_size[2] + + flat_cube_indices[:, 1] * grid_size[2] + + flat_cube_indices[:, 2] + + 1 + ), + ( + y_offset + + (flat_cube_indices[:, 0] + 1) * (grid_size[1] - 1) * grid_size[2] + + flat_cube_indices[:, 1] * grid_size[2] + + flat_cube_indices[:, 2] + + 1 + ), + # Edges spanning z-axis. + ( + z_offset + + flat_cube_indices[:, 0] * grid_size[1] * (grid_size[2] - 1) + + flat_cube_indices[:, 1] * (grid_size[2] - 1) + + flat_cube_indices[:, 2] + ), + ( + z_offset + + (flat_cube_indices[:, 0] + 1) * grid_size[1] * (grid_size[2] - 1) + + flat_cube_indices[:, 1] * (grid_size[2] - 1) + + flat_cube_indices[:, 2] + ), + ( + z_offset + + flat_cube_indices[:, 0] * grid_size[1] * (grid_size[2] - 1) + + (flat_cube_indices[:, 1] + 1) * (grid_size[2] - 1) + + flat_cube_indices[:, 2] + ), + ( + z_offset + + (flat_cube_indices[:, 0] + 1) * grid_size[1] * (grid_size[2] - 1) + + (flat_cube_indices[:, 1] + 1) * (grid_size[2] - 1) + + flat_cube_indices[:, 2] + ), + ], + dim=-1, + ) + + +class VoidNeRFModel(nn.Module): + """ + Implements the default empty space model where all queries are rendered as background. + """ + + def __init__(self, background, channel_scale=255.0): + super().__init__() + background = nn.Parameter(torch.from_numpy(np.array(background)).to(dtype=torch.float32) / channel_scale) + + self.register_buffer("background", background) + + def forward(self, position): + background = self.background[None].to(position.device) + + shape = position.shape[:-1] + ones = [1] * (len(shape) - 1) + n_channels = background.shape[-1] + background = torch.broadcast_to(background.view(background.shape[0], *ones, n_channels), [*shape, n_channels]) + + return background + + +@dataclass +class VolumeRange: + t0: torch.Tensor + t1: torch.Tensor + intersected: torch.Tensor + + def __post_init__(self): + assert self.t0.shape == self.t1.shape == self.intersected.shape + + def partition(self, ts): + """ + Partitions t0 and t1 into n_samples intervals. + + Args: + ts: [batch_size, *shape, n_samples, 1] + + Return: + + lower: [batch_size, *shape, n_samples, 1] upper: [batch_size, *shape, n_samples, 1] delta: [batch_size, + *shape, n_samples, 1] + + where + ts \\in [lower, upper] deltas = upper - lower + """ + + mids = (ts[..., 1:, :] + ts[..., :-1, :]) * 0.5 + lower = torch.cat([self.t0[..., None, :], mids], dim=-2) + upper = torch.cat([mids, self.t1[..., None, :]], dim=-2) + delta = upper - lower + assert lower.shape == upper.shape == delta.shape == ts.shape + return lower, upper, delta + + +class BoundingBoxVolume(nn.Module): + """ + Axis-aligned bounding box defined by the two opposite corners. + """ + + def __init__( + self, + *, + bbox_min, + bbox_max, + min_dist: float = 0.0, + min_t_range: float = 1e-3, + ): + """ + Args: + bbox_min: the left/bottommost corner of the bounding box + bbox_max: the other corner of the bounding box + min_dist: all rays should start at least this distance away from the origin. 
+ """ + super().__init__() + + self.min_dist = min_dist + self.min_t_range = min_t_range + + self.bbox_min = torch.tensor(bbox_min) + self.bbox_max = torch.tensor(bbox_max) + self.bbox = torch.stack([self.bbox_min, self.bbox_max]) + assert self.bbox.shape == (2, 3) + assert min_dist >= 0.0 + assert min_t_range > 0.0 + + def intersect( + self, + origin: torch.Tensor, + direction: torch.Tensor, + t0_lower: Optional[torch.Tensor] = None, + epsilon=1e-6, + ): + """ + Args: + origin: [batch_size, *shape, 3] + direction: [batch_size, *shape, 3] + t0_lower: Optional [batch_size, *shape, 1] lower bound of t0 when intersecting this volume. + params: Optional meta parameters in case Volume is parametric + epsilon: to stabilize calculations + + Return: + A tuple of (t0, t1, intersected) where each has a shape [batch_size, *shape, 1]. If a ray intersects with + the volume, `o + td` is in the volume for all t in [t0, t1]. If the volume is bounded, t1 is guaranteed to + be on the boundary of the volume. + """ + + batch_size, *shape, _ = origin.shape + ones = [1] * len(shape) + bbox = self.bbox.view(1, *ones, 2, 3).to(origin.device) + + def _safe_divide(a, b, epsilon=1e-6): + return a / torch.where(b < 0, b - epsilon, b + epsilon) + + ts = _safe_divide(bbox - origin[..., None, :], direction[..., None, :], epsilon=epsilon) + + # Cases to think about: + # + # 1. t1 <= t0: the ray does not pass through the AABB. + # 2. t0 < t1 <= 0: the ray intersects but the BB is behind the origin. + # 3. t0 <= 0 <= t1: the ray starts from inside the BB + # 4. 0 <= t0 < t1: the ray is not inside and intersects with the BB twice. + # + # 1 and 4 are clearly handled from t0 < t1 below. + # Making t0 at least min_dist (>= 0) takes care of 2 and 3. + t0 = ts.min(dim=-2).values.max(dim=-1, keepdim=True).values.clamp(self.min_dist) + t1 = ts.max(dim=-2).values.min(dim=-1, keepdim=True).values + assert t0.shape == t1.shape == (batch_size, *shape, 1) + if t0_lower is not None: + assert t0.shape == t0_lower.shape + t0 = torch.maximum(t0, t0_lower) + + intersected = t0 + self.min_t_range < t1 + t0 = torch.where(intersected, t0, torch.zeros_like(t0)) + t1 = torch.where(intersected, t1, torch.ones_like(t1)) + + return VolumeRange(t0=t0, t1=t1, intersected=intersected) + + +class StratifiedRaySampler(nn.Module): + """ + Instead of fixed intervals, a sample is drawn uniformly at random from each interval. + """ + + def __init__(self, depth_mode: str = "linear"): + """ + :param depth_mode: linear samples ts linearly in depth. harmonic ensures + closer points are sampled more densely. 
+ """ + self.depth_mode = depth_mode + assert self.depth_mode in ("linear", "geometric", "harmonic") + + def sample( + self, + t0: torch.Tensor, + t1: torch.Tensor, + n_samples: int, + epsilon: float = 1e-3, + ) -> torch.Tensor: + """ + Args: + t0: start time has shape [batch_size, *shape, 1] + t1: finish time has shape [batch_size, *shape, 1] + n_samples: number of ts to sample + Return: + sampled ts of shape [batch_size, *shape, n_samples, 1] + """ + ones = [1] * (len(t0.shape) - 1) + ts = torch.linspace(0, 1, n_samples).view(*ones, n_samples).to(t0.dtype).to(t0.device) + + if self.depth_mode == "linear": + ts = t0 * (1.0 - ts) + t1 * ts + elif self.depth_mode == "geometric": + ts = (t0.clamp(epsilon).log() * (1.0 - ts) + t1.clamp(epsilon).log() * ts).exp() + elif self.depth_mode == "harmonic": + # The original NeRF recommends this interpolation scheme for + # spherical scenes, but there could be some weird edge cases when + # the observer crosses from the inner to outer volume. + ts = 1.0 / (1.0 / t0.clamp(epsilon) * (1.0 - ts) + 1.0 / t1.clamp(epsilon) * ts) + + mids = 0.5 * (ts[..., 1:] + ts[..., :-1]) + upper = torch.cat([mids, t1], dim=-1) + lower = torch.cat([t0, mids], dim=-1) + # yiyi notes: add a random seed here for testing, don't forget to remove + torch.manual_seed(0) + t_rand = torch.rand_like(ts) + + ts = lower + (upper - lower) * t_rand + return ts.unsqueeze(-1) + + +class ImportanceRaySampler(nn.Module): + """ + Given the initial estimate of densities, this samples more from regions/bins expected to have objects. + """ + + def __init__( + self, + volume_range: VolumeRange, + ts: torch.Tensor, + weights: torch.Tensor, + blur_pool: bool = False, + alpha: float = 1e-5, + ): + """ + Args: + volume_range: the range in which a ray intersects the given volume. + ts: earlier samples from the coarse rendering step + weights: discretized version of density * transmittance + blur_pool: if true, use 2-tap max + 2-tap blur filter from mip-NeRF. + alpha: small value to add to weights. + """ + self.volume_range = volume_range + self.ts = ts.clone().detach() + self.weights = weights.clone().detach() + self.blur_pool = blur_pool + self.alpha = alpha + + @torch.no_grad() + def sample(self, t0: torch.Tensor, t1: torch.Tensor, n_samples: int) -> torch.Tensor: + """ + Args: + t0: start time has shape [batch_size, *shape, 1] + t1: finish time has shape [batch_size, *shape, 1] + n_samples: number of ts to sample + Return: + sampled ts of shape [batch_size, *shape, n_samples, 1] + """ + lower, upper, _ = self.volume_range.partition(self.ts) + + batch_size, *shape, n_coarse_samples, _ = self.ts.shape + + weights = self.weights + if self.blur_pool: + padded = torch.cat([weights[..., :1, :], weights, weights[..., -1:, :]], dim=-2) + maxes = torch.maximum(padded[..., :-1, :], padded[..., 1:, :]) + weights = 0.5 * (maxes[..., :-1, :] + maxes[..., 1:, :]) + weights = weights + self.alpha + pmf = weights / weights.sum(dim=-2, keepdim=True) + inds = sample_pmf(pmf, n_samples) + assert inds.shape == (batch_size, *shape, n_samples, 1) + assert (inds >= 0).all() and (inds < n_coarse_samples).all() + + t_rand = torch.rand(inds.shape, device=inds.device) + lower_ = torch.gather(lower, -2, inds) + upper_ = torch.gather(upper, -2, inds) + + ts = lower_ + (upper_ - lower_) * t_rand + ts = torch.sort(ts, dim=-2).values + return ts + + +@dataclass +class MeshDecoderOutput(BaseOutput): + """ + A 3D triangle mesh with optional data at the vertices and faces. 
+ + Args: + verts (`torch.Tensor` of shape `(N, 3)`): + array of vertext coordinates + faces (`torch.Tensor` of shape `(N, 3)`): + array of triangles, pointing to indices in verts. + vertext_channels (Dict): + vertext coordinates for each color channel + """ + + verts: torch.Tensor + faces: torch.Tensor + vertex_channels: Dict[str, torch.Tensor] + + +class MeshDecoder(nn.Module): + """ + Construct meshes from Signed distance functions (SDFs) using marching cubes method + """ + + def __init__(self): + super().__init__() + cases = torch.zeros(256, 5, 3, dtype=torch.long) + masks = torch.zeros(256, 5, dtype=torch.bool) + + self.register_buffer("cases", cases) + self.register_buffer("masks", masks) + + def forward(self, field: torch.Tensor, min_point: torch.Tensor, size: torch.Tensor): + """ + For a signed distance field, produce a mesh using marching cubes. + + :param field: a 3D tensor of field values, where negative values correspond + to the outside of the shape. The dimensions correspond to the x, y, and z directions, respectively. + :param min_point: a tensor of shape [3] containing the point corresponding + to (0, 0, 0) in the field. + :param size: a tensor of shape [3] containing the per-axis distance from the + (0, 0, 0) field corner and the (-1, -1, -1) field corner. + """ + assert len(field.shape) == 3, "input must be a 3D scalar field" + dev = field.device + + cases = self.cases.to(dev) + masks = self.masks.to(dev) + + min_point = min_point.to(dev) + size = size.to(dev) + + grid_size = field.shape + grid_size_tensor = torch.tensor(grid_size).to(size) + + # Create bitmasks between 0 and 255 (inclusive) indicating the state + # of the eight corners of each cube. + bitmasks = (field > 0).to(torch.uint8) + bitmasks = bitmasks[:-1, :, :] | (bitmasks[1:, :, :] << 1) + bitmasks = bitmasks[:, :-1, :] | (bitmasks[:, 1:, :] << 2) + bitmasks = bitmasks[:, :, :-1] | (bitmasks[:, :, 1:] << 4) + + # Compute corner coordinates across the entire grid. + corner_coords = torch.empty(*grid_size, 3, device=dev, dtype=field.dtype) + corner_coords[range(grid_size[0]), :, :, 0] = torch.arange(grid_size[0], device=dev, dtype=field.dtype)[ + :, None, None + ] + corner_coords[:, range(grid_size[1]), :, 1] = torch.arange(grid_size[1], device=dev, dtype=field.dtype)[ + :, None + ] + corner_coords[:, :, range(grid_size[2]), 2] = torch.arange(grid_size[2], device=dev, dtype=field.dtype) + + # Compute all vertices across all edges in the grid, even though we will + # throw some out later. We have (X-1)*Y*Z + X*(Y-1)*Z + X*Y*(Z-1) vertices. + # These are all midpoints, and don't account for interpolation (which is + # done later based on the used edge midpoints). + edge_midpoints = torch.cat( + [ + ((corner_coords[:-1] + corner_coords[1:]) / 2).reshape(-1, 3), + ((corner_coords[:, :-1] + corner_coords[:, 1:]) / 2).reshape(-1, 3), + ((corner_coords[:, :, :-1] + corner_coords[:, :, 1:]) / 2).reshape(-1, 3), + ], + dim=0, + ) + + # Create a flat array of [X, Y, Z] indices for each cube. 
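# Editor's aside -- a minimal, self-contained sketch of the corner-bitmask packing used
# a few statements above (illustrative only, not part of the upstream diffusers file).
# For a single 2x2x2 cell, corner (x, y, z) lands on bit x + 2*y + 4*z, so every cube maps
# to an index in [0, 255] into the marching-cubes lookup tables (`cases` / `masks`).
import torch

demo_field = -torch.ones(2, 2, 2)
demo_field[0, 0, 0] = 1.0                                           # only corner (0, 0, 0) is "inside"
demo_bits = (demo_field > 0).to(torch.uint8)
demo_bits = demo_bits[:-1, :, :] | (demo_bits[1:, :, :] << 1)       # fold x into bits 0-1
demo_bits = demo_bits[:, :-1, :] | (demo_bits[:, 1:, :] << 2)       # fold y into bits 2-3
demo_bits = demo_bits[:, :, :-1] | (demo_bits[:, :, 1:] << 4)       # fold z into bits 4-7
assert demo_bits.shape == (1, 1, 1) and int(demo_bits) == 1         # corner (0, 0, 0) -> bit 0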
+ cube_indices = torch.zeros( + grid_size[0] - 1, grid_size[1] - 1, grid_size[2] - 1, 3, device=dev, dtype=torch.long + ) + cube_indices[range(grid_size[0] - 1), :, :, 0] = torch.arange(grid_size[0] - 1, device=dev)[:, None, None] + cube_indices[:, range(grid_size[1] - 1), :, 1] = torch.arange(grid_size[1] - 1, device=dev)[:, None] + cube_indices[:, :, range(grid_size[2] - 1), 2] = torch.arange(grid_size[2] - 1, device=dev) + flat_cube_indices = cube_indices.reshape(-1, 3) + + # Create a flat array mapping each cube to 12 global edge indices. + edge_indices = _create_flat_edge_indices(flat_cube_indices, grid_size) + + # Apply the LUT to figure out the triangles. + flat_bitmasks = bitmasks.reshape(-1).long() # must cast to long for indexing to believe this not a mask + local_tris = cases[flat_bitmasks] + local_masks = masks[flat_bitmasks] + # Compute the global edge indices for the triangles. + global_tris = torch.gather(edge_indices, 1, local_tris.reshape(local_tris.shape[0], -1)).reshape( + local_tris.shape + ) + # Select the used triangles for each cube. + selected_tris = global_tris.reshape(-1, 3)[local_masks.reshape(-1)] + + # Now we have a bunch of indices into the full list of possible vertices, + # but we want to reduce this list to only the used vertices. + used_vertex_indices = torch.unique(selected_tris.view(-1)) + used_edge_midpoints = edge_midpoints[used_vertex_indices] + old_index_to_new_index = torch.zeros(len(edge_midpoints), device=dev, dtype=torch.long) + old_index_to_new_index[used_vertex_indices] = torch.arange( + len(used_vertex_indices), device=dev, dtype=torch.long + ) + + # Rewrite the triangles to use the new indices + faces = torch.gather(old_index_to_new_index, 0, selected_tris.view(-1)).reshape(selected_tris.shape) + + # Compute the actual interpolated coordinates corresponding to edge midpoints. + v1 = torch.floor(used_edge_midpoints).to(torch.long) + v2 = torch.ceil(used_edge_midpoints).to(torch.long) + s1 = field[v1[:, 0], v1[:, 1], v1[:, 2]] + s2 = field[v2[:, 0], v2[:, 1], v2[:, 2]] + p1 = (v1.float() / (grid_size_tensor - 1)) * size + min_point + p2 = (v2.float() / (grid_size_tensor - 1)) * size + min_point + # The signs of s1 and s2 should be different. We want to find + # t such that t*s2 + (1-t)*s1 = 0. 
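# Editor's aside (illustrative sketch, not from the upstream file): solving
# t*s2 + (1 - t)*s1 = 0 for t gives t = s1 / (s1 - s2), i.e. the zero crossing of the
# linear interpolation between the two field samples. Tiny numeric check with made-up values:
import torch

demo_sa, demo_sb = torch.tensor(0.75), torch.tensor(-0.25)          # opposite signs straddle the surface
demo_t = demo_sa / (demo_sa - demo_sb)                              # = 0.75
assert torch.isclose(demo_t * demo_sb + (1 - demo_t) * demo_sa, torch.tensor(0.0))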
+ t = (s1 / (s1 - s2))[:, None] + verts = t * p2 + (1 - t) * p1 + + return MeshDecoderOutput(verts=verts, faces=faces, vertex_channels=None) + + +@dataclass +class MLPNeRFModelOutput(BaseOutput): + density: torch.Tensor + signed_distance: torch.Tensor + channels: torch.Tensor + ts: torch.Tensor + + +class MLPNeRSTFModel(ModelMixin, ConfigMixin): + @register_to_config + def __init__( + self, + d_hidden: int = 256, + n_output: int = 12, + n_hidden_layers: int = 6, + act_fn: str = "swish", + insert_direction_at: int = 4, + ): + super().__init__() + + # Instantiate the MLP + + # Find out the dimension of encoded position and direction + dummy = torch.eye(1, 3) + d_posenc_pos = encode_position(position=dummy).shape[-1] + d_posenc_dir = encode_direction(position=dummy).shape[-1] + + mlp_widths = [d_hidden] * n_hidden_layers + input_widths = [d_posenc_pos] + mlp_widths + output_widths = mlp_widths + [n_output] + + if insert_direction_at is not None: + input_widths[insert_direction_at] += d_posenc_dir + + self.mlp = nn.ModuleList([nn.Linear(d_in, d_out) for d_in, d_out in zip(input_widths, output_widths)]) + + if act_fn == "swish": + # self.activation = swish + # yiyi testing: + self.activation = lambda x: F.silu(x) + else: + raise ValueError(f"Unsupported activation function {act_fn}") + + self.sdf_activation = torch.tanh + self.density_activation = torch.nn.functional.relu + self.channel_activation = torch.sigmoid + + def map_indices_to_keys(self, output): + h_map = { + "sdf": (0, 1), + "density_coarse": (1, 2), + "density_fine": (2, 3), + "stf": (3, 6), + "nerf_coarse": (6, 9), + "nerf_fine": (9, 12), + } + + mapped_output = {k: output[..., start:end] for k, (start, end) in h_map.items()} + + return mapped_output + + def forward(self, *, position, direction, ts, nerf_level="coarse", rendering_mode="nerf"): + h = encode_position(position) + + h_preact = h + h_directionless = None + for i, layer in enumerate(self.mlp): + if i == self.config.insert_direction_at: # 4 in the config + h_directionless = h_preact + h_direction = encode_direction(position, direction=direction) + h = torch.cat([h, h_direction], dim=-1) + + h = layer(h) + + h_preact = h + + if i < len(self.mlp) - 1: + h = self.activation(h) + + h_final = h + if h_directionless is None: + h_directionless = h_preact + + activation = self.map_indices_to_keys(h_final) + + if nerf_level == "coarse": + h_density = activation["density_coarse"] + else: + h_density = activation["density_fine"] + + if rendering_mode == "nerf": + if nerf_level == "coarse": + h_channels = activation["nerf_coarse"] + else: + h_channels = activation["nerf_fine"] + + elif rendering_mode == "stf": + h_channels = activation["stf"] + + density = self.density_activation(h_density) + signed_distance = self.sdf_activation(activation["sdf"]) + channels = self.channel_activation(h_channels) + + # yiyi notes: I think signed_distance is not used + return MLPNeRFModelOutput(density=density, signed_distance=signed_distance, channels=channels, ts=ts) + + +class ChannelsProj(nn.Module): + def __init__( + self, + *, + vectors: int, + channels: int, + d_latent: int, + ): + super().__init__() + self.proj = nn.Linear(d_latent, vectors * channels) + self.norm = nn.LayerNorm(channels) + self.d_latent = d_latent + self.vectors = vectors + self.channels = channels + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x_bvd = x + w_vcd = self.proj.weight.view(self.vectors, self.channels, self.d_latent) + b_vc = self.proj.bias.view(1, self.vectors, self.channels) + h = 
torch.einsum("bvd,vcd->bvc", x_bvd, w_vcd) + h = self.norm(h) + + h = h + b_vc + return h + + +class ShapEParamsProjModel(ModelMixin, ConfigMixin): + """ + project the latent representation of a 3D asset to obtain weights of a multi-layer perceptron (MLP). + + For more details, see the original paper: + """ + + @register_to_config + def __init__( + self, + *, + param_names: Tuple[str] = ( + "nerstf.mlp.0.weight", + "nerstf.mlp.1.weight", + "nerstf.mlp.2.weight", + "nerstf.mlp.3.weight", + ), + param_shapes: Tuple[Tuple[int]] = ( + (256, 93), + (256, 256), + (256, 256), + (256, 256), + ), + d_latent: int = 1024, + ): + super().__init__() + + # check inputs + if len(param_names) != len(param_shapes): + raise ValueError("Must provide same number of `param_names` as `param_shapes`") + self.projections = nn.ModuleDict({}) + for k, (vectors, channels) in zip(param_names, param_shapes): + self.projections[_sanitize_name(k)] = ChannelsProj( + vectors=vectors, + channels=channels, + d_latent=d_latent, + ) + + def forward(self, x: torch.Tensor): + out = {} + start = 0 + for k, shape in zip(self.config.param_names, self.config.param_shapes): + vectors, _ = shape + end = start + vectors + x_bvd = x[:, start:end] + out[k] = self.projections[_sanitize_name(k)](x_bvd).reshape(len(x), *shape) + start = end + return out + + +class ShapERenderer(ModelMixin, ConfigMixin): + @register_to_config + def __init__( + self, + *, + param_names: Tuple[str] = ( + "nerstf.mlp.0.weight", + "nerstf.mlp.1.weight", + "nerstf.mlp.2.weight", + "nerstf.mlp.3.weight", + ), + param_shapes: Tuple[Tuple[int]] = ( + (256, 93), + (256, 256), + (256, 256), + (256, 256), + ), + d_latent: int = 1024, + d_hidden: int = 256, + n_output: int = 12, + n_hidden_layers: int = 6, + act_fn: str = "swish", + insert_direction_at: int = 4, + background: Tuple[float] = ( + 255.0, + 255.0, + 255.0, + ), + ): + super().__init__() + + self.params_proj = ShapEParamsProjModel( + param_names=param_names, + param_shapes=param_shapes, + d_latent=d_latent, + ) + self.mlp = MLPNeRSTFModel(d_hidden, n_output, n_hidden_layers, act_fn, insert_direction_at) + self.void = VoidNeRFModel(background=background, channel_scale=255.0) + self.volume = BoundingBoxVolume(bbox_max=[1.0, 1.0, 1.0], bbox_min=[-1.0, -1.0, -1.0]) + self.mesh_decoder = MeshDecoder() + + @torch.no_grad() + def render_rays(self, rays, sampler, n_samples, prev_model_out=None, render_with_direction=False): + """ + Perform volumetric rendering over a partition of possible t's in the union of rendering volumes (written below + with some abuse of notations) + + C(r) := sum( + transmittance(t[i]) * integrate( + lambda t: density(t) * channels(t) * transmittance(t), [t[i], t[i + 1]], + ) for i in range(len(parts)) + ) + transmittance(t[-1]) * void_model(t[-1]).channels + + where + + 1) transmittance(s) := exp(-integrate(density, [t[0], s])) calculates the probability of light passing through + the volume specified by [t[0], s]. (transmittance of 1 means light can pass freely) 2) density and channels are + obtained by evaluating the appropriate part.model at time t. 3) [t[i], t[i + 1]] is defined as the range of t + where the ray intersects (parts[i].volume \\ union(part.volume for part in parts[:i])) at the surface of the + shell (if bounded). If the ray does not intersect, the integral over this segment is evaluated as 0 and + transmittance(t[i + 1]) := transmittance(t[i]). 4) The last term is integration to infinity (e.g. [t[-1], + math.inf]) that is evaluated by the void_model (i.e. 
we consider this space to be empty). + + args: + rays: [batch_size x ... x 2 x 3] origin and direction. sampler: disjoint volume integrals. n_samples: + number of ts to sample. prev_model_outputs: model outputs from the previous rendering step, including + + :return: A tuple of + - `channels` + - A importance samplers for additional fine-grained rendering + - raw model output + """ + origin, direction = rays[..., 0, :], rays[..., 1, :] + + # Integrate over [t[i], t[i + 1]] + + # 1 Intersect the rays with the current volume and sample ts to integrate along. + vrange = self.volume.intersect(origin, direction, t0_lower=None) + ts = sampler.sample(vrange.t0, vrange.t1, n_samples) + ts = ts.to(rays.dtype) + + if prev_model_out is not None: + # Append the previous ts now before fprop because previous + # rendering used a different model and we can't reuse the output. + ts = torch.sort(torch.cat([ts, prev_model_out.ts], dim=-2), dim=-2).values + + batch_size, *_shape, _t0_dim = vrange.t0.shape + _, *ts_shape, _ts_dim = ts.shape + + # 2. Get the points along the ray and query the model + directions = torch.broadcast_to(direction.unsqueeze(-2), [batch_size, *ts_shape, 3]) + positions = origin.unsqueeze(-2) + ts * directions + + directions = directions.to(self.mlp.dtype) + positions = positions.to(self.mlp.dtype) + + optional_directions = directions if render_with_direction else None + + model_out = self.mlp( + position=positions, + direction=optional_directions, + ts=ts, + nerf_level="coarse" if prev_model_out is None else "fine", + ) + + # 3. Integrate the model results + channels, weights, transmittance = integrate_samples( + vrange, model_out.ts, model_out.density, model_out.channels + ) + + # 4. Clean up results that do not intersect with the volume. + transmittance = torch.where(vrange.intersected, transmittance, torch.ones_like(transmittance)) + channels = torch.where(vrange.intersected, channels, torch.zeros_like(channels)) + # 5. integration to infinity (e.g. [t[-1], math.inf]) that is evaluated by the void_model (i.e. we consider this space to be empty). + channels = channels + transmittance * self.void(origin) + + weighted_sampler = ImportanceRaySampler(vrange, ts=model_out.ts, weights=weights) + + return channels, weighted_sampler, model_out + + @torch.no_grad() + def decode_to_image( + self, + latents, + device, + size: int = 64, + ray_batch_size: int = 4096, + n_coarse_samples=64, + n_fine_samples=128, + ): + # project the parameters from the generated latents + projected_params = self.params_proj(latents) + + # update the mlp layers of the renderer + for name, param in self.mlp.state_dict().items(): + if f"nerstf.{name}" in projected_params.keys(): + param.copy_(projected_params[f"nerstf.{name}"].squeeze(0)) + + # create cameras object + camera = create_pan_cameras(size) + rays = camera.camera_rays + rays = rays.to(device) + n_batches = rays.shape[1] // ray_batch_size + + coarse_sampler = StratifiedRaySampler() + + images = [] + + for idx in range(n_batches): + rays_batch = rays[:, idx * ray_batch_size : (idx + 1) * ray_batch_size] + + # render rays with coarse, stratified samples. + _, fine_sampler, coarse_model_out = self.render_rays(rays_batch, coarse_sampler, n_coarse_samples) + # Then, render with additional importance-weighted ray samples. 
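# Editor's aside -- a toy, self-contained sketch of the importance re-sampling idea behind the
# fine pass (illustrative only; shapes and values below are made up). The coarse weights are
# turned into a per-ray PMF and the bin indices for the fine depths are drawn by inverse-CDF
# lookup, the same mechanism as `sample_pmf` defined near the top of this file.
import torch

demo_weights = torch.tensor([[0.1, 0.6, 0.2, 0.1]])                 # coarse density*transmittance per bin
demo_pmf = demo_weights / demo_weights.sum(dim=-1, keepdim=True)
demo_cdf = torch.cumsum(demo_pmf, dim=-1)
demo_u = torch.rand(1, 16)                                          # 16 fine samples for this one ray
demo_bins = torch.searchsorted(demo_cdf, demo_u).clamp(max=3)       # most samples fall in bin 1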
+ channels, _, _ = self.render_rays( + rays_batch, fine_sampler, n_fine_samples, prev_model_out=coarse_model_out + ) + + images.append(channels) + + images = torch.cat(images, dim=1) + images = images.view(*camera.shape, camera.height, camera.width, -1).squeeze(0) + + return images + + @torch.no_grad() + def decode_to_mesh( + self, + latents, + device, + grid_size: int = 128, + query_batch_size: int = 4096, + texture_channels: Tuple = ("R", "G", "B"), + ): + # 1. project the parameters from the generated latents + projected_params = self.params_proj(latents) + + # 2. update the mlp layers of the renderer + for name, param in self.mlp.state_dict().items(): + if f"nerstf.{name}" in projected_params.keys(): + param.copy_(projected_params[f"nerstf.{name}"].squeeze(0)) + + # 3. decoding with STF rendering + # 3.1 query the SDF values at vertices along a regular 128**3 grid + + query_points = volume_query_points(self.volume, grid_size) + query_positions = query_points[None].repeat(1, 1, 1).to(device=device, dtype=self.mlp.dtype) + + fields = [] + + for idx in range(0, query_positions.shape[1], query_batch_size): + query_batch = query_positions[:, idx : idx + query_batch_size] + + model_out = self.mlp( + position=query_batch, direction=None, ts=None, nerf_level="fine", rendering_mode="stf" + ) + fields.append(model_out.signed_distance) + + # predicted SDF values + fields = torch.cat(fields, dim=1) + fields = fields.float() + + assert ( + len(fields.shape) == 3 and fields.shape[-1] == 1 + ), f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}" + + fields = fields.reshape(1, *([grid_size] * 3)) + + # create grid 128 x 128 x 128 + # - force a negative border around the SDFs to close off all the models. + full_grid = torch.zeros( + 1, + grid_size + 2, + grid_size + 2, + grid_size + 2, + device=fields.device, + dtype=fields.dtype, + ) + full_grid.fill_(-1.0) + full_grid[:, 1:-1, 1:-1, 1:-1] = fields + fields = full_grid + + # apply a differentiable implementation of Marching Cubes to construct meshs + raw_meshes = [] + mesh_mask = [] + + for field in fields: + raw_mesh = self.mesh_decoder(field, self.volume.bbox_min, self.volume.bbox_max - self.volume.bbox_min) + mesh_mask.append(True) + raw_meshes.append(raw_mesh) + + mesh_mask = torch.tensor(mesh_mask, device=fields.device) + max_vertices = max(len(m.verts) for m in raw_meshes) + + # 3.2. query the texture color head at each vertex of the resulting mesh. 
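# Editor's aside (illustrative, not part of the upstream file): meshes in a batch can have
# different vertex counts, so the query below pads every mesh to `max_vertices` by wrapping
# vertex indices modulo its own length. Toy example padding 3 vertices up to 5:
import torch

demo_verts = torch.arange(9, dtype=torch.float32).reshape(3, 3)     # 3 vertices, xyz each
demo_padded = demo_verts[torch.arange(0, 5) % len(demo_verts)]      # reuses rows 0, 1, 2, 0, 1
assert demo_padded.shape == (5, 3)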
+ texture_query_positions = torch.stack( + [m.verts[torch.arange(0, max_vertices) % len(m.verts)] for m in raw_meshes], + dim=0, + ) + texture_query_positions = texture_query_positions.to(device=device, dtype=self.mlp.dtype) + + textures = [] + + for idx in range(0, texture_query_positions.shape[1], query_batch_size): + query_batch = texture_query_positions[:, idx : idx + query_batch_size] + + texture_model_out = self.mlp( + position=query_batch, direction=None, ts=None, nerf_level="fine", rendering_mode="stf" + ) + textures.append(texture_model_out.channels) + + # predict texture color + textures = torch.cat(textures, dim=1) + + textures = _convert_srgb_to_linear(textures) + textures = textures.float() + + # 3.3 augument the mesh with texture data + assert len(textures.shape) == 3 and textures.shape[-1] == len( + texture_channels + ), f"expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}" + + for m, texture in zip(raw_meshes, textures): + texture = texture[: len(m.verts)] + m.vertex_channels = dict(zip(texture_channels, texture.unbind(-1))) + + return raw_meshes[0] diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/__init__.py new file mode 100644 index 000000000..5270cb94a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_cascade"] = ["StableCascadeDecoderPipeline"] + _import_structure["pipeline_stable_cascade_combined"] = ["StableCascadeCombinedPipeline"] + _import_structure["pipeline_stable_cascade_prior"] = ["StableCascadePriorPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_stable_cascade import StableCascadeDecoderPipeline + from .pipeline_stable_cascade_combined import StableCascadeCombinedPipeline + from .pipeline_stable_cascade_prior import StableCascadePriorPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py new file mode 100644 index 000000000..a05fb9001 --- /dev/null +++ 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py @@ -0,0 +1,482 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPTextModel, CLIPTokenizer + +from ...models import StableCascadeUNet +from ...schedulers import DDPMWuerstchenScheduler +from ...utils import is_torch_version, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ..wuerstchen.modeling_paella_vq_model import PaellaVQModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableCascadePriorPipeline, StableCascadeDecoderPipeline + + >>> prior_pipe = StableCascadePriorPipeline.from_pretrained( + ... "stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16 + ... ).to("cuda") + >>> gen_pipe = StableCascadeDecoderPipeline.from_pretrain( + ... "stabilityai/stable-cascade", torch_dtype=torch.float16 + ... ).to("cuda") + + >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" + >>> prior_output = pipe(prompt) + >>> images = gen_pipe(prior_output.image_embeddings, prompt=prompt) + ``` +""" + + +class StableCascadeDecoderPipeline(DiffusionPipeline): + """ + Pipeline for generating images from the Stable Cascade model. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + tokenizer (`CLIPTokenizer`): + The CLIP tokenizer. + text_encoder (`CLIPTextModel`): + The CLIP text encoder. + decoder ([`StableCascadeUNet`]): + The Stable Cascade decoder unet. + vqgan ([`PaellaVQModel`]): + The VQGAN model. + scheduler ([`DDPMWuerstchenScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + latent_dim_scale (float, `optional`, defaults to 10.67): + Multiplier to determine the VQ latent space size from the image embeddings. If the image embeddings are + height=24 and width=24, the VQ latent shape needs to be height=int(24*10.67)=256 and + width=int(24*10.67)=256 in order to match the training conditions. 
+ """ + + unet_name = "decoder" + text_encoder_name = "text_encoder" + model_cpu_offload_seq = "text_encoder->decoder->vqgan" + _callback_tensor_inputs = [ + "latents", + "prompt_embeds_pooled", + "negative_prompt_embeds", + "image_embeddings", + ] + + def __init__( + self, + decoder: StableCascadeUNet, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModel, + scheduler: DDPMWuerstchenScheduler, + vqgan: PaellaVQModel, + latent_dim_scale: float = 10.67, + ) -> None: + super().__init__() + self.register_modules( + decoder=decoder, + tokenizer=tokenizer, + text_encoder=text_encoder, + scheduler=scheduler, + vqgan=vqgan, + ) + self.register_to_config(latent_dim_scale=latent_dim_scale) + + def prepare_latents(self, image_embeddings, num_images_per_prompt, dtype, device, generator, latents, scheduler): + batch_size, channels, height, width = image_embeddings.shape + latents_shape = ( + batch_size * num_images_per_prompt, + 4, + int(height * self.config.latent_dim_scale), + int(width * self.config.latent_dim_scale), + ) + + if latents is None: + latents = randn_tensor(latents_shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def encode_prompt( + self, + device, + batch_size, + num_images_per_prompt, + do_classifier_free_guidance, + prompt=None, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + prompt_embeds_pooled: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds_pooled: Optional[torch.FloatTensor] = None, + ): + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + attention_mask = attention_mask[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask.to(device), output_hidden_states=True + ) + prompt_embeds = text_encoder_output.hidden_states[-1] + if prompt_embeds_pooled is None: + prompt_embeds_pooled = text_encoder_output.text_embeds.unsqueeze(1) + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + prompt_embeds_pooled = prompt_embeds_pooled.to(dtype=self.text_encoder.dtype, device=device) + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + prompt_embeds_pooled = prompt_embeds_pooled.repeat_interleave(num_images_per_prompt, dim=0) + + if negative_prompt_embeds is None and do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * 
batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + negative_prompt_embeds_text_encoder_output = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=uncond_input.attention_mask.to(device), + output_hidden_states=True, + ) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.hidden_states[-1] + negative_prompt_embeds_pooled = negative_prompt_embeds_text_encoder_output.text_embeds.unsqueeze(1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + seq_len = negative_prompt_embeds_pooled.shape[1] + negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.to( + dtype=self.text_encoder.dtype, device=device + ) + negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + # done duplicates + + return prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled + + def check_inputs( + self, + prompt, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image_embeddings: Union[torch.FloatTensor, List[torch.FloatTensor]], + prompt: Union[str, List[str]] = None, + num_inference_steps: int = 10, + guidance_scale: float = 0.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + prompt_embeds_pooled: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds_pooled: Optional[torch.FloatTensor] = None, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image_embeddings (`torch.FloatTensor` or `List[torch.FloatTensor]`): + Image embeddings either extracted from an image or generated by a prior model. + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + num_inference_steps (`int`, *optional*, defaults to 10): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 0.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting + `decoder_guidance_scale > 1`. A higher guidance scale encourages generating images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `decoder_guidance_scale` is less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + prompt_embeds_pooled (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument.
+ negative_prompt_embeds_pooled (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds_pooled will be generated from `negative_prompt` input + argument. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is a list with the generated image + embeddings. + """ + + # 0. Define commonly used variables + device = self._execution_device + dtype = self.decoder.dtype + self._guidance_scale = guidance_scale + if is_torch_version("<", "2.2.0") and dtype == torch.bfloat16: + raise ValueError("`StableCascadeDecoderPipeline` requires torch>=2.2.0 when using `torch.bfloat16` dtype.") + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + if isinstance(image_embeddings, list): + image_embeddings = torch.cat(image_embeddings, dim=0) + batch_size = image_embeddings.shape[0] + + # 2. 
Encode caption + if prompt_embeds is None and negative_prompt_embeds is None: + _, prompt_embeds_pooled, _, negative_prompt_embeds_pooled = self.encode_prompt( + prompt=prompt, + device=device, + batch_size=batch_size, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + prompt_embeds_pooled=prompt_embeds_pooled, + negative_prompt_embeds=negative_prompt_embeds, + negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, + ) + + # The pooled embeds from the prior are pooled again before being passed to the decoder + prompt_embeds_pooled = ( + torch.cat([prompt_embeds_pooled, negative_prompt_embeds_pooled]) + if self.do_classifier_free_guidance + else prompt_embeds_pooled + ) + effnet = ( + torch.cat([image_embeddings, torch.zeros_like(image_embeddings)]) + if self.do_classifier_free_guidance + else image_embeddings + ) + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latents + latents = self.prepare_latents( + image_embeddings, num_images_per_prompt, dtype, device, generator, latents, self.scheduler + ) + + # 6. Run denoising loop + self._num_timesteps = len(timesteps[:-1]) + for i, t in enumerate(self.progress_bar(timesteps[:-1])): + timestep_ratio = t.expand(latents.size(0)).to(dtype) + + # 7. Denoise latents + predicted_latents = self.decoder( + sample=torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, + timestep_ratio=torch.cat([timestep_ratio] * 2) if self.do_classifier_free_guidance else timestep_ratio, + clip_text_pooled=prompt_embeds_pooled, + effnet=effnet, + return_dict=False, + )[0] + + # 8. Check for classifier free guidance and apply it + if self.do_classifier_free_guidance: + predicted_latents_text, predicted_latents_uncond = predicted_latents.chunk(2) + predicted_latents = torch.lerp(predicted_latents_uncond, predicted_latents_text, self.guidance_scale) + + # 9. Renoise latents to next timestep + latents = self.scheduler.step( + model_output=predicted_latents, + timestep=timestep_ratio, + sample=latents, + generator=generator, + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + if output_type not in ["pt", "np", "pil", "latent"]: + raise ValueError( + f"Only the output types `pt`, `np`, `pil` and `latent` are supported not output_type={output_type}" + ) + + if not output_type == "latent": + # 10. 
Scale and decode the image latents with vq-vae + latents = self.vqgan.config.scale_factor * latents + images = self.vqgan.decode(latents).sample.clamp(0, 1) + if output_type == "np": + images = images.permute(0, 2, 3, 1).cpu().float().numpy() # float() as bfloat16-> numpy doesnt work + elif output_type == "pil": + images = images.permute(0, 2, 3, 1).cpu().float().numpy() # float() as bfloat16-> numpy doesnt work + images = self.numpy_to_pil(images) + else: + images = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return images + return ImagePipelineOutput(images) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py new file mode 100644 index 000000000..07afdedac --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py @@ -0,0 +1,311 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Callable, Dict, List, Optional, Union + +import PIL +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...models import StableCascadeUNet +from ...schedulers import DDPMWuerstchenScheduler +from ...utils import is_torch_version, replace_example_docstring +from ..pipeline_utils import DiffusionPipeline +from ..wuerstchen.modeling_paella_vq_model import PaellaVQModel +from .pipeline_stable_cascade import StableCascadeDecoderPipeline +from .pipeline_stable_cascade_prior import StableCascadePriorPipeline + + +TEXT2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableCascadeCombinedPipeline + >>> pipe = StableCascadeCombinedPipeline.from_pretrained("stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16) + >>> pipe.enable_model_cpu_offload() + >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" + >>> images = pipe(prompt=prompt) + ``` +""" + + +class StableCascadeCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for text-to-image generation using Stable Cascade. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + tokenizer (`CLIPTokenizer`): + The decoder tokenizer to be used for text inputs. + text_encoder (`CLIPTextModel`): + The decoder text encoder to be used for text inputs. + decoder (`StableCascadeUNet`): + The decoder model to be used for decoder image generation pipeline. + scheduler (`DDPMWuerstchenScheduler`): + The scheduler to be used for decoder image generation pipeline. 
+ vqgan (`PaellaVQModel`): + The VQGAN model to be used for decoder image generation pipeline. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `image_encoder`. + image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + prior_prior (`StableCascadeUNet`): + The prior model to be used for prior pipeline. + prior_scheduler (`DDPMWuerstchenScheduler`): + The scheduler to be used for prior pipeline. + """ + + _load_connected_pipes = True + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModel, + decoder: StableCascadeUNet, + scheduler: DDPMWuerstchenScheduler, + vqgan: PaellaVQModel, + prior_prior: StableCascadeUNet, + prior_text_encoder: CLIPTextModel, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: DDPMWuerstchenScheduler, + prior_feature_extractor: Optional[CLIPImageProcessor] = None, + prior_image_encoder: Optional[CLIPVisionModelWithProjection] = None, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + decoder=decoder, + scheduler=scheduler, + vqgan=vqgan, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_prior=prior_prior, + prior_scheduler=prior_scheduler, + prior_feature_extractor=prior_feature_extractor, + prior_image_encoder=prior_image_encoder, + ) + self.prior_pipe = StableCascadePriorPipeline( + prior=prior_prior, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_encoder=prior_image_encoder, + feature_extractor=prior_feature_extractor, + ) + self.decoder_pipe = StableCascadeDecoderPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + decoder=decoder, + scheduler=scheduler, + vqgan=vqgan, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id) + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using 🤗 + Accelerate, significantly reducing memory usage. Models are moved to a `torch.device('meta')` and loaded on a + GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis. + Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower. 
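+
+        A minimal usage sketch (illustrative only; it assumes the same checkpoint as the class-level
+        example above):
+
+        ```py
+        pipe = StableCascadeCombinedPipeline.from_pretrained(
+            "stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16
+        )
+        pipe.enable_sequential_cpu_offload()  # smallest GPU memory footprint, slowest generation
+        image = pipe(prompt="an image of a shiba inu, donning a spacesuit and helmet").images[0]
+        ```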
+ """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + images: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]] = None, + height: int = 512, + width: int = 512, + prior_num_inference_steps: int = 60, + prior_guidance_scale: float = 4.0, + num_inference_steps: int = 12, + decoder_guidance_scale: float = 0.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + prompt_embeds_pooled: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds_pooled: Optional[torch.FloatTensor] = None, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation for the prior and decoder. + images (`torch.Tensor`, `PIL.Image.Image`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, *optional*): + The images to guide the image generation for the prior. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, text embeddings will be generated from `prompt` input argument. + prompt_embeds_pooled (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* + prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` + input argument. + negative_prompt_embeds_pooled (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* + prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` + input argument. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. 
+ height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `prior_guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting + `prior_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked + to the text `prompt`, usually at the expense of lower image quality. + prior_num_inference_steps (`Union[int, Dict[float, int]]`, *optional*, defaults to 60): + The number of prior denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. For more specific timestep spacing, you can pass customized + `prior_timesteps` + num_inference_steps (`int`, *optional*, defaults to 12): + The number of decoder denoising steps. More denoising steps usually lead to a higher quality image at + the expense of slower inference. For more specific timestep spacing, you can pass customized + `timesteps` + decoder_guidance_scale (`float`, *optional*, defaults to 0.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + prior_callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `prior_callback_on_step_end(self: DiffusionPipeline, step: int, timestep: + int, callback_kwargs: Dict)`. + prior_callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the + list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in + the `._callback_tensor_inputs` attribute of your pipeine class. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. 
`callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeine class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + dtype = self.decoder_pipe.decoder.dtype + if is_torch_version("<", "2.2.0") and dtype == torch.bfloat16: + raise ValueError( + "`StableCascadeCombinedPipeline` requires torch>=2.2.0 when using `torch.bfloat16` dtype." + ) + + prior_outputs = self.prior_pipe( + prompt=prompt if prompt_embeds is None else None, + images=images, + height=height, + width=width, + num_inference_steps=prior_num_inference_steps, + guidance_scale=prior_guidance_scale, + negative_prompt=negative_prompt if negative_prompt_embeds is None else None, + prompt_embeds=prompt_embeds, + prompt_embeds_pooled=prompt_embeds_pooled, + negative_prompt_embeds=negative_prompt_embeds, + negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + output_type="pt", + return_dict=True, + callback_on_step_end=prior_callback_on_step_end, + callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, + ) + image_embeddings = prior_outputs.image_embeddings + prompt_embeds = prior_outputs.get("prompt_embeds", None) + prompt_embeds_pooled = prior_outputs.get("prompt_embeds_pooled", None) + negative_prompt_embeds = prior_outputs.get("negative_prompt_embeds", None) + negative_prompt_embeds_pooled = prior_outputs.get("negative_prompt_embeds_pooled", None) + + outputs = self.decoder_pipe( + image_embeddings=image_embeddings, + prompt=prompt if prompt_embeds is None else None, + num_inference_steps=num_inference_steps, + guidance_scale=decoder_guidance_scale, + negative_prompt=negative_prompt if negative_prompt_embeds is None else None, + prompt_embeds=prompt_embeds, + prompt_embeds_pooled=prompt_embeds_pooled, + negative_prompt_embeds=negative_prompt_embeds, + negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, + generator=generator, + output_type=output_type, + return_dict=return_dict, + callback_on_step_end=callback_on_step_end, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + + return outputs diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py new file mode 100644 index 000000000..24ccc4b88 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py @@ -0,0 +1,638 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from math import ceil +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL +import torch +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...models import StableCascadeUNet +from ...schedulers import DDPMWuerstchenScheduler +from ...utils import BaseOutput, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +DEFAULT_STAGE_C_TIMESTEPS = list(np.linspace(1.0, 2 / 3, 20)) + list(np.linspace(2 / 3, 0.0, 11))[1:] + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableCascadePriorPipeline + + >>> prior_pipe = StableCascadePriorPipeline.from_pretrained( + ... "stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16 + ... ).to("cuda") + + >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" + >>> prior_output = prior_pipe(prompt) + ``` +""" + + +@dataclass +class StableCascadePriorPipelineOutput(BaseOutput): + """ + Output class for StableCascadePriorPipeline. + + Args: + image_embeddings (`torch.FloatTensor` or `np.ndarray`): + Prior image embeddings for the text prompt. + prompt_embeds (`torch.FloatTensor`): + Text embeddings for the prompt. + prompt_embeds_pooled (`torch.FloatTensor`): + Pooled text embeddings for the prompt. + negative_prompt_embeds (`torch.FloatTensor`): + Text embeddings for the negative prompt. + negative_prompt_embeds_pooled (`torch.FloatTensor`): + Pooled text embeddings for the negative prompt. + """ + + image_embeddings: Union[torch.FloatTensor, np.ndarray] + prompt_embeds: Union[torch.FloatTensor, np.ndarray] + prompt_embeds_pooled: Union[torch.FloatTensor, np.ndarray] + negative_prompt_embeds: Union[torch.FloatTensor, np.ndarray] + negative_prompt_embeds_pooled: Union[torch.FloatTensor, np.ndarray] + + +class StableCascadePriorPipeline(DiffusionPipeline): + """ + Pipeline for generating image prior for Stable Cascade. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + prior ([`StableCascadeUNet`]): + The Stable Cascade prior to approximate the image embedding from the text and/or image embedding. + text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). + feature_extractor ([`~transformers.CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `image_encoder`. + image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + scheduler ([`DDPMWuerstchenScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding.
+ resolution_multiple ('float', *optional*, defaults to 42.67): + Default resolution for multiple images generated. + """ + + unet_name = "prior" + text_encoder_name = "text_encoder" + model_cpu_offload_seq = "image_encoder->text_encoder->prior" + _optional_components = ["image_encoder", "feature_extractor"] + _callback_tensor_inputs = ["latents", "text_encoder_hidden_states", "negative_prompt_embeds"] + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModelWithProjection, + prior: StableCascadeUNet, + scheduler: DDPMWuerstchenScheduler, + resolution_multiple: float = 42.67, + feature_extractor: Optional[CLIPImageProcessor] = None, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + ) -> None: + super().__init__() + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + prior=prior, + scheduler=scheduler, + ) + self.register_to_config(resolution_multiple=resolution_multiple) + + def prepare_latents( + self, batch_size, height, width, num_images_per_prompt, dtype, device, generator, latents, scheduler + ): + latent_shape = ( + num_images_per_prompt * batch_size, + self.prior.config.in_channels, + ceil(height / self.config.resolution_multiple), + ceil(width / self.config.resolution_multiple), + ) + + if latents is None: + latents = randn_tensor(latent_shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != latent_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latent_shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def encode_prompt( + self, + device, + batch_size, + num_images_per_prompt, + do_classifier_free_guidance, + prompt=None, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + prompt_embeds_pooled: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds_pooled: Optional[torch.FloatTensor] = None, + ): + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + attention_mask = attention_mask[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask.to(device), output_hidden_states=True + ) + prompt_embeds = text_encoder_output.hidden_states[-1] + if prompt_embeds_pooled is None: + prompt_embeds_pooled = text_encoder_output.text_embeds.unsqueeze(1) + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + prompt_embeds_pooled = prompt_embeds_pooled.to(dtype=self.text_encoder.dtype, device=device) + prompt_embeds 
= prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + prompt_embeds_pooled = prompt_embeds_pooled.repeat_interleave(num_images_per_prompt, dim=0) + + if negative_prompt_embeds is None and do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + negative_prompt_embeds_text_encoder_output = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=uncond_input.attention_mask.to(device), + output_hidden_states=True, + ) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.hidden_states[-1] + negative_prompt_embeds_pooled = negative_prompt_embeds_text_encoder_output.text_embeds.unsqueeze(1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + seq_len = negative_prompt_embeds_pooled.shape[1] + negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.to( + dtype=self.text_encoder.dtype, device=device + ) + negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + # done duplicates + + return prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled + + def encode_image(self, images, device, dtype, batch_size, num_images_per_prompt): + image_embeds = [] + for image in images: + image = self.feature_extractor(image, return_tensors="pt").pixel_values + image = image.to(device=device, dtype=dtype) + image_embed = self.image_encoder(image).image_embeds.unsqueeze(1) + image_embeds.append(image_embed) + image_embeds = torch.cat(image_embeds, dim=1) + + image_embeds = image_embeds.repeat(batch_size * num_images_per_prompt, 1, 1) + negative_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, negative_image_embeds + + def check_inputs( + self, + prompt, + images=None, + image_embeds=None, + negative_prompt=None, + prompt_embeds=None, + prompt_embeds_pooled=None, + negative_prompt_embeds=None, + negative_prompt_embeds_pooled=None, + callback_on_step_end_tensor_inputs=None, + ): + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in 
{self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and prompt_embeds_pooled is None: + raise ValueError( + "If `prompt_embeds` are provided, `prompt_embeds_pooled` must also be provided. Make sure to generate `prompt_embeds_pooled` from the same text encoder that was used to generate `prompt_embeds`" + ) + + if negative_prompt_embeds is not None and negative_prompt_embeds_pooled is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_pooled` must also be provided. Make sure to generate `prompt_embeds_pooled` from the same text encoder that was used to generate `prompt_embeds`" + ) + + if prompt_embeds_pooled is not None and negative_prompt_embeds_pooled is not None: + if prompt_embeds_pooled.shape != negative_prompt_embeds_pooled.shape: + raise ValueError( + "`prompt_embeds_pooled` and `negative_prompt_embeds_pooled` must have the same shape when passed" + f"directly, but got: `prompt_embeds_pooled` {prompt_embeds_pooled.shape} !=" + f"`negative_prompt_embeds_pooled` {negative_prompt_embeds_pooled.shape}." + ) + + if image_embeds is not None and images is not None: + raise ValueError( + f"Cannot forward both `images`: {images} and `image_embeds`: {image_embeds}. Please make sure to" + " only forward one of the two." + ) + + if images: + for i, image in enumerate(images): + if not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image): + raise TypeError( + f"'images' must contain images of type 'torch.Tensor' or 'PIL.Image.Image, but got" + f"{type(image)} for image number {i}." 
+ ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + def get_timestep_ratio_conditioning(self, t, alphas_cumprod): + s = torch.tensor([0.003]) + clamp_range = [0, 1] + min_var = torch.cos(s / (1 + s) * torch.pi * 0.5) ** 2 + var = alphas_cumprod[t] + var = var.clamp(*clamp_range) + s, min_var = s.to(var.device), min_var.to(var.device) + ratio = (((var * min_var) ** 0.5).acos() / (torch.pi * 0.5)) * (1 + s) - s + return ratio + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + images: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]] = None, + height: int = 1024, + width: int = 1024, + num_inference_steps: int = 20, + timesteps: List[float] = None, + guidance_scale: float = 4.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + prompt_embeds_pooled: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds_pooled: Optional[torch.FloatTensor] = None, + image_embeds: Optional[torch.FloatTensor] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pt", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + height (`int`, *optional*, defaults to 1024): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 1024): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 60): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 8.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting + `decoder_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `decoder_guidance_scale` is less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + prompt_embeds_pooled (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. 
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_prompt_embeds_pooled (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds_pooled will be generated from `negative_prompt` input + argument. + image_embeds (`torch.FloatTensor`, *optional*): + Pre-generated image embeddings. Can be used to easily tweak image inputs, *e.g.* prompt weighting. + If not provided, image embeddings will be generated from `image` input argument if existing. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`StableCascadePriorPipelineOutput`] or `tuple` [`StableCascadePriorPipelineOutput`] if + `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the + generated image embeddings. + """ + + # 0. Define commonly used variables + device = self._execution_device + dtype = next(self.prior.parameters()).dtype + self._guidance_scale = guidance_scale + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # 1. Check inputs. 
Raise error if not correct + self.check_inputs( + prompt, + images=images, + image_embeds=image_embeds, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + prompt_embeds_pooled=prompt_embeds_pooled, + negative_prompt_embeds=negative_prompt_embeds, + negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + + # 2. Encode caption + images + ( + prompt_embeds, + prompt_embeds_pooled, + negative_prompt_embeds, + negative_prompt_embeds_pooled, + ) = self.encode_prompt( + prompt=prompt, + device=device, + batch_size=batch_size, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + prompt_embeds_pooled=prompt_embeds_pooled, + negative_prompt_embeds=negative_prompt_embeds, + negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, + ) + + if images is not None: + image_embeds_pooled, uncond_image_embeds_pooled = self.encode_image( + images=images, + device=device, + dtype=dtype, + batch_size=batch_size, + num_images_per_prompt=num_images_per_prompt, + ) + elif image_embeds is not None: + image_embeds_pooled = image_embeds.repeat(batch_size * num_images_per_prompt, 1, 1) + uncond_image_embeds_pooled = torch.zeros_like(image_embeds_pooled) + else: + image_embeds_pooled = torch.zeros( + batch_size * num_images_per_prompt, + 1, + self.prior.config.clip_image_in_channels, + device=device, + dtype=dtype, + ) + uncond_image_embeds_pooled = torch.zeros( + batch_size * num_images_per_prompt, + 1, + self.prior.config.clip_image_in_channels, + device=device, + dtype=dtype, + ) + + if self.do_classifier_free_guidance: + image_embeds = torch.cat([image_embeds_pooled, uncond_image_embeds_pooled], dim=0) + else: + image_embeds = image_embeds_pooled + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_encoder_hidden_states = ( + torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds + ) + text_encoder_pooled = ( + torch.cat([prompt_embeds_pooled, negative_prompt_embeds_pooled]) + if negative_prompt_embeds is not None + else prompt_embeds_pooled + ) + + # 4. Prepare and set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latents + latents = self.prepare_latents( + batch_size, height, width, num_images_per_prompt, dtype, device, generator, latents, self.scheduler + ) + + if isinstance(self.scheduler, DDPMWuerstchenScheduler): + timesteps = timesteps[:-1] + else: + if self.scheduler.config.clip_sample: + self.scheduler.config.clip_sample = False # disample sample clipping + logger.warning(" set `clip_sample` to be False") + # 6. 
Run denoising loop + if hasattr(self.scheduler, "betas"): + alphas = 1.0 - self.scheduler.betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + else: + alphas_cumprod = [] + + self._num_timesteps = len(timesteps) + for i, t in enumerate(self.progress_bar(timesteps)): + if not isinstance(self.scheduler, DDPMWuerstchenScheduler): + if len(alphas_cumprod) > 0: + timestep_ratio = self.get_timestep_ratio_conditioning(t.long().cpu(), alphas_cumprod) + timestep_ratio = timestep_ratio.expand(latents.size(0)).to(dtype).to(device) + else: + timestep_ratio = t.float().div(self.scheduler.timesteps[-1]).expand(latents.size(0)).to(dtype) + else: + timestep_ratio = t.expand(latents.size(0)).to(dtype) + # 7. Denoise image embeddings + predicted_image_embedding = self.prior( + sample=torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, + timestep_ratio=torch.cat([timestep_ratio] * 2) if self.do_classifier_free_guidance else timestep_ratio, + clip_text_pooled=text_encoder_pooled, + clip_text=text_encoder_hidden_states, + clip_img=image_embeds, + return_dict=False, + )[0] + + # 8. Check for classifier free guidance and apply it + if self.do_classifier_free_guidance: + predicted_image_embedding_text, predicted_image_embedding_uncond = predicted_image_embedding.chunk(2) + predicted_image_embedding = torch.lerp( + predicted_image_embedding_uncond, predicted_image_embedding_text, self.guidance_scale + ) + + # 9. Renoise latents to next timestep + if not isinstance(self.scheduler, DDPMWuerstchenScheduler): + timestep_ratio = t + latents = self.scheduler.step( + model_output=predicted_image_embedding, timestep=timestep_ratio, sample=latents, generator=generator + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # Offload all models + self.maybe_free_model_hooks() + + if output_type == "np": + latents = latents.cpu().float().numpy() # float() as bfloat16-> numpy doesnt work + prompt_embeds = prompt_embeds.cpu().float().numpy() # float() as bfloat16-> numpy doesnt work + negative_prompt_embeds = ( + negative_prompt_embeds.cpu().float().numpy() if negative_prompt_embeds is not None else None + ) # float() as bfloat16-> numpy doesnt work + + if not return_dict: + return ( + latents, + prompt_embeds, + prompt_embeds_pooled, + negative_prompt_embeds, + negative_prompt_embeds_pooled, + ) + + return StableCascadePriorPipelineOutput( + image_embeddings=latents, + prompt_embeds=prompt_embeds, + prompt_embeds_pooled=prompt_embeds_pooled, + negative_prompt_embeds=negative_prompt_embeds, + negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/README.md b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/README.md new file mode 100644 index 000000000..5b6424308 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/README.md @@ -0,0 +1,176 @@ +# Stable Diffusion + +## Overview + +Stable Diffusion was proposed in [Stable Diffusion 
Announcement](https://stability.ai/blog/stable-diffusion-announcement) by Patrick Esser and Robin Rombach and the Stability AI team.
+
+The summary of the model is the following:
+
+*Stable Diffusion is a text-to-image model that will empower billions of people to create stunning art within seconds. It is a breakthrough in speed and quality meaning that it can run on consumer GPUs. You can see some of the amazing output that has been created by this model without pre or post-processing on this page. The model itself builds upon the work of the team at CompVis and Runway in their widely used latent diffusion model combined with insights from the conditional diffusion models by our lead generative AI developer Katherine Crowson, Dall-E 2 by Open AI, Imagen by Google Brain and many others. We are delighted that AI media generation is a cooperative field and hope it can continue this way to bring the gift of creativity to all.*
+
+## Tips:
+
+- Stable Diffusion has the same architecture as [Latent Diffusion](https://arxiv.org/abs/2112.10752) but uses a frozen CLIP Text Encoder instead of training the text encoder jointly with the diffusion model.
+- An in-detail explanation of the Stable Diffusion model can be found under [Stable Diffusion with 🧨 Diffusers](https://huggingface.co/blog/stable_diffusion).
+- If you don't want to rely on the Hugging Face Hub or pass an authentication token, you can
+download the weights with `git lfs install; git clone https://huggingface.co/runwayml/stable-diffusion-v1-5` and instead pass the local path to the cloned folder to `from_pretrained` as shown below.
+- Stable Diffusion can work with a variety of different samplers, as shown below.
+
+## Available Pipelines:
+
+| Pipeline | Tasks | Colab
+|---|---|:---:|
+| [pipeline_stable_diffusion.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py) | *Text-to-Image Generation* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb)
+| [pipeline_stable_diffusion_img2img](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py) | *Image-to-Image Text-Guided Generation* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb)
+| [pipeline_stable_diffusion_inpaint](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py) | *Text-Guided Image Inpainting* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb)
+
+## Examples:
+
+### Using Stable Diffusion without being logged into the Hub.
+
+If you want to download the model weights using a single Python line, you need to be logged in via `huggingface-cli login`.
+
+```python
+from diffusers import DiffusionPipeline
+
+pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+```
+
+This, however, can make it difficult to build applications on top of `diffusers`, as you will always have to pass the token around.
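+For illustration, every call site that loads the weights would then need access to your Hub token, along the lines of the sketch below (the exact keyword argument, `use_auth_token` here, depends on the `diffusers`/`huggingface_hub` version, so treat it as an assumption rather than part of this README's API):
+
+```python
+from diffusers import DiffusionPipeline
+
+# The token has to be threaded through to every place that constructs the pipeline.
+pipeline = DiffusionPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5",
+    use_auth_token=True,  # or an explicit token string read from your own configuration
+)
+```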
A potential way to solve this issue is by downloading the weights to a local path `"./stable-diffusion-v1-5"`: + +``` +git lfs install +git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 +``` + +and simply passing the local path to `from_pretrained`: + +```python +from diffusers import StableDiffusionPipeline + +pipe = StableDiffusionPipeline.from_pretrained("./stable-diffusion-v1-5") +``` + +### Text-to-Image with default PLMS scheduler + +```python +# make sure you're logged in with `huggingface-cli login` +from diffusers import StableDiffusionPipeline + +pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") +pipe = pipe.to("cuda") + +prompt = "a photo of an astronaut riding a horse on mars" +image = pipe(prompt).images[0] + +image.save("astronaut_rides_horse.png") +``` + +### Text-to-Image with DDIM scheduler + +```python +# make sure you're logged in with `huggingface-cli login` +from diffusers import StableDiffusionPipeline, DDIMScheduler + +scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") + +pipe = StableDiffusionPipeline.from_pretrained( + "runwayml/stable-diffusion-v1-5", + scheduler=scheduler, +).to("cuda") + +prompt = "a photo of an astronaut riding a horse on mars" +image = pipe(prompt).images[0] + +image.save("astronaut_rides_horse.png") +``` + +### Text-to-Image with K-LMS scheduler + +```python +# make sure you're logged in with `huggingface-cli login` +from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler + +lms = LMSDiscreteScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") + +pipe = StableDiffusionPipeline.from_pretrained( + "runwayml/stable-diffusion-v1-5", + scheduler=lms, +).to("cuda") + +prompt = "a photo of an astronaut riding a horse on mars" +image = pipe(prompt).images[0] + +image.save("astronaut_rides_horse.png") +``` + +### CycleDiffusion using Stable Diffusion and DDIM scheduler + +```python +import requests +import torch +from PIL import Image +from io import BytesIO + +from diffusers import CycleDiffusionPipeline, DDIMScheduler + + +# load the scheduler. CycleDiffusion only supports stochastic schedulers. 
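+# (DDIM qualifies because it becomes stochastic when `eta` > 0 is passed at call time,
+#  which is what the pipeline calls below do.)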
+ +# load the pipeline +# make sure you're logged in with `huggingface-cli login` +model_id_or_path = "CompVis/stable-diffusion-v1-4" +scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler") +pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda") + +# let's download an initial image +url = "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/An%20astronaut%20riding%20a%20horse.png" +response = requests.get(url) +init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = init_image.resize((512, 512)) +init_image.save("horse.png") + +# let's specify a prompt +source_prompt = "An astronaut riding a horse" +prompt = "An astronaut riding an elephant" + +# call the pipeline +image = pipe( + prompt=prompt, + source_prompt=source_prompt, + image=init_image, + num_inference_steps=100, + eta=0.1, + strength=0.8, + guidance_scale=2, + source_guidance_scale=1, +).images[0] + +image.save("horse_to_elephant.png") + +# let's try another example +# See more samples at the original repo: https://github.com/ChenWu98/cycle-diffusion +url = "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/A%20black%20colored%20car.png" +response = requests.get(url) +init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = init_image.resize((512, 512)) +init_image.save("black.png") + +source_prompt = "A black colored car" +prompt = "A blue colored car" + +# call the pipeline +torch.manual_seed(0) +image = pipe( + prompt=prompt, + source_prompt=source_prompt, + image=init_image, + num_inference_steps=100, + eta=0.1, + strength=0.85, + guidance_scale=3, + source_guidance_scale=1, +).images[0] + +image.save("black_to_blue.png") +``` diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/__init__.py new file mode 100644 index 000000000..0eda32d33 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/__init__.py @@ -0,0 +1,203 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_k_diffusion_available, + is_k_diffusion_version, + is_onnx_available, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_additional_imports = {} +_import_structure = {"pipeline_output": ["StableDiffusionPipelineOutput"]} + +if is_transformers_available() and is_flax_available(): + _import_structure["pipeline_output"].extend(["FlaxStableDiffusionPipelineOutput"]) +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["clip_image_project_model"] = ["CLIPImageProjection"] + _import_structure["pipeline_cycle_diffusion"] = ["CycleDiffusionPipeline"] + _import_structure["pipeline_stable_diffusion"] = ["StableDiffusionPipeline"] + _import_structure["pipeline_stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"] + 
_import_structure["pipeline_stable_diffusion_gligen"] = ["StableDiffusionGLIGENPipeline"] + _import_structure["pipeline_stable_diffusion_gligen_text_image"] = ["StableDiffusionGLIGENTextImagePipeline"] + _import_structure["pipeline_stable_diffusion_img2img"] = ["StableDiffusionImg2ImgPipeline"] + _import_structure["pipeline_stable_diffusion_inpaint"] = ["StableDiffusionInpaintPipeline"] + _import_structure["pipeline_stable_diffusion_inpaint_legacy"] = ["StableDiffusionInpaintPipelineLegacy"] + _import_structure["pipeline_stable_diffusion_instruct_pix2pix"] = ["StableDiffusionInstructPix2PixPipeline"] + _import_structure["pipeline_stable_diffusion_latent_upscale"] = ["StableDiffusionLatentUpscalePipeline"] + _import_structure["pipeline_stable_diffusion_model_editing"] = ["StableDiffusionModelEditingPipeline"] + _import_structure["pipeline_stable_diffusion_paradigms"] = ["StableDiffusionParadigmsPipeline"] + _import_structure["pipeline_stable_diffusion_upscale"] = ["StableDiffusionUpscalePipeline"] + _import_structure["pipeline_stable_unclip"] = ["StableUnCLIPPipeline"] + _import_structure["pipeline_stable_unclip_img2img"] = ["StableUnCLIPImg2ImgPipeline"] + _import_structure["safety_checker"] = ["StableDiffusionSafetyChecker"] + _import_structure["stable_unclip_image_normalizer"] = ["StableUnCLIPImageNormalizer"] +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + StableDiffusionImageVariationPipeline, + ) + + _dummy_objects.update({"StableDiffusionImageVariationPipeline": StableDiffusionImageVariationPipeline}) +else: + _import_structure["pipeline_stable_diffusion_image_variation"] = ["StableDiffusionImageVariationPipeline"] +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + StableDiffusionDepth2ImgPipeline, + ) + + _dummy_objects.update( + { + "StableDiffusionDepth2ImgPipeline": StableDiffusionDepth2ImgPipeline, + } + ) +else: + _import_structure["pipeline_stable_diffusion_depth2img"] = ["StableDiffusionDepth2ImgPipeline"] + +try: + if not (is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_onnx_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_onnx_objects)) +else: + _import_structure["pipeline_onnx_stable_diffusion"] = [ + "OnnxStableDiffusionPipeline", + "StableDiffusionOnnxPipeline", + ] + _import_structure["pipeline_onnx_stable_diffusion_img2img"] = ["OnnxStableDiffusionImg2ImgPipeline"] + _import_structure["pipeline_onnx_stable_diffusion_inpaint"] = ["OnnxStableDiffusionInpaintPipeline"] + _import_structure["pipeline_onnx_stable_diffusion_inpaint_legacy"] = ["OnnxStableDiffusionInpaintPipelineLegacy"] + _import_structure["pipeline_onnx_stable_diffusion_upscale"] = ["OnnxStableDiffusionUpscalePipeline"] + +if is_transformers_available() and is_flax_available(): + from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState + + _additional_imports.update({"PNDMSchedulerState": PNDMSchedulerState}) + _import_structure["pipeline_flax_stable_diffusion"] = ["FlaxStableDiffusionPipeline"] + 
_import_structure["pipeline_flax_stable_diffusion_img2img"] = ["FlaxStableDiffusionImg2ImgPipeline"] + _import_structure["pipeline_flax_stable_diffusion_inpaint"] = ["FlaxStableDiffusionInpaintPipeline"] + _import_structure["safety_checker_flax"] = ["FlaxStableDiffusionSafetyChecker"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .clip_image_project_model import CLIPImageProjection + from .pipeline_stable_diffusion import ( + StableDiffusionPipeline, + StableDiffusionPipelineOutput, + StableDiffusionSafetyChecker, + ) + from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline + from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline + from .pipeline_stable_diffusion_instruct_pix2pix import ( + StableDiffusionInstructPix2PixPipeline, + ) + from .pipeline_stable_diffusion_latent_upscale import ( + StableDiffusionLatentUpscalePipeline, + ) + from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline + from .pipeline_stable_unclip import StableUnCLIPPipeline + from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline + from .safety_checker import StableDiffusionSafetyChecker + from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer + + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + StableDiffusionImageVariationPipeline, + ) + else: + from .pipeline_stable_diffusion_image_variation import ( + StableDiffusionImageVariationPipeline, + ) + + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import StableDiffusionDepth2ImgPipeline + else: + from .pipeline_stable_diffusion_depth2img import ( + StableDiffusionDepth2ImgPipeline, + ) + + try: + if not (is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_onnx_objects import * + else: + from .pipeline_onnx_stable_diffusion import ( + OnnxStableDiffusionPipeline, + StableDiffusionOnnxPipeline, + ) + from .pipeline_onnx_stable_diffusion_img2img import ( + OnnxStableDiffusionImg2ImgPipeline, + ) + from .pipeline_onnx_stable_diffusion_inpaint import ( + OnnxStableDiffusionInpaintPipeline, + ) + from .pipeline_onnx_stable_diffusion_upscale import ( + OnnxStableDiffusionUpscalePipeline, + ) + + try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_flax_objects import * + else: + from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline + from .pipeline_flax_stable_diffusion_img2img import ( + FlaxStableDiffusionImg2ImgPipeline, + ) + from .pipeline_flax_stable_diffusion_inpaint import ( + FlaxStableDiffusionInpaintPipeline, + ) + from .pipeline_output import FlaxStableDiffusionPipelineOutput + from .safety_checker_flax import FlaxStableDiffusionSafetyChecker + +else: + import sys + + 
sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) + for name, value in _additional_imports.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/clip_image_project_model.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/clip_image_project_model.py new file mode 100644 index 000000000..71f9d9714 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/clip_image_project_model.py @@ -0,0 +1,29 @@ +# Copyright 2024 The GLIGEN Authors and HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models.modeling_utils import ModelMixin + + +class CLIPImageProjection(ModelMixin, ConfigMixin): + @register_to_config + def __init__(self, hidden_size: int = 768): + super().__init__() + self.hidden_size = hidden_size + self.project = nn.Linear(self.hidden_size, self.hidden_size, bias=False) + + def forward(self, x): + return self.project(x) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py new file mode 100644 index 000000000..30c3c5b51 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py @@ -0,0 +1,1860 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Conversion script for the Stable Diffusion checkpoints.""" + +import re +from contextlib import nullcontext +from io import BytesIO +from typing import Dict, Optional, Union + +import requests +import torch +import yaml +from transformers import ( + AutoFeatureExtractor, + BertTokenizerFast, + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +from ...models import ( + AutoencoderKL, + ControlNetModel, + PriorTransformer, + UNet2DConditionModel, +) +from ...schedulers import ( + DDIMScheduler, + DDPMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + HeunDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, + UnCLIPScheduler, +) +from ...utils import is_accelerate_available, logging +from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel +from ..paint_by_example import PaintByExampleImageEncoder +from ..pipeline_utils import DiffusionPipeline +from .safety_checker import StableDiffusionSafetyChecker +from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer + + +if is_accelerate_available(): + from accelerate import init_empty_weights + from accelerate.utils import set_module_tensor_to_device + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def shave_segments(path, n_shave_prefix_segments=1): + """ + Removes segments. Positive values shave the first segments, negative shave the last segments. + """ + if n_shave_prefix_segments >= 0: + return ".".join(path.split(".")[n_shave_prefix_segments:]) + else: + return ".".join(path.split(".")[:n_shave_prefix_segments]) + + +def renew_resnet_paths(old_list, n_shave_prefix_segments=0): + """ + Updates paths inside resnets to the new naming scheme (local renaming) + """ + mapping = [] + for old_item in old_list: + new_item = old_item.replace("in_layers.0", "norm1") + new_item = new_item.replace("in_layers.2", "conv1") + + new_item = new_item.replace("out_layers.0", "norm2") + new_item = new_item.replace("out_layers.3", "conv2") + + new_item = new_item.replace("emb_layers.1", "time_emb_proj") + new_item = new_item.replace("skip_connection", "conv_shortcut") + + new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) + + mapping.append({"old": old_item, "new": new_item}) + + return mapping + + +def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0): + """ + Updates paths inside resnets to the new naming scheme (local renaming) + """ + mapping = [] + for old_item in old_list: + new_item = old_item + + new_item = new_item.replace("nin_shortcut", "conv_shortcut") + new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) + + mapping.append({"old": old_item, "new": new_item}) + + return mapping + + +def renew_attention_paths(old_list, n_shave_prefix_segments=0): + """ + Updates paths inside attentions to the new naming scheme (local renaming) + """ + mapping = [] + for old_item in old_list: + new_item = old_item + + # new_item = new_item.replace('norm.weight', 'group_norm.weight') + # new_item = new_item.replace('norm.bias', 'group_norm.bias') + + # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') + # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') + + # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) + + mapping.append({"old": old_item, "new": new_item}) + + return mapping + + 
+def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): + """ + Updates paths inside attentions to the new naming scheme (local renaming) + """ + mapping = [] + for old_item in old_list: + new_item = old_item + + new_item = new_item.replace("norm.weight", "group_norm.weight") + new_item = new_item.replace("norm.bias", "group_norm.bias") + + new_item = new_item.replace("q.weight", "to_q.weight") + new_item = new_item.replace("q.bias", "to_q.bias") + + new_item = new_item.replace("k.weight", "to_k.weight") + new_item = new_item.replace("k.bias", "to_k.bias") + + new_item = new_item.replace("v.weight", "to_v.weight") + new_item = new_item.replace("v.bias", "to_v.bias") + + new_item = new_item.replace("proj_out.weight", "to_out.0.weight") + new_item = new_item.replace("proj_out.bias", "to_out.0.bias") + + new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) + + mapping.append({"old": old_item, "new": new_item}) + + return mapping + + +def assign_to_checkpoint( + paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None +): + """ + This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits + attention layers, and takes into account additional replacements that may arise. + + Assigns the weights to the new checkpoint. + """ + assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." + + # Splits the attention layers into three variables. + if attention_paths_to_split is not None: + for path, path_map in attention_paths_to_split.items(): + old_tensor = old_checkpoint[path] + channels = old_tensor.shape[0] // 3 + + target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) + + num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 + + old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) + query, key, value = old_tensor.split(channels // num_heads, dim=1) + + checkpoint[path_map["query"]] = query.reshape(target_shape) + checkpoint[path_map["key"]] = key.reshape(target_shape) + checkpoint[path_map["value"]] = value.reshape(target_shape) + + for path in paths: + new_path = path["new"] + + # These have already been assigned + if attention_paths_to_split is not None and new_path in attention_paths_to_split: + continue + + # Global renaming happens here + new_path = new_path.replace("middle_block.0", "mid_block.resnets.0") + new_path = new_path.replace("middle_block.1", "mid_block.attentions.0") + new_path = new_path.replace("middle_block.2", "mid_block.resnets.1") + + if additional_replacements is not None: + for replacement in additional_replacements: + new_path = new_path.replace(replacement["old"], replacement["new"]) + + # proj_attn.weight has to be converted from conv 1D to linear + is_attn_weight = "proj_attn.weight" in new_path or ("attentions" in new_path and "to_" in new_path) + shape = old_checkpoint[path["old"]].shape + if is_attn_weight and len(shape) == 3: + checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0] + elif is_attn_weight and len(shape) == 4: + checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0, 0] + else: + checkpoint[new_path] = old_checkpoint[path["old"]] + + +def conv_attn_to_linear(checkpoint): + keys = list(checkpoint.keys()) + attn_keys = ["query.weight", "key.weight", "value.weight"] + for key in keys: + if ".".join(key.split(".")[-2:]) in attn_keys: + if checkpoint[key].ndim > 2: + checkpoint[key] = 
checkpoint[key][:, :, 0, 0] + elif "proj_attn.weight" in key: + if checkpoint[key].ndim > 2: + checkpoint[key] = checkpoint[key][:, :, 0] + + +def create_unet_diffusers_config(original_config, image_size: int, controlnet=False): + """ + Creates a config for the diffusers based on the config of the LDM model. + """ + if controlnet: + unet_params = original_config["model"]["params"]["control_stage_config"]["params"] + else: + if ( + "unet_config" in original_config["model"]["params"] + and original_config["model"]["params"]["unet_config"] is not None + ): + unet_params = original_config["model"]["params"]["unet_config"]["params"] + else: + unet_params = original_config["model"]["params"]["network_config"]["params"] + + vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] + + block_out_channels = [unet_params["model_channels"] * mult for mult in unet_params["channel_mult"]] + + down_block_types = [] + resolution = 1 + for i in range(len(block_out_channels)): + block_type = "CrossAttnDownBlock2D" if resolution in unet_params["attention_resolutions"] else "DownBlock2D" + down_block_types.append(block_type) + if i != len(block_out_channels) - 1: + resolution *= 2 + + up_block_types = [] + for i in range(len(block_out_channels)): + block_type = "CrossAttnUpBlock2D" if resolution in unet_params["attention_resolutions"] else "UpBlock2D" + up_block_types.append(block_type) + resolution //= 2 + + if unet_params["transformer_depth"] is not None: + transformer_layers_per_block = ( + unet_params["transformer_depth"] + if isinstance(unet_params["transformer_depth"], int) + else list(unet_params["transformer_depth"]) + ) + else: + transformer_layers_per_block = 1 + + vae_scale_factor = 2 ** (len(vae_params["ch_mult"]) - 1) + + head_dim = unet_params["num_heads"] if "num_heads" in unet_params else None + use_linear_projection = ( + unet_params["use_linear_in_transformer"] if "use_linear_in_transformer" in unet_params else False + ) + if use_linear_projection: + # stable diffusion 2-base-512 and 2-768 + if head_dim is None: + head_dim_mult = unet_params["model_channels"] // unet_params["num_head_channels"] + head_dim = [head_dim_mult * c for c in list(unet_params["channel_mult"])] + + class_embed_type = None + addition_embed_type = None + addition_time_embed_dim = None + projection_class_embeddings_input_dim = None + context_dim = None + + if unet_params["context_dim"] is not None: + context_dim = ( + unet_params["context_dim"] + if isinstance(unet_params["context_dim"], int) + else unet_params["context_dim"][0] + ) + + if "num_classes" in unet_params: + if unet_params["num_classes"] == "sequential": + if context_dim in [2048, 1280]: + # SDXL + addition_embed_type = "text_time" + addition_time_embed_dim = 256 + else: + class_embed_type = "projection" + assert "adm_in_channels" in unet_params + projection_class_embeddings_input_dim = unet_params["adm_in_channels"] + + config = { + "sample_size": image_size // vae_scale_factor, + "in_channels": unet_params["in_channels"], + "down_block_types": tuple(down_block_types), + "block_out_channels": tuple(block_out_channels), + "layers_per_block": unet_params["num_res_blocks"], + "cross_attention_dim": context_dim, + "attention_head_dim": head_dim, + "use_linear_projection": use_linear_projection, + "class_embed_type": class_embed_type, + "addition_embed_type": addition_embed_type, + "addition_time_embed_dim": addition_time_embed_dim, + "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim, + 
"transformer_layers_per_block": transformer_layers_per_block, + } + + if "disable_self_attentions" in unet_params: + config["only_cross_attention"] = unet_params["disable_self_attentions"] + + if "num_classes" in unet_params and isinstance(unet_params["num_classes"], int): + config["num_class_embeds"] = unet_params["num_classes"] + + if controlnet: + config["conditioning_channels"] = unet_params["hint_channels"] + else: + config["out_channels"] = unet_params["out_channels"] + config["up_block_types"] = tuple(up_block_types) + + return config + + +def create_vae_diffusers_config(original_config, image_size: int): + """ + Creates a config for the diffusers based on the config of the LDM model. + """ + vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] + _ = original_config["model"]["params"]["first_stage_config"]["params"]["embed_dim"] + + block_out_channels = [vae_params["ch"] * mult for mult in vae_params["ch_mult"]] + down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels) + up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels) + + config = { + "sample_size": image_size, + "in_channels": vae_params["in_channels"], + "out_channels": vae_params["out_ch"], + "down_block_types": tuple(down_block_types), + "up_block_types": tuple(up_block_types), + "block_out_channels": tuple(block_out_channels), + "latent_channels": vae_params["z_channels"], + "layers_per_block": vae_params["num_res_blocks"], + } + return config + + +def create_diffusers_schedular(original_config): + schedular = DDIMScheduler( + num_train_timesteps=original_config["model"]["params"]["timesteps"], + beta_start=original_config["model"]["params"]["linear_start"], + beta_end=original_config["model"]["params"]["linear_end"], + beta_schedule="scaled_linear", + ) + return schedular + + +def create_ldm_bert_config(original_config): + bert_params = original_config["model"]["params"]["cond_stage_config"]["params"] + config = LDMBertConfig( + d_model=bert_params.n_embed, + encoder_layers=bert_params.n_layer, + encoder_ffn_dim=bert_params.n_embed * 4, + ) + return config + + +def convert_ldm_unet_checkpoint( + checkpoint, config, path=None, extract_ema=False, controlnet=False, skip_extract_state_dict=False +): + """ + Takes a state dict and a config, and returns a converted checkpoint. + """ + + if skip_extract_state_dict: + unet_state_dict = checkpoint + else: + # extract state_dict for UNet + unet_state_dict = {} + keys = list(checkpoint.keys()) + + if controlnet: + unet_key = "control_model." + else: + unet_key = "model.diffusion_model." + + # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA + if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: + logger.warning(f"Checkpoint {path} has both EMA and non-EMA weights.") + logger.warning( + "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" + " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." + ) + for key in keys: + if key.startswith("model.diffusion_model"): + flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) + unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) + else: + if sum(k.startswith("model_ema") for k in keys) > 100: + logger.warning( + "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" + " weights (usually better for inference), please make sure to add the `--extract_ema` flag." 
+ ) + + for key in keys: + if key.startswith(unet_key): + unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) + + new_checkpoint = {} + + new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] + new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] + new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] + new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] + + if config["class_embed_type"] is None: + # No parameters to port + ... + elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection": + new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] + new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] + new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] + new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] + else: + raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}") + + if config["addition_embed_type"] == "text_time": + new_checkpoint["add_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] + new_checkpoint["add_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] + new_checkpoint["add_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] + new_checkpoint["add_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] + + # Relevant to StableDiffusionUpscalePipeline + if "num_class_embeds" in config: + if (config["num_class_embeds"] is not None) and ("label_emb.weight" in unet_state_dict): + new_checkpoint["class_embedding.weight"] = unet_state_dict["label_emb.weight"] + + new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] + new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] + + if not controlnet: + new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] + new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] + new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] + new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] + + # Retrieves the keys for the input blocks only + num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) + input_blocks = { + layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] + for layer_id in range(num_input_blocks) + } + + # Retrieves the keys for the middle blocks only + num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) + middle_blocks = { + layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] + for layer_id in range(num_middle_blocks) + } + + # Retrieves the keys for the output blocks only + num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) + output_blocks = { + layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] + for layer_id in range(num_output_blocks) + } + + for i in range(1, num_input_blocks): + block_id = (i - 1) // (config["layers_per_block"] + 1) + layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) + + resnets = [ + key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key + ] + attentions = [key for key in 
input_blocks[i] if f"input_blocks.{i}.1" in key] + + if f"input_blocks.{i}.0.op.weight" in unet_state_dict: + new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( + f"input_blocks.{i}.0.op.weight" + ) + new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( + f"input_blocks.{i}.0.op.bias" + ) + + paths = renew_resnet_paths(resnets) + meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} + assign_to_checkpoint( + paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + ) + + if len(attentions): + paths = renew_attention_paths(attentions) + + meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} + assign_to_checkpoint( + paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + ) + + resnet_0 = middle_blocks[0] + attentions = middle_blocks[1] + resnet_1 = middle_blocks[2] + + resnet_0_paths = renew_resnet_paths(resnet_0) + assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config) + + resnet_1_paths = renew_resnet_paths(resnet_1) + assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config) + + attentions_paths = renew_attention_paths(attentions) + meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} + assign_to_checkpoint( + attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + ) + + for i in range(num_output_blocks): + block_id = i // (config["layers_per_block"] + 1) + layer_in_block_id = i % (config["layers_per_block"] + 1) + output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] + output_block_list = {} + + for layer in output_block_layers: + layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) + if layer_id in output_block_list: + output_block_list[layer_id].append(layer_name) + else: + output_block_list[layer_id] = [layer_name] + + if len(output_block_list) > 1: + resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] + attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] + + resnet_0_paths = renew_resnet_paths(resnets) + paths = renew_resnet_paths(resnets) + + meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} + assign_to_checkpoint( + paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + ) + + output_block_list = {k: sorted(v) for k, v in output_block_list.items()} + if ["conv.bias", "conv.weight"] in output_block_list.values(): + index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) + new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ + f"output_blocks.{i}.{index}.conv.weight" + ] + new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ + f"output_blocks.{i}.{index}.conv.bias" + ] + + # Clear attentions as they have been attributed above. 
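+            # (Exactly two matching keys means they are the upsampler's conv.weight / conv.bias
+            #  handled just above, not the weights of a real attention block.)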
+ if len(attentions) == 2: + attentions = [] + + if len(attentions): + paths = renew_attention_paths(attentions) + meta_path = { + "old": f"output_blocks.{i}.1", + "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", + } + assign_to_checkpoint( + paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + ) + else: + resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) + for path in resnet_0_paths: + old_path = ".".join(["output_blocks", str(i), path["old"]]) + new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) + + new_checkpoint[new_path] = unet_state_dict[old_path] + + if controlnet: + # conditioning embedding + + orig_index = 0 + + new_checkpoint["controlnet_cond_embedding.conv_in.weight"] = unet_state_dict.pop( + f"input_hint_block.{orig_index}.weight" + ) + new_checkpoint["controlnet_cond_embedding.conv_in.bias"] = unet_state_dict.pop( + f"input_hint_block.{orig_index}.bias" + ) + + orig_index += 2 + + diffusers_index = 0 + + while diffusers_index < 6: + new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.weight"] = unet_state_dict.pop( + f"input_hint_block.{orig_index}.weight" + ) + new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.bias"] = unet_state_dict.pop( + f"input_hint_block.{orig_index}.bias" + ) + diffusers_index += 1 + orig_index += 2 + + new_checkpoint["controlnet_cond_embedding.conv_out.weight"] = unet_state_dict.pop( + f"input_hint_block.{orig_index}.weight" + ) + new_checkpoint["controlnet_cond_embedding.conv_out.bias"] = unet_state_dict.pop( + f"input_hint_block.{orig_index}.bias" + ) + + # down blocks + for i in range(num_input_blocks): + new_checkpoint[f"controlnet_down_blocks.{i}.weight"] = unet_state_dict.pop(f"zero_convs.{i}.0.weight") + new_checkpoint[f"controlnet_down_blocks.{i}.bias"] = unet_state_dict.pop(f"zero_convs.{i}.0.bias") + + # mid block + new_checkpoint["controlnet_mid_block.weight"] = unet_state_dict.pop("middle_block_out.0.weight") + new_checkpoint["controlnet_mid_block.bias"] = unet_state_dict.pop("middle_block_out.0.bias") + + return new_checkpoint + + +def convert_ldm_vae_checkpoint(checkpoint, config): + # extract state dict for VAE + vae_state_dict = {} + keys = list(checkpoint.keys()) + vae_key = "first_stage_model." 
if any(k.startswith("first_stage_model.") for k in keys) else "" + for key in keys: + if key.startswith(vae_key): + vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key) + + new_checkpoint = {} + + new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] + new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] + new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] + new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] + new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] + new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] + + new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] + new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] + new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] + new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] + new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] + new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] + + new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] + new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] + new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"] + new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] + + # Retrieves the keys for the encoder down blocks only + num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) + down_blocks = { + layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) + } + + # Retrieves the keys for the decoder up blocks only + num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) + up_blocks = { + layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) + } + + for i in range(num_down_blocks): + resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] + + if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: + new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( + f"encoder.down.{i}.downsample.conv.weight" + ) + new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( + f"encoder.down.{i}.downsample.conv.bias" + ) + + paths = renew_vae_resnet_paths(resnets) + meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} + assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + + mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] + num_mid_res_blocks = 2 + for i in range(1, num_mid_res_blocks + 1): + resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] + + paths = renew_vae_resnet_paths(resnets) + meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} + assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + + mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] + paths = renew_vae_attention_paths(mid_attentions) + meta_path = {"old": "mid.attn_1", "new": 
"mid_block.attentions.0"} + assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + conv_attn_to_linear(new_checkpoint) + + for i in range(num_up_blocks): + block_id = num_up_blocks - 1 - i + resnets = [ + key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key + ] + + if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: + new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ + f"decoder.up.{block_id}.upsample.conv.weight" + ] + new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ + f"decoder.up.{block_id}.upsample.conv.bias" + ] + + paths = renew_vae_resnet_paths(resnets) + meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} + assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + + mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] + num_mid_res_blocks = 2 + for i in range(1, num_mid_res_blocks + 1): + resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] + + paths = renew_vae_resnet_paths(resnets) + meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} + assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + + mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] + paths = renew_vae_attention_paths(mid_attentions) + meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} + assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + conv_attn_to_linear(new_checkpoint) + return new_checkpoint + + +def convert_ldm_bert_checkpoint(checkpoint, config): + def _copy_attn_layer(hf_attn_layer, pt_attn_layer): + hf_attn_layer.q_proj.weight.data = pt_attn_layer.to_q.weight + hf_attn_layer.k_proj.weight.data = pt_attn_layer.to_k.weight + hf_attn_layer.v_proj.weight.data = pt_attn_layer.to_v.weight + + hf_attn_layer.out_proj.weight = pt_attn_layer.to_out.weight + hf_attn_layer.out_proj.bias = pt_attn_layer.to_out.bias + + def _copy_linear(hf_linear, pt_linear): + hf_linear.weight = pt_linear.weight + hf_linear.bias = pt_linear.bias + + def _copy_layer(hf_layer, pt_layer): + # copy layer norms + _copy_linear(hf_layer.self_attn_layer_norm, pt_layer[0][0]) + _copy_linear(hf_layer.final_layer_norm, pt_layer[1][0]) + + # copy attn + _copy_attn_layer(hf_layer.self_attn, pt_layer[0][1]) + + # copy MLP + pt_mlp = pt_layer[1][1] + _copy_linear(hf_layer.fc1, pt_mlp.net[0][0]) + _copy_linear(hf_layer.fc2, pt_mlp.net[2]) + + def _copy_layers(hf_layers, pt_layers): + for i, hf_layer in enumerate(hf_layers): + if i != 0: + i += i + pt_layer = pt_layers[i : i + 2] + _copy_layer(hf_layer, pt_layer) + + hf_model = LDMBertModel(config).eval() + + # copy embeds + hf_model.model.embed_tokens.weight = checkpoint.transformer.token_emb.weight + hf_model.model.embed_positions.weight.data = checkpoint.transformer.pos_emb.emb.weight + + # copy layer norm + _copy_linear(hf_model.model.layer_norm, checkpoint.transformer.norm) + + # copy hidden layers + _copy_layers(hf_model.model.layers, checkpoint.transformer.attn_layers.layers) + + _copy_linear(hf_model.to_logits, checkpoint.transformer.to_logits) + + return hf_model + + +def convert_ldm_clip_checkpoint(checkpoint, local_files_only=False, text_encoder=None): + if text_encoder is None: + config_name = 
"openai/clip-vit-large-patch14" + try: + config = CLIPTextConfig.from_pretrained(config_name, local_files_only=local_files_only) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: 'openai/clip-vit-large-patch14'." + ) + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + text_model = CLIPTextModel(config) + else: + text_model = text_encoder + + keys = list(checkpoint.keys()) + + text_model_dict = {} + + remove_prefixes = ["cond_stage_model.transformer", "conditioner.embedders.0.transformer"] + + for key in keys: + for prefix in remove_prefixes: + if key.startswith(prefix): + text_model_dict[key[len(prefix + ".") :]] = checkpoint[key] + + if is_accelerate_available(): + for param_name, param in text_model_dict.items(): + set_module_tensor_to_device(text_model, param_name, "cpu", value=param) + else: + if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings.position_ids)): + text_model_dict.pop("text_model.embeddings.position_ids", None) + + text_model.load_state_dict(text_model_dict) + + return text_model + + +textenc_conversion_lst = [ + ("positional_embedding", "text_model.embeddings.position_embedding.weight"), + ("token_embedding.weight", "text_model.embeddings.token_embedding.weight"), + ("ln_final.weight", "text_model.final_layer_norm.weight"), + ("ln_final.bias", "text_model.final_layer_norm.bias"), + ("text_projection", "text_projection.weight"), +] +textenc_conversion_map = {x[0]: x[1] for x in textenc_conversion_lst} + +textenc_transformer_conversion_lst = [ + # (stable-diffusion, HF Diffusers) + ("resblocks.", "text_model.encoder.layers."), + ("ln_1", "layer_norm1"), + ("ln_2", "layer_norm2"), + (".c_fc.", ".fc1."), + (".c_proj.", ".fc2."), + (".attn", ".self_attn"), + ("ln_final.", "transformer.text_model.final_layer_norm."), + ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"), + ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), +] +protected = {re.escape(x[0]): x[1] for x in textenc_transformer_conversion_lst} +textenc_pattern = re.compile("|".join(protected.keys())) + + +def convert_paint_by_example_checkpoint(checkpoint, local_files_only=False): + config = CLIPVisionConfig.from_pretrained("openai/clip-vit-large-patch14", local_files_only=local_files_only) + model = PaintByExampleImageEncoder(config) + + keys = list(checkpoint.keys()) + + text_model_dict = {} + + for key in keys: + if key.startswith("cond_stage_model.transformer"): + text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[key] + + # load clip vision + model.model.load_state_dict(text_model_dict) + + # load mapper + keys_mapper = { + k[len("cond_stage_model.mapper.res") :]: v + for k, v in checkpoint.items() + if k.startswith("cond_stage_model.mapper") + } + + MAPPING = { + "attn.c_qkv": ["attn1.to_q", "attn1.to_k", "attn1.to_v"], + "attn.c_proj": ["attn1.to_out.0"], + "ln_1": ["norm1"], + "ln_2": ["norm3"], + "mlp.c_fc": ["ff.net.0.proj"], + "mlp.c_proj": ["ff.net.2"], + } + + mapped_weights = {} + for key, value in keys_mapper.items(): + prefix = key[: len("blocks.i")] + suffix = key.split(prefix)[-1].split(".")[-1] + name = key.split(prefix)[-1].split(suffix)[0][1:-1] + mapped_names = MAPPING[name] + + num_splits = len(mapped_names) + for i, mapped_name in enumerate(mapped_names): + new_name = ".".join([prefix, mapped_name, suffix]) + shape = 
value.shape[0] // num_splits + mapped_weights[new_name] = value[i * shape : (i + 1) * shape] + + model.mapper.load_state_dict(mapped_weights) + + # load final layer norm + model.final_layer_norm.load_state_dict( + { + "bias": checkpoint["cond_stage_model.final_ln.bias"], + "weight": checkpoint["cond_stage_model.final_ln.weight"], + } + ) + + # load final proj + model.proj_out.load_state_dict( + { + "bias": checkpoint["proj_out.bias"], + "weight": checkpoint["proj_out.weight"], + } + ) + + # load uncond vector + model.uncond_vector.data = torch.nn.Parameter(checkpoint["learnable_vector"]) + return model + + +def convert_open_clip_checkpoint( + checkpoint, + config_name, + prefix="cond_stage_model.model.", + has_projection=False, + local_files_only=False, + **config_kwargs, +): + # text_model = CLIPTextModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="text_encoder") + # text_model = CLIPTextModelWithProjection.from_pretrained( + # "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", projection_dim=1280 + # ) + try: + config = CLIPTextConfig.from_pretrained(config_name, **config_kwargs, local_files_only=local_files_only) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: '{config_name}'." + ) + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + text_model = CLIPTextModelWithProjection(config) if has_projection else CLIPTextModel(config) + + keys = list(checkpoint.keys()) + + keys_to_ignore = [] + if config_name == "stabilityai/stable-diffusion-2" and config.num_hidden_layers == 23: + # make sure to remove all keys > 22 + keys_to_ignore += [k for k in keys if k.startswith("cond_stage_model.model.transformer.resblocks.23")] + keys_to_ignore += ["cond_stage_model.model.text_projection"] + + text_model_dict = {} + + if prefix + "text_projection" in checkpoint: + d_model = int(checkpoint[prefix + "text_projection"].shape[0]) + else: + d_model = 1024 + + text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids") + + for key in keys: + if key in keys_to_ignore: + continue + if key[len(prefix) :] in textenc_conversion_map: + if key.endswith("text_projection"): + value = checkpoint[key].T.contiguous() + else: + value = checkpoint[key] + + text_model_dict[textenc_conversion_map[key[len(prefix) :]]] = value + + if key.startswith(prefix + "transformer."): + new_key = key[len(prefix + "transformer.") :] + if new_key.endswith(".in_proj_weight"): + new_key = new_key[: -len(".in_proj_weight")] + new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) + text_model_dict[new_key + ".q_proj.weight"] = checkpoint[key][:d_model, :] + text_model_dict[new_key + ".k_proj.weight"] = checkpoint[key][d_model : d_model * 2, :] + text_model_dict[new_key + ".v_proj.weight"] = checkpoint[key][d_model * 2 :, :] + elif new_key.endswith(".in_proj_bias"): + new_key = new_key[: -len(".in_proj_bias")] + new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) + text_model_dict[new_key + ".q_proj.bias"] = checkpoint[key][:d_model] + text_model_dict[new_key + ".k_proj.bias"] = checkpoint[key][d_model : d_model * 2] + text_model_dict[new_key + ".v_proj.bias"] = checkpoint[key][d_model * 2 :] + else: + new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) + + text_model_dict[new_key] = checkpoint[key] + + if is_accelerate_available(): + for 
param_name, param in text_model_dict.items(): + set_module_tensor_to_device(text_model, param_name, "cpu", value=param) + else: + if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings.position_ids)): + text_model_dict.pop("text_model.embeddings.position_ids", None) + + text_model.load_state_dict(text_model_dict) + + return text_model + + +def stable_unclip_image_encoder(original_config, local_files_only=False): + """ + Returns the image processor and clip image encoder for the img2img unclip pipeline. + + We currently know of two types of stable unclip models which separately use the clip and the openclip image + encoders. + """ + + image_embedder_config = original_config["model"]["params"]["embedder_config"] + + sd_clip_image_embedder_class = image_embedder_config["target"] + sd_clip_image_embedder_class = sd_clip_image_embedder_class.split(".")[-1] + + if sd_clip_image_embedder_class == "ClipImageEmbedder": + clip_model_name = image_embedder_config.params.model + + if clip_model_name == "ViT-L/14": + feature_extractor = CLIPImageProcessor() + image_encoder = CLIPVisionModelWithProjection.from_pretrained( + "openai/clip-vit-large-patch14", local_files_only=local_files_only + ) + else: + raise NotImplementedError(f"Unknown CLIP checkpoint name in stable diffusion checkpoint {clip_model_name}") + + elif sd_clip_image_embedder_class == "FrozenOpenCLIPImageEmbedder": + feature_extractor = CLIPImageProcessor() + image_encoder = CLIPVisionModelWithProjection.from_pretrained( + "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", local_files_only=local_files_only + ) + else: + raise NotImplementedError( + f"Unknown CLIP image embedder class in stable diffusion checkpoint {sd_clip_image_embedder_class}" + ) + + return feature_extractor, image_encoder + + +def stable_unclip_image_noising_components( + original_config, clip_stats_path: Optional[str] = None, device: Optional[str] = None +): + """ + Returns the noising components for the img2img and txt2img unclip pipelines. + + Converts the stability noise augmentor into + 1. a `StableUnCLIPImageNormalizer` for holding the CLIP stats + 2. a `DDPMScheduler` for holding the noise schedule + + If the noise augmentor config specifies a clip stats path, the `clip_stats_path` must be provided. 
+ """ + noise_aug_config = original_config["model"]["params"]["noise_aug_config"] + noise_aug_class = noise_aug_config["target"] + noise_aug_class = noise_aug_class.split(".")[-1] + + if noise_aug_class == "CLIPEmbeddingNoiseAugmentation": + noise_aug_config = noise_aug_config.params + embedding_dim = noise_aug_config.timestep_dim + max_noise_level = noise_aug_config.noise_schedule_config.timesteps + beta_schedule = noise_aug_config.noise_schedule_config.beta_schedule + + image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedding_dim) + image_noising_scheduler = DDPMScheduler(num_train_timesteps=max_noise_level, beta_schedule=beta_schedule) + + if "clip_stats_path" in noise_aug_config: + if clip_stats_path is None: + raise ValueError("This stable unclip config requires a `clip_stats_path`") + + clip_mean, clip_std = torch.load(clip_stats_path, map_location=device) + clip_mean = clip_mean[None, :] + clip_std = clip_std[None, :] + + clip_stats_state_dict = { + "mean": clip_mean, + "std": clip_std, + } + + image_normalizer.load_state_dict(clip_stats_state_dict) + else: + raise NotImplementedError(f"Unknown noise augmentor class: {noise_aug_class}") + + return image_normalizer, image_noising_scheduler + + +def convert_controlnet_checkpoint( + checkpoint, + original_config, + checkpoint_path, + image_size, + upcast_attention, + extract_ema, + use_linear_projection=None, + cross_attention_dim=None, +): + ctrlnet_config = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True) + ctrlnet_config["upcast_attention"] = upcast_attention + + ctrlnet_config.pop("sample_size") + + if use_linear_projection is not None: + ctrlnet_config["use_linear_projection"] = use_linear_projection + + if cross_attention_dim is not None: + ctrlnet_config["cross_attention_dim"] = cross_attention_dim + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + controlnet = ControlNetModel(**ctrlnet_config) + + # Some controlnet ckpt files are distributed independently from the rest of the + # model components i.e. 
https://huggingface.co/thibaud/controlnet-sd21/ + if "time_embed.0.weight" in checkpoint: + skip_extract_state_dict = True + else: + skip_extract_state_dict = False + + converted_ctrl_checkpoint = convert_ldm_unet_checkpoint( + checkpoint, + ctrlnet_config, + path=checkpoint_path, + extract_ema=extract_ema, + controlnet=True, + skip_extract_state_dict=skip_extract_state_dict, + ) + + if is_accelerate_available(): + for param_name, param in converted_ctrl_checkpoint.items(): + set_module_tensor_to_device(controlnet, param_name, "cpu", value=param) + else: + controlnet.load_state_dict(converted_ctrl_checkpoint) + + return controlnet + + +def download_from_original_stable_diffusion_ckpt( + checkpoint_path_or_dict: Union[str, Dict[str, torch.Tensor]], + original_config_file: str = None, + image_size: Optional[int] = None, + prediction_type: str = None, + model_type: str = None, + extract_ema: bool = False, + scheduler_type: str = "pndm", + num_in_channels: Optional[int] = None, + upcast_attention: Optional[bool] = None, + device: str = None, + from_safetensors: bool = False, + stable_unclip: Optional[str] = None, + stable_unclip_prior: Optional[str] = None, + clip_stats_path: Optional[str] = None, + controlnet: Optional[bool] = None, + adapter: Optional[bool] = None, + load_safety_checker: bool = True, + pipeline_class: DiffusionPipeline = None, + local_files_only=False, + vae_path=None, + vae=None, + text_encoder=None, + text_encoder_2=None, + tokenizer=None, + tokenizer_2=None, + config_files=None, +) -> DiffusionPipeline: + """ + Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml` + config file. + + Although many of the arguments can be automatically inferred, some of these rely on brittle checks against the + global step count, which will likely fail for models that have undergone further fine-tuning. Therefore, it is + recommended that you override the default values and/or supply an `original_config_file` wherever possible. + + Args: + checkpoint_path_or_dict (`str` or `dict`): Path to `.ckpt` file, or the state dict. + original_config_file (`str`): + Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically + inferred by looking for a key that only exists in SD2.0 models. + image_size (`int`, *optional*, defaults to 512): + The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2 + Base. Use 768 for Stable Diffusion v2. + prediction_type (`str`, *optional*): + The prediction type that the model was trained on. Use `'epsilon'` for Stable Diffusion v1.X and Stable + Diffusion v2 Base. Use `'v_prediction'` for Stable Diffusion v2. + num_in_channels (`int`, *optional*, defaults to None): + The number of input channels. If `None`, it will be automatically inferred. + scheduler_type (`str`, *optional*, defaults to 'pndm'): + Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm", + "ddim"]`. + model_type (`str`, *optional*, defaults to `None`): + The pipeline type. `None` to automatically infer, or one of `["FrozenOpenCLIPEmbedder", + "FrozenCLIPEmbedder", "PaintByExample"]`. + is_img2img (`bool`, *optional*, defaults to `False`): + Whether the model should be loaded as an img2img pipeline. + extract_ema (`bool`, *optional*, defaults to `False`): Only relevant for + checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights or not. Defaults to + `False`. 
Pass `True` to extract the EMA weights. EMA weights usually yield higher quality images for + inference. Non-EMA weights are usually better to continue fine-tuning. + upcast_attention (`bool`, *optional*, defaults to `None`): + Whether the attention computation should always be upcasted. This is necessary when running stable + diffusion 2.1. + device (`str`, *optional*, defaults to `None`): + The device to use. Pass `None` to determine automatically. + from_safetensors (`str`, *optional*, defaults to `False`): + If `checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch. + load_safety_checker (`bool`, *optional*, defaults to `True`): + Whether to load the safety checker or not. Defaults to `True`. + pipeline_class (`str`, *optional*, defaults to `None`): + The pipeline class to use. Pass `None` to determine automatically. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether or not to only look at local files (i.e., do not try to download the model). + vae (`AutoencoderKL`, *optional*, defaults to `None`): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If + this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed. + text_encoder (`CLIPTextModel`, *optional*, defaults to `None`): + An instance of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel) + to use, specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) + variant. If this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed. + tokenizer (`CLIPTokenizer`, *optional*, defaults to `None`): + An instance of + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer) + to use. If this parameter is `None`, the function will load a new instance of [CLIPTokenizer] by itself, if + needed. + config_files (`Dict[str, str]`, *optional*, defaults to `None`): + A dictionary mapping from config file names to their contents. If this parameter is `None`, the function + will load the config files by itself, if needed. Valid keys are: + - `v1`: Config file for Stable Diffusion v1 + - `v2`: Config file for Stable Diffusion v2 + - `xl`: Config file for Stable Diffusion XL + - `xl_refiner`: Config file for Stable Diffusion XL Refiner + return: A StableDiffusionPipeline object representing the passed-in `.ckpt`/`.safetensors` file. 
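+ 
+     Examples:
+ 
+         A minimal, illustrative call. The checkpoint path, the `from_safetensors` flag, and the output directory
+         below are placeholder assumptions, not values required by this function, and the import path assumes the
+         usual `diffusers` package layout for this module:
+ 
+         ```py
+         >>> from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
+         ...     download_from_original_stable_diffusion_ckpt,
+         ... )
+ 
+         >>> pipe = download_from_original_stable_diffusion_ckpt(
+         ...     "path/to/model.safetensors",  # hypothetical local checkpoint
+         ...     from_safetensors=True,
+         ...     load_safety_checker=False,
+         ... )
+         >>> pipe.save_pretrained("./converted-pipeline")  # illustrative output directory
+         ```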
+ """ + + # import pipelines here to avoid circular import error when using from_single_file method + from diffusers import ( + LDMTextToImagePipeline, + PaintByExamplePipeline, + StableDiffusionControlNetPipeline, + StableDiffusionInpaintPipeline, + StableDiffusionPipeline, + StableDiffusionUpscalePipeline, + StableDiffusionXLControlNetInpaintPipeline, + StableDiffusionXLImg2ImgPipeline, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLPipeline, + StableUnCLIPImg2ImgPipeline, + StableUnCLIPPipeline, + ) + + if prediction_type == "v-prediction": + prediction_type = "v_prediction" + + if isinstance(checkpoint_path_or_dict, str): + if from_safetensors: + from safetensors.torch import load_file as safe_load + + checkpoint = safe_load(checkpoint_path_or_dict, device="cpu") + else: + if device is None: + device = "cuda" if torch.cuda.is_available() else "cpu" + checkpoint = torch.load(checkpoint_path_or_dict, map_location=device) + else: + checkpoint = torch.load(checkpoint_path_or_dict, map_location=device) + elif isinstance(checkpoint_path_or_dict, dict): + checkpoint = checkpoint_path_or_dict + + # Sometimes models don't have the global_step item + if "global_step" in checkpoint: + global_step = checkpoint["global_step"] + else: + logger.debug("global_step key not found in model") + global_step = None + + # NOTE: this while loop isn't great but this controlnet checkpoint has one additional + # "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21 + while "state_dict" in checkpoint: + checkpoint = checkpoint["state_dict"] + + if original_config_file is None: + key_name_v2_1 = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" + key_name_sd_xl_base = "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias" + key_name_sd_xl_refiner = "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias" + is_upscale = pipeline_class == StableDiffusionUpscalePipeline + + config_url = None + + # model_type = "v1" + if config_files is not None and "v1" in config_files: + original_config_file = config_files["v1"] + else: + config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" + + if key_name_v2_1 in checkpoint and checkpoint[key_name_v2_1].shape[-1] == 1024: + # model_type = "v2" + if config_files is not None and "v2" in config_files: + original_config_file = config_files["v2"] + else: + config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml" + if global_step == 110000: + # v2.1 needs to upcast attention + upcast_attention = True + elif key_name_sd_xl_base in checkpoint: + # only base xl has two text embedders + if config_files is not None and "xl" in config_files: + original_config_file = config_files["xl"] + else: + config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" + elif key_name_sd_xl_refiner in checkpoint: + # only refiner xl has embedder and one text embedders + if config_files is not None and "xl_refiner" in config_files: + original_config_file = config_files["xl_refiner"] + else: + config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml" + + if is_upscale: + config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml" + + if config_url is not None: + original_config_file = 
BytesIO(requests.get(config_url).content) + else: + with open(original_config_file, "r") as f: + original_config_file = f.read() + else: + with open(original_config_file, "r") as f: + original_config_file = f.read() + + original_config = yaml.safe_load(original_config_file) + + # Convert the text model. + if ( + model_type is None + and "cond_stage_config" in original_config["model"]["params"] + and original_config["model"]["params"]["cond_stage_config"] is not None + ): + model_type = original_config["model"]["params"]["cond_stage_config"]["target"].split(".")[-1] + logger.debug(f"no `model_type` given, `model_type` inferred as: {model_type}") + elif model_type is None and original_config["model"]["params"]["network_config"] is not None: + if original_config["model"]["params"]["network_config"]["params"]["context_dim"] == 2048: + model_type = "SDXL" + else: + model_type = "SDXL-Refiner" + if image_size is None: + image_size = 1024 + + if pipeline_class is None: + # Check if we have a SDXL or SD model and initialize default pipeline + if model_type not in ["SDXL", "SDXL-Refiner"]: + pipeline_class = StableDiffusionPipeline if not controlnet else StableDiffusionControlNetPipeline + else: + pipeline_class = StableDiffusionXLPipeline if model_type == "SDXL" else StableDiffusionXLImg2ImgPipeline + + if num_in_channels is None and pipeline_class in [ + StableDiffusionInpaintPipeline, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLControlNetInpaintPipeline, + ]: + num_in_channels = 9 + if num_in_channels is None and pipeline_class == StableDiffusionUpscalePipeline: + num_in_channels = 7 + elif num_in_channels is None: + num_in_channels = 4 + + if "unet_config" in original_config["model"]["params"]: + original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels + + if ( + "parameterization" in original_config["model"]["params"] + and original_config["model"]["params"]["parameterization"] == "v" + ): + if prediction_type is None: + # NOTE: For stable diffusion 2 base it is recommended to pass `prediction_type=="epsilon"` + # as it relies on a brittle global step parameter here + prediction_type = "epsilon" if global_step == 875000 else "v_prediction" + if image_size is None: + # NOTE: For stable diffusion 2 base one has to pass `image_size==512` + # as it relies on a brittle global step parameter here + image_size = 512 if global_step == 875000 else 768 + else: + if prediction_type is None: + prediction_type = "epsilon" + if image_size is None: + image_size = 512 + + if controlnet is None and "control_stage_config" in original_config["model"]["params"]: + path = checkpoint_path_or_dict if isinstance(checkpoint_path_or_dict, str) else "" + controlnet = convert_controlnet_checkpoint( + checkpoint, original_config, path, image_size, upcast_attention, extract_ema + ) + + if "timesteps" in original_config["model"]["params"]: + num_train_timesteps = original_config["model"]["params"]["timesteps"] + else: + num_train_timesteps = 1000 + + if model_type in ["SDXL", "SDXL-Refiner"]: + scheduler_dict = { + "beta_schedule": "scaled_linear", + "beta_start": 0.00085, + "beta_end": 0.012, + "interpolation_type": "linear", + "num_train_timesteps": num_train_timesteps, + "prediction_type": "epsilon", + "sample_max_value": 1.0, + "set_alpha_to_one": False, + "skip_prk_steps": True, + "steps_offset": 1, + "timestep_spacing": "leading", + } + scheduler = EulerDiscreteScheduler.from_config(scheduler_dict) + scheduler_type = "euler" + else: + if "linear_start" in 
original_config["model"]["params"]: + beta_start = original_config["model"]["params"]["linear_start"] + else: + beta_start = 0.02 + + if "linear_end" in original_config["model"]["params"]: + beta_end = original_config["model"]["params"]["linear_end"] + else: + beta_end = 0.085 + scheduler = DDIMScheduler( + beta_end=beta_end, + beta_schedule="scaled_linear", + beta_start=beta_start, + num_train_timesteps=num_train_timesteps, + steps_offset=1, + clip_sample=False, + set_alpha_to_one=False, + prediction_type=prediction_type, + ) + # make sure scheduler works correctly with DDIM + scheduler.register_to_config(clip_sample=False) + + if scheduler_type == "pndm": + config = dict(scheduler.config) + config["skip_prk_steps"] = True + scheduler = PNDMScheduler.from_config(config) + elif scheduler_type == "lms": + scheduler = LMSDiscreteScheduler.from_config(scheduler.config) + elif scheduler_type == "heun": + scheduler = HeunDiscreteScheduler.from_config(scheduler.config) + elif scheduler_type == "euler": + scheduler = EulerDiscreteScheduler.from_config(scheduler.config) + elif scheduler_type == "euler-ancestral": + scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config) + elif scheduler_type == "dpm": + scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) + elif scheduler_type == "ddim": + scheduler = scheduler + else: + raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!") + + if pipeline_class == StableDiffusionUpscalePipeline: + image_size = original_config["model"]["params"]["unet_config"]["params"]["image_size"] + + # Convert the UNet2DConditionModel model. + unet_config = create_unet_diffusers_config(original_config, image_size=image_size) + unet_config["upcast_attention"] = upcast_attention + + path = checkpoint_path_or_dict if isinstance(checkpoint_path_or_dict, str) else "" + converted_unet_checkpoint = convert_ldm_unet_checkpoint( + checkpoint, unet_config, path=path, extract_ema=extract_ema + ) + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + unet = UNet2DConditionModel(**unet_config) + + if is_accelerate_available(): + if model_type not in ["SDXL", "SDXL-Refiner"]: # SBM Delay this. + for param_name, param in converted_unet_checkpoint.items(): + set_module_tensor_to_device(unet, param_name, "cpu", value=param) + else: + unet.load_state_dict(converted_unet_checkpoint) + + # Convert the VAE model. 
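+     # When no external VAE is supplied (neither `vae_path` nor `vae`), the VAE weights are extracted from the
+     # checkpoint itself, and `scaling_factor` falls back to 0.18215 if the original config does not define
+     # `model.params.scale_factor`.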
+ if vae_path is None and vae is None: + vae_config = create_vae_diffusers_config(original_config, image_size=image_size) + converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config) + + if ( + "model" in original_config + and "params" in original_config["model"] + and "scale_factor" in original_config["model"]["params"] + ): + vae_scaling_factor = original_config["model"]["params"]["scale_factor"] + else: + vae_scaling_factor = 0.18215 # default SD scaling factor + + vae_config["scaling_factor"] = vae_scaling_factor + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + vae = AutoencoderKL(**vae_config) + + if is_accelerate_available(): + for param_name, param in converted_vae_checkpoint.items(): + set_module_tensor_to_device(vae, param_name, "cpu", value=param) + else: + vae.load_state_dict(converted_vae_checkpoint) + elif vae is None: + vae = AutoencoderKL.from_pretrained(vae_path, local_files_only=local_files_only) + + if model_type == "FrozenOpenCLIPEmbedder": + config_name = "stabilityai/stable-diffusion-2" + config_kwargs = {"subfolder": "text_encoder"} + + if text_encoder is None: + text_model = convert_open_clip_checkpoint( + checkpoint, config_name, local_files_only=local_files_only, **config_kwargs + ) + else: + text_model = text_encoder + + try: + tokenizer = CLIPTokenizer.from_pretrained( + "stabilityai/stable-diffusion-2", subfolder="tokenizer", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'stabilityai/stable-diffusion-2'." + ) + + if stable_unclip is None: + if controlnet: + pipe = pipeline_class( + vae=vae, + text_encoder=text_model, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + controlnet=controlnet, + safety_checker=None, + feature_extractor=None, + ) + if hasattr(pipe, "requires_safety_checker"): + pipe.requires_safety_checker = False + + elif pipeline_class == StableDiffusionUpscalePipeline: + scheduler = DDIMScheduler.from_pretrained( + "stabilityai/stable-diffusion-x4-upscaler", subfolder="scheduler" + ) + low_res_scheduler = DDPMScheduler.from_pretrained( + "stabilityai/stable-diffusion-x4-upscaler", subfolder="low_res_scheduler" + ) + + pipe = pipeline_class( + vae=vae, + text_encoder=text_model, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + low_res_scheduler=low_res_scheduler, + safety_checker=None, + feature_extractor=None, + ) + + else: + pipe = pipeline_class( + vae=vae, + text_encoder=text_model, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=None, + feature_extractor=None, + ) + if hasattr(pipe, "requires_safety_checker"): + pipe.requires_safety_checker = False + + else: + image_normalizer, image_noising_scheduler = stable_unclip_image_noising_components( + original_config, clip_stats_path=clip_stats_path, device=device + ) + + if stable_unclip == "img2img": + feature_extractor, image_encoder = stable_unclip_image_encoder(original_config) + + pipe = StableUnCLIPImg2ImgPipeline( + # image encoding components + feature_extractor=feature_extractor, + image_encoder=image_encoder, + # image noising components + image_normalizer=image_normalizer, + image_noising_scheduler=image_noising_scheduler, + # regular denoising components + tokenizer=tokenizer, + text_encoder=text_model, + unet=unet, + scheduler=scheduler, + # vae + vae=vae, + ) + elif stable_unclip == "txt2img": + if stable_unclip_prior is None or 
stable_unclip_prior == "karlo": + karlo_model = "kakaobrain/karlo-v1-alpha" + prior = PriorTransformer.from_pretrained( + karlo_model, subfolder="prior", local_files_only=local_files_only + ) + + try: + prior_tokenizer = CLIPTokenizer.from_pretrained( + "openai/clip-vit-large-patch14", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'." + ) + prior_text_model = CLIPTextModelWithProjection.from_pretrained( + "openai/clip-vit-large-patch14", local_files_only=local_files_only + ) + + prior_scheduler = UnCLIPScheduler.from_pretrained( + karlo_model, subfolder="prior_scheduler", local_files_only=local_files_only + ) + prior_scheduler = DDPMScheduler.from_config(prior_scheduler.config) + else: + raise NotImplementedError(f"unknown prior for stable unclip model: {stable_unclip_prior}") + + pipe = StableUnCLIPPipeline( + # prior components + prior_tokenizer=prior_tokenizer, + prior_text_encoder=prior_text_model, + prior=prior, + prior_scheduler=prior_scheduler, + # image noising components + image_normalizer=image_normalizer, + image_noising_scheduler=image_noising_scheduler, + # regular denoising components + tokenizer=tokenizer, + text_encoder=text_model, + unet=unet, + scheduler=scheduler, + # vae + vae=vae, + ) + else: + raise NotImplementedError(f"unknown `stable_unclip` type: {stable_unclip}") + elif model_type == "PaintByExample": + vision_model = convert_paint_by_example_checkpoint(checkpoint) + try: + tokenizer = CLIPTokenizer.from_pretrained( + "openai/clip-vit-large-patch14", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'." + ) + try: + feature_extractor = AutoFeatureExtractor.from_pretrained( + "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the feature_extractor in the following path: 'CompVis/stable-diffusion-safety-checker'." + ) + pipe = PaintByExamplePipeline( + vae=vae, + image_encoder=vision_model, + unet=unet, + scheduler=scheduler, + safety_checker=None, + feature_extractor=feature_extractor, + ) + elif model_type == "FrozenCLIPEmbedder": + text_model = convert_ldm_clip_checkpoint( + checkpoint, local_files_only=local_files_only, text_encoder=text_encoder + ) + try: + tokenizer = ( + CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", local_files_only=local_files_only) + if tokenizer is None + else tokenizer + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'." 
+ ) + + if load_safety_checker: + safety_checker = StableDiffusionSafetyChecker.from_pretrained( + "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only + ) + feature_extractor = AutoFeatureExtractor.from_pretrained( + "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only + ) + else: + safety_checker = None + feature_extractor = None + + if controlnet: + pipe = pipeline_class( + vae=vae, + text_encoder=text_model, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + else: + pipe = pipeline_class( + vae=vae, + text_encoder=text_model, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + elif model_type in ["SDXL", "SDXL-Refiner"]: + is_refiner = model_type == "SDXL-Refiner" + + if (is_refiner is False) and (tokenizer is None): + try: + tokenizer = CLIPTokenizer.from_pretrained( + "openai/clip-vit-large-patch14", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'." + ) + + if (is_refiner is False) and (text_encoder is None): + text_encoder = convert_ldm_clip_checkpoint(checkpoint, local_files_only=local_files_only) + + if tokenizer_2 is None: + try: + tokenizer_2 = CLIPTokenizer.from_pretrained( + "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", pad_token="!", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'laion/CLIP-ViT-bigG-14-laion2B-39B-b160k' with `pad_token` set to '!'." + ) + + if text_encoder_2 is None: + config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" + config_kwargs = {"projection_dim": 1280} + prefix = "conditioner.embedders.0.model." if is_refiner else "conditioner.embedders.1.model." + + text_encoder_2 = convert_open_clip_checkpoint( + checkpoint, + config_name, + prefix=prefix, + has_projection=True, + local_files_only=local_files_only, + **config_kwargs, + ) + + if is_accelerate_available(): # SBM Now move model to cpu. 
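+             # The SDXL UNet weights were intentionally left on the meta device earlier ("SBM Delay this.");
+             # they are materialized onto the CPU here, once the text encoders have been converted.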
+ for param_name, param in converted_unet_checkpoint.items(): + set_module_tensor_to_device(unet, param_name, "cpu", value=param) + + if controlnet: + pipe = pipeline_class( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + text_encoder_2=text_encoder_2, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + force_zeros_for_empty_prompt=True, + ) + elif adapter: + pipe = pipeline_class( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + text_encoder_2=text_encoder_2, + tokenizer_2=tokenizer_2, + unet=unet, + adapter=adapter, + scheduler=scheduler, + force_zeros_for_empty_prompt=True, + ) + + else: + pipeline_kwargs = { + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "unet": unet, + "scheduler": scheduler, + } + + if (pipeline_class == StableDiffusionXLImg2ImgPipeline) or ( + pipeline_class == StableDiffusionXLInpaintPipeline + ): + pipeline_kwargs.update({"requires_aesthetics_score": is_refiner}) + + if is_refiner: + pipeline_kwargs.update({"force_zeros_for_empty_prompt": False}) + + pipe = pipeline_class(**pipeline_kwargs) + else: + text_config = create_ldm_bert_config(original_config) + text_model = convert_ldm_bert_checkpoint(checkpoint, text_config) + tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased", local_files_only=local_files_only) + pipe = LDMTextToImagePipeline(vqvae=vae, bert=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler) + + return pipe + + +def download_controlnet_from_original_ckpt( + checkpoint_path: str, + original_config_file: str, + image_size: int = 512, + extract_ema: bool = False, + num_in_channels: Optional[int] = None, + upcast_attention: Optional[bool] = None, + device: str = None, + from_safetensors: bool = False, + use_linear_projection: Optional[bool] = None, + cross_attention_dim: Optional[bool] = None, +) -> DiffusionPipeline: + if from_safetensors: + from safetensors import safe_open + + checkpoint = {} + with safe_open(checkpoint_path, framework="pt", device="cpu") as f: + for key in f.keys(): + checkpoint[key] = f.get_tensor(key) + else: + if device is None: + device = "cuda" if torch.cuda.is_available() else "cpu" + checkpoint = torch.load(checkpoint_path, map_location=device) + else: + checkpoint = torch.load(checkpoint_path, map_location=device) + + # NOTE: this while loop isn't great but this controlnet checkpoint has one additional + # "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21 + while "state_dict" in checkpoint: + checkpoint = checkpoint["state_dict"] + + original_config = yaml.safe_load(original_config_file) + + if num_in_channels is not None: + original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels + + if "control_stage_config" not in original_config["model"]["params"]: + raise ValueError("`control_stage_config` not present in original config") + + controlnet = convert_controlnet_checkpoint( + checkpoint, + original_config, + checkpoint_path, + image_size, + upcast_attention, + extract_ema, + use_linear_projection=use_linear_projection, + cross_attention_dim=cross_attention_dim, + ) + + return controlnet diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py new file 
mode 100644 index 000000000..55ff51c62 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py @@ -0,0 +1,473 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from functools import partial +from typing import Dict, List, Optional, Union + +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict +from flax.jax_utils import unreplicate +from flax.training.common_utils import shard +from packaging import version +from PIL import Image +from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel + +from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel +from ...schedulers import ( + FlaxDDIMScheduler, + FlaxDPMSolverMultistepScheduler, + FlaxLMSDiscreteScheduler, + FlaxPNDMScheduler, +) +from ...utils import deprecate, logging, replace_example_docstring +from ..pipeline_flax_utils import FlaxDiffusionPipeline +from .pipeline_output import FlaxStableDiffusionPipelineOutput +from .safety_checker_flax import FlaxStableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +# Set to True to use python for loop instead of jax.fori_loop for easier debugging +DEBUG = False + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import jax + >>> import numpy as np + >>> from flax.jax_utils import replicate + >>> from flax.training.common_utils import shard + + >>> from diffusers import FlaxStableDiffusionPipeline + + >>> pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", revision="bf16", dtype=jax.numpy.bfloat16 + ... ) + + >>> prompt = "a photo of an astronaut riding a horse on mars" + + >>> prng_seed = jax.random.PRNGKey(0) + >>> num_inference_steps = 50 + + >>> num_samples = jax.device_count() + >>> prompt = num_samples * [prompt] + >>> prompt_ids = pipeline.prepare_inputs(prompt) + # shard inputs and rng + + >>> params = replicate(params) + >>> prng_seed = jax.random.split(prng_seed, jax.device_count()) + >>> prompt_ids = shard(prompt_ids) + + >>> images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images + >>> images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) + ``` +""" + + +class FlaxStableDiffusionPipeline(FlaxDiffusionPipeline): + r""" + Flax-based pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`FlaxAutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. 
+ text_encoder ([`~transformers.FlaxCLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`FlaxUNet2DConditionModel`]): + A `FlaxUNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or + [`FlaxDPMSolverMultistepScheduler`]. + safety_checker ([`FlaxStableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + def __init__( + self, + vae: FlaxAutoencoderKL, + text_encoder: FlaxCLIPTextModel, + tokenizer: CLIPTokenizer, + unet: FlaxUNet2DConditionModel, + scheduler: Union[ + FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler + ], + safety_checker: FlaxStableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + dtype: jnp.dtype = jnp.float32, + ): + super().__init__() + self.dtype = dtype + + if safety_checker is None: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def prepare_inputs(self, prompt: Union[str, List[str]]): + if not isinstance(prompt, (str, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + text_input = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + return text_input.input_ids + + def _get_has_nsfw_concepts(self, features, params): + has_nsfw_concepts = self.safety_checker(features, params) + return has_nsfw_concepts + + def _run_safety_checker(self, images, safety_model_params, jit=False): + # safety_model_params should already be replicated when jit is True + pil_images = [Image.fromarray(image) for image in images] + features = self.feature_extractor(pil_images, return_tensors="np").pixel_values + + if jit: + features = shard(features) + has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) + has_nsfw_concepts = unshard(has_nsfw_concepts) + safety_model_params = unreplicate(safety_model_params) + else: + has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) + + images_was_copied = False + for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): + if has_nsfw_concept: + if not images_was_copied: + images_was_copied = True + images = images.copy() + + images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image + + if any(has_nsfw_concepts): + warnings.warn( + "Potential NSFW content was detected in one or more images. A black image will be returned" + " instead. Try again with a different prompt and/or seed." 
+ ) + + return images, has_nsfw_concepts + + def _generate( + self, + prompt_ids: jnp.array, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int, + height: int, + width: int, + guidance_scale: float, + latents: Optional[jnp.ndarray] = None, + neg_prompt_ids: Optional[jnp.ndarray] = None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + # get prompt text embeddings + prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0] + + # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0` + # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0` + batch_size = prompt_ids.shape[0] + + max_length = prompt_ids.shape[-1] + + if neg_prompt_ids is None: + uncond_input = self.tokenizer( + [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np" + ).input_ids + else: + uncond_input = neg_prompt_ids + negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0] + context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) + + # Ensure model output will be `float32` before going into the scheduler + guidance_scale = jnp.array([guidance_scale], dtype=jnp.float32) + + latents_shape = ( + batch_size, + self.unet.config.in_channels, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if latents is None: + latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + + def loop_body(step, args): + latents, scheduler_state = args + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + latents_input = jnp.concatenate([latents] * 2) + + t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] + timestep = jnp.broadcast_to(t, latents_input.shape[0]) + + latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) + + # predict the noise residual + noise_pred = self.unet.apply( + {"params": params["unet"]}, + jnp.array(latents_input), + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=context, + ).sample + # perform guidance + noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) + noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() + return latents, scheduler_state + + scheduler_state = self.scheduler.set_timesteps( + params["scheduler"], num_inference_steps=num_inference_steps, shape=latents.shape + ) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * params["scheduler"].init_noise_sigma + + if DEBUG: + # run with python for loop + for i in range(num_inference_steps): + latents, scheduler_state = loop_body(i, (latents, scheduler_state)) + else: + latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state)) + + # scale and decode the image latents with vae + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample + + image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) + return image + + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt_ids: jnp.array, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int = 50, + height: Optional[int] = None, + width: Optional[int] = None, + guidance_scale: Union[float, jnp.ndarray] = 7.5, + latents: jnp.ndarray = None, + neg_prompt_ids: jnp.ndarray = None, + return_dict: bool = True, + jit: bool = False, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + latents (`jnp.ndarray`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + array is generated by sampling using the supplied random `generator`. + jit (`bool`, defaults to `False`): + Whether to run `pmap` versions of the generation and safety scoring functions. 
+ + + + This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a + future release. + + + + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of + a plain tuple. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated images + and the second element is a list of `bool`s indicating whether the corresponding generated image + contains "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + if isinstance(guidance_scale, float): + # Convert to a tensor so each device gets a copy. Follow the prompt_ids for + # shape information, as they may be sharded (when `jit` is `True`), or not. + guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) + if len(prompt_ids.shape) > 2: + # Assume sharded + guidance_scale = guidance_scale[:, None] + + if jit: + images = _p_generate( + self, + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + ) + else: + images = self._generate( + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + ) + + if self.safety_checker is not None: + safety_params = params["safety_checker"] + images_uint8_casted = (images * 255).round().astype("uint8") + num_devices, batch_size = images.shape[:2] + + images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) + images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit) + images = np.asarray(images).copy() + + # block images + if any(has_nsfw_concept): + for i, is_nsfw in enumerate(has_nsfw_concept): + if is_nsfw: + images[i, 0] = np.asarray(images_uint8_casted[i]) + + images = images.reshape(num_devices, batch_size, height, width, 3) + else: + images = np.asarray(images) + has_nsfw_concept = False + + if not return_dict: + return (images, has_nsfw_concept) + + return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) + + +# Static argnums are pipe, num_inference_steps, height, width. A change would trigger recompilation. +# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). +@partial( + jax.pmap, + in_axes=(None, 0, 0, 0, None, None, None, 0, 0, 0), + static_broadcasted_argnums=(0, 4, 5, 6), +) +def _p_generate( + pipe, + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, +): + return pipe._generate( + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + ) + + +@partial(jax.pmap, static_broadcasted_argnums=(0,)) +def _p_get_has_nsfw_concepts(pipe, features, params): + return pipe._get_has_nsfw_concepts(features, params) + + +def unshard(x: jnp.ndarray): + # einops.rearrange(x, 'd b ... 
-> (d b) ...') + num_devices, batch_size = x.shape[:2] + rest = x.shape[2:] + return x.reshape(num_devices * batch_size, *rest) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py new file mode 100644 index 000000000..7792bc097 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py @@ -0,0 +1,532 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from functools import partial +from typing import Dict, List, Optional, Union + +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict +from flax.jax_utils import unreplicate +from flax.training.common_utils import shard +from PIL import Image +from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel + +from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel +from ...schedulers import ( + FlaxDDIMScheduler, + FlaxDPMSolverMultistepScheduler, + FlaxLMSDiscreteScheduler, + FlaxPNDMScheduler, +) +from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring +from ..pipeline_flax_utils import FlaxDiffusionPipeline +from .pipeline_output import FlaxStableDiffusionPipelineOutput +from .safety_checker_flax import FlaxStableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +# Set to True to use python for loop instead of jax.fori_loop for easier debugging +DEBUG = False + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import jax + >>> import numpy as np + >>> import jax.numpy as jnp + >>> from flax.jax_utils import replicate + >>> from flax.training.common_utils import shard + >>> import requests + >>> from io import BytesIO + >>> from PIL import Image + >>> from diffusers import FlaxStableDiffusionImg2ImgPipeline + + + >>> def create_key(seed=0): + ... return jax.random.PRNGKey(seed) + + + >>> rng = create_key(0) + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + >>> response = requests.get(url) + >>> init_img = Image.open(BytesIO(response.content)).convert("RGB") + >>> init_img = init_img.resize((768, 512)) + + >>> prompts = "A fantasy landscape, trending on artstation" + + >>> pipeline, params = FlaxStableDiffusionImg2ImgPipeline.from_pretrained( + ... "CompVis/stable-diffusion-v1-4", + ... revision="flax", + ... dtype=jnp.bfloat16, + ... ) + + >>> num_samples = jax.device_count() + >>> rng = jax.random.split(rng, jax.device_count()) + >>> prompt_ids, processed_image = pipeline.prepare_inputs( + ... prompt=[prompts] * num_samples, image=[init_img] * num_samples + ... 
) + >>> p_params = replicate(params) + >>> prompt_ids = shard(prompt_ids) + >>> processed_image = shard(processed_image) + + >>> output = pipeline( + ... prompt_ids=prompt_ids, + ... image=processed_image, + ... params=p_params, + ... prng_seed=rng, + ... strength=0.75, + ... num_inference_steps=50, + ... jit=True, + ... height=512, + ... width=768, + ... ).images + + >>> output_images = pipeline.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:]))) + ``` +""" + + +class FlaxStableDiffusionImg2ImgPipeline(FlaxDiffusionPipeline): + r""" + Flax-based pipeline for text-guided image-to-image generation using Stable Diffusion. + + This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`FlaxAutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.FlaxCLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`FlaxUNet2DConditionModel`]): + A `FlaxUNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or + [`FlaxDPMSolverMultistepScheduler`]. + safety_checker ([`FlaxStableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + def __init__( + self, + vae: FlaxAutoencoderKL, + text_encoder: FlaxCLIPTextModel, + tokenizer: CLIPTokenizer, + unet: FlaxUNet2DConditionModel, + scheduler: Union[ + FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler + ], + safety_checker: FlaxStableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + dtype: jnp.dtype = jnp.float32, + ): + super().__init__() + self.dtype = dtype + + if safety_checker is None: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def prepare_inputs(self, prompt: Union[str, List[str]], image: Union[Image.Image, List[Image.Image]]): + if not isinstance(prompt, (str, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if not isinstance(image, (Image.Image, list)): + raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}") + + if isinstance(image, Image.Image): + image = [image] + + processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image]) + + text_input = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + return text_input.input_ids, processed_images + + def _get_has_nsfw_concepts(self, features, params): + has_nsfw_concepts = self.safety_checker(features, params) + return has_nsfw_concepts + + def _run_safety_checker(self, images, safety_model_params, jit=False): + # safety_model_params should already be replicated when jit is True + pil_images = [Image.fromarray(image) for image in images] + features = self.feature_extractor(pil_images, return_tensors="np").pixel_values + + if jit: + features = shard(features) + has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) + has_nsfw_concepts = unshard(has_nsfw_concepts) + safety_model_params = unreplicate(safety_model_params) + else: + has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) + + images_was_copied = False + for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): + if has_nsfw_concept: + if not images_was_copied: + images_was_copied = True + images = images.copy() + + images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image + + if any(has_nsfw_concepts): + warnings.warn( + "Potential NSFW content was detected in one or more images. A black image will be returned" + " instead. Try again with a different prompt and/or seed." 
+ ) + + return images, has_nsfw_concepts + + def get_timestep_start(self, num_inference_steps, strength): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + + return t_start + + def _generate( + self, + prompt_ids: jnp.ndarray, + image: jnp.ndarray, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + start_timestep: int, + num_inference_steps: int, + height: int, + width: int, + guidance_scale: float, + noise: Optional[jnp.ndarray] = None, + neg_prompt_ids: Optional[jnp.ndarray] = None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + # get prompt text embeddings + prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0] + + # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0` + # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0` + batch_size = prompt_ids.shape[0] + + max_length = prompt_ids.shape[-1] + + if neg_prompt_ids is None: + uncond_input = self.tokenizer( + [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np" + ).input_ids + else: + uncond_input = neg_prompt_ids + negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0] + context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) + + latents_shape = ( + batch_size, + self.unet.config.in_channels, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if noise is None: + noise = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) + else: + if noise.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {noise.shape}, expected {latents_shape}") + + # Create init_latents + init_latent_dist = self.vae.apply({"params": params["vae"]}, image, method=self.vae.encode).latent_dist + init_latents = init_latent_dist.sample(key=prng_seed).transpose((0, 3, 1, 2)) + init_latents = self.vae.config.scaling_factor * init_latents + + def loop_body(step, args): + latents, scheduler_state = args + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + latents_input = jnp.concatenate([latents] * 2) + + t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] + timestep = jnp.broadcast_to(t, latents_input.shape[0]) + + latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) + + # predict the noise residual + noise_pred = self.unet.apply( + {"params": params["unet"]}, + jnp.array(latents_input), + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=context, + ).sample + # perform guidance + noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) + noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() + return latents, scheduler_state + + scheduler_state = self.scheduler.set_timesteps( + params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape + ) + + latent_timestep = scheduler_state.timesteps[start_timestep : start_timestep + 1].repeat(batch_size) + + latents = self.scheduler.add_noise(params["scheduler"], init_latents, noise, latent_timestep) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * params["scheduler"].init_noise_sigma + + if DEBUG: + # run with python for loop + for i in range(start_timestep, num_inference_steps): + latents, scheduler_state = loop_body(i, (latents, scheduler_state)) + else: + latents, _ = jax.lax.fori_loop(start_timestep, num_inference_steps, loop_body, (latents, scheduler_state)) + + # scale and decode the image latents with vae + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample + + image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) + return image + + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt_ids: jnp.ndarray, + image: jnp.ndarray, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + strength: float = 0.8, + num_inference_steps: int = 50, + height: Optional[int] = None, + width: Optional[int] = None, + guidance_scale: Union[float, jnp.ndarray] = 7.5, + noise: jnp.ndarray = None, + neg_prompt_ids: jnp.ndarray = None, + return_dict: bool = True, + jit: bool = False, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt_ids (`jnp.ndarray`): + The prompt or prompts to guide image generation. + image (`jnp.ndarray`): + Array representing an image batch to be used as the starting point. + params (`Dict` or `FrozenDict`): + Dictionary containing the model parameters/weights. + prng_seed (`jax.Array` or `jax.Array`): + Array containing random number generator key. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + noise (`jnp.ndarray`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. The array is generated by + sampling using the supplied random `generator`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of + a plain tuple. + jit (`bool`, defaults to `False`): + Whether to run `pmap` versions of the generation and safety scoring functions. + + + + This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a + future release. + + + + Examples: + + Returns: + [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated images + and the second element is a list of `bool`s indicating whether the corresponding generated image + contains "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + if isinstance(guidance_scale, float): + # Convert to a tensor so each device gets a copy. Follow the prompt_ids for + # shape information, as they may be sharded (when `jit` is `True`), or not. 
+ guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) + if len(prompt_ids.shape) > 2: + # Assume sharded + guidance_scale = guidance_scale[:, None] + + start_timestep = self.get_timestep_start(num_inference_steps, strength) + + if jit: + images = _p_generate( + self, + prompt_ids, + image, + params, + prng_seed, + start_timestep, + num_inference_steps, + height, + width, + guidance_scale, + noise, + neg_prompt_ids, + ) + else: + images = self._generate( + prompt_ids, + image, + params, + prng_seed, + start_timestep, + num_inference_steps, + height, + width, + guidance_scale, + noise, + neg_prompt_ids, + ) + + if self.safety_checker is not None: + safety_params = params["safety_checker"] + images_uint8_casted = (images * 255).round().astype("uint8") + num_devices, batch_size = images.shape[:2] + + images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) + images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit) + images = np.asarray(images) + + # block images + if any(has_nsfw_concept): + for i, is_nsfw in enumerate(has_nsfw_concept): + if is_nsfw: + images[i] = np.asarray(images_uint8_casted[i]) + + images = images.reshape(num_devices, batch_size, height, width, 3) + else: + images = np.asarray(images) + has_nsfw_concept = False + + if not return_dict: + return (images, has_nsfw_concept) + + return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) + + +# Static argnums are pipe, start_timestep, num_inference_steps, height, width. A change would trigger recompilation. +# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). +@partial( + jax.pmap, + in_axes=(None, 0, 0, 0, 0, None, None, None, None, 0, 0, 0), + static_broadcasted_argnums=(0, 5, 6, 7, 8), +) +def _p_generate( + pipe, + prompt_ids, + image, + params, + prng_seed, + start_timestep, + num_inference_steps, + height, + width, + guidance_scale, + noise, + neg_prompt_ids, +): + return pipe._generate( + prompt_ids, + image, + params, + prng_seed, + start_timestep, + num_inference_steps, + height, + width, + guidance_scale, + noise, + neg_prompt_ids, + ) + + +@partial(jax.pmap, static_broadcasted_argnums=(0,)) +def _p_get_has_nsfw_concepts(pipe, features, params): + return pipe._get_has_nsfw_concepts(features, params) + + +def unshard(x: jnp.ndarray): + # einops.rearrange(x, 'd b ... -> (d b) ...') + num_devices, batch_size = x.shape[:2] + rest = x.shape[2:] + return x.reshape(num_devices * batch_size, *rest) + + +def preprocess(image, dtype): + w, h = image.size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) + image = jnp.array(image).astype(dtype) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + return 2.0 * image - 1.0 diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py new file mode 100644 index 000000000..f6bb0ac29 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py @@ -0,0 +1,589 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from functools import partial +from typing import Dict, List, Optional, Union + +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict +from flax.jax_utils import unreplicate +from flax.training.common_utils import shard +from packaging import version +from PIL import Image +from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel + +from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel +from ...schedulers import ( + FlaxDDIMScheduler, + FlaxDPMSolverMultistepScheduler, + FlaxLMSDiscreteScheduler, + FlaxPNDMScheduler, +) +from ...utils import PIL_INTERPOLATION, deprecate, logging, replace_example_docstring +from ..pipeline_flax_utils import FlaxDiffusionPipeline +from .pipeline_output import FlaxStableDiffusionPipelineOutput +from .safety_checker_flax import FlaxStableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +# Set to True to use python for loop instead of jax.fori_loop for easier debugging +DEBUG = False + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import jax + >>> import numpy as np + >>> from flax.jax_utils import replicate + >>> from flax.training.common_utils import shard + >>> import PIL + >>> import requests + >>> from io import BytesIO + >>> from diffusers import FlaxStableDiffusionInpaintPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + + >>> init_image = download_image(img_url).resize((512, 512)) + >>> mask_image = download_image(mask_url).resize((512, 512)) + + >>> pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained( + ... "xvjiarui/stable-diffusion-2-inpainting" + ... ) + + >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + >>> prng_seed = jax.random.PRNGKey(0) + >>> num_inference_steps = 50 + + >>> num_samples = jax.device_count() + >>> prompt = num_samples * [prompt] + >>> init_image = num_samples * [init_image] + >>> mask_image = num_samples * [mask_image] + >>> prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs( + ... prompt, init_image, mask_image + ... ) + # shard inputs and rng + + >>> params = replicate(params) + >>> prng_seed = jax.random.split(prng_seed, jax.device_count()) + >>> prompt_ids = shard(prompt_ids) + >>> processed_masked_images = shard(processed_masked_images) + >>> processed_masks = shard(processed_masks) + + >>> images = pipeline( + ... prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True + ... 
).images + >>> images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) + ``` +""" + + +class FlaxStableDiffusionInpaintPipeline(FlaxDiffusionPipeline): + r""" + Flax-based pipeline for text-guided image inpainting using Stable Diffusion. + + + + 🧪 This is an experimental feature! + + + + This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`FlaxAutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.FlaxCLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`FlaxUNet2DConditionModel`]): + A `FlaxUNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or + [`FlaxDPMSolverMultistepScheduler`]. + safety_checker ([`FlaxStableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + def __init__( + self, + vae: FlaxAutoencoderKL, + text_encoder: FlaxCLIPTextModel, + tokenizer: CLIPTokenizer, + unet: FlaxUNet2DConditionModel, + scheduler: Union[ + FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler + ], + safety_checker: FlaxStableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + dtype: jnp.dtype = jnp.float32, + ): + super().__init__() + self.dtype = dtype + + if safety_checker is None: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
+ ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def prepare_inputs( + self, + prompt: Union[str, List[str]], + image: Union[Image.Image, List[Image.Image]], + mask: Union[Image.Image, List[Image.Image]], + ): + if not isinstance(prompt, (str, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if not isinstance(image, (Image.Image, list)): + raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}") + + if isinstance(image, Image.Image): + image = [image] + + if not isinstance(mask, (Image.Image, list)): + raise ValueError(f"mask has to be of type `PIL.Image.Image` or list but is {type(mask)}") + + if isinstance(mask, Image.Image): + mask = [mask] + + processed_images = jnp.concatenate([preprocess_image(img, jnp.float32) for img in image]) + processed_masks = jnp.concatenate([preprocess_mask(m, jnp.float32) for m in mask]) + # processed_masks[processed_masks < 0.5] = 0 + processed_masks = processed_masks.at[processed_masks < 0.5].set(0) + # processed_masks[processed_masks >= 0.5] = 1 + processed_masks = processed_masks.at[processed_masks >= 0.5].set(1) + + processed_masked_images = processed_images * (processed_masks < 0.5) + + text_input = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + return text_input.input_ids, processed_masked_images, processed_masks + + def _get_has_nsfw_concepts(self, features, params): + has_nsfw_concepts = self.safety_checker(features, params) + return has_nsfw_concepts + + def _run_safety_checker(self, images, safety_model_params, jit=False): + # safety_model_params should already be replicated when jit is True + pil_images = [Image.fromarray(image) for image in images] + features = self.feature_extractor(pil_images, return_tensors="np").pixel_values + + if jit: + features = shard(features)
+ has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) + has_nsfw_concepts = unshard(has_nsfw_concepts) + safety_model_params = unreplicate(safety_model_params) + else: + has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) + + images_was_copied = False + for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): + if has_nsfw_concept: + if not images_was_copied: + images_was_copied = True + images = images.copy() + + images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image + + if any(has_nsfw_concepts): + warnings.warn( + "Potential NSFW content was detected in one or more images. A black image will be returned" + " instead. Try again with a different prompt and/or seed." + ) + + return images, has_nsfw_concepts + + def _generate( + self, + prompt_ids: jnp.ndarray, + mask: jnp.ndarray, + masked_image: jnp.ndarray, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int, + height: int, + width: int, + guidance_scale: float, + latents: Optional[jnp.ndarray] = None, + neg_prompt_ids: Optional[jnp.ndarray] = None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + # get prompt text embeddings + prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0] + + # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0` + # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0` + batch_size = prompt_ids.shape[0] + + max_length = prompt_ids.shape[-1] + + if neg_prompt_ids is None: + uncond_input = self.tokenizer( + [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np" + ).input_ids + else: + uncond_input = neg_prompt_ids + negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0] + context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) + + latents_shape = ( + batch_size, + self.vae.config.latent_channels, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if latents is None: + latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=self.dtype) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + + prng_seed, mask_prng_seed = jax.random.split(prng_seed) + + masked_image_latent_dist = self.vae.apply( + {"params": params["vae"]}, masked_image, method=self.vae.encode + ).latent_dist + masked_image_latents = masked_image_latent_dist.sample(key=mask_prng_seed).transpose((0, 3, 1, 2)) + masked_image_latents = self.vae.config.scaling_factor * masked_image_latents + del mask_prng_seed + + mask = jax.image.resize(mask, (*mask.shape[:-2], *masked_image_latents.shape[-2:]), method="nearest") + + # 8. Check that sizes of mask, masked image and latents match + num_channels_latents = self.vae.config.latent_channels + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! 
The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + + def loop_body(step, args): + latents, mask, masked_image_latents, scheduler_state = args + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + latents_input = jnp.concatenate([latents] * 2) + mask_input = jnp.concatenate([mask] * 2) + masked_image_latents_input = jnp.concatenate([masked_image_latents] * 2) + + t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] + timestep = jnp.broadcast_to(t, latents_input.shape[0]) + + latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) + # concat latents, mask, masked_image_latents in the channel dimension + latents_input = jnp.concatenate([latents_input, mask_input, masked_image_latents_input], axis=1) + + # predict the noise residual + noise_pred = self.unet.apply( + {"params": params["unet"]}, + jnp.array(latents_input), + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=context, + ).sample + # perform guidance + noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) + noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() + return latents, mask, masked_image_latents, scheduler_state + + scheduler_state = self.scheduler.set_timesteps( + params["scheduler"], num_inference_steps=num_inference_steps, shape=latents.shape + ) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * params["scheduler"].init_noise_sigma + + if DEBUG: + # run with python for loop + for i in range(num_inference_steps): + latents, mask, masked_image_latents, scheduler_state = loop_body( + i, (latents, mask, masked_image_latents, scheduler_state) + ) + else: + latents, _, _, _ = jax.lax.fori_loop( + 0, num_inference_steps, loop_body, (latents, mask, masked_image_latents, scheduler_state) + ) + + # scale and decode the image latents with vae + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample + + image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) + return image + + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt_ids: jnp.ndarray, + mask: jnp.ndarray, + masked_image: jnp.ndarray, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int = 50, + height: Optional[int] = None, + width: Optional[int] = None, + guidance_scale: Union[float, jnp.ndarray] = 7.5, + latents: jnp.ndarray = None, + neg_prompt_ids: jnp.ndarray = None, + return_dict: bool = True, + jit: bool = False, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. 
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + latents (`jnp.ndarray`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + array is generated by sampling using the supplied random `generator`. + jit (`bool`, defaults to `False`): + Whether to run `pmap` versions of the generation and safety scoring functions. + + + + This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a + future release. + + + + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of + a plain tuple. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated images + and the second element is a list of `bool`s indicating whether the corresponding generated image + contains "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + masked_image = jax.image.resize(masked_image, (*masked_image.shape[:-2], height, width), method="bicubic") + mask = jax.image.resize(mask, (*mask.shape[:-2], height, width), method="nearest") + + if isinstance(guidance_scale, float): + # Convert to a tensor so each device gets a copy. Follow the prompt_ids for + # shape information, as they may be sharded (when `jit` is `True`), or not. 
+ guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) + if len(prompt_ids.shape) > 2: + # Assume sharded + guidance_scale = guidance_scale[:, None] + + if jit: + images = _p_generate( + self, + prompt_ids, + mask, + masked_image, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + ) + else: + images = self._generate( + prompt_ids, + mask, + masked_image, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + ) + + if self.safety_checker is not None: + safety_params = params["safety_checker"] + images_uint8_casted = (images * 255).round().astype("uint8") + num_devices, batch_size = images.shape[:2] + + images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) + images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit) + images = np.asarray(images) + + # block images + if any(has_nsfw_concept): + for i, is_nsfw in enumerate(has_nsfw_concept): + if is_nsfw: + images[i] = np.asarray(images_uint8_casted[i]) + + images = images.reshape(num_devices, batch_size, height, width, 3) + else: + images = np.asarray(images) + has_nsfw_concept = False + + if not return_dict: + return (images, has_nsfw_concept) + + return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) + + +# Static argnums are pipe, num_inference_steps, height, width. A change would trigger recompilation. +# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). +@partial( + jax.pmap, + in_axes=(None, 0, 0, 0, 0, 0, None, None, None, 0, 0, 0), + static_broadcasted_argnums=(0, 6, 7, 8), +) +def _p_generate( + pipe, + prompt_ids, + mask, + masked_image, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, +): + return pipe._generate( + prompt_ids, + mask, + masked_image, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + ) + + +@partial(jax.pmap, static_broadcasted_argnums=(0,)) +def _p_get_has_nsfw_concepts(pipe, features, params): + return pipe._get_has_nsfw_concepts(features, params) + + +def unshard(x: jnp.ndarray): + # einops.rearrange(x, 'd b ... 
-> (d b) ...') + num_devices, batch_size = x.shape[:2] + rest = x.shape[2:] + return x.reshape(num_devices * batch_size, *rest) + + +def preprocess_image(image, dtype): + w, h = image.size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) + image = jnp.array(image).astype(dtype) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + return 2.0 * image - 1.0 + + +def preprocess_mask(mask, dtype): + w, h = mask.size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + mask = mask.resize((w, h)) + mask = jnp.array(mask.convert("L")).astype(dtype) / 255.0 + mask = jnp.expand_dims(mask, axis=(0, 1)) + + return mask diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py new file mode 100644 index 000000000..311347dcc --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py @@ -0,0 +1,487 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import torch +from transformers import CLIPImageProcessor, CLIPTokenizer + +from ...configuration_utils import FrozenDict +from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ...utils import deprecate, logging +from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel +from ..pipeline_utils import DiffusionPipeline +from . import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) + + +class OnnxStableDiffusionPipeline(DiffusionPipeline): + vae_encoder: OnnxRuntimeModel + vae_decoder: OnnxRuntimeModel + text_encoder: OnnxRuntimeModel + tokenizer: CLIPTokenizer + unet: OnnxRuntimeModel + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] + safety_checker: OnnxRuntimeModel + feature_extractor: CLIPImageProcessor + + _optional_components = ["safety_checker", "feature_extractor"] + _is_onnx = True + + def __init__( + self, + vae_encoder: OnnxRuntimeModel, + vae_decoder: OnnxRuntimeModel, + text_encoder: OnnxRuntimeModel, + tokenizer: CLIPTokenizer, + unet: OnnxRuntimeModel, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + safety_checker: OnnxRuntimeModel, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. 
Please make sure " + "to update the config accordingly as leaving `steps_offset` might lead to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae_encoder=vae_encoder, + vae_decoder=vae_decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def _encode_prompt( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: Optional[int], + do_classifier_free_guidance: bool, + negative_prompt: Optional[str], + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings.
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids + + if not np.array_equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] + + prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] * batch_size + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="np", + ) + negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] + + if do_classifier_free_guidance: + negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def check_inputs( + self, + prompt: Union[str, List[str]], + height: Optional[int], + width: Optional[int], + callback_steps: int, + negative_prompt: Optional[str] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = 512, + width: Optional[int] = 512, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[np.random.RandomState] = None, + latents: Optional[np.ndarray] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, np.ndarray], None]] = None, + callback_steps: int = 1, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`PIL.Image.Image` or List[`PIL.Image.Image`] or `torch.FloatTensor`): + `Image`, or tensor representing an image batch which will be upscaled. * + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. 
+ guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` + is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`np.random.RandomState`, *optional*): + One or a list of [numpy generator(s)](TODO) to make generation deterministic. + latents (`np.ndarray`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + # check inputs.
Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if generator is None: + generator = np.random + + # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds = self._encode_prompt( + prompt, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # get the initial random noise unless the user supplied it + latents_dtype = prompt_embeds.dtype + latents_shape = (batch_size * num_images_per_prompt, 4, height // 8, width // 8) + if latents is None: + latents = generator.randn(*latents_shape).astype(latents_dtype) + elif latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + + # set timesteps + self.scheduler.set_timesteps(num_inference_steps) + + latents = latents * np.float64(self.scheduler.init_noise_sigma) + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + timestep_dtype = next( + (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" + ) + timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] + + for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) + latent_model_input = latent_model_input.cpu().numpy() + + # predict the noise residual + timestep = np.array([t], dtype=timestep_dtype) + noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds) + noise_pred = noise_pred[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + scheduler_output = self.scheduler.step( + torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs + ) + latents = scheduler_output.prev_sample.numpy() + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + latents = 1 / 0.18215 * latents + # image = self.vae_decoder(latent_sample=latents)[0] + # it seems like there is a strange result when using the half-precision vae decoder if batchsize>1 + image = np.concatenate(
[self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] + ) + + image = np.clip(image / 2 + 0.5, 0, 1) + image = image.transpose((0, 2, 3, 1)) + + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor( + self.numpy_to_pil(image), return_tensors="np" + ).pixel_values.astype(image.dtype) + + images, has_nsfw_concept = [], [] + for i in range(image.shape[0]): + image_i, has_nsfw_concept_i = self.safety_checker( + clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] + ) + images.append(image_i) + has_nsfw_concept.append(has_nsfw_concept_i[0]) + image = np.concatenate(images) + else: + has_nsfw_concept = None + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + +class StableDiffusionOnnxPipeline(OnnxStableDiffusionPipeline): + def __init__( + self, + vae_encoder: OnnxRuntimeModel, + vae_decoder: OnnxRuntimeModel, + text_encoder: OnnxRuntimeModel, + tokenizer: CLIPTokenizer, + unet: OnnxRuntimeModel, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + safety_checker: OnnxRuntimeModel, + feature_extractor: CLIPImageProcessor, + ): + deprecation_message = "Please use `OnnxStableDiffusionPipeline` instead of `StableDiffusionOnnxPipeline`." + deprecate("StableDiffusionOnnxPipeline", "1.0.0", deprecation_message) + super().__init__( + vae_encoder=vae_encoder, + vae_decoder=vae_decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py new file mode 100644 index 000000000..c39409886 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py @@ -0,0 +1,549 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTokenizer + +from ...configuration_utils import FrozenDict +from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ...utils import PIL_INTERPOLATION, deprecate, logging +from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel +from ..pipeline_utils import DiffusionPipeline +from . 
import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess with 8->64 +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +class OnnxStableDiffusionImg2ImgPipeline(DiffusionPipeline): + r""" + Pipeline for text-guided image to image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
+ """ + + vae_encoder: OnnxRuntimeModel + vae_decoder: OnnxRuntimeModel + text_encoder: OnnxRuntimeModel + tokenizer: CLIPTokenizer + unet: OnnxRuntimeModel + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] + safety_checker: OnnxRuntimeModel + feature_extractor: CLIPImageProcessor + + _optional_components = ["safety_checker", "feature_extractor"] + _is_onnx = True + + def __init__( + self, + vae_encoder: OnnxRuntimeModel, + vae_decoder: OnnxRuntimeModel, + text_encoder: OnnxRuntimeModel, + tokenizer: CLIPTokenizer, + unet: OnnxRuntimeModel, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + safety_checker: OnnxRuntimeModel, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + vae_encoder=vae_encoder, + vae_decoder=vae_decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: Optional[int], + do_classifier_free_guidance: bool, + negative_prompt: Optional[str], + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids + + if not np.array_equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] + + prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] * batch_size + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="np", + ) + negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] + + if do_classifier_free_guidance: + negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def check_inputs( + self, + prompt: Union[str, List[str]], + callback_steps: int, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[np.ndarray, PIL.Image.Image] = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[np.random.RandomState] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, np.ndarray], None]] = None, + callback_steps: int = 1, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. 
+ image (`np.ndarray` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter will be modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`np.random.RandomState`, *optional*): + A np.random.RandomState to make generation deterministic. + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. 
+ When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + # check inputs. Raise error if not correct + self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if generator is None: + generator = np.random + + # set timesteps + self.scheduler.set_timesteps(num_inference_steps) + + image = preprocess(image).cpu().numpy() + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds = self._encode_prompt( + prompt, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + latents_dtype = prompt_embeds.dtype + image = image.astype(latents_dtype) + # encode the init image into latents and scale the latents + init_latents = self.vae_encoder(sample=image)[0] + init_latents = 0.18215 * init_latents + + if isinstance(prompt, str): + prompt = [prompt] + if len(prompt) > init_latents.shape[0] and len(prompt) % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {len(prompt)} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = len(prompt) // init_latents.shape[0] + init_latents = np.concatenate([init_latents] * additional_image_per_prompt * num_images_per_prompt, axis=0) + elif len(prompt) > init_latents.shape[0] and len(prompt) % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {len(prompt)} text prompts." 
+ ) + else: + init_latents = np.concatenate([init_latents] * num_images_per_prompt, axis=0) + + # get the original timestep using init_timestep + offset = self.scheduler.config.get("steps_offset", 0) + init_timestep = int(num_inference_steps * strength) + offset + init_timestep = min(init_timestep, num_inference_steps) + + timesteps = self.scheduler.timesteps.numpy()[-init_timestep] + timesteps = np.array([timesteps] * batch_size * num_images_per_prompt) + + # add noise to latents using the timesteps + noise = generator.randn(*init_latents.shape).astype(latents_dtype) + init_latents = self.scheduler.add_noise( + torch.from_numpy(init_latents), torch.from_numpy(noise), torch.from_numpy(timesteps) + ) + init_latents = init_latents.numpy() + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + latents = init_latents + + t_start = max(num_inference_steps - init_timestep + offset, 0) + timesteps = self.scheduler.timesteps[t_start:].numpy() + + timestep_dtype = next( + (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" + ) + timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] + + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) + latent_model_input = latent_model_input.cpu().numpy() + + # predict the noise residual + timestep = np.array([t], dtype=timestep_dtype) + noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[ + 0 + ] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + scheduler_output = self.scheduler.step( + torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs + ) + latents = scheduler_output.prev_sample.numpy() + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + latents = 1 / 0.18215 * latents + # image = self.vae_decoder(latent_sample=latents)[0] + # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1 + image = np.concatenate( + [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] + ) + + image = np.clip(image / 2 + 0.5, 0, 1) + image = image.transpose((0, 2, 3, 1)) + + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor( + self.numpy_to_pil(image), return_tensors="np" + ).pixel_values.astype(image.dtype) + # safety_checker does not support batched inputs yet + images, has_nsfw_concept = [], [] + for i in range(image.shape[0]): + image_i, has_nsfw_concept_i = self.safety_checker( + clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] + ) + 
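+                # Each call above handles a single image: it returns a one-image batch plus a
+                # one-element nsfw flag list, and the per-image results are re-batched with
+                # np.concatenate below.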
images.append(image_i) + has_nsfw_concept.append(has_nsfw_concept_i[0]) + image = np.concatenate(images) + else: + has_nsfw_concept = None + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py new file mode 100644 index 000000000..18d805082 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py @@ -0,0 +1,563 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTokenizer + +from ...configuration_utils import FrozenDict +from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ...utils import PIL_INTERPOLATION, deprecate, logging +from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel +from ..pipeline_utils import DiffusionPipeline +from . import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +NUM_UNET_INPUT_CHANNELS = 9 +NUM_LATENT_CHANNELS = 4 + + +def prepare_mask_and_masked_image(image, mask, latents_shape): + image = np.array(image.convert("RGB").resize((latents_shape[1] * 8, latents_shape[0] * 8))) + image = image[None].transpose(0, 3, 1, 2) + image = image.astype(np.float32) / 127.5 - 1.0 + + image_mask = np.array(mask.convert("L").resize((latents_shape[1] * 8, latents_shape[0] * 8))) + masked_image = image * (image_mask < 127.5) + + mask = mask.resize((latents_shape[1], latents_shape[0]), PIL_INTERPOLATION["nearest"]) + mask = np.array(mask.convert("L")) + mask = mask.astype(np.float32) / 255.0 + mask = mask[None, None] + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + return mask, masked_image + + +class OnnxStableDiffusionInpaintPipeline(DiffusionPipeline): + r""" + Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. 
Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + + vae_encoder: OnnxRuntimeModel + vae_decoder: OnnxRuntimeModel + text_encoder: OnnxRuntimeModel + tokenizer: CLIPTokenizer + unet: OnnxRuntimeModel + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] + safety_checker: OnnxRuntimeModel + feature_extractor: CLIPImageProcessor + + _optional_components = ["safety_checker", "feature_extractor"] + _is_onnx = True + + def __init__( + self, + vae_encoder: OnnxRuntimeModel, + vae_decoder: OnnxRuntimeModel, + text_encoder: OnnxRuntimeModel, + tokenizer: CLIPTokenizer, + unet: OnnxRuntimeModel, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + safety_checker: OnnxRuntimeModel, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + logger.info("`OnnxStableDiffusionInpaintPipeline` is experimental and will very likely change in the future.") + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae_encoder=vae_encoder, + vae_decoder=vae_decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: Optional[int], + do_classifier_free_guidance: bool, + negative_prompt: Optional[str], + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
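+
+        Returns:
+            `np.ndarray`: the encoded prompt embeddings. When classifier-free guidance is enabled,
+            the negative and positive embeddings are concatenated along the batch axis (negative
+            first), so a single UNet forward pass can serve both predictions.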
+ """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids + + if not np.array_equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] + + prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] * batch_size + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="np", + ) + negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] + + if do_classifier_free_guidance: + negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt: Union[str, List[str]], + height: Optional[int], + width: Optional[int], + callback_steps: int, + negative_prompt: Optional[str] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + image: PIL.Image.Image, + mask_image: PIL.Image.Image, + height: Optional[int] = 512, + width: Optional[int] = 512, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[np.random.RandomState] = None, + latents: Optional[np.ndarray] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, np.ndarray], None]] = None, + callback_steps: int = 1, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will + be masked out with `mask_image` and repainted according to `prompt`. + mask_image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted + to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) + instead of 3, so the expected shape would be `(B, H, W, 1)`. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`np.random.RandomState`, *optional*): + A np.random.RandomState to make generation deterministic. + latents (`np.ndarray`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + # check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if generator is None: + generator = np.random + + # set timesteps + self.scheduler.set_timesteps(num_inference_steps) + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds = self._encode_prompt( + prompt, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + num_channels_latents = NUM_LATENT_CHANNELS + latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8) + latents_dtype = prompt_embeds.dtype + if latents is None: + latents = generator.randn(*latents_shape).astype(latents_dtype) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + + # prepare mask and masked_image + mask, masked_image = prepare_mask_and_masked_image(image, mask_image, latents_shape[-2:]) + mask = mask.astype(latents.dtype) + masked_image = masked_image.astype(latents.dtype) + + masked_image_latents = self.vae_encoder(sample=masked_image)[0] + masked_image_latents = 0.18215 * masked_image_latents + + # duplicate mask and masked_image_latents for each generation per prompt + mask = mask.repeat(batch_size * num_images_per_prompt, 0) + masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 0) + + mask = np.concatenate([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + np.concatenate([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + + unet_input_channels = NUM_UNET_INPUT_CHANNELS + if num_channels_latents + num_channels_mask + num_channels_masked_image != unet_input_channels: + raise ValueError( + "Incorrect configuration settings! The config of `pipeline.unet` expects" + f" {unet_input_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + + # set timesteps + self.scheduler.set_timesteps(num_inference_steps) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * np.float64(self.scheduler.init_noise_sigma) + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
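+        # Note: PNDMScheduler and LMSDiscreteScheduler do not expose `eta` on `step()`, so the
+        # `extra_step_kwargs` built below stays empty for them, while DDIMScheduler receives
+        # {"eta": eta}; the signature inspection avoids special-casing scheduler classes.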
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + timestep_dtype = next( + (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" + ) + timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] + + for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents + # concat latents, mask, masked_image_latnets in the channel dimension + latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) + latent_model_input = latent_model_input.cpu().numpy() + latent_model_input = np.concatenate([latent_model_input, mask, masked_image_latents], axis=1) + + # predict the noise residual + timestep = np.array([t], dtype=timestep_dtype) + noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[ + 0 + ] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + scheduler_output = self.scheduler.step( + torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs + ) + latents = scheduler_output.prev_sample.numpy() + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + latents = 1 / 0.18215 * latents + # image = self.vae_decoder(latent_sample=latents)[0] + # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1 + image = np.concatenate( + [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] + ) + + image = np.clip(image / 2 + 0.5, 0, 1) + image = image.transpose((0, 2, 3, 1)) + + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor( + self.numpy_to_pil(image), return_tensors="np" + ).pixel_values.astype(image.dtype) + # safety_checker does not support batched inputs yet + images, has_nsfw_concept = [], [] + for i in range(image.shape[0]): + image_i, has_nsfw_concept_i = self.safety_checker( + clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] + ) + images.append(image_i) + has_nsfw_concept.append(has_nsfw_concept_i[0]) + image = np.concatenate(images) + else: + has_nsfw_concept = None + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py new file mode 100644 index 000000000..58d83de0d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py @@ -0,0 +1,586 @@ +# 
Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTokenizer + +from ...configuration_utils import FrozenDict +from ...schedulers import DDPMScheduler, KarrasDiffusionSchedulers +from ...utils import deprecate, logging +from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel +from ..pipeline_utils import DiffusionPipeline +from . import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) + + +def preprocess(image): + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 32 + + image = [np.array(i.resize((w, h)))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + + return image + + +class OnnxStableDiffusionUpscalePipeline(DiffusionPipeline): + vae: OnnxRuntimeModel + text_encoder: OnnxRuntimeModel + tokenizer: CLIPTokenizer + unet: OnnxRuntimeModel + low_res_scheduler: DDPMScheduler + scheduler: KarrasDiffusionSchedulers + safety_checker: OnnxRuntimeModel + feature_extractor: CLIPImageProcessor + + _optional_components = ["safety_checker", "feature_extractor"] + _is_onnx = True + + def __init__( + self, + vae: OnnxRuntimeModel, + text_encoder: OnnxRuntimeModel, + tokenizer: Any, + unet: OnnxRuntimeModel, + low_res_scheduler: DDPMScheduler, + scheduler: KarrasDiffusionSchedulers, + safety_checker: Optional[OnnxRuntimeModel] = None, + feature_extractor: Optional[CLIPImageProcessor] = None, + max_noise_level: int = 350, + num_latent_channels=4, + num_unet_input_channels=7, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + low_res_scheduler=low_res_scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.register_to_config( + max_noise_level=max_noise_level, + num_latent_channels=num_latent_channels, + num_unet_input_channels=num_unet_input_channels, + ) + + def check_inputs( + self, + prompt: Union[str, List[str]], + image, + noise_level, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, np.ndarray) + and not isinstance(image, list) + ): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}" + ) + + # verify batch size of prompt and image are same if image is a list or tensor or numpy array + if isinstance(image, list) or isinstance(image, np.ndarray): + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if isinstance(image, list): + image_batch_size = len(image) + else: + image_batch_size = image.shape[0] + if batch_size != image_batch_size: + raise ValueError( + f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}." + " Please make sure that passed `prompt` matches the batch size of `image`." + ) + + # check noise level + if noise_level > self.config.max_noise_level: + raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None): + shape = (batch_size, num_channels_latents, height, width) + if latents is None: + latents = generator.randn(*shape).astype(dtype) + elif latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + + return latents + + def decode_latents(self, latents): + latents = 1 / 0.08333 * latents + image = self.vae(latent_sample=latents)[0] + image = np.clip(image / 2 + 0.5, 0, 1) + image = image.transpose((0, 2, 3, 1)) + return image + + def _encode_prompt( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: Optional[int], + do_classifier_free_guidance: bool, + negative_prompt: Optional[str], + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
+ + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids + + if not np.array_equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] + + prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] * batch_size + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="np", + ) + negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] + + if do_classifier_free_guidance: + negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) + + # For classifier free guidance, we need to do two forward passes. 
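+            # e.g. with batch_size=1 and num_images_per_prompt=1 the concatenated array built below
+            # has shape (2, max_length, hidden_dim): the first half later drives the unconditional
+            # prediction and the second half the text-conditioned one when the UNet output is split.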
+            # Here we concatenate the unconditional and text embeddings into a single batch
+            # to avoid doing two forward passes
+            prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
+
+        return prompt_embeds
+
+    def __call__(
+        self,
+        prompt: Union[str, List[str]],
+        image: Union[np.ndarray, PIL.Image.Image, List[PIL.Image.Image]],
+        num_inference_steps: int = 75,
+        guidance_scale: float = 9.0,
+        noise_level: int = 20,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        num_images_per_prompt: Optional[int] = 1,
+        eta: float = 0.0,
+        generator: Optional[Union[np.random.RandomState, List[np.random.RandomState]]] = None,
+        latents: Optional[np.ndarray] = None,
+        prompt_embeds: Optional[np.ndarray] = None,
+        negative_prompt_embeds: Optional[np.ndarray] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
+        callback_steps: Optional[int] = 1,
+    ):
+        r"""
+        Function invoked when calling the pipeline for generation.
+
+        Args:
+            prompt (`str` or `List[str]`):
+                The prompt or prompts to guide the image generation.
+            image (`np.ndarray` or `PIL.Image.Image`):
+                `Image`, or tensor representing an image batch, that will be used as the starting point for the
+                process.
+            num_inference_steps (`int`, *optional*, defaults to 75):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            guidance_scale (`float`, *optional*, defaults to 9.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+            noise_level (`int`, *optional*, defaults to 20):
+                Determines the amount of noise to add to the initial image before performing upscaling.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+                if `guidance_scale` is less than `1`).
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+                [`schedulers.DDIMScheduler`], will be ignored for others.
+            generator (`np.random.RandomState`, *optional*):
+                A np.random.RandomState to make generation deterministic.
+            latents (`np.ndarray`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`np.ndarray`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`np.ndarray`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting.
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + # 1. Check inputs + self.check_inputs( + prompt, + image, + noise_level, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if generator is None: + generator = np.random + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds = self._encode_prompt( + prompt, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + latents_dtype = prompt_embeds.dtype + image = preprocess(image).cpu().numpy() + height, width = image.shape[2:] + + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + self.num_latent_channels, + height, + width, + latents_dtype, + generator, + ) + image = image.astype(latents_dtype) + + self.scheduler.set_timesteps(num_inference_steps) + timesteps = self.scheduler.timesteps + + # Scale the initial noise by the standard deviation required by the scheduler + latents = latents * np.float64(self.scheduler.init_noise_sigma) + + # 5. Add noise to image + noise_level = np.array([noise_level]).astype(np.int64) + noise = generator.randn(*image.shape).astype(latents_dtype) + + image = self.low_res_scheduler.add_noise( + torch.from_numpy(image), torch.from_numpy(noise), torch.from_numpy(noise_level) + ) + image = image.numpy() + + batch_multiplier = 2 if do_classifier_free_guidance else 1 + image = np.concatenate([image] * batch_multiplier * num_images_per_prompt) + noise_level = np.concatenate([noise_level] * image.shape[0]) + + # 7. 
Check that sizes of image and latents match + num_channels_image = image.shape[1] + if self.num_latent_channels + num_channels_image != self.num_unet_input_channels: + raise ValueError( + "Incorrect configuration settings! The config of `pipeline.unet` expects" + f" {self.num_unet_input_channels} but received `num_channels_latents`: {self.num_latent_channels} +" + f" `num_channels_image`: {num_channels_image} " + f" = {self.num_latent_channels + num_channels_image}. Please verify the config of" + " `pipeline.unet` or your `image` input." + ) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + timestep_dtype = next( + (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" + ) + timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] + + # 9. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + latent_model_input = np.concatenate([latent_model_input, image], axis=1) + + # timestep to tensor + timestep = np.array([t], dtype=timestep_dtype) + + # predict the noise residual + noise_pred = self.unet( + sample=latent_model_input, + timestep=timestep, + encoder_hidden_states=prompt_embeds, + class_labels=noise_level, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs + ).prev_sample + latents = latents.numpy() + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 10. 
Post-processing + image = self.decode_latents(latents) + + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor( + self.numpy_to_pil(image), return_tensors="np" + ).pixel_values.astype(image.dtype) + + images, has_nsfw_concept = [], [] + for i in range(image.shape[0]): + image_i, has_nsfw_concept_i = self.safety_checker( + clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] + ) + images.append(image_i) + has_nsfw_concept.append(has_nsfw_concept_i[0]) + image = np.concatenate(images) + else: + has_nsfw_concept = None + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_output.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_output.py new file mode 100644 index 000000000..5fb9b1a14 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_output.py @@ -0,0 +1,45 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput, is_flax_available + + +@dataclass +class StableDiffusionPipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + nsfw_content_detected (`List[bool]`) + List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or + `None` if safety checking could not be performed. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] + + +if is_flax_available(): + import flax + + @flax.struct.dataclass + class FlaxStableDiffusionPipelineOutput(BaseOutput): + """ + Output class for Flax-based Stable Diffusion pipelines. + + Args: + images (`np.ndarray`): + Denoised images of array shape of `(batch_size, height, width, num_channels)`. + nsfw_content_detected (`List[bool]`): + List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content + or `None` if safety checking could not be performed. + """ + + images: np.ndarray + nsfw_content_detected: List[bool] diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py new file mode 100644 index 000000000..9e4e6c186 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py @@ -0,0 +1,1032 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import StableDiffusionPipelineOutput +from .safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionPipeline + + >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt).images[0] + ``` +""" + + +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
+ timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + LoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
+ """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. 
Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. + prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
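+        # (Editor's note, illustrative: with the DDIMScheduler, eta = 0 yields the usual deterministic
+        # DDIM update, while eta = 1 re-injects DDPM-like noise at every step; values in between interpolate.)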
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
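+    # As a concrete illustration (see the denoising loop below): with guidance_scale = 7.5 the final
+    # prediction is noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond), i.e. the
+    # text-conditioned direction is amplified relative to the unconditional prediction.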
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. 
+ latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when + using zero terminal SNR. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + # to deal with lora scaling and other possible forward hooks + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if (ip_adapter_image is not None or ip_adapter_image_embeds is not None) + else None + ) + + # 6.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py new file mode 100644 index 000000000..c410acbed --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py @@ -0,0 +1,860 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import contextlib +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPTextModel, CLIPTokenizer, DPTFeatureExtractor, DPTForDepthEstimation + +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +class StableDiffusionDepth2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin): + r""" + Pipeline for text-guided depth-based image-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. 
+ text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "depth_mask"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + depth_estimator: DPTForDepthEstimation, + feature_extractor: DPTFeatureExtractor, + ): + super().__init__() + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + depth_estimator=depth_estimator, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + 
images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + def prepare_depth_map(self, image, depth_map, batch_size, do_classifier_free_guidance, dtype, device): + if isinstance(image, PIL.Image.Image): + image = [image] + else: + image = list(image) + + if isinstance(image[0], PIL.Image.Image): + width, height = image[0].size + elif isinstance(image[0], np.ndarray): + width, height = image[0].shape[:-1] + else: + height, width = image[0].shape[-2:] + + if depth_map is None: + pixel_values = self.feature_extractor(images=image, return_tensors="pt").pixel_values + pixel_values = pixel_values.to(device=device) + # The DPT-Hybrid model uses batch-norm layers which are not compatible with fp16. + # So we use `torch.autocast` here for half precision inference. + context_manger = torch.autocast("cuda", dtype=dtype) if device.type == "cuda" else contextlib.nullcontext() + with context_manger: + depth_map = self.depth_estimator(pixel_values).predicted_depth + else: + depth_map = depth_map.to(device=device, dtype=dtype) + + depth_map = torch.nn.functional.interpolate( + depth_map.unsqueeze(1), + size=(height // self.vae_scale_factor, width // self.vae_scale_factor), + mode="bicubic", + align_corners=False, + ) + + depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) + depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) + depth_map = 2.0 * (depth_map - depth_min) / (depth_max - depth_min) - 1.0 + depth_map = depth_map.to(dtype) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if depth_map.shape[0] < batch_size: + repeat_by = batch_size // depth_map.shape[0] + depth_map = depth_map.repeat(repeat_by, 1, 1, 1) + + depth_map = torch.cat([depth_map] * 2) if do_classifier_free_guidance else depth_map + return depth_map + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + depth_map: Optional[torch.FloatTensor] = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image` or tensor representing an image batch to be used as the starting point. Can accept image + latents as `image` only if `depth_map` is not `None`. + depth_map (`torch.FloatTensor`, *optional*): + Depth prediction to be used as additional conditioning for the image generation process. If not + defined, it automatically predicts the depth with `self.depth_estimator`. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + Examples: + + ```py + >>> import torch + >>> import requests + >>> from PIL import Image + + >>> from diffusers import StableDiffusionDepth2ImgPipeline + + >>> pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-2-depth", + ... torch_dtype=torch.float16, + ... ) + >>> pipe.to("cuda") + + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> init_image = Image.open(requests.get(url, stream=True).raw) + >>> prompt = "two tigers" + >>> n_propmt = "bad, deformed, ugly, bad anotomy" + >>> image = pipe(prompt=prompt, image=init_image, negative_prompt=n_propmt, strength=0.7).images[0] + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images. 
+ """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 1. Check inputs + self.check_inputs( + prompt, + strength, + callback_steps, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare depth mask + depth_mask = self.prepare_depth_map( + image, + depth_map, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + prompt_embeds.dtype, + device, + ) + + # 5. Preprocess image + image = self.image_processor.preprocess(image) + + # 6. Set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 7. Prepare latent variables + latents = self.prepare_latents( + image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator + ) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + latent_model_input = torch.cat([latent_model_input, depth_mask], dim=1) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + depth_mask = callback_outputs.pop("depth_mask", depth_mask) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py new file mode 100644 index 000000000..afd872904 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py @@ -0,0 +1,420 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
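In the depth-to-image denoising loop above, the depth prediction is normalized per sample to `[-1, 1]` and concatenated to the latents along the channel dimension before each UNet call. Below is a minimal sketch of those two steps under assumed shapes, with random tensors in place of the real DPT output and VAE latents.

```py
# Illustrative sketch (not pipeline code): per-sample depth normalization and channel
# concatenation as done in the depth2img denoising loop. Shapes are examples only.
import torch

depth_map = torch.rand(1, 1, 64, 64) * 10.0        # stand-in for DPT depth at latent resolution
depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)
depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)
depth_mask = 2.0 * (depth_map - depth_min) / (depth_max - depth_min) - 1.0

latents = torch.randn(1, 4, 64, 64)                # 4-channel Stable Diffusion latents
latent_model_input = torch.cat([latents, depth_mask], dim=1)
print(latent_model_input.shape)                    # torch.Size([1, 5, 64, 64]); the depth-conditioned
                                                   # UNet expects this extra conditioning channel
```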
+ +import inspect +from typing import Callable, List, Optional, Union + +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection + +from ...configuration_utils import FrozenDict +from ...image_processor import VaeImageProcessor +from ...models import AutoencoderKL, UNet2DConditionModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . import StableDiffusionPipelineOutput +from .safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class StableDiffusionImageVariationPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline to generate image variations from an input image using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + image_encoder ([`~transformers.CLIPVisionModelWithProjection`]): + Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + # TODO: feature_extractor is required to encode images (if they are in PIL format), + # we should give a descriptive message if the pipeline doesn't have one. + _optional_components = ["safety_checker"] + model_cpu_offload_seq = "image_encoder->unet->vae" + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + image_encoder: CLIPVisionModelWithProjection, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. 
Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + image_encoder=image_encoder, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(images=image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeddings = self.image_encoder(image).image_embeds + image_embeddings = image_embeddings.unsqueeze(1) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = image_embeddings.shape + image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) + image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + negative_prompt_embeds = torch.zeros_like(image_embeddings) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) + + return image_embeddings + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs(self, image, height, width, callback_steps): + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" + f" {type(image)}" + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`): + Image or images to guide image generation. If you provide a tensor, it needs to be compatible with + [`CLIPImageProcessor`](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json). + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + + Examples: + + ```py + from diffusers import StableDiffusionImageVariationPipeline + from PIL import Image + from io import BytesIO + import requests + + pipe = StableDiffusionImageVariationPipeline.from_pretrained( + "lambdalabs/sd-image-variations-diffusers", revision="v2.0" + ) + pipe = pipe.to("cuda") + + url = "https://lh3.googleusercontent.com/y-iFOHfLTwkuQSUegpwDdgKmOjRSTvPxat63dQLB25xkTs4lhIbRUFeNBWZzYf370g=s1200" + + response = requests.get(url) + image = Image.open(BytesIO(response.content)).convert("RGB") + + out = pipe(image, num_images_per_prompt=3, guidance_scale=15) + out["images"][0].save("result.jpg") + ``` + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(image, height, width, callback_steps) + + # 2. Define call parameters + if isinstance(image, PIL.Image.Image): + batch_size = 1 + elif isinstance(image, list): + batch_size = len(image) + else: + batch_size = image.shape[0] + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input image + image_embeddings = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + image_embeddings.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + self.maybe_free_model_hooks() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, image_embeddings.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py new file mode 100644 index 000000000..b43e0eb2a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py @@ -0,0 +1,1113 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
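For classifier-free guidance, the image-variation pipeline's `_encode_image` (shown earlier) tiles the CLIP image embeddings per requested image and pairs them with an all-zeros unconditional branch, so both branches run in a single UNet forward pass. The sketch below reproduces only that batching logic, with a random tensor in place of the CLIP vision encoder output and an assumed embedding width of 768.

```py
# Illustrative sketch (not pipeline code): batching image embeddings for classifier-free
# guidance as in the image-variation pipeline. Embedding width and counts are assumptions.
import torch

num_images_per_prompt, do_classifier_free_guidance = 2, True
image_embeddings = torch.randn(1, 768).unsqueeze(1)             # (batch, 1, embed_dim)

bs_embed, seq_len, _ = image_embeddings.shape
image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

if do_classifier_free_guidance:
    # the unconditional branch is all zeros; concatenating avoids a second forward pass
    negative_embeddings = torch.zeros_like(image_embeddings)
    image_embeddings = torch.cat([negative_embeddings, image_embeddings])

print(image_embeddings.shape)                                   # torch.Size([4, 1, 768])
```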
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + PIL_INTERPOLATION, + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . import StableDiffusionPipelineOutput +from .safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import requests + >>> import torch + >>> from PIL import Image + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionImg2ImgPipeline + + >>> device = "cuda" + >>> model_id_or_path = "runwayml/stable-diffusion-v1-5" + >>> pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) + >>> pipe = pipe.to(device) + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + + >>> response = requests.get(url) + >>> init_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> init_image = init_image.resize((768, 512)) + + >>> prompt = "A fantasy landscape, trending on artstation" + + >>> images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images + >>> images[0].save("fantasy_landscape.png") + ``` +""" + + +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + LoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-guided image-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
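The `retrieve_timesteps` helper defined above either forwards a custom `timesteps` list to `scheduler.set_timesteps` (after checking that the scheduler's signature accepts one) or falls back to the plain `num_inference_steps` path. A rough sketch of the two paths, assuming a DDIM scheduler loaded from the `runwayml/stable-diffusion-v1-5` checkpoint; the checkpoint name and step count are illustrative only.

```py
import inspect

from diffusers import DDIMScheduler

scheduler = DDIMScheduler.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
)

# Default path: derive the schedule from a step count.
scheduler.set_timesteps(num_inference_steps=25, device="cpu")
print(len(scheduler.timesteps))  # 25

# The custom-spacing path is only taken when `set_timesteps` accepts a
# `timesteps` kwarg, which is the guard `retrieve_timesteps` performs
# before forwarding it; otherwise it raises a ValueError.
accepts_timesteps = "timesteps" in inspect.signature(scheduler.set_timesteps).parameters
print(accepts_timesteps)
```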
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, 
clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." 
+ ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + timesteps: List[int] = None, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: int = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
+ negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + strength, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + + # 5. set timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 6. Prepare latent variables + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py new file mode 100644 index 000000000..221d5c2cf --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py @@ -0,0 +1,1430 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AsymmetricAutoencoderKL, AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . import StableDiffusionPipelineOutput +from .safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False): + """ + Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the + ``image`` and ``1`` for the ``mask``. + + The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be + binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. + It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` + ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). 
+ + Returns: + tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4 + dimensions: ``batch x channels x height x width``. + """ + deprecation_message = "The prepare_mask_and_masked_image method is deprecated and will be removed in a future version. Please use VaeImageProcessor.preprocess instead" + deprecate( + "prepare_mask_and_masked_image", + "0.30.0", + deprecation_message, + ) + if image is None: + raise ValueError("`image` input cannot be undefined.") + + if mask is None: + raise ValueError("`mask_image` input cannot be undefined.") + + if isinstance(image, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") + + # Batch single image + if image.ndim == 3: + assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" + image = image.unsqueeze(0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" + assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" + assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" + + # Check image is in [-1, 1] + if image.min() < -1 or image.max() > 1: + raise ValueError("Image should be in [-1, 1] range") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # Image as float32 + image = image.to(dtype=torch.float32) + elif isinstance(mask, torch.Tensor): + raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") + else: + # preprocess image + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + # resize all images w.r.t passed height an width + image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image] + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] + mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + masked_image = image * (mask < 0.5) + + # n.b. 
ensure backwards compatibility as old function does not return image + if return_image: + return mask, masked_image, image + + return mask, masked_image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionInpaintPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + LoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-guided image inpainting using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
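The `prepare_mask_and_masked_image` helper above (deprecated in favor of `VaeImageProcessor.preprocess`) documents the convention the inpainting pipeline expects: the image normalized to `[-1, 1]`, the mask binarized at 0.5, and the masked image keeping only the pixels outside the masked region. A small standalone sketch of that arithmetic with dummy tensors; the shapes are illustrative assumptions.

```py
import torch

image = torch.rand(1, 3, 64, 64) * 2 - 1   # image already normalized to [-1, 1]
mask = torch.rand(1, 1, 64, 64)            # raw mask in [0, 1], 1 = inpaint here

# binarize at 0.5, as in the helper above
mask = (mask >= 0.5).float()

# zero out the region to be inpainted; only the kept pixels survive
masked_image = image * (mask < 0.5)

print(mask.unique())       # tensor([0., 1.])
print(masked_image.shape)  # torch.Size([1, 3, 64, 64])
```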
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + + Args: + vae ([`AutoencoderKL`, `AsymmetricAutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "mask", "masked_image_latents"] + + def __init__( + self, + vae: Union[AutoencoderKL, AsymmetricAutoencoderKL], + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration" + " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. 
Please make" + " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" + " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" + " Hub, it would be very nice if you could open a Pull request for the" + " `scheduler/scheduler_config.json` file" + ) + deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["skip_prk_steps"] = True + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + # Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4 + if unet.config.in_channels != 9: + logger.info(f"You have loaded a UNet with {unet.config.in_channels} input channels which.") + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). 
+ prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
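+ # Note (illustrative, derived from the indexing above): clip_skip=1 selects
+ # hidden_states[-2] (the pre-final layer), clip_skip=2 selects hidden_states[-3];
+ # the LayerNorm applied below mirrors the normalization used for last_hidden_state.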
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, 
clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + mask_image, + height, + width, + strength, + callback_steps, + output_type, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + padding_mask_crop=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if height % self.vae_scale_factor != 0 or width % self.vae_scale_factor != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + if padding_mask_crop is not None: + if not isinstance(image, PIL.Image.Image): + raise ValueError( + f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}." + ) + if not isinstance(mask_image, PIL.Image.Image): + raise ValueError( + f"The mask image should be a PIL image when inpainting mask crop, but is of type" + f" {type(mask_image)}." + ) + if output_type != "pil": + raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + return_noise=False, + return_image_latents=False, + ): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." + "However, either the image or the noise timestep has not been provided." + ) + + if return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + + if image.shape[1] == 4: + image_latents = image + else: + image_latents = self._encode_vae_image(image=image, generator=generator) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + + if latents is None: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. 
then initialise the latents to noise, else initial to image + noise + latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) + # if pure noise then scale the initial latents by the Scheduler's init sigma + latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents + else: + noise = latents.to(device) + latents = noise * self.scheduler.init_noise_sigma + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + def prepare_mask_latents( + self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + masked_image = masked_image.to(device=device, dtype=dtype) + + if masked_image.shape[1] == 4: + masked_image_latents = masked_image + else: + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." 
+ ) + masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
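+ # Concretely, the denoising loop below combines the two predictions as
+ # noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond),
+ # so guidance_scale <= 1 (or a UNet with `time_cond_proj_dim` set) skips the extra
+ # unconditional pass entirely.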
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + masked_image_latents: torch.FloatTensor = None, + height: Optional[int] = None, + width: Optional[int] = None, + padding_mask_crop: Optional[int] = None, + strength: float = 1.0, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: int = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be inpainted (which parts of the image to + be masked out with `mask_image` and repainted according to `prompt`). For both numpy array and pytorch + tensor, the expected value range is between `[0, 1]` If it's a tensor or a list or tensors, the + expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a list of arrays, the + expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image latents as `image`, but + if passing latents directly it is not encoded again. + mask_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask + are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a + single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one + color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B, + H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W, + 1)`, or `(H, W)`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. 
+ padding_mask_crop (`int`, *optional*, defaults to `None`): + The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to image and mask_image. If + `padding_mask_crop` is not `None`, it will first find a rectangular region with the same aspect ration of the image and + contains all masked area, and then expand that area based on `padding_mask_crop`. The image and mask_image will then be cropped based on + the expanded area before resizing to the original image size for inpainting. This is useful when the masked area is small while the image is large + and contain information inreleant for inpainging, such as background. + strength (`float`, *optional*, defaults to 1.0): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. 
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + Examples: + + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionInpaintPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + + >>> init_image = download_image(img_url).resize((512, 512)) + >>> mask_image = download_image(mask_url).resize((512, 512)) + + >>> pipe = StableDiffusionInpaintPipeline.from_pretrained( + ... "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16 + ... 
) + >>> pipe = pipe.to("cuda") + + >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + >>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0] + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs + self.check_inputs( + prompt, + image, + mask_image, + height, + width, + strength, + callback_steps, + output_type, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + padding_mask_crop, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. 
set timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps=num_inference_steps, strength=strength, device=device + ) + # check that number of inference steps is not < 1 - as this doesn't make sense + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + + # 5. Preprocess mask and image + + if padding_mask_crop is not None: + crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) + resize_mode = "fill" + else: + crops_coords = None + resize_mode = "default" + + original_image = image + init_image = self.image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ) + init_image = init_image.to(dtype=torch.float32) + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask_condition = self.mask_processor.preprocess( + mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + + if masked_image_latents is None: + masked_image = init_image * (mask_condition < 0.5) + else: + masked_image = masked_image_latents + + mask, masked_image_latents = self.prepare_mask_latents( + mask_condition, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + self.do_classifier_free_guidance, + ) + + # 8. Check that sizes of mask, masked image and latents match + if num_channels_unet == 9: + # default case for runwayml/stable-diffusion-inpainting + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." 
+ ) + elif num_channels_unet != 4: + raise ValueError( + f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." + ) + + # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 9.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 10. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if num_channels_unet == 9: + latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + if num_channels_unet == 4: + init_latents_proper = image_latents + if self.do_classifier_free_guidance: + init_mask, _ = mask.chunk(2) + else: + init_mask = mask + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + mask = callback_outputs.pop("mask", mask) + masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, 
"order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + condition_kwargs = {} + if isinstance(self.vae, AsymmetricAutoencoderKL): + init_image = init_image.to(device=device, dtype=masked_image_latents.dtype) + init_image_condition = init_image.clone() + init_image = self._encode_vae_image(init_image, generator=generator) + mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype) + condition_kwargs = {"image": init_image_condition, "mask": mask_condition} + image = self.vae.decode( + latents / self.vae.config.scaling_factor, return_dict=False, generator=generator, **condition_kwargs + )[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + if padding_mask_crop is not None: + image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py new file mode 100644 index 000000000..cbb6ed4fa --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py @@ -0,0 +1,807 @@ +# Copyright 2024 The InstructPix2Pix Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import PIL_INTERPOLATION, deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . 
import StableDiffusionPipelineOutput +from .safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +class StableDiffusionInstructPix2PixPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin +): + r""" + Pipeline for pixel-level image editing by following text instructions (based on Stable Diffusion). + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. 
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "image_latents"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + num_inference_steps: int = 100, + guidance_scale: float = 7.5, + image_guidance_scale: float = 1.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. 
+ image (`torch.FloatTensor` `np.ndarray`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image` or tensor representing an image batch to be repainted according to `prompt`. Can also accept + image latents as `image`, but if passing latents directly it is not encoded again. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + image_guidance_scale (`float`, *optional*, defaults to 1.5): + Push the generated image towards the inital `image`. Image guidance scale is enabled by setting + `image_guidance_scale > 1`. Higher image guidance scale encourages generated images that are closely + linked to the source `image`, usually at the expense of lower image quality. This pipeline requires a + value of at least `1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. 
+ callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionInstructPix2PixPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" + + >>> image = download_image(img_url).resize((512, 512)) + + >>> pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( + ... "timbrooks/instruct-pix2pix", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "make the mountains snowy" + >>> image = pipe(prompt=prompt, image=image).images[0] + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 0. Check inputs + self.check_inputs( + prompt, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + ) + self._guidance_scale = guidance_scale + self._image_guidance_scale = image_guidance_scale + + device = self._execution_device + + if ip_adapter_image is not None: + output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True + image_embeds, negative_image_embeds = self.encode_image( + ip_adapter_image, device, num_images_per_prompt, output_hidden_state + ) + if self.do_classifier_free_guidance: + image_embeds = torch.cat([image_embeds, negative_image_embeds, negative_image_embeds]) + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + # 1. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 2. Encode input prompt + prompt_embeds = self._encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 3. Preprocess image + image = self.image_processor.preprocess(image) + + # 4. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. 
Prepare Image latents + image_latents = self.prepare_image_latents( + image, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + self.do_classifier_free_guidance, + ) + + height, width = image_latents.shape[-2:] + height = height * self.vae_scale_factor + width = width * self.vae_scale_factor + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 7. Check that shapes of latents and image match the UNet channels + num_channels_image = image_latents.shape[1] + if num_channels_latents + num_channels_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_image`: {num_channels_image} " + f" = {num_channels_latents+num_channels_image}. Please verify the config of" + " `pipeline.unet` or your `image` input." + ) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8.1 Add image embeds for IP-Adapter + added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None + + # 9. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Expand the latents if we are doing classifier free guidance. + # The latents are expanded 3 times because for pix2pix the guidance\ + # is applied for both the text and the input image. 
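+                # Batching sketch (descriptive note): under classifier-free guidance the effective batch has
+                # three rows per sample, matching the [prompt_embeds, negative_prompt_embeds, negative_prompt_embeds]
+                # ordering built in `_encode_prompt` and the [image_latents, image_latents, uncond_image_latents]
+                # ordering built in `prepare_image_latents`:
+                #   row 0 -> (text prompt,           input image latents)  -> noise_pred_text
+                #   row 1 -> (negative/empty prompt, input image latents)  -> noise_pred_image
+                #   row 2 -> (negative/empty prompt, zeroed image latents) -> noise_pred_uncond
+                # The three chunks are recombined below using `guidance_scale` and `image_guidance_scale`.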
+ latent_model_input = torch.cat([latents] * 3) if self.do_classifier_free_guidance else latents + + # concat latents, image_latents in the channel dimension + scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1) + + # predict the noise residual + noise_pred = self.unet( + scaled_latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3) + noise_pred = ( + noise_pred_uncond + + self.guidance_scale * (noise_pred_text - noise_pred_image) + + self.image_guidance_scale * (noise_pred_image - noise_pred_uncond) + ) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + image_latents = callback_outputs.pop("image_latents", image_latents) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_ prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + else: + prompt_embeds_dtype = self.unet.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + # pix2pix has two negative embeddings, and unlike in other pipelines latents are ordered [prompt_embeds, negative_prompt_embeds, negative_prompt_embeds] + prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds, negative_prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, 
clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def check_inputs( + self, + prompt, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_image_latents( + self, image, batch_size, num_images_per_prompt, dtype, device, do_classifier_free_guidance, generator=None + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + image_latents = image + else: + image_latents = retrieve_latents(self.vae.encode(image), sample_mode="argmax") + + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." + ) + else: + image_latents = torch.cat([image_latents], dim=0) + + if do_classifier_free_guidance: + uncond_image_latents = torch.zeros_like(image_latents) + image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) + + return image_latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def image_guidance_scale(self): + return self._image_guidance_scale + + @property + def num_timesteps(self): + return self._num_timesteps + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self.guidance_scale > 1.0 and self.image_guidance_scale >= 1.0 diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py new file mode 100644 index 000000000..918dffe51 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py @@ -0,0 +1,495 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPTextModel, CLIPTokenizer + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...schedulers import EulerDiscreteScheduler +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.preprocess +def preprocess(image): + warnings.warn( + "The preprocess method is deprecated and will be removed in a future version. Please" + " use VaeImageProcessor.preprocess instead", + FutureWarning, + ) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64 + + image = [np.array(i.resize((w, h)))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +class StableDiffusionLatentUpscalePipeline(DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin): + r""" + Pipeline for upscaling Stable Diffusion output image resolution by a factor of 2. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
+ + The pipeline also inherits the following loading methods: + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A [`EulerDiscreteScheduler`] to be used in combination with `unet` to denoise the encoded image latents. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: EulerDiscreteScheduler, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, resample="bicubic") + + def _encode_prompt(self, prompt, device, do_classifier_free_guidance, negative_prompt): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `list(int)`): + prompt to be encoded + device: (`torch.device`): + torch device + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + """ + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_length=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_encoder_out = self.text_encoder( + text_input_ids.to(device), + output_hidden_states=True, + ) + text_embeddings = text_encoder_out.hidden_states[-1] + text_pooler_out = text_encoder_out.pooler_output + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_length=True, + return_tensors="pt", + ) + + uncond_encoder_out = self.text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + + uncond_embeddings = uncond_encoder_out.hidden_states[-1] + uncond_pooler_out = uncond_encoder_out.pooler_output + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) + text_pooler_out = torch.cat([uncond_pooler_out, text_pooler_out]) + + return text_embeddings, text_pooler_out + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def check_inputs(self, prompt, image, callback_steps): + if not isinstance(prompt, str) and not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or `list` but is {type(image)}" + ) + + # verify batch size of prompt and image are same if image is a list or tensor + if isinstance(image, list) or isinstance(image, torch.Tensor): + if isinstance(prompt, str): + batch_size = 1 + else: + batch_size = len(prompt) + if isinstance(image, list): + image_batch_size = len(image) + else: + image_batch_size = image.shape[0] if image.ndim == 4 else 1 + if batch_size != image_batch_size: + raise ValueError( + f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}." + " Please make sure that passed `prompt` matches the batch size of `image`." + ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height, width) + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + image: PipelineImageInput = None, + num_inference_steps: int = 75, + guidance_scale: float = 9.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image upscaling. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image` or tensor representing an image batch to be upscaled. If it's a tensor, it can be either a + latent output from a Stable Diffusion model or an image tensor in the range `[-1, 1]`. It is considered + a `latent` if `image.shape[1]` is `4`; otherwise, it is considered to be an image representation and + encoded using this pipeline's `vae` encoder. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + ```py + >>> from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline + >>> import torch + + + >>> pipeline = StableDiffusionPipeline.from_pretrained( + ... "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16 + ... ) + >>> pipeline.to("cuda") + + >>> model_id = "stabilityai/sd-x2-latent-upscaler" + >>> upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16) + >>> upscaler.to("cuda") + + >>> prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic" + >>> generator = torch.manual_seed(33) + + >>> low_res_latents = pipeline(prompt, generator=generator, output_type="latent").images + + >>> with torch.no_grad(): + ... image = pipeline.decode_latents(low_res_latents) + >>> image = pipeline.numpy_to_pil(image)[0] + + >>> image.save("../images/a1.png") + + >>> upscaled_image = upscaler( + ... prompt=prompt, + ... image=low_res_latents, + ... num_inference_steps=20, + ... guidance_scale=0, + ... generator=generator, + ... ).images[0] + + >>> upscaled_image.save("../images/a2.png") + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images. + """ + + # 1. Check inputs + self.check_inputs(prompt, image, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + if guidance_scale == 0: + prompt = [""] * batch_size + + # 3. Encode input prompt + text_embeddings, text_pooler_out = self._encode_prompt( + prompt, device, do_classifier_free_guidance, negative_prompt + ) + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + image = image.to(dtype=text_embeddings.dtype, device=device) + if image.shape[1] == 3: + # encode image if not in latent-space yet + image = self.vae.encode(image).latent_dist.sample() * self.vae.config.scaling_factor + + # 5. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + batch_multiplier = 2 if do_classifier_free_guidance else 1 + image = image[None, :] if image.ndim == 3 else image + image = torch.cat([image] * batch_multiplier) + + # 5. Add noise to image (set to be 0): + # (see below notes from the author): + # "the This step theoretically can make the model work better on out-of-distribution inputs, but mostly just seems to make it match the input less, so it's turned off by default." 
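+        # Note: `noise_level` is fixed at 0 here, so `inv_noise_level` evaluates to
+        # (0**2 + 1) ** (-0.5) == 1.0 and `image_cond` below reduces to the input latents
+        # upsampled 2x with nearest-neighbor interpolation, cast to the text-embedding dtype.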
+ noise_level = torch.tensor([0.0], dtype=torch.float32, device=device) + noise_level = torch.cat([noise_level] * image.shape[0]) + inv_noise_level = (noise_level**2 + 1) ** (-0.5) + + image_cond = F.interpolate(image, scale_factor=2, mode="nearest") * inv_noise_level[:, None, None, None] + image_cond = image_cond.to(text_embeddings.dtype) + + noise_level_embed = torch.cat( + [ + torch.ones(text_pooler_out.shape[0], 64, dtype=text_pooler_out.dtype, device=device), + torch.zeros(text_pooler_out.shape[0], 64, dtype=text_pooler_out.dtype, device=device), + ], + dim=1, + ) + + timestep_condition = torch.cat([noise_level_embed, text_pooler_out], dim=1) + + # 6. Prepare latent variables + height, width = image.shape[2:] + num_channels_latents = self.vae.config.latent_channels + latents = self.prepare_latents( + batch_size, + num_channels_latents, + height * 2, # 2x upscale + width * 2, + text_embeddings.dtype, + device, + generator, + latents, + ) + + # 7. Check that sizes of image and latents match + num_channels_image = image.shape[1] + if num_channels_latents + num_channels_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_image`: {num_channels_image} " + f" = {num_channels_latents+num_channels_image}. Please verify the config of" + " `pipeline.unet` or your `image` input." + ) + + # 9. Denoising loop + num_warmup_steps = 0 + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + sigma = self.scheduler.sigmas[i] + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + scaled_model_input = torch.cat([scaled_model_input, image_cond], dim=1) + # preconditioning parameter based on Karras et al. (2022) (table 1) + timestep = torch.log(sigma) * 0.25 + + noise_pred = self.unet( + scaled_model_input, + timestep, + encoder_hidden_states=text_embeddings, + timestep_cond=timestep_condition, + ).sample + + # in original repo, the output contains a variance channel that's not used + noise_pred = noise_pred[:, :-1] + + # apply preconditioning, based on table 1 in Karras et al. 
(2022) + inv_sigma = 1 / (sigma**2 + 1) + noise_pred = inv_sigma * latent_model_input + self.scheduler.scale_model_input(sigma, t) * noise_pred + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py new file mode 100644 index 000000000..2d04cf41d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py @@ -0,0 +1,808 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import warnings +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import DDPMScheduler, KarrasDiffusionSchedulers +from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def preprocess(image): + warnings.warn( + "The preprocess method is deprecated and will be removed in a future version. 
Please" + " use VaeImageProcessor.preprocess instead", + FutureWarning, + ) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64 + + image = [np.array(i.resize((w, h)))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +class StableDiffusionUpscalePipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin +): + r""" + Pipeline for text-guided image super-resolution using Stable Diffusion 2. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + low_res_scheduler ([`SchedulerMixin`]): + A scheduler used to add initial noise to the low resolution conditioning image. It must be an instance of + [`DDPMScheduler`]. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["watermarker", "safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + low_res_scheduler: DDPMScheduler, + scheduler: KarrasDiffusionSchedulers, + safety_checker: Optional[Any] = None, + feature_extractor: Optional[CLIPImageProcessor] = None, + watermarker: Optional[Any] = None, + max_noise_level: int = 350, + ): + super().__init__() + + if hasattr( + vae, "config" + ): # check if vae has a config attribute `scaling_factor` and if it is set to 0.08333, else set it to 0.08333 and deprecate + is_vae_scaling_factor_set_to_0_08333 = ( + hasattr(vae.config, "scaling_factor") and vae.config.scaling_factor == 0.08333 + ) + if not is_vae_scaling_factor_set_to_0_08333: + deprecation_message = ( + "The configuration file of the vae does not contain `scaling_factor` or it is set to" + f" {vae.config.scaling_factor}, which seems highly unlikely. 
If your checkpoint is a fine-tuned" + " version of `stabilityai/stable-diffusion-x4-upscaler` you should change 'scaling_factor' to" + " 0.08333 Please make sure to update the config accordingly, as not doing so might lead to" + " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging" + " Face Hub, it would be very nice if you could open a Pull Request for the `vae/config.json` file" + ) + deprecate("wrong scaling_factor", "1.0.0", deprecation_message, standard_warn=False) + vae.register_to_config(scaling_factor=0.08333) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + low_res_scheduler=low_res_scheduler, + scheduler=scheduler, + safety_checker=safety_checker, + watermarker=watermarker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, resample="bicubic") + self.register_to_config(max_noise_level=max_noise_level) + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. 
Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. + prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def check_inputs( + self, + prompt, + image, + noise_level, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, np.ndarray) + and not isinstance(image, list) + ): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}" + ) + + # verify batch size of prompt and image are same if image is a list or tensor or numpy array + if isinstance(image, list) or isinstance(image, torch.Tensor) or isinstance(image, np.ndarray): + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if isinstance(image, list): + image_batch_size = len(image) + else: + image_batch_size = image.shape[0] + if batch_size != image_batch_size: + raise ValueError( + f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}." + " Please make sure that passed `prompt` matches the batch size of `image`." + ) + + # check noise level + if noise_level > self.config.max_noise_level: + raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height, width) + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + num_inference_steps: int = 75, + guidance_scale: float = 9.0, + noise_level: int = 20, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: int = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image` or tensor representing an image batch to be upscaled. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + ```py + >>> import requests + >>> from PIL import Image + >>> from io import BytesIO + >>> from diffusers import StableDiffusionUpscalePipeline + >>> import torch + + >>> # load model and scheduler + >>> model_id = "stabilityai/stable-diffusion-x4-upscaler" + >>> pipeline = StableDiffusionUpscalePipeline.from_pretrained( + ... model_id, revision="fp16", torch_dtype=torch.float16 + ... 
) + >>> pipeline = pipeline.to("cuda") + + >>> # let's download an image + >>> url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png" + >>> response = requests.get(url) + >>> low_res_img = Image.open(BytesIO(response.content)).convert("RGB") + >>> low_res_img = low_res_img.resize((128, 128)) + >>> prompt = "a white cat" + + >>> upscaled_image = pipeline(prompt=prompt, image=low_res_img).images[0] + >>> upscaled_image.save("upsampled_cat.png") + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + # 1. Check inputs + self.check_inputs( + prompt, + image, + noise_level, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + image = image.to(dtype=prompt_embeds.dtype, device=device) + + # 5. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Add noise to image + noise_level = torch.tensor([noise_level], dtype=torch.long, device=device) + noise = randn_tensor(image.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) + image = self.low_res_scheduler.add_noise(image, noise, noise_level) + + batch_multiplier = 2 if do_classifier_free_guidance else 1 + image = torch.cat([image] * batch_multiplier * num_images_per_prompt) + noise_level = torch.cat([noise_level] * image.shape[0]) + + # 6. Prepare latent variables + height, width = image.shape[2:] + num_channels_latents = self.vae.config.latent_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 7. 
Check that sizes of image and latents match + num_channels_image = image.shape[1] + if num_channels_latents + num_channels_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_image`: {num_channels_image} " + f" = {num_channels_latents+num_channels_image}. Please verify the config of" + " `pipeline.unet` or your `image` input." + ) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + latent_model_input = torch.cat([latent_model_input, image], dim=1) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + class_labels=noise_level, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + + # Ensure latents are always the same type as the VAE + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + + image, has_nsfw_concept, _ = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # 11. 
Apply watermark + if output_type == "pil" and self.watermarker is not None: + image = self.watermarker.apply_watermark(image) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py new file mode 100644 index 000000000..c62e0f4ec --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py @@ -0,0 +1,932 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer +from transformers.models.clip.modeling_clip import CLIPTextModelOutput + +from ...image_processor import VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, PriorTransformer, UNet2DConditionModel +from ...models.embeddings import get_timestep_embedding +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin +from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableUnCLIPPipeline + + >>> pipe = StableUnCLIPPipeline.from_pretrained( + ... "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16 + ... ) # TODO update model path + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> images = pipe(prompt).images + >>> images[0].save("astronaut_horse.png") + ``` +""" + + +class StableUnCLIPPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin): + """ + Pipeline for text-to-image generation using stable unCLIP. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
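To preview the two-stage design spelled out in the component list that follows, here is a toy tensor-level sketch of the data flow. Every module is replaced by a stand-in function and all shapes are assumptions; it only traces how the prior's predicted image embedding becomes extra conditioning for the UNet denoising stage.

```py
import torch

def toy_prior(prior_text_embeds):
    # Stand-in for the PriorTransformer denoising loop: text embedding -> CLIP image embedding.
    return torch.randn(prior_text_embeds.shape[0], 768)

def toy_noise_image_embeddings(image_embeds, noise_level):
    # Stand-in for noise_image_embeddings(): add scheduled noise, then append an
    # embedding of noise_level (both replaced by random placeholders here).
    noised = image_embeds + torch.randn_like(image_embeds) * (noise_level / 1000.0)
    level_emb = torch.randn(image_embeds.shape[0], image_embeds.shape[-1])
    return torch.cat([noised, level_emb], dim=1)

def toy_unet(latents, encoder_hidden_states, extra_conditioning):
    # Stand-in for UNet2DConditionModel: predicts noise for the latents.
    return torch.randn_like(latents)

prior_text_embeds = torch.randn(1, 1024)                                   # from the prior text encoder
image_embeds = toy_prior(prior_text_embeds)                                # stage 1: text -> image embedding
extra_conditioning = toy_noise_image_embeddings(image_embeds, noise_level=0)
latents = torch.randn(1, 4, 96, 96)                                        # latent being denoised
noise_pred = toy_unet(latents, torch.randn(1, 77, 1024), extra_conditioning)  # stage 2: one denoising step
```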
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + prior_tokenizer ([`CLIPTokenizer`]): + A [`CLIPTokenizer`]. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen [`CLIPTextModelWithProjection`] text-encoder. + prior ([`PriorTransformer`]): + The canonincal unCLIP prior to approximate the image embedding from the text embedding. + prior_scheduler ([`KarrasDiffusionSchedulers`]): + Scheduler used in the prior denoising process. + image_normalizer ([`StableUnCLIPImageNormalizer`]): + Used to normalize the predicted image embeddings before the noise is applied and un-normalize the image + embeddings after the noise has been applied. + image_noising_scheduler ([`KarrasDiffusionSchedulers`]): + Noise schedule for adding noise to the predicted image embeddings. The amount of noise to add is determined + by the `noise_level`. + tokenizer ([`CLIPTokenizer`]): + A [`CLIPTokenizer`]. + text_encoder ([`CLIPTextModel`]): + Frozen [`CLIPTextModel`] text-encoder. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] to denoise the encoded image latents. + scheduler ([`KarrasDiffusionSchedulers`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + """ + + _exclude_from_cpu_offload = ["prior", "image_normalizer"] + model_cpu_offload_seq = "text_encoder->prior_text_encoder->unet->vae" + + # prior components + prior_tokenizer: CLIPTokenizer + prior_text_encoder: CLIPTextModelWithProjection + prior: PriorTransformer + prior_scheduler: KarrasDiffusionSchedulers + + # image noising components + image_normalizer: StableUnCLIPImageNormalizer + image_noising_scheduler: KarrasDiffusionSchedulers + + # regular denoising components + tokenizer: CLIPTokenizer + text_encoder: CLIPTextModel + unet: UNet2DConditionModel + scheduler: KarrasDiffusionSchedulers + + vae: AutoencoderKL + + def __init__( + self, + # prior components + prior_tokenizer: CLIPTokenizer, + prior_text_encoder: CLIPTextModelWithProjection, + prior: PriorTransformer, + prior_scheduler: KarrasDiffusionSchedulers, + # image noising components + image_normalizer: StableUnCLIPImageNormalizer, + image_noising_scheduler: KarrasDiffusionSchedulers, + # regular denoising components + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModelWithProjection, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + # vae + vae: AutoencoderKL, + ): + super().__init__() + + self.register_modules( + prior_tokenizer=prior_tokenizer, + prior_text_encoder=prior_text_encoder, + prior=prior, + prior_scheduler=prior_scheduler, + image_normalizer=image_normalizer, + image_noising_scheduler=image_noising_scheduler, + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + vae=vae, + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt with _encode_prompt->_encode_prior_prompt, tokenizer->prior_tokenizer, text_encoder->prior_text_encoder + def 
_encode_prior_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, + text_attention_mask: Optional[torch.Tensor] = None, + ): + if text_model_output is None: + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.prior_tokenizer( + prompt, + padding="max_length", + max_length=self.prior_tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + + untruncated_ids = self.prior_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.prior_tokenizer.batch_decode( + untruncated_ids[:, self.prior_tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.prior_tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.prior_tokenizer.model_max_length] + + prior_text_encoder_output = self.prior_text_encoder(text_input_ids.to(device)) + + prompt_embeds = prior_text_encoder_output.text_embeds + text_enc_hid_states = prior_text_encoder_output.last_hidden_state + + else: + batch_size = text_model_output[0].shape[0] + prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1] + text_mask = text_attention_mask + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens = [""] * batch_size + + uncond_input = self.prior_tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.prior_tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + uncond_text_mask = uncond_input.attention_mask.bool().to(device) + negative_prompt_embeds_prior_text_encoder_output = self.prior_text_encoder( + uncond_input.input_ids.to(device) + ) + + negative_prompt_embeds = negative_prompt_embeds_prior_text_encoder_output.text_embeds + uncond_text_enc_hid_states = negative_prompt_embeds_prior_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_enc_hid_states.shape[1] + uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) + uncond_text_enc_hid_states = uncond_text_enc_hid_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_enc_hid_states, text_mask + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs with prepare_extra_step_kwargs->prepare_prior_extra_step_kwargs, scheduler->prior_scheduler + def prepare_prior_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the prior_scheduler step, since not all prior_schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other prior_schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.prior_scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the prior_scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.prior_scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + noise_level, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two." + ) + + if prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + + if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + "Provide either `negative_prompt` or `negative_prompt_embeds`. Cannot leave both `negative_prompt` and `negative_prompt_embeds` undefined." + ) + + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: + raise ValueError( + f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive." + ) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def noise_image_embeddings( + self, + image_embeds: torch.Tensor, + noise_level: int, + noise: Optional[torch.FloatTensor] = None, + generator: Optional[torch.Generator] = None, + ): + """ + Add noise to the image embeddings. The amount of noise is controlled by a `noise_level` input. A higher + `noise_level` increases the variance in the final un-noised images. + + The noise is applied in two ways: + 1. A noise schedule is applied directly to the embeddings. + 2. A vector of sinusoidal time embeddings are appended to the output. + + In both cases, the amount of noise is controlled by the same `noise_level`. + + The embeddings are normalized before the noise is applied and un-normalized after the noise is applied. + """ + if noise is None: + noise = randn_tensor( + image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype + ) + + noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device) + + self.image_normalizer.to(image_embeds.device) + image_embeds = self.image_normalizer.scale(image_embeds) + + image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise) + + image_embeds = self.image_normalizer.unscale(image_embeds) + + noise_level = get_timestep_embedding( + timesteps=noise_level, embedding_dim=image_embeds.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0 + ) + + # `get_timestep_embeddings` does not contain any weights and will always return f32 tensors, + # but we might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. 
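For reference, a rough standalone walk-through of the two noising steps that `noise_image_embeddings` applies, using plain `diffusers` utilities; the `StableUnCLIPImageNormalizer` scale/unscale step is omitted, and the shapes and scheduler configuration are illustrative assumptions rather than the pipeline's actual components.

```py
import torch
from diffusers import DDPMScheduler
from diffusers.models.embeddings import get_timestep_embedding

image_embeds = torch.randn(2, 768)        # stand-in CLIP image embeddings
noise_level = 100                         # strength of the added noise

scheduler = DDPMScheduler(num_train_timesteps=1000)
timesteps = torch.tensor([noise_level] * image_embeds.shape[0])
noise = torch.randn_like(image_embeds)

# 1) a noise schedule is applied directly to the (here un-normalized) embeddings
noised = scheduler.add_noise(image_embeds, noise, timesteps)

# 2) a sinusoidal embedding of the same noise_level is appended to the output
level_emb = get_timestep_embedding(
    timesteps, embedding_dim=noised.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0
)
conditioning = torch.cat([noised, level_emb.to(noised.dtype)], dim=1)  # shape (2, 1536)
```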
+ noise_level = noise_level.to(image_embeds.dtype) + + image_embeds = torch.cat((image_embeds, noise_level), 1) + + return image_embeds + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + # regular denoising process args + prompt: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 20, + guidance_scale: float = 10.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[torch.Generator] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + noise_level: int = 0, + # prior args + prior_num_inference_steps: int = 25, + prior_guidance_scale: float = 4.0, + prior_latents: Optional[torch.FloatTensor] = None, + clip_skip: Optional[int] = None, + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 20): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 10.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. 
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + noise_level (`int`, *optional*, defaults to `0`): + The amount of noise to add to the image embeddings. A higher `noise_level` increases the variance in + the final un-noised images. See [`StableUnCLIPPipeline.noise_image_embeddings`] for more details. + prior_num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps in the prior denoising process. More denoising steps usually lead to a + higher quality image at the expense of slower inference. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + prior_latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + embedding generation in the prior denoising process. Can be used to tweak the same generation with + different prompts. If not provided, a latents tensor is generated by sampling using the supplied random + `generator`. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + [`~ pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning + a tuple, the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt=prompt, + height=height, + width=width, + callback_steps=callback_steps, + noise_level=noise_level, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + batch_size = batch_size * num_images_per_prompt + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + prior_do_classifier_free_guidance = prior_guidance_scale > 1.0 + + # 3. Encode input prompt + prior_prompt_embeds, prior_text_encoder_hidden_states, prior_text_mask = self._encode_prior_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=prior_do_classifier_free_guidance, + ) + + # 4. Prepare prior timesteps + self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) + prior_timesteps_tensor = self.prior_scheduler.timesteps + + # 5. Prepare prior latent variables + embedding_dim = self.prior.config.embedding_dim + prior_latents = self.prepare_latents( + (batch_size, embedding_dim), + prior_prompt_embeds.dtype, + device, + generator, + prior_latents, + self.prior_scheduler, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + prior_extra_step_kwargs = self.prepare_prior_extra_step_kwargs(generator, eta) + + # 7. Prior denoising loop + for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([prior_latents] * 2) if prior_do_classifier_free_guidance else prior_latents + latent_model_input = self.prior_scheduler.scale_model_input(latent_model_input, t) + + predicted_image_embedding = self.prior( + latent_model_input, + timestep=t, + proj_embedding=prior_prompt_embeds, + encoder_hidden_states=prior_text_encoder_hidden_states, + attention_mask=prior_text_mask, + ).predicted_image_embedding + + if prior_do_classifier_free_guidance: + predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) + predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( + predicted_image_embedding_text - predicted_image_embedding_uncond + ) + + prior_latents = self.prior_scheduler.step( + predicted_image_embedding, + timestep=t, + sample=prior_latents, + **prior_extra_step_kwargs, + return_dict=False, + )[0] + + if callback is not None and i % callback_steps == 0: + callback(i, t, prior_latents) + + prior_latents = self.prior.post_process_latents(prior_latents) + + image_embeds = prior_latents + + # done prior + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 8. 
Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 9. Prepare image embeddings + image_embeds = self.noise_image_embeddings( + image_embeds=image_embeds, + noise_level=noise_level, + generator=generator, + ) + + if do_classifier_free_guidance: + negative_prompt_embeds = torch.zeros_like(image_embeds) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_embeds = torch.cat([negative_prompt_embeds, image_embeds]) + + # 10. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 11. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + latents = self.prepare_latents( + shape=shape, + dtype=prompt_embeds.dtype, + device=device, + generator=generator, + latents=latents, + scheduler=self.scheduler, + ) + + # 12. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 13. 
Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + class_labels=image_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py new file mode 100644 index 000000000..9b85d9e6b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py @@ -0,0 +1,839 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
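Both denoising loops above (the prior loop and the image decoder loop) rely on the same classifier-free guidance pattern: the latents are duplicated with `torch.cat([latents] * 2)` so that a single forward pass produces an unconditional and a conditional prediction, and the two halves are then recombined with the guidance scale. A minimal sketch of that recombination step, using plain tensors; the helper name `apply_cfg` is illustrative only and not part of the diffusers API:

```py
import torch


def apply_cfg(batched_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    """Recombine a stacked [unconditional, conditional] prediction.

    Mirrors the update used in both loops above:
        pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
    """
    # split along the batch dimension: first half unconditional, second half text-conditioned
    pred_uncond, pred_text = batched_pred.chunk(2)
    return pred_uncond + guidance_scale * (pred_text - pred_uncond)


# Toy usage with the pipeline's default guidance_scale of 10.0; the shape is arbitrary.
fake_pred = torch.randn(2, 4, 96, 96)  # row 0: unconditional branch, row 1: conditional branch
guided = apply_cfg(fake_pred, guidance_scale=10.0)
print(guided.shape)  # torch.Size([1, 4, 96, 96])
```

With `guidance_scale = 1.0` the expression collapses to the conditional prediction alone, which is why the loops only build the duplicated batch when `guidance_scale > 1.0`.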
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.embeddings import get_timestep_embedding +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin +from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import requests + >>> import torch + >>> from PIL import Image + >>> from io import BytesIO + + >>> from diffusers import StableUnCLIPImg2ImgPipeline + + >>> pipe = StableUnCLIPImg2ImgPipeline.from_pretrained( + ... "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16 + ... ) # TODO update model path + >>> pipe = pipe.to("cuda") + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + + >>> response = requests.get(url) + >>> init_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> init_image = init_image.resize((768, 512)) + + >>> prompt = "A fantasy landscape, trending on artstation" + + >>> images = pipe(prompt, init_image).images + >>> images[0].save("fantasy_landscape.png") + ``` +""" + + +class StableUnCLIPImg2ImgPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin +): + """ + Pipeline for text-guided image-to-image generation using stable unCLIP. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + feature_extractor ([`CLIPImageProcessor`]): + Feature extractor for image pre-processing before being encoded. + image_encoder ([`CLIPVisionModelWithProjection`]): + CLIP vision model for encoding images. + image_normalizer ([`StableUnCLIPImageNormalizer`]): + Used to normalize the predicted image embeddings before the noise is applied and un-normalize the image + embeddings after the noise has been applied. + image_noising_scheduler ([`KarrasDiffusionSchedulers`]): + Noise schedule for adding noise to the predicted image embeddings. The amount of noise to add is determined + by the `noise_level`. + tokenizer (`~transformers.CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`)]. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen [`~transformers.CLIPTextModel`] text-encoder. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] to denoise the encoded image latents. 
+ scheduler ([`KarrasDiffusionSchedulers`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _exclude_from_cpu_offload = ["image_normalizer"] + + # image encoding components + feature_extractor: CLIPImageProcessor + image_encoder: CLIPVisionModelWithProjection + + # image noising components + image_normalizer: StableUnCLIPImageNormalizer + image_noising_scheduler: KarrasDiffusionSchedulers + + # regular denoising components + tokenizer: CLIPTokenizer + text_encoder: CLIPTextModel + unet: UNet2DConditionModel + scheduler: KarrasDiffusionSchedulers + + vae: AutoencoderKL + + def __init__( + self, + # image encoding components + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection, + # image noising components + image_normalizer: StableUnCLIPImageNormalizer, + image_noising_scheduler: KarrasDiffusionSchedulers, + # regular denoising components + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModel, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + # vae + vae: AutoencoderKL, + ): + super().__init__() + + self.register_modules( + feature_extractor=feature_extractor, + image_encoder=image_encoder, + image_normalizer=image_normalizer, + image_noising_scheduler=image_noising_scheduler, + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + vae=vae, + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + def _encode_image( + self, + image, + device, + batch_size, + num_images_per_prompt, + do_classifier_free_guidance, + noise_level, + generator, + image_embeds, + ): + dtype = next(self.image_encoder.parameters()).dtype + + if isinstance(image, PIL.Image.Image): + # the image embedding should repeated so it matches the total batch size of the prompt + repeat_by = batch_size + else: + # assume the image input is already properly batched and just needs to be repeated so + # it matches the num_images_per_prompt. 
+ # + # NOTE(will) this is probably missing a few number of side cases. I.e. batched/non-batched + # `image_embeds`. If those happen to be common use cases, let's think harder about + # what the expected dimensions of inputs should be and how we handle the encoding. + repeat_by = num_images_per_prompt + + if image_embeds is None: + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(images=image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeds = self.image_encoder(image).image_embeds + + image_embeds = self.noise_image_embeddings( + image_embeds=image_embeds, + noise_level=noise_level, + generator=generator, + ) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + image_embeds = image_embeds.unsqueeze(1) + bs_embed, seq_len, _ = image_embeds.shape + image_embeds = image_embeds.repeat(1, repeat_by, 1) + image_embeds = image_embeds.view(bs_embed * repeat_by, seq_len, -1) + image_embeds = image_embeds.squeeze(1) + + if do_classifier_free_guidance: + negative_prompt_embeds = torch.zeros_like(image_embeds) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_embeds = torch.cat([negative_prompt_embeds, image_embeds]) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
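+                # With `clip_skip=1`, for instance, index -(clip_skip + 1) == -2 picks the
+                # penultimate layer's hidden state, and the final LayerNorm below normalizes
+                # it the same way `last_hidden_state` would have been.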
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + height, + width, + callback_steps, + noise_level, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + image_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two." + ) + + if prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + + if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + "Provide either `negative_prompt` or `negative_prompt_embeds`. Cannot leave both `negative_prompt` and `negative_prompt_embeds` undefined." + ) + + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: + raise ValueError( + f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive." + ) + + if image is not None and image_embeds is not None: + raise ValueError( + "Provide either `image` or `image_embeds`. Please make sure to define only one of the two." + ) + + if image is None and image_embeds is None: + raise ValueError( + "Provide either `image` or `image_embeds`. Cannot leave both `image` and `image_embeds` undefined." + ) + + if image is not None: + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" + f" {type(image)}" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_unclip.StableUnCLIPPipeline.noise_image_embeddings + def noise_image_embeddings( + self, + image_embeds: torch.Tensor, + noise_level: int, + noise: Optional[torch.FloatTensor] = None, + generator: Optional[torch.Generator] = None, + ): + """ + Add noise to the image embeddings. The amount of noise is controlled by a `noise_level` input. A higher + `noise_level` increases the variance in the final un-noised images. + + The noise is applied in two ways: + 1. A noise schedule is applied directly to the embeddings. + 2. A vector of sinusoidal time embeddings are appended to the output. + + In both cases, the amount of noise is controlled by the same `noise_level`. + + The embeddings are normalized before the noise is applied and un-normalized after the noise is applied. + """ + if noise is None: + noise = randn_tensor( + image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype + ) + + noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device) + + self.image_normalizer.to(image_embeds.device) + image_embeds = self.image_normalizer.scale(image_embeds) + + image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise) + + image_embeds = self.image_normalizer.unscale(image_embeds) + + noise_level = get_timestep_embedding( + timesteps=noise_level, embedding_dim=image_embeds.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0 + ) + + # `get_timestep_embeddings` does not contain any weights and will always return f32 tensors, + # but we might actually be running in fp16. so we need to cast here. 
+ # there might be better ways to encapsulate this. + noise_level = noise_level.to(image_embeds.dtype) + + image_embeds = torch.cat((image_embeds, noise_level), 1) + + return image_embeds + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: Union[torch.FloatTensor, PIL.Image.Image] = None, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 20, + guidance_scale: float = 10, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[torch.Generator] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + noise_level: int = 0, + image_embeds: Optional[torch.FloatTensor] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, either `prompt_embeds` will be + used or prompt is initialized to `""`. + image (`torch.FloatTensor` or `PIL.Image.Image`): + `Image` or tensor representing an image batch. The image is encoded to its CLIP embedding which the + `unet` is conditioned on. The image is _not_ encoded by the `vae` and then used as the latents in the + denoising process like it is in the standard Stable Diffusion text-guided image variation process. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 20): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 10.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + noise_level (`int`, *optional*, defaults to `0`): + The amount of noise to add to the image embeddings. A higher `noise_level` increases the variance in + the final un-noised images. See [`StableUnCLIPPipeline.noise_image_embeddings`] for more details. + image_embeds (`torch.FloatTensor`, *optional*): + Pre-generated CLIP embeddings to condition the `unet` on. These latents are not used in the denoising + process. If you want to provide pre-generated latents, pass them to `__call__` as `latents`. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + [`~ pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning + a tuple, the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + if prompt is None and prompt_embeds is None: + prompt = len(image) * [""] if isinstance(image, list) else "" + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt=prompt, + image=image, + height=height, + width=width, + callback_steps=callback_steps, + noise_level=noise_level, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + image_embeds=image_embeds, + ) + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + batch_size = batch_size * num_images_per_prompt + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Encoder input image + noise_level = torch.tensor([noise_level], device=device) + image_embeds = self._encode_image( + image=image, + device=device, + batch_size=batch_size, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + noise_level=noise_level, + generator=generator, + image_embeds=image_embeds, + ) + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size=batch_size, + num_channels_latents=num_channels_latents, + height=height, + width=width, + dtype=prompt_embeds.dtype, + device=device, + generator=generator, + latents=latents, + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8. Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + class_labels=image_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 9. 
Post-processing + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/safety_checker.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/safety_checker.py new file mode 100644 index 000000000..6cc4d26f2 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/safety_checker.py @@ -0,0 +1,125 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import torch +import torch.nn as nn +from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel + +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +def cosine_distance(image_embeds, text_embeds): + normalized_image_embeds = nn.functional.normalize(image_embeds) + normalized_text_embeds = nn.functional.normalize(text_embeds) + return torch.mm(normalized_image_embeds, normalized_text_embeds.t()) + + +class StableDiffusionSafetyChecker(PreTrainedModel): + config_class = CLIPConfig + + _no_split_modules = ["CLIPEncoderLayer"] + + def __init__(self, config: CLIPConfig): + super().__init__(config) + + self.vision_model = CLIPVisionModel(config.vision_config) + self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False) + + self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False) + self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False) + + self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False) + self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False) + + @torch.no_grad() + def forward(self, clip_input, images): + pooled_output = self.vision_model(clip_input)[1] # pooled_output + image_embeds = self.visual_projection(pooled_output) + + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy() + cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy() + + result = [] + batch_size = image_embeds.shape[0] + for i in range(batch_size): + result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} + + # increase this value to create a stronger `nfsw` filter + # at the cost of increasing the possibility of filtering benign images + adjustment = 0.0 + + for concept_idx in range(len(special_cos_dist[0])): + concept_cos 
= special_cos_dist[i][concept_idx] + concept_threshold = self.special_care_embeds_weights[concept_idx].item() + result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) + if result_img["special_scores"][concept_idx] > 0: + result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]}) + adjustment = 0.01 + + for concept_idx in range(len(cos_dist[0])): + concept_cos = cos_dist[i][concept_idx] + concept_threshold = self.concept_embeds_weights[concept_idx].item() + result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) + if result_img["concept_scores"][concept_idx] > 0: + result_img["bad_concepts"].append(concept_idx) + + result.append(result_img) + + has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result] + + for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): + if has_nsfw_concept: + if torch.is_tensor(images) or torch.is_tensor(images[0]): + images[idx] = torch.zeros_like(images[idx]) # black image + else: + images[idx] = np.zeros(images[idx].shape) # black image + + if any(has_nsfw_concepts): + logger.warning( + "Potential NSFW content was detected in one or more images. A black image will be returned instead." + " Try again with a different prompt and/or seed." + ) + + return images, has_nsfw_concepts + + @torch.no_grad() + def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor): + pooled_output = self.vision_model(clip_input)[1] # pooled_output + image_embeds = self.visual_projection(pooled_output) + + special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds) + cos_dist = cosine_distance(image_embeds, self.concept_embeds) + + # increase this value to create a stronger `nsfw` filter + # at the cost of increasing the possibility of filtering benign images + adjustment = 0.0 + + special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment + # special_scores = special_scores.round(decimals=3) + special_care = torch.any(special_scores > 0, dim=1) + special_adjustment = special_care * 0.01 + special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1]) + + concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment + # concept_scores = concept_scores.round(decimals=3) + has_nsfw_concepts = torch.any(concept_scores > 0, dim=1) + + images[has_nsfw_concepts] = 0.0 # black image + + return images, has_nsfw_concepts diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/safety_checker_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/safety_checker_flax.py new file mode 100644 index 000000000..571a4f2d7 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/safety_checker_flax.py @@ -0,0 +1,112 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Tuple + +import jax +import jax.numpy as jnp +from flax import linen as nn +from flax.core.frozen_dict import FrozenDict +from transformers import CLIPConfig, FlaxPreTrainedModel +from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule + + +def jax_cosine_distance(emb_1, emb_2, eps=1e-12): + norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T + norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T + return jnp.matmul(norm_emb_1, norm_emb_2.T) + + +class FlaxStableDiffusionSafetyCheckerModule(nn.Module): + config: CLIPConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.vision_model = FlaxCLIPVisionModule(self.config.vision_config) + self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype) + + self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim)) + self.special_care_embeds = self.param( + "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim) + ) + + self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,)) + self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,)) + + def __call__(self, clip_input): + pooled_output = self.vision_model(clip_input)[1] + image_embeds = self.visual_projection(pooled_output) + + special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds) + cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds) + + # increase this value to create a stronger `nfsw` filter + # at the cost of increasing the possibility of filtering benign image inputs + adjustment = 0.0 + + special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment + special_scores = jnp.round(special_scores, 3) + is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True) + # Use a lower threshold if an image has any special care concept + special_adjustment = is_special_care * 0.01 + + concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment + concept_scores = jnp.round(concept_scores, 3) + has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1) + + return has_nsfw_concepts + + +class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel): + config_class = CLIPConfig + main_input_name = "clip_input" + module_class = FlaxStableDiffusionSafetyCheckerModule + + def __init__( + self, + config: CLIPConfig, + input_shape: Optional[Tuple] = None, + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + if input_shape is None: + input_shape = (1, 224, 224, 3) + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.Array, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensor + clip_input = jax.random.normal(rng, input_shape) + + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + random_params = self.module.init(rngs, clip_input)["params"] + + return random_params + + def __call__( + self, + clip_input, + params: dict = None, + ): + clip_input = jnp.transpose(clip_input, (0, 2, 3, 1)) + + return 
self.module.apply( + {"params": params or self.params}, + jnp.array(clip_input, dtype=jnp.float32), + rngs={}, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py new file mode 100644 index 000000000..3fc6b3a3f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py @@ -0,0 +1,57 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Union + +import torch +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models.modeling_utils import ModelMixin + + +class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin): + """ + This class is used to hold the mean and standard deviation of the CLIP embedder used in stable unCLIP. + + It is used to normalize the image embeddings before the noise is applied and un-normalize the noised image + embeddings. + """ + + @register_to_config + def __init__( + self, + embedding_dim: int = 768, + ): + super().__init__() + + self.mean = nn.Parameter(torch.zeros(1, embedding_dim)) + self.std = nn.Parameter(torch.ones(1, embedding_dim)) + + def to( + self, + torch_device: Optional[Union[str, torch.device]] = None, + torch_dtype: Optional[torch.dtype] = None, + ): + self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype)) + self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype)) + return self + + def scale(self, embeds): + embeds = (embeds - self.mean) * 1.0 / self.std + return embeds + + def unscale(self, embeds): + embeds = (embeds * self.std) + self.mean + return embeds diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py new file mode 100644 index 000000000..cce556fce --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + 
_import_structure["pipeline_stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py new file mode 100644 index 000000000..03c80b46b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py @@ -0,0 +1,1088 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import math +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +from torch.nn import functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import Attention +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionAttendAndExcitePipeline + + >>> pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained( + ... "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16 + ... 
).to("cuda") + + + >>> prompt = "a cat and a frog" + + >>> # use get_indices function to find out indices of the tokens you want to alter + >>> pipe.get_indices(prompt) + {0: '<|startoftext|>', 1: 'a', 2: 'cat', 3: 'and', 4: 'a', 5: 'frog', 6: '<|endoftext|>'} + + >>> token_indices = [2, 5] + >>> seed = 6141 + >>> generator = torch.Generator("cuda").manual_seed(seed) + + >>> images = pipe( + ... prompt=prompt, + ... token_indices=token_indices, + ... guidance_scale=7.5, + ... generator=generator, + ... num_inference_steps=50, + ... max_iter_to_alter=25, + ... ).images + + >>> image = images[0] + >>> image.save(f"../images/{prompt}_{seed}.png") + ``` +""" + + +class AttentionStore: + @staticmethod + def get_empty_store(): + return {"down": [], "mid": [], "up": []} + + def __call__(self, attn, is_cross: bool, place_in_unet: str): + if self.cur_att_layer >= 0 and is_cross: + if attn.shape[1] == np.prod(self.attn_res): + self.step_store[place_in_unet].append(attn) + + self.cur_att_layer += 1 + if self.cur_att_layer == self.num_att_layers: + self.cur_att_layer = 0 + self.between_steps() + + def between_steps(self): + self.attention_store = self.step_store + self.step_store = self.get_empty_store() + + def get_average_attention(self): + average_attention = self.attention_store + return average_attention + + def aggregate_attention(self, from_where: List[str]) -> torch.Tensor: + """Aggregates the attention across the different layers and heads at the specified resolution.""" + out = [] + attention_maps = self.get_average_attention() + for location in from_where: + for item in attention_maps[location]: + cross_maps = item.reshape(-1, self.attn_res[0], self.attn_res[1], item.shape[-1]) + out.append(cross_maps) + out = torch.cat(out, dim=0) + out = out.sum(0) / out.shape[0] + return out + + def reset(self): + self.cur_att_layer = 0 + self.step_store = self.get_empty_store() + self.attention_store = {} + + def __init__(self, attn_res): + """ + Initialize an empty AttentionStore :param step_index: used to visualize only a specific step in the diffusion + process + """ + self.num_att_layers = -1 + self.cur_att_layer = 0 + self.step_store = self.get_empty_store() + self.attention_store = {} + self.curr_step_index = 0 + self.attn_res = attn_res + + +class AttendExciteAttnProcessor: + def __init__(self, attnstore, place_in_unet): + super().__init__() + self.attnstore = attnstore + self.place_in_unet = place_in_unet + + def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + query = attn.to_q(hidden_states) + + is_cross = encoder_hidden_states is not None + encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + + # only need to store attention maps during the Attend and Excite process + if attention_probs.requires_grad: + self.attnstore(attention_probs, is_cross, self.place_in_unet) + + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = 
attn.to_out[1](hidden_states) + + return hidden_states + + +class StableDiffusionAttendAndExcitePipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion and Attend-and-Excite. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. 
A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
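+                # (e.g. clip_skip=1 uses hidden_states[-2], the penultimate encoder layer; the final LayerNorm below keeps its distribution consistent with last_hidden_state)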
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + 
images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + indices, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + indices_is_list_ints = isinstance(indices, list) and isinstance(indices[0], int) + indices_is_list_list_ints = ( + isinstance(indices, list) and isinstance(indices[0], list) and isinstance(indices[0][0], int) + ) + + if not indices_is_list_ints and not indices_is_list_list_ints: + raise TypeError("`indices` must be a list of ints or a list of a list of ints") + + if indices_is_list_ints: + indices_batch_size = 1 + elif indices_is_list_list_ints: + indices_batch_size = len(indices) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if indices_batch_size != prompt_batch_size: + raise ValueError( + f"indices batch size must be same as prompt batch size. indices batch size: {indices_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @staticmethod + def _compute_max_attention_per_index( + attention_maps: torch.Tensor, + indices: List[int], + ) -> List[torch.Tensor]: + """Computes the maximum attention value for each of the tokens we wish to alter.""" + attention_for_text = attention_maps[:, :, 1:-1] + attention_for_text *= 100 + attention_for_text = torch.nn.functional.softmax(attention_for_text, dim=-1) + + # Shift indices since we removed the first token + indices = [index - 1 for index in indices] + + # Extract the maximum values + max_indices_list = [] + for i in indices: + image = attention_for_text[:, :, i] + smoothing = GaussianSmoothing().to(attention_maps.device) + input = F.pad(image.unsqueeze(0).unsqueeze(0), (1, 1, 1, 1), mode="reflect") + image = smoothing(input).squeeze(0).squeeze(0) + max_indices_list.append(image.max()) + return max_indices_list + + def _aggregate_and_get_max_attention_per_token( + self, + indices: List[int], + ): + """Aggregates the attention for each token and computes the max activation value for each token to alter.""" + attention_maps = self.attention_store.aggregate_attention( + from_where=("up", "down", "mid"), + ) + max_attention_per_index = self._compute_max_attention_per_index( + attention_maps=attention_maps, + indices=indices, + ) + return max_attention_per_index + + @staticmethod + def _compute_loss(max_attention_per_index: List[torch.Tensor]) -> torch.Tensor: + """Computes the attend-and-excite loss using the maximum attention value for each token.""" + losses = [max(0, 1.0 - curr_max) for curr_max in max_attention_per_index] + loss = max(losses) + return loss + + @staticmethod + def _update_latent(latents: torch.Tensor, 
loss: torch.Tensor, step_size: float) -> torch.Tensor: + """Update the latent according to the computed loss.""" + grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents], retain_graph=True)[0] + latents = latents - step_size * grad_cond + return latents + + def _perform_iterative_refinement_step( + self, + latents: torch.Tensor, + indices: List[int], + loss: torch.Tensor, + threshold: float, + text_embeddings: torch.Tensor, + step_size: float, + t: int, + max_refinement_steps: int = 20, + ): + """ + Performs the iterative latent refinement introduced in the paper. Here, we continuously update the latent code + according to our loss objective until the given threshold is reached for all tokens. + """ + iteration = 0 + target_loss = max(0, 1.0 - threshold) + while loss > target_loss: + iteration += 1 + + latents = latents.clone().detach().requires_grad_(True) + self.unet(latents, t, encoder_hidden_states=text_embeddings).sample + self.unet.zero_grad() + + # Get max activation value for each subject token + max_attention_per_index = self._aggregate_and_get_max_attention_per_token( + indices=indices, + ) + + loss = self._compute_loss(max_attention_per_index) + + if loss != 0: + latents = self._update_latent(latents, loss, step_size) + + logger.info(f"\t Try {iteration}. loss: {loss}") + + if iteration >= max_refinement_steps: + logger.info(f"\t Exceeded max number of iterations ({max_refinement_steps})! ") + break + + # Run one more time but don't compute gradients and update the latents. + # We just need to compute the new loss - the grad update will occur below + latents = latents.clone().detach().requires_grad_(True) + _ = self.unet(latents, t, encoder_hidden_states=text_embeddings).sample + self.unet.zero_grad() + + # Get max activation value for each subject token + max_attention_per_index = self._aggregate_and_get_max_attention_per_token( + indices=indices, + ) + loss = self._compute_loss(max_attention_per_index) + logger.info(f"\t Finished with loss of: {loss}") + return loss, latents, max_attention_per_index + + def register_attention_control(self): + attn_procs = {} + cross_att_count = 0 + for name in self.unet.attn_processors.keys(): + if name.startswith("mid_block"): + place_in_unet = "mid" + elif name.startswith("up_blocks"): + place_in_unet = "up" + elif name.startswith("down_blocks"): + place_in_unet = "down" + else: + continue + + cross_att_count += 1 + attn_procs[name] = AttendExciteAttnProcessor(attnstore=self.attention_store, place_in_unet=place_in_unet) + + self.unet.set_attn_processor(attn_procs) + self.attention_store.num_att_layers = cross_att_count + + def get_indices(self, prompt: str) -> Dict[str, int]: + """Utility function to list the indices of the tokens you wish to alte""" + ids = self.tokenizer(prompt).input_ids + indices = {i: tok for tok, i in zip(self.tokenizer.convert_ids_to_tokens(ids), range(len(ids)))} + return indices + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + token_indices: Union[List[int], List[List[int]]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: 
Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + max_iter_to_alter: int = 25, + thresholds: dict = {0: 0.05, 10: 0.5, 20: 0.8}, + scale_factor: int = 20, + attn_res: Optional[Tuple[int]] = (16, 16), + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + token_indices (`List[int]`): + The token indices to alter with attend-and-excite. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. 
The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + max_iter_to_alter (`int`, *optional*, defaults to `25`): + Number of denoising steps to apply attend-and-excite. The `max_iter_to_alter` denoising steps are when + attend-and-excite is applied. For example, if `max_iter_to_alter` is `25` and there are a total of `30` + denoising steps, the first `25` denoising steps applies attend-and-excite and the last `5` will not. + thresholds (`dict`, *optional*, defaults to `{0: 0.05, 10: 0.5, 20: 0.8}`): + Dictionary defining the iterations and desired thresholds to apply iterative latent refinement in. + scale_factor (`int`, *optional*, default to 20): + Scale factor to control the step size of each attend-and-excite update. + attn_res (`tuple`, *optional*, default computed from width and height): + The 2D resolution of the semantic attention map. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + token_indices, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + if attn_res is None: + attn_res = int(np.ceil(width / 32)), int(np.ceil(height / 32)) + self.attention_store = AttentionStore(attn_res) + self.register_attention_control() + + # default config for step size from original repo + scale_range = np.linspace(1.0, 0.5, len(self.scheduler.timesteps)) + step_size = scale_factor * np.sqrt(scale_range) + + text_embeddings = ( + prompt_embeds[batch_size * num_images_per_prompt :] if do_classifier_free_guidance else prompt_embeds + ) + + if isinstance(token_indices[0], int): + token_indices = [token_indices] + + indices = [] + + for ind in token_indices: + indices = indices + [ind] * num_images_per_prompt + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Attend and excite process + with torch.enable_grad(): + latents = latents.clone().detach().requires_grad_(True) + updated_latents = [] + for latent, index, text_embedding in zip(latents, indices, text_embeddings): + # Forward pass of denoising with text conditioning + latent = latent.unsqueeze(0) + text_embedding = text_embedding.unsqueeze(0) + + self.unet( + latent, + t, + encoder_hidden_states=text_embedding, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + self.unet.zero_grad() + + # Get max activation value for each subject token + max_attention_per_index = self._aggregate_and_get_max_attention_per_token( + indices=index, + ) + + loss = self._compute_loss(max_attention_per_index=max_attention_per_index) + + # If this is an iterative refinement step, verify we have reached the desired threshold for all + if i in thresholds.keys() and loss > 1.0 - thresholds[i]: + loss, latent, max_attention_per_index = self._perform_iterative_refinement_step( + latents=latent, + indices=index, + loss=loss, + threshold=thresholds[i], + text_embeddings=text_embedding, + step_size=step_size[i], + t=t, + ) + + # Perform gradient update + if i < max_iter_to_alter: + if loss != 0: + latent = self._update_latent( + latents=latent, + loss=loss, + step_size=step_size[i], + ) + logger.info(f"Iteration {i} | Loss: {loss:0.4f}") + + updated_latents.append(latent) + + latents = torch.cat(updated_latents, dim=0) + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if 
do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 8. Post-processing + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + +class GaussianSmoothing(torch.nn.Module): + """ + Arguments: + Apply gaussian smoothing on a 1d, 2d or 3d tensor. Filtering is performed seperately for each channel in the input + using a depthwise convolution. + channels (int, sequence): Number of channels of the input tensors. Output will + have this number of channels as well. + kernel_size (int, sequence): Size of the gaussian kernel. sigma (float, sequence): Standard deviation of the + gaussian kernel. dim (int, optional): The number of dimensions of the data. + Default value is 2 (spatial). + """ + + # channels=1, kernel_size=kernel_size, sigma=sigma, dim=2 + def __init__( + self, + channels: int = 1, + kernel_size: int = 3, + sigma: float = 0.5, + dim: int = 2, + ): + super().__init__() + + if isinstance(kernel_size, int): + kernel_size = [kernel_size] * dim + if isinstance(sigma, float): + sigma = [sigma] * dim + + # The gaussian kernel is the product of the + # gaussian function of each dimension. + kernel = 1 + meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size]) + for size, std, mgrid in zip(kernel_size, sigma, meshgrids): + mean = (size - 1) / 2 + kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2)) + + # Make sure sum of values in gaussian kernel equals 1. + kernel = kernel / torch.sum(kernel) + + # Reshape to depthwise convolutional weight + kernel = kernel.view(1, 1, *kernel.size()) + kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1)) + + self.register_buffer("weight", kernel) + self.groups = channels + + if dim == 1: + self.conv = F.conv1d + elif dim == 2: + self.conv = F.conv2d + elif dim == 3: + self.conv = F.conv3d + else: + raise RuntimeError("Only 1, 2 and 3 dimensions are supported. Received {}.".format(dim)) + + def forward(self, input): + """ + Arguments: + Apply gaussian filter to input. + input (torch.Tensor): Input to apply gaussian filter on. + Returns: + filtered (torch.Tensor): Filtered output. 
+ """ + return self.conv(input, weight=self.weight.to(input.dtype), groups=self.groups) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_diffedit/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_diffedit/__init__.py new file mode 100644 index 000000000..e2145edb9 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_diffedit/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_diffedit"] = ["StableDiffusionDiffEditPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py new file mode 100644 index 000000000..4c90ce064 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py @@ -0,0 +1,1530 @@ +# Copyright 2024 DiffEdit Authors and Pix2Pix Zero Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...configuration_utils import FrozenDict +from ...image_processor import VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import DDIMInverseScheduler, KarrasDiffusionSchedulers +from ...utils import ( + PIL_INTERPOLATION, + USE_PEFT_BACKEND, + BaseOutput, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class DiffEditInversionPipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + latents (`torch.FloatTensor`) + inverted latents tensor + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `num_timesteps * batch_size` or numpy array of shape `(num_timesteps, + batch_size, height, width, num_channels)`. PIL images or numpy array present the denoised images of the + diffusion pipeline. + """ + + latents: torch.FloatTensor + images: Union[List[PIL.Image.Image], np.ndarray] + + +EXAMPLE_DOC_STRING = """ + + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionDiffEditPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" + + >>> init_image = download_image(img_url).resize((768, 768)) + + >>> pipe = StableDiffusionDiffEditPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.enable_model_cpu_offload() + + >>> mask_prompt = "A bowl of fruits" + >>> prompt = "A bowl of pears" + + >>> mask_image = pipe.generate_mask(image=init_image, source_prompt=prompt, target_prompt=mask_prompt) + >>> image_latents = pipe.invert(image=init_image, prompt=mask_prompt).latents + >>> image = pipe(prompt=prompt, mask_image=mask_image, image_latents=image_latents).images[0] + ``` +""" + +EXAMPLE_INVERT_DOC_STRING = """ + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionDiffEditPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" + + >>> init_image = download_image(img_url).resize((768, 768)) + + >>> pipe = StableDiffusionDiffEditPipeline.from_pretrained( + ... 
"stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.enable_model_cpu_offload() + + >>> prompt = "A bowl of fruits" + + >>> inverted_latents = pipe.invert(image=init_image, prompt=prompt).latents + ``` +""" + + +def auto_corr_loss(hidden_states, generator=None): + reg_loss = 0.0 + for i in range(hidden_states.shape[0]): + for j in range(hidden_states.shape[1]): + noise = hidden_states[i : i + 1, j : j + 1, :, :] + while True: + roll_amount = torch.randint(noise.shape[2] // 2, (1,), generator=generator).item() + reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=2)).mean() ** 2 + reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=3)).mean() ** 2 + + if noise.shape[2] <= 8: + break + noise = torch.nn.functional.avg_pool2d(noise, kernel_size=2) + return reg_loss + + +def kl_divergence(hidden_states): + return hidden_states.var() + hidden_states.mean() ** 2 - 1 - torch.log(hidden_states.var() + 1e-7) + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +def preprocess_mask(mask, batch_size: int = 1): + if not isinstance(mask, torch.Tensor): + # preprocess mask + if isinstance(mask, PIL.Image.Image) or isinstance(mask, np.ndarray): + mask = [mask] + + if isinstance(mask, list): + if isinstance(mask[0], PIL.Image.Image): + mask = [np.array(m.convert("L")).astype(np.float32) / 255.0 for m in mask] + if isinstance(mask[0], np.ndarray): + mask = np.stack(mask, axis=0) if mask[0].ndim < 3 else np.concatenate(mask, axis=0) + mask = torch.from_numpy(mask) + elif isinstance(mask[0], torch.Tensor): + mask = torch.stack(mask, dim=0) if mask[0].ndim < 3 else torch.cat(mask, dim=0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + # Check mask shape + if batch_size > 1: + if mask.shape[0] == 1: + mask = torch.cat([mask] * batch_size) + elif mask.shape[0] > 1 and mask.shape[0] != batch_size: + raise ValueError( + f"`mask_image` with batch size {mask.shape[0]} cannot be broadcasted to batch size {batch_size} " + f"inferred by prompt inputs" + ) + + if mask.shape[1] != 1: + raise 
ValueError(f"`mask_image` must have 1 channel, but has {mask.shape[1]} channels") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("`mask_image` should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + return mask + + +class StableDiffusionDiffEditPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin +): + r""" + + + This is an experimental feature! + + + + Pipeline for text-guided image inpainting using Stable Diffusion and DiffEdit. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading and saving methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. + inverse_scheduler ([`DDIMInverseScheduler`]): + A scheduler to be used in combination with `unet` to fill in the unmasked part of the input latents. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "inverse_scheduler"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + inverse_scheduler: DDIMInverseScheduler, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration" + " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" + " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" + " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" + " Hub, it would be very nice if you could open a Pull request for the" + " `scheduler/scheduler_config.json` file" + ) + deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["skip_prk_steps"] = True + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + inverse_scheduler=inverse_scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
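+                # Note: with `output_hidden_states=True` the text encoder returns `num_hidden_layers + 1`
+                # hidden states (the embedding output plus one per layer), so `-(clip_skip + 1)` selects the
+                # layer that sits `clip_skip` layers before the last one (e.g. `clip_skip=1` -> `hidden_states[-2]`).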
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + 
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
+            )
+        return image, has_nsfw_concept
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+    def prepare_extra_step_kwargs(self, generator, eta):
+        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+        # and should be between [0, 1]
+
+        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+        extra_step_kwargs = {}
+        if accepts_eta:
+            extra_step_kwargs["eta"] = eta
+
+        # check if the scheduler accepts generator
+        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
+        if accepts_generator:
+            extra_step_kwargs["generator"] = generator
+        return extra_step_kwargs
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
+    def decode_latents(self, latents):
+        deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
+        deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
+
+        latents = 1 / self.vae.config.scaling_factor * latents
+        image = self.vae.decode(latents, return_dict=False)[0]
+        image = (image / 2 + 0.5).clamp(0, 1)
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+        return image
+
+    def check_inputs(
+        self,
+        prompt,
+        strength,
+        callback_steps,
+        negative_prompt=None,
+        prompt_embeds=None,
+        negative_prompt_embeds=None,
+    ):
+        if (strength is None) or (strength is not None and (strength < 0 or strength > 1)):
+            raise ValueError(
+                f"The value of `strength` should be in [0.0, 1.0] but is {strength} of type {type(strength)}."
+            )
+
+        if (callback_steps is None) or (
+            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
+        ):
+            raise ValueError(
+                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
+                f" {type(callback_steps)}."
+            )
+
+        if prompt is not None and prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                " only forward one of the two."
+            )
+        elif prompt is None and prompt_embeds is None:
+            raise ValueError(
+                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+            )
+        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+        if negative_prompt is not None and negative_prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+            )
+
+        if prompt_embeds is not None and negative_prompt_embeds is not None:
+            if prompt_embeds.shape != negative_prompt_embeds.shape:
+                raise ValueError(
+                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
+                    f" {negative_prompt_embeds.shape}."
+                )
+
+    def check_source_inputs(
+        self,
+        source_prompt=None,
+        source_negative_prompt=None,
+        source_prompt_embeds=None,
+        source_negative_prompt_embeds=None,
+    ):
+        if source_prompt is not None and source_prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `source_prompt`: {source_prompt} and `source_prompt_embeds`: {source_prompt_embeds}."
+                " Please make sure to only forward one of the two."
+            )
+        elif source_prompt is None and source_prompt_embeds is None:
+            raise ValueError(
+                "Provide either `source_prompt` or `source_prompt_embeds`. Cannot leave both of the arguments undefined."
+            )
+        elif source_prompt is not None and (
+            not isinstance(source_prompt, str) and not isinstance(source_prompt, list)
+        ):
+            raise ValueError(f"`source_prompt` has to be of type `str` or `list` but is {type(source_prompt)}")
+
+        if source_negative_prompt is not None and source_negative_prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `source_negative_prompt`: {source_negative_prompt} and `source_negative_prompt_embeds`:"
+                f" {source_negative_prompt_embeds}. Please make sure to only forward one of the two."
+            )
+
+        if source_prompt_embeds is not None and source_negative_prompt_embeds is not None:
+            if source_prompt_embeds.shape != source_negative_prompt_embeds.shape:
+                raise ValueError(
+                    "`source_prompt_embeds` and `source_negative_prompt_embeds` must have the same shape when passed"
+                    f" directly, but got: `source_prompt_embeds` {source_prompt_embeds.shape} !="
+                    f" `source_negative_prompt_embeds` {source_negative_prompt_embeds.shape}."
+ ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def get_inverse_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + + # safety for t_start overflow to prevent empty timsteps slice + if t_start == 0: + return self.inverse_scheduler.timesteps, num_inference_steps + timesteps = self.inverse_scheduler.timesteps[:-t_start] + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_image_latents(self, image, batch_size, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + if image.shape[1] == 4: + latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if isinstance(generator, list): + latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) + ] + latents = torch.cat(latents, dim=0) + else: + latents = self.vae.encode(image).latent_dist.sample(generator) + + latents = self.vae.config.scaling_factor * latents + + if batch_size != latents.shape[0]: + if batch_size % latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. 
Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_latents_per_image = batch_size // latents.shape[0] + latents = torch.cat([latents] * additional_latents_per_image, dim=0) + else: + raise ValueError( + f"Cannot duplicate `image` of batch size {latents.shape[0]} to {batch_size} text prompts." + ) + else: + latents = torch.cat([latents], dim=0) + + return latents + + def get_epsilon(self, model_output: torch.Tensor, sample: torch.Tensor, timestep: int): + pred_type = self.inverse_scheduler.config.prediction_type + alpha_prod_t = self.inverse_scheduler.alphas_cumprod[timestep] + + beta_prod_t = 1 - alpha_prod_t + + if pred_type == "epsilon": + return model_output + elif pred_type == "sample": + return (sample - alpha_prod_t ** (0.5) * model_output) / beta_prod_t ** (0.5) + elif pred_type == "v_prediction": + return (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {pred_type} must be one of `epsilon`, `sample`, or `v_prediction`" + ) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def generate_mask( + self, + image: Union[torch.FloatTensor, PIL.Image.Image] = None, + target_prompt: Optional[Union[str, List[str]]] = None, + target_negative_prompt: Optional[Union[str, List[str]]] = None, + target_prompt_embeds: Optional[torch.FloatTensor] = None, + target_negative_prompt_embeds: Optional[torch.FloatTensor] = None, + source_prompt: Optional[Union[str, List[str]]] = None, + source_negative_prompt: Optional[Union[str, List[str]]] = None, + source_prompt_embeds: Optional[torch.FloatTensor] = None, + source_negative_prompt_embeds: Optional[torch.FloatTensor] = None, + num_maps_per_mask: Optional[int] = 10, + mask_encode_strength: Optional[float] = 0.5, + mask_thresholding_ratio: Optional[float] = 3.0, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "np", + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + ): + r""" + Generate a latent mask given a mask prompt, a target prompt, and an image. + + Args: + image (`PIL.Image.Image`): + `Image` or tensor representing an image batch to be used for computing the mask. + target_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide semantic mask generation. If not defined, you need to pass + `prompt_embeds`. + target_negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + target_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + target_negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + source_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide semantic mask generation using DiffEdit. 
If not defined, you need to + pass `source_prompt_embeds` or `source_image` instead. + source_negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide semantic mask generation away from using DiffEdit. If not defined, you + need to pass `source_negative_prompt_embeds` or `source_image` instead. + source_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings to guide the semantic mask generation. Can be used to easily tweak text + inputs (prompt weighting). If not provided, text embeddings are generated from `source_prompt` input + argument. + source_negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings to negatively guide the semantic mask generation. Can be used to easily + tweak text inputs (prompt weighting). If not provided, text embeddings are generated from + `source_negative_prompt` input argument. + num_maps_per_mask (`int`, *optional*, defaults to 10): + The number of noise maps sampled to generate the semantic mask using DiffEdit. + mask_encode_strength (`float`, *optional*, defaults to 0.5): + The strength of the noise maps sampled to generate the semantic mask using DiffEdit. Must be between 0 + and 1. + mask_thresholding_ratio (`float`, *optional*, defaults to 3.0): + The maximum multiple of the mean absolute difference used to clamp the semantic guidance map before + mask binarization. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the + [`~models.attention_processor.AttnProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + Returns: + `List[PIL.Image.Image]` or `np.array`: + When returning a `List[PIL.Image.Image]`, the list consists of a batch of single-channel binary images + with dimensions `(height // self.vae_scale_factor, width // self.vae_scale_factor)`. If it's + `np.array`, the shape is `(batch_size, height // self.vae_scale_factor, width // + self.vae_scale_factor)`. + """ + + # 1. Check inputs (Provide dummy argument for callback_steps) + self.check_inputs( + target_prompt, + mask_encode_strength, + 1, + target_negative_prompt, + target_prompt_embeds, + target_negative_prompt_embeds, + ) + + self.check_source_inputs( + source_prompt, + source_negative_prompt, + source_prompt_embeds, + source_negative_prompt_embeds, + ) + + if (num_maps_per_mask is None) or ( + num_maps_per_mask is not None and (not isinstance(num_maps_per_mask, int) or num_maps_per_mask <= 0) + ): + raise ValueError( + f"`num_maps_per_mask` has to be a positive integer but is {num_maps_per_mask} of type" + f" {type(num_maps_per_mask)}." 
+ ) + + if mask_thresholding_ratio is None or mask_thresholding_ratio <= 0: + raise ValueError( + f"`mask_thresholding_ratio` has to be positive but is {mask_thresholding_ratio} of type" + f" {type(mask_thresholding_ratio)}." + ) + + # 2. Define call parameters + if target_prompt is not None and isinstance(target_prompt, str): + batch_size = 1 + elif target_prompt is not None and isinstance(target_prompt, list): + batch_size = len(target_prompt) + else: + batch_size = target_prompt_embeds.shape[0] + if cross_attention_kwargs is None: + cross_attention_kwargs = {} + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompts + (cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None) + target_negative_prompt_embeds, target_prompt_embeds = self.encode_prompt( + target_prompt, + device, + num_maps_per_mask, + do_classifier_free_guidance, + target_negative_prompt, + prompt_embeds=target_prompt_embeds, + negative_prompt_embeds=target_negative_prompt_embeds, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + target_prompt_embeds = torch.cat([target_negative_prompt_embeds, target_prompt_embeds]) + + source_negative_prompt_embeds, source_prompt_embeds = self.encode_prompt( + source_prompt, + device, + num_maps_per_mask, + do_classifier_free_guidance, + source_negative_prompt, + prompt_embeds=source_prompt_embeds, + negative_prompt_embeds=source_negative_prompt_embeds, + ) + if do_classifier_free_guidance: + source_prompt_embeds = torch.cat([source_negative_prompt_embeds, source_prompt_embeds]) + + # 4. Preprocess image + image = self.image_processor.preprocess(image).repeat_interleave(num_maps_per_mask, dim=0) + + # 5. Set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, _ = self.get_timesteps(num_inference_steps, mask_encode_strength, device) + encode_timestep = timesteps[0] + + # 6. Prepare image latents and add noise with specified strength + image_latents = self.prepare_image_latents( + image, batch_size * num_maps_per_mask, self.vae.dtype, device, generator + ) + noise = randn_tensor(image_latents.shape, generator=generator, device=device, dtype=self.vae.dtype) + image_latents = self.scheduler.add_noise(image_latents, noise, encode_timestep) + + latent_model_input = torch.cat([image_latents] * (4 if do_classifier_free_guidance else 2)) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, encode_timestep) + + # 7. 
Predict the noise residual + prompt_embeds = torch.cat([source_prompt_embeds, target_prompt_embeds]) + noise_pred = self.unet( + latent_model_input, + encode_timestep, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + if do_classifier_free_guidance: + noise_pred_neg_src, noise_pred_source, noise_pred_uncond, noise_pred_target = noise_pred.chunk(4) + noise_pred_source = noise_pred_neg_src + guidance_scale * (noise_pred_source - noise_pred_neg_src) + noise_pred_target = noise_pred_uncond + guidance_scale * (noise_pred_target - noise_pred_uncond) + else: + noise_pred_source, noise_pred_target = noise_pred.chunk(2) + + # 8. Compute the mask from the absolute difference of predicted noise residuals + # TODO: Consider smoothing mask guidance map + mask_guidance_map = ( + torch.abs(noise_pred_target - noise_pred_source) + .reshape(batch_size, num_maps_per_mask, *noise_pred_target.shape[-3:]) + .mean([1, 2]) + ) + clamp_magnitude = mask_guidance_map.mean() * mask_thresholding_ratio + semantic_mask_image = mask_guidance_map.clamp(0, clamp_magnitude) / clamp_magnitude + semantic_mask_image = torch.where(semantic_mask_image <= 0.5, 0, 1) + mask_image = semantic_mask_image.cpu().numpy() + + # 9. Convert to Numpy array or PIL. + if output_type == "pil": + mask_image = self.image_processor.numpy_to_pil(mask_image) + + # Offload all models + self.maybe_free_model_hooks() + + return mask_image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_INVERT_DOC_STRING) + def invert( + self, + prompt: Optional[Union[str, List[str]]] = None, + image: Union[torch.FloatTensor, PIL.Image.Image] = None, + num_inference_steps: int = 50, + inpaint_strength: float = 0.8, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + decode_latents: bool = False, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + lambda_auto_corr: float = 20.0, + lambda_kl: float = 20.0, + num_reg_steps: int = 0, + num_auto_corr_rolls: int = 5, + ): + r""" + Generate inverted latents given a prompt and image. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`PIL.Image.Image`): + `Image` or tensor representing an image batch to produce the inverted latents guided by `prompt`. + inpaint_strength (`float`, *optional*, defaults to 0.8): + Indicates extent of the noising process to run latent inversion. Must be between 0 and 1. When + `inpaint_strength` is 1, the inversion process is run for the full number of iterations specified in + `num_inference_steps`. `image` is used as a reference for the inversion process, and adding more noise + increases `inpaint_strength`. If `inpaint_strength` is 0, no inpainting occurs. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. 
+ guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + decode_latents (`bool`, *optional*, defaults to `False`): + Whether or not to decode the inverted latents into a generated image. Setting this argument to `True` + decodes all inverted latents for each timestep into a list of generated images. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.DiffEditInversionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the + [`~models.attention_processor.AttnProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + lambda_auto_corr (`float`, *optional*, defaults to 20.0): + Lambda parameter to control auto correction. + lambda_kl (`float`, *optional*, defaults to 20.0): + Lambda parameter to control Kullback-Leibler divergence output. + num_reg_steps (`int`, *optional*, defaults to 0): + Number of regularization loss steps. + num_auto_corr_rolls (`int`, *optional*, defaults to 5): + Number of auto correction roll steps. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.pipeline_stable_diffusion_diffedit.DiffEditInversionPipelineOutput`] or + `tuple`: + If `return_dict` is `True`, + [`~pipelines.stable_diffusion.pipeline_stable_diffusion_diffedit.DiffEditInversionPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is the inverted latents tensors + ordered by increasing noise, and the second is the corresponding decoded images if `decode_latents` is + `True`, otherwise `None`. + """ + + # 1. 
Check inputs + self.check_inputs( + prompt, + inpaint_strength, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + if cross_attention_kwargs is None: + cross_attention_kwargs = {} + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Preprocess image + image = self.image_processor.preprocess(image) + + # 4. Prepare latent variables + num_images_per_prompt = 1 + latents = self.prepare_image_latents( + image, batch_size * num_images_per_prompt, self.vae.dtype, device, generator + ) + + # 5. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 6. Prepare timesteps + self.inverse_scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_inverse_timesteps(num_inference_steps, inpaint_strength, device) + + # 7. Noising loop where we obtain the intermediate noised latent image for each timestep. 
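+        # Each iteration below takes one forward (noising) step with `inverse_scheduler` and stores the
+        # resulting latent, so that `__call__` can later pick the partially noised latent that matches each
+        # denoising timestep. The optional regularization (auto-correlation / KL, borrowed from Pix2PixZero)
+        # nudges the predicted noise toward an IID standard normal; it is skipped by default since
+        # `num_reg_steps` defaults to 0.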
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.inverse_scheduler.order + inverted_latents = [] + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.inverse_scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # regularization of the noise prediction (not in original code or paper but borrowed from Pix2PixZero) + if num_reg_steps > 0: + with torch.enable_grad(): + for _ in range(num_reg_steps): + if lambda_auto_corr > 0: + for _ in range(num_auto_corr_rolls): + var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True) + + # Derive epsilon from model output before regularizing to IID standard normal + var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t) + + l_ac = auto_corr_loss(var_epsilon, generator=generator) + l_ac.backward() + + grad = var.grad.detach() / num_auto_corr_rolls + noise_pred = noise_pred - lambda_auto_corr * grad + + if lambda_kl > 0: + var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True) + + # Derive epsilon from model output before regularizing to IID standard normal + var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t) + + l_kld = kl_divergence(var_epsilon) + l_kld.backward() + + grad = var.grad.detach() + noise_pred = noise_pred - lambda_kl * grad + + noise_pred = noise_pred.detach() + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.inverse_scheduler.step(noise_pred, t, latents).prev_sample + inverted_latents.append(latents.detach().clone()) + + # call the callback, if provided + if i == len(timesteps) - 1 or ( + (i + 1) > num_warmup_steps and (i + 1) % self.inverse_scheduler.order == 0 + ): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + assert len(inverted_latents) == len(timesteps) + latents = torch.stack(list(reversed(inverted_latents)), 1) + + # 8. Post-processing + image = None + if decode_latents: + image = self.decode_latents(latents.flatten(0, 1)) + + # 9. Convert to PIL. 
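+        # At this point `latents` has shape (batch_size, num_timesteps, C, H, W), with the most-noised latent
+        # first (the per-step list was reversed above so it lines up with the denoising timesteps in `__call__`),
+        # and `image` is still `None` unless `decode_latents=True` decoded every intermediate latent.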
+        if decode_latents and output_type == "pil":
+            image = self.image_processor.numpy_to_pil(image)
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (latents, image)
+
+        return DiffEditInversionPipelineOutput(latents=latents, images=image)
+
+    @torch.no_grad()
+    @replace_example_docstring(EXAMPLE_DOC_STRING)
+    def __call__(
+        self,
+        prompt: Optional[Union[str, List[str]]] = None,
+        mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
+        image_latents: Union[torch.FloatTensor, PIL.Image.Image] = None,
+        inpaint_strength: Optional[float] = 0.8,
+        num_inference_steps: int = 50,
+        guidance_scale: float = 7.5,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        num_images_per_prompt: Optional[int] = 1,
+        eta: float = 0.0,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.FloatTensor] = None,
+        prompt_embeds: Optional[torch.FloatTensor] = None,
+        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+        callback_steps: int = 1,
+        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+        clip_skip: Optional[int] = None,
+    ):
+        r"""
+        The call function to the pipeline for generation.
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
+            mask_image (`PIL.Image.Image`):
+                `Image` or tensor representing an image batch to mask the generated image. White pixels in the mask are
+                repainted, while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a
+                single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
+                instead of 3, so the expected shape would be `(B, 1, H, W)`.
+            image_latents (`PIL.Image.Image` or `torch.FloatTensor`):
+                Partially noised image latents from the inversion process to be used as inputs for image generation.
+            inpaint_strength (`float`, *optional*, defaults to 0.8):
+                Indicates extent to inpaint the masked area. Must be between 0 and 1. When `inpaint_strength` is 1, the
+                denoising process is run on the masked area for the full number of iterations specified in
+                `num_inference_steps`. `image_latents` is used as a reference for the masked area, and adding more
+                noise to a region increases `inpaint_strength`. If `inpaint_strength` is 0, no inpainting occurs.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            guidance_scale (`float`, *optional*, defaults to 7.5):
+                A higher guidance scale value encourages the model to generate images closely linked to the text
+                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
+                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+ generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + # 1. Check inputs + self.check_inputs( + prompt, + inpaint_strength, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + if mask_image is None: + raise ValueError( + "`mask_image` input cannot be undefined. Use `generate_mask()` to compute `mask_image` from text prompts." + ) + if image_latents is None: + raise ValueError( + "`image_latents` input cannot be undefined. Use `invert()` to compute `image_latents` from input images." + ) + + # 2. 
Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+        if cross_attention_kwargs is None:
+            cross_attention_kwargs = {}
+
+        device = self._execution_device
+        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+        # corresponds to doing no classifier free guidance.
+        do_classifier_free_guidance = guidance_scale > 1.0
+
+        # 3. Encode input prompt
+        text_encoder_lora_scale = (
+            cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
+        )
+        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
+            prompt,
+            device,
+            num_images_per_prompt,
+            do_classifier_free_guidance,
+            negative_prompt,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            lora_scale=text_encoder_lora_scale,
+            clip_skip=clip_skip,
+        )
+        # For classifier free guidance, we need to do two forward passes.
+        # Here we concatenate the unconditional and text embeddings into a single batch
+        # to avoid doing two forward passes
+        if do_classifier_free_guidance:
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+        # 4. Preprocess mask
+        mask_image = preprocess_mask(mask_image, batch_size)
+        latent_height, latent_width = mask_image.shape[-2:]
+        mask_image = torch.cat([mask_image] * num_images_per_prompt)
+        mask_image = mask_image.to(device=device, dtype=prompt_embeds.dtype)
+
+        # 5. Set timesteps
+        self.scheduler.set_timesteps(num_inference_steps, device=device)
+        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, inpaint_strength, device)
+
+        # 6. Preprocess image latents
+        if isinstance(image_latents, list) and any(isinstance(l, torch.Tensor) and l.ndim == 5 for l in image_latents):
+            image_latents = torch.cat(image_latents).detach()
+        elif isinstance(image_latents, torch.Tensor) and image_latents.ndim == 5:
+            image_latents = image_latents.detach()
+        else:
+            image_latents = self.image_processor.preprocess(image_latents).detach()
+
+        latent_shape = (self.vae.config.latent_channels, latent_height, latent_width)
+        if image_latents.shape[-3:] != latent_shape:
+            raise ValueError(
+                f"Each latent image in `image_latents` must have shape {latent_shape}, "
+                f"but has shape {image_latents.shape[-3:]}"
+            )
+        if image_latents.ndim == 4:
+            image_latents = image_latents.reshape(batch_size, len(timesteps), *latent_shape)
+        if image_latents.shape[:2] != (batch_size, len(timesteps)):
+            raise ValueError(
+                f"`image_latents` must have batch size {batch_size} with latent images from {len(timesteps)}"
+                f" timesteps, but has batch size {image_latents.shape[0]} with latent images from"
+                f" {image_latents.shape[1]} timesteps."
+            )
+        image_latents = image_latents.transpose(0, 1).repeat_interleave(num_images_per_prompt, dim=1)
+        image_latents = image_latents.to(device=device, dtype=prompt_embeds.dtype)
+
+        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+        # 8.
Denoising loop + latents = image_latents[0].clone() + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # mask with inverted latents from appropriate timestep - use original image latent for last step + latents = latents * mask_image + image_latents[i] * (1 - mask_image) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_gligen/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_gligen/__init__.py new file mode 100644 index 000000000..147980cbf --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_gligen/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_gligen"] = ["StableDiffusionGLIGENPipeline"] + _import_structure["pipeline_stable_diffusion_gligen_text_image"] = ["StableDiffusionGLIGENTextImagePipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not 
(is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_gligen import StableDiffusionGLIGENPipeline + from .pipeline_stable_diffusion_gligen_text_image import StableDiffusionGLIGENTextImagePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py new file mode 100644 index 000000000..9f0d1190f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py @@ -0,0 +1,845 @@ +# Copyright 2024 The GLIGEN Authors and HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import warnings +from typing import Any, Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention import GatedSelfAttentionDense +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionGLIGENPipeline + >>> from diffusers.utils import load_image + + >>> # Insert objects described by text at the region defined by bounding boxes + >>> pipe = StableDiffusionGLIGENPipeline.from_pretrained( + ... "masterful/gligen-1-4-inpainting-text-box", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> input_image = load_image( + ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png" + ... 
) + >>> prompt = "a birthday cake" + >>> boxes = [[0.2676, 0.6088, 0.4773, 0.7183]] + >>> phrases = ["a birthday cake"] + + >>> images = pipe( + ... prompt=prompt, + ... gligen_phrases=phrases, + ... gligen_inpaint_image=input_image, + ... gligen_boxes=boxes, + ... gligen_scheduled_sampling_beta=1, + ... output_type="pil", + ... num_inference_steps=50, + ... ).images + + >>> images[0].save("./gligen-1-4-inpainting-text-box.jpg") + + >>> # Generate an image described by the prompt and + >>> # insert objects described by text at the region defined by bounding boxes + >>> pipe = StableDiffusionGLIGENPipeline.from_pretrained( + ... "masterful/gligen-1-4-generation-text-box", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a waterfall and a modern high speed train running through the tunnel in a beautiful forest with fall foliage" + >>> boxes = [[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]] + >>> phrases = ["a waterfall", "a modern high speed train running through the tunnel"] + + >>> images = pipe( + ... prompt=prompt, + ... gligen_phrases=phrases, + ... gligen_boxes=boxes, + ... gligen_scheduled_sampling_beta=1, + ... output_type="pil", + ... num_inference_steps=50, + ... ).images + + >>> images[0].save("./gligen-1-4-generation-text-box.jpg") + ``` +""" + + +class StableDiffusionGLIGENPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion with Grounded-Language-to-Image Generation (GLIGEN). + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
+ """ + + _optional_components = ["safety_checker", "feature_extractor"] + model_cpu_offload_seq = "text_encoder->unet->vae" + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPFeatureExtractor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + 
images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + gligen_phrases, + gligen_boxes, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if len(gligen_phrases) != len(gligen_boxes): + raise ValueError( + "length of `gligen_phrases` and `gligen_boxes` has to be the same, but" + f" got: `gligen_phrases` {len(gligen_phrases)} != `gligen_boxes` {len(gligen_boxes)}" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def enable_fuser(self, enabled=True): + for module in self.unet.modules(): + if type(module) is GatedSelfAttentionDense: + module.enabled = enabled + + def draw_inpaint_mask_from_boxes(self, boxes, size): + inpaint_mask = torch.ones(size[0], size[1]) + for box in boxes: + x0, x1 = box[0] * size[0], box[2] * size[0] + y0, y1 = box[1] * size[1], box[3] * size[1] + inpaint_mask[int(y0) : int(y1), int(x0) : int(x1)] = 0 + return inpaint_mask + + def crop(self, im, new_width, new_height): + width, height = im.size + left = (width - new_width) / 2 + top = (height - new_height) / 2 + right = (width + new_width) / 2 + bottom = (height + new_height) / 2 + return im.crop((left, top, right, bottom)) + + def target_size_center_crop(self, im, new_hw): + width, height = im.size + if width != height: + im = self.crop(im, min(height, width), min(height, width)) + return im.resize((new_hw, new_hw), PIL.Image.LANCZOS) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + gligen_scheduled_sampling_beta: float = 0.3, + gligen_phrases: List[str] = None, + gligen_boxes: List[List[float]] = None, + gligen_inpaint_image: Optional[PIL.Image.Image] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + gligen_phrases (`List[str]`): + The phrases to guide what to include in each of the regions defined by the corresponding + `gligen_boxes`. There should only be one phrase per bounding box. 
+ gligen_boxes (`List[List[float]]`): + The bounding boxes that identify rectangular regions of the image that are going to be filled with the + content described by the corresponding `gligen_phrases`. Each rectangular box is defined as a + `List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1]. + gligen_inpaint_image (`PIL.Image.Image`, *optional*): + The input image, if provided, is inpainted with objects described by the `gligen_boxes` and + `gligen_phrases`. Otherwise, it is treated as a generation task on a blank input image. + gligen_scheduled_sampling_beta (`float`, defaults to 0.3): + Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image + Generation](https://arxiv.org/pdf/2301.07093.pdf). Scheduled Sampling factor is only varied for + scheduled sampling during inference for improved quality and controllability. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). 
+ guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when + using zero terminal SNR. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + gligen_phrases, + gligen_boxes, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 5.1 Prepare GLIGEN variables + max_objs = 30 + if len(gligen_boxes) > max_objs: + warnings.warn( + f"More that {max_objs} objects found. 
Only first {max_objs} objects will be processed.", + FutureWarning, + ) + gligen_phrases = gligen_phrases[:max_objs] + gligen_boxes = gligen_boxes[:max_objs] + # prepare batched input to the GLIGENTextBoundingboxProjection (boxes, phrases, mask) + # Get tokens for phrases from pre-trained CLIPTokenizer + tokenizer_inputs = self.tokenizer(gligen_phrases, padding=True, return_tensors="pt").to(device) + # For the token, we use the same pre-trained text encoder + # to obtain its text feature + _text_embeddings = self.text_encoder(**tokenizer_inputs).pooler_output + n_objs = len(gligen_boxes) + # For each entity, described in phrases, is denoted with a bounding box, + # we represent the location information as (xmin,ymin,xmax,ymax) + boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) + boxes[:n_objs] = torch.tensor(gligen_boxes) + text_embeddings = torch.zeros( + max_objs, self.unet.cross_attention_dim, device=device, dtype=self.text_encoder.dtype + ) + text_embeddings[:n_objs] = _text_embeddings + # Generate a mask for each object that is entity described by phrases + masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + masks[:n_objs] = 1 + + repeat_batch = batch_size * num_images_per_prompt + boxes = boxes.unsqueeze(0).expand(repeat_batch, -1, -1).clone() + text_embeddings = text_embeddings.unsqueeze(0).expand(repeat_batch, -1, -1).clone() + masks = masks.unsqueeze(0).expand(repeat_batch, -1).clone() + if do_classifier_free_guidance: + repeat_batch = repeat_batch * 2 + boxes = torch.cat([boxes] * 2) + text_embeddings = torch.cat([text_embeddings] * 2) + masks = torch.cat([masks] * 2) + masks[: repeat_batch // 2] = 0 + if cross_attention_kwargs is None: + cross_attention_kwargs = {} + cross_attention_kwargs["gligen"] = {"boxes": boxes, "positive_embeddings": text_embeddings, "masks": masks} + + # Prepare latent variables for GLIGEN inpainting + if gligen_inpaint_image is not None: + # if the given input image is not of the same size as expected by VAE + # center crop and resize the input image to expected shape + if gligen_inpaint_image.size != (self.vae.sample_size, self.vae.sample_size): + gligen_inpaint_image = self.target_size_center_crop(gligen_inpaint_image, self.vae.sample_size) + # Convert a single image into a batch of images with a batch size of 1 + # The resulting shape becomes (1, C, H, W), where C is the number of channels, + # and H and W are the height and width of the image. 
+ # scales the pixel values to a range [-1, 1] + gligen_inpaint_image = self.image_processor.preprocess(gligen_inpaint_image) + gligen_inpaint_image = gligen_inpaint_image.to(dtype=self.vae.dtype, device=self.vae.device) + # Run AutoEncoder to get corresponding latents + gligen_inpaint_latent = self.vae.encode(gligen_inpaint_image).latent_dist.sample() + gligen_inpaint_latent = self.vae.config.scaling_factor * gligen_inpaint_latent + # Generate an inpainting mask + # pixel value = 0, where the object is present (defined by bounding boxes above) + # 1, everywhere else + gligen_inpaint_mask = self.draw_inpaint_mask_from_boxes(gligen_boxes, gligen_inpaint_latent.shape[2:]) + gligen_inpaint_mask = gligen_inpaint_mask.to( + dtype=gligen_inpaint_latent.dtype, device=gligen_inpaint_latent.device + ) + gligen_inpaint_mask = gligen_inpaint_mask[None, None] + gligen_inpaint_mask_addition = torch.cat( + (gligen_inpaint_latent * gligen_inpaint_mask, gligen_inpaint_mask), dim=1 + ) + # Convert a single mask into a batch of masks with a batch size of 1 + gligen_inpaint_mask_addition = gligen_inpaint_mask_addition.expand(repeat_batch, -1, -1, -1).clone() + + num_grounding_steps = int(gligen_scheduled_sampling_beta * len(timesteps)) + self.enable_fuser(True) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Scheduled sampling + if i == num_grounding_steps: + self.enable_fuser(False) + + if latents.shape[1] != 4: + latents = torch.randn_like(latents[:, :4]) + + if gligen_inpaint_image is not None: + gligen_inpaint_latent_with_noise = ( + self.scheduler.add_noise( + gligen_inpaint_latent, torch.randn_like(gligen_inpaint_latent), torch.tensor([t]) + ) + .expand(latents.shape[0], -1, -1, -1) + .clone() + ) + latents = gligen_inpaint_latent_with_noise * gligen_inpaint_mask + latents * ( + 1 - gligen_inpaint_mask + ) + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if gligen_inpaint_image is not None: + latent_model_input = torch.cat((latent_model_input, gligen_inpaint_mask_addition), dim=1) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, 
device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py new file mode 100644 index 000000000..296ecae65 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py @@ -0,0 +1,1017 @@ +# Copyright 2024 The GLIGEN Authors and HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import warnings +from typing import Any, Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import ( + CLIPFeatureExtractor, + CLIPProcessor, + CLIPTextModel, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention import GatedSelfAttentionDense +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.clip_image_project_model import CLIPImageProjection +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionGLIGENTextImagePipeline + >>> from diffusers.utils import load_image + + >>> # Insert objects described by image at the region defined by bounding boxes + >>> pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained( + ... "anhnct/Gligen_Inpainting_Text_Image", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> input_image = load_image( + ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png" + ... 
) + >>> prompt = "a backpack" + >>> boxes = [[0.2676, 0.4088, 0.4773, 0.7183]] + >>> phrases = None + >>> gligen_image = load_image( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/backpack.jpeg" + ... ) + + >>> images = pipe( + ... prompt=prompt, + ... gligen_phrases=phrases, + ... gligen_inpaint_image=input_image, + ... gligen_boxes=boxes, + ... gligen_images=[gligen_image], + ... gligen_scheduled_sampling_beta=1, + ... output_type="pil", + ... num_inference_steps=50, + ... ).images + + >>> images[0].save("./gligen-inpainting-text-image-box.jpg") + + >>> # Generate an image described by the prompt and + >>> # insert objects described by text and image at the region defined by bounding boxes + >>> pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained( + ... "anhnct/Gligen_Text_Image", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a flower sitting on the beach" + >>> boxes = [[0.0, 0.09, 0.53, 0.76]] + >>> phrases = ["flower"] + >>> gligen_image = load_image( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/pexels-pixabay-60597.jpg" + ... ) + + >>> images = pipe( + ... prompt=prompt, + ... gligen_phrases=phrases, + ... gligen_images=[gligen_image], + ... gligen_boxes=boxes, + ... gligen_scheduled_sampling_beta=1, + ... output_type="pil", + ... num_inference_steps=50, + ... ).images + + >>> images[0].save("./gligen-generation-text-image-box.jpg") + + >>> # Generate an image described by the prompt and + >>> # transfer style described by image at the region defined by bounding boxes + >>> pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained( + ... "anhnct/Gligen_Text_Image", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a dragon flying on the sky" + >>> boxes = [[0.4, 0.2, 1.0, 0.8], [0.0, 1.0, 0.0, 1.0]] # Set `[0.0, 1.0, 0.0, 1.0]` for the style + + >>> gligen_image = load_image( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png" + ... ) + + >>> gligen_placeholder = load_image( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png" + ... ) + + >>> images = pipe( + ... prompt=prompt, + ... gligen_phrases=[ + ... "dragon", + ... "placeholder", + ... ], # Can use any text instead of `placeholder` token, because we will use mask here + ... gligen_images=[ + ... gligen_placeholder, + ... gligen_image, + ... ], # Can use any image in gligen_placeholder, because we will use mask here + ... input_phrases_mask=[1, 0], # Set 0 for the placeholder token + ... input_images_mask=[0, 1], # Set 0 for the placeholder image + ... gligen_boxes=boxes, + ... gligen_scheduled_sampling_beta=1, + ... output_type="pil", + ... num_inference_steps=50, + ... ).images + + >>> images[0].save("./gligen-generation-text-image-box-style-transfer.jpg") + ``` +""" + + +class StableDiffusionGLIGENTextImagePipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion with Grounded-Language-to-Image Generation (GLIGEN). + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.). 
+ + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + processor ([`~transformers.CLIPProcessor`]): + A `CLIPProcessor` to process the reference image. + image_encoder ([`~transformers.CLIPVisionModelWithProjection`]): + Frozen image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + image_project ([`CLIPImageProjection`]): + A `CLIPImageProjection` to project image embedding into phrases embedding space. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + processor: CLIPProcessor, + image_encoder: CLIPVisionModelWithProjection, + image_project: CLIPImageProjection, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPFeatureExtractor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + image_encoder=image_encoder, + processor=processor, + image_project=image_project, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + 
images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def enable_fuser(self, enabled=True): + for module in self.unet.modules(): + if type(module) is GatedSelfAttentionDense: + module.enabled = enabled + + def draw_inpaint_mask_from_boxes(self, boxes, size): + """ + Create an inpainting mask based on given boxes. This function generates an inpainting mask using the provided + boxes to mark regions that need to be inpainted. + """ + inpaint_mask = torch.ones(size[0], size[1]) + for box in boxes: + x0, x1 = box[0] * size[0], box[2] * size[0] + y0, y1 = box[1] * size[1], box[3] * size[1] + inpaint_mask[int(y0) : int(y1), int(x0) : int(x1)] = 0 + return inpaint_mask + + def crop(self, im, new_width, new_height): + """ + Crop the input image to the specified dimensions. + """ + width, height = im.size + left = (width - new_width) / 2 + top = (height - new_height) / 2 + right = (width + new_width) / 2 + bottom = (height + new_height) / 2 + return im.crop((left, top, right, bottom)) + + def target_size_center_crop(self, im, new_hw): + """ + Crop and resize the image to the target size while keeping the center. + """ + width, height = im.size + if width != height: + im = self.crop(im, min(height, width), min(height, width)) + return im.resize((new_hw, new_hw), PIL.Image.LANCZOS) + + def complete_mask(self, has_mask, max_objs, device): + """ + Based on the input mask corresponding value `0 or 1` for each phrases and image, mask the features + corresponding to phrases and images. + """ + mask = torch.ones(1, max_objs).type(self.text_encoder.dtype).to(device) + if has_mask is None: + return mask + + if isinstance(has_mask, int): + return mask * has_mask + else: + for idx, value in enumerate(has_mask): + mask[0, idx] = value + return mask + + def get_clip_feature(self, input, normalize_constant, device, is_image=False): + """ + Get image and phrases embedding by using CLIP pretrain model. The image embedding is transformed into the + phrases embedding space through a projection. 
+ """ + if is_image: + if input is None: + return None + inputs = self.processor(images=[input], return_tensors="pt").to(device) + inputs["pixel_values"] = inputs["pixel_values"].to(self.image_encoder.dtype) + + outputs = self.image_encoder(**inputs) + feature = outputs.image_embeds + feature = self.image_project(feature).squeeze(0) + feature = (feature / feature.norm()) * normalize_constant + feature = feature.unsqueeze(0) + else: + if input is None: + return None + inputs = self.tokenizer(input, return_tensors="pt", padding=True).to(device) + outputs = self.text_encoder(**inputs) + feature = outputs.pooler_output + return feature + + def get_cross_attention_kwargs_with_grounded( + self, + hidden_size, + gligen_phrases, + gligen_images, + gligen_boxes, + input_phrases_mask, + input_images_mask, + repeat_batch, + normalize_constant, + max_objs, + device, + ): + """ + Prepare the cross-attention kwargs containing information about the grounded input (boxes, mask, image + embedding, phrases embedding). + """ + phrases, images = gligen_phrases, gligen_images + images = [None] * len(phrases) if images is None else images + phrases = [None] * len(images) if phrases is None else phrases + + boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) + masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + phrases_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + image_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + phrases_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) + image_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) + + text_features = [] + image_features = [] + for phrase, image in zip(phrases, images): + text_features.append(self.get_clip_feature(phrase, normalize_constant, device, is_image=False)) + image_features.append(self.get_clip_feature(image, normalize_constant, device, is_image=True)) + + for idx, (box, text_feature, image_feature) in enumerate(zip(gligen_boxes, text_features, image_features)): + boxes[idx] = torch.tensor(box) + masks[idx] = 1 + if text_feature is not None: + phrases_embeddings[idx] = text_feature + phrases_masks[idx] = 1 + if image_feature is not None: + image_embeddings[idx] = image_feature + image_masks[idx] = 1 + + input_phrases_mask = self.complete_mask(input_phrases_mask, max_objs, device) + phrases_masks = phrases_masks.unsqueeze(0).repeat(repeat_batch, 1) * input_phrases_mask + input_images_mask = self.complete_mask(input_images_mask, max_objs, device) + image_masks = image_masks.unsqueeze(0).repeat(repeat_batch, 1) * input_images_mask + boxes = boxes.unsqueeze(0).repeat(repeat_batch, 1, 1) + masks = masks.unsqueeze(0).repeat(repeat_batch, 1) + phrases_embeddings = phrases_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1) + image_embeddings = image_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1) + + out = { + "boxes": boxes, + "masks": masks, + "phrases_masks": phrases_masks, + "image_masks": image_masks, + "phrases_embeddings": phrases_embeddings, + "image_embeddings": image_embeddings, + } + + return out + + def get_cross_attention_kwargs_without_grounded(self, hidden_size, repeat_batch, max_objs, device): + """ + Prepare the cross-attention kwargs without information about the grounded input (boxes, mask, image embedding, + phrases embedding) (All are zero tensor). 
+ """ + boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) + masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + phrases_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + image_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + phrases_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) + image_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) + + out = { + "boxes": boxes.unsqueeze(0).repeat(repeat_batch, 1, 1), + "masks": masks.unsqueeze(0).repeat(repeat_batch, 1), + "phrases_masks": phrases_masks.unsqueeze(0).repeat(repeat_batch, 1), + "image_masks": image_masks.unsqueeze(0).repeat(repeat_batch, 1), + "phrases_embeddings": phrases_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1), + "image_embeddings": image_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1), + } + + return out + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + gligen_scheduled_sampling_beta: float = 0.3, + gligen_phrases: List[str] = None, + gligen_images: List[PIL.Image.Image] = None, + input_phrases_mask: Union[int, List[int]] = None, + input_images_mask: Union[int, List[int]] = None, + gligen_boxes: List[List[float]] = None, + gligen_inpaint_image: Optional[PIL.Image.Image] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + gligen_normalize_constant: float = 28.7, + clip_skip: int = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + gligen_phrases (`List[str]`): + The phrases to guide what to include in each of the regions defined by the corresponding + `gligen_boxes`. There should only be one phrase per bounding box. + gligen_images (`List[PIL.Image.Image]`): + The images to guide what to include in each of the regions defined by the corresponding `gligen_boxes`. 
+ There should only be one image per bounding box.
+ input_phrases_mask (`int` or `List[int]`):
+ Mask for the phrase inputs; a value of `0` masks out the corresponding phrase embedding, while `1` keeps it.
+ input_images_mask (`int` or `List[int]`):
+ Mask for the image inputs; a value of `0` masks out the corresponding image embedding, while `1` keeps it.
+ gligen_boxes (`List[List[float]]`):
+ The bounding boxes that identify rectangular regions of the image that are going to be filled with the
+ content described by the corresponding `gligen_phrases`. Each rectangular box is defined as a
+ `List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1].
+ gligen_inpaint_image (`PIL.Image.Image`, *optional*):
+ The input image, if provided, is inpainted with objects described by the `gligen_boxes` and
+ `gligen_phrases`. Otherwise, it is treated as a generation task on a blank input image.
+ gligen_scheduled_sampling_beta (`float`, defaults to 0.3):
+ Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image
+ Generation](https://arxiv.org/pdf/2301.07093.pdf). Scheduled Sampling factor is only varied for
+ scheduled sampling during inference for improved quality and controllability.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ eta (`float`, *optional*, defaults to 0.0):
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+ generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor is generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+ provided, text embeddings are generated from the `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+ plain tuple.
+ callback (`Callable`, *optional*):
+ A function that calls every `callback_steps` steps during inference. The function is called with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ gligen_normalize_constant (`float`, *optional*, defaults to 28.7):
+ The normalization value of the image embedding.
+ clip_skip (`int`, *optional*):
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+ the output of the pre-final layer will be used for computing the prompt embeddings.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
+ "not-safe-for-work" (nsfw) content.
+ """
+ # 0. Default height and width to unet
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ height,
+ width,
+ callback_steps,
+ negative_prompt,
+ prompt_embeds,
+ negative_prompt_embeds,
+ )
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
+
+ # 3. Encode input prompt
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ clip_skip=clip_skip,
+ )
+
+ if do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+ # 4. Prepare timesteps
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps = self.scheduler.timesteps
+
+ # 5. Prepare latent variables
+ num_channels_latents = self.unet.in_channels
+ latents = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ )
+
+ # 5.1 Prepare GLIGEN variables
+ max_objs = 30
+ if len(gligen_boxes) > max_objs:
+ warnings.warn(
+ f"More than {max_objs} objects found.
Only first {max_objs} objects will be processed.", + FutureWarning, + ) + gligen_phrases = gligen_phrases[:max_objs] + gligen_boxes = gligen_boxes[:max_objs] + gligen_images = gligen_images[:max_objs] + + repeat_batch = batch_size * num_images_per_prompt + + if do_classifier_free_guidance: + repeat_batch = repeat_batch * 2 + + if cross_attention_kwargs is None: + cross_attention_kwargs = {} + + hidden_size = prompt_embeds.shape[2] + + cross_attention_kwargs["gligen"] = self.get_cross_attention_kwargs_with_grounded( + hidden_size=hidden_size, + gligen_phrases=gligen_phrases, + gligen_images=gligen_images, + gligen_boxes=gligen_boxes, + input_phrases_mask=input_phrases_mask, + input_images_mask=input_images_mask, + repeat_batch=repeat_batch, + normalize_constant=gligen_normalize_constant, + max_objs=max_objs, + device=device, + ) + + cross_attention_kwargs_without_grounded = {} + cross_attention_kwargs_without_grounded["gligen"] = self.get_cross_attention_kwargs_without_grounded( + hidden_size=hidden_size, repeat_batch=repeat_batch, max_objs=max_objs, device=device + ) + + # Prepare latent variables for GLIGEN inpainting + if gligen_inpaint_image is not None: + # if the given input image is not of the same size as expected by VAE + # center crop and resize the input image to expected shape + if gligen_inpaint_image.size != (self.vae.sample_size, self.vae.sample_size): + gligen_inpaint_image = self.target_size_center_crop(gligen_inpaint_image, self.vae.sample_size) + # Convert a single image into a batch of images with a batch size of 1 + # The resulting shape becomes (1, C, H, W), where C is the number of channels, + # and H and W are the height and width of the image. + # scales the pixel values to a range [-1, 1] + gligen_inpaint_image = self.image_processor.preprocess(gligen_inpaint_image) + gligen_inpaint_image = gligen_inpaint_image.to(dtype=self.vae.dtype, device=self.vae.device) + # Run AutoEncoder to get corresponding latents + gligen_inpaint_latent = self.vae.encode(gligen_inpaint_image).latent_dist.sample() + gligen_inpaint_latent = self.vae.config.scaling_factor * gligen_inpaint_latent + # Generate an inpainting mask + # pixel value = 0, where the object is present (defined by bounding boxes above) + # 1, everywhere else + gligen_inpaint_mask = self.draw_inpaint_mask_from_boxes(gligen_boxes, gligen_inpaint_latent.shape[2:]) + gligen_inpaint_mask = gligen_inpaint_mask.to( + dtype=gligen_inpaint_latent.dtype, device=gligen_inpaint_latent.device + ) + gligen_inpaint_mask = gligen_inpaint_mask[None, None] + gligen_inpaint_mask_addition = torch.cat( + (gligen_inpaint_latent * gligen_inpaint_mask, gligen_inpaint_mask), dim=1 + ) + # Convert a single mask into a batch of masks with a batch size of 1 + gligen_inpaint_mask_addition = gligen_inpaint_mask_addition.expand(repeat_batch, -1, -1, -1).clone() + + int(gligen_scheduled_sampling_beta * len(timesteps)) + self.enable_fuser(True) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if latents.shape[1] != 4: + latents = torch.randn_like(latents[:, :4]) + + if gligen_inpaint_image is not None: + gligen_inpaint_latent_with_noise = ( + self.scheduler.add_noise( + gligen_inpaint_latent, torch.randn_like(gligen_inpaint_latent), torch.tensor([t]) + ) + .expand(latents.shape[0], -1, -1, -1) + .clone() + ) + latents = gligen_inpaint_latent_with_noise * gligen_inpaint_mask + latents * ( + 1 - gligen_inpaint_mask + ) + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if gligen_inpaint_image is not None: + latent_model_input = torch.cat((latent_model_input, gligen_inpaint_mask_addition), dim=1) + + # predict the noise residual with grounded information + noise_pred_with_grounding = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # predict the noise residual without grounded information + noise_pred_without_grounding = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs_without_grounded, + ).sample + + # perform guidance + if do_classifier_free_guidance: + # Using noise_pred_text from noise residual with grounded information and noise_pred_uncond from noise residual without grounded information + _, noise_pred_text = noise_pred_with_grounding.chunk(2) + noise_pred_uncond, _ = noise_pred_without_grounding.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + else: + noise_pred = noise_pred_with_grounding + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py new file mode 100644 index 000000000..7eb5bf8c2 --- /dev/null +++ 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py @@ -0,0 +1,62 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_k_diffusion_available, + is_k_diffusion_version, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not ( + is_transformers_available() + and is_torch_available() + and is_k_diffusion_available() + and is_k_diffusion_version(">=", "0.0.12") + ): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_and_k_diffusion_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_k_diffusion_objects)) +else: + _import_structure["pipeline_stable_diffusion_k_diffusion"] = ["StableDiffusionKDiffusionPipeline"] + _import_structure["pipeline_stable_diffusion_xl_k_diffusion"] = ["StableDiffusionXLKDiffusionPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not ( + is_transformers_available() + and is_torch_available() + and is_k_diffusion_available() + and is_k_diffusion_version(">=", "0.0.12") + ): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * + else: + from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline + from .pipeline_stable_diffusion_xl_k_diffusion import StableDiffusionXLKDiffusionPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py new file mode 100644 index 000000000..bc565c938 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py @@ -0,0 +1,664 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
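+# Usage sketch (a minimal, illustrative example of this pipeline's `set_scheduler` /
+# `use_karras_sigmas` API; the checkpoint id and sampler name below are assumptions,
+# not dictated by this file):
+#
+#     import torch
+#     from diffusers import StableDiffusionKDiffusionPipeline
+#
+#     pipe = StableDiffusionKDiffusionPipeline.from_pretrained(
+#         "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
+#     ).to("cuda")
+#     pipe.set_scheduler("sample_dpmpp_2m")  # any k-diffusion `sample_*` function
+#     image = pipe(
+#         "a photo of an astronaut riding a horse", use_karras_sigmas=True
+#     ).images[0]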
+ +import importlib +import inspect +from typing import Callable, List, Optional, Union + +import torch +from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser +from k_diffusion.sampling import BrownianTreeNoiseSampler, get_sigmas_karras + +from ...image_processor import VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import LMSDiscreteScheduler +from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class ModelWrapper: + def __init__(self, model, alphas_cumprod): + self.model = model + self.alphas_cumprod = alphas_cumprod + + def apply_model(self, *args, **kwargs): + if len(args) == 3: + encoder_hidden_states = args[-1] + args = args[:2] + if kwargs.get("cond", None) is not None: + encoder_hidden_states = kwargs.pop("cond") + return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample + + +class StableDiffusionKDiffusionPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin +): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + + + This is an experimental pipeline and is likely to change in the future. + + + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae, + text_encoder, + tokenizer, + unet, + scheduler, + safety_checker, + feature_extractor, + requires_safety_checker: bool = True, + ): + super().__init__() + + logger.info( + f"{self.__class__} is an experimntal pipeline and is likely to change in the future. We recommend to use" + " this pipeline for fast experimentation / iteration if needed, but advice to rely on existing pipelines" + " as defined in https://huggingface.co/docs/diffusers/api/schedulers#implemented-schedulers for" + " production settings." + ) + + # get correct sigmas from LMS + scheduler = LMSDiscreteScheduler.from_config(scheduler.config) + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + model = ModelWrapper(unet, scheduler.alphas_cumprod) + if scheduler.config.prediction_type == "v_prediction": + self.k_diffusion_model = CompVisVDenoiser(model) + else: + self.k_diffusion_model = CompVisDenoiser(model) + + def set_scheduler(self, scheduler_type: str): + library = importlib.import_module("k_diffusion") + sampling = getattr(library, "sampling") + try: + self.sampler = getattr(sampling, scheduler_type) + except Exception: + valid_samplers = [] + for s in dir(sampling): + if "sample_" in s: + valid_samplers.append(s) + + raise ValueError(f"Invalid scheduler type {scheduler_type}. Please choose one of {valid_samplers}.") + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + 
images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + return latents + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + use_karras_sigmas: Optional[bool] = False, + noise_sampler_seed: Optional[int] = None, + clip_skip: int = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` + is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. 
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+ plain tuple.
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function will be
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
+ called at every step.
+ use_karras_sigmas (`bool`, *optional*, defaults to `False`):
+ Use Karras sigmas. For example, specifying `sample_dpmpp_2m` to `set_scheduler` will be equivalent to
+ `DPM++2M` in stable-diffusion-webui. On top of that, setting this option to True will make it `DPM++2M
+ Karras`.
+ noise_sampler_seed (`int`, *optional*, defaults to `None`):
+ The random seed to use for the noise sampler. If `None`, a random seed will be generated.
+ clip_skip (`int`, *optional*):
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+ the output of the pre-final layer will be used for computing the prompt embeddings.
+ Returns:
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
+ (nsfw) content, according to the `safety_checker`.
+ """
+ # 0. Default height and width to unet
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
+ )
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf .
`guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = True + if guidance_scale <= 1.0: + raise ValueError("has to use guidance_scale") + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=prompt_embeds.device) + + # 5. Prepare sigmas + if use_karras_sigmas: + sigma_min: float = self.k_diffusion_model.sigmas[0].item() + sigma_max: float = self.k_diffusion_model.sigmas[-1].item() + sigmas = get_sigmas_karras(n=num_inference_steps, sigma_min=sigma_min, sigma_max=sigma_max) + sigmas = sigmas.to(device) + else: + sigmas = self.scheduler.sigmas + sigmas = sigmas.to(prompt_embeds.dtype) + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + latents = latents * sigmas[0] + self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device) + self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device) + + # 7. Define model function + def model_fn(x, t): + latent_model_input = torch.cat([x] * 2) + t = torch.cat([t] * 2) + + noise_pred = self.k_diffusion_model(latent_model_input, t, cond=prompt_embeds) + + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + return noise_pred + + # 8. 
Run k-diffusion solver + sampler_kwargs = {} + + if "noise_sampler" in inspect.signature(self.sampler).parameters: + min_sigma, max_sigma = sigmas[sigmas > 0].min(), sigmas.max() + noise_sampler = BrownianTreeNoiseSampler(latents, min_sigma, max_sigma, noise_sampler_seed) + sampler_kwargs["noise_sampler"] = noise_sampler + + if "generator" in inspect.signature(self.sampler).parameters: + sampler_kwargs["generator"] = generator + + latents = self.sampler(model_fn, latents, sigmas, **sampler_kwargs) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py new file mode 100644 index 000000000..ed46a1e36 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py @@ -0,0 +1,891 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import importlib +import inspect +from typing import List, Optional, Tuple, Union + +import torch +from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser +from k_diffusion.sampling import BrownianTreeNoiseSampler, get_sigmas_karras +from transformers import ( + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, +) + +from ...image_processor import VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + FusedAttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionXLKDiffusionPipeline + + >>> pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + >>> pipe.set_scheduler("sample_dpmpp_2m_sde") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.ModelWrapper +class ModelWrapper: + def __init__(self, model, alphas_cumprod): + self.model = model + self.alphas_cumprod = alphas_cumprod + + def apply_model(self, *args, **kwargs): + if len(args) == 3: + encoder_hidden_states = args[-1] + args = args[:2] + if kwargs.get("cond", None) is not None: + encoder_hidden_states = kwargs.pop("cond") + return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample + + +class StableDiffusionXLKDiffusionPipeline( + DiffusionPipeline, + StableDiffusionMixin, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL and k-diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. 
Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + ): + super().__init__() + + # get correct sigmas from LMS + scheduler = LMSDiscreteScheduler.from_config(scheduler.config) + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.default_sample_size = self.unet.config.sample_size + + model = ModelWrapper(unet, scheduler.alphas_cumprod) + if scheduler.config.prediction_type == "v_prediction": + self.k_diffusion_model = CompVisVDenoiser(model) + else: + self.k_diffusion_model = CompVisDenoiser(model) + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.set_scheduler + def set_scheduler(self, scheduler_type: str): + library = importlib.import_module("k_diffusion") + sampling = getattr(library, "sampling") + try: + self.sampler = getattr(sampling, scheduler_type) + except Exception: + valid_samplers = [] + for s in dir(sampling): + if "sample_" in s: + valid_samplers.append(s) + + raise ValueError(f"Invalid scheduler type {scheduler_type}. 
Please choose one of {valid_samplers}.") + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." 
+ ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
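+    # Concretely, `model_fn` in `__call__` below combines the unconditional and text-conditioned
+    # denoiser outputs as
+    #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+    # so `guidance_scale = 1` collapses to the plain text-conditioned prediction, while larger
+    # values push the sample further along the text-conditioned direction.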
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + use_karras_sigmas: Optional[bool] = False, + noise_sampler_seed: Optional[int] = None, + clip_skip: Optional[int] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. 
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            negative_prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.FloatTensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+                input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
+                of a plain tuple.
+            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
+                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
+                explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
+                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
+                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                For most cases, `target_size` should be set to the desired height and width of the generated image. If
+                not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
+                section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                To negatively condition the generation process based on a specific image resolution. Part of SDXL's
+                micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+            negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+                To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
+                micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+            negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                To negatively condition the generation process based on a target image resolution. It should be the
+                same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2
+                of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
+                [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
+                `tuple`. When returning a tuple, the first element is a list with the generated images.
+        """
+
+        # 0. Default height and width to unet
+        height = height or self.default_sample_size * self.vae_scale_factor
+        width = width or self.default_sample_size * self.vae_scale_factor
+
+        original_size = original_size or (height, width)
+        target_size = target_size or (height, width)
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            prompt_2,
+            height,
+            width,
+            negative_prompt,
+            negative_prompt_2,
+            prompt_embeds,
+            negative_prompt_embeds,
+            pooled_prompt_embeds,
+            negative_pooled_prompt_embeds,
+        )
+
+        if guidance_scale <= 1.0:
+            raise ValueError("has to use guidance_scale")
+
+        self._guidance_scale = guidance_scale
+        self._clip_skip = clip_skip
+
+        # 2. Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        device = self._execution_device
+
+        # 3.
Encode input prompt + lora_scale = None + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=prompt_embeds.device) + + # 5. Prepare sigmas + if use_karras_sigmas: + sigma_min: float = self.k_diffusion_model.sigmas[0].item() + sigma_max: float = self.k_diffusion_model.sigmas[-1].item() + sigmas = get_sigmas_karras(n=num_inference_steps, sigma_min=sigma_min, sigma_max=sigma_max) + else: + sigmas = self.scheduler.sigmas + sigmas = sigmas.to(dtype=prompt_embeds.dtype, device=device) + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + latents = latents * sigmas[0] + + self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device) + self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device) + + # 7. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # 8. Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 9. 
Define model function + def model_fn(x, t): + latent_model_input = torch.cat([x] * 2) + t = torch.cat([t] * 2) + + noise_pred = self.k_diffusion_model( + latent_model_input, + t, + cond=prompt_embeds, + timestep_cond=timestep_cond, + added_cond_kwargs=added_cond_kwargs, + ) + + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + return noise_pred + + # 10. Run k-diffusion solver + sampler_kwargs = {} + + if "noise_sampler" in inspect.signature(self.sampler).parameters: + min_sigma, max_sigma = sigmas[sigmas > 0].min(), sigmas.max() + noise_sampler = BrownianTreeNoiseSampler(latents, min_sigma, max_sigma, noise_sampler_seed) + sampler_kwargs["noise_sampler"] = noise_sampler + + if "generator" in inspect.signature(self.sampler).parameters: + sampler_kwargs["generator"] = generator + + latents = self.sampler(model_fn, latents, sigmas, **sampler_kwargs) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py new file mode 100644 index 000000000..dae2affdd --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_ldm3d"] = ["StableDiffusionLDM3DPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git 
a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py new file mode 100644 index 000000000..c7c05feaf --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py @@ -0,0 +1,985 @@ +# Copyright 2024 The Intel Labs Team Authors and the HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessorLDM3D +from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + BaseOutput, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```python + >>> from diffusers import StableDiffusionLDM3DPipeline + + >>> pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c") + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> output = pipe(prompt) + >>> rgb_image, depth_image = output.rgb, output.depth + >>> rgb_image[0].save("astronaut_ldm3d_rgb.jpg") + >>> depth_image[0].save("astronaut_ldm3d_depth.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +@dataclass +class LDM3DPipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + rgb (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + depth (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + nsfw_content_detected (`List[bool]`) + List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or + `None` if safety checking could not be performed. 
+ """ + + rgb: Union[List[PIL.Image.Image], np.ndarray] + depth: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] + + +class StableDiffusionLDM3DPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + LoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image and 3D generation using LDM3D. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. 
For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessorLDM3D(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + rgb_feature_extractor_input = feature_extractor_input[0] + safety_checker_input = self.feature_extractor(rgb_feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + 
return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
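+ # Concretely: with guidance weight w, the denoising loop combines the two predictions as
+ #     noise_pred_uncond + w * (noise_pred_text - noise_pred_uncond),
+ # so w <= 1 reduces to the plain conditional prediction. The property below also disables
+ # classifier-free guidance when the UNet carries a guidance-scale embedding
+ # (time_cond_proj_dim is not None), as in latent-consistency (LCM) style models.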
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 49, + timesteps: List[int] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 5.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. 
+ prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + # 0. 
Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Add image embeds for IP-Adapter + added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None + + # 6.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + rgb, depth = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return ((rgb, depth), has_nsfw_concept) + + return LDM3DPipelineOutput(rgb=rgb, depth=depth, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_panorama/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_panorama/__init__.py new file mode 100644 index 000000000..f7572db72 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_panorama/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + 
is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_panorama"] = ["StableDiffusionPanoramaPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py new file mode 100644 index 000000000..feda710e0 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py @@ -0,0 +1,933 @@ +# Copyright 2024 MultiDiffusion Authors and The HuggingFace Team. All rights reserved." +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
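+
+# Overview: this pipeline implements MultiDiffusion-style panorama generation. The wide latent
+# canvas is split into overlapping views (see `get_views`), each view is denoised separately at
+# every timestep, and the overlapping per-view predictions are fused by averaging (the
+# `value / count` step in `__call__`), following https://arxiv.org/abs/2302.08113.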
+ +import copy +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import DDIMScheduler +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionPanoramaPipeline, DDIMScheduler + + >>> model_ckpt = "stabilityai/stable-diffusion-2-base" + >>> scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") + >>> pipe = StableDiffusionPanoramaPipeline.from_pretrained( + ... model_ckpt, scheduler=scheduler, torch_dtype=torch.float16 + ... ) + + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of the dolomites" + >>> image = pipe(prompt).images[0] + ``` +""" + + +class StableDiffusionPanoramaPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin +): + r""" + Pipeline for text-to-image generation using MultiDiffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
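+ image_encoder ([`~transformers.CLIPVisionModelWithProjection`], *optional*):
+ Pre-trained CLIP vision model used to obtain image features for IP-Adapter inputs.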
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: DDIMScheduler, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
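+ # For example, clip_skip=1 selects the penultimate encoder layer's hidden states
+ # (hidden_states[-2]) above, and the call below applies the text model's final
+ # LayerNorm to those hidden states.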
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, 
clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def decode_latents_with_padding(self, latents, padding=8): + # Add padding to latents for circular inference + # padding is the number of latents to add on each side + # it would slightly increase the memory usage, but remove the boundary artifacts + latents = 1 / self.vae.config.scaling_factor * latents + latents_left = latents[..., :padding] + latents_right = latents[..., -padding:] + latents = torch.cat((latents_right, latents, latents_left), axis=-1) + image = self.vae.decode(latents, return_dict=False)[0] + padding_pix = self.vae_scale_factor * padding + image = image[..., padding_pix:-padding_pix] + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def get_views(self, panorama_height, panorama_width, window_size=64, stride=8, circular_padding=False): + # Here, we define the mappings F_i (see Eq. 
7 in the MultiDiffusion paper https://arxiv.org/abs/2302.08113) + # if panorama's height/width < window_size, num_blocks of height/width should return 1 + panorama_height /= 8 + panorama_width /= 8 + num_blocks_height = (panorama_height - window_size) // stride + 1 if panorama_height > window_size else 1 + if circular_padding: + num_blocks_width = panorama_width // stride if panorama_width > window_size else 1 + else: + num_blocks_width = (panorama_width - window_size) // stride + 1 if panorama_width > window_size else 1 + total_num_blocks = int(num_blocks_height * num_blocks_width) + views = [] + for i in range(total_num_blocks): + h_start = int((i // num_blocks_width) * stride) + h_end = h_start + window_size + w_start = int((i % num_blocks_width) * stride) + w_end = w_start + window_size + views.append((h_start, h_end, w_start, w_end)) + return views + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = 512, + width: Optional[int] = 2048, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + view_batch_size: int = 1, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + circular_padding: bool = False, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 2048): + The width in pixels of the generated image. The width is kept high because the pipeline is supposed + generate panorama-like images. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + view_batch_size (`int`, *optional*, defaults to 1): + The batch size to denoise split views. For some GPUs with high performance, higher view batch size can + speedup the generation and increase the VRAM usage. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. 
+ eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + circular_padding (`bool`, *optional*, defaults to `False`): + If set to `True`, circular padding is applied to ensure there are no stitching artifacts. Circular + padding allows the model to seamlessly generate a transition from the rightmost part of the image to + the leftmost part, maintaining consistency in a 360-degree sense. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Define panorama grid and initialize views for synthesis. + # prepare batch grid + views = self.get_views(height, width, circular_padding=circular_padding) + views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)] + views_scheduler_status = [copy.deepcopy(self.scheduler.__dict__)] * len(views_batch) + count = torch.zeros_like(latents) + value = torch.zeros_like(latents) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 8. Denoising loop + # Each denoising step also includes refinement of the latents with respect to the + # views. + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + count.zero_() + value.zero_() + + # generate views + # Here, we iterate through different spatial crops of the latents and denoise them. These + # denoised (latent) crops are then averaged to produce the final latent + # for the current timestep via MultiDiffusion. Please see Sec. 4.1 in the + # MultiDiffusion paper for more details: https://arxiv.org/abs/2302.08113 + # Batch views denoise + for j, batch_view in enumerate(views_batch): + vb_size = len(batch_view) + # get the latents corresponding to the current view coordinates + if circular_padding: + latents_for_view = [] + for h_start, h_end, w_start, w_end in batch_view: + if w_end > latents.shape[3]: + # Add circular horizontal padding + latent_view = torch.cat( + ( + latents[:, :, h_start:h_end, w_start:], + latents[:, :, h_start:h_end, : w_end - latents.shape[3]], + ), + axis=-1, + ) + else: + latent_view = latents[:, :, h_start:h_end, w_start:w_end] + latents_for_view.append(latent_view) + latents_for_view = torch.cat(latents_for_view) + else: + latents_for_view = torch.cat( + [ + latents[:, :, h_start:h_end, w_start:w_end] + for h_start, h_end, w_start, w_end in batch_view + ] + ) + + # rematch block's scheduler status + self.scheduler.__dict__.update(views_scheduler_status[j]) + + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + latents_for_view.repeat_interleave(2, dim=0) + if do_classifier_free_guidance + else latents_for_view + ) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # repeat prompt_embeds for batch + prompt_embeds_input = torch.cat([prompt_embeds] * vb_size) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds_input, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2] + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents_denoised_batch = self.scheduler.step( + noise_pred, t, latents_for_view, **extra_step_kwargs + ).prev_sample + + # save views scheduler status after sample + views_scheduler_status[j] = copy.deepcopy(self.scheduler.__dict__) + + # extract value from batch + for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip( + latents_denoised_batch.chunk(vb_size), batch_view + ): + if circular_padding and w_end > latents.shape[3]: + # Case for circular padding + value[:, :, h_start:h_end, w_start:] += latents_view_denoised[ + :, :, h_start:h_end, : latents.shape[3] - w_start + ] + value[:, :, h_start:h_end, : w_end - latents.shape[3]] += latents_view_denoised[ + :, :, h_start:h_end, latents.shape[3] - w_start : + ] + count[:, :, h_start:h_end, w_start:] += 1 + count[:, :, 
h_start:h_end, : w_end - latents.shape[3]] += 1 + else: + value[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised + count[:, :, h_start:h_end, w_start:w_end] += 1 + + # take the MultiDiffusion step. Eq. 5 in MultiDiffusion paper: https://arxiv.org/abs/2302.08113 + latents = torch.where(count > 0, value / count, value) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + if circular_padding: + image = self.decode_latents_with_padding(latents) + else: + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/__init__.py new file mode 100644 index 000000000..b432b9418 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/__init__.py @@ -0,0 +1,99 @@ +from dataclasses import dataclass +from enum import Enum +from typing import TYPE_CHECKING, List, Optional, Union + +import numpy as np +import PIL +from PIL import Image + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + BaseOutput, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +@dataclass +class SafetyConfig(object): + WEAK = { + "sld_warmup_steps": 15, + "sld_guidance_scale": 20, + "sld_threshold": 0.0, + "sld_momentum_scale": 0.0, + "sld_mom_beta": 0.0, + } + MEDIUM = { + "sld_warmup_steps": 10, + "sld_guidance_scale": 1000, + "sld_threshold": 0.01, + "sld_momentum_scale": 0.3, + "sld_mom_beta": 0.4, + } + STRONG = { + "sld_warmup_steps": 7, + "sld_guidance_scale": 2000, + "sld_threshold": 0.025, + "sld_momentum_scale": 0.5, + "sld_mom_beta": 0.7, + } + MAX = { + "sld_warmup_steps": 0, + "sld_guidance_scale": 5000, + "sld_threshold": 1.0, + "sld_momentum_scale": 0.5, + "sld_mom_beta": 0.7, + } + + +_dummy_objects = {} +_additional_imports = {} +_import_structure = {} + +_additional_imports.update({"SafetyConfig": SafetyConfig}) + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure.update( + { + "pipeline_output": ["StableDiffusionSafePipelineOutput"], + "pipeline_stable_diffusion_safe": ["StableDiffusionPipelineSafe"], + "safety_checker": 
["StableDiffusionSafetyChecker"], + } + ) + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_output import StableDiffusionSafePipelineOutput + from .pipeline_stable_diffusion_safe import StableDiffusionPipelineSafe + from .safety_checker import SafeStableDiffusionSafetyChecker + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) + for name, value in _additional_imports.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py new file mode 100644 index 000000000..69a064d66 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py @@ -0,0 +1,34 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image + +from ...utils import ( + BaseOutput, +) + + +@dataclass +class StableDiffusionSafePipelineOutput(BaseOutput): + """ + Output class for Safe Stable Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. + nsfw_content_detected (`List[bool]`) + List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, or `None` if safety checking could not be performed. + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images that were flagged by the safety checker any may contain "not-safe-for-work" + (nsfw) content, or `None` if no safety check was performed or no images were flagged. 
+ applied_safety_concept (`str`) + The safety concept that was applied for safety guidance, or `None` if safety guidance was disabled + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] + unsafe_images: Optional[Union[List[PIL.Image.Image], np.ndarray]] + applied_safety_concept: Optional[str] diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py new file mode 100644 index 000000000..ae74e0967 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py @@ -0,0 +1,764 @@ +import inspect +import warnings +from typing import Callable, List, Optional, Union + +import numpy as np +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput +from ...loaders import IPAdapterMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . import StableDiffusionSafePipelineOutput +from .safety_checker import SafeStableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class StableDiffusionPipelineSafe(DiffusionPipeline, StableDiffusionMixin, IPAdapterMixin): + r""" + Pipeline based on the [`StableDiffusionPipeline`] for text-to-image generation using Safe Latent Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
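+        image_encoder ([`~transformers.CLIPVisionModelWithProjection`], *optional*):
+            Frozen CLIP image encoder used to compute image embeddings when an `ip_adapter_image` is passed.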
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: SafeStableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + requires_safety_checker: bool = True, + ): + super().__init__() + safety_concept: Optional[str] = ( + "an image showing hate, harassment, violence, suffering, humiliation, harm, suicide, sexual, nudity," + " bodily fluids, blood, obscene gestures, illegal activity, drug use, theft, vandalism, weapons, child" + " abuse, brutality, cruelty" + ) + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self._safety_text_concept = safety_concept + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + @property + def safety_concept(self): + r""" + Getter method for the safety concept used with SLD + + Returns: + `str`: The text describing the safety concept + """ + return self._safety_text_concept + + @safety_concept.setter + def safety_concept(self, concept): + r""" + Setter method for the safety concept used with SLD + + Args: + concept (`str`): + The text of the new safety concept + """ + self._safety_text_concept = concept + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + enable_safety_guidance, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). 
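+            enable_safety_guidance (`bool`):
+                whether to additionally encode the current safety concept (`self._safety_text_concept`) so that
+                safe latent diffusion guidance can be applied during denoising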
+ """ + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids + + if not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + # duplicate text embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # Encode the safety concept text + if enable_safety_guidance: + safety_concept_input = self.tokenizer( + [self._safety_text_concept], + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + safety_embeddings = self.text_encoder(safety_concept_input.input_ids.to(self.device))[0] + + # duplicate safety embeddings for each generation per prompt, using mps friendly method + seq_len = safety_embeddings.shape[1] + safety_embeddings = safety_embeddings.repeat(batch_size, num_images_per_prompt, 1) + safety_embeddings = safety_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance + sld, we need to do three forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing three forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds, safety_embeddings]) + + else: + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def run_safety_checker(self, image, device, dtype, enable_safety_guidance): + if self.safety_checker is not None: + images = image.copy() + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + flagged_images = np.zeros((2, *image.shape[1:])) + if any(has_nsfw_concept): + logger.warning( + "Potential NSFW content was detected in one or more images. A black image will be returned" + " instead." + f"{'You may look at this images in the `unsafe_images` variable of the output at your own discretion.' if enable_safety_guidance else 'Try again with a different prompt and/or seed.'}" + ) + for idx, has_nsfw_concept in enumerate(has_nsfw_concept): + if has_nsfw_concept: + flagged_images[idx] = images[idx] + image[idx] = np.zeros(image[idx].shape) # black image + else: + has_nsfw_concept = None + flagged_images = None + return image, has_nsfw_concept, flagged_images + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def perform_safety_guidance( + self, + enable_safety_guidance, + safety_momentum, + noise_guidance, + noise_pred_out, + i, + sld_guidance_scale, + sld_warmup_steps, + sld_threshold, + sld_momentum_scale, + sld_mom_beta, + ): + # Perform SLD guidance + if enable_safety_guidance: + if safety_momentum is None: + safety_momentum = torch.zeros_like(noise_guidance) + noise_pred_text, noise_pred_uncond = noise_pred_out[0], noise_pred_out[1] + noise_pred_safety_concept = noise_pred_out[2] + + # Equation 6 + scale = torch.clamp(torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.0) + + # Equation 6 + safety_concept_scale = torch.where( + (noise_pred_text - noise_pred_safety_concept) >= sld_threshold, torch.zeros_like(scale), scale + ) + + # Equation 4 + noise_guidance_safety = torch.mul((noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale) + + # Equation 7 + noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum + + # Equation 8 + safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety + + if i >= sld_warmup_steps: # Warmup + # Equation 3 + noise_guidance = noise_guidance - noise_guidance_safety + return noise_guidance, safety_momentum + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + 
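+            # `output_hidden_states` is falsy (ImageProjection style IP-Adapter): use the pooled CLIP image
+            # embedding and an all-zero tensor as its unconditional counterpart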
image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + sld_guidance_scale: Optional[float] = 1000, + sld_warmup_steps: Optional[int] = 10, + sld_threshold: Optional[float] = 0.01, + sld_momentum_scale: Optional[float] = 0.3, + sld_mom_beta: Optional[float] = 0.4, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + sld_guidance_scale (`float`, *optional*, defaults to 1000): + If `sld_guidance_scale < 1`, safety guidance is disabled. + sld_warmup_steps (`int`, *optional*, defaults to 10): + Number of warmup steps for safety guidance. SLD is only be applied for diffusion steps greater than + `sld_warmup_steps`. + sld_threshold (`float`, *optional*, defaults to 0.01): + Threshold that separates the hyperplane between appropriate and inappropriate images. + sld_momentum_scale (`float`, *optional*, defaults to 0.3): + Scale of the SLD momentum to be added to the safety guidance at each diffusion step. If set to 0.0, + momentum is disabled. Momentum is built up during warmup for diffusion steps smaller than + `sld_warmup_steps`. + sld_mom_beta (`float`, *optional*, defaults to 0.4): + Defines how safety guidance momentum builds up. `sld_mom_beta` indicates how much of the previous + momentum is kept. Momentum is built up during warmup for diffusion steps smaller than + `sld_warmup_steps`. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + + Examples: + + ```py + import torch + from diffusers import StableDiffusionPipelineSafe + from diffusers.pipelines.stable_diffusion_safe import SafetyConfig + + pipeline = StableDiffusionPipelineSafe.from_pretrained( + "AIML-TUDA/stable-diffusion-safe", torch_dtype=torch.float16 + ).to("cuda") + prompt = "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c. leyendecker" + image = pipeline(prompt=prompt, **SafetyConfig.MEDIUM).images[0] + ``` + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
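+        # Safe Latent Diffusion additionally requires classifier free guidance and `sld_guidance_scale > 1`;
+        # otherwise only the plain classifier free guidance path below is used.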
+ do_classifier_free_guidance = guidance_scale > 1.0 + + enable_safety_guidance = sld_guidance_scale > 1.0 and do_classifier_free_guidance + if not enable_safety_guidance: + warnings.warn("Safety checker disabled!") + + if ip_adapter_image is not None: + output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True + image_embeds, negative_image_embeds = self.encode_image( + ip_adapter_image, device, num_images_per_prompt, output_hidden_state + ) + if do_classifier_free_guidance: + if enable_safety_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds, image_embeds]) + else: + image_embeds = torch.cat([negative_image_embeds, image_embeds]) + + # 3. Encode input prompt + prompt_embeds = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, enable_safety_guidance + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Add image embeds for IP-Adapter + added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None + + safety_momentum = None + + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + torch.cat([latents] * (3 if enable_safety_guidance else 2)) + if do_classifier_free_guidance + else latents + ) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_out = noise_pred.chunk((3 if enable_safety_guidance else 2)) + noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1] + + # default classifier free guidance + noise_guidance = noise_pred_text - noise_pred_uncond + + # Perform SLD guidance + if enable_safety_guidance: + if safety_momentum is None: + safety_momentum = torch.zeros_like(noise_guidance) + noise_pred_safety_concept = noise_pred_out[2] + + # Equation 6 + scale = torch.clamp( + torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.0 + ) + + # Equation 6 + safety_concept_scale = torch.where( + (noise_pred_text - noise_pred_safety_concept) >= sld_threshold, + torch.zeros_like(scale), + scale, + ) + + # Equation 4 + noise_guidance_safety = torch.mul( + (noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale + ) + + # Equation 7 + noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum + + # Equation 8 + safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety + + if i >= sld_warmup_steps: # Warmup + # Equation 3 + noise_guidance = noise_guidance - noise_guidance_safety + + noise_pred = noise_pred_uncond + guidance_scale * noise_guidance + + # compute the previous noisy sample x_t -> x_t-1 + 
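+                # (`noise_pred` already folds in classifier free guidance and, once past `sld_warmup_steps`,
+                # the SLD safety correction)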
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 8. Post-processing + image = self.decode_latents(latents) + + # 9. Run safety checker + image, has_nsfw_concept, flagged_images = self.run_safety_checker( + image, device, prompt_embeds.dtype, enable_safety_guidance + ) + + # 10. Convert to PIL + if output_type == "pil": + image = self.numpy_to_pil(image) + if flagged_images is not None: + flagged_images = self.numpy_to_pil(flagged_images) + + if not return_dict: + return ( + image, + has_nsfw_concept, + self._safety_text_concept if enable_safety_guidance else None, + flagged_images, + ) + + return StableDiffusionSafePipelineOutput( + images=image, + nsfw_content_detected=has_nsfw_concept, + applied_safety_concept=self._safety_text_concept if enable_safety_guidance else None, + unsafe_images=flagged_images, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py new file mode 100644 index 000000000..549747e97 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py @@ -0,0 +1,109 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
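The safe pipeline returns more than the generated images: `nsfw_content_detected`, `unsafe_images`, and `applied_safety_concept` make the safety decision inspectable. A minimal usage sketch, reusing the checkpoint from the docstring example above (adjust device and dtype to your setup):

```py
import torch
from diffusers import StableDiffusionPipelineSafe
from diffusers.pipelines.stable_diffusion_safe import SafetyConfig

pipe = StableDiffusionPipelineSafe.from_pretrained(
    "AIML-TUDA/stable-diffusion-safe", torch_dtype=torch.float16
).to("cuda")

# SafetyConfig presets expand to the sld_* keyword arguments of __call__.
out = pipe(prompt="a portrait photo of an astronaut", **SafetyConfig.STRONG)

print(out.applied_safety_concept)   # concept text used for SLD, or None if SLD was disabled
print(out.nsfw_content_detected)    # per-image flags from the safety checker
if out.unsafe_images:               # PIL copies of flagged images (their slots in .images are blacked out)
    out.unsafe_images[0].save("flagged.png")
```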
+ +import torch +import torch.nn as nn +from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel + +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +def cosine_distance(image_embeds, text_embeds): + normalized_image_embeds = nn.functional.normalize(image_embeds) + normalized_text_embeds = nn.functional.normalize(text_embeds) + return torch.mm(normalized_image_embeds, normalized_text_embeds.t()) + + +class SafeStableDiffusionSafetyChecker(PreTrainedModel): + config_class = CLIPConfig + + _no_split_modules = ["CLIPEncoderLayer"] + + def __init__(self, config: CLIPConfig): + super().__init__(config) + + self.vision_model = CLIPVisionModel(config.vision_config) + self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False) + + self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False) + self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False) + + self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False) + self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False) + + @torch.no_grad() + def forward(self, clip_input, images): + pooled_output = self.vision_model(clip_input)[1] # pooled_output + image_embeds = self.visual_projection(pooled_output) + + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy() + cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy() + + result = [] + batch_size = image_embeds.shape[0] + for i in range(batch_size): + result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} + + # increase this value to create a stronger `nfsw` filter + # at the cost of increasing the possibility of filtering benign images + adjustment = 0.0 + + for concept_idx in range(len(special_cos_dist[0])): + concept_cos = special_cos_dist[i][concept_idx] + concept_threshold = self.special_care_embeds_weights[concept_idx].item() + result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) + if result_img["special_scores"][concept_idx] > 0: + result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]}) + adjustment = 0.01 + + for concept_idx in range(len(cos_dist[0])): + concept_cos = cos_dist[i][concept_idx] + concept_threshold = self.concept_embeds_weights[concept_idx].item() + result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) + if result_img["concept_scores"][concept_idx] > 0: + result_img["bad_concepts"].append(concept_idx) + + result.append(result_img) + + has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result] + + return images, has_nsfw_concepts + + @torch.no_grad() + def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor): + pooled_output = self.vision_model(clip_input)[1] # pooled_output + image_embeds = self.visual_projection(pooled_output) + + special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds) + cos_dist = cosine_distance(image_embeds, self.concept_embeds) + + # increase this value to create a stronger `nsfw` filter + # at the cost of increasing the possibility of filtering benign images + adjustment = 0.0 + + special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment + 
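+        # an image that trips any "special care" concept gets 0.01 added to all of its concept scores
+        # below, i.e. the NSFW decision becomes stricter for that image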
# special_scores = special_scores.round(decimals=3) + special_care = torch.any(special_scores > 0, dim=1) + special_adjustment = special_care * 0.01 + special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1]) + + concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment + # concept_scores = concept_scores.round(decimals=3) + has_nsfw_concepts = torch.any(concept_scores > 0, dim=1) + + return images, has_nsfw_concepts diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_sag/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_sag/__init__.py new file mode 100644 index 000000000..378e0e578 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_sag/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_sag"] = ["StableDiffusionSAGPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py new file mode 100644 index 000000000..96aa006d2 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py @@ -0,0 +1,886 @@ +# Copyright 2024 Susung Hong and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
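Both package `__init__.py` files above wire their pipelines through `_LazyModule`, so the heavy pipeline modules are only imported when a class is first accessed. A small sketch of that behaviour, assuming the vendored package is importable as `diffusers` and that `torch` and `transformers` are installed (otherwise the dummy objects are registered instead):

```py
import diffusers.pipelines.stable_diffusion_sag as sag

print(type(sag))                           # _LazyModule proxy, not a plain module
pipe_cls = sag.StableDiffusionSAGPipeline  # first attribute access imports pipeline_stable_diffusion_sag
print(pipe_cls.__module__)
```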
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionSAGPipeline + + >>> pipe = StableDiffusionSAGPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt, sag_scale=0.75).images[0] + ``` +""" + + +# processes and stores attention probabilities +class CrossAttnStoreProcessor: + def __init__(self): + self.attention_probs = None + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + ): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + self.attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(self.attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +# Modified to get self-attention guidance scale in this paper (https://arxiv.org/pdf/2210.00939.pdf) as an input +class StableDiffusionSAGPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. 
+ text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + sag_scale: float = 0.75, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + sag_scale (`float`, *optional*, defaults to 0.75): + Chosen between [0, 1.0] for better quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. 
Can be used to easily tweak text inputs (prompt weighting). If not
+ provided, text embeddings are generated from the `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+ ip_adapter_image (`PipelineImageInput`, *optional*):
+ Optional image input to work with IP Adapters.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+ plain tuple.
+ callback (`Callable`, *optional*):
+ A function that is called every `callback_steps` steps during inference. The function is called with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ clip_skip (`int`, *optional*):
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+ the output of the pre-final layer will be used for computing the prompt embeddings.
+ Examples:
+
+ Returns:
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
+ "not-safe-for-work" (nsfw) content.
+ """
+ # 0. Default height and width to unet
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
+ )
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance. 
+ do_classifier_free_guidance = guidance_scale > 1.0
+ # and `sag_scale` is `s` of equation (16)
+ # of the self-attention guidance paper: https://arxiv.org/pdf/2210.00939.pdf
+ # `sag_scale = 0` means no self-attention guidance
+ do_self_attention_guidance = sag_scale > 0.0
+
+ if ip_adapter_image is not None:
+ output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
+ image_embeds, negative_image_embeds = self.encode_image(
+ ip_adapter_image, device, num_images_per_prompt, output_hidden_state
+ )
+ if do_classifier_free_guidance:
+ image_embeds = torch.cat([negative_image_embeds, image_embeds])
+
+ # 3. Encode input prompt
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ clip_skip=clip_skip,
+ )
+ # For classifier free guidance, we need to do two forward passes.
+ # Here we concatenate the unconditional and text embeddings into a single batch
+ # to avoid doing two forward passes
+ if do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+ # 4. Prepare timesteps
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps = self.scheduler.timesteps
+
+ if timesteps.dtype not in [torch.int16, torch.int32, torch.int64]:
+ raise ValueError(
+ f"{self.__class__.__name__} does not support using a scheduler of type {self.scheduler.__class__.__name__}. Please make sure to use one of 'DDIMScheduler, PNDMScheduler, DDPMScheduler, DEISMultistepScheduler, UniPCMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler'."
+ )
+
+ # 5. Prepare latent variables
+ num_channels_latents = self.unet.config.in_channels
+ latents = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ )
+
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+ # 6.1 Add image embeds for IP-Adapter
+ added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
+ added_uncond_kwargs = {"image_embeds": negative_image_embeds} if ip_adapter_image is not None else None
+
+ # 7. Denoising loop
+ store_processor = CrossAttnStoreProcessor()
+ self.unet.mid_block.attentions[0].transformer_blocks[0].attn1.processor = store_processor
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+
+ map_size = None
+
+ def get_map_size(module, input, output):
+ nonlocal map_size
+ map_size = output[0].shape[-2:]
+
+ with self.unet.mid_block.attentions[0].register_forward_hook(get_map_size):
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ # predict the noise residual
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ cross_attention_kwargs=cross_attention_kwargs,
+ added_cond_kwargs=added_cond_kwargs,
+ ).sample
+
+ # perform guidance
+ if do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # perform self-attention guidance with the stored self-attention map
+ if do_self_attention_guidance:
+ # classifier-free guidance produces two chunks of attention map
+ # and we only use unconditional one according to equation (25)
+ # in https://arxiv.org/pdf/2210.00939.pdf
+ if do_classifier_free_guidance:
+ # DDIM-like prediction of x0
+ pred_x0 = self.pred_x0(latents, noise_pred_uncond, t)
+ # get the stored attention maps
+ uncond_attn, cond_attn = store_processor.attention_probs.chunk(2)
+ # self-attention-based degrading of latents
+ degraded_latents = self.sag_masking(
+ pred_x0, uncond_attn, map_size, t, self.pred_epsilon(latents, noise_pred_uncond, t)
+ )
+ uncond_emb, _ = prompt_embeds.chunk(2)
+ # forward and give guidance
+ degraded_pred = self.unet(
+ degraded_latents,
+ t,
+ encoder_hidden_states=uncond_emb,
+ added_cond_kwargs=added_uncond_kwargs,
+ ).sample
+ noise_pred += sag_scale * (noise_pred_uncond - degraded_pred)
+ else:
+ # DDIM-like prediction of x0
+ pred_x0 = self.pred_x0(latents, noise_pred, t)
+ # get the stored attention maps
+ cond_attn = store_processor.attention_probs
+ # self-attention-based degrading of latents
+ degraded_latents = self.sag_masking(
+ pred_x0, cond_attn, map_size, t, self.pred_epsilon(latents, noise_pred, t)
+ )
+ # forward and give guidance
+ degraded_pred = self.unet(
+ degraded_latents,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ added_cond_kwargs=added_cond_kwargs,
+ ).sample
+ noise_pred += sag_scale * (noise_pred - degraded_pred)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ step_idx = i // getattr(self.scheduler, "order", 1)
+ callback(step_idx, t, latents)
+
+ if not output_type == "latent":
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
+ else:
+ image = latents
+ has_nsfw_concept = None
+
+ if has_nsfw_concept is None:
+ do_denormalize = [True] * image.shape[0]
+ else:
+ do_denormalize = [not 
has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + def sag_masking(self, original_latents, attn_map, map_size, t, eps): + # Same masking process as in SAG paper: https://arxiv.org/pdf/2210.00939.pdf + bh, hw1, hw2 = attn_map.shape + b, latent_channel, latent_h, latent_w = original_latents.shape + h = self.unet.config.attention_head_dim + if isinstance(h, list): + h = h[-1] + + # Produce attention mask + attn_map = attn_map.reshape(b, h, hw1, hw2) + attn_mask = attn_map.mean(1, keepdim=False).sum(1, keepdim=False) > 1.0 + attn_mask = ( + attn_mask.reshape(b, map_size[0], map_size[1]) + .unsqueeze(1) + .repeat(1, latent_channel, 1, 1) + .type(attn_map.dtype) + ) + attn_mask = F.interpolate(attn_mask, (latent_h, latent_w)) + + # Blur according to the self-attention mask + degraded_latents = gaussian_blur_2d(original_latents, kernel_size=9, sigma=1.0) + degraded_latents = degraded_latents * attn_mask + original_latents * (1 - attn_mask) + + # Noise it again to match the noise level + degraded_latents = self.scheduler.add_noise(degraded_latents, noise=eps, timesteps=t[None]) + + return degraded_latents + + # Modified from diffusers.schedulers.scheduling_ddim.DDIMScheduler.step + # Note: there are some schedulers that clip or do not return x_0 (PNDMScheduler, DDIMScheduler, etc.) + def pred_x0(self, sample, model_output, timestep): + alpha_prod_t = self.scheduler.alphas_cumprod[timestep].to(sample.device) + + beta_prod_t = 1 - alpha_prod_t + if self.scheduler.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + elif self.scheduler.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.scheduler.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + # predict V + model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.scheduler.config.prediction_type} must be one of `epsilon`, `sample`," + " or `v_prediction`" + ) + + return pred_original_sample + + def pred_epsilon(self, sample, model_output, timestep): + alpha_prod_t = self.scheduler.alphas_cumprod[timestep] + + beta_prod_t = 1 - alpha_prod_t + if self.scheduler.config.prediction_type == "epsilon": + pred_eps = model_output + elif self.scheduler.config.prediction_type == "sample": + pred_eps = (sample - (alpha_prod_t**0.5) * model_output) / (beta_prod_t**0.5) + elif self.scheduler.config.prediction_type == "v_prediction": + pred_eps = (beta_prod_t**0.5) * sample + (alpha_prod_t**0.5) * model_output + else: + raise ValueError( + f"prediction_type given as {self.scheduler.config.prediction_type} must be one of `epsilon`, `sample`," + " or `v_prediction`" + ) + + return pred_eps + + +# Gaussian blur +def gaussian_blur_2d(img, kernel_size, sigma): + ksize_half = (kernel_size - 1) * 0.5 + + x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size) + + pdf = torch.exp(-0.5 * (x / sigma).pow(2)) + + x_kernel = pdf / pdf.sum() + x_kernel = x_kernel.to(device=img.device, dtype=img.dtype) + + kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :]) + kernel2d = 
kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1]) + + padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2] + + img = F.pad(img, padding, mode="reflect") + img = F.conv2d(img, kernel2d, groups=img.shape[-3]) + + return img diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/__init__.py new file mode 100644 index 000000000..8088fbcfc --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/__init__.py @@ -0,0 +1,76 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_additional_imports = {} +_import_structure = {"pipeline_output": ["StableDiffusionXLPipelineOutput"]} + +if is_transformers_available() and is_flax_available(): + _import_structure["pipeline_output"].extend(["FlaxStableDiffusionXLPipelineOutput"]) +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_xl"] = ["StableDiffusionXLPipeline"] + _import_structure["pipeline_stable_diffusion_xl_img2img"] = ["StableDiffusionXLImg2ImgPipeline"] + _import_structure["pipeline_stable_diffusion_xl_inpaint"] = ["StableDiffusionXLInpaintPipeline"] + _import_structure["pipeline_stable_diffusion_xl_instruct_pix2pix"] = ["StableDiffusionXLInstructPix2PixPipeline"] + +if is_transformers_available() and is_flax_available(): + from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState + + _additional_imports.update({"PNDMSchedulerState": PNDMSchedulerState}) + _import_structure["pipeline_flax_stable_diffusion_xl"] = ["FlaxStableDiffusionXLPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_stable_diffusion_xl import StableDiffusionXLPipeline + from .pipeline_stable_diffusion_xl_img2img import StableDiffusionXLImg2ImgPipeline + from .pipeline_stable_diffusion_xl_inpaint import StableDiffusionXLInpaintPipeline + from .pipeline_stable_diffusion_xl_instruct_pix2pix import StableDiffusionXLInstructPix2PixPipeline + + try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_flax_objects import * + else: + from .pipeline_flax_stable_diffusion_xl import ( + FlaxStableDiffusionXLPipeline, + ) + from .pipeline_output import FlaxStableDiffusionXLPipelineOutput + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) + for name, value in _additional_imports.items(): + 
setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py new file mode 100644 index 000000000..77363b254 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py @@ -0,0 +1,308 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from functools import partial +from typing import Dict, List, Optional, Union + +import jax +import jax.numpy as jnp +from flax.core.frozen_dict import FrozenDict +from transformers import CLIPTokenizer, FlaxCLIPTextModel + +from diffusers.utils import logging + +from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel +from ...schedulers import ( + FlaxDDIMScheduler, + FlaxDPMSolverMultistepScheduler, + FlaxLMSDiscreteScheduler, + FlaxPNDMScheduler, +) +from ..pipeline_flax_utils import FlaxDiffusionPipeline +from .pipeline_output import FlaxStableDiffusionXLPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +# Set to True to use python for loop instead of jax.fori_loop for easier debugging +DEBUG = False + + +class FlaxStableDiffusionXLPipeline(FlaxDiffusionPipeline): + def __init__( + self, + text_encoder: FlaxCLIPTextModel, + text_encoder_2: FlaxCLIPTextModel, + vae: FlaxAutoencoderKL, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: FlaxUNet2DConditionModel, + scheduler: Union[ + FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler + ], + dtype: jnp.dtype = jnp.float32, + ): + super().__init__() + self.dtype = dtype + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def prepare_inputs(self, prompt: Union[str, List[str]]): + if not isinstance(prompt, (str, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + # Assume we have the two encoders + inputs = [] + for tokenizer in [self.tokenizer, self.tokenizer_2]: + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + inputs.append(text_inputs.input_ids) + inputs = jnp.stack(inputs, axis=1) + return inputs + + def __call__( + self, + prompt_ids: jax.Array, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int = 50, + guidance_scale: Union[float, jax.Array] = 7.5, + height: Optional[int] = None, + width: Optional[int] = None, + latents: jnp.array = None, + 
neg_prompt_ids: jnp.array = None, + return_dict: bool = True, + output_type: str = None, + jit: bool = False, + ): + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + if isinstance(guidance_scale, float) and jit: + # Convert to a tensor so each device gets a copy. + guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) + guidance_scale = guidance_scale[:, None] + + return_latents = output_type == "latent" + + if jit: + images = _p_generate( + self, + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + return_latents, + ) + else: + images = self._generate( + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + return_latents, + ) + + if not return_dict: + return (images,) + + return FlaxStableDiffusionXLPipelineOutput(images=images) + + def get_embeddings(self, prompt_ids: jnp.array, params): + # We assume we have the two encoders + + # bs, encoder_input, seq_length + te_1_inputs = prompt_ids[:, 0, :] + te_2_inputs = prompt_ids[:, 1, :] + + prompt_embeds = self.text_encoder(te_1_inputs, params=params["text_encoder"], output_hidden_states=True) + prompt_embeds = prompt_embeds["hidden_states"][-2] + prompt_embeds_2_out = self.text_encoder_2( + te_2_inputs, params=params["text_encoder_2"], output_hidden_states=True + ) + prompt_embeds_2 = prompt_embeds_2_out["hidden_states"][-2] + text_embeds = prompt_embeds_2_out["text_embeds"] + prompt_embeds = jnp.concatenate([prompt_embeds, prompt_embeds_2], axis=-1) + return prompt_embeds, text_embeds + + def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, bs, dtype): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_time_ids = jnp.array([add_time_ids] * bs, dtype=dtype) + return add_time_ids + + def _generate( + self, + prompt_ids: jnp.array, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int, + height: int, + width: int, + guidance_scale: float, + latents: Optional[jnp.array] = None, + neg_prompt_ids: Optional[jnp.array] = None, + return_latents=False, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + # Encode input prompt + prompt_embeds, pooled_embeds = self.get_embeddings(prompt_ids, params) + + # Get unconditional embeddings + batch_size = prompt_embeds.shape[0] + if neg_prompt_ids is None: + neg_prompt_embeds = jnp.zeros_like(prompt_embeds) + negative_pooled_embeds = jnp.zeros_like(pooled_embeds) + else: + neg_prompt_embeds, negative_pooled_embeds = self.get_embeddings(neg_prompt_ids, params) + + add_time_ids = self._get_add_time_ids( + (height, width), (0, 0), (height, width), prompt_embeds.shape[0], dtype=prompt_embeds.dtype + ) + + prompt_embeds = jnp.concatenate([neg_prompt_embeds, prompt_embeds], axis=0) # (2, 77, 2048) + add_text_embeds = jnp.concatenate([negative_pooled_embeds, pooled_embeds], axis=0) + add_time_ids = jnp.concatenate([add_time_ids, add_time_ids], axis=0) + + # Ensure model output will be `float32` before going into the scheduler + guidance_scale = jnp.array([guidance_scale], dtype=jnp.float32) + + # Create random latents + latents_shape = ( + batch_size, + self.unet.config.in_channels, + height // self.vae_scale_factor, + width // 
self.vae_scale_factor, + ) + if latents is None: + latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + + # Prepare scheduler state + scheduler_state = self.scheduler.set_timesteps( + params["scheduler"], num_inference_steps=num_inference_steps, shape=latents.shape + ) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * scheduler_state.init_noise_sigma + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # Denoising loop + def loop_body(step, args): + latents, scheduler_state = args + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + latents_input = jnp.concatenate([latents] * 2) + + t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] + timestep = jnp.broadcast_to(t, latents_input.shape[0]) + + latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) + + # predict the noise residual + noise_pred = self.unet.apply( + {"params": params["unet"]}, + jnp.array(latents_input), + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=prompt_embeds, + added_cond_kwargs=added_cond_kwargs, + ).sample + # perform guidance + noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) + noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() + return latents, scheduler_state + + if DEBUG: + # run with python for loop + for i in range(num_inference_steps): + latents, scheduler_state = loop_body(i, (latents, scheduler_state)) + else: + latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state)) + + if return_latents: + return latents + + # Decode latents + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample + + image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) + return image + + +# Static argnums are pipe, num_inference_steps, height, width, return_latents. A change would trigger recompilation. +# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). 
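+# Argument positions refer to `_p_generate` below: static argnums (0, 4, 5, 6, 10) are `pipe`,
+# `num_inference_steps`, `height`, `width` and `return_latents`; the remaining arguments (`prompt_ids`,
+# `params`, `prng_seed`, `guidance_scale`, `latents`, `neg_prompt_ids`) are sharded along axis 0, one slice per device.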
+@partial( + jax.pmap, + in_axes=(None, 0, 0, 0, None, None, None, 0, 0, 0, None), + static_broadcasted_argnums=(0, 4, 5, 6, 10), +) +def _p_generate( + pipe, + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + return_latents, +): + return pipe._generate( + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + return_latents, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_output.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_output.py new file mode 100644 index 000000000..0783f4448 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_output.py @@ -0,0 +1,37 @@ +from dataclasses import dataclass +from typing import List, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput, is_flax_available + + +@dataclass +class StableDiffusionXLPipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + + +if is_flax_available(): + import flax + + @flax.struct.dataclass + class FlaxStableDiffusionXLPipelineOutput(BaseOutput): + """ + Output class for Flax Stable Diffusion XL pipelines. + + Args: + images (`np.ndarray`) + Array of shape `(batch_size, height, width, num_channels)` with images from the diffusion pipeline. + """ + + images: np.ndarray diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py new file mode 100644 index 000000000..776696e9d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py @@ -0,0 +1,1266 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + FusedAttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from .watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionXLPipeline + + >>> pipe = StableDiffusionXLPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. 
+ device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLPipeline( + DiffusionPipeline, + StableDiffusionMixin, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. 
+ scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.default_sample_size = self.unet.config.sample_size + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
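+ # Guidance is likewise skipped when the UNet takes a guidance-scale embedding
+ # (`time_cond_proj_dim` is not None); see the `timestep_cond` setup in step 9 of `__call__`.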
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ timesteps (`List[int]`, *optional*):
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+ passed will be used. Must be in descending order.
+ denoising_end (`float`, *optional*):
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
+ guidance_scale (`float`, *optional*, defaults to 5.0):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+ less than `1`).
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ eta (`float`, *optional*, defaults to 0.0):
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+ [`schedulers.DDIMScheduler`], will be ignored for others.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+ input argument.
+ ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
+ ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
+ Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the number of IP-Adapters.
+ Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
+ if `do_classifier_free_guidance` is set to `True`.
+ If not provided, embeddings are computed from the `ip_adapter_image` input argument.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
+ of a plain tuple.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16. of
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
+ explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
+ micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
+ micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ To negatively condition the generation process based on a target image resolution. It should be the same
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ callback_on_step_end (`Callable`, *optional*):
+ A function that is called at the end of each denoising step during inference. The function is called
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+ `callback_on_step_end_tensor_inputs`.
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+ `._callback_tensor_inputs` attribute of your pipeline class.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
+ """
+
+ callback = kwargs.pop("callback", None)
+ callback_steps = kwargs.pop("callback_steps", None)
+
+ if callback is not None:
+ deprecate(
+ "callback",
+ "1.0.0",
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+ if callback_steps is not None:
+ deprecate(
+ "callback_steps",
+ "1.0.0",
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+
+ # 0. Default height and width to unet
+ height = height or self.default_sample_size * self.vae_scale_factor
+ width = width or self.default_sample_size * self.vae_scale_factor
+
+ original_size = original_size or (height, width)
+ target_size = target_size or (height, width)
+
+ # 1. Check inputs.
Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 8. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9. 
Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and 
self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py new file mode 100644 index 000000000..fd4c412f4 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py @@ -0,0 +1,1442 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from .watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionXLImg2ImgPipeline + >>> from diffusers.utils import load_image + + >>> pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + >>> url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png" + + >>> init_image = load_image(url).convert("RGB") + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt, image=init_image).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the + config of `stabilityai/stable-diffusion-xl-refiner-1-0`. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. 
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "add_neg_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + scheduler=scheduler, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. 
If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( 
+ "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + strength, + num_inference_steps, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + if num_inference_steps is None: + raise ValueError("`num_inference_steps` cannot be None.") + elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." + ) + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): + # get the original timestep using init_timestep + if denoising_start is None: + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + t_start = max(num_inference_steps - init_timestep, 0) + else: + t_start = 0 + + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + + # Strength is irrelevant if we directly request a timestep to start at; + # that is, strength is determined by the denoising_start instead. + if denoising_start is not None: + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_start * self.scheduler.config.num_train_timesteps) + ) + ) + + num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item() + if self.scheduler.order == 2 and num_inference_steps % 2 == 0: + # if the scheduler is a 2nd order scheduler we might have to do +1 + # because `num_inference_steps` might be even given that every timestep + # (except the highest one) is duplicated. If `num_inference_steps` is even it would + # mean that we cut the timesteps in the middle of the denoising step + # (between 1st and 2nd devirative) which leads to incorrect results. 
By adding 1 + # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler + num_inference_steps = num_inference_steps + 1 + + # because t_n+1 >= t_n, we slice the timesteps starting from the end + timesteps = timesteps[-num_inference_steps:] + return timesteps, num_inference_steps + + return timesteps, num_inference_steps - t_start + + def prepare_latents( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + # Offload text encoder if `enable_model_cpu_offload` was enabled + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.text_encoder_2.to("cpu") + torch.cuda.empty_cache() + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + init_latents = torch.cat([init_latents], dim=0) + + if add_noise: + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list( + negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) + ) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." + ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. 
Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
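For reference, a minimal standalone sketch of how the guidance weight described in the comment above is applied later in the denoising loop of this pipeline; the tensor shapes and the value of `guidance_scale` are illustrative assumptions, not part of the patch:

```py
import torch

noise_pred_uncond = torch.randn(1, 4, 64, 64)  # prediction for the empty/negative prompt
noise_pred_text = torch.randn(1, 4, 64, 64)    # prediction for the text prompt
guidance_scale = 5.0

# guidance_scale == 1.0 collapses this to the text-conditioned prediction alone,
# i.e. no classifier-free guidance.
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])
```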
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def denoising_start(self): + return self._denoising_start + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + strength: float = 0.3, + num_inference_steps: int = 50, + timesteps: List[int] = None, + denoising_start: Optional[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.FloatTensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`): + The image(s) to modify with the pipeline. + strength (`float`, *optional*, defaults to 0.3): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. 
Note that in the case of + `denoising_start` being declared as an integer, the value of `strength` will be ignored. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + denoising_start (`float`, *optional*): + When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be + bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and + it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, + strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline + is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refine Image + Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be + denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the + final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline + forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refine Image + Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. 
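As a concrete illustration of the `strength` semantics described above, the timestep arithmetic from `get_timesteps` (shown earlier in this file) can be worked through with assumed values; this is a sketch, the numbers are not taken from the patch:

```py
# Mirrors `get_timesteps` when `denoising_start` is None; values are assumed.
num_inference_steps = 50
strength = 0.3

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # -> 15
t_start = max(num_inference_steps - init_timestep, 0)                          # -> 35

# Only the last 15 of the 50 scheduler steps are executed, which is why a low
# strength keeps the result close to the input image.
print(init_timestep, t_start)  # 15 35
```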
+ latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). + Guidance rescale factor should fix overexposure when using zero terminal SNR. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). 
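Since `original_size` (together with the crop coordinates and aesthetic scores documented around it) feeds SDXL's micro-conditioning, a short sketch of how `_get_add_time_ids` (defined earlier in this file) packs them in the refiner-style `requires_aesthetics_score=True` branch may help; the numeric values are assumptions for illustration:

```py
import torch

# Assumed illustrative values.
original_size = (1024, 1024)
crops_coords_top_left = (0, 0)
aesthetic_score = 6.0

# Mirrors the `requires_aesthetics_score=True` branch of `_get_add_time_ids`.
add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
add_time_ids = torch.tensor([add_time_ids], dtype=torch.float32)
print(add_time_ids)  # tensor([[1024., 1024., 0., 0., 6.]])
```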
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. 
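A minimal callback of the shape described above, as an illustrative sketch (the function name is hypothetical; the tensors it receives are governed by `callback_on_step_end_tensor_inputs`, documented next):

```py
def log_latents(pipe, step, timestep, callback_kwargs):
    # Receives the tensors requested via `callback_on_step_end_tensor_inputs`,
    # e.g. the default ["latents"].
    latents = callback_kwargs["latents"]
    print(f"step={step} t={int(timestep)} latents={tuple(latents.shape)}")
    # The denoising loop pops updated tensors from the returned dict,
    # so return it (possibly modified).
    return callback_kwargs
```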
+ callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple. When returning a tuple, the first element is a list with the generated images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + strength, + num_inference_steps, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._denoising_start = denoising_start + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + + # 5. Prepare timesteps + def denoising_value_valid(dnv): + return isinstance(dnv, float) and 0 < dnv < 1 + + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps, + strength, + device, + denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, + ) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + add_noise = True if self.denoising_start is None else False + # 6. 
Prepare latent variables + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + add_noise, + ) + # 7. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + height, width = latents.shape[-2:] + height = height * self.vae_scale_factor + width = width * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 8. Prepare added time ids & embeddings + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 9. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 9.1 Apply denoising_end + if ( + self.denoising_end is not None + and self.denoising_start is not None + and denoising_value_valid(self.denoising_end) + and denoising_value_valid(self.denoising_start) + and self.denoising_start >= self.denoising_end + ): + raise ValueError( + f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + + f" {self.denoising_end} when using type float." 
+ ) + elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py new file mode 100644 index 000000000..c25628c22 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py @@ -0,0 +1,1812 @@ +# 
Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from .watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionXLInpaintPipeline + >>> from diffusers.utils import load_image + + >>> pipe = StableDiffusionXLInpaintPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", + ... torch_dtype=torch.float16, + ... variant="fp16", + ... use_safetensors=True, + ... ) + >>> pipe.to("cuda") + + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + + >>> init_image = load_image(img_url).convert("RGB") + >>> mask_image = load_image(mask_url).convert("RGB") + + >>> prompt = "A majestic tiger sitting on a bench" + >>> image = pipe( + ... prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80 + ... ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +def mask_pil_to_torch(mask, height, width): + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] + mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask = torch.from_numpy(mask) + return mask + + +def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False): + """ + Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the + ``image`` and ``1`` for the ``mask``. + + The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be + binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. + It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` + ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4 + dimensions: ``batch x channels x height x width``. + """ + + # checkpoint. TOD(Yiyi) - need to clean this up later + deprecation_message = "The prepare_mask_and_masked_image method is deprecated and will be removed in a future version. 
Please use VaeImageProcessor.preprocess instead" + deprecate( + "prepare_mask_and_masked_image", + "0.30.0", + deprecation_message, + ) + if image is None: + raise ValueError("`image` input cannot be undefined.") + + if mask is None: + raise ValueError("`mask_image` input cannot be undefined.") + + if isinstance(image, torch.Tensor): + if not isinstance(mask, torch.Tensor): + mask = mask_pil_to_torch(mask, height, width) + + if image.ndim == 3: + image = image.unsqueeze(0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" + # assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" + assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" + + # Check image is in [-1, 1] + # if image.min() < -1 or image.max() > 1: + # raise ValueError("Image should be in [-1, 1] range") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # Image as float32 + image = image.to(dtype=torch.float32) + elif isinstance(mask, torch.Tensor): + raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") + else: + # preprocess image + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + # resize all images w.r.t passed height an width + image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image] + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + mask = mask_pil_to_torch(mask, height, width) + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + if image.shape[1] == 4: + # images are in latent space and thus can't + # be masked set masked_image to None + # we assume that the checkpoint is not an inpainting + # checkpoint. TOD(Yiyi) - need to clean this up later + masked_image = None + else: + masked_image = image * (mask < 0.5) + + # n.b. 
ensure backwards compatibility as old function does not return image + if return_image: + return mask, masked_image, image + + return mask, masked_image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLInpaintPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + FromSingleFileMixin, + IPAdapterMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
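For orientation, a hedged usage sketch of the `retrieve_timesteps` helper defined just above, assuming it is run inside this module; the scheduler choice and checkpoint id are assumptions, not taken from the patch:

```py
from diffusers import EulerDiscreteScheduler

# Assumed scheduler/checkpoint for illustration only.
scheduler = EulerDiscreteScheduler.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler"
)

# Let the scheduler derive the spacing from a step count...
timesteps, n = retrieve_timesteps(scheduler, num_inference_steps=30, device="cpu")
print(n, timesteps[:3])

# ...or pass an explicit descending schedule instead (only valid if the
# scheduler's `set_timesteps` accepts a `timesteps` argument; otherwise the
# helper raises a ValueError, as shown above).
# timesteps, n = retrieve_timesteps(scheduler, timesteps=[999, 749, 499, 249, 0], device="cpu")
```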
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config + of `stabilityai/stable-diffusion-xl-refiner-1-0`. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. 
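The two config flags documented above (`requires_aesthetics_score` and `force_zeros_for_empty_prompt`) are read from the checkpoint's model index; a hedged sketch of inspecting them, where the checkpoint id and dtype are assumptions for illustration:

```py
import torch
from diffusers import StableDiffusionXLInpaintPipeline

# Assumed checkpoint; the refiner is the case where `requires_aesthetics_score`
# is typically enabled in the shipped config.
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
)
print(pipe.config.requires_aesthetics_score, pipe.config.force_zeros_for_empty_prompt)
```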
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "add_neg_time_ids", + "mask", + "masked_image_latents", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + scheduler=scheduler, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True + ) + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] 
+ + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). 
+ negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not 
torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + mask_image, + height, + width, + strength, + callback_steps, + output_type, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + padding_mask_crop=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if padding_mask_crop is not None: + if not isinstance(image, PIL.Image.Image): + raise ValueError( + f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}." + ) + if not isinstance(mask_image, PIL.Image.Image): + raise ValueError( + f"The mask image should be a PIL image when inpainting mask crop, but is of type" + f" {type(mask_image)}." + ) + if output_type != "pil": + raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + add_noise=True, + return_noise=False, + return_image_latents=False, + ): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." + "However, either the image or the noise timestep has not been provided." + ) + + if image.shape[1] == 4: + image_latents = image.to(device=device, dtype=dtype) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + elif return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + image_latents = self._encode_vae_image(image=image, generator=generator) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + + if latents is None and add_noise: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. 
then initialise the latents to noise, else initial to image + noise + latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) + # if pure noise then scale the initial latents by the Scheduler's init sigma + latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents + elif add_noise: + noise = latents.to(device) + latents = noise * self.scheduler.init_noise_sigma + else: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = image_latents.to(device) + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + dtype = image.dtype + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + image_latents = image_latents.to(dtype) + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + def prepare_mask_latents( + self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + + if masked_image is not None and masked_image.shape[1] == 4: + masked_image_latents = masked_image + else: + masked_image_latents = None + + if masked_image is not None: + if masked_image_latents is None: + masked_image = masked_image.to(device=device, dtype=dtype) + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." 
+ ) + masked_image_latents = masked_image_latents.repeat( + batch_size // masked_image_latents.shape[0], 1, 1, 1 + ) + + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): + # get the original timestep using init_timestep + if denoising_start is None: + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + t_start = max(num_inference_steps - init_timestep, 0) + else: + t_start = 0 + + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + + # Strength is irrelevant if we directly request a timestep to start at; + # that is, strength is determined by the denoising_start instead. + if denoising_start is not None: + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_start * self.scheduler.config.num_train_timesteps) + ) + ) + + num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item() + if self.scheduler.order == 2 and num_inference_steps % 2 == 0: + # if the scheduler is a 2nd order scheduler we might have to do +1 + # because `num_inference_steps` might be even given that every timestep + # (except the highest one) is duplicated. If `num_inference_steps` is even it would + # mean that we cut the timesteps in the middle of the denoising step + # (between 1st and 2nd devirative) which leads to incorrect results. 
By adding 1 + # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler + num_inference_steps = num_inference_steps + 1 + + # because t_n+1 >= t_n, we slice the timesteps starting from the end + timesteps = timesteps[-num_inference_steps:] + return timesteps, num_inference_steps + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list( + negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) + ) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." + ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
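+    # In code this corresponds to the split/recombine step in the denoising loop below:
+    #     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+    #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+    # so `guidance_scale == 1` simply returns the text-conditioned prediction. The property below also
+    # requires `unet.config.time_cond_proj_dim` to be `None`: models conditioned on a guidance-scale
+    # embedding (fed through `timestep_cond`, see `get_guidance_scale_embedding`) skip batch-doubled
+    # classifier-free guidance entirely.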
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def denoising_start(self): + return self._denoising_start + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + masked_image_latents: torch.FloatTensor = None, + height: Optional[int] = None, + width: Optional[int] = None, + padding_mask_crop: Optional[int] = None, + strength: float = 0.9999, + num_inference_steps: int = 50, + timesteps: List[int] = None, + denoising_start: Optional[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will + be masked out with `mask_image` and repainted according to `prompt`. + mask_image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted + to a single channel (luminance) before use. 
If it's a tensor, it should contain one color channel (L) + instead of 3, so the expected shape would be `(B, H, W, 1)`. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + padding_mask_crop (`int`, *optional*, defaults to `None`): + The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to image and mask_image. If + `padding_mask_crop` is not `None`, it will first find a rectangular region with the same aspect ratio as the image and + that contains all of the masked area, and then expand that area based on `padding_mask_crop`. The image and mask_image will then be cropped based on + the expanded area before resizing to the original image size for inpainting. This is useful when the masked area is small while the image is large + and contains information irrelevant for inpainting, such as background. + strength (`float`, *optional*, defaults to 0.9999): + Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be + between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the + `strength`. The number of denoising steps depends on the amount of noise initially added. When + `strength` is 1, added noise will be maximum and the denoising process will run for the full number of + iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked + portion of the reference `image`. Note that in the case of `denoising_start` being declared as an + integer, the value of `strength` will be ignored. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + denoising_start (`float`, *optional*): + When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be + bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and + it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, + strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline + is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
+ denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be + denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the + final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline + forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output). + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. 
+ eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be the same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that is called at the end of each denoising step during inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1.
Check inputs + self.check_inputs( + prompt, + prompt_2, + image, + mask_image, + height, + width, + strength, + callback_steps, + output_type, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + padding_mask_crop, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._denoising_start = denoising_start + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. set timesteps + def denoising_value_valid(dnv): + return isinstance(dnv, float) and 0 < dnv < 1 + + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps, + strength, + device, + denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, + ) + # check that number of inference steps is not < 1 - as this doesn't make sense + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + + # 5. 
Preprocess mask and image + if padding_mask_crop is not None: + crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) + resize_mode = "fill" + else: + crops_coords = None + resize_mode = "default" + + original_image = image + init_image = self.image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ) + init_image = init_image.to(dtype=torch.float32) + + mask = self.mask_processor.preprocess( + mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + + if masked_image_latents is not None: + masked_image = masked_image_latents + elif init_image.shape[1] == 4: + # if images are in latent space, we can't mask it + masked_image = None + else: + masked_image = init_image * (mask < 0.5) + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + + add_noise = True if self.denoising_start is None else False + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + add_noise=add_noise, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + self.do_classifier_free_guidance, + ) + + # 8. Check that sizes of mask, masked image and latents match + if num_channels_unet == 9: + # default case for runwayml/stable-diffusion-inpainting + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + elif num_channels_unet != 4: + raise ValueError( + f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." + ) + # 8.1 Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + height, width = latents.shape[-2:] + height = height * self.vae_scale_factor + width = width * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 10. 
Prepare added time ids & embeddings + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 11. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + if ( + self.denoising_end is not None + and self.denoising_start is not None + and denoising_value_valid(self.denoising_end) + and denoising_value_valid(self.denoising_start) + and self.denoising_start >= self.denoising_end + ): + raise ValueError( + f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + + f" {self.denoising_end} when using type float." 
+ ) + elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 11.1 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if num_channels_unet == 9: + latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if num_channels_unet == 4: + init_latents_proper = image_latents + if self.do_classifier_free_guidance: + init_mask, _ = mask.chunk(2) + else: + init_mask = mask + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) + mask = callback_outputs.pop("mask", mask) + masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + return StableDiffusionXLPipelineOutput(images=latents) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + if padding_mask_crop is not None: + image = [self.image_processor.apply_overlay(mask_image, 
original_image, i, crops_coords) for i in image] + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py new file mode 100644 index 000000000..51e413d4b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py @@ -0,0 +1,976 @@ +# Copyright 2024 Harutatsu Akiyama and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import PIL.Image +import torch +from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + FusedAttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from .watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionXLInstructPix2PixPipeline + >>> from diffusers.utils import load_image + + >>> resolution = 768 + >>> image = load_image( + ... "https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" + ... ).resize((resolution, resolution)) + >>> edit_instruction = "Turn sky into a cloudy one" + + >>> pipe = StableDiffusionXLInstructPix2PixPipeline.from_pretrained( + ... "diffusers/sdxl-instructpix2pix-768", torch_dtype=torch.float16 + ... ).to("cuda") + + >>> edited_image = pipe( + ... prompt=edit_instruction, + ... image=image, + ... height=resolution, + ... width=resolution, + ... guidance_scale=3.0, + ... 
image_guidance_scale=1.5, + ... num_inference_steps=30, + ... ).images[0] + >>> edited_image + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +class StableDiffusionXLInstructPix2PixPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, +): + r""" + Pipeline for pixel-level image editing by following text instructions. Based on Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
+ tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config + of `stabilityai/stable-diffusion-xl-refiner-1-0`. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" + _optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.default_sample_size = self.unet.config.sample_size + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder( + text_input_ids.to(device), + output_hidden_states=True, + ) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + prompt_embeds = prompt_embeds.hidden_states[-2] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." 
+ ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt, negative_prompt_2] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + prompt_embeds_dtype = self.text_encoder_2.dtype if self.text_encoder_2 is not None else self.unet.dtype + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
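+        # (note on the code below: `inspect.signature` is used so that `eta` and `generator` are only
+        # forwarded when the scheduler's `step` actually accepts them; e.g. DDIMScheduler ends up with
+        # {"eta": eta, "generator": generator}, while schedulers without these parameters get an empty dict)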
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_instruct_pix2pix.StableDiffusionInstructPix2PixPipeline.check_inputs + def check_inputs( + self, + prompt, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_image_latents( + self, image, batch_size, num_images_per_prompt, dtype, device, do_classifier_free_guidance, generator=None + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + image_latents = image + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + if needs_upcasting: + self.upcast_vae() + image = image.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + image_latents = retrieve_latents(self.vae.encode(image), sample_mode="argmax") + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." + ) + else: + image_latents = torch.cat([image_latents], dim=0) + + if do_classifier_free_guidance: + uncond_image_latents = torch.zeros_like(image_latents) + image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) + + if image_latents.dtype != self.vae.dtype: + image_latents = image_latents.to(dtype=self.vae.dtype) + + return image_latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 100, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + image_guidance_scale: float = 1.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.FloatTensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`): + The image(s) to modify with the pipeline. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. 
As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + image_guidance_scale (`float`, *optional*, defaults to 1.5): + Image guidance scale is to push the generated image towards the inital image `image`. Image guidance + scale is enabled by setting `image_guidance_scale > 1`. Higher image guidance scale encourages to + generate images that are closely linked to the source image `image`, usually at the expense of lower + image quality. This pipeline requires a value of at least `1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. 
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). + Guidance rescale factor should fix overexposure when using zero terminal SNR. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). 
+ negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + + Examples: + + Returns: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 and image_guidance_scale >= 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + ) + + # 4. Preprocess image + image = self.image_processor.preprocess(image, height=height, width=width).to(device) + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 6. Prepare Image latents + image_latents = self.prepare_image_latents( + image, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + do_classifier_free_guidance, + ) + + # 7. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 8. Check that shapes of latents and image match the UNet channels + num_channels_image = image_latents.shape[1] + if num_channels_latents + num_channels_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! 
The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_image`: {num_channels_image} " + f" = {num_channels_latents + num_channels_image}. Please verify the config of" + " `pipeline.unet` or your `image` input." + ) + + # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 10. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if do_classifier_free_guidance: + # The extra concat similar to how it's done in SD InstructPix2Pix. + prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds, negative_prompt_embeds], dim=0) + add_text_embeds = torch.cat( + [add_text_embeds, negative_pooled_prompt_embeds, negative_pooled_prompt_embeds], dim=0 + ) + add_time_ids = torch.cat([add_time_ids, add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 11. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1: + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Expand the latents if we are doing classifier free guidance. + # The latents are expanded 3 times because for pix2pix the guidance + # is applied for both the text and the input image. + latent_model_input = torch.cat([latents] * 3) if do_classifier_free_guidance else latents + + # concat latents, image_latents in the channel dimension + scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + noise_pred = self.unet( + scaled_latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3) + noise_pred = ( + noise_pred_uncond + + guidance_scale * (noise_pred_text - noise_pred_image) + + image_guidance_scale * (noise_pred_image - noise_pred_uncond) + ) + + if do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + return StableDiffusionXLPipelineOutput(images=latents) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/watermark.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/watermark.py new file mode 100644 index 000000000..5b6e36d9f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_diffusion_xl/watermark.py @@ -0,0 +1,36 @@ +import numpy as np +import torch + +from ...utils import is_invisible_watermark_available + + +if is_invisible_watermark_available(): + from imwatermark import WatermarkEncoder + + +# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 +WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110 +# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 +WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] + + +class StableDiffusionXLWatermarker: + def __init__(self): + self.watermark = WATERMARK_BITS + self.encoder = WatermarkEncoder() + + self.encoder.set_watermark("bits", self.watermark) + + def apply_watermark(self, images: torch.FloatTensor): + # can't encode images that are smaller than 256 + if 
images.shape[-1] < 256: + return images + + images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy() + + images = [self.encoder.encode(image, "dwtDct") for image in images] + + images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2) + + images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0) + return images diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_video_diffusion/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_video_diffusion/__init__.py new file mode 100644 index 000000000..3bd4dc789 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_video_diffusion/__init__.py @@ -0,0 +1,58 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + BaseOutput, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure.update( + { + "pipeline_stable_video_diffusion": [ + "StableVideoDiffusionPipeline", + "StableVideoDiffusionPipelineOutput", + ], + } + ) + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_video_diffusion import ( + StableVideoDiffusionPipeline, + StableVideoDiffusionPipelineOutput, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py new file mode 100644 index 000000000..1342fe429 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py @@ -0,0 +1,673 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
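+# NOTE: this module implements StableVideoDiffusionPipeline, an image-to-video pipeline built on
+# AutoencoderKLTemporalDecoder, UNetSpatioTemporalConditionModel and EulerDiscreteScheduler;
+# a minimal usage sketch is given in EXAMPLE_DOC_STRING below.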
+ +import inspect +from dataclasses import dataclass +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...models import AutoencoderKLTemporalDecoder, UNetSpatioTemporalConditionModel +from ...schedulers import EulerDiscreteScheduler +from ...utils import BaseOutput, logging, replace_example_docstring +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import StableVideoDiffusionPipeline + >>> from diffusers.utils import load_image, export_to_video + + >>> pipe = StableVideoDiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16") + >>> pipe.to("cuda") + + >>> image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd-docstring-example.jpeg") + >>> image = image.resize((1024, 576)) + + >>> frames = pipe(image, num_frames=25, decode_chunk_size=8).frames[0] + >>> export_to_video(frames, "generated.mp4", fps=7) + ``` +""" + + +def _append_dims(x, target_dims): + """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" + dims_to_append = target_dims - x.ndim + if dims_to_append < 0: + raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") + return x[(...,) + (None,) * dims_to_append] + + +# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid +def tensor2vid(video: torch.Tensor, processor: VaeImageProcessor, output_type: str = "np"): + batch_size, channels, num_frames, height, width = video.shape + outputs = [] + for batch_idx in range(batch_size): + batch_vid = video[batch_idx].permute(1, 0, 2, 3) + batch_output = processor.postprocess(batch_vid, output_type) + + outputs.append(batch_output) + + if output_type == "np": + outputs = np.stack(outputs) + + elif output_type == "pt": + outputs = torch.stack(outputs) + + elif not output_type == "pil": + raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']") + + return outputs + + +@dataclass +class StableVideoDiffusionPipelineOutput(BaseOutput): + r""" + Output class for Stable Video Diffusion pipeline. + + Args: + frames (`[List[List[PIL.Image.Image]]`, `np.ndarray`, `torch.FloatTensor`]): + List of denoised PIL images of length `batch_size` or numpy array or torch tensor + of shape `(batch_size, num_frames, height, width, num_channels)`. + """ + + frames: Union[List[List[PIL.Image.Image]], np.ndarray, torch.FloatTensor] + + +class StableVideoDiffusionPipeline(DiffusionPipeline): + r""" + Pipeline to generate video from an input image using Stable Video Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKLTemporalDecoder`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. 
+ image_encoder ([`~transformers.CLIPVisionModelWithProjection`]): + Frozen CLIP image-encoder ([laion/CLIP-ViT-H-14-laion2B-s32B-b79K](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K)). + unet ([`UNetSpatioTemporalConditionModel`]): + A `UNetSpatioTemporalConditionModel` to denoise the encoded image latents. + scheduler ([`EulerDiscreteScheduler`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images. + """ + + model_cpu_offload_seq = "image_encoder->unet->vae" + _callback_tensor_inputs = ["latents"] + + def __init__( + self, + vae: AutoencoderKLTemporalDecoder, + image_encoder: CLIPVisionModelWithProjection, + unet: UNetSpatioTemporalConditionModel, + scheduler: EulerDiscreteScheduler, + feature_extractor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + vae=vae, + image_encoder=image_encoder, + unet=unet, + scheduler=scheduler, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + def _encode_image( + self, + image: PipelineImageInput, + device: Union[str, torch.device], + num_videos_per_prompt: int, + do_classifier_free_guidance: bool, + ) -> torch.FloatTensor: + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.image_processor.pil_to_numpy(image) + image = self.image_processor.numpy_to_pt(image) + + # We normalize the image before resizing to match with the original implementation. + # Then we unnormalize it after resizing. + image = image * 2.0 - 1.0 + image = _resize_with_antialiasing(image, (224, 224)) + image = (image + 1.0) / 2.0 + + # Normalize the image with for CLIP input + image = self.feature_extractor( + images=image, + do_normalize=True, + do_center_crop=False, + do_resize=False, + do_rescale=False, + return_tensors="pt", + ).pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeddings = self.image_encoder(image).image_embeds + image_embeddings = image_embeddings.unsqueeze(1) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = image_embeddings.shape + image_embeddings = image_embeddings.repeat(1, num_videos_per_prompt, 1) + image_embeddings = image_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + negative_image_embeddings = torch.zeros_like(image_embeddings) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_embeddings = torch.cat([negative_image_embeddings, image_embeddings]) + + return image_embeddings + + def _encode_vae_image( + self, + image: torch.Tensor, + device: Union[str, torch.device], + num_videos_per_prompt: int, + do_classifier_free_guidance: bool, + ): + image = image.to(device=device) + image_latents = self.vae.encode(image).latent_dist.mode() + + if do_classifier_free_guidance: + negative_image_latents = torch.zeros_like(image_latents) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_latents = torch.cat([negative_image_latents, image_latents]) + + # duplicate image_latents for each generation per prompt, using mps friendly method + image_latents = image_latents.repeat(num_videos_per_prompt, 1, 1, 1) + + return image_latents + + def _get_add_time_ids( + self, + fps: int, + motion_bucket_id: int, + noise_aug_strength: float, + dtype: torch.dtype, + batch_size: int, + num_videos_per_prompt: int, + do_classifier_free_guidance: bool, + ): + add_time_ids = [fps, motion_bucket_id, noise_aug_strength] + + passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_time_ids = add_time_ids.repeat(batch_size * num_videos_per_prompt, 1) + + if do_classifier_free_guidance: + add_time_ids = torch.cat([add_time_ids, add_time_ids]) + + return add_time_ids + + def decode_latents(self, latents: torch.FloatTensor, num_frames: int, decode_chunk_size: int = 14): + # [batch, frames, channels, height, width] -> [batch*frames, channels, height, width] + latents = latents.flatten(0, 1) + + latents = 1 / self.vae.config.scaling_factor * latents + + forward_vae_fn = self.vae._orig_mod.forward if is_compiled_module(self.vae) else self.vae.forward + accepts_num_frames = "num_frames" in set(inspect.signature(forward_vae_fn).parameters.keys()) + + # decode decode_chunk_size frames at a time to avoid OOM + frames = [] + for i in range(0, latents.shape[0], decode_chunk_size): + num_frames_in = latents[i : i + decode_chunk_size].shape[0] + decode_kwargs = {} + if accepts_num_frames: + # we only pass num_frames_in if it's expected + decode_kwargs["num_frames"] = num_frames_in + + frame = self.vae.decode(latents[i : i + decode_chunk_size], **decode_kwargs).sample + frames.append(frame) + frames = torch.cat(frames, dim=0) + + # [batch*frames, channels, height, width] -> [batch, channels, frames, height, width] + frames = frames.reshape(-1, num_frames, *frames.shape[1:]).permute(0, 2, 1, 3, 4) + + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + frames = frames.float() + return frames + + def check_inputs(self, image, height, width): + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" + f" {type(image)}" + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + def prepare_latents( + self, + batch_size: int, + num_frames: int, + num_channels_latents: int, + height: int, + width: int, + dtype: torch.dtype, + device: Union[str, torch.device], + generator: torch.Generator, + latents: Optional[torch.FloatTensor] = None, + ): + shape = ( + batch_size, + num_frames, + num_channels_latents // 2, + height // self.vae_scale_factor, + width // 
self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + if isinstance(self.guidance_scale, (int, float)): + return self.guidance_scale > 1 + return self.guidance_scale.max() > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor], + height: int = 576, + width: int = 1024, + num_frames: Optional[int] = None, + num_inference_steps: int = 25, + min_guidance_scale: float = 1.0, + max_guidance_scale: float = 3.0, + fps: int = 7, + motion_bucket_id: int = 127, + noise_aug_strength: float = 0.02, + decode_chunk_size: Optional[int] = None, + num_videos_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + return_dict: bool = True, + ): + r""" + The call function to the pipeline for generation. + + Args: + image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`): + Image(s) to guide image generation. If you provide a tensor, the expected value range is between `[0, 1]`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_frames (`int`, *optional*): + The number of video frames to generate. Defaults to `self.unet.config.num_frames` + (14 for `stable-video-diffusion-img2vid` and to 25 for `stable-video-diffusion-img2vid-xt`). + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality video at the + expense of slower inference. This parameter is modulated by `strength`. + min_guidance_scale (`float`, *optional*, defaults to 1.0): + The minimum guidance scale. Used for the classifier free guidance with first frame. + max_guidance_scale (`float`, *optional*, defaults to 3.0): + The maximum guidance scale. Used for the classifier free guidance with last frame. + fps (`int`, *optional*, defaults to 7): + Frames per second. The rate at which the generated images shall be exported to a video after generation. + Note that Stable Diffusion Video's UNet was micro-conditioned on fps-1 during training. 
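For orientation, the latent shape built by `prepare_latents` above works out as follows for the default 576x1024, 25-frame setting, assuming the usual `vae_scale_factor = 8` and `unet.config.in_channels = 8` (illustrative values, not read from a checkpoint):

```py
import torch

batch_size, num_videos_per_prompt = 1, 1
num_frames = 25
height, width = 576, 1024
vae_scale_factor = 8      # 2 ** (len(vae.config.block_out_channels) - 1) for a 4-block VAE
num_channels_latents = 8  # unet.config.in_channels; halved below because the image latents
                          # are concatenated back onto the noise along the channel dim

shape = (
    batch_size * num_videos_per_prompt,
    num_frames,
    num_channels_latents // 2,
    height // vae_scale_factor,
    width // vae_scale_factor,
)
latents = torch.randn(shape)
print(latents.shape)  # torch.Size([1, 25, 4, 72, 128])
```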
+ motion_bucket_id (`int`, *optional*, defaults to 127): + Used for conditioning the amount of motion for the generation. The higher the number the more motion + will be in the video. + noise_aug_strength (`float`, *optional*, defaults to 0.02): + The amount of noise added to the init image, the higher it is the less the video will look like the init image. Increase it for more motion. + decode_chunk_size (`int`, *optional*): + The number of frames to decode at a time. Higher chunk size leads to better temporal consistency at the expense of more memory usage. By default, the decoder decodes all frames at once for maximal + quality. For lower memory usage, reduce `decode_chunk_size`. + num_videos_per_prompt (`int`, *optional*, defaults to 1): + The number of videos to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `pil`, `np` or `pt`. + callback_on_step_end (`Callable`, *optional*): + A function that is called at the end of each denoising step during inference. The function is called + with the following arguments: + `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. + `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableVideoDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableVideoDiffusionPipelineOutput`] is returned, + otherwise a `tuple` of (`List[List[PIL.Image.Image]]` or `np.ndarray` or `torch.FloatTensor`) is returned. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_frames = num_frames if num_frames is not None else self.unet.config.num_frames + decode_chunk_size = decode_chunk_size if decode_chunk_size is not None else num_frames + + # 1. Check inputs. Raise error if not correct + self.check_inputs(image, height, width) + + # 2. Define call parameters + if isinstance(image, PIL.Image.Image): + batch_size = 1 + elif isinstance(image, list): + batch_size = len(image) + else: + batch_size = image.shape[0] + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1` + # corresponds to doing no classifier free guidance. + self._guidance_scale = max_guidance_scale + + # 3. Encode input image + image_embeddings = self._encode_image(image, device, num_videos_per_prompt, self.do_classifier_free_guidance) + + # NOTE: Stable Video Diffusion was conditioned on fps - 1, which is why it is reduced here. + # See: https://github.com/Stability-AI/generative-models/blob/ed0997173f98eaf8f4edf7ba5fe8f15c6b877fd3/scripts/sampling/simple_video_sample.py#L188 + fps = fps - 1 + + # 4. Encode input image using VAE + image = self.image_processor.preprocess(image, height=height, width=width).to(device) + noise = randn_tensor(image.shape, generator=generator, device=device, dtype=image.dtype) + image = image + noise_aug_strength * noise + + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + if needs_upcasting: + self.vae.to(dtype=torch.float32) + + image_latents = self._encode_vae_image( + image, + device=device, + num_videos_per_prompt=num_videos_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + image_latents = image_latents.to(image_embeddings.dtype) + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + + # Repeat the image latents for each frame so we can concatenate them with the noise + # image_latents [batch, channels, height, width] ->[batch, num_frames, channels, height, width] + image_latents = image_latents.unsqueeze(1).repeat(1, num_frames, 1, 1, 1) + + # 5. Get Added Time IDs + added_time_ids = self._get_add_time_ids( + fps, + motion_bucket_id, + noise_aug_strength, + image_embeddings.dtype, + batch_size, + num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + added_time_ids = added_time_ids.to(device) + + # 6. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 7. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_frames, + num_channels_latents, + height, + width, + image_embeddings.dtype, + device, + generator, + latents, + ) + + # 8. Prepare guidance scale + guidance_scale = torch.linspace(min_guidance_scale, max_guidance_scale, num_frames).unsqueeze(0) + guidance_scale = guidance_scale.to(device, latents.dtype) + guidance_scale = guidance_scale.repeat(batch_size * num_videos_per_prompt, 1) + guidance_scale = _append_dims(guidance_scale, latents.ndim) + + self._guidance_scale = guidance_scale + + # 9. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # Concatenate image_latents over channels dimension + latent_model_input = torch.cat([latent_model_input, image_latents], dim=2) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=image_embeddings, + added_time_ids=added_time_ids, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if not output_type == "latent": + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + frames = self.decode_latents(latents, num_frames, decode_chunk_size) + frames = tensor2vid(frames, self.image_processor, output_type=output_type) + else: + frames = latents + + self.maybe_free_model_hooks() + + if not return_dict: + return frames + + return StableVideoDiffusionPipelineOutput(frames=frames) + + +# resizing utils +# TODO: clean up later +def _resize_with_antialiasing(input, size, interpolation="bicubic", align_corners=True): + h, w = input.shape[-2:] + factors = (h / size[0], w / size[1]) + + # First, we have to determine sigma + # Taken from skimage: https://github.com/scikit-image/scikit-image/blob/v0.19.2/skimage/transform/_warps.py#L171 + sigmas = ( + max((factors[0] - 1.0) / 2.0, 0.001), + max((factors[1] - 1.0) / 2.0, 0.001), + ) + + # Now kernel size. Good results are for 3 sigma, but that is kind of slow. Pillow uses 1 sigma + # https://github.com/python-pillow/Pillow/blob/master/src/libImaging/Resample.c#L206 + # But they do it in the 2 passes, which gives better results. 
Let's try 2 sigmas for now + ks = int(max(2.0 * 2 * sigmas[0], 3)), int(max(2.0 * 2 * sigmas[1], 3)) + + # Make sure it is odd + if (ks[0] % 2) == 0: + ks = ks[0] + 1, ks[1] + + if (ks[1] % 2) == 0: + ks = ks[0], ks[1] + 1 + + input = _gaussian_blur2d(input, ks, sigmas) + + output = torch.nn.functional.interpolate(input, size=size, mode=interpolation, align_corners=align_corners) + return output + + +def _compute_padding(kernel_size): + """Compute padding tuple.""" + # 4 or 6 ints: (padding_left, padding_right,padding_top,padding_bottom) + # https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad + if len(kernel_size) < 2: + raise AssertionError(kernel_size) + computed = [k - 1 for k in kernel_size] + + # for even kernels we need to do asymmetric padding :( + out_padding = 2 * len(kernel_size) * [0] + + for i in range(len(kernel_size)): + computed_tmp = computed[-(i + 1)] + + pad_front = computed_tmp // 2 + pad_rear = computed_tmp - pad_front + + out_padding[2 * i + 0] = pad_front + out_padding[2 * i + 1] = pad_rear + + return out_padding + + +def _filter2d(input, kernel): + # prepare kernel + b, c, h, w = input.shape + tmp_kernel = kernel[:, None, ...].to(device=input.device, dtype=input.dtype) + + tmp_kernel = tmp_kernel.expand(-1, c, -1, -1) + + height, width = tmp_kernel.shape[-2:] + + padding_shape: list[int] = _compute_padding([height, width]) + input = torch.nn.functional.pad(input, padding_shape, mode="reflect") + + # kernel and input tensor reshape to align element-wise or batch-wise params + tmp_kernel = tmp_kernel.reshape(-1, 1, height, width) + input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1)) + + # convolve the tensor with the kernel. + output = torch.nn.functional.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1) + + out = output.view(b, c, h, w) + return out + + +def _gaussian(window_size: int, sigma): + if isinstance(sigma, float): + sigma = torch.tensor([[sigma]]) + + batch_size = sigma.shape[0] + + x = (torch.arange(window_size, device=sigma.device, dtype=sigma.dtype) - window_size // 2).expand(batch_size, -1) + + if window_size % 2 == 0: + x = x + 0.5 + + gauss = torch.exp(-x.pow(2.0) / (2 * sigma.pow(2.0))) + + return gauss / gauss.sum(-1, keepdim=True) + + +def _gaussian_blur2d(input, kernel_size, sigma): + if isinstance(sigma, tuple): + sigma = torch.tensor([sigma], dtype=input.dtype) + else: + sigma = sigma.to(dtype=input.dtype) + + ky, kx = int(kernel_size[0]), int(kernel_size[1]) + bs = sigma.shape[0] + kernel_x = _gaussian(kx, sigma[:, 1].view(bs, 1)) + kernel_y = _gaussian(ky, sigma[:, 0].view(bs, 1)) + out_x = _filter2d(input, kernel_x[..., None, :]) + out = _filter2d(out_x, kernel_y[..., None]) + + return out diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/t2i_adapter/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/t2i_adapter/__init__.py new file mode 100644 index 000000000..08c22a270 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/t2i_adapter/__init__.py @@ -0,0 +1,47 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise 
OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_adapter"] = ["StableDiffusionAdapterPipeline"] + _import_structure["pipeline_stable_diffusion_xl_adapter"] = ["StableDiffusionXLAdapterPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_stable_diffusion_adapter import StableDiffusionAdapterPipeline + from .pipeline_stable_diffusion_xl_adapter import StableDiffusionXLAdapterPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py new file mode 100644 index 000000000..0b55bb38b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py @@ -0,0 +1,912 @@ +# Copyright 2024 TencentARC and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, MultiAdapter, T2IAdapter, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + PIL_INTERPOLATION, + USE_PEFT_BACKEND, + BaseOutput, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +@dataclass +class StableDiffusionAdapterPipelineOutput(BaseOutput): + """ + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 
+ nsfw_content_detected (`List[bool]`) + List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, or `None` if safety checking could not be performed. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from PIL import Image + >>> from diffusers.utils import load_image + >>> import torch + >>> from diffusers import StableDiffusionAdapterPipeline, T2IAdapter + + >>> image = load_image( + ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/color_ref.png" + ... ) + + >>> color_palette = image.resize((8, 8)) + >>> color_palette = color_palette.resize((512, 512), resample=Image.Resampling.NEAREST) + + >>> adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_color_sd14v1", torch_dtype=torch.float16) + >>> pipe = StableDiffusionAdapterPipeline.from_pretrained( + ... "CompVis/stable-diffusion-v1-4", + ... adapter=adapter, + ... torch_dtype=torch.float16, + ... ) + + >>> pipe.to("cuda") + + >>> out_image = pipe( + ... "At night, glowing cubes in front of the beach", + ... image=color_palette, + ... ).images[0] + ``` +""" + + +def _preprocess_adapter_image(image, height, width): + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image] + image = [ + i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image + ] # expand [h, w] or [h, w, c] to [b, h, w, c] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + if image[0].ndim == 3: + image = torch.stack(image, dim=0) + elif image[0].ndim == 4: + image = torch.cat(image, dim=0) + else: + raise ValueError( + f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}" + ) + return image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. 
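A minimal sketch of the two code paths described above, using a toy scheduler defined inline so the example stays self-contained (the scheduler and its step values are assumptions for illustration, not part of this file):

```py
import inspect
import torch


class _ToyScheduler:
    """Hypothetical stand-in whose `set_timesteps` accepts a custom `timesteps` list."""

    def set_timesteps(self, num_inference_steps=None, device=None, timesteps=None):
        if timesteps is not None:
            self.timesteps = torch.tensor(timesteps, device=device)
        else:
            self.timesteps = torch.linspace(999, 0, num_inference_steps, device=device).long()


def retrieve_timesteps(scheduler, num_inference_steps=None, device=None, timesteps=None, **kwargs):
    # Same logic as the helper above, reproduced here so the sketch runs on its own.
    if timesteps is not None:
        accepts = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters)
        if not accepts:
            raise ValueError(f"{scheduler.__class__} does not support custom timestep schedules.")
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        return scheduler.timesteps, len(scheduler.timesteps)
    scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
    return scheduler.timesteps, num_inference_steps


sched = _ToyScheduler()
ts, n = retrieve_timesteps(sched, timesteps=[999, 750, 500, 250, 1])
print(n, ts)        # 5 tensor([999, 750, 500, 250,   1])
ts, n = retrieve_timesteps(sched, num_inference_steps=30)
print(n, ts.shape)  # 30 torch.Size([30])
```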
+ """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionAdapterPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion augmented with T2I-Adapter + https://arxiv.org/abs/2302.08453 + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`): + Provides additional conditioning to the unet during the denoising process. If you set multiple Adapter as a + list, the outputs from each Adapter are added together to create one combined additional conditioning. + adapter_weights (`List[float]`, *optional*, defaults to None): + List of floats representing the weight which will be multiply to each adapter's output before adding them + together. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPFeatureExtractor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
+ """ + + model_cpu_offload_seq = "text_encoder->adapter->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]], + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPFeatureExtractor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if isinstance(adapter, (list, tuple)): + adapter = MultiAdapter(adapter) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + adapter=adapter, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
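To make the `clip_skip` indexing used just below concrete: with `output_hidden_states=True` the text encoder returns `num_layers + 1` hidden states (the embedding output plus one per layer), and `clip_skip=1` selects the pre-final layer. A toy illustration of the indexing only (no real CLIP weights involved; the layer count is an assumption):

```py
# hidden_states[i]: i = 0 is the embedding output, i = num_layers is the last layer's output.
num_layers = 12
hidden_states = [f"layer_{i}" for i in range(num_layers + 1)]

clip_skip = 1
selected = hidden_states[-(clip_skip + 1)]  # index 11: the pre-final layer output
print(selected)                             # "layer_11"

# The pipeline then applies the text model's final LayerNorm to the selected state;
# with clip_skip=None it uses last_hidden_state, which already includes that LayerNorm.
```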
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + 
images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + image, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if isinstance(self.adapter, MultiAdapter): + if not isinstance(image, list): + raise ValueError( + "MultiAdapter is enabled, but `image` is not a list. Please pass a list of images to `image`." + ) + + if len(image) != len(self.adapter.adapters): + raise ValueError( + f"MultiAdapter requires passing the same number of images as adapters. Given {len(image)} images and {len(self.adapter.adapters)} adapters." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def _default_height_width(self, height, width, image): + # NOTE: It is possible that a list of images have different + # dimensions for each image, so just checking the first image + # is not _exactly_ correct, but it is simple. + while isinstance(image, list): + image = image[0] + + if height is None: + if isinstance(image, PIL.Image.Image): + height = image.height + elif isinstance(image, torch.Tensor): + height = image.shape[-2] + + # round down to nearest multiple of `self.adapter.downscale_factor` + height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor + + if width is None: + if isinstance(image, PIL.Image.Image): + width = image.width + elif isinstance(image, torch.Tensor): + width = image.shape[-1] + + # round down to nearest multiple of `self.adapter.downscale_factor` + width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor + + return height, width + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the 
guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: Union[torch.Tensor, PIL.Image.Image, List[PIL.Image.Image]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + adapter_conditioning_scale: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`): + The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the + type is specified as `Torch.FloatTensor`, it is passed to Adapter as is. PIL.Image.Image` can also be + accepted as an image. The control image is automatically resized to fit the output image. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. 
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] instead + of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the + residual in the original unet. If multiple adapters are specified in init, you can set the + corresponding scale as a list. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a + `tuple. 
When returning a tuple, the first element is a list with the generated images, and the second + element is a list of `bool`s denoting whether the corresponding generated image likely represents + "not-safe-for-work" (nsfw) content, according to the `safety_checker`. + """ + # 0. Default height and width to unet + height, width = self._default_height_width(height, width, image) + device = self._execution_device + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, image, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + self._guidance_scale = guidance_scale + + if isinstance(self.adapter, MultiAdapter): + adapter_input = [] + + for one_image in image: + one_image = _preprocess_adapter_image(one_image, height, width) + one_image = one_image.to(device=device, dtype=self.adapter.dtype) + adapter_input.append(one_image) + else: + adapter_input = _preprocess_adapter_image(image, height, width) + adapter_input = adapter_input.to(device=device, dtype=self.adapter.dtype) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. 
Denoising loop + if isinstance(self.adapter, MultiAdapter): + adapter_state = self.adapter(adapter_input, adapter_conditioning_scale) + for k, v in enumerate(adapter_state): + adapter_state[k] = v + else: + adapter_state = self.adapter(adapter_input) + for k, v in enumerate(adapter_state): + adapter_state[k] = v * adapter_conditioning_scale + if num_images_per_prompt > 1: + for k, v in enumerate(adapter_state): + adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1) + if self.do_classifier_free_guidance: + for k, v in enumerate(adapter_state): + adapter_state[k] = torch.cat([v] * 2, dim=0) + + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=cross_attention_kwargs, + down_intrablock_additional_residuals=[state.clone() for state in adapter_state], + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if output_type == "latent": + image = latents + has_nsfw_concept = None + elif output_type == "pil": + # 8. Post-processing + image = self.decode_latents(latents) + + # 9. Run safety checker + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 10. Convert to PIL + image = self.numpy_to_pil(image) + else: + # 8. Post-processing + image = self.decode_latents(latents) + + # 9. Run safety checker + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionAdapterPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py new file mode 100644 index 000000000..4e0cc61f5 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -0,0 +1,1258 @@ +# Copyright 2024 TencentARC and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, MultiAdapter, T2IAdapter, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + PIL_INTERPOLATION, + USE_PEFT_BACKEND, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import T2IAdapter, StableDiffusionXLAdapterPipeline, DDPMScheduler + >>> from diffusers.utils import load_image + + >>> sketch_image = load_image("https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png").convert("L") + + >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0" + + >>> adapter = T2IAdapter.from_pretrained( + ... "Adapter/t2iadapter", + ... subfolder="sketch_sdxl_1.0", + ... torch_dtype=torch.float16, + ... adapter_type="full_adapter_xl", + ... ) + >>> scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler") + + >>> pipe = StableDiffusionXLAdapterPipeline.from_pretrained( + ... model_id, adapter=adapter, torch_dtype=torch.float16, variant="fp16", scheduler=scheduler + ... ).to("cuda") + + >>> generator = torch.manual_seed(42) + >>> sketch_image_out = pipe( + ... prompt="a photo of a dog in real world, high quality", + ... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality", + ... image=sketch_image, + ... generator=generator, + ... guidance_scale=7.5, + ... ).images[0] + ``` +""" + + +def _preprocess_adapter_image(image, height, width): + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image] + image = [ + i[None, ..., None] if i.ndim == 2 else i[None, ...] 
for i in image + ] # expand [h, w] or [h, w, c] to [b, h, w, c] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + if image[0].ndim == 3: + image = torch.stack(image, dim=0) + elif image[0].ndim == 4: + image = torch.cat(image, dim=0) + else: + raise ValueError( + f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}" + ) + return image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLAdapterPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion augmented with T2I-Adapter + https://arxiv.org/abs/2302.08453 + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`): + Provides additional conditioning to the unet during the denoising process. If you set multiple Adapter as a + list, the outputs from each Adapter are added together to create one combined additional conditioning. + adapter_weights (`List[float]`, *optional*, defaults to None): + List of floats representing the weight which will be multiply to each adapter's output before adding them + together. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPFeatureExtractor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]], + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + adapter=adapter, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.default_sample_size = self.unet.config.sample_size + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because 
SDXL always indexes from the penultimate layer. + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + 
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width + def _default_height_width(self, height, width, image): + # NOTE: It is possible that a list of images have different + # dimensions for each image, so just checking the first image + # is not _exactly_ correct, but it is simple. 
+ while isinstance(image, list): + image = image[0] + + if height is None: + if isinstance(image, PIL.Image.Image): + height = image.height + elif isinstance(image, torch.Tensor): + height = image.shape[-2] + + # round down to nearest multiple of `self.adapter.downscale_factor` + height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor + + if width is None: + if isinstance(image, PIL.Image.Image): + width = image.width + elif isinstance(image, torch.Tensor): + width = image.shape[-1] + + # round down to nearest multiple of `self.adapter.downscale_factor` + width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor + + return height, width + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
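+ # Illustrative note (mirrors the "perform guidance" step later in `__call__`):
+ # with w = guidance_scale, the guided prediction is computed as
+ #     noise_pred = noise_pred_uncond + w * (noise_pred_text - noise_pred_uncond)
+ # so setting `guidance_scale <= 1`, or using a guidance-distilled UNet
+ # (`unet.config.time_cond_proj_dim is not None`), disables classifier-free guidance
+ # and no unconditional branch is run.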
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + adapter_conditioning_scale: Union[float, List[float]] = 1.0, + adapter_conditioning_factor: float = 1.0, + clip_skip: Optional[int] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`): + The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the + type is specified as `Torch.FloatTensor`, it is passed to Adapter as is. PIL.Image.Image` can also be + accepted as an image. The control image is automatically resized to fit the output image. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. 
Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. + Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding + if `do_classifier_free_guidance` is set to `True`. + If not provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionAdapterPipelineOutput`] + instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). + Guidance rescale factor should fix overexposure when using zero terminal SNR. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). 
Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the + residual in the original unet. If multiple adapters are specified in init, you can set the + corresponding scale as a list. + adapter_conditioning_factor (`float`, *optional*, defaults to 1.0): + The fraction of timesteps for which adapter should be applied. If `adapter_conditioning_factor` is + `0.0`, adapter is not applied at all. If `adapter_conditioning_factor` is `1.0`, adapter is applied for + all timesteps. If `adapter_conditioning_factor` is `0.5`, adapter is applied for half of the timesteps. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + # 0. 
Default height and width to unet + + height, width = self._default_height_width(height, width, image) + device = self._execution_device + + if isinstance(self.adapter, MultiAdapter): + adapter_input = [] + + for one_image in image: + one_image = _preprocess_adapter_image(one_image, height, width) + one_image = one_image.to(device=device, dtype=self.adapter.dtype) + adapter_input.append(one_image) + else: + adapter_input = _preprocess_adapter_image(image, height, width) + adapter_input = adapter_input.to(device=device, dtype=self.adapter.dtype) + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + ) + + self._guidance_scale = guidance_scale + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3.1 Encode input prompt + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + clip_skip=clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6.1 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. 
Prepare added time ids & embeddings & adapter features + if isinstance(self.adapter, MultiAdapter): + adapter_state = self.adapter(adapter_input, adapter_conditioning_scale) + for k, v in enumerate(adapter_state): + adapter_state[k] = v + else: + adapter_state = self.adapter(adapter_input) + for k, v in enumerate(adapter_state): + adapter_state[k] = v * adapter_conditioning_scale + if num_images_per_prompt > 1: + for k, v in enumerate(adapter_state): + adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1) + if self.do_classifier_free_guidance: + for k, v in enumerate(adapter_state): + adapter_state[k] = torch.cat([v] * 2, dim=0) + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. 
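For orientation, the `add_time_ids` built by `_get_add_time_ids` above are the three micro-conditioning tuples flattened into one vector per sample; the snippet below is a simplified sketch of that convention, not the exact helper:

```py
import torch

def make_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=torch.float16):
    # (orig_h, orig_w, crop_top, crop_left, target_h, target_w) per sample
    ids = list(original_size) + list(crops_coords_top_left) + list(target_size)
    return torch.tensor([ids], dtype=dtype)

add_time_ids = make_add_time_ids((1024, 1024), (0, 0), (1024, 1024))
print(add_time_ids.shape)  # torch.Size([1, 6])
```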
Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + # Apply denoising_end + if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1: + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + # predict the noise residual + if i < int(num_inference_steps * adapter_conditioning_factor): + down_intrablock_additional_residuals = [state.clone() for state in adapter_state] + else: + down_intrablock_additional_residuals = None + + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=cross_attention_kwargs, + down_intrablock_additional_residuals=down_intrablock_additional_residuals, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + return StableDiffusionXLPipelineOutput(images=image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/__init__.py new file mode 100644 index 000000000..8d8fdb927 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/__init__.py @@ -0,0 +1,54 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_output"] = ["TextToVideoSDPipelineOutput"] + _import_structure["pipeline_text_to_video_synth"] = ["TextToVideoSDPipeline"] + _import_structure["pipeline_text_to_video_synth_img2img"] = ["VideoToVideoSDPipeline"] + _import_structure["pipeline_text_to_video_zero"] = ["TextToVideoZeroPipeline"] + _import_structure["pipeline_text_to_video_zero_sdxl"] = ["TextToVideoZeroSDXLPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_output import TextToVideoSDPipelineOutput + from .pipeline_text_to_video_synth import TextToVideoSDPipeline + from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline + from .pipeline_text_to_video_zero import TextToVideoZeroPipeline + from .pipeline_text_to_video_zero_sdxl import TextToVideoZeroSDXLPipeline + +else: + import sys + + sys.modules[__name__] = 
_LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py new file mode 100644 index 000000000..c155386cf --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py @@ -0,0 +1,25 @@ +from dataclasses import dataclass +from typing import List, Union + +import numpy as np +import PIL +import torch + +from ...utils import ( + BaseOutput, +) + + +@dataclass +class TextToVideoSDPipelineOutput(BaseOutput): + """ + Output class for text-to-video pipelines. + + Args: + frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): + List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing denoised + PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape + `(batch_size, num_frames, channels, height, width)` + """ + + frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]] diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py new file mode 100644 index 000000000..6c33836e6 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py @@ -0,0 +1,663 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import CLIPTextModel, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet3DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . 
import TextToVideoSDPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import TextToVideoSDPipeline + >>> from diffusers.utils import export_to_video + + >>> pipe = TextToVideoSDPipeline.from_pretrained( + ... "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16" + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "Spiderman is surfing" + >>> video_frames = pipe(prompt).frames[0] + >>> video_path = export_to_video(video_frames) + >>> video_path + ``` +""" + + +# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid +def tensor2vid(video: torch.Tensor, processor: "VaeImageProcessor", output_type: str = "np"): + batch_size, channels, num_frames, height, width = video.shape + outputs = [] + for batch_idx in range(batch_size): + batch_vid = video[batch_idx].permute(1, 0, 2, 3) + batch_output = processor.postprocess(batch_vid, output_type) + + outputs.append(batch_output) + + if output_type == "np": + outputs = np.stack(outputs) + + elif output_type == "pt": + outputs = torch.stack(outputs) + + elif not output_type == "pil": + raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']") + + return outputs + + +class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin): + r""" + Pipeline for text-to-video generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet3DConditionModel`]): + A [`UNet3DConditionModel`] to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
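As a quick shape reference for the `tensor2vid` helper defined above, which splits a decoded `(batch, channels, frames, height, width)` tensor into per-clip frame stacks before post-processing, here is a standalone sketch with random data rather than real decoder output:

```py
import torch

video = torch.rand(2, 3, 16, 64, 64)  # (batch, channels, frames, height, width)
clips = [clip.permute(1, 0, 2, 3) for clip in video]  # each clip: (frames, channels, h, w)
print(clips[0].shape)  # torch.Size([16, 3, 64, 64])
```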
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet3DConditionModel, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
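# Illustrative note, not from the original source: with output_hidden_states=True
# the encoder returns all per-layer hidden states, and `prompt_embeds[-1][-(clip_skip + 1)]`
# above selects the layer `clip_skip` steps before the last one. For example, with
# clip_skip=1:
#     hidden_states = prompt_embeds[-1]    # tuple of per-layer hidden states
#     selected = hidden_states[-(1 + 1)]   # the penultimate encoder layer
# The `final_layer_norm` applied on the next line mimics what the usual
# last_hidden_state would already have passed through.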
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + image = self.vae.decode(latents).sample + video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_frames: int = 16, + num_inference_steps: int = 50, + guidance_scale: float = 9.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "np", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated video. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated video. + num_frames (`int`, *optional*, defaults to 16): + The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds + amounts to 2 seconds of video. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality videos at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. Latents should be of shape + `(batch_size, num_channel, num_frames, height, width)`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"np"`): + The output format of the generated video. Choose between `torch.FloatTensor` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead + of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_images_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
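# Illustrative note, not from the original source: the guidance step later in this
# function combines the two branches as
#     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# so guidance_scale = 1.0 reduces to the purely text-conditioned prediction, while
# larger values (the default here is 9.0) push the sample further away from the
# unconditional prediction.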
+ do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # reshape latents + bsz, channel, frames, width, height = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) + noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # reshape latents back + latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 8. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents) + video = tensor2vid(video_tensor, self.image_processor, output_type) + + # 9. 
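The permute/reshape pair around `scheduler.step` above exists because the schedulers operate on 4-D `(batch, channels, height, width)` tensors; a self-contained sketch of that lossless round trip (shapes only, no scheduler involved) is:

```py
import torch

bsz, channel, frames, height, width = 1, 4, 16, 40, 72
latents = torch.randn(bsz, channel, frames, height, width)

# (b, c, f, h, w) -> (b * f, c, h, w): the layout handed to scheduler.step
flat = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, height, width)

# ... scheduler.step(...) would run on `flat` here ...

# back to (b, c, f, h, w)
restored = flat[None, :].reshape(bsz, frames, channel, height, width).permute(0, 2, 1, 3, 4)
assert torch.equal(restored, latents)  # the round trip preserves every value
```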
Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return TextToVideoSDPipelineOutput(frames=video) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py new file mode 100644 index 000000000..3901946af --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py @@ -0,0 +1,760 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPTextModel, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet3DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . import TextToVideoSDPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler + >>> from diffusers.utils import export_to_video + + >>> pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16) + >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + >>> pipe.to("cuda") + + >>> prompt = "spiderman running in the desert" + >>> video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames[0] + >>> # safe low-res video + >>> video_path = export_to_video(video_frames, output_video_path="./video_576_spiderman.mp4") + + >>> # let's offload the text-to-image model + >>> pipe.to("cpu") + + >>> # and load the image-to-image model + >>> pipe = DiffusionPipeline.from_pretrained( + ... "cerspense/zeroscope_v2_XL", torch_dtype=torch.float16, revision="refs/pr/15" + ... 
) + >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + >>> pipe.enable_model_cpu_offload() + + >>> # The VAE consumes A LOT of memory, let's make sure we run it in sliced mode + >>> pipe.vae.enable_slicing() + + >>> # now let's upscale it + >>> video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_frames] + + >>> # and denoise it + >>> video_frames = pipe(prompt, video=video, strength=0.6).frames[0] + >>> video_path = export_to_video(video_frames, output_video_path="./video_1024_spiderman.mp4") + >>> video_path + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid +def tensor2vid(video: torch.Tensor, processor: "VaeImageProcessor", output_type: str = "np"): + batch_size, channels, num_frames, height, width = video.shape + outputs = [] + for batch_idx in range(batch_size): + batch_vid = video[batch_idx].permute(1, 0, 2, 3) + batch_output = processor.postprocess(batch_vid, output_type) + + outputs.append(batch_output) + + if output_type == "np": + outputs = np.stack(outputs) + + elif output_type == "pt": + outputs = torch.stack(outputs) + + elif not output_type == "pil": + raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']") + + return outputs + + +def preprocess_video(video): + supported_formats = (np.ndarray, torch.Tensor, PIL.Image.Image) + + if isinstance(video, supported_formats): + video = [video] + elif not (isinstance(video, list) and all(isinstance(i, supported_formats) for i in video)): + raise ValueError( + f"Input is in incorrect format: {[type(i) for i in video]}. Currently, we only support {', '.join(supported_formats)}" + ) + + if isinstance(video[0], PIL.Image.Image): + video = [np.array(frame) for frame in video] + + if isinstance(video[0], np.ndarray): + video = np.concatenate(video, axis=0) if video[0].ndim == 5 else np.stack(video, axis=0) + + if video.dtype == np.uint8: + video = np.array(video).astype(np.float32) / 255.0 + + if video.ndim == 4: + video = video[None, ...] + + video = torch.from_numpy(video.transpose(0, 4, 1, 2, 3)) + + elif isinstance(video[0], torch.Tensor): + video = torch.cat(video, axis=0) if video[0].ndim == 5 else torch.stack(video, axis=0) + + # don't need any preprocess if the video is latents + channel = video.shape[1] + if channel == 4: + return video + + # move channels before num_frames + video = video.permute(0, 2, 1, 3, 4) + + # normalize video + video = 2.0 * video - 1.0 + + return video + + +class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin): + r""" + Pipeline for text-guided video-to-video generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
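For reference, the numpy branch of the `preprocess_video` helper defined above boils down to the following normalization; this is a standalone sketch on dummy frames, not a call into the pipeline module:

```py
import numpy as np
import torch

# 16 dummy RGB frames in HWC uint8 layout, as PIL frames convert to
frames = [np.random.randint(0, 256, (320, 576, 3), dtype=np.uint8) for _ in range(16)]

video = np.stack(frames, axis=0).astype(np.float32) / 255.0  # (f, h, w, c) in [0, 1]
video = video[None, ...]                                     # add batch dim: (1, f, h, w, c)
video = torch.from_numpy(video.transpose(0, 4, 1, 2, 3))     # (1, c, f, h, w)
video = 2.0 * video - 1.0                                    # scale to [-1, 1]

print(video.shape)  # torch.Size([1, 3, 16, 320, 576])
```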
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet3DConditionModel`]): + A [`UNet3DConditionModel`] to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet3DConditionModel, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. 
Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. + prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + image = self.vae.decode(latents).sample + video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, video, timestep, batch_size, dtype, device, generator=None): + video = video.to(device=device, dtype=dtype) + + # change from (b, c, f, h, w) -> (b * f, c, w, h) + bsz, channel, frames, width, height = video.shape + video = video.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) + + if video.shape[1] == 4: + init_latents = video + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(video[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(video), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `video` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + latents = latents[None, :].reshape((bsz, frames, latents.shape[1]) + latents.shape[2:]).permute(0, 2, 1, 3, 4) + + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + video: Union[List[np.ndarray], torch.FloatTensor] = None, + strength: float = 0.6, + num_inference_steps: int = 50, + guidance_scale: float = 15.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "np", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + video (`List[np.ndarray]` or `torch.FloatTensor`): + `video` frames or tensor representing a video batch to be used as the starting point for the process. 
+                The `video` argument can also accept latents directly; in that case they will not be encoded again.
+            strength (`float`, *optional*, defaults to 0.6):
+                Indicates extent to transform the reference `video`. Must be between 0 and 1. `video` is used as a
+                starting point, adding more noise to it the larger the `strength`. The number of denoising steps
+                depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the
+                denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of
+                1 essentially ignores `video`.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to higher quality videos at the
+                expense of slower inference.
+            guidance_scale (`float`, *optional*, defaults to 15.0):
+                A higher guidance scale value encourages the model to generate images closely linked to the text
+                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide what not to include in video generation. If not defined, you need to
+                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            latents (`torch.FloatTensor`, *optional*):
+                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
+                `(batch_size, num_channel, num_frames, height, width)`.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+                provided, text embeddings are generated from the `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            output_type (`str`, *optional*, defaults to `"np"`):
+                The output format of the generated video. Choose between `torch.FloatTensor` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
+                of a plain tuple.
+            callback (`Callable`, *optional*):
+                A function that is called every `callback_steps` steps during inference. The function is called with
+                the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function is called. If not specified, the callback is called at
+                every step.
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + # 0. Default height and width to unet + num_images_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Preprocess video + video = preprocess_video(video) + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 6. Prepare latent variables + latents = self.prepare_latents(video, latent_timestep, batch_size, prompt_embeds.dtype, device, generator) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # reshape latents + bsz, channel, frames, width, height = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) + noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # reshape latents back + latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + + # 9. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents) + video = tensor2vid(video_tensor, self.image_processor, output_type) + + # 10. 
Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return TextToVideoSDPipelineOutput(frames=video) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py new file mode 100644 index 000000000..d3ff3728c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py @@ -0,0 +1,969 @@ +import copy +import inspect +from dataclasses import dataclass +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from torch.nn.functional import grid_sample +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def rearrange_0(tensor, f): + F, C, H, W = tensor.size() + tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) + return tensor + + +def rearrange_1(tensor): + B, C, F, H, W = tensor.size() + return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) + + +def rearrange_3(tensor, f): + F, D, C = tensor.size() + return torch.reshape(tensor, (F // f, f, D, C)) + + +def rearrange_4(tensor): + B, F, D, C = tensor.size() + return torch.reshape(tensor, (B * F, D, C)) + + +class CrossFrameAttnProcessor: + """ + Cross frame attention processor. Each frame attends the first frame. + + Args: + batch_size: The number that represents actual batch size, other than the frames. + For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to + 2, due to classifier-free guidance. 
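+
+    A minimal usage sketch (the `pipe` name is an assumption, standing in for an already-constructed Stable
+    Diffusion pipeline; the pipeline below sets this processor itself in its `__init__`):
+
+        pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))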
+ """ + + def __init__(self, batch_size=2): + self.batch_size = batch_size + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + query = attn.to_q(hidden_states) + + is_cross_attention = encoder_hidden_states is not None + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + # Cross Frame Attention + if not is_cross_attention: + video_length = key.size()[0] // self.batch_size + first_frame_index = [0] * video_length + + # rearrange keys to have batch and frames in the 1st and 2nd dims respectively + key = rearrange_3(key, video_length) + key = key[:, first_frame_index] + # rearrange values to have batch and frames in the 1st and 2nd dims respectively + value = rearrange_3(value, video_length) + value = value[:, first_frame_index] + + # rearrange back to original shape + key = rearrange_4(key) + value = rearrange_4(value) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +class CrossFrameAttnProcessor2_0: + """ + Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. + + Args: + batch_size: The number that represents actual batch size, other than the frames. + For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to + 2, due to classifier-free guidance. 
+ """ + + def __init__(self, batch_size=2): + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + self.batch_size = batch_size + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + inner_dim = hidden_states.shape[-1] + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + query = attn.to_q(hidden_states) + + is_cross_attention = encoder_hidden_states is not None + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + # Cross Frame Attention + if not is_cross_attention: + video_length = max(1, key.size()[0] // self.batch_size) + first_frame_index = [0] * video_length + + # rearrange keys to have batch and frames in the 1st and 2nd dims respectively + key = rearrange_3(key, video_length) + key = key[:, first_frame_index] + # rearrange values to have batch and frames in the 1st and 2nd dims respectively + value = rearrange_3(value, video_length) + value = value[:, first_frame_index] + + # rearrange back to original shape + key = rearrange_4(key) + value = rearrange_4(value) + + head_dim = inner_dim // attn.heads + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + return hidden_states + + +@dataclass +class TextToVideoPipelineOutput(BaseOutput): + r""" + Output class for zero-shot text-to-video pipeline. + + Args: + images (`[List[PIL.Image.Image]`, `np.ndarray`]): + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + nsfw_content_detected (`[List[bool]]`): + List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or + `None` if safety checking could not be performed. 
+ """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] + + +def coords_grid(batch, ht, wd, device): + # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py + coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) + coords = torch.stack(coords[::-1], dim=0).float() + return coords[None].repeat(batch, 1, 1, 1) + + +def warp_single_latent(latent, reference_flow): + """ + Warp latent of a single frame with given flow + + Args: + latent: latent code of a single frame + reference_flow: flow which to warp the latent with + + Returns: + warped: warped latent + """ + _, _, H, W = reference_flow.size() + _, _, h, w = latent.size() + coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) + + coords_t0 = coords0 + reference_flow + coords_t0[:, 0] /= W + coords_t0[:, 1] /= H + + coords_t0 = coords_t0 * 2.0 - 1.0 + coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") + coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) + + warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") + return warped + + +def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): + """ + Create translation motion field + + Args: + motion_field_strength_x: motion strength along x-axis + motion_field_strength_y: motion strength along y-axis + frame_ids: indexes of the frames the latents of which are being processed. + This is needed when we perform chunk-by-chunk inference + device: device + dtype: dtype + + Returns: + + """ + seq_length = len(frame_ids) + reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) + for fr_idx in range(seq_length): + reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) + reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) + return reference_flow + + +def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): + """ + Creates translation motion and warps the latents accordingly + + Args: + motion_field_strength_x: motion strength along x-axis + motion_field_strength_y: motion strength along y-axis + frame_ids: indexes of the frames the latents of which are being processed. + This is needed when we perform chunk-by-chunk inference + latents: latent codes of frames + + Returns: + warped_latents: warped latents + """ + motion_field = create_motion_field( + motion_field_strength_x=motion_field_strength_x, + motion_field_strength_y=motion_field_strength_y, + frame_ids=frame_ids, + device=latents.device, + dtype=latents.dtype, + ) + warped_latents = latents.clone().detach() + for i in range(len(warped_latents)): + warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) + return warped_latents + + +class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin): + r""" + Pipeline for zero-shot text-to-video generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. 
+ text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet3DConditionModel`] to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`CLIPImageProcessor`]): + A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. + """ + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + processor = ( + CrossFrameAttnProcessor2_0(batch_size=2) + if hasattr(F, "scaled_dot_product_attention") + else CrossFrameAttnProcessor(batch_size=2) + ) + self.unet.set_attn_processor(processor) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + def forward_loop(self, x_t0, t0, t1, generator): + """ + Perform DDPM forward process from time t0 to t1. This is the same as adding noise with corresponding variance. + + Args: + x_t0: + Latent code at time t0. + t0: + Timestep at t0. + t1: + Timestamp at t1. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + + Returns: + x_t1: + Forward process applied to x_t0 from time t0 to t1. 
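+
+        Concretely, this applies the closed-form DDPM noising step implemented below:
+        `x_t1 = sqrt(alpha_bar) * x_t0 + sqrt(1 - alpha_bar) * eps`, where `alpha_bar` is the product of the
+        scheduler's `alphas` over the slice `[t0, t1)` and `eps` is standard Gaussian noise drawn with `generator`.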
+ """ + eps = randn_tensor(x_t0.size(), generator=generator, dtype=x_t0.dtype, device=x_t0.device) + alpha_vec = torch.prod(self.scheduler.alphas[t0:t1]) + x_t1 = torch.sqrt(alpha_vec) * x_t0 + torch.sqrt(1 - alpha_vec) * eps + return x_t1 + + def backward_loop( + self, + latents, + timesteps, + prompt_embeds, + guidance_scale, + callback, + callback_steps, + num_warmup_steps, + extra_step_kwargs, + cross_attention_kwargs=None, + ): + """ + Perform backward process given list of time steps. + + Args: + latents: + Latents at time timesteps[0]. + timesteps: + Time steps along which to perform backward process. + prompt_embeds: + Pre-generated text embeddings. + guidance_scale: + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + extra_step_kwargs: + Extra_step_kwargs. + cross_attention_kwargs: + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + num_warmup_steps: + number of warmup steps. + + Returns: + latents: + Latents of backward process output at time timesteps[-1]. + """ + do_classifier_free_guidance = guidance_scale > 1.0 + num_steps = (len(timesteps) - num_warmup_steps) // self.scheduler.order + with self.progress_bar(total=num_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + return latents.clone().detach() + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is 
not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + video_length: Optional[int] = 8, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + motion_field_strength_x: float = 12, + motion_field_strength_y: float = 12, + output_type: Optional[str] = "tensor", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + t0: int = 44, + t1: int = 47, + frame_ids: Optional[List[int]] = None, + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + video_length (`int`, *optional*, defaults to 8): + The number of generated video frames. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in video generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_videos_per_prompt (`int`, *optional*, defaults to 1): + The number of videos to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"numpy"`): + The output format of the generated video. Choose between `"latent"` and `"numpy"`. 
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a
+                [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`] instead of
+                a plain tuple.
+            callback (`Callable`, *optional*):
+                A function that is called every `callback_steps` steps during inference. The function is called with
+                the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function is called. If not specified, the callback is called at
+                every step.
+            motion_field_strength_x (`float`, *optional*, defaults to 12):
+                Strength of motion in generated video along x-axis. See the [paper](https://arxiv.org/abs/2303.13439),
+                Sect. 3.3.1.
+            motion_field_strength_y (`float`, *optional*, defaults to 12):
+                Strength of motion in generated video along y-axis. See the [paper](https://arxiv.org/abs/2303.13439),
+                Sect. 3.3.1.
+            t0 (`int`, *optional*, defaults to 44):
+                Timestep t0. Should be in the range [0, num_inference_steps - 1]. See the
+                [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1.
+            t1 (`int`, *optional*, defaults to 47):
+                Timestep t1. Should be in the range [t0 + 1, num_inference_steps - 1]. See the
+                [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1.
+            frame_ids (`List[int]`, *optional*):
+                Indexes of the frames that are being generated. This is used when generating longer videos
+                chunk-by-chunk.
+
+        Returns:
+            [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`]:
+                The output contains an `ndarray` of the generated video, when `output_type` != `"latent"`; otherwise a
+                latent code of generated videos and a list of `bool`s indicating whether the corresponding generated
+                video contains "not-safe-for-work" (nsfw) content.
+        """
+        assert video_length > 0
+        if frame_ids is None:
+            frame_ids = list(range(video_length))
+        assert len(frame_ids) == video_length
+
+        assert num_videos_per_prompt == 1
+
+        if isinstance(prompt, str):
+            prompt = [prompt]
+        if isinstance(negative_prompt, str):
+            negative_prompt = [negative_prompt]
+
+        # Default height and width to unet
+        height = height or self.unet.config.sample_size * self.vae_scale_factor
+        width = width or self.unet.config.sample_size * self.vae_scale_factor
+
+        # Check inputs. Raise error if not correct
+        self.check_inputs(prompt, height, width, callback_steps)
+
+        # Define call parameters
+        batch_size = 1 if isinstance(prompt, str) else len(prompt)
+        device = self._execution_device
+        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+        # corresponds to doing no classifier free guidance.
+        do_classifier_free_guidance = guidance_scale > 1.0
+
+        # Encode input prompt
+        prompt_embeds_tuple = self.encode_prompt(
+            prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt
+        )
+        prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
+
+        # Prepare timesteps
+        self.scheduler.set_timesteps(num_inference_steps, device=device)
+        timesteps = self.scheduler.timesteps
+
+        # Prepare latent variables
+        num_channels_latents = self.unet.config.in_channels
+        latents = self.prepare_latents(
+            batch_size * num_videos_per_prompt,
+            num_channels_latents,
+            height,
+            width,
+            prompt_embeds.dtype,
+            device,
+            generator,
+            latents,
+        )
+        # Prepare extra step kwargs.
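+        # (only the `eta`/`generator` arguments that the scheduler's `step()` accepts are forwarded)
+        #
+        # The sampling below follows the Text2Video-Zero procedure (https://arxiv.org/abs/2303.13439):
+        #   1. denoise the first-frame latent down to timestep t1, then further down to t0;
+        #   2. replicate the t0 latent for the remaining frames and warp it with the translation motion field;
+        #   3. re-noise the warped latents back to t1 with `forward_loop`;
+        #   4. denoise all frames jointly from t1 to 0, with cross-frame attention keeping them consistent with
+        #      the first frame.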
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # Perform the first backward process up to time T_1 + x_1_t1 = self.backward_loop( + timesteps=timesteps[: -t1 - 1], + prompt_embeds=prompt_embeds, + latents=latents, + guidance_scale=guidance_scale, + callback=callback, + callback_steps=callback_steps, + extra_step_kwargs=extra_step_kwargs, + num_warmup_steps=num_warmup_steps, + ) + scheduler_copy = copy.deepcopy(self.scheduler) + + # Perform the second backward process up to time T_0 + x_1_t0 = self.backward_loop( + timesteps=timesteps[-t1 - 1 : -t0 - 1], + prompt_embeds=prompt_embeds, + latents=x_1_t1, + guidance_scale=guidance_scale, + callback=callback, + callback_steps=callback_steps, + extra_step_kwargs=extra_step_kwargs, + num_warmup_steps=0, + ) + + # Propagate first frame latents at time T_0 to remaining frames + x_2k_t0 = x_1_t0.repeat(video_length - 1, 1, 1, 1) + + # Add motion in latents at time T_0 + x_2k_t0 = create_motion_field_and_warp_latents( + motion_field_strength_x=motion_field_strength_x, + motion_field_strength_y=motion_field_strength_y, + latents=x_2k_t0, + frame_ids=frame_ids[1:], + ) + + # Perform forward process up to time T_1 + x_2k_t1 = self.forward_loop( + x_t0=x_2k_t0, + t0=timesteps[-t0 - 1].item(), + t1=timesteps[-t1 - 1].item(), + generator=generator, + ) + + # Perform backward process from time T_1 to 0 + x_1k_t1 = torch.cat([x_1_t1, x_2k_t1]) + b, l, d = prompt_embeds.size() + prompt_embeds = prompt_embeds[:, None].repeat(1, video_length, 1, 1).reshape(b * video_length, l, d) + + self.scheduler = scheduler_copy + x_1k_0 = self.backward_loop( + timesteps=timesteps[-t1 - 1 :], + prompt_embeds=prompt_embeds, + latents=x_1k_t1, + guidance_scale=guidance_scale, + callback=callback, + callback_steps=callback_steps, + extra_step_kwargs=extra_step_kwargs, + num_warmup_steps=0, + ) + latents = x_1k_0 + + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + torch.cuda.empty_cache() + + if output_type == "latent": + image = latents + has_nsfw_concept = None + else: + image = self.decode_latents(latents) + # Run safety checker + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return TextToVideoPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same 
signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
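+
+        Returns:
+            A tuple `(prompt_embeds, negative_prompt_embeds)`. When classifier-free guidance is disabled and no
+            `negative_prompt_embeds` are passed in, the second element is `None`.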
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py new file mode 100644 index 000000000..eaa276036 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py @@ -0,0 +1,1315 @@ +import copy +import inspect +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL +import torch +import torch.nn.functional as F +from torch.nn.functional import grid_sample +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import VaeImageProcessor +from ...loaders import StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + FusedAttnProcessor2_0, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + BaseOutput, + is_invisible_watermark_available, + logging, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_0 +def rearrange_0(tensor, f): + F, C, H, W = tensor.size() + tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) + return tensor + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_1 +def rearrange_1(tensor): + B, C, F, H, W = tensor.size() + return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_3 +def rearrange_3(tensor, f): + F, D, C = tensor.size() + return torch.reshape(tensor, (F // f, f, D, C)) + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_4 +def rearrange_4(tensor): + B, F, D, C = tensor.size() + return torch.reshape(tensor, (B * F, D, C)) + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor +class CrossFrameAttnProcessor: + """ + Cross frame attention processor. Each frame attends the first frame. + + Args: + batch_size: The number that represents actual batch size, other than the frames. + For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to + 2, due to classifier-free guidance. 
+ """ + + def __init__(self, batch_size=2): + self.batch_size = batch_size + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + query = attn.to_q(hidden_states) + + is_cross_attention = encoder_hidden_states is not None + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + # Cross Frame Attention + if not is_cross_attention: + video_length = key.size()[0] // self.batch_size + first_frame_index = [0] * video_length + + # rearrange keys to have batch and frames in the 1st and 2nd dims respectively + key = rearrange_3(key, video_length) + key = key[:, first_frame_index] + # rearrange values to have batch and frames in the 1st and 2nd dims respectively + value = rearrange_3(value, video_length) + value = value[:, first_frame_index] + + # rearrange back to original shape + key = rearrange_4(key) + value = rearrange_4(value) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor2_0 +class CrossFrameAttnProcessor2_0: + """ + Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. + + Args: + batch_size: The number that represents actual batch size, other than the frames. + For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to + 2, due to classifier-free guidance. 
+ """ + + def __init__(self, batch_size=2): + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + self.batch_size = batch_size + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + inner_dim = hidden_states.shape[-1] + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + query = attn.to_q(hidden_states) + + is_cross_attention = encoder_hidden_states is not None + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + # Cross Frame Attention + if not is_cross_attention: + video_length = max(1, key.size()[0] // self.batch_size) + first_frame_index = [0] * video_length + + # rearrange keys to have batch and frames in the 1st and 2nd dims respectively + key = rearrange_3(key, video_length) + key = key[:, first_frame_index] + # rearrange values to have batch and frames in the 1st and 2nd dims respectively + value = rearrange_3(value, video_length) + value = value[:, first_frame_index] + + # rearrange back to original shape + key = rearrange_4(key) + value = rearrange_4(value) + + head_dim = inner_dim // attn.heads + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + return hidden_states + + +@dataclass +class TextToVideoSDXLPipelineOutput(BaseOutput): + """ + Output class for zero-shot text-to-video pipeline. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 
+ """ + + images: Union[List[PIL.Image.Image], np.ndarray] + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.coords_grid +def coords_grid(batch, ht, wd, device): + # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py + coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) + coords = torch.stack(coords[::-1], dim=0).float() + return coords[None].repeat(batch, 1, 1, 1) + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.warp_single_latent +def warp_single_latent(latent, reference_flow): + """ + Warp latent of a single frame with given flow + + Args: + latent: latent code of a single frame + reference_flow: flow which to warp the latent with + + Returns: + warped: warped latent + """ + _, _, H, W = reference_flow.size() + _, _, h, w = latent.size() + coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) + + coords_t0 = coords0 + reference_flow + coords_t0[:, 0] /= W + coords_t0[:, 1] /= H + + coords_t0 = coords_t0 * 2.0 - 1.0 + coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") + coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) + + warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") + return warped + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field +def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): + """ + Create translation motion field + + Args: + motion_field_strength_x: motion strength along x-axis + motion_field_strength_y: motion strength along y-axis + frame_ids: indexes of the frames the latents of which are being processed. + This is needed when we perform chunk-by-chunk inference + device: device + dtype: dtype + + Returns: + + """ + seq_length = len(frame_ids) + reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) + for fr_idx in range(seq_length): + reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) + reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) + return reference_flow + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field_and_warp_latents +def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): + """ + Creates translation motion and warps the latents accordingly + + Args: + motion_field_strength_x: motion strength along x-axis + motion_field_strength_y: motion strength along y-axis + frame_ids: indexes of the frames the latents of which are being processed. + This is needed when we perform chunk-by-chunk inference + latents: latent codes of frames + + Returns: + warped_latents: warped latents + """ + motion_field = create_motion_field( + motion_field_strength_x=motion_field_strength_x, + motion_field_strength_y=motion_field_strength_y, + frame_ids=frame_ids, + device=latents.device, + dtype=latents.dtype, + ) + warped_latents = latents.clone().detach() + for i in range(len(warped_latents)): + warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) + return warped_latents + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. 
Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +class TextToVideoZeroSDXLPipeline( + DiffusionPipeline, + StableDiffusionMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +): + r""" + Pipeline for zero-shot text-to-video generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
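+
+    Example (illustrative sketch only; the checkpoint id below is an assumption, not something this file pins):
+
+        >>> import torch
+        >>> from diffusers import TextToVideoZeroSDXLPipeline
+
+        >>> # any SDXL-compatible checkpoint should work here
+        >>> pipe = TextToVideoZeroSDXLPipeline.from_pretrained(
+        ...     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+        ... ).to("cuda")
+        >>> result = pipe(prompt="A panda surfing a wave", video_length=8)
+        >>> frames = result.images  # generated frames; format depends on `output_type`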
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + ): + super().__init__() + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.default_sample_size = self.unet.config.sample_size + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + processor = ( + CrossFrameAttnProcessor2_0(batch_size=2) + if hasattr(F, "scaled_dot_product_attention") + else CrossFrameAttnProcessor(batch_size=2) + ) + + self.unet.set_attn_processor(processor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + LoRAXFormersAttnProcessor, + LoRAAttnProcessor2_0, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." 
+ ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoZeroPipeline.forward_loop + def forward_loop(self, x_t0, t0, t1, generator): + """ + Perform DDPM forward process from time t0 to t1. This is the same as adding noise with corresponding variance. + + Args: + x_t0: + Latent code at time t0. + t0: + Timestep at t0. + t1: + Timestamp at t1. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + + Returns: + x_t1: + Forward process applied to x_t0 from time t0 to t1. + """ + eps = randn_tensor(x_t0.size(), generator=generator, dtype=x_t0.dtype, device=x_t0.device) + alpha_vec = torch.prod(self.scheduler.alphas[t0:t1]) + x_t1 = torch.sqrt(alpha_vec) * x_t0 + torch.sqrt(1 - alpha_vec) * eps + return x_t1 + + def backward_loop( + self, + latents, + timesteps, + prompt_embeds, + guidance_scale, + callback, + callback_steps, + num_warmup_steps, + extra_step_kwargs, + add_text_embeds, + add_time_ids, + cross_attention_kwargs=None, + guidance_rescale: float = 0.0, + ): + """ + Perform backward process given list of time steps + + Args: + latents: + Latents at time timesteps[0]. + timesteps: + Time steps along which to perform backward process. + prompt_embeds: + Pre-generated text embeddings. + guidance_scale: + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + extra_step_kwargs: + Extra_step_kwargs. + cross_attention_kwargs: + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + num_warmup_steps: + number of warmup steps. 
+ + Returns: + latents: latents of backward process output at time timesteps[-1] + """ + + do_classifier_free_guidance = guidance_scale > 1.0 + num_steps = (len(timesteps) - num_warmup_steps) // self.scheduler.order + + with self.progress_bar(total=num_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + return latents.clone().detach() + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + prompt_2: Optional[Union[str, List[str]]] = None, + video_length: Optional[int] = 8, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + denoising_end: Optional[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + frame_ids: Optional[List[int]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + latents: Optional[torch.FloatTensor] = None, + motion_field_strength_x: float = 12, + motion_field_strength_y: float = 12, + output_type: Optional[str] = "tensor", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + t0: int = 44, + t1: int = 47, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + video_length (`int`, *optional*, defaults to 8): + The number of generated video frames. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_videos_per_prompt (`int`, *optional*, defaults to 1): + The number of videos to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + frame_ids (`List[int]`, *optional*): + Indexes of the frames that are being generated. This is used when generating longer videos + chunk-by-chunk. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+                input argument.
+            latents (`torch.FloatTensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            motion_field_strength_x (`float`, *optional*, defaults to 12):
+                Strength of motion in generated video along x-axis. See the [paper](https://arxiv.org/abs/2303.13439),
+                Sect. 3.3.1.
+            motion_field_strength_y (`float`, *optional*, defaults to 12):
+                Strength of motion in generated video along y-axis. See the [paper](https://arxiv.org/abs/2303.13439),
+                Sect. 3.3.1.
+            output_type (`str`, *optional*, defaults to `"tensor"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
+                of a plain tuple.
+            callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference. The function will be
+                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function will be called. If not specified, the callback will be
+                called at every step.
+            cross_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
+            guidance_rescale (`float`, *optional*, defaults to 0.0):
+                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
+                Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
+                [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
+                Guidance rescale factor should fix overexposure when using zero terminal SNR.
+            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
+                `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
+                explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
+                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
+                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + t0 (`int`, *optional*, defaults to 44): + Timestep t0. Should be in the range [0, num_inference_steps - 1]. See the + [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. + t1 (`int`, *optional*, defaults to 47): + Timestep t0. Should be in the range [t0 + 1, num_inference_steps - 1]. See the + [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. + + Returns: + [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoSDXLPipelineOutput`] or + `tuple`: [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoSDXLPipelineOutput`] + if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the + generated images. + """ + assert video_length > 0 + if frame_ids is None: + frame_ids = list(range(video_length)) + assert len(frame_ids) == video_length + + assert num_videos_per_prompt == 1 + + if isinstance(prompt, str): + prompt = [prompt] + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + + # 0. Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) + + # 2. Define call parameters + batch_size = ( + 1 if isinstance(prompt, str) else len(prompt) if isinstance(prompt, list) else prompt_embeds.shape[0] + ) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_videos_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. 
Prepare latent variables + num_channels_latents = self.unet.config.in_channels + + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_videos_per_prompt, 1) + + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # Perform the first backward process up to time T_1 + x_1_t1 = self.backward_loop( + timesteps=timesteps[: -t1 - 1], + prompt_embeds=prompt_embeds, + latents=latents, + guidance_scale=guidance_scale, + callback=callback, + callback_steps=callback_steps, + extra_step_kwargs=extra_step_kwargs, + num_warmup_steps=num_warmup_steps, + add_text_embeds=add_text_embeds, + add_time_ids=add_time_ids, + ) + + scheduler_copy = copy.deepcopy(self.scheduler) + + # Perform the second backward process up to time T_0 + x_1_t0 = self.backward_loop( + timesteps=timesteps[-t1 - 1 : -t0 - 1], + prompt_embeds=prompt_embeds, + latents=x_1_t1, + guidance_scale=guidance_scale, + callback=callback, + callback_steps=callback_steps, + extra_step_kwargs=extra_step_kwargs, + num_warmup_steps=0, + add_text_embeds=add_text_embeds, + add_time_ids=add_time_ids, + ) + + # Propagate first frame latents at time T_0 to remaining frames + x_2k_t0 = x_1_t0.repeat(video_length - 1, 1, 1, 1) + + # Add motion in latents at time T_0 + x_2k_t0 = create_motion_field_and_warp_latents( + motion_field_strength_x=motion_field_strength_x, + motion_field_strength_y=motion_field_strength_y, + latents=x_2k_t0, + frame_ids=frame_ids[1:], + ) + + # Perform forward process up to time T_1 + x_2k_t1 = self.forward_loop( + x_t0=x_2k_t0, + t0=timesteps[-t0 - 1].to(torch.long), + t1=timesteps[-t1 - 1].to(torch.long), + generator=generator, + ) + + # Perform backward process from time T_1 to 0 + latents = torch.cat([x_1_t1, x_2k_t1]) + + self.scheduler = scheduler_copy + timesteps = timesteps[-t1 - 1 :] + + b, l, d = prompt_embeds.size() + prompt_embeds = prompt_embeds[:, None].repeat(1, video_length, 1, 1).reshape(b * video_length, l, d) + + b, k = add_text_embeds.size() + add_text_embeds = add_text_embeds[:, None].repeat(1, video_length, 1).reshape(b * video_length, k) + + b, k = add_time_ids.size() + add_time_ids = add_time_ids[:, None].repeat(1, video_length, 1).reshape(b * video_length, k) + + # 7.1 Apply denoising_end + if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1: + discrete_timestep_cutoff = 
int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + x_1k_0 = self.backward_loop( + timesteps=timesteps, + prompt_embeds=prompt_embeds, + latents=latents, + guidance_scale=guidance_scale, + callback=callback, + callback_steps=callback_steps, + extra_step_kwargs=extra_step_kwargs, + num_warmup_steps=0, + add_text_embeds=add_text_embeds, + add_time_ids=add_time_ids, + ) + + latents = x_1k_0 + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + return TextToVideoSDXLPipelineOutput(images=image) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload last model to CPU manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + + if not return_dict: + return (image,) + + return TextToVideoSDXLPipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/__init__.py new file mode 100644 index 000000000..c89e89946 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/__init__.py @@ -0,0 +1,52 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline + + _dummy_objects.update( + {"UnCLIPImageVariationPipeline": UnCLIPImageVariationPipeline, "UnCLIPPipeline": UnCLIPPipeline} + ) +else: + _import_structure["pipeline_unclip"] = ["UnCLIPPipeline"] + _import_structure["pipeline_unclip_image_variation"] = ["UnCLIPImageVariationPipeline"] + _import_structure["text_proj"] = ["UnCLIPTextProjModel"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_unclip import UnCLIPPipeline + from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline + from .text_proj import UnCLIPTextProjModel + +else: + import sys + + sys.modules[__name__] = _LazyModule( 
+ __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/pipeline_unclip.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/pipeline_unclip.py new file mode 100644 index 000000000..72e5b3113 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/pipeline_unclip.py @@ -0,0 +1,493 @@ +# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import List, Optional, Tuple, Union + +import torch +from torch.nn import functional as F +from transformers import CLIPTextModelWithProjection, CLIPTokenizer +from transformers.models.clip.modeling_clip import CLIPTextModelOutput + +from ...models import PriorTransformer, UNet2DConditionModel, UNet2DModel +from ...schedulers import UnCLIPScheduler +from ...utils import logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .text_proj import UnCLIPTextProjModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class UnCLIPPipeline(DiffusionPipeline): + """ + Pipeline for text-to-image generation using unCLIP. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + text_encoder ([`~transformers.CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + text_proj ([`UnCLIPTextProjModel`]): + Utility class to prepare and combine the embeddings before they are passed to the decoder. + decoder ([`UNet2DConditionModel`]): + The decoder to invert the image embedding into an image. + super_res_first ([`UNet2DModel`]): + Super resolution UNet. Used in all but the last step of the super resolution diffusion process. + super_res_last ([`UNet2DModel`]): + Super resolution UNet. Used in the last step of the super resolution diffusion process. + prior_scheduler ([`UnCLIPScheduler`]): + Scheduler used in the prior denoising process (a modified [`DDPMScheduler`]). + decoder_scheduler ([`UnCLIPScheduler`]): + Scheduler used in the decoder denoising process (a modified [`DDPMScheduler`]). + super_res_scheduler ([`UnCLIPScheduler`]): + Scheduler used in the super resolution denoising process (a modified [`DDPMScheduler`]). 
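+
+    Example (illustrative sketch only; the repo id below is an assumption, not something this file pins):
+
+        >>> import torch
+        >>> from diffusers import UnCLIPPipeline
+
+        >>> pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16).to("cuda")
+        >>> image = pipe("a photo of a red panda playing a ukulele").images[0]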
+ + """ + + _exclude_from_cpu_offload = ["prior"] + + prior: PriorTransformer + decoder: UNet2DConditionModel + text_proj: UnCLIPTextProjModel + text_encoder: CLIPTextModelWithProjection + tokenizer: CLIPTokenizer + super_res_first: UNet2DModel + super_res_last: UNet2DModel + + prior_scheduler: UnCLIPScheduler + decoder_scheduler: UnCLIPScheduler + super_res_scheduler: UnCLIPScheduler + + model_cpu_offload_seq = "text_encoder->text_proj->decoder->super_res_first->super_res_last" + + def __init__( + self, + prior: PriorTransformer, + decoder: UNet2DConditionModel, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + text_proj: UnCLIPTextProjModel, + super_res_first: UNet2DModel, + super_res_last: UNet2DModel, + prior_scheduler: UnCLIPScheduler, + decoder_scheduler: UnCLIPScheduler, + super_res_scheduler: UnCLIPScheduler, + ): + super().__init__() + + self.register_modules( + prior=prior, + decoder=decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + text_proj=text_proj, + super_res_first=super_res_first, + super_res_last=super_res_last, + prior_scheduler=prior_scheduler, + decoder_scheduler=decoder_scheduler, + super_res_scheduler=super_res_scheduler, + ) + + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, + text_attention_mask: Optional[torch.Tensor] = None, + ): + if text_model_output is None: + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + + prompt_embeds = text_encoder_output.text_embeds + text_enc_hid_states = text_encoder_output.last_hidden_state + + else: + batch_size = text_model_output[0].shape[0] + prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1] + text_mask = text_attention_mask + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens = [""] * batch_size + + uncond_input = self.tokenizer( + uncond_tokens, + 
padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + uncond_text_mask = uncond_input.attention_mask.bool().to(device) + negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds + uncond_text_enc_hid_states = negative_prompt_embeds_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_enc_hid_states.shape[1] + uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) + uncond_text_enc_hid_states = uncond_text_enc_hid_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_enc_hid_states, text_mask + + @torch.no_grad() + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + prior_num_inference_steps: int = 25, + decoder_num_inference_steps: int = 25, + super_res_num_inference_steps: int = 7, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prior_latents: Optional[torch.FloatTensor] = None, + decoder_latents: Optional[torch.FloatTensor] = None, + super_res_latents: Optional[torch.FloatTensor] = None, + text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, + text_attention_mask: Optional[torch.Tensor] = None, + prior_guidance_scale: float = 4.0, + decoder_guidance_scale: float = 8.0, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. This can only be left undefined if `text_model_output` + and `text_attention_mask` is passed. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + prior_num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps for the prior. More denoising steps usually lead to a higher quality + image at the expense of slower inference. + decoder_num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality + image at the expense of slower inference. + super_res_num_inference_steps (`int`, *optional*, defaults to 7): + The number of denoising steps for super resolution. More denoising steps usually lead to a higher + quality image at the expense of slower inference. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prior_latents (`torch.FloatTensor` of shape (batch size, embeddings dimension), *optional*): + Pre-generated noisy latents to be used as inputs for the prior. + decoder_latents (`torch.FloatTensor` of shape (batch size, channels, height, width), *optional*): + Pre-generated noisy latents to be used as inputs for the decoder. + super_res_latents (`torch.FloatTensor` of shape (batch size, channels, super res height, super res width), *optional*): + Pre-generated noisy latents to be used as inputs for the super resolution. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + decoder_guidance_scale (`float`, *optional*, defaults to 8.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + text_model_output (`CLIPTextModelOutput`, *optional*): + Pre-defined [`CLIPTextModel`] outputs that can be derived from the text encoder. Pre-defined text + outputs can be passed for tasks like text embedding interpolations. Make sure to also pass + `text_attention_mask` in this case. `prompt` can then be left `None`. + text_attention_mask (`torch.Tensor`, *optional*): + Pre-defined CLIP text attention mask that can be derived from the tokenizer. Pre-defined text attention + masks are necessary when passing `text_model_output`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images.
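As a reading aid for the three-stage call documented above (prior, decoder, super resolution), here is a minimal usage sketch. It assumes the vendored package is importable as `diffusers`, that a CUDA device is available, and that the public `kakaobrain/karlo-v1-alpha` Karlo checkpoint is used; none of these are pinned down by this patch.

```python
# Minimal sketch: driving the UnCLIP text-to-image pipeline end to end.
# Checkpoint name, device, and output path are illustrative assumptions.
import torch
from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained(
    "kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16
).to("cuda")

generator = torch.Generator(device="cuda").manual_seed(0)
result = pipe(
    prompt="a photo of an astronaut riding a horse",
    prior_num_inference_steps=25,
    decoder_num_inference_steps=25,
    super_res_num_inference_steps=7,
    prior_guidance_scale=4.0,
    decoder_guidance_scale=8.0,
    generator=generator,
)
result.images[0].save("unclip_sample.png")
```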
+ """ + if prompt is not None: + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + else: + batch_size = text_model_output[0].shape[0] + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 + + prompt_embeds, text_enc_hid_states, text_mask = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask + ) + + # prior + + self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) + prior_timesteps_tensor = self.prior_scheduler.timesteps + + embedding_dim = self.prior.config.embedding_dim + + prior_latents = self.prepare_latents( + (batch_size, embedding_dim), + prompt_embeds.dtype, + device, + generator, + prior_latents, + self.prior_scheduler, + ) + + for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents + + predicted_image_embedding = self.prior( + latent_model_input, + timestep=t, + proj_embedding=prompt_embeds, + encoder_hidden_states=text_enc_hid_states, + attention_mask=text_mask, + ).predicted_image_embedding + + if do_classifier_free_guidance: + predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) + predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( + predicted_image_embedding_text - predicted_image_embedding_uncond + ) + + if i + 1 == prior_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = prior_timesteps_tensor[i + 1] + + prior_latents = self.prior_scheduler.step( + predicted_image_embedding, + timestep=t, + sample=prior_latents, + generator=generator, + prev_timestep=prev_timestep, + ).prev_sample + + prior_latents = self.prior.post_process_latents(prior_latents) + + image_embeddings = prior_latents + + # done prior + + # decoder + + text_enc_hid_states, additive_clip_time_embeddings = self.text_proj( + image_embeddings=image_embeddings, + prompt_embeds=prompt_embeds, + text_encoder_hidden_states=text_enc_hid_states, + do_classifier_free_guidance=do_classifier_free_guidance, + ) + + if device.type == "mps": + # HACK: MPS: There is a panic when padding bool tensors, + # so cast to int tensor for the pad and back to bool afterwards + text_mask = text_mask.type(torch.int) + decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) + decoder_text_mask = decoder_text_mask.type(torch.bool) + else: + decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) + + self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) + decoder_timesteps_tensor = self.decoder_scheduler.timesteps + + num_channels_latents = self.decoder.config.in_channels + height = self.decoder.config.sample_size + width = self.decoder.config.sample_size + + decoder_latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + text_enc_hid_states.dtype, + device, + generator, + decoder_latents, + self.decoder_scheduler, + ) + + for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): + # expand the latents if we are doing classifier free guidance 
+ latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents + + noise_pred = self.decoder( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=text_enc_hid_states, + class_labels=additive_clip_time_embeddings, + attention_mask=decoder_text_mask, + ).sample + + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if i + 1 == decoder_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = decoder_timesteps_tensor[i + 1] + + # compute the previous noisy sample x_t -> x_t-1 + decoder_latents = self.decoder_scheduler.step( + noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator + ).prev_sample + + decoder_latents = decoder_latents.clamp(-1, 1) + + image_small = decoder_latents + + # done decoder + + # super res + + self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) + super_res_timesteps_tensor = self.super_res_scheduler.timesteps + + channels = self.super_res_first.config.in_channels // 2 + height = self.super_res_first.config.sample_size + width = self.super_res_first.config.sample_size + + super_res_latents = self.prepare_latents( + (batch_size, channels, height, width), + image_small.dtype, + device, + generator, + super_res_latents, + self.super_res_scheduler, + ) + + if device.type == "mps": + # MPS does not support many interpolations + image_upscaled = F.interpolate(image_small, size=[height, width]) + else: + interpolate_antialias = {} + if "antialias" in inspect.signature(F.interpolate).parameters: + interpolate_antialias["antialias"] = True + + image_upscaled = F.interpolate( + image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias + ) + + for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): + # no classifier free guidance + + if i == super_res_timesteps_tensor.shape[0] - 1: + unet = self.super_res_last + else: + unet = self.super_res_first + + latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) + + noise_pred = unet( + sample=latent_model_input, + timestep=t, + ).sample + + if i + 1 == super_res_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = super_res_timesteps_tensor[i + 1] + + # compute the previous noisy sample x_t -> x_t-1 + super_res_latents = self.super_res_scheduler.step( + noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator + ).prev_sample + + image = super_res_latents + # done super res + + self.maybe_free_model_hooks() + + # post processing + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py new file mode 100644 index 
000000000..6c646a7df --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py @@ -0,0 +1,420 @@ +# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import List, Optional, Union + +import PIL.Image +import torch +from torch.nn import functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...models import UNet2DConditionModel, UNet2DModel +from ...schedulers import UnCLIPScheduler +from ...utils import logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .text_proj import UnCLIPTextProjModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class UnCLIPImageVariationPipeline(DiffusionPipeline): + """ + Pipeline to generate image variations from an input image using UnCLIP. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + text_encoder ([`~transformers.CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `image_encoder`. + image_encoder ([`~transformers.CLIPVisionModelWithProjection`]): + Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_proj ([`UnCLIPTextProjModel`]): + Utility class to prepare and combine the embeddings before they are passed to the decoder. + decoder ([`UNet2DConditionModel`]): + The decoder to invert the image embedding into an image. + super_res_first ([`UNet2DModel`]): + Super resolution UNet. Used in all but the last step of the super resolution diffusion process. + super_res_last ([`UNet2DModel`]): + Super resolution UNet. Used in the last step of the super resolution diffusion process. + decoder_scheduler ([`UnCLIPScheduler`]): + Scheduler used in the decoder denoising process (a modified [`DDPMScheduler`]). + super_res_scheduler ([`UnCLIPScheduler`]): + Scheduler used in the super resolution denoising process (a modified [`DDPMScheduler`]). 
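A minimal usage sketch for this image-variation pipeline follows. The `kakaobrain/karlo-v1-alpha-image-variations` checkpoint name, the CUDA device, and the `input.png` path are illustrative placeholders, not values fixed by this patch.

```python
# Minimal sketch: generating variations of an input image with the
# UnCLIP image-variation pipeline. Checkpoint and paths are placeholders.
import torch
from PIL import Image
from diffusers import UnCLIPImageVariationPipeline

pipe = UnCLIPImageVariationPipeline.from_pretrained(
    "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
).to("cuda")

init_image = Image.open("input.png").convert("RGB")
result = pipe(image=init_image, num_images_per_prompt=2, decoder_guidance_scale=8.0)
for i, img in enumerate(result.images):
    img.save(f"variation_{i}.png")
```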
+ """ + + decoder: UNet2DConditionModel + text_proj: UnCLIPTextProjModel + text_encoder: CLIPTextModelWithProjection + tokenizer: CLIPTokenizer + feature_extractor: CLIPImageProcessor + image_encoder: CLIPVisionModelWithProjection + super_res_first: UNet2DModel + super_res_last: UNet2DModel + + decoder_scheduler: UnCLIPScheduler + super_res_scheduler: UnCLIPScheduler + model_cpu_offload_seq = "text_encoder->image_encoder->text_proj->decoder->super_res_first->super_res_last" + + def __init__( + self, + decoder: UNet2DConditionModel, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + text_proj: UnCLIPTextProjModel, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection, + super_res_first: UNet2DModel, + super_res_last: UNet2DModel, + decoder_scheduler: UnCLIPScheduler, + super_res_scheduler: UnCLIPScheduler, + ): + super().__init__() + + self.register_modules( + decoder=decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + text_proj=text_proj, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + super_res_first=super_res_first, + super_res_last=super_res_last, + decoder_scheduler=decoder_scheduler, + super_res_scheduler=super_res_scheduler, + ) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + + prompt_embeds = text_encoder_output.text_embeds + text_encoder_hidden_states = text_encoder_output.last_hidden_state + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens = [""] * batch_size + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + uncond_text_mask = uncond_input.attention_mask.bool().to(device) + negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds + uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * 
num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + def _encode_image(self, image, device, num_images_per_prompt, image_embeddings: Optional[torch.Tensor] = None): + dtype = next(self.image_encoder.parameters()).dtype + + if image_embeddings is None: + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(images=image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeddings = self.image_encoder(image).image_embeds + + image_embeddings = image_embeddings.repeat_interleave(num_images_per_prompt, dim=0) + + return image_embeddings + + @torch.no_grad() + def __call__( + self, + image: Optional[Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor]] = None, + num_images_per_prompt: int = 1, + decoder_num_inference_steps: int = 25, + super_res_num_inference_steps: int = 7, + generator: Optional[torch.Generator] = None, + decoder_latents: Optional[torch.FloatTensor] = None, + super_res_latents: Optional[torch.FloatTensor] = None, + image_embeddings: Optional[torch.Tensor] = None, + decoder_guidance_scale: float = 8.0, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + The call function to the pipeline for generation. + + Args: + image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`): + `Image` or tensor representing an image batch to be used as the starting point. If you provide a + tensor, it needs to be compatible with the [`CLIPImageProcessor`] + [configuration](https://huggingface.co/fusing/karlo-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json). + Can be left as `None` only when `image_embeddings` are passed. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + decoder_num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality + image at the expense of slower inference. + super_res_num_inference_steps (`int`, *optional*, defaults to 7): + The number of denoising steps for super resolution. More denoising steps usually lead to a higher + quality image at the expense of slower inference. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + decoder_latents (`torch.FloatTensor` of shape (batch size, channels, height, width), *optional*): + Pre-generated noisy latents to be used as inputs for the decoder. 
+ super_res_latents (`torch.FloatTensor` of shape (batch size, channels, super res height, super res width), *optional*): + Pre-generated noisy latents to be used as inputs for the decoder. + decoder_guidance_scale (`float`, *optional*, defaults to 4.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + image_embeddings (`torch.Tensor`, *optional*): + Pre-defined image embeddings that can be derived from the image encoder. Pre-defined image embeddings + can be passed for tasks like image interpolations. `image` can be left as `None`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + if image is not None: + if isinstance(image, PIL.Image.Image): + batch_size = 1 + elif isinstance(image, list): + batch_size = len(image) + else: + batch_size = image.shape[0] + else: + batch_size = image_embeddings.shape[0] + + prompt = [""] * batch_size + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = decoder_guidance_scale > 1.0 + + prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance + ) + + image_embeddings = self._encode_image(image, device, num_images_per_prompt, image_embeddings) + + # decoder + text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj( + image_embeddings=image_embeddings, + prompt_embeds=prompt_embeds, + text_encoder_hidden_states=text_encoder_hidden_states, + do_classifier_free_guidance=do_classifier_free_guidance, + ) + + if device.type == "mps": + # HACK: MPS: There is a panic when padding bool tensors, + # so cast to int tensor for the pad and back to bool afterwards + text_mask = text_mask.type(torch.int) + decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) + decoder_text_mask = decoder_text_mask.type(torch.bool) + else: + decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) + + self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) + decoder_timesteps_tensor = self.decoder_scheduler.timesteps + + num_channels_latents = self.decoder.config.in_channels + height = self.decoder.config.sample_size + width = self.decoder.config.sample_size + + if decoder_latents is None: + decoder_latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + text_encoder_hidden_states.dtype, + device, + generator, + decoder_latents, + self.decoder_scheduler, + ) + + for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents + + noise_pred = self.decoder( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=text_encoder_hidden_states, + 
class_labels=additive_clip_time_embeddings, + attention_mask=decoder_text_mask, + ).sample + + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if i + 1 == decoder_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = decoder_timesteps_tensor[i + 1] + + # compute the previous noisy sample x_t -> x_t-1 + decoder_latents = self.decoder_scheduler.step( + noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator + ).prev_sample + + decoder_latents = decoder_latents.clamp(-1, 1) + + image_small = decoder_latents + + # done decoder + + # super res + + self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) + super_res_timesteps_tensor = self.super_res_scheduler.timesteps + + channels = self.super_res_first.config.in_channels // 2 + height = self.super_res_first.config.sample_size + width = self.super_res_first.config.sample_size + + if super_res_latents is None: + super_res_latents = self.prepare_latents( + (batch_size, channels, height, width), + image_small.dtype, + device, + generator, + super_res_latents, + self.super_res_scheduler, + ) + + if device.type == "mps": + # MPS does not support many interpolations + image_upscaled = F.interpolate(image_small, size=[height, width]) + else: + interpolate_antialias = {} + if "antialias" in inspect.signature(F.interpolate).parameters: + interpolate_antialias["antialias"] = True + + image_upscaled = F.interpolate( + image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias + ) + + for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): + # no classifier free guidance + + if i == super_res_timesteps_tensor.shape[0] - 1: + unet = self.super_res_last + else: + unet = self.super_res_first + + latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) + + noise_pred = unet( + sample=latent_model_input, + timestep=t, + ).sample + + if i + 1 == super_res_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = super_res_timesteps_tensor[i + 1] + + # compute the previous noisy sample x_t -> x_t-1 + super_res_latents = self.super_res_scheduler.step( + noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator + ).prev_sample + + image = super_res_latents + + # done super res + self.maybe_free_model_hooks() + + # post processing + + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/text_proj.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/text_proj.py new file mode 100644 index 000000000..5a86d0c08 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unclip/text_proj.py @@ -0,0 +1,86 @@ +# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models import ModelMixin + + +class UnCLIPTextProjModel(ModelMixin, ConfigMixin): + """ + Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the + decoder. + + For more details, see the original paper: https://arxiv.org/abs/2204.06125 section 2.1 + """ + + @register_to_config + def __init__( + self, + *, + clip_extra_context_tokens: int = 4, + clip_embeddings_dim: int = 768, + time_embed_dim: int, + cross_attention_dim, + ): + super().__init__() + + self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim)) + + # parameters for additional clip time embeddings + self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim) + self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim) + + # parameters for encoder hidden states + self.clip_extra_context_tokens = clip_extra_context_tokens + self.clip_extra_context_tokens_proj = nn.Linear( + clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim + ) + self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim) + self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim) + + def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance): + if do_classifier_free_guidance: + # Add the classifier free guidance embeddings to the image embeddings + image_embeddings_batch_size = image_embeddings.shape[0] + classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0) + classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand( + image_embeddings_batch_size, -1 + ) + image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0) + + # The image embeddings batch size and the text embeddings batch size are equal + assert image_embeddings.shape[0] == prompt_embeds.shape[0] + + batch_size = prompt_embeds.shape[0] + + # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and + # adding CLIP embeddings to the existing timestep embedding, ... + time_projected_prompt_embeds = self.embedding_proj(prompt_embeds) + time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings) + additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds + + # ... 
and by projecting CLIP embeddings into four + # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" + clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings) + clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens) + clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1) + + text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states) + text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states) + text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1) + + return text_encoder_hidden_states, additive_clip_time_embeddings diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/__init__.py new file mode 100644 index 000000000..1ac2b09a6 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/__init__.py @@ -0,0 +1,58 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + ImageTextPipelineOutput, + UniDiffuserPipeline, + ) + + _dummy_objects.update( + {"ImageTextPipelineOutput": ImageTextPipelineOutput, "UniDiffuserPipeline": UniDiffuserPipeline} + ) +else: + _import_structure["modeling_text_decoder"] = ["UniDiffuserTextDecoder"] + _import_structure["modeling_uvit"] = ["UniDiffuserModel", "UTransformer2DModel"] + _import_structure["pipeline_unidiffuser"] = ["ImageTextPipelineOutput", "UniDiffuserPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + ImageTextPipelineOutput, + UniDiffuserPipeline, + ) + else: + from .modeling_text_decoder import UniDiffuserTextDecoder + from .modeling_uvit import UniDiffuserModel, UTransformer2DModel + from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py new file mode 100644 index 000000000..bf0a4eb47 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py @@ -0,0 +1,296 @@ +from typing import Optional + +import numpy as np +import torch +from torch import nn +from transformers import GPT2Config, GPT2LMHeadModel +from transformers.modeling_utils import 
ModuleUtilsMixin + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models import ModelMixin + + +# Modified from ClipCaptionModel in https://github.com/thu-ml/unidiffuser/blob/main/libs/caption_decoder.py +class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): + """ + Text decoder model for a image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is used to + generate text from the UniDiffuser image-text embedding. + + Parameters: + prefix_length (`int`): + Max number of prefix tokens that will be supplied to the model. + prefix_inner_dim (`int`): + The hidden size of the incoming prefix embeddings. For UniDiffuser, this would be the hidden dim of the + CLIP text encoder. + prefix_hidden_dim (`int`, *optional*): + Hidden dim of the MLP if we encode the prefix. + vocab_size (`int`, *optional*, defaults to 50257): + Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`]. + n_positions (`int`, *optional*, defaults to 1024): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + n_embd (`int`, *optional*, defaults to 768): + Dimensionality of the embeddings and hidden states. + n_layer (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + n_head (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + n_inner (`int`, *optional*, defaults to None): + Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd + activation_function (`str`, *optional*, defaults to `"gelu"`): + Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`. + resid_pdrop (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + embd_pdrop (`float`, *optional*, defaults to 0.1): + The dropout ratio for the embeddings. + attn_pdrop (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention. + layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): + The epsilon to use in the layer normalization layers. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + scale_attn_weights (`bool`, *optional*, defaults to `True`): + Scale attention weights by dividing by sqrt(hidden_size).. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). + scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`): + Whether to additionally scale attention weights by `1 / layer_idx + 1`. + reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`): + Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention + dot-product/softmax to float() when training with mixed precision. 
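The prefix-conditioning idea behind this decoder can be sketched with stock `transformers` components: a linear layer maps a CLIP-like feature into the GPT-2 embedding space, and the result is prepended to the token embeddings via `inputs_embeds`. The dimensions and the `gpt2` checkpoint below are illustrative assumptions, not the UniDiffuser configuration.

```python
# Sketch of prefix-conditioned language modeling with a stock GPT-2,
# mirroring the role of decode_prefix: project a fixed-length feature
# into the LM embedding space and let the model attend to it.
import torch
from torch import nn
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
lm = GPT2LMHeadModel.from_pretrained("gpt2")

prefix_length, clip_dim, n_embd = 77, 768, lm.config.n_embd
project_prefix = nn.Linear(clip_dim, n_embd)            # plays the role of decode_prefix

clip_features = torch.randn(1, prefix_length, clip_dim)  # fake CLIP hidden states
prefix_embeds = project_prefix(clip_features)            # (1, 77, n_embd)

input_ids = tokenizer("a corgi wearing", return_tensors="pt").input_ids
token_embeds = lm.transformer.wte(input_ids)              # (1, T, n_embd)

inputs_embeds = torch.cat([prefix_embeds, token_embeds], dim=1)
logits = lm(inputs_embeds=inputs_embeds).logits           # (1, 77 + T, vocab)
print(logits.shape)
```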
+ """ + + _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] + + @register_to_config + def __init__( + self, + prefix_length: int, + prefix_inner_dim: int, + prefix_hidden_dim: Optional[int] = None, + vocab_size: int = 50257, # Start of GPT2 config args + n_positions: int = 1024, + n_embd: int = 768, + n_layer: int = 12, + n_head: int = 12, + n_inner: Optional[int] = None, + activation_function: str = "gelu_new", + resid_pdrop: float = 0.1, + embd_pdrop: float = 0.1, + attn_pdrop: float = 0.1, + layer_norm_epsilon: float = 1e-5, + initializer_range: float = 0.02, + scale_attn_weights: bool = True, + use_cache: bool = True, + scale_attn_by_inverse_layer_idx: bool = False, + reorder_and_upcast_attn: bool = False, + ): + super().__init__() + + self.prefix_length = prefix_length + + if prefix_inner_dim != n_embd and prefix_hidden_dim is None: + raise ValueError( + f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and" + f" `n_embd`: {n_embd} are not equal." + ) + + self.prefix_inner_dim = prefix_inner_dim + self.prefix_hidden_dim = prefix_hidden_dim + + self.encode_prefix = ( + nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim) + if self.prefix_hidden_dim is not None + else nn.Identity() + ) + self.decode_prefix = ( + nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity() + ) + + gpt_config = GPT2Config( + vocab_size=vocab_size, + n_positions=n_positions, + n_embd=n_embd, + n_layer=n_layer, + n_head=n_head, + n_inner=n_inner, + activation_function=activation_function, + resid_pdrop=resid_pdrop, + embd_pdrop=embd_pdrop, + attn_pdrop=attn_pdrop, + layer_norm_epsilon=layer_norm_epsilon, + initializer_range=initializer_range, + scale_attn_weights=scale_attn_weights, + use_cache=use_cache, + scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, + reorder_and_upcast_attn=reorder_and_upcast_attn, + ) + self.transformer = GPT2LMHeadModel(gpt_config) + + def forward( + self, + input_ids: torch.Tensor, + prefix_embeds: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + ): + """ + Args: + input_ids (`torch.Tensor` of shape `(N, max_seq_len)`): + Text tokens to use for inference. + prefix_embeds (`torch.Tensor` of shape `(N, prefix_length, 768)`): + Prefix embedding to preprend to the embedded tokens. + attention_mask (`torch.Tensor` of shape `(N, prefix_length + max_seq_len, 768)`, *optional*): + Attention mask for the prefix embedding. + labels (`torch.Tensor`, *optional*): + Labels to use for language modeling. 
+ """ + embedding_text = self.transformer.transformer.wte(input_ids) + hidden = self.encode_prefix(prefix_embeds) + prefix_embeds = self.decode_prefix(hidden) + embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1) + + if labels is not None: + dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device) + labels = torch.cat((dummy_token, input_ids), dim=1) + out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask) + if self.prefix_hidden_dim is not None: + return out, hidden + else: + return out + + def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor: + return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device) + + def encode(self, prefix): + return self.encode_prefix(prefix) + + @torch.no_grad() + def generate_captions(self, features, eos_token_id, device): + """ + Generate captions given text embedding features. Returns list[L]. + + Args: + features (`torch.Tensor` of shape `(B, L, D)`): + Text embedding features to generate captions from. + eos_token_id (`int`): + The token ID of the EOS token for the text decoder model. + device: + Device to perform text generation on. + + Returns: + `List[str]`: A list of strings generated from the decoder model. + """ + + features = torch.split(features, 1, dim=0) + generated_tokens = [] + generated_seq_lengths = [] + for feature in features: + feature = self.decode_prefix(feature.to(device)) # back to the clip feature + # Only support beam search for now + output_tokens, seq_lengths = self.generate_beam( + input_embeds=feature, device=device, eos_token_id=eos_token_id + ) + generated_tokens.append(output_tokens[0]) + generated_seq_lengths.append(seq_lengths[0]) + generated_tokens = torch.stack(generated_tokens) + generated_seq_lengths = torch.stack(generated_seq_lengths) + return generated_tokens, generated_seq_lengths + + @torch.no_grad() + def generate_beam( + self, + input_ids=None, + input_embeds=None, + device=None, + beam_size: int = 5, + entry_length: int = 67, + temperature: float = 1.0, + eos_token_id: Optional[int] = None, + ): + """ + Generates text using the given tokenizer and text prompt or token embedding via beam search. This + implementation is based on the beam search implementation from the [original UniDiffuser + code](https://github.com/thu-ml/unidiffuser/blob/main/libs/caption_decoder.py#L89). + + Args: + eos_token_id (`int`, *optional*): + The token ID of the EOS token for the text decoder model. + input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): + Tokenizer indices of input sequence tokens in the vocabulary. One of `input_ids` and `input_embeds` + must be supplied. + input_embeds (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): + An embedded representation to directly pass to the transformer as a prefix for beam search. One of + `input_ids` and `input_embeds` must be supplied. + device: + The device to perform beam search on. + beam_size (`int`, *optional*, defaults to `5`): + The number of best states to store during beam search. + entry_length (`int`, *optional*, defaults to `67`): + The number of iterations to run beam search. + temperature (`float`, *optional*, defaults to 1.0): + The temperature to use when performing the softmax over logits from the decoding model. 
+ + Returns: + `Tuple(torch.Tensor, torch.Tensor)`: A tuple of tensors where the first element is a tensor of generated + token sequences sorted by score in descending order, and the second element is the sequence lengths + corresponding to those sequences. + """ + # Generates text until stop_token is reached using beam search with the desired beam size. + stop_token_index = eos_token_id + tokens = None + scores = None + seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int) + is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool) + + if input_embeds is not None: + generated = input_embeds + else: + generated = self.transformer.transformer.wte(input_ids) + + for i in range(entry_length): + outputs = self.transformer(inputs_embeds=generated) + logits = outputs.logits + logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) + logits = logits.softmax(-1).log() + + if scores is None: + scores, next_tokens = logits.topk(beam_size, -1) + generated = generated.expand(beam_size, *generated.shape[1:]) + next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0) + if tokens is None: + tokens = next_tokens + else: + tokens = tokens.expand(beam_size, *tokens.shape[1:]) + tokens = torch.cat((tokens, next_tokens), dim=1) + else: + logits[is_stopped] = -float(np.inf) + logits[is_stopped, 0] = 0 + scores_sum = scores[:, None] + logits + seq_lengths[~is_stopped] += 1 + scores_sum_average = scores_sum / seq_lengths[:, None] + scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1) + next_tokens_source = next_tokens // scores_sum.shape[1] + seq_lengths = seq_lengths[next_tokens_source] + next_tokens = next_tokens % scores_sum.shape[1] + next_tokens = next_tokens.unsqueeze(1) + tokens = tokens[next_tokens_source] + tokens = torch.cat((tokens, next_tokens), dim=1) + generated = generated[next_tokens_source] + scores = scores_sum_average * seq_lengths + is_stopped = is_stopped[next_tokens_source] + + next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1) + generated = torch.cat((generated, next_token_embed), dim=1) + is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze() + if is_stopped.all(): + break + + scores = scores / seq_lengths + order = scores.argsort(descending=True) + # tokens tensors are already padded to max_seq_length + output_texts = [tokens[i] for i in order] + output_texts = torch.stack(output_texts, dim=0) + seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype) + return output_texts, seq_lengths diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/modeling_uvit.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/modeling_uvit.py new file mode 100644 index 000000000..c074b9916 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/modeling_uvit.py @@ -0,0 +1,1197 @@ +import math +from typing import Optional, Union + +import torch +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models import ModelMixin +from ...models.attention import FeedForward +from ...models.attention_processor import Attention +from ...models.embeddings import TimestepEmbedding, Timesteps, get_2d_sincos_pos_embed +from ...models.normalization import AdaLayerNorm +from ...models.transformers.transformer_2d import 
Transformer2DModelOutput +from ...utils import logging + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def _no_grad_trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 + + if (mean < a - 2 * std) or (mean > b + 2 * std): + logger.warning( + "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect." + ) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.0)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): + # type: (torch.Tensor, float, float, float, float) -> torch.Tensor + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, + \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for + generating the random values works best when :math:`a \leq \text{mean} \leq b`. 
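Because the values are produced by inverse-CDF sampling rather than rejection, every draw is guaranteed to fall inside `[a, b]`. A quick check using the stock `torch.nn.init.trunc_normal_`, which follows the same construction:

```python
# Verify that truncated-normal draws stay inside [a, b].
import torch

w = torch.empty(10_000)
torch.nn.init.trunc_normal_(w, mean=0.0, std=1.0, a=-2.0, b=2.0)

assert w.min() >= -2.0 and w.max() <= 2.0
print(float(w.mean()), float(w.std()))   # roughly 0, and a std slightly below 1
```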
+ + Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + Examples: + >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +class PatchEmbed(nn.Module): + """2D Image to Patch Embedding""" + + def __init__( + self, + height=224, + width=224, + patch_size=16, + in_channels=3, + embed_dim=768, + layer_norm=False, + flatten=True, + bias=True, + use_pos_embed=True, + ): + super().__init__() + + num_patches = (height // patch_size) * (width // patch_size) + self.flatten = flatten + self.layer_norm = layer_norm + + self.proj = nn.Conv2d( + in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias + ) + if layer_norm: + self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6) + else: + self.norm = None + + self.use_pos_embed = use_pos_embed + if self.use_pos_embed: + pos_embed = get_2d_sincos_pos_embed(embed_dim, int(num_patches**0.5)) + self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False) + + def forward(self, latent): + latent = self.proj(latent) + if self.flatten: + latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC + if self.layer_norm: + latent = self.norm(latent) + if self.use_pos_embed: + return latent + self.pos_embed + else: + return latent + + +class SkipBlock(nn.Module): + def __init__(self, dim: int): + super().__init__() + + self.skip_linear = nn.Linear(2 * dim, dim) + + # Use torch.nn.LayerNorm for now, following the original code + self.norm = nn.LayerNorm(dim) + + def forward(self, x, skip): + x = self.skip_linear(torch.cat([x, skip], dim=-1)) + x = self.norm(x) + + return x + + +# Modified to support both pre-LayerNorm and post-LayerNorm configurations +# Don't support AdaLayerNormZero for now +# Modified from diffusers.models.attention.BasicTransformerBlock +class UTransformerBlock(nn.Module): + r""" + A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations. + + Parameters: + dim (`int`): The number of channels in the input and output. + num_attention_heads (`int`): The number of heads to use for multi-head attention. + attention_head_dim (`int`): The number of channels in each head. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. + activation_fn (`str`, *optional*, defaults to `"geglu"`): + Activation function to be used in feed-forward. + num_embeds_ada_norm (:obj: `int`, *optional*): + The number of diffusion steps used during training. See `Transformer2DModel`. + attention_bias (:obj: `bool`, *optional*, defaults to `False`): + Configure if the attentions should contain a bias parameter. + only_cross_attention (`bool`, *optional*): + Whether to use only cross-attention layers. In this case two cross attention layers are used. + double_self_attention (`bool`, *optional*): + Whether to use two self-attention layers. In this case no cross attention layers are used. + upcast_attention (`bool`, *optional*): + Whether to upcast the query and key to float32 when performing the attention calculation. + norm_elementwise_affine (`bool`, *optional*): + Whether to use learnable per-element affine parameters during layer normalization. 
+ norm_type (`str`, defaults to `"layer_norm"`): + The layer norm implementation to use. + pre_layer_norm (`bool`, *optional*): + Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), + as opposed to after ("post-LayerNorm"). Note that `BasicTransformerBlock` uses pre-LayerNorm, e.g. + `pre_layer_norm = True`. + final_dropout (`bool`, *optional*): + Whether to use a final Dropout layer after the feedforward network. + """ + + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + dropout=0.0, + cross_attention_dim: Optional[int] = None, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, + attention_bias: bool = False, + only_cross_attention: bool = False, + double_self_attention: bool = False, + upcast_attention: bool = False, + norm_elementwise_affine: bool = True, + norm_type: str = "layer_norm", + pre_layer_norm: bool = True, + final_dropout: bool = False, + ): + super().__init__() + self.only_cross_attention = only_cross_attention + + self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" + + self.pre_layer_norm = pre_layer_norm + + if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: + raise ValueError( + f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" + f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." + ) + + # 1. Self-Attn + self.attn1 = Attention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + cross_attention_dim=cross_attention_dim if only_cross_attention else None, + upcast_attention=upcast_attention, + ) + + # 2. Cross-Attn + if cross_attention_dim is not None or double_self_attention: + self.attn2 = Attention( + query_dim=dim, + cross_attention_dim=cross_attention_dim if not double_self_attention else None, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + upcast_attention=upcast_attention, + ) # is self-attn if encoder_hidden_states is none + else: + self.attn2 = None + + if self.use_ada_layer_norm: + self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) + else: + self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + + if cross_attention_dim is not None or double_self_attention: + # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. + # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during + # the second cross attention block. + self.norm2 = ( + AdaLayerNorm(dim, num_embeds_ada_norm) + if self.use_ada_layer_norm + else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + ) + else: + self.norm2 = None + + # 3. Feed-forward + self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) + + def forward( + self, + hidden_states, + attention_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + timestep=None, + cross_attention_kwargs=None, + class_labels=None, + ): + # Pre-LayerNorm + if self.pre_layer_norm: + if self.use_ada_layer_norm: + norm_hidden_states = self.norm1(hidden_states, timestep) + else: + norm_hidden_states = self.norm1(hidden_states) + else: + norm_hidden_states = hidden_states + + # 1. 
Self-Attention + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + attn_output = self.attn1( + norm_hidden_states, + encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, + attention_mask=attention_mask, + **cross_attention_kwargs, + ) + + # Post-LayerNorm + if not self.pre_layer_norm: + if self.use_ada_layer_norm: + attn_output = self.norm1(attn_output, timestep) + else: + attn_output = self.norm1(attn_output) + + hidden_states = attn_output + hidden_states + + if self.attn2 is not None: + # Pre-LayerNorm + if self.pre_layer_norm: + norm_hidden_states = ( + self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) + ) + else: + norm_hidden_states = hidden_states + # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly + # prepare attention mask here + + # 2. Cross-Attention + attn_output = self.attn2( + norm_hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + **cross_attention_kwargs, + ) + + # Post-LayerNorm + if not self.pre_layer_norm: + attn_output = self.norm2(attn_output, timestep) if self.use_ada_layer_norm else self.norm2(attn_output) + + hidden_states = attn_output + hidden_states + + # 3. Feed-forward + # Pre-LayerNorm + if self.pre_layer_norm: + norm_hidden_states = self.norm3(hidden_states) + else: + norm_hidden_states = hidden_states + + ff_output = self.ff(norm_hidden_states) + + # Post-LayerNorm + if not self.pre_layer_norm: + ff_output = self.norm3(ff_output) + + hidden_states = ff_output + hidden_states + + return hidden_states + + +# Like UTransformerBlock except with LayerNorms on the residual backbone of the block +# Modified from diffusers.models.attention.BasicTransformerBlock +class UniDiffuserBlock(nn.Module): + r""" + A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations and puts the + LayerNorms on the residual backbone of the block. This matches the transformer block in the [original UniDiffuser + implementation](https://github.com/thu-ml/unidiffuser/blob/main/libs/uvit_multi_post_ln_v1.py#L104). + + Parameters: + dim (`int`): The number of channels in the input and output. + num_attention_heads (`int`): The number of heads to use for multi-head attention. + attention_head_dim (`int`): The number of channels in each head. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. + activation_fn (`str`, *optional*, defaults to `"geglu"`): + Activation function to be used in feed-forward. + num_embeds_ada_norm (:obj: `int`, *optional*): + The number of diffusion steps used during training. See `Transformer2DModel`. + attention_bias (:obj: `bool`, *optional*, defaults to `False`): + Configure if the attentions should contain a bias parameter. + only_cross_attention (`bool`, *optional*): + Whether to use only cross-attention layers. In this case two cross attention layers are used. + double_self_attention (`bool`, *optional*): + Whether to use two self-attention layers. In this case no cross attention layers are used. + upcast_attention (`bool`, *optional*): + Whether to upcast the query and key to float() when performing the attention calculation. + norm_elementwise_affine (`bool`, *optional*): + Whether to use learnable per-element affine parameters during layer normalization. 
+ norm_type (`str`, defaults to `"layer_norm"`): + The layer norm implementation to use. + pre_layer_norm (`bool`, *optional*): + Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), + as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm + (`pre_layer_norm = False`). + final_dropout (`bool`, *optional*): + Whether to use a final Dropout layer after the feedforward network. + """ + + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + dropout=0.0, + cross_attention_dim: Optional[int] = None, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, + attention_bias: bool = False, + only_cross_attention: bool = False, + double_self_attention: bool = False, + upcast_attention: bool = False, + norm_elementwise_affine: bool = True, + norm_type: str = "layer_norm", + pre_layer_norm: bool = False, + final_dropout: bool = True, + ): + super().__init__() + self.only_cross_attention = only_cross_attention + + self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" + + self.pre_layer_norm = pre_layer_norm + + if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: + raise ValueError( + f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" + f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." + ) + + # 1. Self-Attn + self.attn1 = Attention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + cross_attention_dim=cross_attention_dim if only_cross_attention else None, + upcast_attention=upcast_attention, + ) + + # 2. Cross-Attn + if cross_attention_dim is not None or double_self_attention: + self.attn2 = Attention( + query_dim=dim, + cross_attention_dim=cross_attention_dim if not double_self_attention else None, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + upcast_attention=upcast_attention, + ) # is self-attn if encoder_hidden_states is none + else: + self.attn2 = None + + if self.use_ada_layer_norm: + self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) + else: + self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + + if cross_attention_dim is not None or double_self_attention: + # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. + # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during + # the second cross attention block. + self.norm2 = ( + AdaLayerNorm(dim, num_embeds_ada_norm) + if self.use_ada_layer_norm + else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + ) + else: + self.norm2 = None + + # 3. 
Feed-forward + self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) + + def forward( + self, + hidden_states, + attention_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + timestep=None, + cross_attention_kwargs=None, + class_labels=None, + ): + # Following the diffusers transformer block implementation, put the LayerNorm on the + # residual backbone + # Pre-LayerNorm + if self.pre_layer_norm: + if self.use_ada_layer_norm: + hidden_states = self.norm1(hidden_states, timestep) + else: + hidden_states = self.norm1(hidden_states) + + # 1. Self-Attention + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + attn_output = self.attn1( + hidden_states, + encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, + attention_mask=attention_mask, + **cross_attention_kwargs, + ) + + hidden_states = attn_output + hidden_states + + # Following the diffusers transformer block implementation, put the LayerNorm on the + # residual backbone + # Post-LayerNorm + if not self.pre_layer_norm: + if self.use_ada_layer_norm: + hidden_states = self.norm1(hidden_states, timestep) + else: + hidden_states = self.norm1(hidden_states) + + if self.attn2 is not None: + # Pre-LayerNorm + if self.pre_layer_norm: + hidden_states = ( + self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) + ) + # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly + # prepare attention mask here + + # 2. Cross-Attention + attn_output = self.attn2( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + **cross_attention_kwargs, + ) + + hidden_states = attn_output + hidden_states + + # Post-LayerNorm + if not self.pre_layer_norm: + hidden_states = ( + self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) + ) + + # 3. Feed-forward + # Pre-LayerNorm + if self.pre_layer_norm: + hidden_states = self.norm3(hidden_states) + + ff_output = self.ff(hidden_states) + + hidden_states = ff_output + hidden_states + + # Post-LayerNorm + if not self.pre_layer_norm: + hidden_states = self.norm3(hidden_states) + + return hidden_states + + +# Modified from diffusers.models.transformer_2d.Transformer2DModel +# Modify the transformer block structure to be U-Net like following U-ViT +# Only supports patch-style input and torch.nn.LayerNorm currently +# https://github.com/baofff/U-ViT +class UTransformer2DModel(ModelMixin, ConfigMixin): + """ + Transformer model based on the [U-ViT](https://github.com/baofff/U-ViT) architecture for image-like data. Compared + to [`Transformer2DModel`], this model has skip connections between transformer blocks in a "U"-shaped fashion, + similar to a U-Net. Supports only continuous (actual embeddings) inputs, which are embedded via a [`PatchEmbed`] + layer and then reshaped to (b, t, d). + + Parameters: + num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. + attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. + in_channels (`int`, *optional*): + Pass if the input is continuous. The number of channels in the input. + out_channels (`int`, *optional*): + The number of output channels; if `None`, defaults to `in_channels`. 
+        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
+        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
+        norm_num_groups (`int`, *optional*, defaults to `32`):
+            The number of groups to use when performing Group Normalization.
+        cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
+        attention_bias (`bool`, *optional*):
+            Configure if the TransformerBlocks' attention should contain a bias parameter.
+        sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
+            Note that this is fixed at training time as it is used for learning a number of position embeddings. See
+            `ImagePositionalEmbeddings`.
+        num_vector_embeds (`int`, *optional*):
+            Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
+            Includes the class for the masked latent pixel.
+        patch_size (`int`, *optional*, defaults to 2):
+            The patch size to use in the patch embedding.
+        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
+        num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
+            The number of diffusion steps used during training. Note that this is fixed at training time as it is used
+            to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
+            up to but not more than `num_embeds_ada_norm` steps.
+        use_linear_projection (int, *optional*): TODO: Not used
+        only_cross_attention (`bool`, *optional*):
+            Whether to use only cross-attention layers. In this case two cross attention layers are used in each
+            transformer block.
+        upcast_attention (`bool`, *optional*):
+            Whether to upcast the query and key to `float32` when performing the attention calculation.
+        norm_type (`str`, *optional*, defaults to `"layer_norm"`):
+            The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`.
+        block_type (`str`, *optional*, defaults to `"unidiffuser"`):
+            The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual
+            backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard
+            behavior in `diffusers`).
+        pre_layer_norm (`bool`, *optional*):
+            Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
+            as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm
+            (`pre_layer_norm = False`).
+        norm_elementwise_affine (`bool`, *optional*):
+            Whether to use learnable per-element affine parameters during layer normalization.
+        use_patch_pos_embed (`bool`, *optional*):
+            Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`).
+        final_dropout (`bool`, *optional*):
+            Whether to use a final Dropout layer after the feedforward network.
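+
+    Example (a minimal illustrative sketch; the configuration values below are assumptions chosen for demonstration,
+    not the defaults of any released checkpoint):
+
+    ```py
+    >>> import torch
+
+    >>> model = UTransformer2DModel(
+    ...     num_attention_heads=8, attention_head_dim=64, in_channels=4, num_layers=5, sample_size=32, patch_size=2
+    ... )
+    >>> latents = torch.randn(1, 4, 32, 32)
+    >>> # 256 patch tokens flow through 2 in_blocks, a mid_block, and 2 out_blocks with skip connections.
+    >>> # `unpatchify=False` returns the token sequence, which is how `UniDiffuserModel` consumes this module.
+    >>> sample = model(latents, return_dict=False, unpatchify=False)[0]
+    >>> sample.shape
+    torch.Size([1, 256, 512])
+    ```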
+ """ + + @register_to_config + def __init__( + self, + num_attention_heads: int = 16, + attention_head_dim: int = 88, + in_channels: Optional[int] = None, + out_channels: Optional[int] = None, + num_layers: int = 1, + dropout: float = 0.0, + norm_num_groups: int = 32, + cross_attention_dim: Optional[int] = None, + attention_bias: bool = False, + sample_size: Optional[int] = None, + num_vector_embeds: Optional[int] = None, + patch_size: Optional[int] = 2, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + norm_type: str = "layer_norm", + block_type: str = "unidiffuser", + pre_layer_norm: bool = False, + norm_elementwise_affine: bool = True, + use_patch_pos_embed=False, + ff_final_dropout: bool = False, + ): + super().__init__() + self.use_linear_projection = use_linear_projection + self.num_attention_heads = num_attention_heads + self.attention_head_dim = attention_head_dim + inner_dim = num_attention_heads * attention_head_dim + + # 1. Input + # Only support patch input of shape (batch_size, num_channels, height, width) for now + assert in_channels is not None and patch_size is not None, "Patch input requires in_channels and patch_size." + + assert sample_size is not None, "UTransformer2DModel over patched input must provide sample_size" + + # 2. Define input layers + self.height = sample_size + self.width = sample_size + + self.patch_size = patch_size + self.pos_embed = PatchEmbed( + height=sample_size, + width=sample_size, + patch_size=patch_size, + in_channels=in_channels, + embed_dim=inner_dim, + use_pos_embed=use_patch_pos_embed, + ) + + # 3. Define transformers blocks + # Modify this to have in_blocks ("downsample" blocks, even though we don't actually downsample), a mid_block, + # and out_blocks ("upsample" blocks). Like a U-Net, there are skip connections from in_blocks to out_blocks in + # a "U"-shaped fashion (e.g. first in_block to last out_block, etc.). + # Quick hack to make the transformer block type configurable + if block_type == "unidiffuser": + block_cls = UniDiffuserBlock + else: + block_cls = UTransformerBlock + self.transformer_in_blocks = nn.ModuleList( + [ + block_cls( + inner_dim, + num_attention_heads, + attention_head_dim, + dropout=dropout, + cross_attention_dim=cross_attention_dim, + activation_fn=activation_fn, + num_embeds_ada_norm=num_embeds_ada_norm, + attention_bias=attention_bias, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + norm_type=norm_type, + pre_layer_norm=pre_layer_norm, + norm_elementwise_affine=norm_elementwise_affine, + final_dropout=ff_final_dropout, + ) + for d in range(num_layers // 2) + ] + ) + + self.transformer_mid_block = block_cls( + inner_dim, + num_attention_heads, + attention_head_dim, + dropout=dropout, + cross_attention_dim=cross_attention_dim, + activation_fn=activation_fn, + num_embeds_ada_norm=num_embeds_ada_norm, + attention_bias=attention_bias, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + norm_type=norm_type, + pre_layer_norm=pre_layer_norm, + norm_elementwise_affine=norm_elementwise_affine, + final_dropout=ff_final_dropout, + ) + + # For each skip connection, we use a SkipBlock (concatenation + Linear + LayerNorm) to process the inputs + # before each transformer out_block. 
+ self.transformer_out_blocks = nn.ModuleList( + [ + nn.ModuleDict( + { + "skip": SkipBlock( + inner_dim, + ), + "block": block_cls( + inner_dim, + num_attention_heads, + attention_head_dim, + dropout=dropout, + cross_attention_dim=cross_attention_dim, + activation_fn=activation_fn, + num_embeds_ada_norm=num_embeds_ada_norm, + attention_bias=attention_bias, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + norm_type=norm_type, + pre_layer_norm=pre_layer_norm, + norm_elementwise_affine=norm_elementwise_affine, + final_dropout=ff_final_dropout, + ), + } + ) + for d in range(num_layers // 2) + ] + ) + + # 4. Define output layers + self.out_channels = in_channels if out_channels is None else out_channels + + # Following the UniDiffuser U-ViT implementation, we process the transformer output with + # a LayerNorm layer with per-element affine params + self.norm_out = nn.LayerNorm(inner_dim) + + def forward( + self, + hidden_states, + encoder_hidden_states=None, + timestep=None, + class_labels=None, + cross_attention_kwargs=None, + return_dict: bool = True, + hidden_states_is_embedding: bool = False, + unpatchify: bool = True, + ): + """ + Args: + hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`. + When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input + hidden_states + encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): + Conditional embeddings for cross attention layer. If not given, cross-attention defaults to + self-attention. + timestep ( `torch.long`, *optional*): + Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step. + class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): + Optional class labels to be applied as an embedding in AdaLayerZeroNorm. Used to indicate class labels + conditioning. + cross_attention_kwargs (*optional*): + Keyword arguments to supply to the cross attention layers, if used. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. + hidden_states_is_embedding (`bool`, *optional*, defaults to `False`): + Whether or not hidden_states is an embedding directly usable by the transformer. In this case we will + ignore input handling (e.g. continuous, vectorized, etc.) and directly feed hidden_states into the + transformer blocks. + unpatchify (`bool`, *optional*, defaults to `True`): + Whether to unpatchify the transformer output. + + Returns: + [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`: + [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is the sample tensor. + """ + # 0. Check inputs + + if not unpatchify and return_dict: + raise ValueError( + f"Cannot both define `unpatchify`: {unpatchify} and `return_dict`: {return_dict} since when" + f" `unpatchify` is {unpatchify} the returned output is of shape (batch_size, seq_len, hidden_dim)" + " rather than (batch_size, num_channels, height, width)." + ) + + # 1. Input + if not hidden_states_is_embedding: + hidden_states = self.pos_embed(hidden_states) + + # 2. 
Blocks
+
+        # In ("downsample") blocks
+        skips = []
+        for in_block in self.transformer_in_blocks:
+            hidden_states = in_block(
+                hidden_states,
+                encoder_hidden_states=encoder_hidden_states,
+                timestep=timestep,
+                cross_attention_kwargs=cross_attention_kwargs,
+                class_labels=class_labels,
+            )
+            skips.append(hidden_states)
+
+        # Mid block
+        hidden_states = self.transformer_mid_block(hidden_states)
+
+        # Out ("upsample") blocks
+        for out_block in self.transformer_out_blocks:
+            hidden_states = out_block["skip"](hidden_states, skips.pop())
+            hidden_states = out_block["block"](
+                hidden_states,
+                encoder_hidden_states=encoder_hidden_states,
+                timestep=timestep,
+                cross_attention_kwargs=cross_attention_kwargs,
+                class_labels=class_labels,
+            )
+
+        # 3. Output
+        # Don't support AdaLayerNorm for now, so no conditioning/scale/shift logic
+        hidden_states = self.norm_out(hidden_states)
+        # hidden_states = self.proj_out(hidden_states)
+
+        if unpatchify:
+            # unpatchify
+            height = width = int(hidden_states.shape[1] ** 0.5)
+            hidden_states = hidden_states.reshape(
+                shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
+            )
+            hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
+            output = hidden_states.reshape(
+                shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
+            )
+        else:
+            output = hidden_states
+
+        if not return_dict:
+            return (output,)
+
+        return Transformer2DModelOutput(sample=output)
+
+
+class UniDiffuserModel(ModelMixin, ConfigMixin):
+    """
+    Transformer model for an image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is a
+    modification of [`UTransformer2DModel`] with input and output heads for the VAE-embedded latent image, the
+    CLIP-embedded image, and the CLIP-embedded prompt (see paper for more details).
+
+    Parameters:
+        text_dim (`int`): The hidden dimension of the CLIP text model used to embed prompts.
+        clip_img_dim (`int`): The hidden dimension of the CLIP vision model used to embed images.
+        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
+        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
+        in_channels (`int`, *optional*):
+            Pass if the input is continuous. The number of channels in the input.
+        out_channels (`int`, *optional*):
+            The number of output channels; if `None`, defaults to `in_channels`.
+        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
+        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
+        norm_num_groups (`int`, *optional*, defaults to `32`):
+            The number of groups to use when performing Group Normalization.
+        cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
+        attention_bias (`bool`, *optional*):
+            Configure if the TransformerBlocks' attention should contain a bias parameter.
+        sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
+            Note that this is fixed at training time as it is used for learning a number of position embeddings. See
+            `ImagePositionalEmbeddings`.
+        num_vector_embeds (`int`, *optional*):
+            Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
+            Includes the class for the masked latent pixel.
+        patch_size (`int`, *optional*, defaults to 2):
+            The patch size to use in the patch embedding.
+        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
+        num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
+            The number of diffusion steps used during training. Note that this is fixed at training time as it is used
+            to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
+            up to but not more than `num_embeds_ada_norm` steps.
+        use_linear_projection (int, *optional*): TODO: Not used
+        only_cross_attention (`bool`, *optional*):
+            Whether to use only cross-attention layers. In this case two cross attention layers are used in each
+            transformer block.
+        upcast_attention (`bool`, *optional*):
+            Whether to upcast the query and key to float32 when performing the attention calculation.
+        norm_type (`str`, *optional*, defaults to `"layer_norm"`):
+            The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`.
+        block_type (`str`, *optional*, defaults to `"unidiffuser"`):
+            The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual
+            backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard
+            behavior in `diffusers`).
+        pre_layer_norm (`bool`, *optional*):
+            Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
+            as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm
+            (`pre_layer_norm = False`).
+        norm_elementwise_affine (`bool`, *optional*):
+            Whether to use learnable per-element affine parameters during layer normalization.
+        use_patch_pos_embed (`bool`, *optional*):
+            Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`).
+        ff_final_dropout (`bool`, *optional*):
+            Whether to use a final Dropout layer after the feedforward network.
+        use_data_type_embedding (`bool`, *optional*):
+            Whether to use a data type embedding. This is only relevant for UniDiffuser-v1 style models; UniDiffuser-v1
+            is continued from UniDiffuser-v0 by further training on non-publicly-available data and accepts a
+            `data_type` argument, which can either be `1` to use the weights trained on that non-publicly-available
+            data or `0` otherwise. This argument is subsequently embedded by the data type embedding, if used.
+    """
+
+    @register_to_config
+    def __init__(
+        self,
+        text_dim: int = 768,
+        clip_img_dim: int = 512,
+        num_text_tokens: int = 77,
+        num_attention_heads: int = 16,
+        attention_head_dim: int = 88,
+        in_channels: Optional[int] = None,
+        out_channels: Optional[int] = None,
+        num_layers: int = 1,
+        dropout: float = 0.0,
+        norm_num_groups: int = 32,
+        cross_attention_dim: Optional[int] = None,
+        attention_bias: bool = False,
+        sample_size: Optional[int] = None,
+        num_vector_embeds: Optional[int] = None,
+        patch_size: Optional[int] = None,
+        activation_fn: str = "geglu",
+        num_embeds_ada_norm: Optional[int] = None,
+        use_linear_projection: bool = False,
+        only_cross_attention: bool = False,
+        upcast_attention: bool = False,
+        norm_type: str = "layer_norm",
+        block_type: str = "unidiffuser",
+        pre_layer_norm: bool = False,
+        use_timestep_embedding=False,
+        norm_elementwise_affine: bool = True,
+        use_patch_pos_embed=False,
+        ff_final_dropout: bool = True,
+        use_data_type_embedding: bool = False,
+    ):
+        super().__init__()
+
+        # 0.
Handle dimensions + self.inner_dim = num_attention_heads * attention_head_dim + + assert sample_size is not None, "UniDiffuserModel over patched input must provide sample_size" + self.sample_size = sample_size + self.in_channels = in_channels + self.out_channels = in_channels if out_channels is None else out_channels + + self.patch_size = patch_size + # Assume image is square... + self.num_patches = (self.sample_size // patch_size) * (self.sample_size // patch_size) + + # 1. Define input layers + # 1.1 Input layers for text and image input + # For now, only support patch input for VAE latent image input + self.vae_img_in = PatchEmbed( + height=sample_size, + width=sample_size, + patch_size=patch_size, + in_channels=in_channels, + embed_dim=self.inner_dim, + use_pos_embed=use_patch_pos_embed, + ) + self.clip_img_in = nn.Linear(clip_img_dim, self.inner_dim) + self.text_in = nn.Linear(text_dim, self.inner_dim) + + # 1.2. Timestep embeddings for t_img, t_text + self.timestep_img_proj = Timesteps( + self.inner_dim, + flip_sin_to_cos=True, + downscale_freq_shift=0, + ) + self.timestep_img_embed = ( + TimestepEmbedding( + self.inner_dim, + 4 * self.inner_dim, + out_dim=self.inner_dim, + ) + if use_timestep_embedding + else nn.Identity() + ) + + self.timestep_text_proj = Timesteps( + self.inner_dim, + flip_sin_to_cos=True, + downscale_freq_shift=0, + ) + self.timestep_text_embed = ( + TimestepEmbedding( + self.inner_dim, + 4 * self.inner_dim, + out_dim=self.inner_dim, + ) + if use_timestep_embedding + else nn.Identity() + ) + + # 1.3. Positional embedding + self.num_text_tokens = num_text_tokens + self.num_tokens = 1 + 1 + num_text_tokens + 1 + self.num_patches + self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, self.inner_dim)) + self.pos_embed_drop = nn.Dropout(p=dropout) + trunc_normal_(self.pos_embed, std=0.02) + + # 1.4. Handle data type token embeddings for UniDiffuser-V1, if necessary + self.use_data_type_embedding = use_data_type_embedding + if self.use_data_type_embedding: + self.data_type_token_embedding = nn.Embedding(2, self.inner_dim) + self.data_type_pos_embed_token = nn.Parameter(torch.zeros(1, 1, self.inner_dim)) + + # 2. Define transformer blocks + self.transformer = UTransformer2DModel( + num_attention_heads=num_attention_heads, + attention_head_dim=attention_head_dim, + in_channels=in_channels, + out_channels=out_channels, + num_layers=num_layers, + dropout=dropout, + norm_num_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + attention_bias=attention_bias, + sample_size=sample_size, + num_vector_embeds=num_vector_embeds, + patch_size=patch_size, + activation_fn=activation_fn, + num_embeds_ada_norm=num_embeds_ada_norm, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + norm_type=norm_type, + block_type=block_type, + pre_layer_norm=pre_layer_norm, + norm_elementwise_affine=norm_elementwise_affine, + use_patch_pos_embed=use_patch_pos_embed, + ff_final_dropout=ff_final_dropout, + ) + + # 3. 
Define output layers
+        patch_dim = (patch_size**2) * out_channels
+        self.vae_img_out = nn.Linear(self.inner_dim, patch_dim)
+        self.clip_img_out = nn.Linear(self.inner_dim, clip_img_dim)
+        self.text_out = nn.Linear(self.inner_dim, text_dim)
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return {"pos_embed"}
+
+    def forward(
+        self,
+        latent_image_embeds: torch.FloatTensor,
+        image_embeds: torch.FloatTensor,
+        prompt_embeds: torch.FloatTensor,
+        timestep_img: Union[torch.Tensor, float, int],
+        timestep_text: Union[torch.Tensor, float, int],
+        data_type: Optional[Union[torch.Tensor, float, int]] = 1,
+        encoder_hidden_states=None,
+        cross_attention_kwargs=None,
+    ):
+        """
+        Args:
+            latent_image_embeds (`torch.FloatTensor` of shape `(batch size, latent channels, height, width)`):
+                Latent image representation from the VAE encoder.
+            image_embeds (`torch.FloatTensor` of shape `(batch size, 1, clip_img_dim)`):
+                CLIP-embedded image representation (unsqueezed in the first dimension).
+            prompt_embeds (`torch.FloatTensor` of shape `(batch size, seq_len, text_dim)`):
+                CLIP-embedded text representation.
+            timestep_img (`torch.long` or `float` or `int`):
+                Current denoising step for the image.
+            timestep_text (`torch.long` or `float` or `int`):
+                Current denoising step for the text.
+            data_type (`torch.int` or `float` or `int`, *optional*, defaults to `1`):
+                Only used in UniDiffuser-v1-style models. Can be either `1`, to use weights trained on nonpublic data,
+                or `0` otherwise.
+            encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
+                Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
+                self-attention.
+            cross_attention_kwargs (*optional*):
+                Keyword arguments to supply to the cross attention layers, if used.
+
+
+        Returns:
+            `tuple`: Returns relevant parts of the model's noise prediction: the first element of the tuple is the VAE
+            image embedding, the second element is the CLIP image embedding, and the third element is the CLIP text
+            embedding.
+        """
+        batch_size = latent_image_embeds.shape[0]
+
+        # 1. Input
+        # 1.1. Map inputs to shape (B, N, inner_dim)
+        vae_hidden_states = self.vae_img_in(latent_image_embeds)
+        clip_hidden_states = self.clip_img_in(image_embeds)
+        text_hidden_states = self.text_in(prompt_embeds)
+
+        num_text_tokens, num_img_tokens = text_hidden_states.size(1), vae_hidden_states.size(1)
+
+        # 1.2. Encode image timesteps to single token (B, 1, inner_dim)
+        if not torch.is_tensor(timestep_img):
+            timestep_img = torch.tensor([timestep_img], dtype=torch.long, device=vae_hidden_states.device)
+
+        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+        timestep_img = timestep_img * torch.ones(batch_size, dtype=timestep_img.dtype, device=timestep_img.device)
+
+        timestep_img_token = self.timestep_img_proj(timestep_img)
+        # t_img_token does not contain any weights and will always return f32 tensors
+        # but time_embedding might be fp16, so we need to cast here.
+        timestep_img_token = timestep_img_token.to(dtype=self.dtype)
+        timestep_img_token = self.timestep_img_embed(timestep_img_token)
+        timestep_img_token = timestep_img_token.unsqueeze(dim=1)
+
+        # 1.3.
Encode text timesteps to single token (B, 1, inner_dim) + if not torch.is_tensor(timestep_text): + timestep_text = torch.tensor([timestep_text], dtype=torch.long, device=vae_hidden_states.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep_text = timestep_text * torch.ones(batch_size, dtype=timestep_text.dtype, device=timestep_text.device) + + timestep_text_token = self.timestep_text_proj(timestep_text) + # t_text_token does not contain any weights and will always return f32 tensors + # but time_embedding might be fp16, so we need to cast here. + timestep_text_token = timestep_text_token.to(dtype=self.dtype) + timestep_text_token = self.timestep_text_embed(timestep_text_token) + timestep_text_token = timestep_text_token.unsqueeze(dim=1) + + # 1.4. Concatenate all of the embeddings together. + if self.use_data_type_embedding: + assert data_type is not None, "data_type must be supplied if the model uses a data type embedding" + if not torch.is_tensor(data_type): + data_type = torch.tensor([data_type], dtype=torch.int, device=vae_hidden_states.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + data_type = data_type * torch.ones(batch_size, dtype=data_type.dtype, device=data_type.device) + + data_type_token = self.data_type_token_embedding(data_type).unsqueeze(dim=1) + hidden_states = torch.cat( + [ + timestep_img_token, + timestep_text_token, + data_type_token, + text_hidden_states, + clip_hidden_states, + vae_hidden_states, + ], + dim=1, + ) + else: + hidden_states = torch.cat( + [timestep_img_token, timestep_text_token, text_hidden_states, clip_hidden_states, vae_hidden_states], + dim=1, + ) + + # 1.5. Prepare the positional embeddings and add to hidden states + # Note: I think img_vae should always have the proper shape, so there's no need to interpolate + # the position embeddings. + if self.use_data_type_embedding: + pos_embed = torch.cat( + [self.pos_embed[:, : 1 + 1, :], self.data_type_pos_embed_token, self.pos_embed[:, 1 + 1 :, :]], dim=1 + ) + else: + pos_embed = self.pos_embed + hidden_states = hidden_states + pos_embed + hidden_states = self.pos_embed_drop(hidden_states) + + # 2. Blocks + hidden_states = self.transformer( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + timestep=None, + class_labels=None, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + hidden_states_is_embedding=True, + unpatchify=False, + )[0] + + # 3. Output + # Split out the predicted noise representation. 
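+        # The tokens were concatenated above as [t_img, t_text, (data_type,) text, clip_img, vae], so they are split
+        # back with sizes (1, 1, (1,) num_text_tokens, 1, num_img_tokens) along the sequence dimension.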
+ if self.use_data_type_embedding: + ( + t_img_token_out, + t_text_token_out, + data_type_token_out, + text_out, + img_clip_out, + img_vae_out, + ) = hidden_states.split((1, 1, 1, num_text_tokens, 1, num_img_tokens), dim=1) + else: + t_img_token_out, t_text_token_out, text_out, img_clip_out, img_vae_out = hidden_states.split( + (1, 1, num_text_tokens, 1, num_img_tokens), dim=1 + ) + + img_vae_out = self.vae_img_out(img_vae_out) + + # unpatchify + height = width = int(img_vae_out.shape[1] ** 0.5) + img_vae_out = img_vae_out.reshape( + shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) + ) + img_vae_out = torch.einsum("nhwpqc->nchpwq", img_vae_out) + img_vae_out = img_vae_out.reshape( + shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) + ) + + img_clip_out = self.clip_img_out(img_clip_out) + + text_out = self.text_out(text_out) + + return img_vae_out, img_clip_out, text_out diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py new file mode 100644 index 000000000..5d61b1054 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py @@ -0,0 +1,1419 @@ +import inspect +from dataclasses import dataclass +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTokenizer, + CLIPVisionModelWithProjection, + GPT2Tokenizer, +) + +from ...image_processor import VaeImageProcessor +from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ...utils.outputs import BaseOutput +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .modeling_text_decoder import UniDiffuserTextDecoder +from .modeling_uvit import UniDiffuserModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# New BaseOutput child class for joint image-text output +@dataclass +class ImageTextPipelineOutput(BaseOutput): + """ + Output class for joint image-text pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + text (`List[str]` or `List[List[str]]`) + List of generated text strings of length `batch_size` or a list of list of strings whose outer list has + length `batch_size`. + """ + + images: Optional[Union[List[PIL.Image.Image], np.ndarray]] + text: Optional[Union[List[str], List[List[str]]]] + + +class UniDiffuserPipeline(DiffusionPipeline): + r""" + Pipeline for a bimodal image-text model which supports unconditional text and image generation, text-conditioned + image generation, image-conditioned text generation, and joint image-text generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
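+
+    Example (an illustrative sketch; the checkpoint id and call arguments below are assumptions for demonstration,
+    not values taken from this file):
+
+    ```py
+    >>> import torch
+    >>> from diffusers import UniDiffuserPipeline
+
+    >>> pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
+    >>> pipe = pipe.to("cuda")
+    >>> # The mode is inferred from the inputs, or can be set explicitly, e.g. via `pipe.set_text_to_image_mode()`.
+    >>> image = pipe(prompt="an astronaut riding a horse").images[0]
+    ```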
+
+    Args:
+        vae ([`AutoencoderKL`]):
+            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. This
+            is part of the UniDiffuser image representation along with the CLIP vision encoding.
+        text_encoder ([`CLIPTextModel`]):
+            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
+        image_encoder ([`CLIPVisionModelWithProjection`]):
+            A [`~transformers.CLIPVisionModelWithProjection`] to encode images as part of its image representation
+            along with the VAE latent representation.
+        clip_image_processor ([`CLIPImageProcessor`]):
+            [`~transformers.CLIPImageProcessor`] to preprocess an image before CLIP encoding it with `image_encoder`.
+        clip_tokenizer ([`CLIPTokenizer`]):
+            A [`~transformers.CLIPTokenizer`] to tokenize the prompt before encoding it with `text_encoder`.
+        text_decoder ([`UniDiffuserTextDecoder`]):
+            Frozen text decoder. This is a GPT-style model which is used to generate text from the UniDiffuser
+            embedding.
+        text_tokenizer ([`GPT2Tokenizer`]):
+            A [`~transformers.GPT2Tokenizer`] to decode text for text generation; used along with the `text_decoder`.
+        unet ([`UniDiffuserModel`]):
+            A [U-ViT](https://github.com/baofff/U-ViT) model with U-Net-style skip connections between transformer
+            layers to denoise the encoded image latents.
+        scheduler ([`SchedulerMixin`]):
+            A scheduler to be used in combination with `unet` to denoise the encoded image and/or text latents. The
+            original UniDiffuser paper uses the [`DPMSolverMultistepScheduler`] scheduler.
+    """
+
+    # TODO: support for moving submodules for components with enable_model_cpu_offload
+    model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae->text_decoder"
+
+    def __init__(
+        self,
+        vae: AutoencoderKL,
+        text_encoder: CLIPTextModel,
+        image_encoder: CLIPVisionModelWithProjection,
+        clip_image_processor: CLIPImageProcessor,
+        clip_tokenizer: CLIPTokenizer,
+        text_decoder: UniDiffuserTextDecoder,
+        text_tokenizer: GPT2Tokenizer,
+        unet: UniDiffuserModel,
+        scheduler: KarrasDiffusionSchedulers,
+    ):
+        super().__init__()
+
+        if text_encoder.config.hidden_size != text_decoder.prefix_inner_dim:
+            raise ValueError(
+                f"The text encoder hidden size and text decoder prefix inner dim must be the same, but"
+                f" `text_encoder.config.hidden_size`: {text_encoder.config.hidden_size} and `text_decoder.prefix_inner_dim`: {text_decoder.prefix_inner_dim}"
+            )
+
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            image_encoder=image_encoder,
+            clip_image_processor=clip_image_processor,
+            clip_tokenizer=clip_tokenizer,
+            text_decoder=text_decoder,
+            text_tokenizer=text_tokenizer,
+            unet=unet,
+            scheduler=scheduler,
+        )
+
+        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+
+        self.num_channels_latents = vae.config.latent_channels
+        self.text_encoder_seq_len = text_encoder.config.max_position_embeddings
+        self.text_encoder_hidden_size = text_encoder.config.hidden_size
+        self.image_encoder_projection_dim = image_encoder.config.projection_dim
+        self.unet_resolution = unet.config.sample_size
+
+        self.text_intermediate_dim = self.text_encoder_hidden_size
+        if self.text_decoder.prefix_hidden_dim is not None:
+            self.text_intermediate_dim = self.text_decoder.prefix_hidden_dim
+
+        self.mode = None
+
+        # TODO: handle safety checking?
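+        # No safety checker is wired up yet, so generated images are returned unfiltered.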
+ self.safety_checker = None + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def _infer_mode(self, prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents): + r""" + Infer the generation task ('mode') from the inputs to `__call__`. If the mode has been manually set, the set + mode will be used. + """ + prompt_available = (prompt is not None) or (prompt_embeds is not None) + image_available = image is not None + input_available = prompt_available or image_available + + prompt_latents_available = prompt_latents is not None + vae_latents_available = vae_latents is not None + clip_latents_available = clip_latents is not None + full_latents_available = latents is not None + image_latents_available = vae_latents_available and clip_latents_available + all_indv_latents_available = prompt_latents_available and image_latents_available + + if self.mode is not None: + # Preferentially use the mode set by the user + mode = self.mode + elif prompt_available: + mode = "text2img" + elif image_available: + mode = "img2text" + else: + # Neither prompt nor image supplied, infer based on availability of latents + if full_latents_available or all_indv_latents_available: + mode = "joint" + elif prompt_latents_available: + mode = "text" + elif image_latents_available: + mode = "img" + else: + # No inputs or latents available + mode = "joint" + + # Give warnings for ambiguous cases + if self.mode is None and prompt_available and image_available: + logger.warning( + f"You have supplied both a text prompt and image to the pipeline and mode has not been set manually," + f" defaulting to mode '{mode}'." + ) + + if self.mode is None and not input_available: + if vae_latents_available != clip_latents_available: + # Exactly one of vae_latents and clip_latents is supplied + logger.warning( + f"You have supplied exactly one of `vae_latents` and `clip_latents`, whereas either both or none" + f" are expected to be supplied. Defaulting to mode '{mode}'." + ) + elif not prompt_latents_available and not vae_latents_available and not clip_latents_available: + # No inputs or latents supplied + logger.warning( + f"No inputs or latents have been supplied, and mode has not been manually set," + f" defaulting to mode '{mode}'." + ) + + return mode + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_slicing + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. 
+ """ + self.vae.enable_slicing() + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_tiling + def enable_vae_tiling(self): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.vae.enable_tiling() + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_tiling + def disable_vae_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_tiling() + + # Functions to manually set the mode + def set_text_mode(self): + r"""Manually set the generation mode to unconditional ("marginal") text generation.""" + self.mode = "text" + + def set_image_mode(self): + r"""Manually set the generation mode to unconditional ("marginal") image generation.""" + self.mode = "img" + + def set_text_to_image_mode(self): + r"""Manually set the generation mode to text-conditioned image generation.""" + self.mode = "text2img" + + def set_image_to_text_mode(self): + r"""Manually set the generation mode to image-conditioned text generation.""" + self.mode = "img2text" + + def set_joint_mode(self): + r"""Manually set the generation mode to unconditional joint image-text generation.""" + self.mode = "joint" + + def reset_mode(self): + r"""Removes a manually set mode; after calling this, the pipeline will infer the mode from inputs.""" + self.mode = None + + def _infer_batch_size( + self, + mode, + prompt, + prompt_embeds, + image, + num_images_per_prompt, + num_prompts_per_image, + latents, + prompt_latents, + vae_latents, + clip_latents, + ): + r"""Infers the batch size and multiplier depending on mode and supplied arguments to `__call__`.""" + if num_images_per_prompt is None: + num_images_per_prompt = 1 + if num_prompts_per_image is None: + num_prompts_per_image = 1 + + assert num_images_per_prompt > 0, "num_images_per_prompt must be a positive integer" + assert num_prompts_per_image > 0, "num_prompts_per_image must be a positive integer" + + if mode in ["text2img"]: + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + # Either prompt or prompt_embeds must be present for text2img. + batch_size = prompt_embeds.shape[0] + multiplier = num_images_per_prompt + elif mode in ["img2text"]: + if isinstance(image, PIL.Image.Image): + batch_size = 1 + else: + # Image must be available and type either PIL.Image.Image or torch.FloatTensor. + # Not currently supporting something like image_embeds. 
+ batch_size = image.shape[0] + multiplier = num_prompts_per_image + elif mode in ["img"]: + if vae_latents is not None: + batch_size = vae_latents.shape[0] + elif clip_latents is not None: + batch_size = clip_latents.shape[0] + else: + batch_size = 1 + multiplier = num_images_per_prompt + elif mode in ["text"]: + if prompt_latents is not None: + batch_size = prompt_latents.shape[0] + else: + batch_size = 1 + multiplier = num_prompts_per_image + elif mode in ["joint"]: + if latents is not None: + batch_size = latents.shape[0] + elif prompt_latents is not None: + batch_size = prompt_latents.shape[0] + elif vae_latents is not None: + batch_size = vae_latents.shape[0] + elif clip_latents is not None: + batch_size = clip_latents.shape[0] + else: + batch_size = 1 + + if num_images_per_prompt == num_prompts_per_image: + multiplier = num_images_per_prompt + else: + multiplier = min(num_images_per_prompt, num_prompts_per_image) + logger.warning( + f"You are using mode `{mode}` and `num_images_per_prompt`: {num_images_per_prompt} and" + f" num_prompts_per_image: {num_prompts_per_image} are not equal. Using batch size equal to" + f" `min(num_images_per_prompt, num_prompts_per_image) = {batch_size}." + ) + return batch_size, multiplier + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with self.tokenizer->self.clip_tokenizer + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. 
Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.clip_tokenizer) + + text_inputs = self.clip_tokenizer( + prompt, + padding="max_length", + max_length=self.clip_tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.clip_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.clip_tokenizer.batch_decode( + untruncated_ids[:, self.clip_tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.clip_tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.clip_tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.clip_tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_instruct_pix2pix.StableDiffusionInstructPix2PixPipeline.prepare_image_latents + # Add num_prompts_per_image argument, sample from autoencoder moment distribution + def encode_image_vae_latents( + self, + image, + batch_size, + num_prompts_per_image, + dtype, + device, + do_classifier_free_guidance, + generator=None, + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + 
) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_prompts_per_image + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if isinstance(generator, list): + image_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i]) + * self.vae.config.scaling_factor + for i in range(batch_size) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = self.vae.encode(image).latent_dist.sample(generator=generator) + # Scale image_latents by the VAE's scaling factor + image_latents = image_latents * self.vae.config.scaling_factor + + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." + ) + else: + image_latents = torch.cat([image_latents], dim=0) + + if do_classifier_free_guidance: + uncond_image_latents = torch.zeros_like(image_latents) + image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) + + return image_latents + + def encode_image_clip_latents( + self, + image, + batch_size, + num_prompts_per_image, + dtype, + device, + generator=None, + ): + # Map image to CLIP embedding. + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + preprocessed_image = self.clip_image_processor.preprocess( + image, + return_tensors="pt", + ) + preprocessed_image = preprocessed_image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_prompts_per_image + if isinstance(generator, list): + image_latents = [ + self.image_encoder(**preprocessed_image[i : i + 1]).image_embeds for i in range(batch_size) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = self.image_encoder(**preprocessed_image).image_embeds + + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." 
+ ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." + ) + else: + image_latents = torch.cat([image_latents], dim=0) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + return image_latents + + def prepare_text_latents( + self, batch_size, num_images_per_prompt, seq_len, hidden_size, dtype, device, generator, latents=None + ): + # Prepare latents for the CLIP embedded prompt. + shape = (batch_size * num_images_per_prompt, seq_len, hidden_size) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + # latents is assumed to have shace (B, L, D) + latents = latents.repeat(num_images_per_prompt, 1, 1) + latents = latents.to(device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + # Rename prepare_latents -> prepare_image_vae_latents and add num_prompts_per_image argument. + def prepare_image_vae_latents( + self, + batch_size, + num_prompts_per_image, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + shape = ( + batch_size * num_prompts_per_image, + num_channels_latents, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + # latents is assumed to have shape (B, C, H, W) + latents = latents.repeat(num_prompts_per_image, 1, 1, 1) + latents = latents.to(device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_image_clip_latents( + self, batch_size, num_prompts_per_image, clip_img_dim, dtype, device, generator, latents=None + ): + # Prepare latents for the CLIP embedded image. + shape = (batch_size * num_prompts_per_image, 1, clip_img_dim) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. 
Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + # latents is assumed to have shape (B, L, D) + latents = latents.repeat(num_prompts_per_image, 1, 1) + latents = latents.to(device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def decode_text_latents(self, text_latents, device): + output_token_list, seq_lengths = self.text_decoder.generate_captions( + text_latents, self.text_tokenizer.eos_token_id, device=device + ) + output_list = output_token_list.cpu().numpy() + generated_text = [ + self.text_tokenizer.decode(output[: int(length)], skip_special_tokens=True) + for output, length in zip(output_list, seq_lengths) + ] + return generated_text + + def _split(self, x, height, width): + r""" + Splits a flattened embedding x of shape (B, C * H * W + clip_img_dim) into two tensors of shape (B, C, H, W) + and (B, 1, clip_img_dim) + """ + batch_size = x.shape[0] + latent_height = height // self.vae_scale_factor + latent_width = width // self.vae_scale_factor + img_vae_dim = self.num_channels_latents * latent_height * latent_width + + img_vae, img_clip = x.split([img_vae_dim, self.image_encoder_projection_dim], dim=1) + + img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width)) + img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim)) + return img_vae, img_clip + + def _combine(self, img_vae, img_clip): + r""" + Combines a latent image img_vae of shape (B, C, H, W) and a CLIP-embedded image img_clip of shape (B, 1, + clip_img_dim) into a single tensor of shape (B, C * H * W + clip_img_dim). + """ + img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1)) + img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1)) + return torch.concat([img_vae, img_clip], dim=-1) + + def _split_joint(self, x, height, width): + r""" + Splits a flattened embedding x of shape (B, C * H * W + clip_img_dim + text_seq_len * text_dim) into (img_vae, + img_clip, text) where img_vae is of shape (B, C, H, W), img_clip is of shape (B, 1, clip_img_dim), and text is + of shape (B, text_seq_len, text_dim). + """ + batch_size = x.shape[0] + latent_height = height // self.vae_scale_factor + latent_width = width // self.vae_scale_factor + img_vae_dim = self.num_channels_latents * latent_height * latent_width + text_dim = self.text_encoder_seq_len * self.text_intermediate_dim + + img_vae, img_clip, text = x.split([img_vae_dim, self.image_encoder_projection_dim, text_dim], dim=1) + + img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width)) + img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim)) + text = torch.reshape(text, (batch_size, self.text_encoder_seq_len, self.text_intermediate_dim)) + return img_vae, img_clip, text + + def _combine_joint(self, img_vae, img_clip, text): + r""" + Combines a latent image img_vae of shape (B, C, H, W), a CLIP-embedded image img_clip of shape (B, L_img, + clip_img_dim), and a text embedding text of shape (B, L_text, text_dim) into a single embedding x of shape (B, + C * H * W + L_img * clip_img_dim + L_text * text_dim).
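# --- Standalone sketch of the _combine/_split round trip above ---
# (plain torch with hypothetical sizes: C=4 latent channels, 64x64 latents, clip_img_dim=512):
# the VAE latent and the CLIP image embedding are flattened into one vector per sample
# and recovered exactly by splitting at C*H*W.
import torch

B, C, H, W, clip_img_dim = 2, 4, 64, 64, 512
img_vae = torch.randn(B, C, H, W)
img_clip = torch.randn(B, 1, clip_img_dim)

combined = torch.cat([img_vae.reshape(B, -1), img_clip.reshape(B, -1)], dim=-1)  # "_combine"
assert combined.shape == (B, C * H * W + clip_img_dim)

vae_flat, clip_flat = combined.split([C * H * W, clip_img_dim], dim=1)           # "_split"
assert torch.equal(vae_flat.reshape(B, C, H, W), img_vae)
assert torch.equal(clip_flat.reshape(B, 1, clip_img_dim), img_clip)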
+ """ + img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1)) + img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1)) + text = torch.reshape(text, (text.shape[0], -1)) + return torch.concat([img_vae, img_clip, text], dim=-1) + + def _get_noise_pred( + self, + mode, + latents, + t, + prompt_embeds, + img_vae, + img_clip, + max_timestep, + data_type, + guidance_scale, + generator, + device, + height, + width, + ): + r""" + Gets the noise prediction using the `unet` and performs classifier-free guidance, if necessary. + """ + if mode == "joint": + # Joint text-image generation + img_vae_latents, img_clip_latents, text_latents = self._split_joint(latents, height, width) + + img_vae_out, img_clip_out, text_out = self.unet( + img_vae_latents, img_clip_latents, text_latents, timestep_img=t, timestep_text=t, data_type=data_type + ) + + x_out = self._combine_joint(img_vae_out, img_clip_out, text_out) + + if guidance_scale <= 1.0: + return x_out + + # Classifier-free guidance + img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype) + img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype) + text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) + + _, _, text_out_uncond = self.unet( + img_vae_T, img_clip_T, text_latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type + ) + + img_vae_out_uncond, img_clip_out_uncond, _ = self.unet( + img_vae_latents, + img_clip_latents, + text_T, + timestep_img=t, + timestep_text=max_timestep, + data_type=data_type, + ) + + x_out_uncond = self._combine_joint(img_vae_out_uncond, img_clip_out_uncond, text_out_uncond) + + return guidance_scale * x_out + (1.0 - guidance_scale) * x_out_uncond + elif mode == "text2img": + # Text-conditioned image generation + img_vae_latents, img_clip_latents = self._split(latents, height, width) + + img_vae_out, img_clip_out, text_out = self.unet( + img_vae_latents, img_clip_latents, prompt_embeds, timestep_img=t, timestep_text=0, data_type=data_type + ) + + img_out = self._combine(img_vae_out, img_clip_out) + + if guidance_scale <= 1.0: + return img_out + + # Classifier-free guidance + text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) + + img_vae_out_uncond, img_clip_out_uncond, text_out_uncond = self.unet( + img_vae_latents, + img_clip_latents, + text_T, + timestep_img=t, + timestep_text=max_timestep, + data_type=data_type, + ) + + img_out_uncond = self._combine(img_vae_out_uncond, img_clip_out_uncond) + + return guidance_scale * img_out + (1.0 - guidance_scale) * img_out_uncond + elif mode == "img2text": + # Image-conditioned text generation + img_vae_out, img_clip_out, text_out = self.unet( + img_vae, img_clip, latents, timestep_img=0, timestep_text=t, data_type=data_type + ) + + if guidance_scale <= 1.0: + return text_out + + # Classifier-free guidance + img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype) + img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype) + + img_vae_out_uncond, img_clip_out_uncond, text_out_uncond = self.unet( + img_vae_T, img_clip_T, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type + ) + + return guidance_scale * text_out + (1.0 - guidance_scale) * text_out_uncond + elif mode == "text": + # Unconditional ("marginal") text generation (no CFG) + img_vae_out, img_clip_out, text_out = 
self.unet( + img_vae, img_clip, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type + ) + + return text_out + elif mode == "img": + # Unconditional ("marginal") image generation (no CFG) + img_vae_latents, img_clip_latents = self._split(latents, height, width) + + img_vae_out, img_clip_out, text_out = self.unet( + img_vae_latents, + img_clip_latents, + prompt_embeds, + timestep_img=t, + timestep_text=max_timestep, + data_type=data_type, + ) + + img_out = self._combine(img_vae_out, img_clip_out) + return img_out + + def check_latents_shape(self, latents_name, latents, expected_shape): + latents_shape = latents.shape + expected_num_dims = len(expected_shape) + 1 # expected dimensions plus the batch dimension + expected_shape_str = ", ".join(str(dim) for dim in expected_shape) + if len(latents_shape) != expected_num_dims: + raise ValueError( + f"`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape" + f" {latents_shape} has {len(latents_shape)} dimensions." + ) + for i in range(1, expected_num_dims): + if latents_shape[i] != expected_shape[i - 1]: + raise ValueError( + f"`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape" + f" {latents_shape} has {latents_shape[i]} != {expected_shape[i - 1]} at dimension {i}." + ) + + def check_inputs( + self, + mode, + prompt, + image, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + latents=None, + prompt_latents=None, + vae_latents=None, + clip_latents=None, + ): + # Check inputs before running the generative process. + if height % self.vae_scale_factor != 0 or width % self.vae_scale_factor != 0: + raise ValueError( + f"`height` and `width` have to be divisible by {self.vae_scale_factor} but are {height} and {width}." + ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if mode == "text2img": + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
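# --- Small check on the guidance combination used in _get_noise_pred above ---
# s * cond + (1 - s) * uncond is algebraically the familiar classifier-free-guidance
# form uncond + s * (cond - uncond); shown here on random toy tensors.
import torch

s = 8.0
cond, uncond = torch.randn(2, 16), torch.randn(2, 16)
a = s * cond + (1.0 - s) * uncond     # formulation used in this pipeline
b = uncond + s * (cond - uncond)      # standard CFG formulation
assert torch.allclose(a, b, atol=1e-5)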
+ ) + + if mode == "img2text": + if image is None: + raise ValueError("`img2text` mode requires an image to be provided.") + + # Check provided latents + latent_height = height // self.vae_scale_factor + latent_width = width // self.vae_scale_factor + full_latents_available = latents is not None + prompt_latents_available = prompt_latents is not None + vae_latents_available = vae_latents is not None + clip_latents_available = clip_latents is not None + + if full_latents_available: + individual_latents_available = ( + prompt_latents is not None or vae_latents is not None or clip_latents is not None + ) + if individual_latents_available: + logger.warning( + "You have supplied both `latents` and at least one of `prompt_latents`, `vae_latents`, and" + " `clip_latents`. The value of `latents` will override the value of any individually supplied latents." + ) + # Check shape of full latents + img_vae_dim = self.num_channels_latents * latent_height * latent_width + text_dim = self.text_encoder_seq_len * self.text_encoder_hidden_size + latents_dim = img_vae_dim + self.image_encoder_projection_dim + text_dim + latents_expected_shape = (latents_dim,) + self.check_latents_shape("latents", latents, latents_expected_shape) + + # Check individual latent shapes, if present + if prompt_latents_available: + prompt_latents_expected_shape = (self.text_encoder_seq_len, self.text_encoder_hidden_size) + self.check_latents_shape("prompt_latents", prompt_latents, prompt_latents_expected_shape) + + if vae_latents_available: + vae_latents_expected_shape = (self.num_channels_latents, latent_height, latent_width) + self.check_latents_shape("vae_latents", vae_latents, vae_latents_expected_shape) + + if clip_latents_available: + clip_latents_expected_shape = (1, self.image_encoder_projection_dim) + self.check_latents_shape("clip_latents", clip_latents, clip_latents_expected_shape) + + if mode in ["text2img", "img"] and vae_latents_available and clip_latents_available: + if vae_latents.shape[0] != clip_latents.shape[0]: + raise ValueError( + f"Both `vae_latents` and `clip_latents` are supplied, but their batch dimensions are not equal:" + f" {vae_latents.shape[0]} != {clip_latents.shape[0]}." + ) + + if mode == "joint" and prompt_latents_available and vae_latents_available and clip_latents_available: + if prompt_latents.shape[0] != vae_latents.shape[0] or prompt_latents.shape[0] != clip_latents.shape[0]: + raise ValueError( + f"All of `prompt_latents`, `vae_latents`, and `clip_latents` are supplied, but their batch" + f" dimensions are not equal: {prompt_latents.shape[0]} != {vae_latents.shape[0]}" + f" != {clip_latents.shape[0]}." 
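# --- Worked example of the full-`latents` width checked in check_inputs above ---
# All numbers below are hypothetical (the real values come from the loaded checkpoint):
# latents_dim = C * h * w + clip_img_dim + text_seq_len * text_dim.
num_channels_latents = 4                 # hypothetical
latent_height = latent_width = 64        # hypothetical
image_encoder_projection_dim = 512       # hypothetical
text_encoder_seq_len, text_encoder_hidden_size = 77, 64  # hypothetical

img_vae_dim = num_channels_latents * latent_height * latent_width      # 16384
text_dim = text_encoder_seq_len * text_encoder_hidden_size             # 4928
latents_dim = img_vae_dim + image_encoder_projection_dim + text_dim    # 21824
# a full `latents` tensor would then be expected to have shape (batch_size, 21824)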
+ ) + + @torch.no_grad() + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + image: Optional[Union[torch.FloatTensor, PIL.Image.Image]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + data_type: Optional[int] = 1, + num_inference_steps: int = 50, + guidance_scale: float = 8.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + num_prompts_per_image: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_latents: Optional[torch.FloatTensor] = None, + vae_latents: Optional[torch.FloatTensor] = None, + clip_latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + Required for text-conditioned image generation (`text2img`) mode. + image (`torch.FloatTensor` or `PIL.Image.Image`, *optional*): + `Image` or tensor representing an image batch. Required for image-conditioned text generation + (`img2text`) mode. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + data_type (`int`, *optional*, defaults to 1): + The data type (either 0 or 1). Only used if you are loading a checkpoint which supports a data type + embedding; this is added for compatibility with the + [UniDiffuser-v1](https://huggingface.co/thu-ml/unidiffuser-v1) checkpoint. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 8.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). Used in + text-conditioned image generation (`text2img`) mode. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. Used in `text2img` (text-conditioned image generation) and + `img` mode. If the mode is joint and both `num_images_per_prompt` and `num_prompts_per_image` are + supplied, `min(num_images_per_prompt, num_prompts_per_image)` samples are generated. + num_prompts_per_image (`int`, *optional*, defaults to 1): + The number of prompts to generate per image. Used in `img2text` (image-conditioned text generation) and + `text` mode. 
If the mode is joint and both `num_images_per_prompt` and `num_prompts_per_image` are + supplied, `min(num_images_per_prompt, num_prompts_per_image)` samples are generated. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for joint + image-text generation. Can be used to tweak the same generation with different prompts. If not + provided, a latents tensor is generated by sampling using the supplied random `generator`. This assumes + a full set of VAE, CLIP, and text latents, if supplied, overrides the value of `prompt_latents`, + `vae_latents`, and `clip_latents`. + prompt_latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for text + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + vae_latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + clip_latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. Used in text-conditioned + image generation (`text2img`) mode. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are be generated from the `negative_prompt` input argument. Used + in text-conditioned image generation (`text2img`) mode. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImageTextPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. 
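# --- Hedged usage sketch for the call signature documented in this docstring ---
# Class and checkpoint names are assumed from the UniDiffuser reference above, and a CUDA
# device is assumed; the mode is inferred from which of prompt / image / latents are given.
import torch
from diffusers import UniDiffuserPipeline

pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16).to("cuda")

# text2img: prompt given, no image
image = pipe(prompt="an astronaut riding a horse", num_inference_steps=50, guidance_scale=8.0).images[0]

# img2text: image given, no prompt
caption = pipe(image=image, num_inference_steps=50, guidance_scale=8.0).text[0]

# joint: neither given -> sample an (image, text) pair
out = pipe(num_inference_steps=50, guidance_scale=8.0)
image_joint, text_joint = out.images[0], out.text[0]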
+ + Returns: + [`~pipelines.unidiffuser.ImageTextPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.unidiffuser.ImageTextPipelineOutput`] is returned, otherwise a + `tuple` is returned where the first element is a list with the generated images and the second element + is a list of generated texts. + """ + + # 0. Default height and width to unet + height = height or self.unet_resolution * self.vae_scale_factor + width = width or self.unet_resolution * self.vae_scale_factor + + # 1. Check inputs + # Recalculate mode for each call to the pipeline. + mode = self._infer_mode(prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents) + self.check_inputs( + mode, + prompt, + image, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + latents, + prompt_latents, + vae_latents, + clip_latents, + ) + + # 2. Define call parameters + batch_size, multiplier = self._infer_batch_size( + mode, + prompt, + prompt_embeds, + image, + num_images_per_prompt, + num_prompts_per_image, + latents, + prompt_latents, + vae_latents, + clip_latents, + ) + device = self._execution_device + reduce_text_emb_dim = self.text_intermediate_dim < self.text_encoder_hidden_size or self.mode != "text2img" + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + # Note that this differs from the formulation in the unidiffusers paper! + do_classifier_free_guidance = guidance_scale > 1.0 + + # check if scheduler is in sigmas space + # scheduler_is_in_sigma_space = hasattr(self.scheduler, "sigmas") + + # 3. Encode input prompt, if available; otherwise prepare text latents + if latents is not None: + # Overwrite individual latents + vae_latents, clip_latents, prompt_latents = self._split_joint(latents, height, width) + + if mode in ["text2img"]: + # 3.1. Encode input prompt, if available + assert prompt is not None or prompt_embeds is not None + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=multiplier, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # if do_classifier_free_guidance: + # prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + else: + # 3.2. Prepare text latent variables, if input not available + prompt_embeds = self.prepare_text_latents( + batch_size=batch_size, + num_images_per_prompt=multiplier, + seq_len=self.text_encoder_seq_len, + hidden_size=self.text_encoder_hidden_size, + dtype=self.text_encoder.dtype, # Should work with both full precision and mixed precision + device=device, + generator=generator, + latents=prompt_latents, + ) + + if reduce_text_emb_dim: + prompt_embeds = self.text_decoder.encode(prompt_embeds) + + # 4. Encode image, if available; otherwise prepare image latents + if mode in ["img2text"]: + # 4.1. 
Encode images, if available + assert image is not None, "`img2text` requires a conditioning image" + # Encode image using VAE + image_vae = self.image_processor.preprocess(image) + height, width = image_vae.shape[-2:] + image_vae_latents = self.encode_image_vae_latents( + image=image_vae, + batch_size=batch_size, + num_prompts_per_image=multiplier, + dtype=prompt_embeds.dtype, + device=device, + do_classifier_free_guidance=False, # Copied from InstructPix2Pix, don't use their version of CFG + generator=generator, + ) + + # Encode image using CLIP + image_clip_latents = self.encode_image_clip_latents( + image=image, + batch_size=batch_size, + num_prompts_per_image=multiplier, + dtype=prompt_embeds.dtype, + device=device, + generator=generator, + ) + # (batch_size, clip_hidden_size) => (batch_size, 1, clip_hidden_size) + image_clip_latents = image_clip_latents.unsqueeze(1) + else: + # 4.2. Prepare image latent variables, if input not available + # Prepare image VAE latents in latent space + image_vae_latents = self.prepare_image_vae_latents( + batch_size=batch_size, + num_prompts_per_image=multiplier, + num_channels_latents=self.num_channels_latents, + height=height, + width=width, + dtype=prompt_embeds.dtype, + device=device, + generator=generator, + latents=vae_latents, + ) + + # Prepare image CLIP latents + image_clip_latents = self.prepare_image_clip_latents( + batch_size=batch_size, + num_prompts_per_image=multiplier, + clip_img_dim=self.image_encoder_projection_dim, + dtype=prompt_embeds.dtype, + device=device, + generator=generator, + latents=clip_latents, + ) + + # 5. Set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + # max_timestep = timesteps[0] + max_timestep = self.scheduler.config.num_train_timesteps + + # 6. Prepare latent variables + if mode == "joint": + latents = self._combine_joint(image_vae_latents, image_clip_latents, prompt_embeds) + elif mode in ["text2img", "img"]: + latents = self._combine(image_vae_latents, image_clip_latents) + elif mode in ["img2text", "text"]: + latents = prompt_embeds + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + logger.debug(f"Scheduler extra step kwargs: {extra_step_kwargs}") + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # predict the noise residual + # Also applies classifier-free guidance as described in the UniDiffuser paper + noise_pred = self._get_noise_pred( + mode, + latents, + t, + prompt_embeds, + image_vae_latents, + image_clip_latents, + max_timestep, + data_type, + guidance_scale, + generator, + device, + height, + width, + ) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 9. 
Post-processing + image = None + text = None + if mode == "joint": + image_vae_latents, image_clip_latents, text_latents = self._split_joint(latents, height, width) + + if not output_type == "latent": + # Map latent VAE image back to pixel space + image = self.vae.decode(image_vae_latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = image_vae_latents + + text = self.decode_text_latents(text_latents, device) + elif mode in ["text2img", "img"]: + image_vae_latents, image_clip_latents = self._split(latents, height, width) + + if not output_type == "latent": + # Map latent VAE image back to pixel space + image = self.vae.decode(image_vae_latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = image_vae_latents + elif mode in ["img2text", "text"]: + text_latents = latents + text = self.decode_text_latents(text_latents, device) + + self.maybe_free_model_hooks() + + # 10. Postprocess the image, if necessary + if image is not None: + do_denormalize = [True] * image.shape[0] + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + + if not return_dict: + return (image, text) + + return ImageTextPipelineOutput(images=image, text=text) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/__init__.py new file mode 100644 index 000000000..ddb852d19 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/__init__.py @@ -0,0 +1,56 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["modeling_paella_vq_model"] = ["PaellaVQModel"] + _import_structure["modeling_wuerstchen_diffnext"] = ["WuerstchenDiffNeXt"] + _import_structure["modeling_wuerstchen_prior"] = ["WuerstchenPrior"] + _import_structure["pipeline_wuerstchen"] = ["WuerstchenDecoderPipeline"] + _import_structure["pipeline_wuerstchen_combined"] = ["WuerstchenCombinedPipeline"] + _import_structure["pipeline_wuerstchen_prior"] = ["DEFAULT_STAGE_C_TIMESTEPS", "WuerstchenPriorPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .modeling_paella_vq_model import PaellaVQModel + from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt + from .modeling_wuerstchen_prior import WuerstchenPrior + from .pipeline_wuerstchen import WuerstchenDecoderPipeline + from .pipeline_wuerstchen_combined import WuerstchenCombinedPipeline + from .pipeline_wuerstchen_prior import DEFAULT_STAGE_C_TIMESTEPS, 
WuerstchenPriorPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py new file mode 100644 index 000000000..3b21dfb5f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py @@ -0,0 +1,172 @@ +# Copyright (c) 2022 Dominic Rampas MIT License +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Union + +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models.autoencoders.vae import DecoderOutput, VectorQuantizer +from ...models.modeling_utils import ModelMixin +from ...models.vq_model import VQEncoderOutput +from ...utils.accelerate_utils import apply_forward_hook + + +class MixingResidualBlock(nn.Module): + """ + Residual block with mixing used by Paella's VQ-VAE. + """ + + def __init__(self, inp_channels, embed_dim): + super().__init__() + # depthwise + self.norm1 = nn.LayerNorm(inp_channels, elementwise_affine=False, eps=1e-6) + self.depthwise = nn.Sequential( + nn.ReplicationPad2d(1), nn.Conv2d(inp_channels, inp_channels, kernel_size=3, groups=inp_channels) + ) + + # channelwise + self.norm2 = nn.LayerNorm(inp_channels, elementwise_affine=False, eps=1e-6) + self.channelwise = nn.Sequential( + nn.Linear(inp_channels, embed_dim), nn.GELU(), nn.Linear(embed_dim, inp_channels) + ) + + self.gammas = nn.Parameter(torch.zeros(6), requires_grad=True) + + def forward(self, x): + mods = self.gammas + x_temp = self.norm1(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * (1 + mods[0]) + mods[1] + x = x + self.depthwise(x_temp) * mods[2] + x_temp = self.norm2(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * (1 + mods[3]) + mods[4] + x = x + self.channelwise(x_temp.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * mods[5] + return x + + +class PaellaVQModel(ModelMixin, ConfigMixin): + r"""VQ-VAE model from Paella model. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library + implements for all the model (such as downloading or saving, etc.) + + Parameters: + in_channels (int, *optional*, defaults to 3): Number of channels in the input image. + out_channels (int, *optional*, defaults to 3): Number of channels in the output. + up_down_scale_factor (int, *optional*, defaults to 2): Up and Downscale factor of the input image. + levels (int, *optional*, defaults to 2): Number of levels in the model. 
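# --- Small sketch of the encoder channel schedule built in PaellaVQModel.__init__ below ---
# c_levels = [embed_dim // 2**i for i in reversed(range(levels))]; with the documented
# defaults (embed_dim=384, levels=2) this yields [192, 384], i.e. the encoder widens toward
# the bottleneck while PixelUnshuffle(2) trades spatial resolution for channels.
embed_dim, levels = 384, 2
c_levels = [embed_dim // (2 ** i) for i in reversed(range(levels))]
assert c_levels == [192, 384]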
+ bottleneck_blocks (int, *optional*, defaults to 12): Number of bottleneck blocks in the model. + embed_dim (int, *optional*, defaults to 384): Number of hidden channels in the model. + latent_channels (int, *optional*, defaults to 4): Number of latent channels in the VQ-VAE model. + num_vq_embeddings (int, *optional*, defaults to 8192): Number of codebook vectors in the VQ-VAE. + scale_factor (float, *optional*, defaults to 0.3764): Scaling factor of the latent space. + """ + + @register_to_config + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + up_down_scale_factor: int = 2, + levels: int = 2, + bottleneck_blocks: int = 12, + embed_dim: int = 384, + latent_channels: int = 4, + num_vq_embeddings: int = 8192, + scale_factor: float = 0.3764, + ): + super().__init__() + + c_levels = [embed_dim // (2**i) for i in reversed(range(levels))] + # Encoder blocks + self.in_block = nn.Sequential( + nn.PixelUnshuffle(up_down_scale_factor), + nn.Conv2d(in_channels * up_down_scale_factor**2, c_levels[0], kernel_size=1), + ) + down_blocks = [] + for i in range(levels): + if i > 0: + down_blocks.append(nn.Conv2d(c_levels[i - 1], c_levels[i], kernel_size=4, stride=2, padding=1)) + block = MixingResidualBlock(c_levels[i], c_levels[i] * 4) + down_blocks.append(block) + down_blocks.append( + nn.Sequential( + nn.Conv2d(c_levels[-1], latent_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(latent_channels), # then normalize them to have mean 0 and std 1 + ) + ) + self.down_blocks = nn.Sequential(*down_blocks) + + # Vector Quantizer + self.vquantizer = VectorQuantizer(num_vq_embeddings, vq_embed_dim=latent_channels, legacy=False, beta=0.25) + + # Decoder blocks + up_blocks = [nn.Sequential(nn.Conv2d(latent_channels, c_levels[-1], kernel_size=1))] + for i in range(levels): + for j in range(bottleneck_blocks if i == 0 else 1): + block = MixingResidualBlock(c_levels[levels - 1 - i], c_levels[levels - 1 - i] * 4) + up_blocks.append(block) + if i < levels - 1: + up_blocks.append( + nn.ConvTranspose2d( + c_levels[levels - 1 - i], c_levels[levels - 2 - i], kernel_size=4, stride=2, padding=1 + ) + ) + self.up_blocks = nn.Sequential(*up_blocks) + self.out_block = nn.Sequential( + nn.Conv2d(c_levels[0], out_channels * up_down_scale_factor**2, kernel_size=1), + nn.PixelShuffle(up_down_scale_factor), + ) + + @apply_forward_hook + def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput: + h = self.in_block(x) + h = self.down_blocks(h) + + if not return_dict: + return (h,) + + return VQEncoderOutput(latents=h) + + @apply_forward_hook + def decode( + self, h: torch.FloatTensor, force_not_quantize: bool = True, return_dict: bool = True + ) -> Union[DecoderOutput, torch.FloatTensor]: + if not force_not_quantize: + quant, _, _ = self.vquantizer(h) + else: + quant = h + + x = self.up_blocks(quant) + dec = self.out_block(x) + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) + + def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]: + r""" + Args: + sample (`torch.FloatTensor`): Input sample. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`DecoderOutput`] instead of a plain tuple. 
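# --- Hedged usage sketch for the encode/decode API of PaellaVQModel (defined above) ---
# Assumes the class is importable from this module; the toy input makes the output
# meaningless, the point is only the shape round trip with the default config.
import torch

vq = PaellaVQModel()                      # defaults: in_channels=3, levels=2, latent_channels=4
images = torch.randn(1, 3, 256, 256)
latents = vq.encode(images).latents       # (1, 4, 64, 64): PixelUnshuffle(2) plus one stride-2 conv
recon = vq.decode(latents).sample         # back to (1, 3, 256, 256)
assert recon.shape == images.shape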
+ """ + x = sample + h = self.encode(x).latents + dec = self.decode(h).sample + + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py new file mode 100644 index 000000000..101acafcf --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py @@ -0,0 +1,81 @@ +import torch +import torch.nn as nn + +from ...models.attention_processor import Attention + + +class WuerstchenLayerNorm(nn.LayerNorm): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def forward(self, x): + x = x.permute(0, 2, 3, 1) + x = super().forward(x) + return x.permute(0, 3, 1, 2) + + +class TimestepBlock(nn.Module): + def __init__(self, c, c_timestep): + super().__init__() + linear_cls = nn.Linear + self.mapper = linear_cls(c_timestep, c * 2) + + def forward(self, x, t): + a, b = self.mapper(t)[:, :, None, None].chunk(2, dim=1) + return x * (1 + a) + b + + +class ResBlock(nn.Module): + def __init__(self, c, c_skip=0, kernel_size=3, dropout=0.0): + super().__init__() + + conv_cls = nn.Conv2d + linear_cls = nn.Linear + + self.depthwise = conv_cls(c + c_skip, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) + self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6) + self.channelwise = nn.Sequential( + linear_cls(c, c * 4), nn.GELU(), GlobalResponseNorm(c * 4), nn.Dropout(dropout), linear_cls(c * 4, c) + ) + + def forward(self, x, x_skip=None): + x_res = x + if x_skip is not None: + x = torch.cat([x, x_skip], dim=1) + x = self.norm(self.depthwise(x)).permute(0, 2, 3, 1) + x = self.channelwise(x).permute(0, 3, 1, 2) + return x + x_res + + +# from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105 +class GlobalResponseNorm(nn.Module): + def __init__(self, dim): + super().__init__() + self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) + self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) + + def forward(self, x): + agg_norm = torch.norm(x, p=2, dim=(1, 2), keepdim=True) + stand_div_norm = agg_norm / (agg_norm.mean(dim=-1, keepdim=True) + 1e-6) + return self.gamma * (x * stand_div_norm) + self.beta + x + + +class AttnBlock(nn.Module): + def __init__(self, c, c_cond, nhead, self_attn=True, dropout=0.0): + super().__init__() + + linear_cls = nn.Linear + + self.self_attn = self_attn + self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6) + self.attention = Attention(query_dim=c, heads=nhead, dim_head=c // nhead, dropout=dropout, bias=True) + self.kv_mapper = nn.Sequential(nn.SiLU(), linear_cls(c_cond, c)) + + def forward(self, x, kv): + kv = self.kv_mapper(kv) + norm_x = self.norm(x) + if self.self_attn: + batch_size, channel, _, _ = x.shape + kv = torch.cat([norm_x.view(batch_size, channel, -1).transpose(1, 2), kv], dim=1) + x = x + self.attention(norm_x, encoder_hidden_states=kv) + return x diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py new file mode 100644 index 000000000..6c06cc0e7 --- 
/dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py @@ -0,0 +1,254 @@ +# Copyright (c) 2023 Dominic Rampas MIT License +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math + +import numpy as np +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models.modeling_utils import ModelMixin +from .modeling_wuerstchen_common import AttnBlock, GlobalResponseNorm, TimestepBlock, WuerstchenLayerNorm + + +class WuerstchenDiffNeXt(ModelMixin, ConfigMixin): + @register_to_config + def __init__( + self, + c_in=4, + c_out=4, + c_r=64, + patch_size=2, + c_cond=1024, + c_hidden=[320, 640, 1280, 1280], + nhead=[-1, 10, 20, 20], + blocks=[4, 4, 14, 4], + level_config=["CT", "CTA", "CTA", "CTA"], + inject_effnet=[False, True, True, True], + effnet_embd=16, + clip_embd=1024, + kernel_size=3, + dropout=0.1, + ): + super().__init__() + self.c_r = c_r + self.c_cond = c_cond + if not isinstance(dropout, list): + dropout = [dropout] * len(c_hidden) + + # CONDITIONING + self.clip_mapper = nn.Linear(clip_embd, c_cond) + self.effnet_mappers = nn.ModuleList( + [ + nn.Conv2d(effnet_embd, c_cond, kernel_size=1) if inject else None + for inject in inject_effnet + list(reversed(inject_effnet)) + ] + ) + self.seq_norm = nn.LayerNorm(c_cond, elementwise_affine=False, eps=1e-6) + + self.embedding = nn.Sequential( + nn.PixelUnshuffle(patch_size), + nn.Conv2d(c_in * (patch_size**2), c_hidden[0], kernel_size=1), + WuerstchenLayerNorm(c_hidden[0], elementwise_affine=False, eps=1e-6), + ) + + def get_block(block_type, c_hidden, nhead, c_skip=0, dropout=0): + if block_type == "C": + return ResBlockStageB(c_hidden, c_skip, kernel_size=kernel_size, dropout=dropout) + elif block_type == "A": + return AttnBlock(c_hidden, c_cond, nhead, self_attn=True, dropout=dropout) + elif block_type == "T": + return TimestepBlock(c_hidden, c_r) + else: + raise ValueError(f"Block type {block_type} not supported") + + # BLOCKS + # -- down blocks + self.down_blocks = nn.ModuleList() + for i in range(len(c_hidden)): + down_block = nn.ModuleList() + if i > 0: + down_block.append( + nn.Sequential( + WuerstchenLayerNorm(c_hidden[i - 1], elementwise_affine=False, eps=1e-6), + nn.Conv2d(c_hidden[i - 1], c_hidden[i], kernel_size=2, stride=2), + ) + ) + for _ in range(blocks[i]): + for block_type in level_config[i]: + c_skip = c_cond if inject_effnet[i] else 0 + down_block.append(get_block(block_type, c_hidden[i], nhead[i], c_skip=c_skip, dropout=dropout[i])) + self.down_blocks.append(down_block) + + # -- up blocks + self.up_blocks = nn.ModuleList() + for i in reversed(range(len(c_hidden))): + up_block = nn.ModuleList() + for j in range(blocks[i]): + for k, block_type in enumerate(level_config[i]): + c_skip = c_hidden[i] if i < len(c_hidden) - 1 and j == k == 0 else 0 + c_skip += c_cond if inject_effnet[i] else 0 + 
up_block.append(get_block(block_type, c_hidden[i], nhead[i], c_skip=c_skip, dropout=dropout[i])) + if i > 0: + up_block.append( + nn.Sequential( + WuerstchenLayerNorm(c_hidden[i], elementwise_affine=False, eps=1e-6), + nn.ConvTranspose2d(c_hidden[i], c_hidden[i - 1], kernel_size=2, stride=2), + ) + ) + self.up_blocks.append(up_block) + + # OUTPUT + self.clf = nn.Sequential( + WuerstchenLayerNorm(c_hidden[0], elementwise_affine=False, eps=1e-6), + nn.Conv2d(c_hidden[0], 2 * c_out * (patch_size**2), kernel_size=1), + nn.PixelShuffle(patch_size), + ) + + # --- WEIGHT INIT --- + self.apply(self._init_weights) + + def _init_weights(self, m): + # General init + if isinstance(m, (nn.Conv2d, nn.Linear)): + nn.init.xavier_uniform_(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + for mapper in self.effnet_mappers: + if mapper is not None: + nn.init.normal_(mapper.weight, std=0.02) # conditionings + nn.init.normal_(self.clip_mapper.weight, std=0.02) # conditionings + nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) # inputs + nn.init.constant_(self.clf[1].weight, 0) # outputs + + # blocks + for level_block in self.down_blocks + self.up_blocks: + for block in level_block: + if isinstance(block, ResBlockStageB): + block.channelwise[-1].weight.data *= np.sqrt(1 / sum(self.config.blocks)) + elif isinstance(block, TimestepBlock): + nn.init.constant_(block.mapper.weight, 0) + + def gen_r_embedding(self, r, max_positions=10000): + r = r * max_positions + half_dim = self.c_r // 2 + emb = math.log(max_positions) / (half_dim - 1) + emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() + emb = r[:, None] * emb[None, :] + emb = torch.cat([emb.sin(), emb.cos()], dim=1) + if self.c_r % 2 == 1: # zero pad + emb = nn.functional.pad(emb, (0, 1), mode="constant") + return emb.to(dtype=r.dtype) + + def gen_c_embeddings(self, clip): + clip = self.clip_mapper(clip) + clip = self.seq_norm(clip) + return clip + + def _down_encode(self, x, r_embed, effnet, clip=None): + level_outputs = [] + for i, down_block in enumerate(self.down_blocks): + effnet_c = None + for block in down_block: + if isinstance(block, ResBlockStageB): + if effnet_c is None and self.effnet_mappers[i] is not None: + dtype = effnet.dtype + effnet_c = self.effnet_mappers[i]( + nn.functional.interpolate( + effnet.float(), size=x.shape[-2:], mode="bicubic", antialias=True, align_corners=True + ).to(dtype) + ) + skip = effnet_c if self.effnet_mappers[i] is not None else None + x = block(x, skip) + elif isinstance(block, AttnBlock): + x = block(x, clip) + elif isinstance(block, TimestepBlock): + x = block(x, r_embed) + else: + x = block(x) + level_outputs.insert(0, x) + return level_outputs + + def _up_decode(self, level_outputs, r_embed, effnet, clip=None): + x = level_outputs[0] + for i, up_block in enumerate(self.up_blocks): + effnet_c = None + for j, block in enumerate(up_block): + if isinstance(block, ResBlockStageB): + if effnet_c is None and self.effnet_mappers[len(self.down_blocks) + i] is not None: + dtype = effnet.dtype + effnet_c = self.effnet_mappers[len(self.down_blocks) + i]( + nn.functional.interpolate( + effnet.float(), size=x.shape[-2:], mode="bicubic", antialias=True, align_corners=True + ).to(dtype) + ) + skip = level_outputs[i] if j == 0 and i > 0 else None + if effnet_c is not None: + if skip is not None: + skip = torch.cat([skip, effnet_c], dim=1) + else: + skip = effnet_c + x = block(x, skip) + elif isinstance(block, AttnBlock): + x = block(x, clip) + elif isinstance(block, TimestepBlock): + x 
= block(x, r_embed) + else: + x = block(x) + return x + + def forward(self, x, r, effnet, clip=None, x_cat=None, eps=1e-3, return_noise=True): + if x_cat is not None: + x = torch.cat([x, x_cat], dim=1) + # Process the conditioning embeddings + r_embed = self.gen_r_embedding(r) + if clip is not None: + clip = self.gen_c_embeddings(clip) + + # Model Blocks + x_in = x + x = self.embedding(x) + level_outputs = self._down_encode(x, r_embed, effnet, clip) + x = self._up_decode(level_outputs, r_embed, effnet, clip) + a, b = self.clf(x).chunk(2, dim=1) + b = b.sigmoid() * (1 - eps * 2) + eps + if return_noise: + return (x_in - a) / b + else: + return a, b + + +class ResBlockStageB(nn.Module): + def __init__(self, c, c_skip=0, kernel_size=3, dropout=0.0): + super().__init__() + self.depthwise = nn.Conv2d(c, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) + self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6) + self.channelwise = nn.Sequential( + nn.Linear(c + c_skip, c * 4), + nn.GELU(), + GlobalResponseNorm(c * 4), + nn.Dropout(dropout), + nn.Linear(c * 4, c), + ) + + def forward(self, x, x_skip=None): + x_res = x + x = self.norm(self.depthwise(x)) + if x_skip is not None: + x = torch.cat([x, x_skip], dim=1) + x = self.channelwise(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + return x + x_res diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py new file mode 100644 index 000000000..8cc294eaf --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py @@ -0,0 +1,200 @@ +# Copyright (c) 2023 Dominic Rampas MIT License +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
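# --- Standalone sketch of gen_r_embedding, as used by WuerstchenDiffNeXt above and WuerstchenPrior below ---
# A sinusoidal embedding of the continuous timestep r, scaled by max_positions, with a zero pad
# when the embedding width c_r is odd.
import math
import torch

def r_embedding(r: torch.Tensor, c_r: int = 64, max_positions: int = 10000) -> torch.Tensor:
    r = r * max_positions
    half_dim = c_r // 2
    emb = math.log(max_positions) / (half_dim - 1)
    emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp()
    emb = r[:, None] * emb[None, :]
    emb = torch.cat([emb.sin(), emb.cos()], dim=1)
    if c_r % 2 == 1:  # zero pad to the requested width
        emb = torch.nn.functional.pad(emb, (0, 1), mode="constant")
    return emb.to(dtype=r.dtype)

assert r_embedding(torch.rand(4)).shape == (4, 64)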
+ +import math +from typing import Dict, Union + +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin +from ...models.attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from ...models.modeling_utils import ModelMixin +from ...utils import is_torch_version +from .modeling_wuerstchen_common import AttnBlock, ResBlock, TimestepBlock, WuerstchenLayerNorm + + +class WuerstchenPrior(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin): + unet_name = "prior" + _supports_gradient_checkpointing = True + + @register_to_config + def __init__(self, c_in=16, c=1280, c_cond=1024, c_r=64, depth=16, nhead=16, dropout=0.1): + super().__init__() + conv_cls = nn.Conv2d + linear_cls = nn.Linear + + self.c_r = c_r + self.projection = conv_cls(c_in, c, kernel_size=1) + self.cond_mapper = nn.Sequential( + linear_cls(c_cond, c), + nn.LeakyReLU(0.2), + linear_cls(c, c), + ) + + self.blocks = nn.ModuleList() + for _ in range(depth): + self.blocks.append(ResBlock(c, dropout=dropout)) + self.blocks.append(TimestepBlock(c, c_r)) + self.blocks.append(AttnBlock(c, c, nhead, self_attn=True, dropout=dropout)) + self.out = nn.Sequential( + WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6), + conv_cls(c, c_in * 2, kernel_size=1), + ) + + self.gradient_checkpointing = False + self.set_default_attn_processor() + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
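# --- Minimal sketch of the recursive attention-processor walk used above (hypothetical helper) ---
# Any submodule exposing get_processor is collected under its dotted name via named_children();
# this is where the processor count used by the dict-length check above comes from.
import torch.nn as nn

def collect_processors(root: nn.Module) -> dict:
    processors = {}

    def recurse(name: str, module: nn.Module):
        if hasattr(module, "get_processor"):
            processors[f"{name}.processor"] = module.get_processor()
        for sub_name, child in module.named_children():
            recurse(f"{name}.{sub_name}", child)

    for name, module in root.named_children():
        recurse(name, module)
    return processors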
+ ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + def _set_gradient_checkpointing(self, module, value=False): + self.gradient_checkpointing = value + + def gen_r_embedding(self, r, max_positions=10000): + r = r * max_positions + half_dim = self.c_r // 2 + emb = math.log(max_positions) / (half_dim - 1) + emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() + emb = r[:, None] * emb[None, :] + emb = torch.cat([emb.sin(), emb.cos()], dim=1) + if self.c_r % 2 == 1: # zero pad + emb = nn.functional.pad(emb, (0, 1), mode="constant") + return emb.to(dtype=r.dtype) + + def forward(self, x, r, c): + x_in = x + x = self.projection(x) + c_embed = self.cond_mapper(c) + r_embed = self.gen_r_embedding(r) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + for block in self.blocks: + if isinstance(block, AttnBlock): + x = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), x, c_embed, use_reentrant=False + ) + elif isinstance(block, TimestepBlock): + x = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), x, r_embed, use_reentrant=False + ) + else: + x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False) + else: + for block in self.blocks: + if isinstance(block, AttnBlock): + x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, c_embed) + elif isinstance(block, TimestepBlock): + x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, r_embed) + else: + x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x) + else: + for block in self.blocks: + if isinstance(block, AttnBlock): + x = block(x, c_embed) + elif isinstance(block, TimestepBlock): + x = block(x, r_embed) + else: + x = block(x) + a, b = self.out(x).chunk(2, dim=1) + return (x_in - a) / ((1 - b).abs() + 1e-5) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py new file mode 100644 index 000000000..e4277d58a --- /dev/null +++ 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
@@ -0,0 +1,438 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Callable, Dict, List, Optional, Union
+
+import numpy as np
+import torch
+from transformers import CLIPTextModel, CLIPTokenizer
+
+from ...schedulers import DDPMWuerstchenScheduler
+from ...utils import deprecate, logging, replace_example_docstring
+from ...utils.torch_utils import randn_tensor
+from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
+from .modeling_paella_vq_model import PaellaVQModel
+from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+EXAMPLE_DOC_STRING = """
+    Examples:
+        ```py
+        >>> import torch
+        >>> from diffusers import WuerstchenPriorPipeline, WuerstchenDecoderPipeline
+
+        >>> prior_pipe = WuerstchenPriorPipeline.from_pretrained(
+        ...     "warp-ai/wuerstchen-prior", torch_dtype=torch.float16
+        ... ).to("cuda")
+        >>> gen_pipe = WuerstchenDecoderPipeline.from_pretrained("warp-ai/wuerstchen", torch_dtype=torch.float16).to(
+        ...     "cuda"
+        ... )
+
+        >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet"
+        >>> prior_output = prior_pipe(prompt)
+        >>> images = gen_pipe(prior_output.image_embeddings, prompt=prompt).images
+        ```
+"""
+
+
+class WuerstchenDecoderPipeline(DiffusionPipeline):
+    """
+    Pipeline for generating images from the Wuerstchen model.
+
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+    Args:
+        tokenizer (`CLIPTokenizer`):
+            The CLIP tokenizer.
+        text_encoder (`CLIPTextModel`):
+            The CLIP text encoder.
+        decoder ([`WuerstchenDiffNeXt`]):
+            The WuerstchenDiffNeXt unet decoder.
+        vqgan ([`PaellaVQModel`]):
+            The VQGAN model.
+        scheduler ([`DDPMWuerstchenScheduler`]):
+            A scheduler to be used in combination with `decoder` to generate images.
+        latent_dim_scale (float, `optional`, defaults to 10.67):
+            Multiplier to determine the VQ latent space size from the image embeddings. If the image embeddings are
+            height=24 and width=24, the VQ latent shape needs to be height=int(24*10.67)=256 and
+            width=int(24*10.67)=256 in order to match the training conditions.
+ """ + + model_cpu_offload_seq = "text_encoder->decoder->vqgan" + _callback_tensor_inputs = [ + "latents", + "text_encoder_hidden_states", + "negative_prompt_embeds", + "image_embeddings", + ] + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModel, + decoder: WuerstchenDiffNeXt, + scheduler: DDPMWuerstchenScheduler, + vqgan: PaellaVQModel, + latent_dim_scale: float = 10.67, + ) -> None: + super().__init__() + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + decoder=decoder, + scheduler=scheduler, + vqgan=vqgan, + ) + self.register_to_config(latent_dim_scale=latent_dim_scale) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + attention_mask = attention_mask[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask.to(device)) + text_encoder_hidden_states = text_encoder_output.last_hidden_state + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + + uncond_text_encoder_hidden_states = None + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + negative_prompt_embeds_text_encoder_output = self.text_encoder( + uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device) + ) + + uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + return text_encoder_hidden_states, uncond_text_encoder_hidden_states + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image_embeddings: Union[torch.FloatTensor, List[torch.FloatTensor]], + prompt: Union[str, List[str]] = None, + num_inference_steps: int = 12, + timesteps: Optional[List[float]] = None, + guidance_scale: float = 0.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image_embedding (`torch.FloatTensor` or `List[torch.FloatTensor]`): + Image Embeddings either extracted from an image or generated by a Prior Model. + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + num_inference_steps (`int`, *optional*, defaults to 12): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 0.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting + `decoder_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `decoder_guidance_scale` is less than `1`). 
+ num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is a list with the generated image + embeddings. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + # 0. Define commonly used variables + device = self._execution_device + dtype = self.decoder.dtype + self._guidance_scale = guidance_scale + + # 1. Check inputs. Raise error if not correct + if not isinstance(prompt, list): + if isinstance(prompt, str): + prompt = [prompt] + else: + raise TypeError(f"'prompt' must be of type 'list' or 'str', but got {type(prompt)}.") + + if self.do_classifier_free_guidance: + if negative_prompt is not None and not isinstance(negative_prompt, list): + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + else: + raise TypeError( + f"'negative_prompt' must be of type 'list' or 'str', but got {type(negative_prompt)}." 
+ ) + + if isinstance(image_embeddings, list): + image_embeddings = torch.cat(image_embeddings, dim=0) + if isinstance(image_embeddings, np.ndarray): + image_embeddings = torch.Tensor(image_embeddings, device=device).to(dtype=dtype) + if not isinstance(image_embeddings, torch.Tensor): + raise TypeError( + f"'image_embeddings' must be of type 'torch.Tensor' or 'np.array', but got {type(image_embeddings)}." + ) + + if not isinstance(num_inference_steps, int): + raise TypeError( + f"'num_inference_steps' must be of type 'int', but got {type(num_inference_steps)}\ + In Case you want to provide explicit timesteps, please use the 'timesteps' argument." + ) + + # 2. Encode caption + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + image_embeddings.size(0) * num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + ) + text_encoder_hidden_states = ( + torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds + ) + effnet = ( + torch.cat([image_embeddings, torch.zeros_like(image_embeddings)]) + if self.do_classifier_free_guidance + else image_embeddings + ) + + # 3. Determine latent shape of latents + latent_height = int(image_embeddings.size(2) * self.config.latent_dim_scale) + latent_width = int(image_embeddings.size(3) * self.config.latent_dim_scale) + latent_features_shape = (image_embeddings.size(0) * num_images_per_prompt, 4, latent_height, latent_width) + + # 4. Prepare and set timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latents + latents = self.prepare_latents(latent_features_shape, dtype, device, generator, latents, self.scheduler) + + # 6. Run denoising loop + self._num_timesteps = len(timesteps[:-1]) + for i, t in enumerate(self.progress_bar(timesteps[:-1])): + ratio = t.expand(latents.size(0)).to(dtype) + # 7. Denoise latents + predicted_latents = self.decoder( + torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, + r=torch.cat([ratio] * 2) if self.do_classifier_free_guidance else ratio, + effnet=effnet, + clip=text_encoder_hidden_states, + ) + + # 8. Check for classifier free guidance and apply it + if self.do_classifier_free_guidance: + predicted_latents_text, predicted_latents_uncond = predicted_latents.chunk(2) + predicted_latents = torch.lerp(predicted_latents_uncond, predicted_latents_text, self.guidance_scale) + + # 9. 
Renoise latents to next timestep
+            latents = self.scheduler.step(
+                model_output=predicted_latents,
+                timestep=ratio,
+                sample=latents,
+                generator=generator,
+            ).prev_sample
+
+            if callback_on_step_end is not None:
+                callback_kwargs = {}
+                for k in callback_on_step_end_tensor_inputs:
+                    callback_kwargs[k] = locals()[k]
+                callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                latents = callback_outputs.pop("latents", latents)
+                image_embeddings = callback_outputs.pop("image_embeddings", image_embeddings)
+                text_encoder_hidden_states = callback_outputs.pop(
+                    "text_encoder_hidden_states", text_encoder_hidden_states
+                )
+
+            if callback is not None and i % callback_steps == 0:
+                step_idx = i // getattr(self.scheduler, "order", 1)
+                callback(step_idx, t, latents)
+
+        if output_type not in ["pt", "np", "pil", "latent"]:
+            raise ValueError(
+                f"Only the output types `pt`, `np`, `pil` and `latent` are supported, not output_type={output_type}"
+            )
+
+        if not output_type == "latent":
+            # 10. Scale and decode the image latents with vq-vae
+            latents = self.vqgan.config.scale_factor * latents
+            images = self.vqgan.decode(latents).sample.clamp(0, 1)
+            if output_type == "np":
+                images = images.permute(0, 2, 3, 1).cpu().float().numpy()
+            elif output_type == "pil":
+                images = images.permute(0, 2, 3, 1).cpu().float().numpy()
+                images = self.numpy_to_pil(images)
+        else:
+            images = latents
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return images
+        return ImagePipelineOutput(images)
diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py
new file mode 100644
index 000000000..3a43ad5b9
--- /dev/null
+++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py
@@ -0,0 +1,306 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Callable, Dict, List, Optional, Union
+
+import torch
+from transformers import CLIPTextModel, CLIPTokenizer
+
+from ...schedulers import DDPMWuerstchenScheduler
+from ...utils import deprecate, replace_example_docstring
+from ..pipeline_utils import DiffusionPipeline
+from .modeling_paella_vq_model import PaellaVQModel
+from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt
+from .modeling_wuerstchen_prior import WuerstchenPrior
+from .pipeline_wuerstchen import WuerstchenDecoderPipeline
+from .pipeline_wuerstchen_prior import WuerstchenPriorPipeline
+
+
+TEXT2IMAGE_EXAMPLE_DOC_STRING = """
+    Examples:
+        ```py
+        >>> import torch
+        >>> from diffusers import WuerstchenCombinedPipeline
+
+        >>> pipe = WuerstchenCombinedPipeline.from_pretrained("warp-ai/Wuerstchen", torch_dtype=torch.float16).to(
+        ...     "cuda"
+        ... 
) + >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" + >>> images = pipe(prompt=prompt) + ``` +""" + + +class WuerstchenCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for text-to-image generation using Wuerstchen + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + tokenizer (`CLIPTokenizer`): + The decoder tokenizer to be used for text inputs. + text_encoder (`CLIPTextModel`): + The decoder text encoder to be used for text inputs. + decoder (`WuerstchenDiffNeXt`): + The decoder model to be used for decoder image generation pipeline. + scheduler (`DDPMWuerstchenScheduler`): + The scheduler to be used for decoder image generation pipeline. + vqgan (`PaellaVQModel`): + The VQGAN model to be used for decoder image generation pipeline. + prior_tokenizer (`CLIPTokenizer`): + The prior tokenizer to be used for text inputs. + prior_text_encoder (`CLIPTextModel`): + The prior text encoder to be used for text inputs. + prior_prior (`WuerstchenPrior`): + The prior model to be used for prior pipeline. + prior_scheduler (`DDPMWuerstchenScheduler`): + The scheduler to be used for prior pipeline. + """ + + _load_connected_pipes = True + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModel, + decoder: WuerstchenDiffNeXt, + scheduler: DDPMWuerstchenScheduler, + vqgan: PaellaVQModel, + prior_tokenizer: CLIPTokenizer, + prior_text_encoder: CLIPTextModel, + prior_prior: WuerstchenPrior, + prior_scheduler: DDPMWuerstchenScheduler, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + decoder=decoder, + scheduler=scheduler, + vqgan=vqgan, + prior_prior=prior_prior, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + ) + self.prior_pipe = WuerstchenPriorPipeline( + prior=prior_prior, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + ) + self.decoder_pipe = WuerstchenDecoderPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + decoder=decoder, + scheduler=scheduler, + vqgan=vqgan, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id) + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using 🤗 + Accelerate, significantly reducing memory usage. Models are moved to a `torch.device('meta')` and loaded on a + GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis. 
+ Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower. + """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + height: int = 512, + width: int = 512, + prior_num_inference_steps: int = 60, + prior_timesteps: Optional[List[float]] = None, + prior_guidance_scale: float = 4.0, + num_inference_steps: int = 12, + decoder_timesteps: Optional[List[float]] = None, + decoder_guidance_scale: float = 0.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation for the prior and decoder. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* + prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` + input argument. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `prior_guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting + `prior_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked + to the text `prompt`, usually at the expense of lower image quality. + prior_num_inference_steps (`Union[int, Dict[float, int]]`, *optional*, defaults to 60): + The number of prior denoising steps. 
More denoising steps usually lead to a higher quality image at the + expense of slower inference. For more specific timestep spacing, you can pass customized + `prior_timesteps` + num_inference_steps (`int`, *optional*, defaults to 12): + The number of decoder denoising steps. More denoising steps usually lead to a higher quality image at + the expense of slower inference. For more specific timestep spacing, you can pass customized + `timesteps` + prior_timesteps (`List[float]`, *optional*): + Custom timesteps to use for the denoising process for the prior. If not defined, equal spaced + `prior_num_inference_steps` timesteps are used. Must be in descending order. + decoder_timesteps (`List[float]`, *optional*): + Custom timesteps to use for the denoising process for the decoder. If not defined, equal spaced + `num_inference_steps` timesteps are used. Must be in descending order. + decoder_guidance_scale (`float`, *optional*, defaults to 0.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + prior_callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `prior_callback_on_step_end(self: DiffusionPipeline, step: int, timestep: + int, callback_kwargs: Dict)`. + prior_callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the + list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in + the `._callback_tensor_inputs` attribute of your pipeline class. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. 
You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + prior_kwargs = {} + if kwargs.get("prior_callback", None) is not None: + prior_kwargs["callback"] = kwargs.pop("prior_callback") + deprecate( + "prior_callback", + "1.0.0", + "Passing `prior_callback` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`", + ) + if kwargs.get("prior_callback_steps", None) is not None: + deprecate( + "prior_callback_steps", + "1.0.0", + "Passing `prior_callback_steps` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`", + ) + prior_kwargs["callback_steps"] = kwargs.pop("prior_callback_steps") + + prior_outputs = self.prior_pipe( + prompt=prompt if prompt_embeds is None else None, + height=height, + width=width, + num_inference_steps=prior_num_inference_steps, + timesteps=prior_timesteps, + guidance_scale=prior_guidance_scale, + negative_prompt=negative_prompt if negative_prompt_embeds is None else None, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + output_type="pt", + return_dict=False, + callback_on_step_end=prior_callback_on_step_end, + callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, + **prior_kwargs, + ) + image_embeddings = prior_outputs[0] + + outputs = self.decoder_pipe( + image_embeddings=image_embeddings, + prompt=prompt if prompt is not None else "", + num_inference_steps=num_inference_steps, + timesteps=decoder_timesteps, + guidance_scale=decoder_guidance_scale, + negative_prompt=negative_prompt, + generator=generator, + output_type=output_type, + return_dict=return_dict, + callback_on_step_end=callback_on_step_end, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + **kwargs, + ) + + return outputs diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py new file mode 100644 index 000000000..4640f7696 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py @@ -0,0 +1,516 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
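+#
+# A minimal two-stage usage sketch (illustrative only; it mirrors the docstring examples in this
+# file and in `pipeline_wuerstchen.py`, and assumes the `warp-ai/wuerstchen-prior` and
+# `warp-ai/wuerstchen` checkpoints used there):
+#
+#     import torch
+#     from diffusers import WuerstchenPriorPipeline, WuerstchenDecoderPipeline
+#
+#     # Stage C: text prompt -> image embeddings
+#     prior_pipe = WuerstchenPriorPipeline.from_pretrained(
+#         "warp-ai/wuerstchen-prior", torch_dtype=torch.float16
+#     ).to("cuda")
+#     # Stage B + VQGAN: image embeddings -> images
+#     decoder_pipe = WuerstchenDecoderPipeline.from_pretrained(
+#         "warp-ai/wuerstchen", torch_dtype=torch.float16
+#     ).to("cuda")
+#
+#     prompt = "an image of a shiba inu, donning a spacesuit and helmet"
+#     prior_output = prior_pipe(prompt)
+#     images = decoder_pipe(prior_output.image_embeddings, prompt=prompt).images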
+
+from dataclasses import dataclass
+from math import ceil
+from typing import Callable, Dict, List, Optional, Union
+
+import numpy as np
+import torch
+from transformers import CLIPTextModel, CLIPTokenizer
+
+from ...loaders import LoraLoaderMixin
+from ...schedulers import DDPMWuerstchenScheduler
+from ...utils import BaseOutput, deprecate, logging, replace_example_docstring
+from ...utils.torch_utils import randn_tensor
+from ..pipeline_utils import DiffusionPipeline
+from .modeling_wuerstchen_prior import WuerstchenPrior
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+DEFAULT_STAGE_C_TIMESTEPS = list(np.linspace(1.0, 2 / 3, 20)) + list(np.linspace(2 / 3, 0.0, 11))[1:]
+
+EXAMPLE_DOC_STRING = """
+    Examples:
+        ```py
+        >>> import torch
+        >>> from diffusers import WuerstchenPriorPipeline
+
+        >>> prior_pipe = WuerstchenPriorPipeline.from_pretrained(
+        ...     "warp-ai/wuerstchen-prior", torch_dtype=torch.float16
+        ... ).to("cuda")
+
+        >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet"
+        >>> prior_output = prior_pipe(prompt)
+        ```
+"""
+
+
+@dataclass
+class WuerstchenPriorPipelineOutput(BaseOutput):
+    """
+    Output class for WuerstchenPriorPipeline.
+
+    Args:
+        image_embeddings (`torch.FloatTensor` or `np.ndarray`):
+            Prior image embeddings for the text prompt.
+
+    """
+
+    image_embeddings: Union[torch.FloatTensor, np.ndarray]
+
+
+class WuerstchenPriorPipeline(DiffusionPipeline, LoraLoaderMixin):
+    """
+    Pipeline for generating the image prior for Wuerstchen.
+
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+    The pipeline also inherits the following loading methods:
+        - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+        - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+
+    Args:
+        prior ([`WuerstchenPrior`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+        text_encoder ([`CLIPTextModel`]):
+            Frozen text-encoder.
+        tokenizer (`CLIPTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        scheduler ([`DDPMWuerstchenScheduler`]):
+            A scheduler to be used in combination with `prior` to generate image embedding.
+        latent_mean ('float', *optional*, defaults to 42.0):
+            Mean value used to denormalize the latents.
+        latent_std ('float', *optional*, defaults to 1.0):
+            Standard deviation value used to denormalize the latents.
+        resolution_multiple ('float', *optional*, defaults to 42.67):
+            Ratio between the requested image resolution and the resolution of the generated latents.
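+
+    Note:
+        In `__call__`, the sampled latents are denormalized as `latents * latent_mean - latent_std`
+        before being returned (see step 10 of the denoising loop).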
+ """ + + unet_name = "prior" + text_encoder_name = "text_encoder" + model_cpu_offload_seq = "text_encoder->prior" + _callback_tensor_inputs = ["latents", "text_encoder_hidden_states", "negative_prompt_embeds"] + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModel, + prior: WuerstchenPrior, + scheduler: DDPMWuerstchenScheduler, + latent_mean: float = 42.0, + latent_std: float = 1.0, + resolution_multiple: float = 42.67, + ) -> None: + super().__init__() + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + prior=prior, + scheduler=scheduler, + ) + self.register_to_config( + latent_mean=latent_mean, latent_std=latent_std, resolution_multiple=resolution_multiple + ) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def encode_prompt( + self, + device, + num_images_per_prompt, + do_classifier_free_guidance, + prompt=None, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ): + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + attention_mask = attention_mask[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask.to(device) + ) + prompt_embeds = text_encoder_output.last_hidden_state + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + if negative_prompt_embeds is None and do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." 
+ ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + negative_prompt_embeds_text_encoder_output = self.text_encoder( + uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device) + ) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.last_hidden_state + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + # done duplicates + + return prompt_embeds, negative_prompt_embeds + + def check_inputs( + self, + prompt, + negative_prompt, + num_inference_steps, + do_classifier_free_guidance, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if not isinstance(num_inference_steps, int): + raise TypeError( + f"'num_inference_steps' must be of type 'int', but got {type(num_inference_steps)}\ + In Case you want to provide explicit timesteps, please use the 'timesteps' argument." 
+ ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + height: int = 1024, + width: int = 1024, + num_inference_steps: int = 60, + timesteps: List[float] = None, + guidance_scale: float = 8.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pt", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + height (`int`, *optional*, defaults to 1024): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 1024): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 60): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 8.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting + `decoder_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `decoder_guidance_scale` is less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.WuerstchenPriorPipelineOutput`] or `tuple` [`~pipelines.WuerstchenPriorPipelineOutput`] if + `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the + generated image embeddings. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + # 0. Define commonly used variables + device = self._execution_device + self._guidance_scale = guidance_scale + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # 1. Check inputs. Raise error if not correct + if prompt is not None and not isinstance(prompt, list): + if isinstance(prompt, str): + prompt = [prompt] + else: + raise TypeError(f"'prompt' must be of type 'list' or 'str', but got {type(prompt)}.") + + if self.do_classifier_free_guidance: + if negative_prompt is not None and not isinstance(negative_prompt, list): + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + else: + raise TypeError( + f"'negative_prompt' must be of type 'list' or 'str', but got {type(negative_prompt)}." + ) + + self.check_inputs( + prompt, + negative_prompt, + num_inference_steps, + self.do_classifier_free_guidance, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 2. 
Encode caption + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_encoder_hidden_states = ( + torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds + ) + + # 3. Determine latent shape of image embeddings + dtype = text_encoder_hidden_states.dtype + latent_height = ceil(height / self.config.resolution_multiple) + latent_width = ceil(width / self.config.resolution_multiple) + num_channels = self.prior.config.c_in + effnet_features_shape = (num_images_per_prompt * batch_size, num_channels, latent_height, latent_width) + + # 4. Prepare and set timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latents + latents = self.prepare_latents(effnet_features_shape, dtype, device, generator, latents, self.scheduler) + + # 6. Run denoising loop + self._num_timesteps = len(timesteps[:-1]) + for i, t in enumerate(self.progress_bar(timesteps[:-1])): + ratio = t.expand(latents.size(0)).to(dtype) + + # 7. Denoise image embeddings + predicted_image_embedding = self.prior( + torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, + r=torch.cat([ratio] * 2) if self.do_classifier_free_guidance else ratio, + c=text_encoder_hidden_states, + ) + + # 8. Check for classifier free guidance and apply it + if self.do_classifier_free_guidance: + predicted_image_embedding_text, predicted_image_embedding_uncond = predicted_image_embedding.chunk(2) + predicted_image_embedding = torch.lerp( + predicted_image_embedding_uncond, predicted_image_embedding_text, self.guidance_scale + ) + + # 9. Renoise latents to next timestep + latents = self.scheduler.step( + model_output=predicted_image_embedding, + timestep=ratio, + sample=latents, + generator=generator, + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + text_encoder_hidden_states = callback_outputs.pop( + "text_encoder_hidden_states", text_encoder_hidden_states + ) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 10. 
Denormalize the latents + latents = latents * self.config.latent_mean - self.config.latent_std + + # Offload all models + self.maybe_free_model_hooks() + + if output_type == "np": + latents = latents.cpu().float().numpy() + + if not return_dict: + return (latents,) + + return WuerstchenPriorPipelineOutput(latents) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/py.typed b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/README.md b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/README.md new file mode 100644 index 000000000..31ad27793 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/README.md @@ -0,0 +1,3 @@ +# Schedulers + +For more information on the schedulers, please refer to the [docs](https://huggingface.co/docs/diffusers/api/schedulers/overview). \ No newline at end of file diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/__init__.py new file mode 100644 index 000000000..720d8ea25 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/__init__.py @@ -0,0 +1,211 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
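+#
+# Lazy-import layout (a descriptive sketch of how this module behaves, assuming the `_LazyModule`
+# helper from `..utils` works as used at the bottom of this file): `_import_structure` maps each
+# scheduler submodule to the names it exports, e.g.
+#
+#     _import_structure["scheduling_ddpm"] = ["DDPMScheduler"]
+#
+# and `sys.modules[__name__]` is then replaced by a `_LazyModule` built from that mapping, so
+# `from diffusers.schedulers import DDPMScheduler` only imports `scheduling_ddpm` on first access.
+# When optional dependencies (flax, scipy, torchsde) are missing, dummy objects are registered
+# instead; under `TYPE_CHECKING` or `DIFFUSERS_SLOW_IMPORT`, everything is imported eagerly.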
+ +from typing import TYPE_CHECKING + +from ..utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_scipy_available, + is_torch_available, + is_torchsde_available, +) + + +_dummy_modules = {} +_import_structure = {} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_pt_objects # noqa F403 + + _dummy_modules.update(get_objects_from_module(dummy_pt_objects)) + +else: + _import_structure["deprecated"] = ["KarrasVeScheduler", "ScoreSdeVpScheduler"] + _import_structure["scheduling_amused"] = ["AmusedScheduler"] + _import_structure["scheduling_consistency_decoder"] = ["ConsistencyDecoderScheduler"] + _import_structure["scheduling_consistency_models"] = ["CMStochasticIterativeScheduler"] + _import_structure["scheduling_ddim"] = ["DDIMScheduler"] + _import_structure["scheduling_ddim_inverse"] = ["DDIMInverseScheduler"] + _import_structure["scheduling_ddim_parallel"] = ["DDIMParallelScheduler"] + _import_structure["scheduling_ddpm"] = ["DDPMScheduler"] + _import_structure["scheduling_ddpm_parallel"] = ["DDPMParallelScheduler"] + _import_structure["scheduling_ddpm_wuerstchen"] = ["DDPMWuerstchenScheduler"] + _import_structure["scheduling_deis_multistep"] = ["DEISMultistepScheduler"] + _import_structure["scheduling_dpmsolver_multistep"] = ["DPMSolverMultistepScheduler"] + _import_structure["scheduling_dpmsolver_multistep_inverse"] = ["DPMSolverMultistepInverseScheduler"] + _import_structure["scheduling_dpmsolver_singlestep"] = ["DPMSolverSinglestepScheduler"] + _import_structure["scheduling_edm_dpmsolver_multistep"] = ["EDMDPMSolverMultistepScheduler"] + _import_structure["scheduling_edm_euler"] = ["EDMEulerScheduler"] + _import_structure["scheduling_euler_ancestral_discrete"] = ["EulerAncestralDiscreteScheduler"] + _import_structure["scheduling_euler_discrete"] = ["EulerDiscreteScheduler"] + _import_structure["scheduling_heun_discrete"] = ["HeunDiscreteScheduler"] + _import_structure["scheduling_ipndm"] = ["IPNDMScheduler"] + _import_structure["scheduling_k_dpm_2_ancestral_discrete"] = ["KDPM2AncestralDiscreteScheduler"] + _import_structure["scheduling_k_dpm_2_discrete"] = ["KDPM2DiscreteScheduler"] + _import_structure["scheduling_lcm"] = ["LCMScheduler"] + _import_structure["scheduling_pndm"] = ["PNDMScheduler"] + _import_structure["scheduling_repaint"] = ["RePaintScheduler"] + _import_structure["scheduling_sasolver"] = ["SASolverScheduler"] + _import_structure["scheduling_sde_ve"] = ["ScoreSdeVeScheduler"] + _import_structure["scheduling_tcd"] = ["TCDScheduler"] + _import_structure["scheduling_unclip"] = ["UnCLIPScheduler"] + _import_structure["scheduling_unipc_multistep"] = ["UniPCMultistepScheduler"] + _import_structure["scheduling_utils"] = ["KarrasDiffusionSchedulers", "SchedulerMixin"] + _import_structure["scheduling_vq_diffusion"] = ["VQDiffusionScheduler"] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_flax_objects # noqa F403 + + _dummy_modules.update(get_objects_from_module(dummy_flax_objects)) + +else: + _import_structure["scheduling_ddim_flax"] = ["FlaxDDIMScheduler"] + _import_structure["scheduling_ddpm_flax"] = ["FlaxDDPMScheduler"] + _import_structure["scheduling_dpmsolver_multistep_flax"] = ["FlaxDPMSolverMultistepScheduler"] + _import_structure["scheduling_euler_discrete_flax"] = 
["FlaxEulerDiscreteScheduler"] + _import_structure["scheduling_karras_ve_flax"] = ["FlaxKarrasVeScheduler"] + _import_structure["scheduling_lms_discrete_flax"] = ["FlaxLMSDiscreteScheduler"] + _import_structure["scheduling_pndm_flax"] = ["FlaxPNDMScheduler"] + _import_structure["scheduling_sde_ve_flax"] = ["FlaxScoreSdeVeScheduler"] + _import_structure["scheduling_utils_flax"] = [ + "FlaxKarrasDiffusionSchedulers", + "FlaxSchedulerMixin", + "FlaxSchedulerOutput", + "broadcast_to_shape_from_left", + ] + + +try: + if not (is_torch_available() and is_scipy_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_scipy_objects # noqa F403 + + _dummy_modules.update(get_objects_from_module(dummy_torch_and_scipy_objects)) + +else: + _import_structure["scheduling_lms_discrete"] = ["LMSDiscreteScheduler"] + +try: + if not (is_torch_available() and is_torchsde_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_torchsde_objects # noqa F403 + + _dummy_modules.update(get_objects_from_module(dummy_torch_and_torchsde_objects)) + +else: + _import_structure["scheduling_dpmsolver_sde"] = ["DPMSolverSDEScheduler"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from ..utils import ( + OptionalDependencyNotAvailable, + is_flax_available, + is_scipy_available, + is_torch_available, + is_torchsde_available, + ) + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_pt_objects import * # noqa F403 + else: + from .deprecated import KarrasVeScheduler, ScoreSdeVpScheduler + from .scheduling_amused import AmusedScheduler + from .scheduling_consistency_decoder import ConsistencyDecoderScheduler + from .scheduling_consistency_models import CMStochasticIterativeScheduler + from .scheduling_ddim import DDIMScheduler + from .scheduling_ddim_inverse import DDIMInverseScheduler + from .scheduling_ddim_parallel import DDIMParallelScheduler + from .scheduling_ddpm import DDPMScheduler + from .scheduling_ddpm_parallel import DDPMParallelScheduler + from .scheduling_ddpm_wuerstchen import DDPMWuerstchenScheduler + from .scheduling_deis_multistep import DEISMultistepScheduler + from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler + from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler + from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler + from .scheduling_edm_dpmsolver_multistep import EDMDPMSolverMultistepScheduler + from .scheduling_edm_euler import EDMEulerScheduler + from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler + from .scheduling_euler_discrete import EulerDiscreteScheduler + from .scheduling_heun_discrete import HeunDiscreteScheduler + from .scheduling_ipndm import IPNDMScheduler + from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler + from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler + from .scheduling_lcm import LCMScheduler + from .scheduling_pndm import PNDMScheduler + from .scheduling_repaint import RePaintScheduler + from .scheduling_sasolver import SASolverScheduler + from .scheduling_sde_ve import ScoreSdeVeScheduler + from .scheduling_tcd import TCDScheduler + from .scheduling_unclip import UnCLIPScheduler + from .scheduling_unipc_multistep import UniPCMultistepScheduler + from .scheduling_utils import 
KarrasDiffusionSchedulers, SchedulerMixin + from .scheduling_vq_diffusion import VQDiffusionScheduler + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_flax_objects import * # noqa F403 + else: + from .scheduling_ddim_flax import FlaxDDIMScheduler + from .scheduling_ddpm_flax import FlaxDDPMScheduler + from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler + from .scheduling_euler_discrete_flax import FlaxEulerDiscreteScheduler + from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler + from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler + from .scheduling_pndm_flax import FlaxPNDMScheduler + from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler + from .scheduling_utils_flax import ( + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + broadcast_to_shape_from_left, + ) + + try: + if not (is_torch_available() and is_scipy_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 + else: + from .scheduling_lms_discrete import LMSDiscreteScheduler + + try: + if not (is_torch_available() and is_torchsde_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 + else: + from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + for name, value in _dummy_modules.items(): + setattr(sys.modules[__name__], name, value) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/deprecated/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/deprecated/__init__.py new file mode 100644 index 000000000..786707f45 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/deprecated/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_pt_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) +else: + _import_structure["scheduling_karras_ve"] = ["KarrasVeScheduler"] + _import_structure["scheduling_sde_vp"] = ["ScoreSdeVpScheduler"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ..utils.dummy_pt_objects import * # noqa F403 + else: + from .scheduling_karras_ve import KarrasVeScheduler + from .scheduling_sde_vp import ScoreSdeVpScheduler + + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git 
a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py new file mode 100644 index 000000000..d776d989a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py @@ -0,0 +1,243 @@ +# Copyright 2024 NVIDIA and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch + +from ...configuration_utils import ConfigMixin, register_to_config +from ...utils import BaseOutput +from ...utils.torch_utils import randn_tensor +from ..scheduling_utils import SchedulerMixin + + +@dataclass +class KarrasVeOutput(BaseOutput): + """ + Output class for the scheduler's step function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + derivative (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Derivative of predicted original image sample (x_0). + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample (x_{0}) based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.FloatTensor + derivative: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +class KarrasVeScheduler(SchedulerMixin, ConfigMixin): + """ + A stochastic scheduler tailored to variance-expanding models. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + + + For more details on the parameters, see [Appendix E](https://arxiv.org/abs/2206.00364). The grid search values used + to find the optimal `{s_noise, s_churn, s_min, s_max}` for a specific model are described in Table 5 of the paper. + + + + Args: + sigma_min (`float`, defaults to 0.02): + The minimum noise magnitude. + sigma_max (`float`, defaults to 100): + The maximum noise magnitude. + s_noise (`float`, defaults to 1.007): + The amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000, + 1.011]. + s_churn (`float`, defaults to 80): + The parameter controlling the overall amount of stochasticity. A reasonable range is [0, 100]. + s_min (`float`, defaults to 0.05): + The start value of the sigma range to add noise (enable stochasticity). A reasonable range is [0, 10]. 
+ s_max (`float`, defaults to 50): + The end value of the sigma range to add noise. A reasonable range is [0.2, 80]. + """ + + order = 2 + + @register_to_config + def __init__( + self, + sigma_min: float = 0.02, + sigma_max: float = 100, + s_noise: float = 1.007, + s_churn: float = 80, + s_min: float = 0.05, + s_max: float = 50, + ): + # standard deviation of the initial noise distribution + self.init_noise_sigma = sigma_max + + # setable values + self.num_inference_steps: int = None + self.timesteps: np.IntTensor = None + self.schedule: torch.FloatTensor = None # sigma(t_i) + + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps).to(device) + schedule = [ + ( + self.config.sigma_max**2 + * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) + ) + for i in self.timesteps + ] + self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device) + + def add_noise_to_input( + self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None + ) -> Tuple[torch.FloatTensor, float]: + """ + Explicit Langevin-like "churn" step of adding noise to the sample according to a `gamma_i ≥ 0` to reach a + higher noise level `sigma_hat = sigma_i + gamma_i*sigma_i`. + + Args: + sample (`torch.FloatTensor`): + The input sample. + sigma (`float`): + generator (`torch.Generator`, *optional*): + A random number generator. + """ + if self.config.s_min <= sigma <= self.config.s_max: + gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1) + else: + gamma = 0 + + # sample eps ~ N(0, S_noise^2 * I) + eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device) + sigma_hat = sigma + gamma * sigma + sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) + + return sample_hat, sigma_hat + + def step( + self, + model_output: torch.FloatTensor, + sigma_hat: float, + sigma_prev: float, + sample_hat: torch.FloatTensor, + return_dict: bool = True, + ) -> Union[KarrasVeOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. 
+ sigma_hat (`float`): + sigma_prev (`float`): + sample_hat (`torch.FloatTensor`): + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] is returned, + otherwise a tuple is returned where the first element is the sample tensor. + + """ + + pred_original_sample = sample_hat + sigma_hat * model_output + derivative = (sample_hat - pred_original_sample) / sigma_hat + sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative + + if not return_dict: + return (sample_prev, derivative) + + return KarrasVeOutput( + prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample + ) + + def step_correct( + self, + model_output: torch.FloatTensor, + sigma_hat: float, + sigma_prev: float, + sample_hat: torch.FloatTensor, + sample_prev: torch.FloatTensor, + derivative: torch.FloatTensor, + return_dict: bool = True, + ) -> Union[KarrasVeOutput, Tuple]: + """ + Corrects the predicted sample based on the `model_output` of the network. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + sigma_hat (`float`): TODO + sigma_prev (`float`): TODO + sample_hat (`torch.FloatTensor`): TODO + sample_prev (`torch.FloatTensor`): TODO + derivative (`torch.FloatTensor`): TODO + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`. + + Returns: + prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO + + """ + pred_original_sample = sample_prev + sigma_prev * model_output + derivative_corr = (sample_prev - pred_original_sample) / sigma_prev + sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) + + if not return_dict: + return (sample_prev, derivative) + + return KarrasVeOutput( + prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample + ) + + def add_noise(self, original_samples, noise, timesteps): + raise NotImplementedError() diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/deprecated/scheduling_sde_vp.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/deprecated/scheduling_sde_vp.py new file mode 100644 index 000000000..09b02cadc --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/deprecated/scheduling_sde_vp.py @@ -0,0 +1,109 @@ +# Copyright 2024 Google Brain and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
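Unlike the schedulers driven by the plain `step(model_output, t, sample)` loop, the `KarrasVeScheduler` defined above expects an explicit predictor-corrector loop that feeds `sigma_hat`, `sigma_prev` and `sample_hat` back into `step` and `step_correct`. A minimal sketch of that loop, with `model(x, sigma)` as a hypothetical stand-in for a real denoiser:

import torch
from diffusers.schedulers import KarrasVeScheduler

model = lambda x, sigma: torch.zeros_like(x)   # stand-in for a real denoiser (hypothetical)

scheduler = KarrasVeScheduler()
scheduler.set_timesteps(num_inference_steps=50)

sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
    # "churn" step: temporarily raise the noise level from sigma to sigma_hat
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    output = scheduler.step(model(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
    if sigma_prev != 0:
        # second-order correction reusing the derivative from the predictor step
        output = scheduler.step_correct(
            model(output.prev_sample, sigma_prev), sigma_hat, sigma_prev,
            sample_hat, output.prev_sample, output.derivative,
        )
    sample = output.prev_sample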
+ +# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch + +import math +from typing import Union + +import torch + +from ...configuration_utils import ConfigMixin, register_to_config +from ...utils.torch_utils import randn_tensor +from ..scheduling_utils import SchedulerMixin + + +class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin): + """ + `ScoreSdeVpScheduler` is a variance preserving stochastic differential equation (SDE) scheduler. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 2000): + The number of diffusion steps to train the model. + beta_min (`int`, defaults to 0.1): + beta_max (`int`, defaults to 20): + sampling_eps (`int`, defaults to 1e-3): + The end value of sampling where timesteps decrease progressively from 1 to epsilon. + """ + + order = 1 + + @register_to_config + def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3): + self.sigmas = None + self.discrete_sigmas = None + self.timesteps = None + + def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None): + """ + Sets the continuous timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device) + + def step_pred(self, score, x, t, generator=None): + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + score (): + x (): + t (): + generator (`torch.Generator`, *optional*): + A random number generator. 
+ """ + if self.timesteps is None: + raise ValueError( + "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" + ) + + # TODO(Patrick) better comments + non-PyTorch + # postprocess model score + log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min + std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff)) + std = std.flatten() + while len(std.shape) < len(score.shape): + std = std.unsqueeze(-1) + score = -score / std + + # compute + dt = -1.0 / len(self.timesteps) + + beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) + beta_t = beta_t.flatten() + while len(beta_t.shape) < len(x.shape): + beta_t = beta_t.unsqueeze(-1) + drift = -0.5 * beta_t * x + + diffusion = torch.sqrt(beta_t) + drift = drift - diffusion**2 * score + x_mean = x + drift * dt + + # add noise + noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype) + x = x_mean + diffusion * math.sqrt(-dt) * noise + + return x, x_mean + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_amused.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_amused.py new file mode 100644 index 000000000..51fbe6a4d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_amused.py @@ -0,0 +1,162 @@ +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from .scheduling_utils import SchedulerMixin + + +def gumbel_noise(t, generator=None): + device = generator.device if generator is not None else t.device + noise = torch.zeros_like(t, device=device).uniform_(0, 1, generator=generator).to(t.device) + return -torch.log((-torch.log(noise.clamp(1e-20))).clamp(1e-20)) + + +def mask_by_random_topk(mask_len, probs, temperature=1.0, generator=None): + confidence = torch.log(probs.clamp(1e-20)) + temperature * gumbel_noise(probs, generator=generator) + sorted_confidence = torch.sort(confidence, dim=-1).values + cut_off = torch.gather(sorted_confidence, 1, mask_len.long()) + masking = confidence < cut_off + return masking + + +@dataclass +class AmusedSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. 
+ """ + + prev_sample: torch.FloatTensor + pred_original_sample: torch.FloatTensor = None + + +class AmusedScheduler(SchedulerMixin, ConfigMixin): + order = 1 + + temperatures: torch.Tensor + + @register_to_config + def __init__( + self, + mask_token_id: int, + masking_schedule: str = "cosine", + ): + self.temperatures = None + self.timesteps = None + + def set_timesteps( + self, + num_inference_steps: int, + temperature: Union[int, Tuple[int, int], List[int]] = (2, 0), + device: Union[str, torch.device] = None, + ): + self.timesteps = torch.arange(num_inference_steps, device=device).flip(0) + + if isinstance(temperature, (tuple, list)): + self.temperatures = torch.linspace(temperature[0], temperature[1], num_inference_steps, device=device) + else: + self.temperatures = torch.linspace(temperature, 0.01, num_inference_steps, device=device) + + def step( + self, + model_output: torch.FloatTensor, + timestep: torch.long, + sample: torch.LongTensor, + starting_mask_ratio: int = 1, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[AmusedSchedulerOutput, Tuple]: + two_dim_input = sample.ndim == 3 and model_output.ndim == 4 + + if two_dim_input: + batch_size, codebook_size, height, width = model_output.shape + sample = sample.reshape(batch_size, height * width) + model_output = model_output.reshape(batch_size, codebook_size, height * width).permute(0, 2, 1) + + unknown_map = sample == self.config.mask_token_id + + probs = model_output.softmax(dim=-1) + + device = probs.device + probs_ = probs.to(generator.device) if generator is not None else probs # handles when generator is on CPU + if probs_.device.type == "cpu" and probs_.dtype != torch.float32: + probs_ = probs_.float() # multinomial is not implemented for cpu half precision + probs_ = probs_.reshape(-1, probs.size(-1)) + pred_original_sample = torch.multinomial(probs_, 1, generator=generator).to(device=device) + pred_original_sample = pred_original_sample[:, 0].view(*probs.shape[:-1]) + pred_original_sample = torch.where(unknown_map, pred_original_sample, sample) + + if timestep == 0: + prev_sample = pred_original_sample + else: + seq_len = sample.shape[1] + step_idx = (self.timesteps == timestep).nonzero() + ratio = (step_idx + 1) / len(self.timesteps) + + if self.config.masking_schedule == "cosine": + mask_ratio = torch.cos(ratio * math.pi / 2) + elif self.config.masking_schedule == "linear": + mask_ratio = 1 - ratio + else: + raise ValueError(f"unknown masking schedule {self.config.masking_schedule}") + + mask_ratio = starting_mask_ratio * mask_ratio + + mask_len = (seq_len * mask_ratio).floor() + # do not mask more than amount previously masked + mask_len = torch.min(unknown_map.sum(dim=-1, keepdim=True) - 1, mask_len) + # mask at least one + mask_len = torch.max(torch.tensor([1], device=model_output.device), mask_len) + + selected_probs = torch.gather(probs, -1, pred_original_sample[:, :, None])[:, :, 0] + # Ignores the tokens given in the input by overwriting their confidence. + selected_probs = torch.where(unknown_map, selected_probs, torch.finfo(selected_probs.dtype).max) + + masking = mask_by_random_topk(mask_len, selected_probs, self.temperatures[step_idx], generator) + + # Masks tokens with lower confidence. 
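+            # The `mask_len` lowest-confidence positions (after adding Gumbel noise scaled by the
+            # current temperature) are reset to `mask_token_id`, so they get re-predicted at the
+            # next, less heavily masked step.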
+ prev_sample = torch.where(masking, self.config.mask_token_id, pred_original_sample) + + if two_dim_input: + prev_sample = prev_sample.reshape(batch_size, height, width) + pred_original_sample = pred_original_sample.reshape(batch_size, height, width) + + if not return_dict: + return (prev_sample, pred_original_sample) + + return AmusedSchedulerOutput(prev_sample, pred_original_sample) + + def add_noise(self, sample, timesteps, generator=None): + step_idx = (self.timesteps == timesteps).nonzero() + ratio = (step_idx + 1) / len(self.timesteps) + + if self.config.masking_schedule == "cosine": + mask_ratio = torch.cos(ratio * math.pi / 2) + elif self.config.masking_schedule == "linear": + mask_ratio = 1 - ratio + else: + raise ValueError(f"unknown masking schedule {self.config.masking_schedule}") + + mask_indices = ( + torch.rand( + sample.shape, device=generator.device if generator is not None else sample.device, generator=generator + ).to(sample.device) + < mask_ratio + ) + + masked_sample = sample.clone() + + masked_sample[mask_indices] = self.config.mask_token_id + + return masked_sample diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_consistency_decoder.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_consistency_decoder.py new file mode 100644 index 000000000..69ca8a173 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_consistency_decoder.py @@ -0,0 +1,180 @@ +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +@dataclass +class ConsistencyDecoderSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function. 
+ + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: torch.FloatTensor + + +class ConsistencyDecoderScheduler(SchedulerMixin, ConfigMixin): + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1024, + sigma_data: float = 0.5, + ): + betas = betas_for_alpha_bar(num_train_timesteps) + + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + + self.sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod) + self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod) + + sigmas = torch.sqrt(1.0 / alphas_cumprod - 1) + + sqrt_recip_alphas_cumprod = torch.sqrt(1.0 / alphas_cumprod) + + self.c_skip = sqrt_recip_alphas_cumprod * sigma_data**2 / (sigmas**2 + sigma_data**2) + self.c_out = sigmas * sigma_data / (sigmas**2 + sigma_data**2) ** 0.5 + self.c_in = sqrt_recip_alphas_cumprod / (sigmas**2 + sigma_data**2) ** 0.5 + + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + ): + if num_inference_steps != 2: + raise ValueError("Currently more than 2 inference steps are not supported.") + + self.timesteps = torch.tensor([1008, 512], dtype=torch.long, device=device) + self.sqrt_alphas_cumprod = self.sqrt_alphas_cumprod.to(device) + self.sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod.to(device) + self.c_skip = self.c_skip.to(device) + self.c_out = self.c_out.to(device) + self.c_in = self.c_in.to(device) + + @property + def init_noise_sigma(self): + return self.sqrt_one_minus_alphas_cumprod[self.timesteps[0]] + + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample * self.c_in[timestep] + + def step( + self, + model_output: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + sample: torch.FloatTensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[ConsistencyDecoderSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + timestep (`float`): + The current timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a + [`~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput`] or `tuple`: + If return_dict is `True`, + [`~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput`] is returned, otherwise + a tuple is returned where the first element is the sample tensor. 
+ """ + x_0 = self.c_out[timestep] * model_output + self.c_skip[timestep] * sample + + timestep_idx = torch.where(self.timesteps == timestep)[0] + + if timestep_idx == len(self.timesteps) - 1: + prev_sample = x_0 + else: + noise = randn_tensor(x_0.shape, generator=generator, dtype=x_0.dtype, device=x_0.device) + prev_sample = ( + self.sqrt_alphas_cumprod[self.timesteps[timestep_idx + 1]].to(x_0.dtype) * x_0 + + self.sqrt_one_minus_alphas_cumprod[self.timesteps[timestep_idx + 1]].to(x_0.dtype) * noise + ) + + if not return_dict: + return (prev_sample,) + + return ConsistencyDecoderSchedulerOutput(prev_sample=prev_sample) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_consistency_models.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_consistency_models.py new file mode 100644 index 000000000..14d37a390 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_consistency_models.py @@ -0,0 +1,448 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class CMStochasticIterativeSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: torch.FloatTensor + + +class CMStochasticIterativeScheduler(SchedulerMixin, ConfigMixin): + """ + Multistep and onestep sampling for consistency models. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 40): + The number of diffusion steps to train the model. + sigma_min (`float`, defaults to 0.002): + Minimum noise magnitude in the sigma schedule. Defaults to 0.002 from the original implementation. + sigma_max (`float`, defaults to 80.0): + Maximum noise magnitude in the sigma schedule. Defaults to 80.0 from the original implementation. + sigma_data (`float`, defaults to 0.5): + The standard deviation of the data distribution from the EDM + [paper](https://huggingface.co/papers/2206.00364). Defaults to 0.5 from the original implementation. 
+ s_noise (`float`, defaults to 1.0): + The amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000, + 1.011]. Defaults to 1.0 from the original implementation. + rho (`float`, defaults to 7.0): + The parameter for calculating the Karras sigma schedule from the EDM + [paper](https://huggingface.co/papers/2206.00364). Defaults to 7.0 from the original implementation. + clip_denoised (`bool`, defaults to `True`): + Whether to clip the denoised outputs to `(-1, 1)`. + timesteps (`List` or `np.ndarray` or `torch.Tensor`, *optional*): + An explicit timestep schedule that can be optionally specified. The timesteps are expected to be in + increasing order. + """ + + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 40, + sigma_min: float = 0.002, + sigma_max: float = 80.0, + sigma_data: float = 0.5, + s_noise: float = 1.0, + rho: float = 7.0, + clip_denoised: bool = True, + ): + # standard deviation of the initial noise distribution + self.init_noise_sigma = sigma_max + + ramp = np.linspace(0, 1, num_train_timesteps) + sigmas = self._convert_to_karras(ramp) + timesteps = self.sigma_to_t(sigmas) + + # setable values + self.num_inference_steps = None + self.sigmas = torch.from_numpy(sigmas) + self.timesteps = torch.from_numpy(timesteps) + self.custom_timesteps = False + self.is_scale_input_called = False + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input( + self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor] + ) -> torch.FloatTensor: + """ + Scales the consistency model input by `(sigma**2 + sigma_data**2) ** 0.5`. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`float` or `torch.FloatTensor`): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + # Get sigma corresponding to timestep + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + + sample = sample / ((sigma**2 + self.config.sigma_data**2) ** 0.5) + + self.is_scale_input_called = True + return sample + + def sigma_to_t(self, sigmas: Union[float, np.ndarray]): + """ + Gets scaled timesteps from the Karras sigmas for input to the consistency model. + + Args: + sigmas (`float` or `np.ndarray`): + A single Karras sigma or an array of Karras sigmas. + + Returns: + `float` or `np.ndarray`: + A scaled input timestep or scaled input timestep array. 
+ """ + if not isinstance(sigmas, np.ndarray): + sigmas = np.array(sigmas, dtype=np.float64) + + timesteps = 1000 * 0.25 * np.log(sigmas + 1e-44) + + return timesteps + + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + timesteps: Optional[List[int]] = None, + ): + """ + Sets the timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of equal spacing between timesteps is used. If `timesteps` is passed, + `num_inference_steps` must be `None`. + """ + if num_inference_steps is None and timesteps is None: + raise ValueError("Exactly one of `num_inference_steps` or `timesteps` must be supplied.") + + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `timesteps`.") + + # Follow DDPMScheduler custom timesteps logic + if timesteps is not None: + for i in range(1, len(timesteps)): + if timesteps[i] >= timesteps[i - 1]: + raise ValueError("`timesteps` must be in descending order.") + + if timesteps[0] >= self.config.num_train_timesteps: + raise ValueError( + f"`timesteps` must start before `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps}." + ) + + timesteps = np.array(timesteps, dtype=np.int64) + self.custom_timesteps = True + else: + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + self.custom_timesteps = False + + # Map timesteps to Karras sigmas directly for multistep sampling + # See https://github.com/openai/consistency_models/blob/main/cm/karras_diffusion.py#L675 + num_train_timesteps = self.config.num_train_timesteps + ramp = timesteps[::-1].copy() + ramp = ramp / (num_train_timesteps - 1) + sigmas = self._convert_to_karras(ramp) + timesteps = self.sigma_to_t(sigmas) + + sigmas = np.concatenate([sigmas, [self.sigma_min]]).astype(np.float32) + self.sigmas = torch.from_numpy(sigmas).to(device=device) + + if str(device).startswith("mps"): + # mps does not support float64 + self.timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32) + else: + self.timesteps = torch.from_numpy(timesteps).to(device=device) + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Modified _convert_to_karras implementation that takes in ramp as argument + def _convert_to_karras(self, ramp): + """Constructs the noise schedule of Karras et al. 
(2022).""" + + sigma_min: float = self.config.sigma_min + sigma_max: float = self.config.sigma_max + + rho = self.config.rho + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def get_scalings(self, sigma): + sigma_data = self.config.sigma_data + + c_skip = sigma_data**2 / (sigma**2 + sigma_data**2) + c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + return c_skip, c_out + + def get_scalings_for_boundary_condition(self, sigma): + """ + Gets the scalings used in the consistency model parameterization (from Appendix C of the + [paper](https://huggingface.co/papers/2303.01469)) to enforce boundary condition. + + + + `epsilon` in the equations for `c_skip` and `c_out` is set to `sigma_min`. + + + + Args: + sigma (`torch.FloatTensor`): + The current sigma in the Karras sigma schedule. + + Returns: + `tuple`: + A two-element tuple where `c_skip` (which weights the current sample) is the first element and `c_out` + (which weights the consistency model output) is the second element. + """ + sigma_min = self.config.sigma_min + sigma_data = self.config.sigma_data + + c_skip = sigma_data**2 / ((sigma - sigma_min) ** 2 + sigma_data**2) + c_out = (sigma - sigma_min) * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + return c_skip, c_out + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + sample: torch.FloatTensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[CMStochasticIterativeSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + timestep (`float`): + The current timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a + [`~schedulers.scheduling_consistency_models.CMStochasticIterativeSchedulerOutput`] or `tuple`. 
+ + Returns: + [`~schedulers.scheduling_consistency_models.CMStochasticIterativeSchedulerOutput`] or `tuple`: + If return_dict is `True`, + [`~schedulers.scheduling_consistency_models.CMStochasticIterativeSchedulerOutput`] is returned, + otherwise a tuple is returned where the first element is the sample tensor. + """ + + if ( + isinstance(timestep, int) + or isinstance(timestep, torch.IntTensor) + or isinstance(timestep, torch.LongTensor) + ): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + f" `{self.__class__}.step()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if not self.is_scale_input_called: + logger.warning( + "The `scale_model_input` function should be called before `step` to ensure correct denoising. " + "See `StableDiffusionPipeline` for a usage example." + ) + + sigma_min = self.config.sigma_min + sigma_max = self.config.sigma_max + + if self.step_index is None: + self._init_step_index(timestep) + + # sigma_next corresponds to next_t in original implementation + sigma = self.sigmas[self.step_index] + if self.step_index + 1 < self.config.num_train_timesteps: + sigma_next = self.sigmas[self.step_index + 1] + else: + # Set sigma_next to sigma_min + sigma_next = self.sigmas[-1] + + # Get scalings for boundary conditions + c_skip, c_out = self.get_scalings_for_boundary_condition(sigma) + + # 1. Denoise model output using boundary conditions + denoised = c_out * model_output + c_skip * sample + if self.config.clip_denoised: + denoised = denoised.clamp(-1, 1) + + # 2. Sample z ~ N(0, s_noise^2 * I) + # Noise is not used for onestep sampling. + if len(self.timesteps) > 1: + noise = randn_tensor( + model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator + ) + else: + noise = torch.zeros_like(model_output) + z = noise * self.config.s_noise + + sigma_hat = sigma_next.clamp(min=sigma_min, max=sigma_max) + + # 3. 
Return noisy sample + # tau = sigma_hat, eps = sigma_min + prev_sample = denoised + z * (sigma_hat**2 - sigma_min**2) ** 0.5 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return CMStochasticIterativeSchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim.py new file mode 100644 index 000000000..33d3892a0 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim.py @@ -0,0 +1,520 @@ +# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM +class DDIMSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. 
+ + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.FloatTensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.FloatTensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class DDIMScheduler(SchedulerMixin, ConfigMixin): + """ + `DDIMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with + non-Markovian guidance. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. 
Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, defaults to `True`): + Each diffusion step uses the alphas product value at that step and at the previous one. For the final step + there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the alpha value at step 0. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + clip_sample: bool = True, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + clip_sample_range: float = 1.0, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) + + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample + + def _get_variance(self, timestep, prev_timestep): + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. 
Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." + + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + """ + + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) + .round()[::-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'." 
+ ) + + self.timesteps = torch.from_numpy(timesteps).to(device) + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + eta: float = 0.0, + use_clipped_model_output: bool = False, + generator=None, + variance_noise: Optional[torch.FloatTensor] = None, + return_dict: bool = True, + ) -> Union[DDIMSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + eta (`float`): + The weight of noise for added noise in diffusion step. + use_clipped_model_output (`bool`, defaults to `False`): + If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary + because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no + clipping has happened, "corrected" `model_output` would coincide with the one provided as input and + `use_clipped_model_output` has no effect. + generator (`torch.Generator`, *optional*): + A random number generator. + variance_noise (`torch.FloatTensor`): + Alternative to generating noise with `generator` by directly providing the noise for the variance + itself. Useful for methods such as [`CycleDiffusion`]. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf + # Ideally, read DDIM paper in-detail understanding + + # Notation ( -> + # - pred_noise_t -> e_theta(x_t, t) + # - pred_original_sample -> f_theta(x_t, t) or x_0 + # - std_dev_t -> sigma_t + # - eta -> η + # - pred_sample_direction -> "direction pointing to x_t" + # - pred_prev_sample -> "x_t-1" + + # 1. get previous step value (=t-1) + prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps + + # 2. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + + beta_prod_t = 1 - alpha_prod_t + + # 3. 
compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + pred_epsilon = model_output + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction`" + ) + + # 4. Clip or threshold "predicted x_0" + if self.config.thresholding: + pred_original_sample = self._threshold_sample(pred_original_sample) + elif self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 5. compute variance: "sigma_t(η)" -> see formula (16) + # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) + variance = self._get_variance(timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + if use_clipped_model_output: + # the pred_epsilon is always re-derived from the clipped x_0 in Glide + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + + # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon + + # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + + if eta > 0: + if variance_noise is not None and generator is not None: + raise ValueError( + "Cannot pass both generator and variance_noise. Please make sure that either `generator` or" + " `variance_noise` stays `None`." 
+ ) + + if variance_noise is None: + variance_noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype + ) + variance = std_dev_t * variance_noise + + prev_sample = prev_sample + variance + + if not return_dict: + return (prev_sample,) + + return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity + def get_velocity( + self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim_flax.py new file mode 100644 index 000000000..dc3d8455b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim_flax.py @@ -0,0 +1,313 @@ +# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
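Not part of the patch: the vendored DDIMScheduler above is easiest to sanity-check with a tiny denoising loop. This is a minimal sketch, assuming a diffusers installation that exposes DDIMScheduler (the same class this file defines); dummy_unet is a hypothetical stand-in for a real noise-prediction UNet, and the sample shape and step count are arbitrary.

import torch
from diffusers import DDIMScheduler

# same defaults as the vendored class: epsilon prediction, linear beta schedule
scheduler = DDIMScheduler(num_train_timesteps=1000, beta_schedule="linear", prediction_type="epsilon")
scheduler.set_timesteps(50)  # 50 inference steps out of 1000 training steps

sample = torch.randn(1, 3, 64, 64)  # start from pure Gaussian noise

def dummy_unet(x, t):
    # placeholder for `unet(x, t).sample`; a real model returns the predicted noise residual
    return torch.zeros_like(x)

for t in scheduler.timesteps:
    model_output = dummy_unet(sample, t)
    # eta=0.0 is the deterministic DDIM update; eta>0 re-introduces stochastic variance
    sample = scheduler.step(model_output, t, sample, eta=0.0).prev_sample

With `rescale_betas_zero_snr=True` and `timestep_spacing="trailing"` the same loop follows the zero-terminal-SNR recipe from the paper linked in the docstring.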
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax.numpy as jnp + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import ( + CommonSchedulerState, + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + add_noise_common, + get_velocity_common, +) + + +@flax.struct.dataclass +class DDIMSchedulerState: + common: CommonSchedulerState + final_alpha_cumprod: jnp.ndarray + + # setable values + init_noise_sigma: jnp.ndarray + timesteps: jnp.ndarray + num_inference_steps: Optional[int] = None + + @classmethod + def create( + cls, + common: CommonSchedulerState, + final_alpha_cumprod: jnp.ndarray, + init_noise_sigma: jnp.ndarray, + timesteps: jnp.ndarray, + ): + return cls( + common=common, + final_alpha_cumprod=final_alpha_cumprod, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + +@dataclass +class FlaxDDIMSchedulerOutput(FlaxSchedulerOutput): + state: DDIMSchedulerState + + +class FlaxDDIMScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising + diffusion probabilistic models (DDPMs) with non-Markovian guidance. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2010.02502 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`jnp.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + clip_sample (`bool`, default `True`): + option to clip predicted sample between for numerical stability. The clip range is determined by `clip_sample_range`. + clip_sample_range (`float`, default `1.0`): + the maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, default `True`): + each diffusion step uses the value of alphas product at that step and at the previous one. For the final + step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the value of alpha at step 0. + steps_offset (`int`, default `0`): + An offset added to the inference steps, as required by some model families. 
+ prediction_type (`str`, default `epsilon`): + indicates whether the model predicts the noise (epsilon), or the samples. One of `epsilon`, `sample`. + `v-prediction` is not supported for this scheduler. + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + the `dtype` used for params and computation. + """ + + _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] + + dtype: jnp.dtype + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[jnp.ndarray] = None, + clip_sample: bool = True, + clip_sample_range: float = 1.0, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + dtype: jnp.dtype = jnp.float32, + ): + self.dtype = dtype + + def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDIMSchedulerState: + if common is None: + common = CommonSchedulerState.create(self) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + final_alpha_cumprod = ( + jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0] + ) + + # standard deviation of the initial noise distribution + init_noise_sigma = jnp.array(1.0, dtype=self.dtype) + + timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] + + return DDIMSchedulerState.create( + common=common, + final_alpha_cumprod=final_alpha_cumprod, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + def scale_model_input( + self, state: DDIMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None + ) -> jnp.ndarray: + """ + Args: + state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. + sample (`jnp.ndarray`): input sample + timestep (`int`, optional): current timestep + + Returns: + `jnp.ndarray`: scaled input sample + """ + return sample + + def set_timesteps( + self, state: DDIMSchedulerState, num_inference_steps: int, shape: Tuple = () + ) -> DDIMSchedulerState: + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`DDIMSchedulerState`): + the `FlaxDDIMScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. 
+ """ + step_ratio = self.config.num_train_timesteps // num_inference_steps + # creates integer timesteps by multiplying by ratio + # rounding to avoid issues when num_inference_step is power of 3 + timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1] + self.config.steps_offset + + return state.replace( + num_inference_steps=num_inference_steps, + timesteps=timesteps, + ) + + def _get_variance(self, state: DDIMSchedulerState, timestep, prev_timestep): + alpha_prod_t = state.common.alphas_cumprod[timestep] + alpha_prod_t_prev = jnp.where( + prev_timestep >= 0, state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod + ) + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + def step( + self, + state: DDIMSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + eta: float = 0.0, + return_dict: bool = True, + ) -> Union[FlaxDDIMSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + state (`DDIMSchedulerState`): the `FlaxDDIMScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + return_dict (`bool`): option for returning tuple rather than FlaxDDIMSchedulerOutput class + + Returns: + [`FlaxDDIMSchedulerOutput`] or `tuple`: [`FlaxDDIMSchedulerOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. + + """ + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf + # Ideally, read DDIM paper in-detail understanding + + # Notation ( -> + # - pred_noise_t -> e_theta(x_t, t) + # - pred_original_sample -> f_theta(x_t, t) or x_0 + # - std_dev_t -> sigma_t + # - eta -> η + # - pred_sample_direction -> "direction pointing to x_t" + # - pred_prev_sample -> "x_t-1" + + # 1. get previous step value (=t-1) + prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps + + alphas_cumprod = state.common.alphas_cumprod + final_alpha_cumprod = state.final_alpha_cumprod + + # 2. compute alphas, betas + alpha_prod_t = alphas_cumprod[timestep] + alpha_prod_t_prev = jnp.where(prev_timestep >= 0, alphas_cumprod[prev_timestep], final_alpha_cumprod) + + beta_prod_t = 1 - alpha_prod_t + + # 3. 
compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + pred_epsilon = model_output + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction`" + ) + + # 4. Clip or threshold "predicted x_0" + if self.config.clip_sample: + pred_original_sample = pred_original_sample.clip( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 4. compute variance: "sigma_t(η)" -> see formula (16) + # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) + variance = self._get_variance(state, timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon + + # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + + if not return_dict: + return (prev_sample, state) + + return FlaxDDIMSchedulerOutput(prev_sample=prev_sample, state=state) + + def add_noise( + self, + state: DDIMSchedulerState, + original_samples: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + return add_noise_common(state.common, original_samples, noise, timesteps) + + def get_velocity( + self, + state: DDIMSchedulerState, + sample: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + return get_velocity_common(state.common, sample, noise, timesteps) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim_inverse.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim_inverse.py new file mode 100644 index 000000000..b4c19e455 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim_inverse.py @@ -0,0 +1,374 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers.schedulers.scheduling_utils import SchedulerMixin +from diffusers.utils import BaseOutput, deprecate + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM +class DDIMSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.FloatTensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.FloatTensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. 
+ alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class DDIMInverseScheduler(SchedulerMixin, ConfigMixin): + """ + `DDIMInverseScheduler` is the reverse scheduler of [`DDIMScheduler`]. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, defaults to `True`): + Each diffusion step uses the alphas product value at that step and at the previous one. For the final step + there is no previous alpha. When this option is `True` the previous alpha product is fixed to 0, otherwise + it uses the alpha value at step `num_train_timesteps - 1`. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
+ """ + + order = 1 + ignore_for_config = ["kwargs"] + _deprecated_kwargs = ["set_alpha_to_zero"] + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + clip_sample: bool = True, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + clip_sample_range: float = 1.0, + timestep_spacing: str = "leading", + rescale_betas_zero_snr: bool = False, + **kwargs, + ): + if kwargs.get("set_alpha_to_zero", None) is not None: + deprecation_message = ( + "The `set_alpha_to_zero` argument is deprecated. Please use `set_alpha_to_one` instead." + ) + deprecate("set_alpha_to_zero", "1.0.0", deprecation_message, standard_warn=False) + set_alpha_to_one = kwargs["set_alpha_to_zero"] + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # At every step in inverted ddim, we are looking into the next alphas_cumprod + # For the initial step, there is no current alphas_cumprod, and the index is out of bounds + # `set_alpha_to_one` decides whether we set this parameter simply to one + # in this case, self.step() just output the predicted noise + # or whether we use the initial alpha used in training the diffusion model. + self.initial_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64)) + + # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.scale_model_input + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. 
+ """ + + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + + # "leading" and "trailing" corresponds to annotation of Table 1. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)[::-1]).astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'." + ) + + self.timesteps = torch.from_numpy(timesteps).to(device) + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + return_dict: bool = True, + ) -> Union[DDIMSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + eta (`float`): + The weight of noise for added noise in diffusion step. + use_clipped_model_output (`bool`, defaults to `False`): + If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary + because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no + clipping has happened, "corrected" `model_output` would coincide with the one provided as input and + `use_clipped_model_output` has no effect. + variance_noise (`torch.FloatTensor`): + Alternative to generating noise with `generator` by directly providing the noise for the variance + itself. Useful for methods such as [`CycleDiffusion`]. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput`] or + `tuple`. + + Returns: + [`~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput`] is + returned, otherwise a tuple is returned where the first element is the sample tensor. + + """ + # 1. get previous step value (=t+1) + prev_timestep = timestep + timestep = min( + timestep - self.config.num_train_timesteps // self.num_inference_steps, self.config.num_train_timesteps - 1 + ) + + # 2. 
compute alphas, betas + # change original implementation to exactly match noise levels for analogous forward process + alpha_prod_t = self.alphas_cumprod[timestep] if timestep >= 0 else self.initial_alpha_cumprod + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + pred_epsilon = model_output + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction`" + ) + + # 4. Clip or threshold "predicted x_0" + if self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * pred_epsilon + + # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + + if not return_dict: + return (prev_sample, pred_original_sample) + return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim_parallel.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim_parallel.py new file mode 100644 index 000000000..225ed736c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddim_parallel.py @@ -0,0 +1,645 @@ +# Copyright 2024 ParaDiGMS authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
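Not part of the patch: DDIMInverseScheduler above runs the DDIM update in the opposite direction (its timesteps ascend from 0 towards `num_train_timesteps`), and is typically paired with DDIMScheduler for inversion-based editing. A minimal round-trip sketch under the same assumptions as before: installed diffusers, a hypothetical `dummy_unet`, arbitrary latent shape; with a real model the second loop approximately recovers the starting latent.

import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler

inverse = DDIMInverseScheduler(num_train_timesteps=1000)
forward = DDIMScheduler(num_train_timesteps=1000)
inverse.set_timesteps(50)
forward.set_timesteps(50)

latent = torch.randn(1, 4, 64, 64)

def dummy_unet(x, t):
    # placeholder for a real noise-prediction model
    return torch.zeros_like(x)

# inversion: timesteps run 0 -> T, each step() returns the next (noisier) latent
for t in inverse.timesteps:
    latent = inverse.step(dummy_unet(latent, t), t, latent).prev_sample

# sampling: timesteps run T -> 0, walking the noisy latent back towards data
for t in forward.timesteps:
    latent = forward.step(dummy_unet(latent, t), t, latent).prev_sample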
+ +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput +class DDIMParallelSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.FloatTensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.FloatTensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. 
+ alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class DDIMParallelScheduler(SchedulerMixin, ConfigMixin): + """ + Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising + diffusion probabilistic models (DDPMs) with non-Markovian guidance. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2010.02502 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + clip_sample (`bool`, default `True`): + option to clip predicted sample for numerical stability. + clip_sample_range (`float`, default `1.0`): + the maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, default `True`): + each diffusion step uses the value of alphas product at that step and at the previous one. For the final + step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the value of alpha at step 0. + steps_offset (`int`, default `0`): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, default `epsilon`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 + https://imagen.research.google/video/paper.pdf) + thresholding (`bool`, default `False`): + whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487). + Note that the thresholding method is unsuitable for latent-space diffusion models (such as + stable-diffusion). + dynamic_thresholding_ratio (`float`, default `0.995`): + the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen + (https://arxiv.org/abs/2205.11487). Valid only when `thresholding=True`. + sample_max_value (`float`, default `1.0`): + the threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, default `"leading"`): + The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample + Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information. 
+ rescale_betas_zero_snr (`bool`, default `False`): + whether to rescale the betas to have zero terminal SNR (proposed by https://arxiv.org/pdf/2305.08891.pdf). + This can enable the model to generate very bright and dark samples instead of limiting it to samples with + medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + _is_ode_scheduler = True + + @register_to_config + # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.__init__ + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + clip_sample: bool = True, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + clip_sample_range: float = 1.0, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) + + # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.scale_model_input + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. 
+ """ + return sample + + def _get_variance(self, timestep, prev_timestep=None): + if prev_timestep is None: + prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps + + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + def _batch_get_variance(self, t, prev_t): + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] + alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." + + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.set_timesteps + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + """ + + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) + .round()[::-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + self.timesteps = torch.from_numpy(timesteps).to(device) + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + eta: float = 0.0, + use_clipped_model_output: bool = False, + generator=None, + variance_noise: Optional[torch.FloatTensor] = None, + return_dict: bool = True, + ) -> Union[DDIMParallelSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + current instance of sample being created by diffusion process. + eta (`float`): weight of noise for added noise in diffusion step. + use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped + predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when + `self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would + coincide with the one provided as input and `use_clipped_model_output` will have no effect. + generator: random number generator. + variance_noise (`torch.FloatTensor`): instead of generating noise for the variance using `generator`, we + can directly provide the noise for the variance itself. This is useful for methods such as + CycleDiffusion. (https://arxiv.org/abs/2210.05559) + return_dict (`bool`): option for returning tuple rather than DDIMParallelSchedulerOutput class + + Returns: + [`~schedulers.scheduling_utils.DDIMParallelSchedulerOutput`] or `tuple`: + [`~schedulers.scheduling_utils.DDIMParallelSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. + When returning a tuple, the first element is the sample tensor.
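+
+        Example (illustrative sketch only; `unet` and `latents` are assumed placeholders for a denoising model
+        and its working latents, neither is defined in this file):
+
+            >>> scheduler.set_timesteps(50)
+            >>> for t in scheduler.timesteps:
+            ...     noise_pred = unet(latents, t).sample
+            ...     latents = scheduler.step(noise_pred, t, latents).prev_sample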
+ + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf + # Ideally, read DDIM paper in-detail understanding + + # Notation ( -> + # - pred_noise_t -> e_theta(x_t, t) + # - pred_original_sample -> f_theta(x_t, t) or x_0 + # - std_dev_t -> sigma_t + # - eta -> η + # - pred_sample_direction -> "direction pointing to x_t" + # - pred_prev_sample -> "x_t-1" + + # 1. get previous step value (=t-1) + prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps + + # 2. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + pred_epsilon = model_output + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction`" + ) + + # 4. Clip or threshold "predicted x_0" + if self.config.thresholding: + pred_original_sample = self._threshold_sample(pred_original_sample) + elif self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 5. compute variance: "sigma_t(η)" -> see formula (16) + # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) + variance = self._get_variance(timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + if use_clipped_model_output: + # the pred_epsilon is always re-derived from the clipped x_0 in Glide + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + + # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon + + # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + + if eta > 0: + if variance_noise is not None and generator is not None: + raise ValueError( + "Cannot pass both generator and variance_noise. Please make sure that either `generator` or" + " `variance_noise` stays `None`." 
+ ) + + if variance_noise is None: + variance_noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype + ) + variance = std_dev_t * variance_noise + + prev_sample = prev_sample + variance + + if not return_dict: + return (prev_sample,) + + return DDIMParallelSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + def batch_step_no_noise( + self, + model_output: torch.FloatTensor, + timesteps: List[int], + sample: torch.FloatTensor, + eta: float = 0.0, + use_clipped_model_output: bool = False, + ) -> torch.FloatTensor: + """ + Batched version of the `step` function, to be able to reverse the SDE for multiple samples/timesteps at once. + Also, does not add any noise to the predicted sample, which is necessary for parallel sampling where the noise + is pre-sampled by the pipeline. + + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): direct output from learned diffusion model. + timesteps (`List[int]`): + current discrete timesteps in the diffusion chain. This is now a list of integers. + sample (`torch.FloatTensor`): + current instance of sample being created by diffusion process. + eta (`float`): weight of noise for added noise in diffusion step. + use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped + predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when + `self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would + coincide with the one provided as input and `use_clipped_model_output` will have not effect. + + Returns: + `torch.FloatTensor`: sample tensor at previous timestep. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + assert eta == 0.0 + + # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf + # Ideally, read DDIM paper in-detail understanding + + # Notation ( -> + # - pred_noise_t -> e_theta(x_t, t) + # - pred_original_sample -> f_theta(x_t, t) or x_0 + # - std_dev_t -> sigma_t + # - eta -> η + # - pred_sample_direction -> "direction pointing to x_t" + # - pred_prev_sample -> "x_t-1" + + # 1. get previous step value (=t-1) + t = timesteps + prev_t = t - self.config.num_train_timesteps // self.num_inference_steps + + t = t.view(-1, *([1] * (model_output.ndim - 1))) + prev_t = prev_t.view(-1, *([1] * (model_output.ndim - 1))) + + # 1. compute alphas, betas + self.alphas_cumprod = self.alphas_cumprod.to(model_output.device) + self.final_alpha_cumprod = self.final_alpha_cumprod.to(model_output.device) + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] + alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) + + beta_prod_t = 1 - alpha_prod_t + + # 3. 
compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + pred_epsilon = model_output + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction`" + ) + + # 4. Clip or threshold "predicted x_0" + if self.config.thresholding: + pred_original_sample = self._threshold_sample(pred_original_sample) + elif self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 5. compute variance: "sigma_t(η)" -> see formula (16) + # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) + variance = self._batch_get_variance(t, prev_t).to(model_output.device).view(*alpha_prod_t_prev.shape) + std_dev_t = eta * variance ** (0.5) + + if use_clipped_model_output: + # the pred_epsilon is always re-derived from the clipped x_0 in Glide + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + + # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon + + # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + + return prev_sample + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity + def get_velocity( + self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm.py new file mode 100644 index 000000000..e1f55a202 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm.py @@ -0,0 +1,562 @@ +# Copyright 2024 UC Berkeley Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +@dataclass +class DDPMSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.FloatTensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.FloatTensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. 
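+    # Together, the shift above and the scale below map sqrt(alphas_cumprod[t]) to
+    #   (sqrt(alphas_cumprod[t]) - sqrt(alphas_cumprod[T])) * sqrt(alphas_cumprod[0]) / (sqrt(alphas_cumprod[0]) - sqrt(alphas_cumprod[T])),
+    # so the terminal value becomes exactly zero (zero terminal SNR) while the first value is preserved.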
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class DDPMScheduler(SchedulerMixin, ConfigMixin): + """ + `DDPMScheduler` explores the connections between denoising score matching and Langevin dynamics sampling. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + An array of betas to pass directly to the constructor without using `beta_start` and `beta_end`. + variance_type (`str`, defaults to `"fixed_small"`): + Clip the variance when adding noise to the denoised sample. Choose from `fixed_small`, `fixed_small_log`, + `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
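+
+        Example (an illustrative sketch; the argument values below are arbitrary choices, not requirements of this
+        class):
+
+            >>> from diffusers import DDPMScheduler
+            >>> scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2")
+            >>> scheduler.set_timesteps(num_inference_steps=50)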
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + variance_type: str = "fixed_small", + clip_sample: bool = True, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + clip_sample_range: float = 1.0, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + steps_offset: int = 0, + rescale_betas_zero_snr: int = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + elif beta_schedule == "sigmoid": + # GeoDiff sigmoid schedule + betas = torch.linspace(-6, 6, num_train_timesteps) + self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + self.one = torch.tensor(1.0) + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.custom_timesteps = False + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) + + self.variance_type = variance_type + + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample + + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + timesteps: Optional[List[int]] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of equal spacing between timesteps is used. If `timesteps` is passed, + `num_inference_steps` must be `None`. 
+ + """ + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") + + if timesteps is not None: + for i in range(1, len(timesteps)): + if timesteps[i] >= timesteps[i - 1]: + raise ValueError("`custom_timesteps` must be in descending order.") + + if timesteps[0] >= self.config.num_train_timesteps: + raise ValueError( + f"`timesteps` must start before `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps}." + ) + + timesteps = np.array(timesteps, dtype=np.int64) + self.custom_timesteps = True + else: + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + self.custom_timesteps = False + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) + .round()[::-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + self.timesteps = torch.from_numpy(timesteps).to(device) + + def _get_variance(self, t, predicted_variance=None, variance_type=None): + prev_t = self.previous_timestep(t) + + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one + current_beta_t = 1 - alpha_prod_t / alpha_prod_t_prev + + # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) + # and sample from it to get previous sample + # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample + variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t + + # we always take the log of variance, so clamp it to ensure it's not 0 + variance = torch.clamp(variance, min=1e-20) + + if variance_type is None: + variance_type = self.config.variance_type + + # hacks - were probably added for training stability + if variance_type == "fixed_small": + variance = variance + # for rl-diffuser https://arxiv.org/abs/2205.09991 + elif variance_type == "fixed_small_log": + variance = torch.log(variance) + variance = torch.exp(0.5 * variance) + elif variance_type == "fixed_large": + variance = current_beta_t + elif variance_type == "fixed_large_log": + # Glide max_log + variance = torch.log(current_beta_t) + elif variance_type == "learned": + return predicted_variance + elif variance_type == "learned_range": + min_log = torch.log(variance) + max_log = torch.log(current_beta_t) + frac = (predicted_variance + 1) / 2 + variance = frac * max_log + (1 - frac) * min_log + + return variance + + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." + + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + generator=None, + return_dict: bool = True, + ) -> Union[DDPMSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. 
This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + t = timestep + + prev_t = self.previous_timestep(t) + + if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: + model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) + else: + predicted_variance = None + + # 1. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + current_alpha_t = alpha_prod_t / alpha_prod_t_prev + current_beta_t = 1 - current_alpha_t + + # 2. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" + " `v_prediction` for the DDPMScheduler." + ) + + # 3. Clip or threshold "predicted x_0" + if self.config.thresholding: + pred_original_sample = self._threshold_sample(pred_original_sample) + elif self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t + current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t + + # 5. Compute predicted previous sample µ_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample + + # 6. 
Add noise + variance = 0 + if t > 0: + device = model_output.device + variance_noise = randn_tensor( + model_output.shape, generator=generator, device=device, dtype=model_output.dtype + ) + if self.variance_type == "fixed_small_log": + variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise + elif self.variance_type == "learned_range": + variance = self._get_variance(t, predicted_variance=predicted_variance) + variance = torch.exp(0.5 * variance) * variance_noise + else: + variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * variance_noise + + pred_prev_sample = pred_prev_sample + variance + + if not return_dict: + return (pred_prev_sample,) + + return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) + + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + def get_velocity( + self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps + + def previous_timestep(self, timestep): + if self.custom_timesteps: + index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] + if index == self.timesteps.shape[0] - 1: + prev_t = torch.tensor(-1) + else: + prev_t = self.timesteps[index + 1] + else: + num_inference_steps = ( + self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps + ) + prev_t = timestep - self.config.num_train_timesteps // num_inference_steps + + return prev_t diff --git 
a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm_flax.py new file mode 100644 index 000000000..6bdfa5eb5 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm_flax.py @@ -0,0 +1,299 @@ +# Copyright 2024 UC Berkeley Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax +import jax.numpy as jnp + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import ( + CommonSchedulerState, + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + add_noise_common, + get_velocity_common, +) + + +@flax.struct.dataclass +class DDPMSchedulerState: + common: CommonSchedulerState + + # setable values + init_noise_sigma: jnp.ndarray + timesteps: jnp.ndarray + num_inference_steps: Optional[int] = None + + @classmethod + def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray): + return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps) + + +@dataclass +class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput): + state: DDPMSchedulerState + + +class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and + Langevin dynamics sampling. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2006.11239 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + variance_type (`str`): + options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`, + `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. 
+ clip_sample (`bool`, default `True`): + option to clip predicted sample between -1 and 1 for numerical stability. + prediction_type (`str`, default `epsilon`): + indicates whether the model predicts the noise (epsilon), or the samples. One of `epsilon`, `sample`. + `v-prediction` is not supported for this scheduler. + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + the `dtype` used for params and computation. + """ + + _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] + + dtype: jnp.dtype + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[jnp.ndarray] = None, + variance_type: str = "fixed_small", + clip_sample: bool = True, + prediction_type: str = "epsilon", + dtype: jnp.dtype = jnp.float32, + ): + self.dtype = dtype + + def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState: + if common is None: + common = CommonSchedulerState.create(self) + + # standard deviation of the initial noise distribution + init_noise_sigma = jnp.array(1.0, dtype=self.dtype) + + timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] + + return DDPMSchedulerState.create( + common=common, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + def scale_model_input( + self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None + ) -> jnp.ndarray: + """ + Args: + state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. + sample (`jnp.ndarray`): input sample + timestep (`int`, optional): current timestep + + Returns: + `jnp.ndarray`: scaled input sample + """ + return sample + + def set_timesteps( + self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = () + ) -> DDPMSchedulerState: + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`DDIMSchedulerState`): + the `FlaxDDPMScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. 
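+
+        Example (illustrative sketch; `scheduler` is assumed to be an already constructed `FlaxDDPMScheduler`):
+
+            >>> state = scheduler.create_state()
+            >>> state = scheduler.set_timesteps(state, num_inference_steps=50)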
+ """ + + step_ratio = self.config.num_train_timesteps // num_inference_steps + # creates integer timesteps by multiplying by ratio + # rounding to avoid issues when num_inference_step is power of 3 + timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1] + + return state.replace( + num_inference_steps=num_inference_steps, + timesteps=timesteps, + ) + + def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None): + alpha_prod_t = state.common.alphas_cumprod[t] + alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype)) + + # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) + # and sample from it to get previous sample + # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample + variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] + + if variance_type is None: + variance_type = self.config.variance_type + + # hacks - were probably added for training stability + if variance_type == "fixed_small": + variance = jnp.clip(variance, a_min=1e-20) + # for rl-diffuser https://arxiv.org/abs/2205.09991 + elif variance_type == "fixed_small_log": + variance = jnp.log(jnp.clip(variance, a_min=1e-20)) + elif variance_type == "fixed_large": + variance = state.common.betas[t] + elif variance_type == "fixed_large_log": + # Glide max_log + variance = jnp.log(state.common.betas[t]) + elif variance_type == "learned": + return predicted_variance + elif variance_type == "learned_range": + min_log = variance + max_log = state.common.betas[t] + frac = (predicted_variance + 1) / 2 + variance = frac * max_log + (1 - frac) * min_log + + return variance + + def step( + self, + state: DDPMSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + key: Optional[jax.Array] = None, + return_dict: bool = True, + ) -> Union[FlaxDDPMSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + state (`DDPMSchedulerState`): the `FlaxDDPMScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + key (`jax.Array`): a PRNG key. + return_dict (`bool`): option for returning tuple rather than FlaxDDPMSchedulerOutput class + + Returns: + [`FlaxDDPMSchedulerOutput`] or `tuple`: [`FlaxDDPMSchedulerOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. + + """ + t = timestep + + if key is None: + key = jax.random.PRNGKey(0) + + if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: + model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1) + else: + predicted_variance = None + + # 1. compute alphas, betas + alpha_prod_t = state.common.alphas_cumprod[t] + alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype)) + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + # 2. 
compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` " + " for the FlaxDDPMScheduler." + ) + + # 3. Clip "predicted x_0" + if self.config.clip_sample: + pred_original_sample = jnp.clip(pred_original_sample, -1, 1) + + # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * state.common.betas[t]) / beta_prod_t + current_sample_coeff = state.common.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t + + # 5. Compute predicted previous sample µ_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample + + # 6. Add noise + def random_variance(): + split_key = jax.random.split(key, num=1) + noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype) + return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise + + variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype)) + + pred_prev_sample = pred_prev_sample + variance + + if not return_dict: + return (pred_prev_sample, state) + + return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state) + + def add_noise( + self, + state: DDPMSchedulerState, + original_samples: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + return add_noise_common(state.common, original_samples, noise, timesteps) + + def get_velocity( + self, + state: DDPMSchedulerState, + sample: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + return get_velocity_common(state.common, sample, noise, timesteps) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm_parallel.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm_parallel.py new file mode 100644 index 000000000..ec4fbd4eb --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm_parallel.py @@ -0,0 +1,653 @@ +# Copyright 2024 ParaDiGMS authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput +class DDPMParallelSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.FloatTensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.FloatTensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. 
+ alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class DDPMParallelScheduler(SchedulerMixin, ConfigMixin): + """ + Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and + Langevin dynamics sampling. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2006.11239 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, `squaredcos_cap_v2` or `sigmoid`. + trained_betas (`np.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + variance_type (`str`): + options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`, + `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. + clip_sample (`bool`, default `True`): + option to clip predicted sample for numerical stability. + clip_sample_range (`float`, default `1.0`): + the maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + prediction_type (`str`, default `epsilon`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 + https://imagen.research.google/video/paper.pdf) + thresholding (`bool`, default `False`): + whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487). + Note that the thresholding method is unsuitable for latent-space diffusion models (such as + stable-diffusion). + dynamic_thresholding_ratio (`float`, default `0.995`): + the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen + (https://arxiv.org/abs/2205.11487). Valid only when `thresholding=True`. + sample_max_value (`float`, default `1.0`): + the threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, default `"leading"`): + The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample + Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information. + steps_offset (`int`, default `0`): + An offset added to the inference steps, as required by some model families. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. 
This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + _is_ode_scheduler = False + + @register_to_config + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.__init__ + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + variance_type: str = "fixed_small", + clip_sample: bool = True, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + clip_sample_range: float = 1.0, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + steps_offset: int = 0, + rescale_betas_zero_snr: int = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + elif beta_schedule == "sigmoid": + # GeoDiff sigmoid schedule + betas = torch.linspace(-6, 6, num_train_timesteps) + self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + self.one = torch.tensor(1.0) + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.custom_timesteps = False + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) + + self.variance_type = variance_type + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.scale_model_input + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.set_timesteps + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + timesteps: Optional[List[int]] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. 
+ device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of equal spacing between timesteps is used. If `timesteps` is passed, + `num_inference_steps` must be `None`. + + """ + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") + + if timesteps is not None: + for i in range(1, len(timesteps)): + if timesteps[i] >= timesteps[i - 1]: + raise ValueError("`custom_timesteps` must be in descending order.") + + if timesteps[0] >= self.config.num_train_timesteps: + raise ValueError( + f"`timesteps` must start before `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps}." + ) + + timesteps = np.array(timesteps, dtype=np.int64) + self.custom_timesteps = True + else: + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + self.custom_timesteps = False + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) + .round()[::-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
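# [Editor's note] Illustrative sketch, not part of the patch: the three
# timestep_spacing strategies handled above, evaluated for num_train_timesteps=1000
# and 10 inference steps (cf. Table 2 of https://arxiv.org/abs/2305.08891).
import numpy as np

def _demo_timestep_spacing(T=1000, n=10, steps_offset=0):
    linspace = np.linspace(0, T - 1, n).round()[::-1].copy().astype(np.int64)
    leading = (np.arange(0, n) * (T // n)).round()[::-1].copy().astype(np.int64) + steps_offset
    trailing = np.round(np.arange(T, 0, -T / n)).astype(np.int64) - 1
    return {"linspace": linspace, "leading": leading, "trailing": trailing}

# _demo_timestep_spacing()  # "leading" starts at 900 and ends at 0, "trailing" starts at 999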
+ ) + + self.timesteps = torch.from_numpy(timesteps).to(device) + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._get_variance + def _get_variance(self, t, predicted_variance=None, variance_type=None): + prev_t = self.previous_timestep(t) + + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one + current_beta_t = 1 - alpha_prod_t / alpha_prod_t_prev + + # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) + # and sample from it to get previous sample + # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample + variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t + + # we always take the log of variance, so clamp it to ensure it's not 0 + variance = torch.clamp(variance, min=1e-20) + + if variance_type is None: + variance_type = self.config.variance_type + + # hacks - were probably added for training stability + if variance_type == "fixed_small": + variance = variance + # for rl-diffuser https://arxiv.org/abs/2205.09991 + elif variance_type == "fixed_small_log": + variance = torch.log(variance) + variance = torch.exp(0.5 * variance) + elif variance_type == "fixed_large": + variance = current_beta_t + elif variance_type == "fixed_large_log": + # Glide max_log + variance = torch.log(current_beta_t) + elif variance_type == "learned": + return predicted_variance + elif variance_type == "learned_range": + min_log = torch.log(variance) + max_log = torch.log(current_beta_t) + frac = (predicted_variance + 1) / 2 + variance = frac * max_log + (1 - frac) * min_log + + return variance + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
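# [Editor's note] Toy sketch (not part of the patch) of the variance choices handled
# by _get_variance above for a single timestep: "fixed_small" uses the posterior
# variance directly, while "learned_range" interpolates between its log and
# log(beta_t) using the model's predicted variance in [-1, 1].
import torch

def _demo_variance(alpha_prod_t, alpha_prod_t_prev, predicted_variance=None):
    current_beta_t = 1 - alpha_prod_t / alpha_prod_t_prev
    posterior = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t
    posterior = torch.clamp(posterior, min=1e-20)
    out = {"fixed_small": posterior, "fixed_large": current_beta_t}
    if predicted_variance is not None:
        frac = (predicted_variance + 1) / 2
        out["learned_range"] = frac * torch.log(current_beta_t) + (1 - frac) * torch.log(posterior)
    return out

# _demo_variance(torch.tensor(0.5), torch.tensor(0.6), predicted_variance=torch.tensor(0.0))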
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + generator=None, + return_dict: bool = True, + ) -> Union[DDPMParallelSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + current instance of sample being created by diffusion process. + generator: random number generator. + return_dict (`bool`): option for returning tuple rather than DDPMParallelSchedulerOutput class + + Returns: + [`~schedulers.scheduling_utils.DDPMParallelSchedulerOutput`] or `tuple`: + [`~schedulers.scheduling_utils.DDPMParallelSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. + When returning a tuple, the first element is the sample tensor. + + """ + t = timestep + + prev_t = self.previous_timestep(t) + + if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: + model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) + else: + predicted_variance = None + + # 1. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + current_alpha_t = alpha_prod_t / alpha_prod_t_prev + current_beta_t = 1 - current_alpha_t + + # 2. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" + " `v_prediction` for the DDPMScheduler." + ) + + # 3. 
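# [Editor's note] Minimal standalone sketch of the dynamic thresholding implemented
# above (not part of the patch): clamp each sample to its own high-percentile value s
# and rescale back toward [-1, 1] whenever s > 1.
import torch

def _demo_dynamic_threshold(sample, ratio=0.995, max_value=1.5):
    b, c, *dims = sample.shape
    flat = sample.reshape(b, -1)
    s = torch.quantile(flat.abs(), ratio, dim=1).clamp(min=1, max=max_value).unsqueeze(1)
    return (flat.clamp(-s, s) / s).reshape(b, c, *dims)

# _demo_dynamic_threshold(torch.randn(2, 3, 8, 8) * 3).abs().max()  # <= 1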
Clip or threshold "predicted x_0" + if self.config.thresholding: + pred_original_sample = self._threshold_sample(pred_original_sample) + elif self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t + current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t + + # 5. Compute predicted previous sample µ_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample + + # 6. Add noise + variance = 0 + if t > 0: + device = model_output.device + variance_noise = randn_tensor( + model_output.shape, generator=generator, device=device, dtype=model_output.dtype + ) + if self.variance_type == "fixed_small_log": + variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise + elif self.variance_type == "learned_range": + variance = self._get_variance(t, predicted_variance=predicted_variance) + variance = torch.exp(0.5 * variance) * variance_noise + else: + variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * variance_noise + + pred_prev_sample = pred_prev_sample + variance + + if not return_dict: + return (pred_prev_sample,) + + return DDPMParallelSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) + + def batch_step_no_noise( + self, + model_output: torch.FloatTensor, + timesteps: List[int], + sample: torch.FloatTensor, + ) -> torch.FloatTensor: + """ + Batched version of the `step` function, to be able to reverse the SDE for multiple samples/timesteps at once. + Also, does not add any noise to the predicted sample, which is necessary for parallel sampling where the noise + is pre-sampled by the pipeline. + + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): direct output from learned diffusion model. + timesteps (`List[int]`): + current discrete timesteps in the diffusion chain. This is now a list of integers. + sample (`torch.FloatTensor`): + current instance of sample being created by diffusion process. + + Returns: + `torch.FloatTensor`: sample tensor at previous timestep. + """ + t = timesteps + num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps + prev_t = t - self.config.num_train_timesteps // num_inference_steps + + t = t.view(-1, *([1] * (model_output.ndim - 1))) + prev_t = prev_t.view(-1, *([1] * (model_output.ndim - 1))) + + if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: + model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) + else: + pass + + # 1. 
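# [Editor's note] Condensed sketch of one reverse step as performed above for the
# default "epsilon" / "fixed_small" configuration (illustration only, not part of
# the patch); see formulas (6), (7) and (15) of https://arxiv.org/pdf/2006.11239.pdf.
import torch

def _demo_ddpm_step(model_output, sample, alphas_cumprod, t, prev_t, generator=None):
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    beta_t = 1 - a_t / a_prev
    x0 = (sample - (1 - a_t).sqrt() * model_output) / a_t.sqrt()       # predicted x_0
    coef_x0 = a_prev.sqrt() * beta_t / (1 - a_t)
    coef_xt = (a_t / a_prev).sqrt() * (1 - a_prev) / (1 - a_t)
    mean = coef_x0 * x0 + coef_xt * sample                              # posterior mean mu_t
    if t > 0:
        var = ((1 - a_prev) / (1 - a_t) * beta_t).clamp(min=1e-20)
        mean = mean + var.sqrt() * torch.randn(sample.shape, generator=generator)
    return mean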
compute alphas, betas + self.alphas_cumprod = self.alphas_cumprod.to(model_output.device) + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] + alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) + + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + current_alpha_t = alpha_prod_t / alpha_prod_t_prev + current_beta_t = 1 - current_alpha_t + + # 2. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" + " `v_prediction` for the DDPMParallelScheduler." + ) + + # 3. Clip or threshold "predicted x_0" + if self.config.thresholding: + pred_original_sample = self._threshold_sample(pred_original_sample) + elif self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t + current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t + + # 5. Compute predicted previous sample µ_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample + + return pred_prev_sample + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity + def get_velocity( + self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + 
self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep + def previous_timestep(self, timestep): + if self.custom_timesteps: + index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] + if index == self.timesteps.shape[0] - 1: + prev_t = torch.tensor(-1) + else: + prev_t = self.timesteps[index + 1] + else: + num_inference_steps = ( + self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps + ) + prev_t = timestep - self.config.num_train_timesteps // num_inference_steps + + return prev_t diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm_wuerstchen.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm_wuerstchen.py new file mode 100644 index 000000000..ad4e4f414 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ddpm_wuerstchen.py @@ -0,0 +1,230 @@ +# Copyright (c) 2022 Pablo Pernías MIT License +# Copyright 2024 UC Berkeley Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +@dataclass +class DDPMWuerstchenSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's step function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. 
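# [Editor's note] Illustrative sketch (not part of the patch) of what add_noise and
# get_velocity above compute: the forward process x_t = sqrt(a_bar)*x_0 + sqrt(1-a_bar)*eps
# and the v-prediction target v = sqrt(a_bar)*eps - sqrt(1-a_bar)*x_0.
import torch

def _demo_forward_and_velocity(x0, noise, alphas_cumprod, t):
    a_bar = alphas_cumprod[t].reshape(-1, *([1] * (x0.ndim - 1)))
    x_t = a_bar.sqrt() * x0 + (1 - a_bar).sqrt() * noise
    v = a_bar.sqrt() * noise - (1 - a_bar).sqrt() * x0
    return x_t, v

# x0, eps = torch.randn(4, 3, 8, 8), torch.randn(4, 3, 8, 8)
# a_bar = torch.cumprod(1 - torch.linspace(1e-4, 2e-2, 1000), dim=0)
# x_t, v = _demo_forward_and_velocity(x0, eps, a_bar, torch.tensor([10, 100, 500, 900]))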
+ """ + + prev_sample: torch.FloatTensor + + +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class DDPMWuerstchenScheduler(SchedulerMixin, ConfigMixin): + """ + Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and + Langevin dynamics sampling. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2006.11239 + + Args: + scaler (`float`): .... + s (`float`): .... + """ + + @register_to_config + def __init__( + self, + scaler: float = 1.0, + s: float = 0.008, + ): + self.scaler = scaler + self.s = torch.tensor([s]) + self._init_alpha_cumprod = torch.cos(self.s / (1 + self.s) * torch.pi * 0.5) ** 2 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + def _alpha_cumprod(self, t, device): + if self.scaler > 1: + t = 1 - (1 - t) ** self.scaler + elif self.scaler < 1: + t = t**self.scaler + alpha_cumprod = torch.cos( + (t + self.s.to(device)) / (1 + self.s.to(device)) * torch.pi * 0.5 + ) ** 2 / self._init_alpha_cumprod.to(device) + return alpha_cumprod.clamp(0.0001, 0.9999) + + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): input sample + timestep (`int`, optional): current timestep + + Returns: + `torch.FloatTensor`: scaled input sample + """ + return sample + + def set_timesteps( + self, + num_inference_steps: int = None, + timesteps: Optional[List[int]] = None, + device: Union[str, torch.device] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. 
+ + Args: + num_inference_steps (`Dict[float, int]`): + the number of diffusion steps used when generating samples with a pre-trained model. If passed, then + `timesteps` must be `None`. + device (`str` or `torch.device`, optional): + the device to which the timesteps are moved to. {2 / 3: 20, 0.0: 10} + """ + if timesteps is None: + timesteps = torch.linspace(1.0, 0.0, num_inference_steps + 1, device=device) + if not isinstance(timesteps, torch.Tensor): + timesteps = torch.Tensor(timesteps).to(device) + self.timesteps = timesteps + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + generator=None, + return_dict: bool = True, + ) -> Union[DDPMWuerstchenSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + current instance of sample being created by diffusion process. + generator: random number generator. + return_dict (`bool`): option for returning tuple rather than DDPMWuerstchenSchedulerOutput class + + Returns: + [`DDPMWuerstchenSchedulerOutput`] or `tuple`: [`DDPMWuerstchenSchedulerOutput`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. + + """ + dtype = model_output.dtype + device = model_output.device + t = timestep + + prev_t = self.previous_timestep(t) + + alpha_cumprod = self._alpha_cumprod(t, device).view(t.size(0), *[1 for _ in sample.shape[1:]]) + alpha_cumprod_prev = self._alpha_cumprod(prev_t, device).view(prev_t.size(0), *[1 for _ in sample.shape[1:]]) + alpha = alpha_cumprod / alpha_cumprod_prev + + mu = (1.0 / alpha).sqrt() * (sample - (1 - alpha) * model_output / (1 - alpha_cumprod).sqrt()) + + std_noise = randn_tensor(mu.shape, generator=generator, device=model_output.device, dtype=model_output.dtype) + std = ((1 - alpha) * (1.0 - alpha_cumprod_prev) / (1.0 - alpha_cumprod)).sqrt() * std_noise + pred = mu + std * (prev_t != 0).float().view(prev_t.size(0), *[1 for _ in sample.shape[1:]]) + + if not return_dict: + return (pred.to(dtype),) + + return DDPMWuerstchenSchedulerOutput(prev_sample=pred.to(dtype)) + + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + device = original_samples.device + dtype = original_samples.dtype + alpha_cumprod = self._alpha_cumprod(timesteps, device=device).view( + timesteps.size(0), *[1 for _ in original_samples.shape[1:]] + ) + noisy_samples = alpha_cumprod.sqrt() * original_samples + (1 - alpha_cumprod).sqrt() * noise + return noisy_samples.to(dtype=dtype) + + def __len__(self): + return self.config.num_train_timesteps + + def previous_timestep(self, timestep): + index = (self.timesteps - timestep[0]).abs().argmin().item() + prev_t = self.timesteps[index + 1][None].expand(timestep.shape[0]) + return prev_t diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_deis_multistep.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_deis_multistep.py new file mode 100644 index 000000000..a4af0c272 --- /dev/null +++ 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_deis_multistep.py @@ -0,0 +1,786 @@ +# Copyright 2024 FLAIR Lab and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: check https://arxiv.org/abs/2204.13902 and https://github.com/qsh-zh/deis for more info +# The codebase is modified based on https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import deprecate +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class DEISMultistepScheduler(SchedulerMixin, ConfigMixin): + """ + `DEISMultistepScheduler` is a fast high order solver for diffusion ordinary differential equations (ODEs). + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. 
+ beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + solver_order (`int`, defaults to 2): + The DEIS order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, defaults to `epsilon`): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True`. + algorithm_type (`str`, defaults to `deis`): + The algorithm type for the solver. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {σi}. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[np.ndarray] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "deis", + solver_type: str = "logrho", + lower_order_final: bool = True, + use_karras_sigmas: Optional[bool] = False, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # settings for DEIS + if algorithm_type not in ["deis"]: + if algorithm_type in ["dpmsolver", "dpmsolver++"]: + self.register_to_config(algorithm_type="deis") + else: + raise NotImplementedError(f"{algorithm_type} does is not implemented for {self.__class__}") + + if solver_type not in ["logrho"]: + if solver_type in ["midpoint", "heun", "bh1", "bh2"]: + self.register_to_config(solver_type="logrho") + else: + raise NotImplementedError(f"solver type {solver_type} does is not implemented for {self.__class__}") + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.model_outputs = [None] * solver_order + self.lower_order_nums = 0 + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
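# [Editor's note] Sketch, not part of the patch: the VP-schedule quantities defined
# in the constructor above, plus the identity used later by _sigma_to_alpha_sigma_t
# to recover (alpha_t, sigma_t) from sigma = sigma_t / alpha_t alone.
import torch

def _demo_vp_quantities(betas):
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_t = alphas_cumprod.sqrt()
    sigma_t = (1 - alphas_cumprod).sqrt()
    lambda_t = alpha_t.log() - sigma_t.log()          # half log-SNR
    sigma = sigma_t / alpha_t                          # what self.sigmas stores
    alpha_back = 1 / (sigma**2 + 1).sqrt()             # == alpha_t, since alpha_t^2 + sigma_t^2 = 1
    assert torch.allclose(alpha_back, alpha_t, atol=1e-5)
    return alpha_t, sigma_t, lambda_t, sigma

# _demo_vp_quantities(torch.linspace(1e-4, 2e-2, 1000))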
of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps + 1) + .round()[::-1][:-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.arange(self.config.num_train_timesteps, 0, -step_ratio).round().copy().astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + if self.config.use_karras_sigmas: + log_sigmas = np.log(sigmas) + sigmas = np.flip(sigmas).copy() + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas) + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + + self.num_inference_steps = len(timesteps) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def convert_model_output( + self, + model_output: torch.FloatTensor, + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + Convert the model output to the corresponding type the DEIS algorithm needs. + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. 
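# [Editor's note] Standalone sketch of the Karras et al. (2022) sigma schedule built
# by _convert_to_karras above (illustration only, not part of the patch): sigmas are
# spaced uniformly in sigma^(1/rho), which concentrates steps near sigma_min.
import numpy as np

def _demo_karras_sigmas(sigma_min, sigma_max, num_inference_steps, rho=7.0):
    ramp = np.linspace(0, 1, num_inference_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho  # descending sigmas

# _demo_karras_sigmas(0.03, 14.6, 10)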
+ + Returns: + `torch.FloatTensor`: + The converted model output. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + if self.config.prediction_type == "epsilon": + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DEISMultistepScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + if self.config.algorithm_type == "deis": + return (sample - alpha_t * x0_pred) / sigma_t + else: + raise NotImplementedError("only support log-rho multistep deis now") + + def deis_first_order_update( + self, + model_output: torch.FloatTensor, + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the first-order DEIS (equivalent to DDIM). + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + if self.config.algorithm_type == "deis": + x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output + else: + raise NotImplementedError("only support log-rho multistep deis now") + return x_t + + def multistep_deis_second_order_update( + self, + model_output_list: List[torch.FloatTensor], + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the second-order multistep DEIS. 
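# [Editor's note] Sketch of the first-order update above, equivalent to DDIM (not
# part of the patch): given the converted model output eps = (x_t - alpha*x0)/sigma,
# take one exponential-integrator step from lambda_s to lambda_t.
import torch

def _demo_deis_first_order(sample, converted_model_output, sigma_s, sigma_t):
    # sigma_s, sigma_t are the stored ratio sigmas; sig_* are the VP sigmas.
    alpha_s, alpha_t = 1 / (sigma_s**2 + 1).sqrt(), 1 / (sigma_t**2 + 1).sqrt()
    sig_s, sig_t = sigma_s * alpha_s, sigma_t * alpha_t
    h = (alpha_t.log() - sig_t.log()) - (alpha_s.log() - sig_s.log())   # lambda_t - lambda_s
    return (alpha_t / alpha_s) * sample - sig_t * (torch.exp(h) - 1.0) * converted_model_output

# _demo_deis_first_order(torch.randn(1, 3, 8, 8), torch.randn(1, 3, 8, 8),
#                        torch.tensor(2.0), torch.tensor(1.0))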
+ + Args: + model_output_list (`List[torch.FloatTensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. + """ + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + rho_t, rho_s0, rho_s1 = sigma_t / alpha_t, sigma_s0 / alpha_s0, sigma_s1 / alpha_s1 + + if self.config.algorithm_type == "deis": + + def ind_fn(t, b, c): + # Integrate[(log(t) - log(c)) / (log(b) - log(c)), {t}] + return t * (-np.log(c) + np.log(t) - 1) / (np.log(b) - np.log(c)) + + coef1 = ind_fn(rho_t, rho_s0, rho_s1) - ind_fn(rho_s0, rho_s0, rho_s1) + coef2 = ind_fn(rho_t, rho_s1, rho_s0) - ind_fn(rho_s0, rho_s1, rho_s0) + + x_t = alpha_t * (sample / alpha_s0 + coef1 * m0 + coef2 * m1) + return x_t + else: + raise NotImplementedError("only support log-rho multistep deis now") + + def multistep_deis_third_order_update( + self, + model_output_list: List[torch.FloatTensor], + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the third-order multistep DEIS. + + Args: + model_output_list (`List[torch.FloatTensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.FloatTensor`): + A current instance of a sample created by diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. 
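# [Editor's note] Numeric sketch (not part of the patch) of the second-order DEIS
# coefficients computed above: ind_fn is the closed-form antiderivative of the
# log-linear Lagrange basis, evaluated between rho_s0 and rho_t.
import numpy as np

def _demo_deis_second_order_coeffs(rho_t, rho_s0, rho_s1):
    def ind_fn(t, b, c):
        # Integral of (log(t) - log(c)) / (log(b) - log(c)) dt
        return t * (-np.log(c) + np.log(t) - 1) / (np.log(b) - np.log(c))

    coef1 = ind_fn(rho_t, rho_s0, rho_s1) - ind_fn(rho_s0, rho_s0, rho_s1)
    coef2 = ind_fn(rho_t, rho_s1, rho_s0) - ind_fn(rho_s0, rho_s1, rho_s0)
    return coef1, coef2

# _demo_deis_second_order_coeffs(rho_t=0.5, rho_s0=1.0, rho_s1=2.0)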
+ """ + + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing`sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + self.sigmas[self.step_index - 2], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) + + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + + rho_t, rho_s0, rho_s1, rho_s2 = ( + sigma_t / alpha_t, + sigma_s0 / alpha_s0, + sigma_s1 / alpha_s1, + sigma_s2 / alpha_s2, + ) + + if self.config.algorithm_type == "deis": + + def ind_fn(t, b, c, d): + # Integrate[(log(t) - log(c))(log(t) - log(d)) / (log(b) - log(c))(log(b) - log(d)), {t}] + numerator = t * ( + np.log(c) * (np.log(d) - np.log(t) + 1) + - np.log(d) * np.log(t) + + np.log(d) + + np.log(t) ** 2 + - 2 * np.log(t) + + 2 + ) + denominator = (np.log(b) - np.log(c)) * (np.log(b) - np.log(d)) + return numerator / denominator + + coef1 = ind_fn(rho_t, rho_s0, rho_s1, rho_s2) - ind_fn(rho_s0, rho_s0, rho_s1, rho_s2) + coef2 = ind_fn(rho_t, rho_s1, rho_s2, rho_s0) - ind_fn(rho_s0, rho_s1, rho_s2, rho_s0) + coef3 = ind_fn(rho_t, rho_s2, rho_s0, rho_s1) - ind_fn(rho_s0, rho_s2, rho_s0, rho_s1) + + x_t = alpha_t * (sample / alpha_s0 + coef1 * m0 + coef2 * m1 + coef3 * m2) + + return x_t + else: + raise NotImplementedError("only support log-rho multistep deis now") + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. 
+ """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep DEIS. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + lower_order_final = ( + (self.step_index == len(self.timesteps) - 1) and self.config.lower_order_final and len(self.timesteps) < 15 + ) + lower_order_second = ( + (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15 + ) + + model_output = self.convert_model_output(model_output, sample=sample) + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.model_outputs[-1] = model_output + + if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: + prev_sample = self.deis_first_order_update(model_output, sample=sample) + elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: + prev_sample = self.multistep_deis_second_order_update(self.model_outputs, sample=sample) + else: + prev_sample = self.multistep_deis_third_order_update(self.model_outputs, sample=sample) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + + Returns: + `torch.FloatTensor`: + A scaled input sample. 
+ """ + return sample + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # begin_index is None when the scheduler is used for training + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py new file mode 100644 index 000000000..3bbfc65e2 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py @@ -0,0 +1,1029 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import deprecate +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. 
+ max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.FloatTensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.FloatTensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): + """ + `DPMSolverMultistepScheduler` is a fast dedicated high-order solver for diffusion ODEs. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + solver_order (`int`, defaults to 2): + The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). 
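A quick numerical check makes the effect of rescale_zero_terminal_snr concrete. This is a sketch only and assumes the helper defined above is in scope (for example imported from this vendored module): the first cumulative alpha is preserved, while the terminal one becomes exactly zero, which is why the constructor later clamps it to 2**-24 when rescale_betas_zero_snr is enabled.

import torch

betas = torch.linspace(0.0001, 0.02, 1000, dtype=torch.float32)
rescaled = rescale_zero_terminal_snr(betas)            # helper defined above
alphas_cumprod = torch.cumprod(1.0 - rescaled, dim=0)
print(float(alphas_cumprod[0]))    # ~0.9999, the first value is preserved
print(float(alphas_cumprod[-1]))   # 0.0, i.e. zero terminal SNR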
+ thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++"`. + algorithm_type (`str`, defaults to `dpmsolver++`): + Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, `sde-dpmsolver` or `sde-dpmsolver++`. The + `dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) + paper, and the `dpmsolver++` type implements the algorithms in the + [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or + `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. + solver_type (`str`, defaults to `midpoint`): + Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the + sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can + stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. + euler_at_final (`bool`, defaults to `False`): + Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail + richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference + steps, but sometimes may result in blurring. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {σi}. + use_lu_lambdas (`bool`, *optional*, defaults to `False`): + Whether to use the uniform-logSNR for step sizes proposed by Lu's DPM-Solver in the noise schedule during + the sampling process. If `True`, the sigmas and time steps are determined according to a sequence of + `lambda(t)`. + final_sigmas_type (`str`, defaults to `"zero"`): + The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final sigma + is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. + lambda_min_clipped (`float`, defaults to `-inf`): + Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the + cosine (`squaredcos_cap_v2`) noise schedule. + variance_type (`str`, *optional*): + Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output + contains the predicted Gaussian variance. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. 
This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "dpmsolver++", + solver_type: str = "midpoint", + lower_order_final: bool = True, + euler_at_final: bool = False, + use_karras_sigmas: Optional[bool] = False, + use_lu_lambdas: Optional[bool] = False, + final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" + lambda_min_clipped: float = -float("inf"), + variance_type: Optional[str] = None, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + rescale_betas_zero_snr: bool = False, + ): + if algorithm_type in ["dpmsolver", "sde-dpmsolver"]: + deprecation_message = f"algorithm_type {algorithm_type} is deprecated and will be removed in a future version. Choose from `dpmsolver++` or `sde-dpmsolver++` instead" + deprecate("algorithm_types dpmsolver and sde-dpmsolver", "1.0.0", deprecation_message) + + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + if rescale_betas_zero_snr: + # Close to 0 without being 0 so first sigma is not inf + # FP16 smallest positive subnormal works well here + self.alphas_cumprod[-1] = 2**-24 + + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # settings for DPM-Solver + if algorithm_type not in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]: + if algorithm_type == "deis": + self.register_to_config(algorithm_type="dpmsolver++") + else: + raise NotImplementedError(f"{algorithm_type} does is not implemented for {self.__class__}") + + if solver_type not in ["midpoint", "heun"]: + if solver_type in ["logrho", "bh1", "bh2"]: + self.register_to_config(solver_type="midpoint") + else: + raise NotImplementedError(f"{solver_type} does is not implemented for {self.__class__}") + + if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"] and final_sigmas_type == "zero": + raise ValueError( + f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please choose `sigma_min` instead." + ) + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.model_outputs = [None] * solver_order + self.lower_order_nums = 0 + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + # Clipping the minimum of all lambda(t) for numerical stability. + # This is critical for cosine (squaredcos_cap_v2) noise schedule. 
+ clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) + last_timestep = ((self.config.num_train_timesteps - clipped_idx).numpy()).item() + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].copy().astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = last_timestep // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + + if self.config.use_karras_sigmas: + sigmas = np.flip(sigmas).copy() + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + elif self.config.use_lu_lambdas: + lambdas = np.flip(log_sigmas.copy()) + lambdas = self._convert_to_lu(in_lambdas=lambdas, num_inference_steps=num_inference_steps) + sigmas = np.exp(lambdas) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.final_sigmas_type == "sigma_min": + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" + ) + + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas) + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + + self.num_inference_steps = len(timesteps) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. 
We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." + + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def _convert_to_lu(self, in_lambdas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: + """Constructs the noise schedule of Lu et al. 
(2022).""" + + lambda_min: float = in_lambdas[-1].item() + lambda_max: float = in_lambdas[0].item() + + rho = 1.0 # 1.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = lambda_min ** (1 / rho) + max_inv_rho = lambda_max ** (1 / rho) + lambdas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return lambdas + + def convert_model_output( + self, + model_output: torch.FloatTensor, + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is + designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an + integral of the data prediction model. + + + + The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise + prediction and data prediction models. + + + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The converted model output. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + # DPM-Solver++ needs to solve an integral of the data prediction model. + if self.config.algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]: + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + model_output = model_output[:, :3] + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverMultistepScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + + # DPM-Solver needs to solve an integral of the noise prediction model. + elif self.config.algorithm_type in ["dpmsolver", "sde-dpmsolver"]: + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. 
+ if self.config.variance_type in ["learned", "learned_range"]: + epsilon = model_output[:, :3] + else: + epsilon = model_output + elif self.config.prediction_type == "sample": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = (sample - alpha_t * model_output) / sigma_t + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = alpha_t * model_output + sigma_t * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverMultistepScheduler." + ) + + if self.config.thresholding: + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = (sample - sigma_t * epsilon) / alpha_t + x0_pred = self._threshold_sample(x0_pred) + epsilon = (sample - alpha_t * x0_pred) / sigma_t + + return epsilon + + def dpm_solver_first_order_update( + self, + model_output: torch.FloatTensor, + *args, + sample: torch.FloatTensor = None, + noise: Optional[torch.FloatTensor] = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the first-order DPMSolver (equivalent to DDIM). + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + if self.config.algorithm_type == "dpmsolver++": + x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output + elif self.config.algorithm_type == "dpmsolver": + x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + x_t = ( + (sigma_t / sigma_s * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.algorithm_type == "sde-dpmsolver": + assert noise is not None + x_t = ( + (alpha_t / alpha_s) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * model_output + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + return x_t + + def multistep_dpm_solver_second_order_update( + self, + model_output_list: List[torch.FloatTensor], + *args, + 
sample: torch.FloatTensor = None, + noise: Optional[torch.FloatTensor] = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the second-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.FloatTensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. + """ + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2211.01095 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + ) + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.algorithm_type == 
"sde-dpmsolver": + assert noise is not None + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s0) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * (torch.exp(h) - 1.0)) * D1 + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s0) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 + - 2.0 * (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + return x_t + + def multistep_dpm_solver_third_order_update( + self, + model_output_list: List[torch.FloatTensor], + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the third-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.FloatTensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.FloatTensor`): + A current instance of a sample created by diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. + """ + + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing`sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + self.sigmas[self.step_index - 2], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) + + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + + h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 + r0, r1 = h_0 / h, h_1 / h + D0 = m0 + D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) + D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) + D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 + ) + return x_t + + def 
index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + generator=None, + variance_noise: Optional[torch.FloatTensor] = None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep DPMSolver. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + variance_noise (`torch.FloatTensor`): + Alternative to generating noise with `generator` by directly providing the noise for the variance + itself. Useful for methods such as [`LEdits++`]. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
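The index_for_timestep helper above deliberately picks the second matching index when a timestep occurs more than once in the schedule, so that runs starting mid-schedule (e.g. image-to-image) do not skip a sigma. A toy reproduction of that lookup with plain tensors, for illustration only:

import torch

schedule_timesteps = torch.tensor([999, 750, 500, 500, 250, 0])   # duplicated 500 on purpose
candidates = (schedule_timesteps == 500).nonzero()
step_index = candidates[1].item() if len(candidates) > 1 else candidates[0].item()
print(step_index)  # 3, the second occurrence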
+ + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Improve numerical stability for small number of steps + lower_order_final = (self.step_index == len(self.timesteps) - 1) and ( + self.config.euler_at_final + or (self.config.lower_order_final and len(self.timesteps) < 15) + or self.config.final_sigmas_type == "zero" + ) + lower_order_second = ( + (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15 + ) + + model_output = self.convert_model_output(model_output, sample=sample) + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.model_outputs[-1] = model_output + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + if self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"] and variance_noise is None: + noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=torch.float32 + ) + elif self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]: + noise = variance_noise.to(device=model_output.device, dtype=torch.float32) + else: + noise = None + + if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: + prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) + elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: + prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) + else: + prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # Cast sample back to expected dtype + prev_sample = prev_sample.to(model_output.dtype) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + + Returns: + `torch.FloatTensor`: + A scaled input sample. 
+ """ + return sample + + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # begin_index is None when the scheduler is used for training + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py new file mode 100644 index 000000000..0b48b499d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py @@ -0,0 +1,643 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver + +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import flax +import jax +import jax.numpy as jnp + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import ( + CommonSchedulerState, + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + add_noise_common, +) + + +@flax.struct.dataclass +class DPMSolverMultistepSchedulerState: + common: CommonSchedulerState + alpha_t: jnp.ndarray + sigma_t: jnp.ndarray + lambda_t: jnp.ndarray + + # setable values + init_noise_sigma: jnp.ndarray + timesteps: jnp.ndarray + num_inference_steps: Optional[int] = None + + # running values + model_outputs: Optional[jnp.ndarray] = None + lower_order_nums: Optional[jnp.int32] = None + prev_timestep: Optional[jnp.int32] = None + cur_sample: Optional[jnp.ndarray] = None + + @classmethod + def create( + cls, + common: CommonSchedulerState, + alpha_t: jnp.ndarray, + sigma_t: jnp.ndarray, + lambda_t: jnp.ndarray, + init_noise_sigma: jnp.ndarray, + timesteps: jnp.ndarray, + ): + return cls( + common=common, + alpha_t=alpha_t, + sigma_t=sigma_t, + lambda_t=lambda_t, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + +@dataclass +class FlaxDPMSolverMultistepSchedulerOutput(FlaxSchedulerOutput): + state: DPMSolverMultistepSchedulerState + + +class FlaxDPMSolverMultistepScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + DPM-Solver (and the improved version DPM-Solver++) is a fast dedicated high-order solver for diffusion ODEs with + the convergence order guarantee. Empirically, sampling by DPM-Solver with only 20 steps can generate high-quality + samples, and it can generate quite good samples even in only 10 steps. + + For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095 + + Currently, we support the multistep DPM-Solver for both noise prediction models and data prediction models. We + recommend to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling. + + We also support the "dynamic thresholding" method in Imagen (https://arxiv.org/abs/2205.11487). For pixel-space + diffusion models, you can set both `algorithm_type="dpmsolver++"` and `thresholding=True` to use the dynamic + thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models (such as + stable-diffusion). + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. 
+ trained_betas (`np.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + solver_order (`int`, default `2`): + the order of DPM-Solver; can be `1` or `2` or `3`. We recommend to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, default `epsilon`): + indicates whether the model predicts the noise (epsilon), or the data / `x0`. One of `epsilon`, `sample`, + or `v-prediction`. + thresholding (`bool`, default `False`): + whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487). + For pixel-space diffusion models, you can set both `algorithm_type=dpmsolver++` and `thresholding=True` to + use the dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion + models (such as stable-diffusion). + dynamic_thresholding_ratio (`float`, default `0.995`): + the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen + (https://arxiv.org/abs/2205.11487). + sample_max_value (`float`, default `1.0`): + the threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++`. + algorithm_type (`str`, default `dpmsolver++`): + the algorithm type for the solver. Either `dpmsolver` or `dpmsolver++`. The `dpmsolver` type implements the + algorithms in https://arxiv.org/abs/2206.00927, and the `dpmsolver++` type implements the algorithms in + https://arxiv.org/abs/2211.01095. We recommend to use `dpmsolver++` with `solver_order=2` for guided + sampling (e.g. stable-diffusion). + solver_type (`str`, default `midpoint`): + the solver type for the second-order solver. Either `midpoint` or `heun`. The solver type slightly affects + the sample quality, especially for small number of steps. We empirically find that `midpoint` solvers are + slightly better, so we recommend to use the `midpoint` type. + lower_order_final (`bool`, default `True`): + whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. We empirically + find this trick can stabilize the sampling of DPM-Solver for steps < 15, especially for steps <= 10. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + the `dtype` used for params and computation. 
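Because the Flax variant keeps every mutable value in an explicit DPMSolverMultistepSchedulerState, typical usage threads that state through each call. A rough sketch, assuming the vendored package exposes FlaxDPMSolverMultistepScheduler and with random arrays standing in for UNet outputs:

import jax
from diffusers import FlaxDPMSolverMultistepScheduler

scheduler = FlaxDPMSolverMultistepScheduler()
state = scheduler.create_state()

shape = (1, 4, 64, 64)                            # illustrative latent shape
state = scheduler.set_timesteps(state, num_inference_steps=25, shape=shape)

rng = jax.random.PRNGKey(0)
sample = jax.random.normal(rng, shape)
for t in state.timesteps:
    rng, key = jax.random.split(rng)
    model_output = jax.random.normal(key, shape)  # stand-in for a UNet prediction
    sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)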
+ """ + + _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] + + dtype: jnp.dtype + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[jnp.ndarray] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "dpmsolver++", + solver_type: str = "midpoint", + lower_order_final: bool = True, + timestep_spacing: str = "linspace", + dtype: jnp.dtype = jnp.float32, + ): + self.dtype = dtype + + def create_state(self, common: Optional[CommonSchedulerState] = None) -> DPMSolverMultistepSchedulerState: + if common is None: + common = CommonSchedulerState.create(self) + + # Currently we only support VP-type noise schedule + alpha_t = jnp.sqrt(common.alphas_cumprod) + sigma_t = jnp.sqrt(1 - common.alphas_cumprod) + lambda_t = jnp.log(alpha_t) - jnp.log(sigma_t) + + # settings for DPM-Solver + if self.config.algorithm_type not in ["dpmsolver", "dpmsolver++"]: + raise NotImplementedError(f"{self.config.algorithm_type} does is not implemented for {self.__class__}") + if self.config.solver_type not in ["midpoint", "heun"]: + raise NotImplementedError(f"{self.config.solver_type} does is not implemented for {self.__class__}") + + # standard deviation of the initial noise distribution + init_noise_sigma = jnp.array(1.0, dtype=self.dtype) + + timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] + + return DPMSolverMultistepSchedulerState.create( + common=common, + alpha_t=alpha_t, + sigma_t=sigma_t, + lambda_t=lambda_t, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + def set_timesteps( + self, state: DPMSolverMultistepSchedulerState, num_inference_steps: int, shape: Tuple + ) -> DPMSolverMultistepSchedulerState: + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`DPMSolverMultistepSchedulerState`): + the `FlaxDPMSolverMultistepScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. + shape (`Tuple`): + the shape of the samples to be generated. + """ + last_timestep = self.config.num_train_timesteps + if self.config.timestep_spacing == "linspace": + timesteps = ( + jnp.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].astype(jnp.int32) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = last_timestep // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = ( + (jnp.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(jnp.int32) + ) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = jnp.arange(last_timestep, 0, -step_ratio).round().copy().astype(jnp.int32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + # initial running values + + model_outputs = jnp.zeros((self.config.solver_order,) + shape, dtype=self.dtype) + lower_order_nums = jnp.int32(0) + prev_timestep = jnp.int32(-1) + cur_sample = jnp.zeros(shape, dtype=self.dtype) + + return state.replace( + num_inference_steps=num_inference_steps, + timesteps=timesteps, + model_outputs=model_outputs, + lower_order_nums=lower_order_nums, + prev_timestep=prev_timestep, + cur_sample=cur_sample, + ) + + def convert_model_output( + self, + state: DPMSolverMultistepSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + ) -> jnp.ndarray: + """ + Convert the model output to the corresponding type that the algorithm (DPM-Solver / DPM-Solver++) needs. + + DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to + discretize an integral of the data prediction model. So we need to first convert the model output to the + corresponding type to match the algorithm. + + Note that the algorithm type and the model type is decoupled. That is to say, we can use either DPM-Solver or + DPM-Solver++ for both noise prediction model and data prediction model. + + Args: + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + + Returns: + `jnp.ndarray`: the converted model output. + """ + # DPM-Solver++ needs to solve an integral of the data prediction model. + if self.config.algorithm_type == "dpmsolver++": + if self.config.prediction_type == "epsilon": + alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, " + " or `v_prediction` for the FlaxDPMSolverMultistepScheduler." + ) + + if self.config.thresholding: + # Dynamic thresholding in https://arxiv.org/abs/2205.11487 + dynamic_max_val = jnp.percentile( + jnp.abs(x0_pred), self.config.dynamic_thresholding_ratio, axis=tuple(range(1, x0_pred.ndim)) + ) + dynamic_max_val = jnp.maximum( + dynamic_max_val, self.config.sample_max_value * jnp.ones_like(dynamic_max_val) + ) + x0_pred = jnp.clip(x0_pred, -dynamic_max_val, dynamic_max_val) / dynamic_max_val + return x0_pred + # DPM-Solver needs to solve an integral of the noise prediction model. + elif self.config.algorithm_type == "dpmsolver": + if self.config.prediction_type == "epsilon": + return model_output + elif self.config.prediction_type == "sample": + alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] + epsilon = (sample - alpha_t * model_output) / sigma_t + return epsilon + elif self.config.prediction_type == "v_prediction": + alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] + epsilon = alpha_t * model_output + sigma_t * sample + return epsilon + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, " + " or `v_prediction` for the FlaxDPMSolverMultistepScheduler." 
+ ) + + def dpm_solver_first_order_update( + self, + state: DPMSolverMultistepSchedulerState, + model_output: jnp.ndarray, + timestep: int, + prev_timestep: int, + sample: jnp.ndarray, + ) -> jnp.ndarray: + """ + One step for the first-order DPM-Solver (equivalent to DDIM). + + See https://arxiv.org/abs/2206.00927 for the detailed derivation. + + Args: + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + prev_timestep (`int`): previous discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + + Returns: + `jnp.ndarray`: the sample tensor at the previous timestep. + """ + t, s0 = prev_timestep, timestep + m0 = model_output + lambda_t, lambda_s = state.lambda_t[t], state.lambda_t[s0] + alpha_t, alpha_s = state.alpha_t[t], state.alpha_t[s0] + sigma_t, sigma_s = state.sigma_t[t], state.sigma_t[s0] + h = lambda_t - lambda_s + if self.config.algorithm_type == "dpmsolver++": + x_t = (sigma_t / sigma_s) * sample - (alpha_t * (jnp.exp(-h) - 1.0)) * m0 + elif self.config.algorithm_type == "dpmsolver": + x_t = (alpha_t / alpha_s) * sample - (sigma_t * (jnp.exp(h) - 1.0)) * m0 + return x_t + + def multistep_dpm_solver_second_order_update( + self, + state: DPMSolverMultistepSchedulerState, + model_output_list: jnp.ndarray, + timestep_list: List[int], + prev_timestep: int, + sample: jnp.ndarray, + ) -> jnp.ndarray: + """ + One step for the second-order multistep DPM-Solver. + + Args: + model_output_list (`List[jnp.ndarray]`): + direct outputs from learned diffusion model at current and latter timesteps. + timestep (`int`): current and latter discrete timestep in the diffusion chain. + prev_timestep (`int`): previous discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + + Returns: + `jnp.ndarray`: the sample tensor at the previous timestep. 
+ """ + t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2] + m0, m1 = model_output_list[-1], model_output_list[-2] + lambda_t, lambda_s0, lambda_s1 = state.lambda_t[t], state.lambda_t[s0], state.lambda_t[s1] + alpha_t, alpha_s0 = state.alpha_t[t], state.alpha_t[s0] + sigma_t, sigma_s0 = state.sigma_t[t], state.sigma_t[s0] + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2211.01095 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (jnp.exp(-h) - 1.0)) * D0 + - 0.5 * (alpha_t * (jnp.exp(-h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (jnp.exp(-h) - 1.0)) * D0 + + (alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0)) * D1 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (jnp.exp(h) - 1.0)) * D0 + - 0.5 * (sigma_t * (jnp.exp(h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (jnp.exp(h) - 1.0)) * D0 + - (sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0)) * D1 + ) + return x_t + + def multistep_dpm_solver_third_order_update( + self, + state: DPMSolverMultistepSchedulerState, + model_output_list: jnp.ndarray, + timestep_list: List[int], + prev_timestep: int, + sample: jnp.ndarray, + ) -> jnp.ndarray: + """ + One step for the third-order multistep DPM-Solver. + + Args: + model_output_list (`List[jnp.ndarray]`): + direct outputs from learned diffusion model at current and latter timesteps. + timestep (`int`): current and latter discrete timestep in the diffusion chain. + prev_timestep (`int`): previous discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + + Returns: + `jnp.ndarray`: the sample tensor at the previous timestep. 
+ """ + t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3] + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + lambda_t, lambda_s0, lambda_s1, lambda_s2 = ( + state.lambda_t[t], + state.lambda_t[s0], + state.lambda_t[s1], + state.lambda_t[s2], + ) + alpha_t, alpha_s0 = state.alpha_t[t], state.alpha_t[s0] + sigma_t, sigma_s0 = state.sigma_t[t], state.sigma_t[s0] + h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 + r0, r1 = h_0 / h, h_1 / h + D0 = m0 + D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) + D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) + D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (jnp.exp(-h) - 1.0)) * D0 + + (alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0)) * D1 + - (alpha_t * ((jnp.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (jnp.exp(h) - 1.0)) * D0 + - (sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0)) * D1 + - (sigma_t * ((jnp.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 + ) + return x_t + + def step( + self, + state: DPMSolverMultistepSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + return_dict: bool = True, + ) -> Union[FlaxDPMSolverMultistepSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by DPM-Solver. Core function to propagate the diffusion process + from the learned model outputs (most often the predicted noise). + + Args: + state (`DPMSolverMultistepSchedulerState`): + the `FlaxDPMSolverMultistepScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + return_dict (`bool`): option for returning tuple rather than FlaxDPMSolverMultistepSchedulerOutput class + + Returns: + [`FlaxDPMSolverMultistepSchedulerOutput`] or `tuple`: [`FlaxDPMSolverMultistepSchedulerOutput`] if + `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
+ + """ + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + (step_index,) = jnp.where(state.timesteps == timestep, size=1) + step_index = step_index[0] + + prev_timestep = jax.lax.select(step_index == len(state.timesteps) - 1, 0, state.timesteps[step_index + 1]) + + model_output = self.convert_model_output(state, model_output, timestep, sample) + + model_outputs_new = jnp.roll(state.model_outputs, -1, axis=0) + model_outputs_new = model_outputs_new.at[-1].set(model_output) + state = state.replace( + model_outputs=model_outputs_new, + prev_timestep=prev_timestep, + cur_sample=sample, + ) + + def step_1(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: + return self.dpm_solver_first_order_update( + state, + state.model_outputs[-1], + state.timesteps[step_index], + state.prev_timestep, + state.cur_sample, + ) + + def step_23(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: + def step_2(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: + timestep_list = jnp.array([state.timesteps[step_index - 1], state.timesteps[step_index]]) + return self.multistep_dpm_solver_second_order_update( + state, + state.model_outputs, + timestep_list, + state.prev_timestep, + state.cur_sample, + ) + + def step_3(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: + timestep_list = jnp.array( + [ + state.timesteps[step_index - 2], + state.timesteps[step_index - 1], + state.timesteps[step_index], + ] + ) + return self.multistep_dpm_solver_third_order_update( + state, + state.model_outputs, + timestep_list, + state.prev_timestep, + state.cur_sample, + ) + + step_2_output = step_2(state) + step_3_output = step_3(state) + + if self.config.solver_order == 2: + return step_2_output + elif self.config.lower_order_final and len(state.timesteps) < 15: + return jax.lax.select( + state.lower_order_nums < 2, + step_2_output, + jax.lax.select( + step_index == len(state.timesteps) - 2, + step_2_output, + step_3_output, + ), + ) + else: + return jax.lax.select( + state.lower_order_nums < 2, + step_2_output, + step_3_output, + ) + + step_1_output = step_1(state) + step_23_output = step_23(state) + + if self.config.solver_order == 1: + prev_sample = step_1_output + + elif self.config.lower_order_final and len(state.timesteps) < 15: + prev_sample = jax.lax.select( + state.lower_order_nums < 1, + step_1_output, + jax.lax.select( + step_index == len(state.timesteps) - 1, + step_1_output, + step_23_output, + ), + ) + + else: + prev_sample = jax.lax.select( + state.lower_order_nums < 1, + step_1_output, + step_23_output, + ) + + state = state.replace( + lower_order_nums=jnp.minimum(state.lower_order_nums + 1, self.config.solver_order), + ) + + if not return_dict: + return (prev_sample, state) + + return FlaxDPMSolverMultistepSchedulerOutput(prev_sample=prev_sample, state=state) + + def scale_model_input( + self, state: DPMSolverMultistepSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None + ) -> jnp.ndarray: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + state (`DPMSolverMultistepSchedulerState`): + the `FlaxDPMSolverMultistepScheduler` state data class instance. 
+ sample (`jnp.ndarray`): input sample + timestep (`int`, optional): current timestep + + Returns: + `jnp.ndarray`: scaled input sample + """ + return sample + + def add_noise( + self, + state: DPMSolverMultistepSchedulerState, + original_samples: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + return add_noise_common(state.common, original_samples, noise, timesteps) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py new file mode 100644 index 000000000..318b9ed54 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py @@ -0,0 +1,921 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import deprecate +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
+ Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin): + """ + `DPMSolverMultistepInverseScheduler` is the reverse scheduler of [`DPMSolverMultistepScheduler`]. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + solver_order (`int`, defaults to 2): + The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++"`. + algorithm_type (`str`, defaults to `dpmsolver++`): + Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, `sde-dpmsolver` or `sde-dpmsolver++`. The + `dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) + paper, and the `dpmsolver++` type implements the algorithms in the + [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or + `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. + solver_type (`str`, defaults to `midpoint`): + Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the + sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. 
+ lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can + stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. + euler_at_final (`bool`, defaults to `False`): + Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail + richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference + steps, but sometimes may result in blurring. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {σi}. + lambda_min_clipped (`float`, defaults to `-inf`): + Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the + cosine (`squaredcos_cap_v2`) noise schedule. + variance_type (`str`, *optional*): + Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output + contains the predicted Gaussian variance. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "dpmsolver++", + solver_type: str = "midpoint", + lower_order_final: bool = True, + euler_at_final: bool = False, + use_karras_sigmas: Optional[bool] = False, + lambda_min_clipped: float = -float("inf"), + variance_type: Optional[str] = None, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if algorithm_type in ["dpmsolver", "sde-dpmsolver"]: + deprecation_message = f"algorithm_type {algorithm_type} is deprecated and will be removed in a future version. Choose from `dpmsolver++` or `sde-dpmsolver++` instead" + deprecate("algorithm_types dpmsolver and sde-dpmsolver", "1.0.0", deprecation_message) + + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
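+            # Editorial note: "scaled_linear" draws the schedule linearly in sqrt(beta) space and squares it,
+            # i.e. roughly betas = linspace(sqrt(beta_start), sqrt(beta_end), N) ** 2, so intermediate betas
+            # come out smaller than a plain linear schedule with the same endpoints.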
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # settings for DPM-Solver + if algorithm_type not in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]: + if algorithm_type == "deis": + self.register_to_config(algorithm_type="dpmsolver++") + else: + raise NotImplementedError(f"{algorithm_type} does is not implemented for {self.__class__}") + + if solver_type not in ["midpoint", "heun"]: + if solver_type in ["logrho", "bh1", "bh2"]: + self.register_to_config(solver_type="midpoint") + else: + raise NotImplementedError(f"{solver_type} does is not implemented for {self.__class__}") + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32).copy() + self.timesteps = torch.from_numpy(timesteps) + self.model_outputs = [None] * solver_order + self.lower_order_nums = 0 + self._step_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + self.use_karras_sigmas = use_karras_sigmas + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + # Clipping the minimum of all lambda(t) for numerical stability. + # This is critical for cosine (squaredcos_cap_v2) noise schedule. + clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.lambda_min_clipped).item() + self.noisiest_timestep = self.config.num_train_timesteps - 1 - clipped_idx + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.noisiest_timestep, num_inference_steps + 1).round()[:-1].copy().astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = (self.noisiest_timestep + 1) // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[:-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.arange(self.noisiest_timestep + 1, 0, -step_ratio).round()[::-1].copy().astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', " + "'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + + if self.config.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + timesteps = timesteps.copy().astype(np.int64) + sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + sigma_max = ( + (1 - self.alphas_cumprod[self.noisiest_timestep]) / self.alphas_cumprod[self.noisiest_timestep] + ) ** 0.5 + sigmas = np.concatenate([sigmas, [sigma_max]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas) + + # when num_inference_steps == num_train_timesteps, we can end up with + # duplicates in timesteps. + _, unique_indices = np.unique(timesteps, return_index=True) + timesteps = timesteps[np.sort(unique_indices)] + + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + + self.num_inference_steps = len(timesteps) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
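+
+        In code terms (editorial sketch of the implementation below): `s = quantile(abs(x0),
+        dynamic_thresholding_ratio, dim=1)` clamped to `[1, sample_max_value]`, then `x0 = clamp(x0, -s, s) / s`.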
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.convert_model_output + def convert_model_output( + self, + model_output: torch.FloatTensor, + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is + designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an + integral of the data prediction model. 
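+
+        For an `epsilon`-prediction model under `dpmsolver++`, for example, the conversion below amounts to
+        (editorial sketch): `x0_pred = (sample - sigma_t * model_output) / alpha_t`.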
+ + + + The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise + prediction and data prediction models. + + + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The converted model output. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + # DPM-Solver++ needs to solve an integral of the data prediction model. + if self.config.algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]: + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + model_output = model_output[:, :3] + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverMultistepScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + + # DPM-Solver needs to solve an integral of the noise prediction model. + elif self.config.algorithm_type in ["dpmsolver", "sde-dpmsolver"]: + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + epsilon = model_output[:, :3] + else: + epsilon = model_output + elif self.config.prediction_type == "sample": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = (sample - alpha_t * model_output) / sigma_t + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = alpha_t * model_output + sigma_t * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverMultistepScheduler." 
+ ) + + if self.config.thresholding: + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = (sample - sigma_t * epsilon) / alpha_t + x0_pred = self._threshold_sample(x0_pred) + epsilon = (sample - alpha_t * x0_pred) / sigma_t + + return epsilon + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.dpm_solver_first_order_update + def dpm_solver_first_order_update( + self, + model_output: torch.FloatTensor, + *args, + sample: torch.FloatTensor = None, + noise: Optional[torch.FloatTensor] = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the first-order DPMSolver (equivalent to DDIM). + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + if self.config.algorithm_type == "dpmsolver++": + x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output + elif self.config.algorithm_type == "dpmsolver": + x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + x_t = ( + (sigma_t / sigma_s * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.algorithm_type == "sde-dpmsolver": + assert noise is not None + x_t = ( + (alpha_t / alpha_s) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * model_output + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + return x_t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.multistep_dpm_solver_second_order_update + def multistep_dpm_solver_second_order_update( + self, + model_output_list: List[torch.FloatTensor], + *args, + sample: torch.FloatTensor = None, + noise: Optional[torch.FloatTensor] = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the second-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.FloatTensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. 
+ + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. + """ + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2211.01095 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + ) + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.algorithm_type == "sde-dpmsolver": + assert noise is not None + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s0) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * (torch.exp(h) - 1.0)) * D1 + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s0) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 + - 2.0 * 
(sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + return x_t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.multistep_dpm_solver_third_order_update + def multistep_dpm_solver_third_order_update( + self, + model_output_list: List[torch.FloatTensor], + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the third-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.FloatTensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.FloatTensor`): + A current instance of a sample created by diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. + """ + + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing`sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + self.sigmas[self.step_index - 2], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) + + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + + h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 + r0, r1 = h_0 / h, h_1 / h + D0 = m0 + D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) + D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) + D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 + ) + return x_t + + def _init_step_index(self, timestep): + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + + index_candidates = (self.timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is 
taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + self._step_index = step_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + generator=None, + variance_noise: Optional[torch.FloatTensor] = None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep DPMSolver. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + variance_noise (`torch.FloatTensor`): + Alternative to generating noise with `generator` by directly providing the noise for the variance + itself. Useful for methods such as [`CycleDiffusion`]. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Improve numerical stability for small number of steps + lower_order_final = (self.step_index == len(self.timesteps) - 1) and ( + self.config.euler_at_final or (self.config.lower_order_final and len(self.timesteps) < 15) + ) + lower_order_second = ( + (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15 + ) + + model_output = self.convert_model_output(model_output, sample=sample) + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.model_outputs[-1] = model_output + + if self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"] and variance_noise is None: + noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype + ) + elif self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]: + noise = variance_noise + else: + noise = None + + if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: + prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) + elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: + prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) + else: + prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return 
(prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.scale_model_input + def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample + + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + step_indices = [] + for timestep in timesteps: + index_candidates = (schedule_timesteps == timestep).nonzero() + if len(index_candidates) == 0: + step_index = len(schedule_timesteps) - 1 + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + step_indices.append(step_index) + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_sde.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_sde.py new file mode 100644 index 000000000..4721933ae --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_sde.py @@ -0,0 +1,557 @@ +# Copyright 2024 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +import torchsde + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +class BatchedBrownianTree: + """A wrapper around torchsde.BrownianTree that enables batches of entropy.""" + + def __init__(self, x, t0, t1, seed=None, **kwargs): + t0, t1, self.sign = self.sort(t0, t1) + w0 = kwargs.get("w0", torch.zeros_like(x)) + if seed is None: + seed = torch.randint(0, 2**63 - 1, []).item() + self.batched = True + try: + assert len(seed) == x.shape[0] + w0 = w0[0] + except TypeError: + seed = [seed] + self.batched = False + self.trees = [torchsde.BrownianTree(t0, w0, t1, entropy=s, **kwargs) for s in seed] + + @staticmethod + def sort(a, b): + return (a, b, 1) if a < b else (b, a, -1) + + def __call__(self, t0, t1): + t0, t1, sign = self.sort(t0, t1) + w = torch.stack([tree(t0, t1) for tree in self.trees]) * (self.sign * sign) + return w if self.batched else w[0] + + +class BrownianTreeNoiseSampler: + """A noise sampler backed by a torchsde.BrownianTree. + + Args: + x (Tensor): The tensor whose shape, device and dtype to use to generate + random samples. + sigma_min (float): The low end of the valid interval. + sigma_max (float): The high end of the valid interval. + seed (int or List[int]): The random seed. If a list of seeds is + supplied instead of a single integer, then the noise sampler will use one BrownianTree per batch item, each + with its own seed. + transform (callable): A function that maps sigma to the sampler's + internal timestep. + """ + + def __init__(self, x, sigma_min, sigma_max, seed=None, transform=lambda x: x): + self.transform = transform + t0, t1 = self.transform(torch.as_tensor(sigma_min)), self.transform(torch.as_tensor(sigma_max)) + self.tree = BatchedBrownianTree(x, t0, t1, seed) + + def __call__(self, sigma, sigma_next): + t0, t1 = self.transform(torch.as_tensor(sigma)), self.transform(torch.as_tensor(sigma_next)) + return self.tree(t0, t1) / (t1 - t0).abs().sqrt() + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
+ Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin): + """ + DPMSolverSDEScheduler implements the stochastic sampler from the [Elucidating the Design Space of Diffusion-Based + Generative Models](https://huggingface.co/papers/2206.00364) paper. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.00085): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.012): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {σi}. + noise_sampler_seed (`int`, *optional*, defaults to `None`): + The random seed to use for the noise sampler. If `None`, a random seed is generated. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. 
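+
+    Example (editorial sketch; assumes the package is importable as `diffusers` and that `pipe` is an
+    already-loaded diffusers pipeline):
+
+        from diffusers import DPMSolverSDEScheduler
+
+        pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)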
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 2 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, # sensible defaults + beta_end: float = 0.012, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + prediction_type: str = "epsilon", + use_karras_sigmas: Optional[bool] = False, + noise_sampler_seed: Optional[int] = None, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # set all values + self.set_timesteps(num_train_timesteps, None, num_train_timesteps) + self.use_karras_sigmas = use_karras_sigmas + self.noise_sampler = None + self.noise_sampler_seed = noise_sampler_seed + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + return self.sigmas.max() + + return (self.sigmas.max() ** 2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. 
+ + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input( + self, + sample: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + ) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sigma_input = sigma if self.state_in_first_order else self.mid_point_sigma + sample = sample / ((sigma_input**2 + 1) ** 0.5) + return sample + + def set_timesteps( + self, + num_inference_steps: int, + device: Union[str, torch.device] = None, + num_train_timesteps: Optional[int] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) + + second_order_timesteps = self._second_order_timesteps(sigmas, log_sigmas) + + sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) + sigmas = torch.from_numpy(sigmas).to(device=device) + self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]]) + + timesteps = torch.from_numpy(timesteps) + second_order_timesteps = torch.from_numpy(second_order_timesteps) + timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)]) + timesteps[1::2] = second_order_timesteps + + if str(device).startswith("mps"): + # mps does not support float64 + self.timesteps = timesteps.to(device, dtype=torch.float32) + else: + self.timesteps = timesteps.to(device=device) + + # empty first order variables + self.sample = None + self.mid_point_sigma = None + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + self.noise_sampler = None + + def _second_order_timesteps(self, sigmas, log_sigmas): + def sigma_fn(_t): + return np.exp(-_t) + + def t_fn(_sigma): + return -np.log(_sigma) + + midpoint_ratio = 0.5 + t = t_fn(sigmas) + delta_time = np.diff(t) + t_proposed = t[:-1] + delta_time * midpoint_ratio + sig_proposed = sigma_fn(t_proposed) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sig_proposed]) + return timesteps + + # copied from diffusers.schedulers.scheduling_euler_discrete._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # copied from diffusers.schedulers.scheduling_euler_discrete._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.FloatTensor) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + sigma_min: float = in_sigmas[-1].item() + sigma_max: float = in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, self.num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + @property + def state_in_first_order(self): + return self.sample is None + + def step( + self, + model_output: Union[torch.FloatTensor, np.ndarray], + timestep: Union[float, torch.FloatTensor], + sample: Union[torch.FloatTensor, np.ndarray], + return_dict: bool = True, + s_noise: float = 1.0, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). 
+ + Args: + model_output (`torch.FloatTensor` or `np.ndarray`): + The direct output from learned diffusion model. + timestep (`float` or `torch.FloatTensor`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor` or `np.ndarray`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + s_noise (`float`, *optional*, defaults to 1.0): + Scaling factor for noise added to the sample. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + """ + if self.step_index is None: + self._init_step_index(timestep) + + # Create a noise sampler if it hasn't been created yet + if self.noise_sampler is None: + min_sigma, max_sigma = self.sigmas[self.sigmas > 0].min(), self.sigmas.max() + self.noise_sampler = BrownianTreeNoiseSampler(sample, min_sigma, max_sigma, self.noise_sampler_seed) + + # Define functions to compute sigma and t from each other + def sigma_fn(_t: torch.FloatTensor) -> torch.FloatTensor: + return _t.neg().exp() + + def t_fn(_sigma: torch.FloatTensor) -> torch.FloatTensor: + return _sigma.log().neg() + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + sigma_next = self.sigmas[self.step_index + 1] + else: + # 2nd order + sigma = self.sigmas[self.step_index - 1] + sigma_next = self.sigmas[self.step_index] + + # Set the midpoint and step size for the current step + midpoint_ratio = 0.5 + t, t_next = t_fn(sigma), t_fn(sigma_next) + delta_time = t_next - t + t_proposed = t + delta_time * midpoint_ratio + + # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + sigma_input = sigma if self.state_in_first_order else sigma_fn(t_proposed) + pred_original_sample = sample - sigma_input * model_output + elif self.config.prediction_type == "v_prediction": + sigma_input = sigma if self.state_in_first_order else sigma_fn(t_proposed) + pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( + sample / (sigma_input**2 + 1) + ) + elif self.config.prediction_type == "sample": + raise NotImplementedError("prediction_type not implemented yet: sample") + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + if sigma_next == 0: + derivative = (sample - pred_original_sample) / sigma + dt = sigma_next - sigma + prev_sample = sample + derivative * dt + else: + if self.state_in_first_order: + t_next = t_proposed + else: + sample = self.sample + + sigma_from = sigma_fn(t) + sigma_to = sigma_fn(t_next) + sigma_up = min(sigma_to, (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5) + sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5 + ancestral_t = t_fn(sigma_down) + prev_sample = (sigma_fn(ancestral_t) / sigma_fn(t)) * sample - ( + t - ancestral_t + ).expm1() * pred_original_sample + prev_sample = prev_sample + self.noise_sampler(sigma_fn(t), sigma_fn(t_next)) * s_noise * sigma_up + + if self.state_in_first_order: + # store for 2nd order step + self.sample = sample + self.mid_point_sigma = sigma_fn(t_next) + else: + # free for "first order mode" + self.sample = None + self.mid_point_sigma = None + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py new file mode 100644 index 
000000000..7bb201de4 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py @@ -0,0 +1,979 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import deprecate, logging +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin): + """ + `DPMSolverSinglestepScheduler` is a fast dedicated high-order solver for diffusion ODEs. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. 
Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + solver_order (`int`, defaults to 2): + The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++"`. + algorithm_type (`str`, defaults to `dpmsolver++`): + Algorithm type for the solver; can be `dpmsolver` or `dpmsolver++`. The + `dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) + paper, and the `dpmsolver++` type implements the algorithms in the + [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or + `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. + solver_type (`str`, defaults to `midpoint`): + Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the + sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can + stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {σi}. + final_sigmas_type (`str`, *optional*, defaults to `"zero"`): + The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final sigma + is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. + lambda_min_clipped (`float`, defaults to `-inf`): + Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the + cosine (`squaredcos_cap_v2`) noise schedule. + variance_type (`str`, *optional*): + Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output + contains the predicted Gaussian variance. 
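+
+    Example (illustrative sketch only, not part of the upstream file; the `diffusers` import path, the `unet`
+    callable and the initial latent `sample` are assumed):
+
+        >>> from diffusers import DPMSolverSinglestepScheduler
+        >>> scheduler = DPMSolverSinglestepScheduler(solver_order=2)
+        >>> scheduler.set_timesteps(num_inference_steps=20)
+        >>> for t in scheduler.timesteps:
+        ...     noise_pred = unet(sample, t)  # assumed denoising model call
+        ...     sample = scheduler.step(noise_pred, t, sample).prev_sample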
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[np.ndarray] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "dpmsolver++", + solver_type: str = "midpoint", + lower_order_final: bool = False, + use_karras_sigmas: Optional[bool] = False, + final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" + lambda_min_clipped: float = -float("inf"), + variance_type: Optional[str] = None, + ): + if algorithm_type == "dpmsolver": + deprecation_message = "algorithm_type `dpmsolver` is deprecated and will be removed in a future version. Choose from `dpmsolver++` or `sde-dpmsolver++` instead" + deprecate("algorithm_types=dpmsolver", "1.0.0", deprecation_message) + + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # settings for DPM-Solver + if algorithm_type not in ["dpmsolver", "dpmsolver++"]: + if algorithm_type == "deis": + self.register_to_config(algorithm_type="dpmsolver++") + else: + raise NotImplementedError(f"{algorithm_type} does is not implemented for {self.__class__}") + if solver_type not in ["midpoint", "heun"]: + if solver_type in ["logrho", "bh1", "bh2"]: + self.register_to_config(solver_type="midpoint") + else: + raise NotImplementedError(f"{solver_type} does is not implemented for {self.__class__}") + + if algorithm_type != "dpmsolver++" and final_sigmas_type == "zero": + raise ValueError( + f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please chooose `sigma_min` instead." + ) + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.model_outputs = [None] * solver_order + self.sample = None + self.order_list = self.get_order_list(num_train_timesteps) + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + def get_order_list(self, num_inference_steps: int) -> List[int]: + """ + Computes the solver order at each time step. 
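+        For example, with `solver_order=2` and `lower_order_final=True`, 5 inference steps yield the order list
+        `[1, 2, 1, 2, 1]` and 4 steps yield `[1, 2, 1, 1]`, so the final step(s) always fall back to first order.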
+ + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + """ + steps = num_inference_steps + order = self.config.solver_order + if order > 3: + raise ValueError("Order > 3 is not supported by this scheduler") + if self.config.lower_order_final: + if order == 3: + if steps % 3 == 0: + orders = [1, 2, 3] * (steps // 3 - 1) + [1, 2] + [1] + elif steps % 3 == 1: + orders = [1, 2, 3] * (steps // 3) + [1] + else: + orders = [1, 2, 3] * (steps // 3) + [1, 2] + elif order == 2: + if steps % 2 == 0: + orders = [1, 2] * (steps // 2 - 1) + [1, 1] + else: + orders = [1, 2] * (steps // 2) + [1] + elif order == 1: + orders = [1] * steps + else: + if order == 3: + orders = [1, 2, 3] * (steps // 3) + elif order == 2: + orders = [1, 2] * (steps // 2) + elif order == 1: + orders = [1] * steps + return orders + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + # Clipping the minimum of all lambda(t) for numerical stability. + # This is critical for cosine (squaredcos_cap_v2) noise schedule. 
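+        # `lambda_t` decreases monotonically over the training schedule, so it is flipped before `searchsorted`;
+        # `clipped_idx` is then the number of trailing training timesteps whose lambda falls below
+        # `lambda_min_clipped`, and those timesteps are excluded from the linspace below.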
+ clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1 - clipped_idx, num_inference_steps + 1) + .round()[::-1][:-1] + .copy() + .astype(np.int64) + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + if self.config.use_karras_sigmas: + log_sigmas = np.log(sigmas) + sigmas = np.flip(sigmas).copy() + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.final_sigmas_type == "sigma_min": + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f" `final_sigmas_type` must be one of `sigma_min` or `zero`, but got {self.config.final_sigmas_type}" + ) + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas).to(device=device) + + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + self.model_outputs = [None] * self.config.solver_order + self.sample = None + + if not self.config.lower_order_final and num_inference_steps % self.config.solver_order != 0: + logger.warning( + "Changing scheduler {self.config} to have `lower_order_final` set to True to handle uneven amount of inference steps. Please make sure to always use an even number of `num_inference steps when using `lower_order_final=False`." + ) + self.register_to_config(lower_order_final=True) + + if not self.config.lower_order_final and self.config.final_sigmas_type == "zero": + logger.warning( + " `last_sigmas_type='zero'` is not supported for `lower_order_final=False`. Changing scheduler {self.config} to have `lower_order_final` set to True." + ) + self.register_to_config(lower_order_final=True) + + self.order_list = self.get_order_list(num_inference_steps) + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def convert_model_output( + self, + model_output: torch.FloatTensor, + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is + designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an + integral of the data prediction model. + + + + The algorithm and model type are decoupled. 
You can use either DPMSolver or DPMSolver++ for both noise + prediction and data prediction models. + + + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The converted model output. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + # DPM-Solver++ needs to solve an integral of the data prediction model. + if self.config.algorithm_type == "dpmsolver++": + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. + if self.config.variance_type in ["learned_range"]: + model_output = model_output[:, :3] + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverSinglestepScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + # DPM-Solver needs to solve an integral of the noise prediction model. + elif self.config.algorithm_type == "dpmsolver": + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. + if self.config.variance_type in ["learned_range"]: + model_output = model_output[:, :3] + return model_output + elif self.config.prediction_type == "sample": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = (sample - alpha_t * model_output) / sigma_t + return epsilon + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = alpha_t * model_output + sigma_t * sample + return epsilon + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverSinglestepScheduler." + ) + + def dpm_solver_first_order_update( + self, + model_output: torch.FloatTensor, + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the first-order DPMSolver (equivalent to DDIM). + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. 
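+
+        In the `dpmsolver++` formulation this evaluates x_t = (sigma_t / sigma_s) * x_s - alpha_t * (exp(-h) - 1) * D,
+        and in the `dpmsolver` formulation x_t = (alpha_t / alpha_s) * x_s - sigma_t * (exp(h) - 1) * D, where D is
+        the converted model output and h = lambda_t - lambda_s.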
+ """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + h = lambda_t - lambda_s + if self.config.algorithm_type == "dpmsolver++": + x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output + elif self.config.algorithm_type == "dpmsolver": + x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output + return x_t + + def singlestep_dpm_solver_second_order_update( + self, + model_output_list: List[torch.FloatTensor], + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the second-order singlestep DPMSolver that computes the solution at time `prev_timestep` from the + time `timestep_list[-2]`. + + Args: + model_output_list (`List[torch.FloatTensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + timestep (`int`): + The current and latter discrete timestep in the diffusion chain. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. 
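+
+        The update combines the two cached model outputs through the divided difference D1 = (m0 - m1) / r0 with
+        r0 = (lambda_s0 - lambda_s1) / (lambda_t - lambda_s1), using either the midpoint or Heun weighting selected
+        by `solver_type`.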
+ """ + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + sigma_t, sigma_s0, sigma_s1 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s1, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m1, (1.0 / r0) * (m0 - m1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2211.01095 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s1) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s1) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s1) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s1) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + ) + return x_t + + def singlestep_dpm_solver_third_order_update( + self, + model_output_list: List[torch.FloatTensor], + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the third-order singlestep DPMSolver that computes the solution at time `prev_timestep` from the + time `timestep_list[-3]`. + + Args: + model_output_list (`List[torch.FloatTensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + timestep (`int`): + The current and latter discrete timestep in the diffusion chain. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. 
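+
+        The update is built from the three cached model outputs m0, m1, m2 via the divided differences
+        D1_0 = (m1 - m2) / r1 and D1_1 = (m0 - m2) / r0, plus the second difference D2 = 2 * (D1_1 - D1_0) / (r0 - r1),
+        where r0 = (lambda_s0 - lambda_s2) / h, r1 = (lambda_s1 - lambda_s2) / h and h = lambda_t - lambda_s2.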
+ """ + + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing`sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + self.sigmas[self.step_index - 2], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) + + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + + h, h_0, h_1 = lambda_t - lambda_s2, lambda_s0 - lambda_s2, lambda_s1 - lambda_s2 + r0, r1 = h_0 / h, h_1 / h + D0 = m2 + D1_0, D1_1 = (1.0 / r1) * (m1 - m2), (1.0 / r0) * (m0 - m2) + D1 = (r0 * D1_0 - r1 * D1_1) / (r0 - r1) + D2 = 2.0 * (D1_1 - D1_0) / (r0 - r1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s2) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1_1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s2) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s2) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1_1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s2) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 + ) + return x_t + + def singlestep_dpm_solver_update( + self, + model_output_list: List[torch.FloatTensor], + *args, + sample: torch.FloatTensor = None, + order: int = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the singlestep DPMSolver. + + Args: + model_output_list (`List[torch.FloatTensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + timestep (`int`): + The current and latter discrete timestep in the diffusion chain. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by diffusion process. 
+ order (`int`): + The solver order at this step. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. + """ + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing`sample` as a required keyward argument") + if order is None: + if len(args) > 3: + order = args[3] + else: + raise ValueError(" missing `order` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if order == 1: + return self.dpm_solver_first_order_update(model_output_list[-1], sample=sample) + elif order == 2: + return self.singlestep_dpm_solver_second_order_update(model_output_list, sample=sample) + elif order == 3: + return self.singlestep_dpm_solver_third_order_update(model_output_list, sample=sample) + else: + raise ValueError(f"Order must be 1, 2, 3, got {order}") + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the singlestep DPMSolver. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. 
+ + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + model_output = self.convert_model_output(model_output, sample=sample) + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.model_outputs[-1] = model_output + + order = self.order_list[self.step_index] + + # For img2img denoising might start with order>1 which is not possible + # In this case make sure that the first two steps are both order=1 + while self.model_outputs[-order] is None: + order -= 1 + + # For single-step solvers, we use the initial value at each time with order = 1. + if order == 1: + self.sample = sample + + prev_sample = self.singlestep_dpm_solver_update(self.model_outputs, sample=self.sample, order=order) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # begin_index is None when the scheduler is used for training + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py new file mode 100644 index 000000000..5fea89bb8 --- /dev/null +++ 
b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py @@ -0,0 +1,683 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver and https://github.com/NVlabs/edm + +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin, SchedulerOutput + + +class EDMDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): + """ + Implements DPMSolverMultistepScheduler in EDM formulation as presented in Karras et al. 2022 [1]. + `EDMDPMSolverMultistepScheduler` is a fast dedicated high-order solver for diffusion ODEs. + + [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." + https://arxiv.org/abs/2206.00364 + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + sigma_min (`float`, *optional*, defaults to 0.002): + Minimum noise magnitude in the sigma schedule. This was set to 0.002 in the EDM paper [1]; a reasonable + range is [0, 10]. + sigma_max (`float`, *optional*, defaults to 80.0): + Maximum noise magnitude in the sigma schedule. This was set to 80.0 in the EDM paper [1]; a reasonable + range is [0.2, 80.0]. + sigma_data (`float`, *optional*, defaults to 0.5): + The standard deviation of the data distribution. This is set to 0.5 in the EDM paper [1]. + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + solver_order (`int`, defaults to 2): + The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++"`. + algorithm_type (`str`, defaults to `dpmsolver++`): + Algorithm type for the solver; can be `dpmsolver++` or `sde-dpmsolver++`. 
The + `dpmsolver++` type implements the algorithms in the + [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or + `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. + solver_type (`str`, defaults to `midpoint`): + Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the + sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can + stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. + euler_at_final (`bool`, defaults to `False`): + Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail + richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference + steps, but sometimes may result in blurring. + final_sigmas_type (`str`, defaults to `"zero"`): + The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final sigma + is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. + """ + + _compatibles = [] + order = 1 + + @register_to_config + def __init__( + self, + sigma_min: float = 0.002, + sigma_max: float = 80.0, + sigma_data: float = 0.5, + num_train_timesteps: int = 1000, + prediction_type: str = "epsilon", + rho: float = 7.0, + solver_order: int = 2, + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "dpmsolver++", + solver_type: str = "midpoint", + lower_order_final: bool = True, + euler_at_final: bool = False, + final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" + ): + # settings for DPM-Solver + if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"]: + if algorithm_type == "deis": + self.register_to_config(algorithm_type="dpmsolver++") + else: + raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}") + + if solver_type not in ["midpoint", "heun"]: + if solver_type in ["logrho", "bh1", "bh2"]: + self.register_to_config(solver_type="midpoint") + else: + raise NotImplementedError(f"{solver_type} does is not implemented for {self.__class__}") + + if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"] and final_sigmas_type == "zero": + raise ValueError( + f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please choose `sigma_min` instead." + ) + + ramp = torch.linspace(0, 1, num_train_timesteps) + sigmas = self._compute_sigmas(ramp) + self.timesteps = self.precondition_noise(sigmas) + + self.sigmas = self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + + # setable values + self.num_inference_steps = None + self.model_outputs = [None] * solver_order + self.lower_order_nums = 0 + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + return (self.config.sigma_max**2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. 
+ """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.precondition_inputs + def precondition_inputs(self, sample, sigma): + c_in = 1 / ((sigma**2 + self.config.sigma_data**2) ** 0.5) + scaled_sample = sample * c_in + return scaled_sample + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.precondition_noise + def precondition_noise(self, sigma): + if not isinstance(sigma, torch.Tensor): + sigma = torch.tensor([sigma]) + + c_noise = 0.25 * torch.log(sigma) + + return c_noise + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.precondition_outputs + def precondition_outputs(self, sample, model_output, sigma): + sigma_data = self.config.sigma_data + c_skip = sigma_data**2 / (sigma**2 + sigma_data**2) + + if self.config.prediction_type == "epsilon": + c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + elif self.config.prediction_type == "v_prediction": + c_out = -sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + else: + raise ValueError(f"Prediction type {self.config.prediction_type} is not supported.") + + denoised = c_skip * sample + c_out * model_output + + return denoised + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.scale_model_input + def scale_model_input( + self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor] + ) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = self.precondition_inputs(sample, sigma) + + self.is_scale_input_called = True + return sample + + def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
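+
+        The sigma schedule follows the Karras et al. (2022) spacing: with ramp r_i = i / (N - 1),
+        sigma_i = (sigma_max^(1/rho) + r_i * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho, so the sigmas run from
+        `sigma_max` down to `sigma_min` before the final sigma selected by `final_sigmas_type` is appended.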
+ """ + + self.num_inference_steps = num_inference_steps + + ramp = np.linspace(0, 1, self.num_inference_steps) + sigmas = self._compute_sigmas(ramp) + + sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device) + self.timesteps = self.precondition_noise(sigmas) + + if self.config.final_sigmas_type == "sigma_min": + sigma_last = self.config.sigma_min + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" + ) + + self.sigmas = torch.cat([sigmas, torch.tensor([sigma_last], dtype=torch.float32, device=device)]) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Taken from https://github.com/crowsonkb/k-diffusion/blob/686dbad0f39640ea25c8a8c6a6e56bb40eacefa2/k_diffusion/sampling.py#L17 + def _compute_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + sigma_min = sigma_min or self.config.sigma_min + sigma_max = sigma_max or self.config.sigma_max + + rho = self.config.rho + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = torch.tensor(1) # Inputs are pre-scaled before going into unet, so alpha_t = 1 + sigma_t = sigma + + return alpha_t, sigma_t + + def convert_model_output( + self, + model_output: torch.FloatTensor, + sample: torch.FloatTensor = None, + ) -> torch.FloatTensor: + """ + Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is + designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an + integral of the data prediction model. + + + + The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise + prediction and data prediction models. + + + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The converted model output. + """ + sigma = self.sigmas[self.step_index] + x0_pred = self.precondition_outputs(sample, model_output, sigma) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + + def dpm_solver_first_order_update( + self, + model_output: torch.FloatTensor, + sample: torch.FloatTensor = None, + noise: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + """ + One step for the first-order DPMSolver (equivalent to DDIM). + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. 
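+
+        Because EDM preconditioning already scales the network inputs, `_sigma_to_alpha_sigma_t` returns
+        `alpha_t = 1`, so lambda = -log(sigma) and the `dpmsolver++` branch reduces to
+        x_t = (sigma_t / sigma_s) * x_s - (exp(-h) - 1) * x0 with h = log(sigma_s) - log(sigma_t).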
+ """ + sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + if self.config.algorithm_type == "dpmsolver++": + x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + x_t = ( + (sigma_t / sigma_s * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + + return x_t + + def multistep_dpm_solver_second_order_update( + self, + model_output_list: List[torch.FloatTensor], + sample: torch.FloatTensor = None, + noise: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + """ + One step for the second-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.FloatTensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. + """ + sigma_t, sigma_s0, sigma_s1 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2211.01095 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + ) + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + + return x_t + + def multistep_dpm_solver_third_order_update( + self, + model_output_list: List[torch.FloatTensor], + sample: torch.FloatTensor = None, + ) -> torch.FloatTensor: + """ + One step for the third-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.FloatTensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. 
+ sample (`torch.FloatTensor`): + A current instance of a sample created by diffusion process. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. + """ + sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + self.sigmas[self.step_index - 2], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) + + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + + h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 + r0, r1 = h_0 / h, h_1 / h + D0 = m0 + D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) + D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) + D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 + ) + + return x_t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + generator=None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep DPMSolver. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. 
+ return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Improve numerical stability for small number of steps + lower_order_final = (self.step_index == len(self.timesteps) - 1) and ( + self.config.euler_at_final + or (self.config.lower_order_final and len(self.timesteps) < 15) + or self.config.final_sigmas_type == "zero" + ) + lower_order_second = ( + (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15 + ) + + model_output = self.convert_model_output(model_output, sample=sample) + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.model_outputs[-1] = model_output + + if self.config.algorithm_type == "sde-dpmsolver++": + noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype + ) + else: + noise = None + + if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: + prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) + elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: + prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) + else: + prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return 
self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_edm_euler.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_edm_euler.py new file mode 100644 index 000000000..e62a486cc --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_edm_euler.py @@ -0,0 +1,381 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->EulerDiscrete +class EDMEulerSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +class EDMEulerScheduler(SchedulerMixin, ConfigMixin): + """ + Implements the Euler scheduler in EDM formulation as presented in Karras et al. 2022 [1]. + + [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." + https://arxiv.org/abs/2206.00364 + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + sigma_min (`float`, *optional*, defaults to 0.002): + Minimum noise magnitude in the sigma schedule. This was set to 0.002 in the EDM paper [1]; a reasonable + range is [0, 10]. + sigma_max (`float`, *optional*, defaults to 80.0): + Maximum noise magnitude in the sigma schedule. This was set to 80.0 in the EDM paper [1]; a reasonable + range is [0.2, 80.0]. + sigma_data (`float`, *optional*, defaults to 0.5): + The standard deviation of the data distribution. This is set to 0.5 in the EDM paper [1]. + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. 
+ prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + rho (`float`, *optional*, defaults to 7.0): + The rho parameter used for calculating the Karras sigma schedule, which is set to 7.0 in the EDM paper [1]. + """ + + _compatibles = [] + order = 1 + + @register_to_config + def __init__( + self, + sigma_min: float = 0.002, + sigma_max: float = 80.0, + sigma_data: float = 0.5, + num_train_timesteps: int = 1000, + prediction_type: str = "epsilon", + rho: float = 7.0, + ): + # setable values + self.num_inference_steps = None + + ramp = torch.linspace(0, 1, num_train_timesteps) + sigmas = self._compute_sigmas(ramp) + self.timesteps = self.precondition_noise(sigmas) + + self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + + self.is_scale_input_called = False + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + return (self.config.sigma_max**2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def precondition_inputs(self, sample, sigma): + c_in = 1 / ((sigma**2 + self.config.sigma_data**2) ** 0.5) + scaled_sample = sample * c_in + return scaled_sample + + def precondition_noise(self, sigma): + if not isinstance(sigma, torch.Tensor): + sigma = torch.tensor([sigma]) + + c_noise = 0.25 * torch.log(sigma) + + return c_noise + + def precondition_outputs(self, sample, model_output, sigma): + sigma_data = self.config.sigma_data + c_skip = sigma_data**2 / (sigma**2 + sigma_data**2) + + if self.config.prediction_type == "epsilon": + c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + elif self.config.prediction_type == "v_prediction": + c_out = -sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + else: + raise ValueError(f"Prediction type {self.config.prediction_type} is not supported.") + + denoised = c_skip * sample + c_out * model_output + + return denoised + + def scale_model_input( + self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor] + ) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. 
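For reviewers skimming the patch, the coefficients used by `precondition_inputs` and `precondition_outputs` above can be restated in a few lines. This is only a standalone restatement of the formulas already in this file, shown for the default `sigma_data = 0.5` and epsilon prediction; the helper name is illustrative.

def edm_coefficients(sigma: float, sigma_data: float = 0.5):
    # c_in scales the network input; c_skip and c_out blend the noisy sample
    # with the network output to form the denoised estimate.
    c_in = 1.0 / (sigma**2 + sigma_data**2) ** 0.5
    c_skip = sigma_data**2 / (sigma**2 + sigma_data**2)
    c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5
    return c_in, c_skip, c_out

c_in, c_skip, c_out = edm_coefficients(sigma=80.0)
# At sigma_max the skip weight is roughly 4e-5, so the denoised estimate comes
# almost entirely from c_out * model_output rather than from the noisy sample.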
+ """ + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = self.precondition_inputs(sample, sigma) + + self.is_scale_input_called = True + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + ramp = np.linspace(0, 1, self.num_inference_steps) + sigmas = self._compute_sigmas(ramp) + + sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device) + self.timesteps = self.precondition_noise(sigmas) + + self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Taken from https://github.com/crowsonkb/k-diffusion/blob/686dbad0f39640ea25c8a8c6a6e56bb40eacefa2/k_diffusion/sampling.py#L17 + def _compute_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + sigma_min = sigma_min or self.config.sigma_min + sigma_max = sigma_max or self.config.sigma_max + + rho = self.config.rho + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + sample: torch.FloatTensor, + s_churn: float = 0.0, + s_tmin: float = 0.0, + s_tmax: float = float("inf"), + s_noise: float = 1.0, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[EDMEulerSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. 
+ sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + s_churn (`float`): + s_tmin (`float`): + s_tmax (`float`): + s_noise (`float`, defaults to 1.0): + Scaling factor for noise added to the sample. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_euler_discrete.EDMEulerSchedulerOutput`] or + tuple. + + Returns: + [`~schedulers.scheduling_euler_discrete.EDMEulerSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_euler_discrete.EDMEulerSchedulerOutput`] is + returned, otherwise a tuple is returned where the first element is the sample tensor. + """ + + if ( + isinstance(timestep, int) + or isinstance(timestep, torch.IntTensor) + or isinstance(timestep, torch.LongTensor) + ): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + " `EDMEulerScheduler.step()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if not self.is_scale_input_called: + logger.warning( + "The `scale_model_input` function should be called before `step` to ensure correct denoising. " + "See `StableDiffusionPipeline` for a usage example." + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + + sigma = self.sigmas[self.step_index] + + gamma = min(s_churn / (len(self.sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0 + + noise = randn_tensor( + model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator + ) + + eps = noise * s_noise + sigma_hat = sigma * (gamma + 1) + + if gamma > 0: + sample = sample + eps * (sigma_hat**2 - sigma**2) ** 0.5 + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + pred_original_sample = self.precondition_outputs(sample, model_output, sigma_hat) + + # 2. 
Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma_hat + + dt = self.sigmas[self.step_index + 1] - sigma_hat + + prev_sample = sample + derivative * dt + + # Cast sample back to model compatible dtype + prev_sample = prev_sample.to(model_output.dtype) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return EDMEulerSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py new file mode 100644 index 000000000..dfab59272 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py @@ -0,0 +1,481 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
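Before moving on to the ancestral sampler, one detail of `EDMEulerScheduler.step` above is worth spelling out: when `s_churn > 0`, the sample is first pushed back up to a higher noise level `sigma_hat` by injecting fresh Gaussian noise, and only then integrated down. A minimal standalone sketch of that churn stage, with an illustrative helper name and the `s_tmin`/`s_tmax` gate from the original code omitted:

import torch

def churn(sample, sigma, s_churn, num_sigmas, s_noise=1.0, generator=None):
    # Raise the noise level from sigma to sigma_hat = sigma * (gamma + 1);
    # adding noise with variance sigma_hat**2 - sigma**2 keeps the marginal
    # noise level of the sample consistent with sigma_hat.
    gamma = min(s_churn / (num_sigmas - 1), 2**0.5 - 1)
    sigma_hat = sigma * (gamma + 1)
    if gamma > 0:
        eps = torch.randn(sample.shape, generator=generator) * s_noise
        sample = sample + eps * (sigma_hat**2 - sigma**2) ** 0.5
    return sample, sigma_hat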
+ +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->EulerAncestralDiscrete +class EulerAncestralDiscreteSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.FloatTensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.FloatTensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. 
+ alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class EulerAncestralDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + Ancestral sampling with Euler method steps. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + prediction_type: str = "epsilon", + timestep_spacing: str = "linspace", + steps_offset: int = 0, + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + if rescale_betas_zero_snr: + # Close to 0 without being 0 so first sigma is not inf + # FP16 smallest positive subnormal works well here + self.alphas_cumprod[-1] = 2**-24 + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32) + self.sigmas = torch.from_numpy(sigmas) + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.is_scale_input_called = False + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + return self.sigmas.max() + + return (self.sigmas.max() ** 2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input( + self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor] + ) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = sample / ((sigma**2 + 1) ** 0.5) + self.is_scale_input_called = True + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
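The `set_timesteps` body that follows implements the three spacing strategies from the "Common Diffusion Noise Schedules and Sample Steps are Flawed" paper. A standalone sketch with illustrative values (1000 training steps, 10 inference steps) shows what each mode produces:

import numpy as np

T, N, steps_offset = 1000, 10, 0

# "linspace": evenly spaced over [0, T - 1], reversed into denoising order.
linspace = np.linspace(0, T - 1, N)[::-1]                            # 999, 888, ..., 0

# "leading": integer multiples of T // N, shifted by steps_offset.
leading = (np.arange(0, N) * (T // N)).round()[::-1] + steps_offset  # 900, 800, ..., 0

# "trailing": count down from T in steps of T / N, then subtract 1.
trailing = np.arange(T, 0, -T / N).round() - 1                       # 999, 899, ..., 99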
+ """ + self.num_inference_steps = num_inference_steps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[ + ::-1 + ].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(self.config.num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) + self.sigmas = torch.from_numpy(sigmas).to(device=device) + + self.timesteps = torch.from_numpy(timesteps).to(device=device) + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + sample: torch.FloatTensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[EulerAncestralDiscreteSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. 
+ generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a + [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] or `tuple`: + If return_dict is `True`, + [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] is returned, + otherwise a tuple is returned where the first element is the sample tensor. + + """ + + if ( + isinstance(timestep, int) + or isinstance(timestep, torch.IntTensor) + or isinstance(timestep, torch.LongTensor) + ): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if not self.is_scale_input_called: + logger.warning( + "The `scale_model_input` function should be called before `step` to ensure correct denoising. " + "See `StableDiffusionPipeline` for a usage example." + ) + + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + pred_original_sample = sample - sigma * model_output + elif self.config.prediction_type == "v_prediction": + # * c_out + input * c_skip + pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) + elif self.config.prediction_type == "sample": + raise NotImplementedError("prediction_type not implemented yet: sample") + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + sigma_from = self.sigmas[self.step_index] + sigma_to = self.sigmas[self.step_index + 1] + sigma_up = (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5 + sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5 + + # 2. 
Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma + + dt = sigma_down - sigma + + prev_sample = sample + derivative * dt + + device = model_output.device + noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=device, generator=generator) + + prev_sample = prev_sample + noise * sigma_up + + # Cast sample back to model compatible dtype + prev_sample = prev_sample.to(model_output.dtype) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return EulerAncestralDiscreteSchedulerOutput( + prev_sample=prev_sample, pred_original_sample=pred_original_sample + ) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_euler_discrete.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_euler_discrete.py new file mode 100644 index 000000000..22258abc8 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_euler_discrete.py @@ -0,0 +1,576 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
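A note on the ancestral update that closes the previous file: `sigma_up` and `sigma_down` are chosen so that `sigma_down**2 + sigma_up**2 == sigma_to**2`, i.e. a deterministic Euler step down to `sigma_down` followed by fresh noise of standard deviation `sigma_up` lands the sample exactly at the next noise level. A standalone check, with an illustrative helper name:

def ancestral_split(sigma_from: float, sigma_to: float):
    # Same formulas as in EulerAncestralDiscreteScheduler.step above.
    sigma_up = (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5
    sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
    return sigma_down, sigma_up

sigma_down, sigma_up = ancestral_split(sigma_from=2.0, sigma_to=1.0)
assert abs(sigma_down**2 + sigma_up**2 - 1.0) < 1e-9  # variances add up to sigma_to**2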
+ +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->EulerDiscrete +class EulerDiscreteSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.FloatTensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.FloatTensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. 
+    alphas_bar_sqrt -= alphas_bar_sqrt_T
+
+    # Scale so the first timestep is back to the old value.
+    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
+
+    # Convert alphas_bar_sqrt to betas
+    alphas_bar = alphas_bar_sqrt**2  # Revert sqrt
+    alphas = alphas_bar[1:] / alphas_bar[:-1]  # Revert cumprod
+    alphas = torch.cat([alphas_bar[0:1], alphas])
+    betas = 1 - alphas
+
+    return betas
+
+
+class EulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
+    """
+    Euler scheduler.
+
+    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
+    methods the library implements for all schedulers such as loading and saving.
+
+    Args:
+        num_train_timesteps (`int`, defaults to 1000):
+            The number of diffusion steps to train the model.
+        beta_start (`float`, defaults to 0.0001):
+            The starting `beta` value of inference.
+        beta_end (`float`, defaults to 0.02):
+            The final `beta` value.
+        beta_schedule (`str`, defaults to `"linear"`):
+            The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
+            `linear` or `scaled_linear`.
+        trained_betas (`np.ndarray`, *optional*):
+            Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
+        prediction_type (`str`, defaults to `epsilon`, *optional*):
+            Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
+            `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen
+            Video](https://imagen.research.google/video/paper.pdf) paper).
+        interpolation_type (`str`, defaults to `"linear"`, *optional*):
+            The interpolation type to compute intermediate sigmas for the scheduler denoising steps. Should be one of
+            `"linear"` or `"log_linear"`.
+        use_karras_sigmas (`bool`, *optional*, defaults to `False`):
+            Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`,
+            the sigmas are determined according to a sequence of noise levels {σi}.
+        timestep_spacing (`str`, defaults to `"linspace"`):
+            The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
+            Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
+        steps_offset (`int`, defaults to 0):
+            An offset added to the inference steps, as required by some model families.
+        rescale_betas_zero_snr (`bool`, defaults to `False`):
+            Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
+            dark samples instead of limiting it to samples with medium brightness. Loosely related to
+            [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
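A minimal usage sketch of the scheduler API defined in this file may help reviewers. It assumes the vendored package is importable as `diffusers` (adjust the import path otherwise), uses beta values commonly chosen for latent diffusion, and stubs out the UNet forward pass.

import torch
from diffusers.schedulers.scheduling_euler_discrete import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012)
scheduler.set_timesteps(num_inference_steps=30)

sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet forward pass
    sample = scheduler.step(noise_pred, t, sample).prev_sample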
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + prediction_type: str = "epsilon", + interpolation_type: str = "linear", + use_karras_sigmas: Optional[bool] = False, + sigma_min: Optional[float] = None, + sigma_max: Optional[float] = None, + timestep_spacing: str = "linspace", + timestep_type: str = "discrete", # can be "discrete" or "continuous" + steps_offset: int = 0, + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + if rescale_betas_zero_snr: + # Close to 0 without being 0 so first sigma is not inf + # FP16 smallest positive subnormal works well here + self.alphas_cumprod[-1] = 2**-24 + + sigmas = (((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5).flip(0) + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy() + timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32) + + # setable values + self.num_inference_steps = None + + # TODO: Support the full EDM scalings for all prediction types and timestep types + if timestep_type == "continuous" and prediction_type == "v_prediction": + self.timesteps = torch.Tensor([0.25 * sigma.log() for sigma in sigmas]) + else: + self.timesteps = timesteps + + self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + + self.is_scale_input_called = False + self.use_karras_sigmas = use_karras_sigmas + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + max_sigma = max(self.sigmas) if isinstance(self.sigmas, list) else self.sigmas.max() + if self.config.timestep_spacing in ["linspace", "trailing"]: + return max_sigma + + return (max_sigma**2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. 
+ + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input( + self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor] + ) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = sample / ((sigma**2 + 1) ** 0.5) + + self.is_scale_input_called = True + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[ + ::-1 + ].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(self.config.num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + + if self.config.interpolation_type == "linear": + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + elif self.config.interpolation_type == "log_linear": + sigmas = torch.linspace(np.log(sigmas[-1]), np.log(sigmas[0]), num_inference_steps + 1).exp().numpy() + else: + raise ValueError( + f"{self.config.interpolation_type} is not implemented. 
Please specify interpolation_type to either" + " 'linear' or 'log_linear'" + ) + + if self.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) + + sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device) + + # TODO: Support the full EDM scalings for all prediction types and timestep types + if self.config.timestep_type == "continuous" and self.config.prediction_type == "v_prediction": + self.timesteps = torch.Tensor([0.25 * sigma.log() for sigma in sigmas]).to(device=device) + else: + self.timesteps = torch.from_numpy(timesteps.astype(np.float32)).to(device=device) + + self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from https://github.com/crowsonkb/k-diffusion/blob/686dbad0f39640ea25c8a8c6a6e56bb40eacefa2/k_diffusion/sampling.py#L17 + def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + sample: torch.FloatTensor, + s_churn: float = 0.0, + s_tmin: float = 0.0, + s_tmax: float = float("inf"), + s_noise: float = 1.0, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[EulerDiscreteSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + s_churn (`float`): + s_tmin (`float`): + s_tmax (`float`): + s_noise (`float`, defaults to 1.0): + Scaling factor for noise added to the sample. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or + tuple. + + Returns: + [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] is + returned, otherwise a tuple is returned where the first element is the sample tensor. + """ + + if ( + isinstance(timestep, int) + or isinstance(timestep, torch.IntTensor) + or isinstance(timestep, torch.LongTensor) + ): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if not self.is_scale_input_called: + logger.warning( + "The `scale_model_input` function should be called before `step` to ensure correct denoising. " + "See `StableDiffusionPipeline` for a usage example." + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + + sigma = self.sigmas[self.step_index] + + gamma = min(s_churn / (len(self.sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0 + + noise = randn_tensor( + model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator + ) + + eps = noise * s_noise + sigma_hat = sigma * (gamma + 1) + + if gamma > 0: + sample = sample + eps * (sigma_hat**2 - sigma**2) ** 0.5 + + # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise + # NOTE: "original_sample" should not be an expected prediction_type but is left in for + # backwards compatibility + if self.config.prediction_type == "original_sample" or self.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.config.prediction_type == "epsilon": + pred_original_sample = sample - sigma_hat * model_output + elif self.config.prediction_type == "v_prediction": + # denoised = model_output * c_out + input * c_skip + pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + # 2. Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma_hat + + dt = self.sigmas[self.step_index + 1] - sigma_hat + + prev_sample = sample + derivative * dt + + # Cast sample back to model compatible dtype + prev_sample = prev_sample.to(model_output.dtype) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return EulerDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_euler_discrete_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_euler_discrete_flax.py new file mode 100644 index 000000000..55b0c2460 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_euler_discrete_flax.py @@ -0,0 +1,265 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax.numpy as jnp + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import ( + CommonSchedulerState, + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + broadcast_to_shape_from_left, +) + + +@flax.struct.dataclass +class EulerDiscreteSchedulerState: + common: CommonSchedulerState + + # setable values + init_noise_sigma: jnp.ndarray + timesteps: jnp.ndarray + sigmas: jnp.ndarray + num_inference_steps: Optional[int] = None + + @classmethod + def create( + cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray, sigmas: jnp.ndarray + ): + return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas) + + +@dataclass +class FlaxEulerDiscreteSchedulerOutput(FlaxSchedulerOutput): + state: EulerDiscreteSchedulerState + + +class FlaxEulerDiscreteScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + Euler scheduler (Algorithm 2) from Karras et al. (2022) https://arxiv.org/abs/2206.00364. . Based on the original + k-diffusion implementation by Katherine Crowson: + https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L51 + + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`jnp.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + prediction_type (`str`, default `epsilon`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 + https://imagen.research.google/video/paper.pdf) + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + the `dtype` used for params and computation. 
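+
+        Example (editor's illustrative sketch, not part of the upstream docstring):
+
+            scheduler = FlaxEulerDiscreteScheduler(num_train_timesteps=1000)
+            state = scheduler.create_state()
+            state = scheduler.set_timesteps(state, num_inference_steps=50)
+            # state.timesteps and state.sigmas now hold the inference schedule, and
+            # state.init_noise_sigma gives the scale for the initial latent noise.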
+ """ + + _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] + + dtype: jnp.dtype + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[jnp.ndarray] = None, + prediction_type: str = "epsilon", + timestep_spacing: str = "linspace", + dtype: jnp.dtype = jnp.float32, + ): + self.dtype = dtype + + def create_state(self, common: Optional[CommonSchedulerState] = None) -> EulerDiscreteSchedulerState: + if common is None: + common = CommonSchedulerState.create(self) + + timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] + sigmas = ((1 - common.alphas_cumprod) / common.alphas_cumprod) ** 0.5 + sigmas = jnp.interp(timesteps, jnp.arange(0, len(sigmas)), sigmas) + sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)]) + + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + init_noise_sigma = sigmas.max() + else: + init_noise_sigma = (sigmas.max() ** 2 + 1) ** 0.5 + + return EulerDiscreteSchedulerState.create( + common=common, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + sigmas=sigmas, + ) + + def scale_model_input(self, state: EulerDiscreteSchedulerState, sample: jnp.ndarray, timestep: int) -> jnp.ndarray: + """ + Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. + + Args: + state (`EulerDiscreteSchedulerState`): + the `FlaxEulerDiscreteScheduler` state data class instance. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + timestep (`int`): + current discrete timestep in the diffusion chain. + + Returns: + `jnp.ndarray`: scaled input sample + """ + (step_index,) = jnp.where(state.timesteps == timestep, size=1) + step_index = step_index[0] + + sigma = state.sigmas[step_index] + sample = sample / ((sigma**2 + 1) ** 0.5) + return sample + + def set_timesteps( + self, state: EulerDiscreteSchedulerState, num_inference_steps: int, shape: Tuple = () + ) -> EulerDiscreteSchedulerState: + """ + Sets the timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`EulerDiscreteSchedulerState`): + the `FlaxEulerDiscreteScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. 
+ """ + + if self.config.timestep_spacing == "linspace": + timesteps = jnp.linspace(self.config.num_train_timesteps - 1, 0, num_inference_steps, dtype=self.dtype) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // num_inference_steps + timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float) + timesteps += 1 + else: + raise ValueError( + f"timestep_spacing must be one of ['linspace', 'leading'], got {self.config.timestep_spacing}" + ) + + sigmas = ((1 - state.common.alphas_cumprod) / state.common.alphas_cumprod) ** 0.5 + sigmas = jnp.interp(timesteps, jnp.arange(0, len(sigmas)), sigmas) + sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)]) + + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + init_noise_sigma = sigmas.max() + else: + init_noise_sigma = (sigmas.max() ** 2 + 1) ** 0.5 + + return state.replace( + timesteps=timesteps, + sigmas=sigmas, + num_inference_steps=num_inference_steps, + init_noise_sigma=init_noise_sigma, + ) + + def step( + self, + state: EulerDiscreteSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + return_dict: bool = True, + ) -> Union[FlaxEulerDiscreteSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + state (`EulerDiscreteSchedulerState`): + the `FlaxEulerDiscreteScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + order: coefficient for multi-step inference. + return_dict (`bool`): option for returning tuple rather than FlaxEulerDiscreteScheduler class + + Returns: + [`FlaxEulerDiscreteScheduler`] or `tuple`: [`FlaxEulerDiscreteScheduler`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. + + """ + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + (step_index,) = jnp.where(state.timesteps == timestep, size=1) + step_index = step_index[0] + + sigma = state.sigmas[step_index] + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + pred_original_sample = sample - sigma * model_output + elif self.config.prediction_type == "v_prediction": + # * c_out + input * c_skip + pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + # 2. 
Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma + + # dt = sigma_down - sigma + dt = state.sigmas[step_index + 1] - sigma + + prev_sample = sample + derivative * dt + + if not return_dict: + return (prev_sample, state) + + return FlaxEulerDiscreteSchedulerOutput(prev_sample=prev_sample, state=state) + + def add_noise( + self, + state: EulerDiscreteSchedulerState, + original_samples: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + sigma = state.sigmas[timesteps].flatten() + sigma = broadcast_to_shape_from_left(sigma, noise.shape) + + noisy_samples = original_samples + noise * sigma + + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_heun_discrete.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_heun_discrete.py new file mode 100644 index 000000000..fc955ac49 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_heun_discrete.py @@ -0,0 +1,482 @@ +# Copyright 2024 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
+ Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + Scheduler with Heun steps for discrete beta schedules. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {σi}. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. 
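+
+        Example (editor's illustrative sketch; `unet` is an assumed denoising model, not defined in this file):
+
+            scheduler = HeunDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
+            scheduler.set_timesteps(num_inference_steps=25)
+            sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
+            for t in scheduler.timesteps:
+                # the timestep list interleaves first- and second-order entries, so `step` is
+                # effectively called twice per Heun update
+                model_input = scheduler.scale_model_input(sample, t)
+                noise_pred = unet(model_input, t)
+                sample = scheduler.step(noise_pred, t, sample).prev_sample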
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 2 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, # sensible defaults + beta_end: float = 0.012, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + prediction_type: str = "epsilon", + use_karras_sigmas: Optional[bool] = False, + clip_sample: Optional[bool] = False, + clip_sample_range: float = 1.0, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine") + elif beta_schedule == "exp": + self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp") + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # set all values + self.set_timesteps(num_train_timesteps, None, num_train_timesteps) + self.use_karras_sigmas = use_karras_sigmas + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + return self.sigmas.max() + + return (self.sigmas.max() ** 2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. 
+ """ + self._begin_index = begin_index + + def scale_model_input( + self, + sample: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + ) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = sample / ((sigma**2 + 1) ** 0.5) + return sample + + def set_timesteps( + self, + num_inference_steps: int, + device: Union[str, torch.device] = None, + num_train_timesteps: Optional[int] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) + + sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) + sigmas = torch.from_numpy(sigmas).to(device=device) + self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]]) + + timesteps = torch.from_numpy(timesteps) + timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)]) + + self.timesteps = timesteps.to(device=device) + + # empty dt and derivative + self.prev_derivative = None + self.dt = None + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + @property + def state_in_first_order(self): + return self.dt is None + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: Union[torch.FloatTensor, np.ndarray], + timestep: Union[float, torch.FloatTensor], + sample: Union[torch.FloatTensor, np.ndarray], + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. 
This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + """ + if self.step_index is None: + self._init_step_index(timestep) + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + sigma_next = self.sigmas[self.step_index + 1] + else: + # 2nd order / Heun's method + sigma = self.sigmas[self.step_index - 1] + sigma_next = self.sigmas[self.step_index] + + # currently only gamma=0 is supported. This usually works best anyways. + # We can support gamma in the future but then need to scale the timestep before + # passing it to the model which requires a change in API + gamma = 0 + sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + sigma_input = sigma_hat if self.state_in_first_order else sigma_next + pred_original_sample = sample - sigma_input * model_output + elif self.config.prediction_type == "v_prediction": + sigma_input = sigma_hat if self.state_in_first_order else sigma_next + pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( + sample / (sigma_input**2 + 1) + ) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + if self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + if self.state_in_first_order: + # 2. Convert to an ODE derivative for 1st order + derivative = (sample - pred_original_sample) / sigma_hat + # 3. delta timestep + dt = sigma_next - sigma_hat + + # store for 2nd order step + self.prev_derivative = derivative + self.dt = dt + self.sample = sample + else: + # 2. 2nd order / Heun's method + derivative = (sample - pred_original_sample) / sigma_next + derivative = (self.prev_derivative + derivative) / 2 + + # 3. 
take prev timestep & sample + dt = self.dt + sample = self.sample + + # free dt and derivative + # Note, this puts the scheduler in "first order mode" + self.prev_derivative = None + self.dt = None + self.sample = None + + prev_sample = sample + derivative * dt + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ipndm.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ipndm.py new file mode 100644 index 000000000..583afa4d2 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_ipndm.py @@ -0,0 +1,224 @@ +# Copyright 2024 Zhejiang University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils import SchedulerMixin, SchedulerOutput + + +class IPNDMScheduler(SchedulerMixin, ConfigMixin): + """ + A fourth-order Improved Pseudo Linear Multistep scheduler. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. 
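+    It keeps a history of up to four previous noise predictions (`ets`) and blends them with linear
+    multistep (Adams-Bashforth style) coefficients such as (55, -59, 37, -9) / 24, following
+    https://arxiv.org/pdf/2202.09778.pdf (explanatory sentence added by the editor, derived from `step` below).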
+ + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + """ + + order = 1 + + @register_to_config + def __init__( + self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None + ): + # set `betas`, `alphas`, `timesteps` + self.set_timesteps(num_train_timesteps) + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # For now we only support F-PNDM, i.e. the runge-kutta method + # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf + # mainly at formula (9), (12), (13) and the Algorithm 2. + self.pndm_order = 4 + + # running values + self.ets = [] + self._step_index = None + self._begin_index = None + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1] + steps = torch.cat([steps, torch.tensor([0.0])]) + + if self.config.trained_betas is not None: + self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32) + else: + self.betas = torch.sin(steps * math.pi / 2) ** 2 + + self.alphas = (1.0 - self.betas**2) ** 0.5 + + timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1] + self.timesteps = timesteps.to(device) + + self.ets = [] + self._step_index = None + self._begin_index = None + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the linear multistep method. It performs one forward pass multiple times to approximate the solution. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + if self.step_index is None: + self._init_step_index(timestep) + + timestep_index = self.step_index + prev_timestep_index = self.step_index + 1 + + ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] + self.ets.append(ets) + + if len(self.ets) == 1: + ets = self.ets[-1] + elif len(self.ets) == 2: + ets = (3 * self.ets[-1] - self.ets[-2]) / 2 + elif len(self.ets) == 3: + ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 + else: + ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) + + prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + + Returns: + `torch.FloatTensor`: + A scaled input sample. 
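+                (For IPNDM this is the input sample unchanged, since no scaling is needed; editor's note.)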
+ """ + return sample + + def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets): + alpha = self.alphas[timestep_index] + sigma = self.betas[timestep_index] + + next_alpha = self.alphas[prev_timestep_index] + next_sigma = self.betas[prev_timestep_index] + + pred = (sample - sigma * ets) / max(alpha, 1e-8) + prev_sample = next_alpha * pred + ets * next_sigma + + return prev_sample + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py new file mode 100644 index 000000000..9521c9c95 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py @@ -0,0 +1,508 @@ +# Copyright 2024 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
+ Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class KDPM2AncestralDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + KDPM2DiscreteScheduler with ancestral sampling is inspired by the DPMSolver2 and Algorithm 2 from the [Elucidating + the Design Space of Diffusion-Based Generative Models](https://huggingface.co/papers/2206.00364) paper. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.00085): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.012): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {σi}. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 2 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, # sensible defaults + beta_end: float = 0.012, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + use_karras_sigmas: Optional[bool] = False, + prediction_type: str = "epsilon", + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
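+            # (editor's note) "scaled_linear" makes the betas linear in sqrt-space:
+            # beta_t = (sqrt(beta_start) + t / (T - 1) * (sqrt(beta_end) - sqrt(beta_start))) ** 2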
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # set all values + self.set_timesteps(num_train_timesteps, None, num_train_timesteps) + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + return self.sigmas.max() + + return (self.sigmas.max() ** 2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input( + self, + sample: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + ) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + else: + sigma = self.sigmas_interpol[self.step_index - 1] + + sample = sample / ((sigma**2 + 1) ** 0.5) + return sample + + def set_timesteps( + self, + num_inference_steps: int, + device: Union[str, torch.device] = None, + num_train_timesteps: Optional[int] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + + self.log_sigmas = torch.from_numpy(log_sigmas).to(device) + sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) + sigmas = torch.from_numpy(sigmas).to(device=device) + + # compute up and down sigmas + sigmas_next = sigmas.roll(-1) + sigmas_next[-1] = 0.0 + sigmas_up = (sigmas_next**2 * (sigmas**2 - sigmas_next**2) / sigmas**2) ** 0.5 + sigmas_down = (sigmas_next**2 - sigmas_up**2) ** 0.5 + sigmas_down[-1] = 0.0 + + # compute interpolated sigmas + sigmas_interpol = sigmas.log().lerp(sigmas_down.log(), 0.5).exp() + sigmas_interpol[-2:] = 0.0 + + # set sigmas + self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) + self.sigmas_interpol = torch.cat( + [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]] + ) + self.sigmas_up = torch.cat([sigmas_up[:1], sigmas_up[1:].repeat_interleave(2), sigmas_up[-1:]]) + self.sigmas_down = torch.cat([sigmas_down[:1], sigmas_down[1:].repeat_interleave(2), sigmas_down[-1:]]) + + if str(device).startswith("mps"): + timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32) + else: + timesteps = torch.from_numpy(timesteps).to(device) + + sigmas_interpol = sigmas_interpol.cpu() + log_sigmas = self.log_sigmas.cpu() + timesteps_interpol = np.array( + [self._sigma_to_t(sigma_interpol, log_sigmas) for sigma_interpol in sigmas_interpol] + ) + + timesteps_interpol = torch.from_numpy(timesteps_interpol).to(device, dtype=timesteps.dtype) + interleaved_timesteps = torch.stack((timesteps_interpol[:-2, None], timesteps[1:, None]), dim=-1).flatten() + + self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps]) + + self.sample = None + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - 
log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + @property + def state_in_first_order(self): + return self.sample is None + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: Union[torch.FloatTensor, np.ndarray], + timestep: Union[float, torch.FloatTensor], + sample: Union[torch.FloatTensor, np.ndarray], + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. 
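# Editorial sketch (not part of the patch): the ancestral "up/down" split built in
# set_timesteps above, shown on a toy sigma ladder. Variable names and values are
# assumptions; sigma_down is the deterministic target of a step and sigma_up is the
# noise re-injected afterwards, so that sigma_down**2 + sigma_up**2 == sigma_next**2.
import torch

sigmas = torch.tensor([14.6, 8.0, 3.5, 1.2, 0.5])
sigmas_next = sigmas.roll(-1)
sigmas_next[-1] = 0.0
sigmas_up = (sigmas_next**2 * (sigmas**2 - sigmas_next**2) / sigmas**2) ** 0.5
sigmas_down = (sigmas_next**2 - sigmas_up**2) ** 0.5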
+ + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_ddim.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + """ + if self.step_index is None: + self._init_step_index(timestep) + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + sigma_interpol = self.sigmas_interpol[self.step_index] + sigma_up = self.sigmas_up[self.step_index] + sigma_down = self.sigmas_down[self.step_index - 1] + else: + # 2nd order / KPDM2's method + sigma = self.sigmas[self.step_index - 1] + sigma_interpol = self.sigmas_interpol[self.step_index - 1] + sigma_up = self.sigmas_up[self.step_index - 1] + sigma_down = self.sigmas_down[self.step_index - 1] + + # currently only gamma=0 is supported. This usually works best anyways. + # We can support gamma in the future but then need to scale the timestep before + # passing it to the model which requires a change in API + gamma = 0 + sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now + + device = model_output.device + noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=device, generator=generator) + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol + pred_original_sample = sample - sigma_input * model_output + elif self.config.prediction_type == "v_prediction": + sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol + pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( + sample / (sigma_input**2 + 1) + ) + elif self.config.prediction_type == "sample": + raise NotImplementedError("prediction_type not implemented yet: sample") + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + if self.state_in_first_order: + # 2. Convert to an ODE derivative for 1st order + derivative = (sample - pred_original_sample) / sigma_hat + # 3. delta timestep + dt = sigma_interpol - sigma_hat + + # store for 2nd order step + self.sample = sample + self.dt = dt + prev_sample = sample + derivative * dt + else: + # DPM-Solver-2 + # 2. Convert to an ODE derivative for 2nd order + derivative = (sample - pred_original_sample) / sigma_interpol + # 3. 
delta timestep + dt = sigma_down - sigma_hat + + sample = self.sample + self.sample = None + + prev_sample = sample + derivative * dt + prev_sample = prev_sample + noise * sigma_up + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py new file mode 100644 index 000000000..5be07b6da --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py @@ -0,0 +1,483 @@ +# Copyright 2024 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. 
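# Hedged usage sketch: how the ancestral KDPM2 scheduler completed above is typically
# swapped into a pipeline. This assumes the vendored package exposes the same public
# API as upstream diffusers; the model id and prompt are placeholders.
from diffusers import KDPM2AncestralDiscreteScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
image = pipe("a pokemon with blue eyes", num_inference_steps=25).images[0]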
+ + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + KDPM2DiscreteScheduler is inspired by the DPMSolver2 and Algorithm 2 from the [Elucidating the Design Space of + Diffusion-Based Generative Models](https://huggingface.co/papers/2206.00364) paper. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.00085): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.012): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {σi}. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. 
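# Sketch of the "squaredcos_cap_v2" (Glide cosine) schedule handled by
# betas_for_alpha_bar above; a direct transcription of the documented formula,
# shortened to a few steps for illustration.
import math

import torch

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    return torch.tensor(
        [min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta) for i in range(num_steps)]
    )

print(cosine_betas(10))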
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 2 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, # sensible defaults + beta_end: float = 0.012, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + use_karras_sigmas: Optional[bool] = False, + prediction_type: str = "epsilon", + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # set all values + self.set_timesteps(num_train_timesteps, None, num_train_timesteps) + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + return self.sigmas.max() + + return (self.sigmas.max() ** 2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input( + self, + sample: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + ) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + else: + sigma = self.sigmas_interpol[self.step_index] + + sample = sample / ((sigma**2 + 1) ** 0.5) + return sample + + def set_timesteps( + self, + num_inference_steps: int, + device: Union[str, torch.device] = None, + num_train_timesteps: Optional[int] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. 
+ device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + + self.log_sigmas = torch.from_numpy(log_sigmas).to(device=device) + sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) + sigmas = torch.from_numpy(sigmas).to(device=device) + + # interpolate sigmas + sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp() + + self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) + self.sigmas_interpol = torch.cat( + [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]] + ) + + timesteps = torch.from_numpy(timesteps).to(device) + + # interpolate timesteps + sigmas_interpol = sigmas_interpol.cpu() + log_sigmas = self.log_sigmas.cpu() + timesteps_interpol = np.array( + [self._sigma_to_t(sigma_interpol, log_sigmas) for sigma_interpol in sigmas_interpol] + ) + timesteps_interpol = torch.from_numpy(timesteps_interpol).to(device, dtype=timesteps.dtype) + interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten() + + self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps]) + + self.sample = None + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def state_in_first_order(self): + return self.sample is None + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index 
if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def step( + self, + model_output: Union[torch.FloatTensor, np.ndarray], + timestep: Union[float, torch.FloatTensor], + sample: Union[torch.FloatTensor, np.ndarray], + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
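# Minimal sketch of the Karras et al. (2022) sigma ladder produced by
# _convert_to_karras above (rho = 7); the endpoint values used here are assumptions.
import numpy as np

sigma_min, sigma_max, num_inference_steps, rho = 0.1, 14.6, 10, 7.0
ramp = np.linspace(0, 1, num_inference_steps)
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
# Decreases from sigma_max to sigma_min, spending most steps at low noise levels.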
+ """ + if self.step_index is None: + self._init_step_index(timestep) + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + sigma_interpol = self.sigmas_interpol[self.step_index + 1] + sigma_next = self.sigmas[self.step_index + 1] + else: + # 2nd order / KDPM2's method + sigma = self.sigmas[self.step_index - 1] + sigma_interpol = self.sigmas_interpol[self.step_index] + sigma_next = self.sigmas[self.step_index] + + # currently only gamma=0 is supported. This usually works best anyways. + # We can support gamma in the future but then need to scale the timestep before + # passing it to the model which requires a change in API + gamma = 0 + sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol + pred_original_sample = sample - sigma_input * model_output + elif self.config.prediction_type == "v_prediction": + sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol + pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( + sample / (sigma_input**2 + 1) + ) + elif self.config.prediction_type == "sample": + raise NotImplementedError("prediction_type not implemented yet: sample") + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + if self.state_in_first_order: + # 2. Convert to an ODE derivative for 1st order + derivative = (sample - pred_original_sample) / sigma_hat + # 3. delta timestep + dt = sigma_interpol - sigma_hat + + # store for 2nd order step + self.sample = sample + else: + # DPM-Solver-2 + # 2. Convert to an ODE derivative for 2nd order + derivative = (sample - pred_original_sample) / sigma_interpol + + # 3. 
delta timestep + dt = sigma_next - sigma_hat + + sample = self.sample + self.sample = None + + # upon completion increase step index by one + self._step_index += 1 + + prev_sample = sample + derivative * dt + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_karras_ve_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_karras_ve_flax.py new file mode 100644 index 000000000..4d099604a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_karras_ve_flax.py @@ -0,0 +1,238 @@ +# Copyright 2024 NVIDIA and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax +import jax.numpy as jnp +from jax import random + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from .scheduling_utils_flax import FlaxSchedulerMixin + + +@flax.struct.dataclass +class KarrasVeSchedulerState: + # setable values + num_inference_steps: Optional[int] = None + timesteps: Optional[jnp.ndarray] = None + schedule: Optional[jnp.ndarray] = None # sigma(t_i) + + @classmethod + def create(cls): + return cls() + + +@dataclass +class FlaxKarrasVeOutput(BaseOutput): + """ + Output class for the scheduler's step function output. 
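# Hedged sketch of add_noise above: in this sigma parameterization the forward process
# is x_t = x_0 + sigma_t * eps, with one sigma per batch element broadcast over the
# remaining dimensions. Shapes and sigma values are assumptions.
import torch

original_samples = torch.randn(2, 4, 64, 64)
noise = torch.randn_like(original_samples)
sigma = torch.tensor([14.6, 3.5]).view(-1, 1, 1, 1)
noisy_samples = original_samples + noise * sigma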
+ + Args: + prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + derivative (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): + Derivative of predicted original image sample (x_0). + state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. + """ + + prev_sample: jnp.ndarray + derivative: jnp.ndarray + state: KarrasVeSchedulerState + + +class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and + the VE column of Table 1 from [1] for reference. + + [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." + https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic + differential equations." https://arxiv.org/abs/2011.13456 + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of + Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364. The grid search values used to find the + optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper. + + Args: + sigma_min (`float`): minimum noise magnitude + sigma_max (`float`): maximum noise magnitude + s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling. + A reasonable range is [1.000, 1.011]. + s_churn (`float`): the parameter controlling the overall amount of stochasticity. + A reasonable range is [0, 100]. + s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity). + A reasonable range is [0, 10]. + s_max (`float`): the end value of the sigma range where we add noise. + A reasonable range is [0.2, 80]. + """ + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + sigma_min: float = 0.02, + sigma_max: float = 100, + s_noise: float = 1.007, + s_churn: float = 80, + s_min: float = 0.05, + s_max: float = 50, + ): + pass + + def create_state(self): + return KarrasVeSchedulerState.create() + + def set_timesteps( + self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = () + ) -> KarrasVeSchedulerState: + """ + Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`KarrasVeSchedulerState`): + the `FlaxKarrasVeScheduler` state data class. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. 
+ + """ + timesteps = jnp.arange(0, num_inference_steps)[::-1].copy() + schedule = [ + ( + self.config.sigma_max**2 + * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) + ) + for i in timesteps + ] + + return state.replace( + num_inference_steps=num_inference_steps, + schedule=jnp.array(schedule, dtype=jnp.float32), + timesteps=timesteps, + ) + + def add_noise_to_input( + self, + state: KarrasVeSchedulerState, + sample: jnp.ndarray, + sigma: float, + key: jax.Array, + ) -> Tuple[jnp.ndarray, float]: + """ + Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i ≥ 0 to reach a + higher noise level sigma_hat = sigma_i + gamma_i*sigma_i. + + TODO Args: + """ + if self.config.s_min <= sigma <= self.config.s_max: + gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1) + else: + gamma = 0 + + # sample eps ~ N(0, S_noise^2 * I) + key = random.split(key, num=1) + eps = self.config.s_noise * random.normal(key=key, shape=sample.shape) + sigma_hat = sigma + gamma * sigma + sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) + + return sample_hat, sigma_hat + + def step( + self, + state: KarrasVeSchedulerState, + model_output: jnp.ndarray, + sigma_hat: float, + sigma_prev: float, + sample_hat: jnp.ndarray, + return_dict: bool = True, + ) -> Union[FlaxKarrasVeOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. + model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model. + sigma_hat (`float`): TODO + sigma_prev (`float`): TODO + sample_hat (`torch.FloatTensor` or `np.ndarray`): TODO + return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class + + Returns: + [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] or `tuple`: Updated sample in the diffusion + chain and derivative. [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] if `return_dict` is + True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. + """ + + pred_original_sample = sample_hat + sigma_hat * model_output + derivative = (sample_hat - pred_original_sample) / sigma_hat + sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative + + if not return_dict: + return (sample_prev, derivative, state) + + return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) + + def step_correct( + self, + state: KarrasVeSchedulerState, + model_output: jnp.ndarray, + sigma_hat: float, + sigma_prev: float, + sample_hat: jnp.ndarray, + sample_prev: jnp.ndarray, + derivative: jnp.ndarray, + return_dict: bool = True, + ) -> Union[FlaxKarrasVeOutput, Tuple]: + """ + Correct the predicted sample based on the output model_output of the network. TODO complete description + + Args: + state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. + model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model. 
+ sigma_hat (`float`): TODO + sigma_prev (`float`): TODO + sample_hat (`torch.FloatTensor` or `np.ndarray`): TODO + sample_prev (`torch.FloatTensor` or `np.ndarray`): TODO + derivative (`torch.FloatTensor` or `np.ndarray`): TODO + return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class + + Returns: + prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO + + """ + pred_original_sample = sample_prev + sigma_prev * model_output + derivative_corr = (sample_prev - pred_original_sample) / sigma_prev + sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) + + if not return_dict: + return (sample_prev, derivative, state) + + return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) + + def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps): + raise NotImplementedError() diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_lcm.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_lcm.py new file mode 100644 index 000000000..846558b38 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_lcm.py @@ -0,0 +1,660 @@ +# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class LCMSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. 
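# Sketch of the correction performed in step_correct above: the update from sigma_hat
# to sigma_prev reuses the average of the first derivative and the re-evaluated
# derivative, i.e. a Heun-style second-order update. Scalars stand in for image tensors
# and the numbers are assumptions.
import numpy as np

sigma_hat, sigma_prev = 5.0, 3.0
sample_hat = np.array(1.0)
derivative = np.array(0.4)        # slope at (sample_hat, sigma_hat)
derivative_corr = np.array(0.3)   # slope re-evaluated at the predicted sample
sample_prev = sample_hat + (sigma_prev - sigma_hat) * 0.5 * (derivative + derivative_corr)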
+ """ + + prev_sample: torch.FloatTensor + denoised: Optional[torch.FloatTensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas: torch.FloatTensor) -> torch.FloatTensor: + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.FloatTensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.FloatTensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class LCMScheduler(SchedulerMixin, ConfigMixin): + """ + `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with + non-Markovian guidance. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. [`~ConfigMixin`] takes care of storing all config + attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be + accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving + functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. 
+ beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + original_inference_steps (`int`, *optional*, defaults to 50): + The default number of inference steps used to generate a linearly-spaced timestep schedule, from which we + will ultimately take `num_inference_steps` evenly spaced timesteps to form the final timestep schedule. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, defaults to `True`): + Each diffusion step uses the alphas product value at that step and at the previous one. For the final step + there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the alpha value at step 0. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + timestep_scaling (`float`, defaults to 10.0): + The factor the timesteps will be multiplied by when calculating the consistency model boundary conditions + `c_skip` and `c_out`. Increasing this will decrease the approximation error (although the approximation + error at the default of `10.0` is already pretty small). + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
+ """ + + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, + beta_end: float = 0.012, + beta_schedule: str = "scaled_linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + original_inference_steps: int = 50, + clip_sample: bool = False, + clip_sample_range: float = 1.0, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + timestep_scaling: float = 10.0, + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) + self.custom_timesteps = False + + self._step_index = None + self._begin_index = None + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + @property + def step_index(self): + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. 
+ """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." + + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + original_inference_steps: Optional[int] = None, + timesteps: Optional[List[int]] = None, + strength: int = 1.0, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`, *optional*): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + original_inference_steps (`int`, *optional*): + The original number of inference steps, which will be used to generate a linearly-spaced timestep + schedule (which is different from the standard `diffusers` implementation). 
We will then take + `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as + our final timestep schedule. If not set, this will default to the `original_inference_steps` attribute. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of equal spacing between timesteps on the training/distillation timestep + schedule is used. If `timesteps` is passed, `num_inference_steps` must be `None`. + """ + # 0. Check inputs + if num_inference_steps is None and timesteps is None: + raise ValueError("Must pass exactly one of `num_inference_steps` or `custom_timesteps`.") + + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") + + # 1. Calculate the LCM original training/distillation timestep schedule. + original_steps = ( + original_inference_steps if original_inference_steps is not None else self.config.original_inference_steps + ) + + if original_steps > self.config.num_train_timesteps: + raise ValueError( + f"`original_steps`: {original_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + # LCM Timesteps Setting + # The skipping step parameter k from the paper. + k = self.config.num_train_timesteps // original_steps + # LCM Training/Distillation Steps Schedule + # Currently, only a linearly-spaced schedule is supported (same as in the LCM distillation scripts). + lcm_origin_timesteps = np.asarray(list(range(1, int(original_steps * strength) + 1))) * k - 1 + + # 2. Calculate the LCM inference timestep schedule. + if timesteps is not None: + # 2.1 Handle custom timestep schedules. + train_timesteps = set(lcm_origin_timesteps) + non_train_timesteps = [] + for i in range(1, len(timesteps)): + if timesteps[i] >= timesteps[i - 1]: + raise ValueError("`custom_timesteps` must be in descending order.") + + if timesteps[i] not in train_timesteps: + non_train_timesteps.append(timesteps[i]) + + if timesteps[0] >= self.config.num_train_timesteps: + raise ValueError( + f"`timesteps` must start before `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps}." + ) + + # Raise warning if timestep schedule does not start with self.config.num_train_timesteps - 1 + if strength == 1.0 and timesteps[0] != self.config.num_train_timesteps - 1: + logger.warning( + f"The first timestep on the custom timestep schedule is {timesteps[0]}, not" + f" `self.config.num_train_timesteps - 1`: {self.config.num_train_timesteps - 1}. You may get" + f" unexpected results when using this timestep schedule." + ) + + # Raise warning if custom timestep schedule contains timesteps not on original timestep schedule + if non_train_timesteps: + logger.warning( + f"The custom timestep schedule contains the following timesteps which are not on the original" + f" training/distillation timestep schedule: {non_train_timesteps}. You may get unexpected results" + f" when using this timestep schedule." 
+ ) + + # Raise warning if custom timestep schedule is longer than original_steps + if len(timesteps) > original_steps: + logger.warning( + f"The number of timesteps in the custom timestep schedule is {len(timesteps)}, which exceeds the" + f" the length of the timestep schedule used for training: {original_steps}. You may get some" + f" unexpected results when using this timestep schedule." + ) + + timesteps = np.array(timesteps, dtype=np.int64) + self.num_inference_steps = len(timesteps) + self.custom_timesteps = True + + # Apply strength (e.g. for img2img pipelines) (see StableDiffusionImg2ImgPipeline.get_timesteps) + init_timestep = min(int(self.num_inference_steps * strength), self.num_inference_steps) + t_start = max(self.num_inference_steps - init_timestep, 0) + timesteps = timesteps[t_start * self.order :] + # TODO: also reset self.num_inference_steps? + else: + # 2.2 Create the "standard" LCM inference timestep schedule. + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + skipping_step = len(lcm_origin_timesteps) // num_inference_steps + + if skipping_step < 1: + raise ValueError( + f"The combination of `original_steps x strength`: {original_steps} x {strength} is smaller than `num_inference_steps`: {num_inference_steps}. Make sure to either reduce `num_inference_steps` to a value smaller than {int(original_steps * strength)} or increase `strength` to a value higher than {float(num_inference_steps / original_steps)}." + ) + + self.num_inference_steps = num_inference_steps + + if num_inference_steps > original_steps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:" + f" {original_steps} because the final timestep schedule will be a subset of the" + f" `original_inference_steps`-sized initial timestep schedule." + ) + + # LCM Inference Steps Schedule + lcm_origin_timesteps = lcm_origin_timesteps[::-1].copy() + # Select (approximately) evenly spaced indices from lcm_origin_timesteps. + inference_indices = np.linspace(0, len(lcm_origin_timesteps), num=num_inference_steps, endpoint=False) + inference_indices = np.floor(inference_indices).astype(np.int64) + timesteps = lcm_origin_timesteps[inference_indices] + + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.long) + + self._step_index = None + self._begin_index = None + + def get_scalings_for_boundary_condition_discrete(self, timestep): + self.sigma_data = 0.5 # Default: 0.5 + scaled_timestep = timestep * self.config.timestep_scaling + + c_skip = self.sigma_data**2 / (scaled_timestep**2 + self.sigma_data**2) + c_out = scaled_timestep / (scaled_timestep**2 + self.sigma_data**2) ** 0.5 + return c_skip, c_out + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[LCMSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. 
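# Sketch of the consistency-model boundary scalings computed in
# get_scalings_for_boundary_condition_discrete above: as the scaled timestep approaches
# 0, c_skip -> 1 and c_out -> 0, so the denoised output reduces to the input sample at
# the boundary. sigma_data = 0.5 and timestep_scaling = 10.0 follow the values used in
# this file; the example timesteps are assumptions.
import torch

sigma_data, timestep_scaling = 0.5, 10.0
timesteps = torch.tensor([0.0, 250.0, 999.0])
scaled = timesteps * timestep_scaling
c_skip = sigma_data**2 / (scaled**2 + sigma_data**2)
c_out = scaled / (scaled**2 + sigma_data**2) ** 0.5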
+ timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`. + Returns: + [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # 1. get previous step value + prev_step_index = self.step_index + 1 + if prev_step_index < len(self.timesteps): + prev_timestep = self.timesteps[prev_step_index] + else: + prev_timestep = timestep + + # 2. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + # 3. Get scalings for boundary conditions + c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep) + + # 4. Compute the predicted original sample x_0 based on the model parameterization + if self.config.prediction_type == "epsilon": # noise-prediction + predicted_original_sample = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt() + elif self.config.prediction_type == "sample": # x-prediction + predicted_original_sample = model_output + elif self.config.prediction_type == "v_prediction": # v-prediction + predicted_original_sample = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" + " `v_prediction` for `LCMScheduler`." + ) + + # 5. Clip or threshold "predicted x_0" + if self.config.thresholding: + predicted_original_sample = self._threshold_sample(predicted_original_sample) + elif self.config.clip_sample: + predicted_original_sample = predicted_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 6. Denoise model output using boundary conditions + denoised = c_out * predicted_original_sample + c_skip * sample + + # 7. Sample and inject noise z ~ N(0, I) for MultiStep Inference + # Noise is not used on the final timestep of the timestep schedule. + # This also means that noise is not used for one-step sampling. 
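The boundary-condition scalings defined in `get_scalings_for_boundary_condition_discrete` above make the consistency function reduce to the identity at the start of the trajectory. A small scalar sketch, using `sigma_data = 0.5` as set above and assuming `timestep_scaling = 10.0` (that value comes from the scheduler config and is an assumption here):

    def scalings_for_boundary_condition(timestep, sigma_data=0.5, timestep_scaling=10.0):
        # Same formulas as get_scalings_for_boundary_condition_discrete, scalar version.
        scaled_t = timestep * timestep_scaling
        c_skip = sigma_data**2 / (scaled_t**2 + sigma_data**2)
        c_out = scaled_t / (scaled_t**2 + sigma_data**2) ** 0.5
        return c_skip, c_out

    print(scalings_for_boundary_condition(0))     # (1.0, 0.0): identity at t = 0
    print(scalings_for_boundary_condition(999))   # c_skip ~ 0, c_out ~ 1 late in the chain

So `denoised = c_out * predicted_original_sample + c_skip * sample` weights the model's prediction heavily at high-noise timesteps and the incoming sample heavily near t = 0.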
+ if self.step_index != self.num_inference_steps - 1: + noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=denoised.dtype + ) + prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise + else: + prev_sample = denoised + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample, denoised) + + return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised) + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity + def get_velocity( + self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep + def previous_timestep(self, timestep): + if self.custom_timesteps: + index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] + if index == self.timesteps.shape[0] - 1: + prev_t = torch.tensor(-1) + else: + prev_t = self.timesteps[index + 1] + else: + num_inference_steps = ( + self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps + ) + prev_t = timestep - self.config.num_train_timesteps // num_inference_steps + + return prev_t diff --git 
a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_lms_discrete.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_lms_discrete.py new file mode 100644 index 000000000..43a0ba4a2 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_lms_discrete.py @@ -0,0 +1,475 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import math +import warnings +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +from scipy import integrate + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->LMSDiscrete +class LMSDiscreteSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
+ Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + A linear multistep scheduler for discrete beta schedules. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {σi}. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + use_karras_sigmas: Optional[bool] = False, + prediction_type: str = "epsilon", + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
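As an aside, the cosine `betas_for_alpha_bar` schedule defined above can be sketched standalone (a toy 10-step version, not part of the patch):

    import math

    def alpha_bar_fn(t):
        # Cosine alpha-bar profile used by the "squaredcos_cap_v2" branch.
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    num_diffusion_timesteps, max_beta = 10, 0.999
    betas = [
        min(1 - alpha_bar_fn((i + 1) / num_diffusion_timesteps) / alpha_bar_fn(i / num_diffusion_timesteps), max_beta)
        for i in range(num_diffusion_timesteps)
    ]
    # Each beta is the per-step drop in the cumulative alpha, so betas grow toward max_beta.
    print([round(b, 4) for b in betas])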
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32) + self.sigmas = torch.from_numpy(sigmas) + + # setable values + self.num_inference_steps = None + self.use_karras_sigmas = use_karras_sigmas + self.set_timesteps(num_train_timesteps, None) + self.derivatives = [] + self.is_scale_input_called = False + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + return self.sigmas.max() + + return (self.sigmas.max() ** 2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input( + self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor] + ) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`float` or `torch.FloatTensor`): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = sample / ((sigma**2 + 1) ** 0.5) + self.is_scale_input_called = True + return sample + + def get_lms_coefficient(self, order, t, current_order): + """ + Compute the linear multistep coefficient. + + Args: + order (): + t (): + current_order (): + """ + + def lms_derivative(tau): + prod = 1.0 + for k in range(order): + if current_order == k: + continue + prod *= (tau - self.sigmas[t - k]) / (self.sigmas[t - current_order] - self.sigmas[t - k]) + return prod + + integrated_coeff = integrate.quad(lms_derivative, self.sigmas[t], self.sigmas[t + 1], epsrel=1e-4)[0] + + return integrated_coeff + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. 
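The three `timestep_spacing` options handled in the body just below follow Table 2 of the paper cited above; a standalone numpy sketch with assumed values (1000 training timesteps, 10 inference steps, `steps_offset = 0`):

    import numpy as np

    num_train_timesteps, num_inference_steps, steps_offset = 1000, 10, 0

    # "linspace": evenly spaced over [0, T - 1], reversed into denoising order.
    linspace = np.linspace(0, num_train_timesteps - 1, num_inference_steps)[::-1].round()

    # "leading": multiples of T // N, plus the configured offset.
    ratio = num_train_timesteps // num_inference_steps
    leading = (np.arange(0, num_inference_steps) * ratio).round()[::-1] + steps_offset

    # "trailing": count down from T in steps of T / N, then subtract one.
    trailing = np.arange(num_train_timesteps, 0, -num_train_timesteps / num_inference_steps).round() - 1

    print(linspace)   # 999, 888, ..., 111, 0
    print(leading)    # 900, 800, ..., 100, 0
    print(trailing)   # 999, 899, ..., 199, 99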
If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[ + ::-1 + ].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(self.config.num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) + + sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas).to(device=device) + self.timesteps = torch.from_numpy(timesteps).to(device=device) + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + self.derivatives = [] + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + # copied from diffusers.schedulers.scheduling_euler_discrete._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # copied from diffusers.schedulers.scheduling_euler_discrete._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.FloatTensor) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + sigma_min: float = in_sigmas[-1].item() + sigma_max: float = in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, self.num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def step( + self, + model_output: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + sample: torch.FloatTensor, + order: int = 4, + return_dict: bool = True, + ) -> Union[LMSDiscreteSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float` or `torch.FloatTensor`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + order (`int`, defaults to 4): + The order of the linear multistep method. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if not self.is_scale_input_called: + warnings.warn( + "The `scale_model_input` function should be called before `step` to ensure correct denoising. " + "See `StableDiffusionPipeline` for a usage example." + ) + + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + + # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + pred_original_sample = sample - sigma * model_output + elif self.config.prediction_type == "v_prediction": + # * c_out + input * c_skip + pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + # 2. Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma + self.derivatives.append(derivative) + if len(self.derivatives) > order: + self.derivatives.pop(0) + + # 3. Compute linear multistep coefficients + order = min(self.step_index + 1, order) + lms_coeffs = [self.get_lms_coefficient(order, self.step_index, curr_order) for curr_order in range(order)] + + # 4. Compute previous sample based on the derivatives path + prev_sample = sample + sum( + coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(self.derivatives)) + ) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return LMSDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_lms_discrete_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_lms_discrete_flax.py new file mode 100644 index 000000000..f1169cc90 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_lms_discrete_flax.py @@ -0,0 +1,283 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax.numpy as jnp +from scipy import integrate + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import ( + CommonSchedulerState, + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + broadcast_to_shape_from_left, +) + + +@flax.struct.dataclass +class LMSDiscreteSchedulerState: + common: CommonSchedulerState + + # setable values + init_noise_sigma: jnp.ndarray + timesteps: jnp.ndarray + sigmas: jnp.ndarray + num_inference_steps: Optional[int] = None + + # running values + derivatives: Optional[jnp.ndarray] = None + + @classmethod + def create( + cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray, sigmas: jnp.ndarray + ): + return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas) + + +@dataclass +class FlaxLMSSchedulerOutput(FlaxSchedulerOutput): + state: LMSDiscreteSchedulerState + + +class FlaxLMSDiscreteScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by + Katherine Crowson: + https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181 + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`jnp.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + prediction_type (`str`, default `epsilon`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 + https://imagen.research.google/video/paper.pdf) + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + the `dtype` used for params and computation. 
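The `(sigma**2 + 1) ** 0.5` factor applied by `scale_model_input` in both the PyTorch and Flax LMS variants keeps the model input at roughly unit variance, assuming the clean sample is itself roughly unit-variance; a quick numpy check with toy data:

    import numpy as np

    rng = np.random.default_rng(0)
    sigma = 5.0
    x0 = rng.standard_normal(100_000)        # clean sample, ~unit variance
    noise = rng.standard_normal(100_000)

    noisy = x0 + sigma * noise               # what add_noise produces at this sigma
    scaled = noisy / (sigma**2 + 1) ** 0.5   # what scale_model_input feeds to the model

    print(round(noisy.std(), 2), round(scaled.std(), 2))   # ~5.1 vs ~1.0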
+ """ + + _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] + + dtype: jnp.dtype + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[jnp.ndarray] = None, + prediction_type: str = "epsilon", + dtype: jnp.dtype = jnp.float32, + ): + self.dtype = dtype + + def create_state(self, common: Optional[CommonSchedulerState] = None) -> LMSDiscreteSchedulerState: + if common is None: + common = CommonSchedulerState.create(self) + + timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] + sigmas = ((1 - common.alphas_cumprod) / common.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + init_noise_sigma = sigmas.max() + + return LMSDiscreteSchedulerState.create( + common=common, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + sigmas=sigmas, + ) + + def scale_model_input(self, state: LMSDiscreteSchedulerState, sample: jnp.ndarray, timestep: int) -> jnp.ndarray: + """ + Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the K-LMS algorithm. + + Args: + state (`LMSDiscreteSchedulerState`): + the `FlaxLMSDiscreteScheduler` state data class instance. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + timestep (`int`): + current discrete timestep in the diffusion chain. + + Returns: + `jnp.ndarray`: scaled input sample + """ + (step_index,) = jnp.where(state.timesteps == timestep, size=1) + step_index = step_index[0] + + sigma = state.sigmas[step_index] + sample = sample / ((sigma**2 + 1) ** 0.5) + return sample + + def get_lms_coefficient(self, state: LMSDiscreteSchedulerState, order, t, current_order): + """ + Compute a linear multistep coefficient. + + Args: + order (TODO): + t (TODO): + current_order (TODO): + """ + + def lms_derivative(tau): + prod = 1.0 + for k in range(order): + if current_order == k: + continue + prod *= (tau - state.sigmas[t - k]) / (state.sigmas[t - current_order] - state.sigmas[t - k]) + return prod + + integrated_coeff = integrate.quad(lms_derivative, state.sigmas[t], state.sigmas[t + 1], epsrel=1e-4)[0] + + return integrated_coeff + + def set_timesteps( + self, state: LMSDiscreteSchedulerState, num_inference_steps: int, shape: Tuple = () + ) -> LMSDiscreteSchedulerState: + """ + Sets the timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`LMSDiscreteSchedulerState`): + the `FlaxLMSDiscreteScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. 
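`get_lms_coefficient` integrates a Lagrange basis polynomial over one sigma interval. Because the basis functions sum to one, the coefficients of a full-order step sum to `sigmas[t + 1] - sigmas[t]`; the sketch below checks this on an assumed toy sigma schedule:

    import numpy as np
    from scipy import integrate

    sigmas = np.array([14.6, 9.1, 5.4, 3.0, 1.6, 0.0])   # toy decreasing schedule

    def lms_coefficient(order, t, current_order):
        # Integral of the Lagrange basis polynomial over [sigmas[t], sigmas[t + 1]].
        def lms_derivative(tau):
            prod = 1.0
            for k in range(order):
                if current_order == k:
                    continue
                prod *= (tau - sigmas[t - k]) / (sigmas[t - current_order] - sigmas[t - k])
            return prod
        return integrate.quad(lms_derivative, sigmas[t], sigmas[t + 1], epsrel=1e-4)[0]

    t, order = 3, 4   # a step with a full 4th-order history available
    coeffs = [lms_coefficient(order, t, c) for c in range(order)]
    print(sum(coeffs), sigmas[t + 1] - sigmas[t])   # both ~ -1.4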
+ """ + + timesteps = jnp.linspace(self.config.num_train_timesteps - 1, 0, num_inference_steps, dtype=self.dtype) + + low_idx = jnp.floor(timesteps).astype(jnp.int32) + high_idx = jnp.ceil(timesteps).astype(jnp.int32) + + frac = jnp.mod(timesteps, 1.0) + + sigmas = ((1 - state.common.alphas_cumprod) / state.common.alphas_cumprod) ** 0.5 + sigmas = (1 - frac) * sigmas[low_idx] + frac * sigmas[high_idx] + sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)]) + + timesteps = timesteps.astype(jnp.int32) + + # initial running values + derivatives = jnp.zeros((0,) + shape, dtype=self.dtype) + + return state.replace( + timesteps=timesteps, + sigmas=sigmas, + num_inference_steps=num_inference_steps, + derivatives=derivatives, + ) + + def step( + self, + state: LMSDiscreteSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + order: int = 4, + return_dict: bool = True, + ) -> Union[FlaxLMSSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + state (`LMSDiscreteSchedulerState`): the `FlaxLMSDiscreteScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + order: coefficient for multi-step inference. + return_dict (`bool`): option for returning tuple rather than FlaxLMSSchedulerOutput class + + Returns: + [`FlaxLMSSchedulerOutput`] or `tuple`: [`FlaxLMSSchedulerOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. + + """ + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + sigma = state.sigmas[timestep] + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + pred_original_sample = sample - sigma * model_output + elif self.config.prediction_type == "v_prediction": + # * c_out + input * c_skip + pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + # 2. Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma + state = state.replace(derivatives=jnp.append(state.derivatives, derivative)) + if len(state.derivatives) > order: + state = state.replace(derivatives=jnp.delete(state.derivatives, 0)) + + # 3. Compute linear multistep coefficients + order = min(timestep + 1, order) + lms_coeffs = [self.get_lms_coefficient(state, order, timestep, curr_order) for curr_order in range(order)] + + # 4. 
Compute previous sample based on the derivatives path + prev_sample = sample + sum( + coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(state.derivatives)) + ) + + if not return_dict: + return (prev_sample, state) + + return FlaxLMSSchedulerOutput(prev_sample=prev_sample, state=state) + + def add_noise( + self, + state: LMSDiscreteSchedulerState, + original_samples: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + sigma = state.sigmas[timesteps].flatten() + sigma = broadcast_to_shape_from_left(sigma, noise.shape) + + noisy_samples = original_samples + noise * sigma + + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_pndm.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_pndm.py new file mode 100644 index 000000000..a8f8b0971 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_pndm.py @@ -0,0 +1,476 @@ +# Copyright 2024 Zhejiang University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
+ Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class PNDMScheduler(SchedulerMixin, ConfigMixin): + """ + `PNDMScheduler` uses pseudo numerical methods for diffusion models such as the Runge-Kutta and linear multi-step + method. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + skip_prk_steps (`bool`, defaults to `False`): + Allows the scheduler to skip the Runge-Kutta steps defined in the original paper as being required before + PLMS steps. + set_alpha_to_one (`bool`, defaults to `False`): + Each diffusion step uses the alphas product value at that step and at the previous one. For the final step + there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the alpha value at step 0. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process) + or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) + paper). + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. 
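When `skip_prk_steps=True`, `set_timesteps` below drops the Runge-Kutta warm-up and repeats the second timestep so the multistep history can still be bootstrapped. A standalone sketch of the resulting PLMS schedule, with assumed values (1000 training timesteps, 10 inference steps, `"leading"` spacing, `steps_offset = 0`):

    import numpy as np

    num_train_timesteps, num_inference_steps, steps_offset = 1000, 10, 0

    ratio = num_train_timesteps // num_inference_steps
    _timesteps = (np.arange(0, num_inference_steps) * ratio).round() + steps_offset

    # PLMS-only schedule: everything but the last entry, then the second-to-last again,
    # then the last entry, reversed into denoising order.
    plms_timesteps = np.concatenate(
        [_timesteps[:-1], _timesteps[-2:-1], _timesteps[-1:]]
    )[::-1]
    print(plms_timesteps)   # 900, 800, 800, 700, ..., 100, 0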
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + skip_prk_steps: bool = False, + set_alpha_to_one: bool = False, + prediction_type: str = "epsilon", + timestep_spacing: str = "leading", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # For now we only support F-PNDM, i.e. the runge-kutta method + # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf + # mainly at formula (9), (12), (13) and the Algorithm 2. + self.pndm_order = 4 + + # running values + self.cur_model_output = 0 + self.counter = 0 + self.cur_sample = None + self.ets = [] + + # setable values + self.num_inference_steps = None + self._timesteps = np.arange(0, num_train_timesteps)[::-1].copy() + self.prk_timesteps = None + self.plms_timesteps = None + self.timesteps = None + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + + self.num_inference_steps = num_inference_steps + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + self._timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps).round().astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + self._timesteps = (np.arange(0, num_inference_steps) * step_ratio).round() + self._timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + self._timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio))[::-1].astype( + np.int64 + ) + self._timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + if self.config.skip_prk_steps: + # for some models like stable diffusion the prk steps can/should be skipped to + # produce better results. When using PNDM with `self.config.skip_prk_steps` the implementation + # is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51 + self.prk_timesteps = np.array([]) + self.plms_timesteps = np.concatenate([self._timesteps[:-1], self._timesteps[-2:-1], self._timesteps[-1:]])[ + ::-1 + ].copy() + else: + prk_timesteps = np.array(self._timesteps[-self.pndm_order :]).repeat(2) + np.tile( + np.array([0, self.config.num_train_timesteps // num_inference_steps // 2]), self.pndm_order + ) + self.prk_timesteps = (prk_timesteps[:-1].repeat(2)[1:-1])[::-1].copy() + self.plms_timesteps = self._timesteps[:-3][ + ::-1 + ].copy() # we copy to avoid having negative strides which are not supported by torch.from_numpy + + timesteps = np.concatenate([self.prk_timesteps, self.plms_timesteps]).astype(np.int64) + self.timesteps = torch.from_numpy(timesteps).to(device) + + self.ets = [] + self.counter = 0 + self.cur_model_output = 0 + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise), and calls [`~PNDMScheduler.step_prk`] + or [`~PNDMScheduler.step_plms`] depending on the internal variable `counter`. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
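`step_prk` below spreads one Runge-Kutta update over four consecutive calls, accumulating the intermediate predictions in `cur_model_output`. A toy scalar walk-through of that accumulation (the real code operates on tensors):

    e1, e2, e3, e4 = 1.0, 1.2, 1.1, 0.9   # stand-ins for four successive noise predictions

    cur = 0.0
    cur += e1 / 6                # counter % 4 == 0
    cur += e2 / 3                # counter % 4 == 1
    cur += e3 / 3                # counter % 4 == 2
    combined = cur + e4 / 6      # counter % 4 == 3

    # Equivalent to the classic RK4 weighting (1, 2, 2, 1) / 6.
    print(combined, (e1 + 2 * e2 + 2 * e3 + e4) / 6)   # both ~1.0833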
+ + """ + if self.counter < len(self.prk_timesteps) and not self.config.skip_prk_steps: + return self.step_prk(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict) + else: + return self.step_plms(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict) + + def step_prk( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the Runge-Kutta method. It performs four forward passes to approximate the solution to the differential + equation. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + diff_to_prev = 0 if self.counter % 2 else self.config.num_train_timesteps // self.num_inference_steps // 2 + prev_timestep = timestep - diff_to_prev + timestep = self.prk_timesteps[self.counter // 4 * 4] + + if self.counter % 4 == 0: + self.cur_model_output += 1 / 6 * model_output + self.ets.append(model_output) + self.cur_sample = sample + elif (self.counter - 1) % 4 == 0: + self.cur_model_output += 1 / 3 * model_output + elif (self.counter - 2) % 4 == 0: + self.cur_model_output += 1 / 3 * model_output + elif (self.counter - 3) % 4 == 0: + model_output = self.cur_model_output + 1 / 6 * model_output + self.cur_model_output = 0 + + # cur_sample should not be `None` + cur_sample = self.cur_sample if self.cur_sample is not None else sample + + prev_sample = self._get_prev_sample(cur_sample, timestep, prev_timestep, model_output) + self.counter += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def step_plms( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the linear multistep method. It performs one forward pass multiple times to approximate the solution. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
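The `step_plms` body that follows blends the stored noise predictions in `ets` with Adams-Bashforth-style coefficients. Each rule's coefficients sum to one, so a constant prediction history passes through unchanged, and for a linearly drifting history all three rules agree; a toy scalar check:

    ets = [0.9, 1.0, 1.1, 1.2]   # oldest -> newest toy noise predictions

    blend_2 = (3 * ets[-1] - ets[-2]) / 2
    blend_3 = (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    blend_4 = (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

    # (3 - 1)/2 = (23 - 16 + 5)/12 = (55 - 59 + 37 - 9)/24 = 1, and the linear
    # history above gives 1.25 for every rule.
    print(blend_2, blend_3, blend_4)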
+ + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if not self.config.skip_prk_steps and len(self.ets) < 3: + raise ValueError( + f"{self.__class__} can only be run AFTER scheduler has been run " + "in 'prk' mode for at least 12 iterations " + "See: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_pndm.py " + "for more information." + ) + + prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps + + if self.counter != 1: + self.ets = self.ets[-3:] + self.ets.append(model_output) + else: + prev_timestep = timestep + timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps + + if len(self.ets) == 1 and self.counter == 0: + model_output = model_output + self.cur_sample = sample + elif len(self.ets) == 1 and self.counter == 1: + model_output = (model_output + self.ets[-1]) / 2 + sample = self.cur_sample + self.cur_sample = None + elif len(self.ets) == 2: + model_output = (3 * self.ets[-1] - self.ets[-2]) / 2 + elif len(self.ets) == 3: + model_output = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 + else: + model_output = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) + + prev_sample = self._get_prev_sample(sample, timestep, prev_timestep, model_output) + self.counter += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + + Returns: + `torch.FloatTensor`: + A scaled input sample. 
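`_get_prev_sample` below implements formula (9) of the PNDM paper. For `epsilon` prediction it is algebraically the same as the familiar "estimate x_0, then move it to the previous noise level" update; a toy numeric check with assumed values:

    alpha_prod_t, alpha_prod_t_prev = 0.5, 0.8        # assumed cumulative alphas
    beta_prod_t, beta_prod_t_prev = 1 - alpha_prod_t, 1 - alpha_prod_t_prev
    sample, model_output = 1.3, -0.7                  # toy x_t and predicted noise

    # Formula (9), as written in _get_prev_sample.
    sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
    denom = alpha_prod_t * beta_prod_t_prev**0.5 + (alpha_prod_t * beta_prod_t * alpha_prod_t_prev) ** 0.5
    prev_pndm = sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / denom

    # The same step written as "predict x_0, then re-noise to the previous level".
    pred_x0 = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    prev_ddim = alpha_prod_t_prev**0.5 * pred_x0 + beta_prod_t_prev**0.5 * model_output

    print(prev_pndm, prev_ddim)   # identical up to floating point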
+ """ + return sample + + def _get_prev_sample(self, sample, timestep, prev_timestep, model_output): + # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf + # this function computes x_(t−δ) using the formula of (9) + # Note that x_t needs to be added to both sides of the equation + + # Notation ( -> + # alpha_prod_t -> α_t + # alpha_prod_t_prev -> α_(t−δ) + # beta_prod_t -> (1 - α_t) + # beta_prod_t_prev -> (1 - α_(t−δ)) + # sample -> x_t + # model_output -> e_θ(x_t, t) + # prev_sample -> x_(t−δ) + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + if self.config.prediction_type == "v_prediction": + model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + elif self.config.prediction_type != "epsilon": + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `v_prediction`" + ) + + # corresponds to (α_(t−δ) - α_t) divided by + # denominator of x_t in formula (9) and plus 1 + # Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqr(α_t))) = + # sqrt(α_(t−δ)) / sqrt(α_t)) + sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5) + + # corresponds to denominator of e_θ(x_t, t) in formula (9) + model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + ( + alpha_prod_t * beta_prod_t * alpha_prod_t_prev + ) ** (0.5) + + # full formula (9) + prev_sample = ( + sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff + ) + + return prev_sample + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_pndm_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_pndm_flax.py new file mode 100644 index 000000000..3ac3ba5ca --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_pndm_flax.py @@ -0,0 +1,509 @@ +# Copyright 2024 Zhejiang University Team and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax +import jax.numpy as jnp + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import ( + CommonSchedulerState, + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + add_noise_common, +) + + +@flax.struct.dataclass +class PNDMSchedulerState: + common: CommonSchedulerState + final_alpha_cumprod: jnp.ndarray + + # setable values + init_noise_sigma: jnp.ndarray + timesteps: jnp.ndarray + num_inference_steps: Optional[int] = None + prk_timesteps: Optional[jnp.ndarray] = None + plms_timesteps: Optional[jnp.ndarray] = None + + # running values + cur_model_output: Optional[jnp.ndarray] = None + counter: Optional[jnp.int32] = None + cur_sample: Optional[jnp.ndarray] = None + ets: Optional[jnp.ndarray] = None + + @classmethod + def create( + cls, + common: CommonSchedulerState, + final_alpha_cumprod: jnp.ndarray, + init_noise_sigma: jnp.ndarray, + timesteps: jnp.ndarray, + ): + return cls( + common=common, + final_alpha_cumprod=final_alpha_cumprod, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + +@dataclass +class FlaxPNDMSchedulerOutput(FlaxSchedulerOutput): + state: PNDMSchedulerState + + +class FlaxPNDMScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + Pseudo numerical methods for diffusion models (PNDM) proposes using more advanced ODE integration techniques, + namely Runge-Kutta method and a linear multi-step method. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2202.09778 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`jnp.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + skip_prk_steps (`bool`): + allows the scheduler to skip the Runge-Kutta steps that are defined in the original paper as being required + before plms steps; defaults to `False`. + set_alpha_to_one (`bool`, default `False`): + each diffusion step uses the value of alphas product at that step and at the previous one. For the final + step there is no previous alpha. 
When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the value of alpha at step 0. + steps_offset (`int`, default `0`): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, default `epsilon`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 + https://imagen.research.google/video/paper.pdf) + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + the `dtype` used for params and computation. + """ + + _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] + + dtype: jnp.dtype + pndm_order: int + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[jnp.ndarray] = None, + skip_prk_steps: bool = False, + set_alpha_to_one: bool = False, + steps_offset: int = 0, + prediction_type: str = "epsilon", + dtype: jnp.dtype = jnp.float32, + ): + self.dtype = dtype + + # For now we only support F-PNDM, i.e. the runge-kutta method + # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf + # mainly at formula (9), (12), (13) and the Algorithm 2. + self.pndm_order = 4 + + def create_state(self, common: Optional[CommonSchedulerState] = None) -> PNDMSchedulerState: + if common is None: + common = CommonSchedulerState.create(self) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + final_alpha_cumprod = ( + jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0] + ) + + # standard deviation of the initial noise distribution + init_noise_sigma = jnp.array(1.0, dtype=self.dtype) + + timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] + + return PNDMSchedulerState.create( + common=common, + final_alpha_cumprod=final_alpha_cumprod, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + def set_timesteps(self, state: PNDMSchedulerState, num_inference_steps: int, shape: Tuple) -> PNDMSchedulerState: + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`PNDMSchedulerState`): + the `FlaxPNDMScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. + shape (`Tuple`): + the shape of the samples to be generated. + """ + + step_ratio = self.config.num_train_timesteps // num_inference_steps + # creates integer timesteps by multiplying by ratio + # rounding to avoid issues when num_inference_step is power of 3 + _timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round() + self.config.steps_offset + + if self.config.skip_prk_steps: + # for some models like stable diffusion the prk steps can/should be skipped to + # produce better results. 
When using PNDM with `self.config.skip_prk_steps` the implementation + # is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51 + + prk_timesteps = jnp.array([], dtype=jnp.int32) + plms_timesteps = jnp.concatenate([_timesteps[:-1], _timesteps[-2:-1], _timesteps[-1:]])[::-1] + + else: + prk_timesteps = _timesteps[-self.pndm_order :].repeat(2) + jnp.tile( + jnp.array([0, self.config.num_train_timesteps // num_inference_steps // 2], dtype=jnp.int32), + self.pndm_order, + ) + + prk_timesteps = (prk_timesteps[:-1].repeat(2)[1:-1])[::-1] + plms_timesteps = _timesteps[:-3][::-1] + + timesteps = jnp.concatenate([prk_timesteps, plms_timesteps]) + + # initial running values + + cur_model_output = jnp.zeros(shape, dtype=self.dtype) + counter = jnp.int32(0) + cur_sample = jnp.zeros(shape, dtype=self.dtype) + ets = jnp.zeros((4,) + shape, dtype=self.dtype) + + return state.replace( + timesteps=timesteps, + num_inference_steps=num_inference_steps, + prk_timesteps=prk_timesteps, + plms_timesteps=plms_timesteps, + cur_model_output=cur_model_output, + counter=counter, + cur_sample=cur_sample, + ets=ets, + ) + + def scale_model_input( + self, state: PNDMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None + ) -> jnp.ndarray: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. + sample (`jnp.ndarray`): input sample + timestep (`int`, optional): current timestep + + Returns: + `jnp.ndarray`: scaled input sample + """ + return sample + + def step( + self, + state: PNDMSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + return_dict: bool = True, + ) -> Union[FlaxPNDMSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + This function calls `step_prk()` or `step_plms()` depending on the internal variable `counter`. + + Args: + state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class + + Returns: + [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. 
+ + """ + + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.config.skip_prk_steps: + prev_sample, state = self.step_plms(state, model_output, timestep, sample) + else: + prk_prev_sample, prk_state = self.step_prk(state, model_output, timestep, sample) + plms_prev_sample, plms_state = self.step_plms(state, model_output, timestep, sample) + + cond = state.counter < len(state.prk_timesteps) + + prev_sample = jax.lax.select(cond, prk_prev_sample, plms_prev_sample) + + state = state.replace( + cur_model_output=jax.lax.select(cond, prk_state.cur_model_output, plms_state.cur_model_output), + ets=jax.lax.select(cond, prk_state.ets, plms_state.ets), + cur_sample=jax.lax.select(cond, prk_state.cur_sample, plms_state.cur_sample), + counter=jax.lax.select(cond, prk_state.counter, plms_state.counter), + ) + + if not return_dict: + return (prev_sample, state) + + return FlaxPNDMSchedulerOutput(prev_sample=prev_sample, state=state) + + def step_prk( + self, + state: PNDMSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + ) -> Union[FlaxPNDMSchedulerOutput, Tuple]: + """ + Step function propagating the sample with the Runge-Kutta method. RK takes 4 forward passes to approximate the + solution to the differential equation. + + Args: + state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class + + Returns: + [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. 
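# --- editor's illustrative sketch (not part of the patch) ---
# `step()` above cannot branch with a Python `if` on `state.counter` under
# `jit`, so it computes both the PRK and PLMS candidates and picks one
# field-wise with `jax.lax.select` on a scalar condition. A standalone
# analogue of that pattern (toy arrays, illustration only):
import jax
import jax.numpy as jnp

def select_branch(counter, prk_value, plms_value, num_prk_steps):
    cond = counter < num_prk_steps   # scalar boolean, as in step() above
    return jax.lax.select(cond, prk_value, plms_value)

picked = jax.jit(select_branch)(jnp.int32(3), jnp.ones((2, 2)), jnp.zeros((2, 2)), 12)
# counter 3 < 12, so `picked` is the "PRK" candidate (the array of ones)
# --- end of sketch ---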
+ + """ + + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + diff_to_prev = jnp.where( + state.counter % 2, 0, self.config.num_train_timesteps // state.num_inference_steps // 2 + ) + prev_timestep = timestep - diff_to_prev + timestep = state.prk_timesteps[state.counter // 4 * 4] + + model_output = jax.lax.select( + (state.counter % 4) != 3, + model_output, # remainder 0, 1, 2 + state.cur_model_output + 1 / 6 * model_output, # remainder 3 + ) + + state = state.replace( + cur_model_output=jax.lax.select_n( + state.counter % 4, + state.cur_model_output + 1 / 6 * model_output, # remainder 0 + state.cur_model_output + 1 / 3 * model_output, # remainder 1 + state.cur_model_output + 1 / 3 * model_output, # remainder 2 + jnp.zeros_like(state.cur_model_output), # remainder 3 + ), + ets=jax.lax.select( + (state.counter % 4) == 0, + state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), # remainder 0 + state.ets, # remainder 1, 2, 3 + ), + cur_sample=jax.lax.select( + (state.counter % 4) == 0, + sample, # remainder 0 + state.cur_sample, # remainder 1, 2, 3 + ), + ) + + cur_sample = state.cur_sample + prev_sample = self._get_prev_sample(state, cur_sample, timestep, prev_timestep, model_output) + state = state.replace(counter=state.counter + 1) + + return (prev_sample, state) + + def step_plms( + self, + state: PNDMSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + ) -> Union[FlaxPNDMSchedulerOutput, Tuple]: + """ + Step function propagating the sample with the linear multi-step method. This has one forward pass with multiple + times to approximate the solution. + + Args: + state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class + + Returns: + [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. 
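# --- editor's illustrative sketch (not part of the patch) ---
# The `lax.select_n` bookkeeping in `step_prk` above accumulates the four model
# evaluations with the classic Runge-Kutta weights 1/6, 1/3, 1/3, 1/6. The same
# bookkeeping in plain Python, with toy scalar "model outputs":
outputs = [2.0, 4.0, 6.0, 8.0]     # e1..e4, illustration only
cur = 0.0
for counter, e_k in enumerate(outputs):
    if counter % 4 == 3:
        e_k = cur + e_k / 6        # remainder 3: fold the final output in ...
        cur = 0.0                  # ... and reset the accumulator
    elif counter % 4 == 0:
        cur = cur + e_k / 6
    else:                          # remainder 1 or 2
        cur = cur + e_k / 3
# the value fed to _get_prev_sample on the 4th call is the RK4 average
assert abs(e_k - (2.0 + 2 * 4.0 + 2 * 6.0 + 8.0) / 6) < 1e-9
# --- end of sketch ---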
+ + """ + + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + # NOTE: There is no way to check in the jitted runtime if the prk mode was ran before + + prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps + prev_timestep = jnp.where(prev_timestep > 0, prev_timestep, 0) + + # Reference: + # if state.counter != 1: + # state.ets.append(model_output) + # else: + # prev_timestep = timestep + # timestep = timestep + self.config.num_train_timesteps // state.num_inference_steps + + prev_timestep = jnp.where(state.counter == 1, timestep, prev_timestep) + timestep = jnp.where( + state.counter == 1, timestep + self.config.num_train_timesteps // state.num_inference_steps, timestep + ) + + # Reference: + # if len(state.ets) == 1 and state.counter == 0: + # model_output = model_output + # state.cur_sample = sample + # elif len(state.ets) == 1 and state.counter == 1: + # model_output = (model_output + state.ets[-1]) / 2 + # sample = state.cur_sample + # state.cur_sample = None + # elif len(state.ets) == 2: + # model_output = (3 * state.ets[-1] - state.ets[-2]) / 2 + # elif len(state.ets) == 3: + # model_output = (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12 + # else: + # model_output = (1 / 24) * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4]) + + state = state.replace( + ets=jax.lax.select( + state.counter != 1, + state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), # counter != 1 + state.ets, # counter 1 + ), + cur_sample=jax.lax.select( + state.counter != 1, + sample, # counter != 1 + state.cur_sample, # counter 1 + ), + ) + + state = state.replace( + cur_model_output=jax.lax.select_n( + jnp.clip(state.counter, 0, 4), + model_output, # counter 0 + (model_output + state.ets[-1]) / 2, # counter 1 + (3 * state.ets[-1] - state.ets[-2]) / 2, # counter 2 + (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12, # counter 3 + (1 / 24) + * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4]), # counter >= 4 + ), + ) + + sample = state.cur_sample + model_output = state.cur_model_output + prev_sample = self._get_prev_sample(state, sample, timestep, prev_timestep, model_output) + state = state.replace(counter=state.counter + 1) + + return (prev_sample, state) + + def _get_prev_sample(self, state: PNDMSchedulerState, sample, timestep, prev_timestep, model_output): + # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf + # this function computes x_(t−δ) using the formula of (9) + # Note that x_t needs to be added to both sides of the equation + + # Notation ( -> + # alpha_prod_t -> α_t + # alpha_prod_t_prev -> α_(t−δ) + # beta_prod_t -> (1 - α_t) + # beta_prod_t_prev -> (1 - α_(t−δ)) + # sample -> x_t + # model_output -> e_θ(x_t, t) + # prev_sample -> x_(t−δ) + alpha_prod_t = state.common.alphas_cumprod[timestep] + alpha_prod_t_prev = jnp.where( + prev_timestep >= 0, state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod + ) + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + if self.config.prediction_type == "v_prediction": + model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + elif self.config.prediction_type != "epsilon": + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `v_prediction`" + ) + + # corresponds to 
(α_(t−δ) - α_t) divided by + # denominator of x_t in formula (9) and plus 1 + # Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqr(α_t))) = + # sqrt(α_(t−δ)) / sqrt(α_t)) + sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5) + + # corresponds to denominator of e_θ(x_t, t) in formula (9) + model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + ( + alpha_prod_t * beta_prod_t * alpha_prod_t_prev + ) ** (0.5) + + # full formula (9) + prev_sample = ( + sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff + ) + + return prev_sample + + def add_noise( + self, + state: PNDMSchedulerState, + original_samples: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + return add_noise_common(state.common, original_samples, noise, timesteps) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_repaint.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_repaint.py new file mode 100644 index 000000000..0bff07c4f --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_repaint.py @@ -0,0 +1,361 @@ +# Copyright 2024 ETH Zurich Computer Vision Lab and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +@dataclass +class RePaintSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's step function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample (x_{0}) based on the model output from + the current timestep. `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.FloatTensor + pred_original_sample: torch.FloatTensor + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. 
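# --- editor's illustrative sketch (not part of the patch) ---
# The alpha_bar construction described above, in miniature: betas are chosen so
# that the cumulative product of (1 - beta) tracks the squared-cosine curve at
# the right-hand edge of each interval.
import math
import torch

def cosine_betas(num_steps: int = 10, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]
    return torch.tensor(betas, dtype=torch.float32)

alphas_cumprod = torch.cumprod(1 - cosine_betas(), dim=0)
# --- end of sketch ---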
+ + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class RePaintScheduler(SchedulerMixin, ConfigMixin): + """ + `RePaintScheduler` is a scheduler for DDPM inpainting inside a given mask. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, `squaredcos_cap_v2`, or `sigmoid`. + eta (`float`): + The weight of noise for added noise in diffusion step. If its value is between 0.0 and 1.0 it corresponds + to the DDIM scheduler, and if its value is between -0.0 and 1.0 it corresponds to the DDPM scheduler. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample between -1 and 1 for numerical stability. + + """ + + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + eta: float = 0.0, + trained_betas: Optional[np.ndarray] = None, + clip_sample: bool = True, + ): + if trained_betas is not None: + self.betas = torch.from_numpy(trained_betas) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + elif beta_schedule == "sigmoid": + # GeoDiff sigmoid schedule + betas = torch.linspace(-6, 6, num_train_timesteps) + self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + self.one = torch.tensor(1.0) + + self.final_alpha_cumprod = torch.tensor(1.0) + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) + + self.eta = eta + + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample + + def set_timesteps( + self, + num_inference_steps: int, + jump_length: int = 10, + jump_n_sample: int = 10, + device: Union[str, torch.device] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + jump_length (`int`, defaults to 10): + The number of steps taken forward in time before going backward in time for a single jump (“j” in + RePaint paper). Take a look at Figure 9 and 10 in the paper. + jump_n_sample (`int`, defaults to 10): + The number of times to make a forward time jump for a given chosen time sample. Take a look at Figure 9 + and 10 in the paper. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
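# --- editor's illustrative sketch (not part of the patch) ---
# The `jump_length` / `jump_n_sample` arguments documented above define the
# RePaint resampling schedule: walk the timesteps backwards, and every
# `jump_length` steps jump forward again `jump_n_sample - 1` times. A
# standalone rendering of that schedule (the patch's implementation follows
# below); the function name here is hypothetical.
def repaint_timesteps(num_inference_steps=20, jump_length=5, jump_n_sample=3):
    jumps = {
        j: jump_n_sample - 1
        for j in range(0, num_inference_steps - jump_length, jump_length)
    }
    timesteps, t = [], num_inference_steps
    while t >= 1:
        t -= 1
        timesteps.append(t)
        if jumps.get(t, 0) > 0:
            jumps[t] -= 1
            for _ in range(jump_length):
                t += 1
                timesteps.append(t)
    return timesteps

# e.g. repaint_timesteps(10, 3, 2) yields a descending walk interrupted by
# forward jumps of length 3 at the scheduled resampling points
# --- end of sketch ---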
+ + """ + num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps) + self.num_inference_steps = num_inference_steps + + timesteps = [] + + jumps = {} + for j in range(0, num_inference_steps - jump_length, jump_length): + jumps[j] = jump_n_sample - 1 + + t = num_inference_steps + while t >= 1: + t = t - 1 + timesteps.append(t) + + if jumps.get(t, 0) > 0: + jumps[t] = jumps[t] - 1 + for _ in range(jump_length): + t = t + 1 + timesteps.append(t) + + timesteps = np.array(timesteps) * (self.config.num_train_timesteps // self.num_inference_steps) + self.timesteps = torch.from_numpy(timesteps).to(device) + + def _get_variance(self, t): + prev_timestep = t - self.config.num_train_timesteps // self.num_inference_steps + + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + # For t > 0, compute predicted variance βt (see formula (6) and (7) from + # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get + # previous sample x_{t-1} ~ N(pred_prev_sample, variance) == add + # variance to pred_sample + # Is equivalent to formula (16) in https://arxiv.org/pdf/2010.02502.pdf + # without eta. + # variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t] + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + original_image: torch.FloatTensor, + mask: torch.FloatTensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[RePaintSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + original_image (`torch.FloatTensor`): + The original image to inpaint on. + mask (`torch.FloatTensor`): + The mask where a value of 0.0 indicates which part of the original image to inpaint. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_repaint.RePaintSchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_repaint.RePaintSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_repaint.RePaintSchedulerOutput`] is returned, + otherwise a tuple is returned where the first element is the sample tensor. + + """ + t = timestep + prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps + + # 1. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + + # 2. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 + + # 3. 
Clip "predicted x_0" + if self.config.clip_sample: + pred_original_sample = torch.clamp(pred_original_sample, -1, 1) + + # We choose to follow RePaint Algorithm 1 to get x_{t-1}, however we + # substitute formula (7) in the algorithm coming from DDPM paper + # (formula (4) Algorithm 2 - Sampling) with formula (12) from DDIM paper. + # DDIM schedule gives the same results as DDPM with eta = 1.0 + # Noise is being reused in 7. and 8., but no impact on quality has + # been observed. + + # 5. Add noise + device = model_output.device + noise = randn_tensor(model_output.shape, generator=generator, device=device, dtype=model_output.dtype) + std_dev_t = self.eta * self._get_variance(timestep) ** 0.5 + + variance = 0 + if t > 0 and self.eta > 0: + variance = std_dev_t * noise + + # 6. compute "direction pointing to x_t" of formula (12) + # from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output + + # 7. compute x_{t-1} of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_unknown_part = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction + variance + + # 8. Algorithm 1 Line 5 https://arxiv.org/pdf/2201.09865.pdf + prev_known_part = (alpha_prod_t_prev**0.5) * original_image + ((1 - alpha_prod_t_prev) ** 0.5) * noise + + # 9. Algorithm 1 Line 8 https://arxiv.org/pdf/2201.09865.pdf + pred_prev_sample = mask * prev_known_part + (1.0 - mask) * prev_unknown_part + + if not return_dict: + return ( + pred_prev_sample, + pred_original_sample, + ) + + return RePaintSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) + + def undo_step(self, sample, timestep, generator=None): + n = self.config.num_train_timesteps // self.num_inference_steps + + for i in range(n): + beta = self.betas[timestep + i] + if sample.device.type == "mps": + # randn does not work reproducibly on mps + noise = randn_tensor(sample.shape, dtype=sample.dtype, generator=generator) + noise = noise.to(sample.device) + else: + noise = randn_tensor(sample.shape, generator=generator, device=sample.device, dtype=sample.dtype) + + # 10. Algorithm 1 Line 10 https://arxiv.org/pdf/2201.09865.pdf + sample = (1 - beta) ** 0.5 * sample + beta**0.5 * noise + + return sample + + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + raise NotImplementedError("Use `DDPMScheduler.add_noise()` to train for sampling with RePaint.") + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_sasolver.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_sasolver.py new file mode 100644 index 000000000..b46f6de8a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_sasolver.py @@ -0,0 +1,1124 @@ +# Copyright 2024 Shuchen Xue, etc. in University of Chinese Academy of Sciences Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: check https://arxiv.org/abs/2309.05019 +# The codebase is modified based on https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py + +import math +from typing import Callable, List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import deprecate +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class SASolverScheduler(SchedulerMixin, ConfigMixin): + """ + `SASolverScheduler` is a fast dedicated high-order solver for diffusion SDEs. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + predictor_order (`int`, defaults to 2): + The predictor order which can be `1` or `2` or `3` or '4'. 
It is recommended to use `predictor_order=2` for guided + sampling, and `predictor_order=3` for unconditional sampling. + corrector_order (`int`, defaults to 2): + The corrector order which can be `1` or `2` or `3` or '4'. It is recommended to use `corrector_order=2` for guided + sampling, and `corrector_order=3` for unconditional sampling. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + tau_func (`Callable`, *optional*): + Stochasticity during the sampling. Default in init is `lambda t: 1 if t >= 200 and t <= 800 else 0`. SA-Solver + will sample from vanilla diffusion ODE if tau_func is set to `lambda t: 0`. SA-Solver will sample from vanilla + diffusion SDE if tau_func is set to `lambda t: 1`. For more details, please check https://arxiv.org/abs/2309.05019 + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++"`. + algorithm_type (`str`, defaults to `data_prediction`): + Algorithm type for the solver; can be `data_prediction` or `noise_prediction`. It is recommended to use `data_prediction` + with `solver_order=2` for guided sampling like in Stable Diffusion. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Default = True. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {σi}. + lambda_min_clipped (`float`, defaults to `-inf`): + Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the + cosine (`squaredcos_cap_v2`) noise schedule. + variance_type (`str`, *optional*): + Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output + contains the predicted Gaussian variance. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. 
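# --- editor's illustrative sketch (not part of the patch) ---
# A minimal usage sketch for the constructor arguments documented above,
# assuming the vendored `diffusers` package added by this patch is importable
# and exports SASolverScheduler at the top level; values are illustrative only.
from diffusers import SASolverScheduler

scheduler = SASolverScheduler(
    num_train_timesteps=1000,
    predictor_order=2,               # recommended for guided sampling
    corrector_order=2,
    algorithm_type="data_prediction",
)
scheduler.set_timesteps(num_inference_steps=20)
print(scheduler.timesteps)           # 20 descending training-timestep indices
# --- end of sketch ---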
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + predictor_order: int = 2, + corrector_order: int = 2, + prediction_type: str = "epsilon", + tau_func: Optional[Callable] = None, + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "data_prediction", + lower_order_final: bool = True, + use_karras_sigmas: Optional[bool] = False, + lambda_min_clipped: float = -float("inf"), + variance_type: Optional[str] = None, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = ( + torch.linspace( + beta_start**0.5, + beta_end**0.5, + num_train_timesteps, + dtype=torch.float32, + ) + ** 2 + ) + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + if algorithm_type not in ["data_prediction", "noise_prediction"]: + raise NotImplementedError(f"{algorithm_type} does is not implemented for {self.__class__}") + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.timestep_list = [None] * max(predictor_order, corrector_order - 1) + self.model_outputs = [None] * max(predictor_order, corrector_order - 1) + + if tau_func is None: + self.tau_func = lambda t: 1 if t >= 200 and t <= 800 else 0 + else: + self.tau_func = tau_func + self.predict_x0 = algorithm_type == "data_prediction" + self.lower_order_nums = 0 + self.last_sample = None + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. 
+ + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + # Clipping the minimum of all lambda(t) for numerical stability. + # This is critical for cosine (squaredcos_cap_v2) noise schedule. + clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) + last_timestep = ((self.config.num_train_timesteps - clipped_idx).numpy()).item() + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].copy().astype(np.int64) + ) + + elif self.config.timestep_spacing == "leading": + step_ratio = last_timestep // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + if self.config.use_karras_sigmas: + log_sigmas = np.log(sigmas) + sigmas = np.flip(sigmas).copy() + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas) + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + + self.num_inference_steps = len(timesteps) + self.model_outputs = [ + None, + ] * max(self.config.predictor_order, self.config.corrector_order - 1) + self.lower_order_nums = 0 + self.last_sample = None + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
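# --- editor's illustrative sketch (not part of the patch) ---
# Dynamic thresholding as described above: clamp each x0 prediction to its
# per-sample s-th percentile of |x0| (never below 1), then rescale by s.
# A minimal standalone form with hypothetical names:
import torch

def dynamic_threshold(x0: torch.Tensor, ratio: float = 0.995, max_value: float = 1.0) -> torch.Tensor:
    b = x0.shape[0]
    flat = x0.reshape(b, -1).abs()
    s = torch.quantile(flat, ratio, dim=1).clamp(min=1, max=max_value)
    s = s.view(b, *([1] * (x0.ndim - 1)))   # broadcast back over C, H, W
    return x0.clamp(-s, s) / s

x0 = torch.randn(2, 3, 8, 8) * 3
x0_thresholded = dynamic_threshold(x0)
# --- end of sketch ---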
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def convert_model_output( + self, + model_output: torch.FloatTensor, + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + """ + Convert the model output to the corresponding type the data_prediction/noise_prediction algorithm needs. Noise_prediction is + designed to discretize an integral of the noise prediction model, and data_prediction is designed to discretize an + integral of the data prediction model. + + + + The algorithm and model type are decoupled. 
You can use either data_prediction or noise_prediction for both noise + prediction and data prediction models. + + + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.FloatTensor`: + The converted model output. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + # SA-Solver_data_prediction needs to solve an integral of the data prediction model. + if self.config.algorithm_type in ["data_prediction"]: + if self.config.prediction_type == "epsilon": + # SA-Solver only needs the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + model_output = model_output[:, :3] + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the SASolverScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + + # SA-Solver_noise_prediction needs to solve an integral of the noise prediction model. + elif self.config.algorithm_type in ["noise_prediction"]: + if self.config.prediction_type == "epsilon": + # SA-Solver only needs the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + epsilon = model_output[:, :3] + else: + epsilon = model_output + elif self.config.prediction_type == "sample": + epsilon = (sample - alpha_t * model_output) / sigma_t + elif self.config.prediction_type == "v_prediction": + epsilon = alpha_t * model_output + sigma_t * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the SASolverScheduler." 
+ ) + + if self.config.thresholding: + alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep] + x0_pred = (sample - sigma_t * epsilon) / alpha_t + x0_pred = self._threshold_sample(x0_pred) + epsilon = (sample - alpha_t * x0_pred) / sigma_t + + return epsilon + + def get_coefficients_exponential_negative(self, order, interval_start, interval_end): + """ + Calculate the integral of exp(-x) * x^order dx from interval_start to interval_end + """ + assert order in [0, 1, 2, 3], "order is only supported for 0, 1, 2 and 3" + + if order == 0: + return torch.exp(-interval_end) * (torch.exp(interval_end - interval_start) - 1) + elif order == 1: + return torch.exp(-interval_end) * ( + (interval_start + 1) * torch.exp(interval_end - interval_start) - (interval_end + 1) + ) + elif order == 2: + return torch.exp(-interval_end) * ( + (interval_start**2 + 2 * interval_start + 2) * torch.exp(interval_end - interval_start) + - (interval_end**2 + 2 * interval_end + 2) + ) + elif order == 3: + return torch.exp(-interval_end) * ( + (interval_start**3 + 3 * interval_start**2 + 6 * interval_start + 6) + * torch.exp(interval_end - interval_start) + - (interval_end**3 + 3 * interval_end**2 + 6 * interval_end + 6) + ) + + def get_coefficients_exponential_positive(self, order, interval_start, interval_end, tau): + """ + Calculate the integral of exp(x(1+tau^2)) * x^order dx from interval_start to interval_end + """ + assert order in [0, 1, 2, 3], "order is only supported for 0, 1, 2 and 3" + + # after change of variable(cov) + interval_end_cov = (1 + tau**2) * interval_end + interval_start_cov = (1 + tau**2) * interval_start + + if order == 0: + return ( + torch.exp(interval_end_cov) * (1 - torch.exp(-(interval_end_cov - interval_start_cov))) / (1 + tau**2) + ) + elif order == 1: + return ( + torch.exp(interval_end_cov) + * ( + (interval_end_cov - 1) + - (interval_start_cov - 1) * torch.exp(-(interval_end_cov - interval_start_cov)) + ) + / ((1 + tau**2) ** 2) + ) + elif order == 2: + return ( + torch.exp(interval_end_cov) + * ( + (interval_end_cov**2 - 2 * interval_end_cov + 2) + - (interval_start_cov**2 - 2 * interval_start_cov + 2) + * torch.exp(-(interval_end_cov - interval_start_cov)) + ) + / ((1 + tau**2) ** 3) + ) + elif order == 3: + return ( + torch.exp(interval_end_cov) + * ( + (interval_end_cov**3 - 3 * interval_end_cov**2 + 6 * interval_end_cov - 6) + - (interval_start_cov**3 - 3 * interval_start_cov**2 + 6 * interval_start_cov - 6) + * torch.exp(-(interval_end_cov - interval_start_cov)) + ) + / ((1 + tau**2) ** 4) + ) + + def lagrange_polynomial_coefficient(self, order, lambda_list): + """ + Calculate the coefficient of lagrange polynomial + """ + + assert order in [0, 1, 2, 3] + assert order == len(lambda_list) - 1 + if order == 0: + return [[1]] + elif order == 1: + return [ + [ + 1 / (lambda_list[0] - lambda_list[1]), + -lambda_list[1] / (lambda_list[0] - lambda_list[1]), + ], + [ + 1 / (lambda_list[1] - lambda_list[0]), + -lambda_list[0] / (lambda_list[1] - lambda_list[0]), + ], + ] + elif order == 2: + denominator1 = (lambda_list[0] - lambda_list[1]) * (lambda_list[0] - lambda_list[2]) + denominator2 = (lambda_list[1] - lambda_list[0]) * (lambda_list[1] - lambda_list[2]) + denominator3 = (lambda_list[2] - lambda_list[0]) * (lambda_list[2] - lambda_list[1]) + return [ + [ + 1 / denominator1, + (-lambda_list[1] - lambda_list[2]) / denominator1, + lambda_list[1] * lambda_list[2] / denominator1, + ], + [ + 1 / denominator2, + (-lambda_list[0] - lambda_list[2]) / denominator2, + lambda_list[0] 
* lambda_list[2] / denominator2, + ], + [ + 1 / denominator3, + (-lambda_list[0] - lambda_list[1]) / denominator3, + lambda_list[0] * lambda_list[1] / denominator3, + ], + ] + elif order == 3: + denominator1 = ( + (lambda_list[0] - lambda_list[1]) + * (lambda_list[0] - lambda_list[2]) + * (lambda_list[0] - lambda_list[3]) + ) + denominator2 = ( + (lambda_list[1] - lambda_list[0]) + * (lambda_list[1] - lambda_list[2]) + * (lambda_list[1] - lambda_list[3]) + ) + denominator3 = ( + (lambda_list[2] - lambda_list[0]) + * (lambda_list[2] - lambda_list[1]) + * (lambda_list[2] - lambda_list[3]) + ) + denominator4 = ( + (lambda_list[3] - lambda_list[0]) + * (lambda_list[3] - lambda_list[1]) + * (lambda_list[3] - lambda_list[2]) + ) + return [ + [ + 1 / denominator1, + (-lambda_list[1] - lambda_list[2] - lambda_list[3]) / denominator1, + ( + lambda_list[1] * lambda_list[2] + + lambda_list[1] * lambda_list[3] + + lambda_list[2] * lambda_list[3] + ) + / denominator1, + (-lambda_list[1] * lambda_list[2] * lambda_list[3]) / denominator1, + ], + [ + 1 / denominator2, + (-lambda_list[0] - lambda_list[2] - lambda_list[3]) / denominator2, + ( + lambda_list[0] * lambda_list[2] + + lambda_list[0] * lambda_list[3] + + lambda_list[2] * lambda_list[3] + ) + / denominator2, + (-lambda_list[0] * lambda_list[2] * lambda_list[3]) / denominator2, + ], + [ + 1 / denominator3, + (-lambda_list[0] - lambda_list[1] - lambda_list[3]) / denominator3, + ( + lambda_list[0] * lambda_list[1] + + lambda_list[0] * lambda_list[3] + + lambda_list[1] * lambda_list[3] + ) + / denominator3, + (-lambda_list[0] * lambda_list[1] * lambda_list[3]) / denominator3, + ], + [ + 1 / denominator4, + (-lambda_list[0] - lambda_list[1] - lambda_list[2]) / denominator4, + ( + lambda_list[0] * lambda_list[1] + + lambda_list[0] * lambda_list[2] + + lambda_list[1] * lambda_list[2] + ) + / denominator4, + (-lambda_list[0] * lambda_list[1] * lambda_list[2]) / denominator4, + ], + ] + + def get_coefficients_fn(self, order, interval_start, interval_end, lambda_list, tau): + assert order in [1, 2, 3, 4] + assert order == len(lambda_list), "the length of lambda list must be equal to the order" + coefficients = [] + lagrange_coefficient = self.lagrange_polynomial_coefficient(order - 1, lambda_list) + for i in range(order): + coefficient = 0 + for j in range(order): + if self.predict_x0: + coefficient += lagrange_coefficient[i][j] * self.get_coefficients_exponential_positive( + order - 1 - j, interval_start, interval_end, tau + ) + else: + coefficient += lagrange_coefficient[i][j] * self.get_coefficients_exponential_negative( + order - 1 - j, interval_start, interval_end + ) + coefficients.append(coefficient) + assert len(coefficients) == order, "the length of coefficients does not match the order" + return coefficients + + def stochastic_adams_bashforth_update( + self, + model_output: torch.FloatTensor, + *args, + sample: torch.FloatTensor, + noise: torch.FloatTensor, + order: int, + tau: torch.FloatTensor, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the SA-Predictor. + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model at the current timestep. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + order (`int`): + The order of SA-Predictor at this timestep. + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. 
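# --- editor's illustrative sketch (not part of the patch) ---
# Quick numerical check of the closed forms above: for order 1,
# get_coefficients_exponential_negative evaluates the integral of
# exp(-x) * x over [a, b]. Compare against trapezoidal quadrature on a toy
# interval (values chosen only for illustration).
import torch

a, b = torch.tensor(0.3), torch.tensor(1.2)
closed_form = torch.exp(-b) * ((a + 1) * torch.exp(b - a) - (b + 1))
x = torch.linspace(float(a), float(b), 20001)
numeric = torch.trapezoid(torch.exp(-x) * x, x)
assert torch.allclose(closed_form, numeric, atol=1e-4)
# --- end of sketch ---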
+ """ + prev_timestep = args[0] if len(args) > 0 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if noise is None: + if len(args) > 2: + noise = args[2] + else: + raise ValueError(" missing `noise` as a required keyward argument") + if order is None: + if len(args) > 3: + order = args[3] + else: + raise ValueError(" missing `order` as a required keyward argument") + if tau is None: + if len(args) > 4: + tau = args[4] + else: + raise ValueError(" missing `tau` as a required keyward argument") + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + model_output_list = self.model_outputs + sigma_t, sigma_s0 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + ) + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + + gradient_part = torch.zeros_like(sample) + h = lambda_t - lambda_s0 + lambda_list = [] + + for i in range(order): + si = self.step_index - i + alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) + lambda_si = torch.log(alpha_si) - torch.log(sigma_si) + lambda_list.append(lambda_si) + + gradient_coefficients = self.get_coefficients_fn(order, lambda_s0, lambda_t, lambda_list, tau) + + x = sample + + if self.predict_x0: + if ( + order == 2 + ): ## if order = 2 we do a modification that does not influence the convergence order similar to unipc. Note: This is used only for few steps sampling. + # The added term is O(h^3). Empirically we find it will slightly improve the image quality. 
+ # ODE case + # gradient_coefficients[0] += 1.0 * torch.exp(lambda_t) * (h ** 2 / 2 - (h - 1 + torch.exp(-h))) / (ns.marginal_lambda(t_prev_list[-1]) - ns.marginal_lambda(t_prev_list[-2])) + # gradient_coefficients[1] -= 1.0 * torch.exp(lambda_t) * (h ** 2 / 2 - (h - 1 + torch.exp(-h))) / (ns.marginal_lambda(t_prev_list[-1]) - ns.marginal_lambda(t_prev_list[-2])) + temp_sigma = self.sigmas[self.step_index - 1] + temp_alpha_s, temp_sigma_s = self._sigma_to_alpha_sigma_t(temp_sigma) + temp_lambda_s = torch.log(temp_alpha_s) - torch.log(temp_sigma_s) + gradient_coefficients[0] += ( + 1.0 + * torch.exp((1 + tau**2) * lambda_t) + * (h**2 / 2 - (h * (1 + tau**2) - 1 + torch.exp((1 + tau**2) * (-h))) / ((1 + tau**2) ** 2)) + / (lambda_s0 - temp_lambda_s) + ) + gradient_coefficients[1] -= ( + 1.0 + * torch.exp((1 + tau**2) * lambda_t) + * (h**2 / 2 - (h * (1 + tau**2) - 1 + torch.exp((1 + tau**2) * (-h))) / ((1 + tau**2) ** 2)) + / (lambda_s0 - temp_lambda_s) + ) + + for i in range(order): + if self.predict_x0: + gradient_part += ( + (1 + tau**2) + * sigma_t + * torch.exp(-(tau**2) * lambda_t) + * gradient_coefficients[i] + * model_output_list[-(i + 1)] + ) + else: + gradient_part += -(1 + tau**2) * alpha_t * gradient_coefficients[i] * model_output_list[-(i + 1)] + + if self.predict_x0: + noise_part = sigma_t * torch.sqrt(1 - torch.exp(-2 * tau**2 * h)) * noise + else: + noise_part = tau * sigma_t * torch.sqrt(torch.exp(2 * h) - 1) * noise + + if self.predict_x0: + x_t = torch.exp(-(tau**2) * h) * (sigma_t / sigma_s0) * x + gradient_part + noise_part + else: + x_t = (alpha_t / alpha_s0) * x + gradient_part + noise_part + + x_t = x_t.to(x.dtype) + return x_t + + def stochastic_adams_moulton_update( + self, + this_model_output: torch.FloatTensor, + *args, + last_sample: torch.FloatTensor, + last_noise: torch.FloatTensor, + this_sample: torch.FloatTensor, + order: int, + tau: torch.FloatTensor, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the SA-Corrector. + + Args: + this_model_output (`torch.FloatTensor`): + The model outputs at `x_t`. + this_timestep (`int`): + The current timestep `t`. + last_sample (`torch.FloatTensor`): + The generated sample before the last predictor `x_{t-1}`. + this_sample (`torch.FloatTensor`): + The generated sample after the last predictor `x_{t}`. + order (`int`): + The order of SA-Corrector at this step. + + Returns: + `torch.FloatTensor`: + The corrected sample tensor at the current timestep. 
+ """ + + this_timestep = args[0] if len(args) > 0 else kwargs.pop("this_timestep", None) + if last_sample is None: + if len(args) > 1: + last_sample = args[1] + else: + raise ValueError(" missing`last_sample` as a required keyward argument") + if last_noise is None: + if len(args) > 2: + last_noise = args[2] + else: + raise ValueError(" missing`last_noise` as a required keyward argument") + if this_sample is None: + if len(args) > 3: + this_sample = args[3] + else: + raise ValueError(" missing`this_sample` as a required keyward argument") + if order is None: + if len(args) > 4: + order = args[4] + else: + raise ValueError(" missing`order` as a required keyward argument") + if tau is None: + if len(args) > 5: + tau = args[5] + else: + raise ValueError(" missing`tau` as a required keyward argument") + if this_timestep is not None: + deprecate( + "this_timestep", + "1.0.0", + "Passing `this_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + model_output_list = self.model_outputs + sigma_t, sigma_s0 = ( + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + gradient_part = torch.zeros_like(this_sample) + h = lambda_t - lambda_s0 + lambda_list = [] + for i in range(order): + si = self.step_index - i + alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) + lambda_si = torch.log(alpha_si) - torch.log(sigma_si) + lambda_list.append(lambda_si) + + model_prev_list = model_output_list + [this_model_output] + + gradient_coefficients = self.get_coefficients_fn(order, lambda_s0, lambda_t, lambda_list, tau) + + x = last_sample + + if self.predict_x0: + if ( + order == 2 + ): ## if order = 2 we do a modification that does not influence the convergence order similar to UniPC. Note: This is used only for few steps sampling. + # The added term is O(h^3). Empirically we find it will slightly improve the image quality. 
+ # ODE case + # gradient_coefficients[0] += 1.0 * torch.exp(lambda_t) * (h / 2 - (h - 1 + torch.exp(-h)) / h) + # gradient_coefficients[1] -= 1.0 * torch.exp(lambda_t) * (h / 2 - (h - 1 + torch.exp(-h)) / h) + gradient_coefficients[0] += ( + 1.0 + * torch.exp((1 + tau**2) * lambda_t) + * (h / 2 - (h * (1 + tau**2) - 1 + torch.exp((1 + tau**2) * (-h))) / ((1 + tau**2) ** 2 * h)) + ) + gradient_coefficients[1] -= ( + 1.0 + * torch.exp((1 + tau**2) * lambda_t) + * (h / 2 - (h * (1 + tau**2) - 1 + torch.exp((1 + tau**2) * (-h))) / ((1 + tau**2) ** 2 * h)) + ) + + for i in range(order): + if self.predict_x0: + gradient_part += ( + (1 + tau**2) + * sigma_t + * torch.exp(-(tau**2) * lambda_t) + * gradient_coefficients[i] + * model_prev_list[-(i + 1)] + ) + else: + gradient_part += -(1 + tau**2) * alpha_t * gradient_coefficients[i] * model_prev_list[-(i + 1)] + + if self.predict_x0: + noise_part = sigma_t * torch.sqrt(1 - torch.exp(-2 * tau**2 * h)) * last_noise + else: + noise_part = tau * sigma_t * torch.sqrt(torch.exp(2 * h) - 1) * last_noise + + if self.predict_x0: + x_t = torch.exp(-(tau**2) * h) * (sigma_t / sigma_s0) * x + gradient_part + noise_part + else: + x_t = (alpha_t / alpha_s0) * x + gradient_part + noise_part + + x_t = x_t.to(x.dtype) + return x_t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + generator=None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the SA-Solver. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. 
+ + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + use_corrector = self.step_index > 0 and self.last_sample is not None + + model_output_convert = self.convert_model_output(model_output, sample=sample) + + if use_corrector: + current_tau = self.tau_func(self.timestep_list[-1]) + sample = self.stochastic_adams_moulton_update( + this_model_output=model_output_convert, + last_sample=self.last_sample, + last_noise=self.last_noise, + this_sample=sample, + order=self.this_corrector_order, + tau=current_tau, + ) + + for i in range(max(self.config.predictor_order, self.config.corrector_order - 1) - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.timestep_list[i] = self.timestep_list[i + 1] + + self.model_outputs[-1] = model_output_convert + self.timestep_list[-1] = timestep + + noise = randn_tensor( + model_output.shape, + generator=generator, + device=model_output.device, + dtype=model_output.dtype, + ) + + if self.config.lower_order_final: + this_predictor_order = min(self.config.predictor_order, len(self.timesteps) - self.step_index) + this_corrector_order = min(self.config.corrector_order, len(self.timesteps) - self.step_index + 1) + else: + this_predictor_order = self.config.predictor_order + this_corrector_order = self.config.corrector_order + + self.this_predictor_order = min(this_predictor_order, self.lower_order_nums + 1) # warmup for multistep + self.this_corrector_order = min(this_corrector_order, self.lower_order_nums + 2) # warmup for multistep + assert self.this_predictor_order > 0 + assert self.this_corrector_order > 0 + + self.last_sample = sample + self.last_noise = noise + + current_tau = self.tau_func(self.timestep_list[-1]) + prev_sample = self.stochastic_adams_bashforth_update( + model_output=model_output_convert, + sample=sample, + noise=noise, + order=self.this_predictor_order, + tau=current_tau, + ) + + if self.lower_order_nums < max(self.config.predictor_order, self.config.corrector_order - 1): + self.lower_order_nums += 1 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + + Returns: + `torch.FloatTensor`: + A scaled input sample. 
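Because `step` above handles the predictor/corrector bookkeeping internally, driving this scheduler from a pipeline is just a scheduler swap. A hedged usage sketch (the exported class name `SASolverScheduler` and the checkpoint id are assumptions, not shown in this hunk):

import torch
from diffusers import StableDiffusionPipeline, SASolverScheduler  # exported name assumed

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16  # example checkpoint
).to("cuda")

# Reuse the existing scheduler config so the beta schedule and prediction type stay consistent.
pipe.scheduler = SASolverScheduler.from_config(pipe.scheduler.config)

image = pipe("a cartoon creature with blue eyes", num_inference_steps=25).images[0]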
+ """ + return sample + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_sde_ve.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_sde_ve.py new file mode 100644 index 000000000..8f8dd1877 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_sde_ve.py @@ -0,0 +1,301 @@ +# Copyright 2024 Google Brain and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch + +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin, SchedulerOutput + + +@dataclass +class SdeVeOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + prev_sample_mean (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Mean averaged `prev_sample` over previous timesteps. 
+ """ + + prev_sample: torch.FloatTensor + prev_sample_mean: torch.FloatTensor + + +class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin): + """ + `ScoreSdeVeScheduler` is a variance exploding stochastic differential equation (SDE) scheduler. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + snr (`float`, defaults to 0.15): + A coefficient weighting the step from the `model_output` sample (from the network) to the random noise. + sigma_min (`float`, defaults to 0.01): + The initial noise scale for the sigma sequence in the sampling procedure. The minimum sigma should mirror + the distribution of the data. + sigma_max (`float`, defaults to 1348.0): + The maximum value used for the range of continuous timesteps passed into the model. + sampling_eps (`float`, defaults to 1e-5): + The end value of sampling where timesteps decrease progressively from 1 to epsilon. + correct_steps (`int`, defaults to 1): + The number of correction steps performed on a produced sample. + """ + + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 2000, + snr: float = 0.15, + sigma_min: float = 0.01, + sigma_max: float = 1348.0, + sampling_eps: float = 1e-5, + correct_steps: int = 1, + ): + # standard deviation of the initial noise distribution + self.init_noise_sigma = sigma_max + + # setable values + self.timesteps = None + + self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps) + + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample + + def set_timesteps( + self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None + ): + """ + Sets the continuous timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + sampling_eps (`float`, *optional*): + The final timestep value (overrides value given during scheduler instantiation). + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + + """ + sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps + + self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device) + + def set_sigmas( + self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None + ): + """ + Sets the noise scales used for the diffusion chain (to be run before inference). The sigmas control the weight + of the `drift` and `diffusion` components of the sample update. + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + sigma_min (`float`, optional): + The initial noise scale value (overrides value given during scheduler instantiation). 
+ sigma_max (`float`, optional): + The final noise scale value (overrides value given during scheduler instantiation). + sampling_eps (`float`, optional): + The final timestep value (overrides value given during scheduler instantiation). + + """ + sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min + sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max + sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps + if self.timesteps is None: + self.set_timesteps(num_inference_steps, sampling_eps) + + self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) + self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps)) + self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps]) + + def get_adjacent_sigma(self, timesteps, t): + return torch.where( + timesteps == 0, + torch.zeros_like(t.to(timesteps.device)), + self.discrete_sigmas[timesteps - 1].to(timesteps.device), + ) + + def step_pred( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[SdeVeOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_sde_ve.SdeVeOutput`] is returned, otherwise a tuple + is returned where the first element is the sample tensor. 
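For context (an added note; the formulation is the standard one from Song et al., "Score-Based Generative Modeling through Stochastic Differential Equations"), the variance-exploding SDE that this scheduler discretizes pairs a geometric noise scale with a pure-diffusion process,

\[
\sigma(t) = \sigma_{\min}\left(\frac{\sigma_{\max}}{\sigma_{\min}}\right)^{t},
\qquad
\mathrm{d}x = \sqrt{\frac{\mathrm{d}\,[\sigma^2(t)]}{\mathrm{d}t}}\,\mathrm{d}w,
\]

which is why `set_sigmas` above builds `discrete_sigmas` as `exp(linspace(log sigma_min, log sigma_max, N))`: equal steps in \(t\) correspond to equal ratios between consecutive sigmas.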
+ + """ + if self.timesteps is None: + raise ValueError( + "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" + ) + + timestep = timestep * torch.ones( + sample.shape[0], device=sample.device + ) # torch.repeat_interleave(timestep, sample.shape[0]) + timesteps = (timestep * (len(self.timesteps) - 1)).long() + + # mps requires indices to be in the same device, so we use cpu as is the default with cuda + timesteps = timesteps.to(self.discrete_sigmas.device) + + sigma = self.discrete_sigmas[timesteps].to(sample.device) + adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device) + drift = torch.zeros_like(sample) + diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5 + + # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) + # also equation 47 shows the analog from SDE models to ancestral sampling methods + diffusion = diffusion.flatten() + while len(diffusion.shape) < len(sample.shape): + diffusion = diffusion.unsqueeze(-1) + drift = drift - diffusion**2 * model_output + + # equation 6: sample noise for the diffusion term of + noise = randn_tensor( + sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype + ) + prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep + # TODO is the variable diffusion the correct scaling term for the noise? + prev_sample = prev_sample_mean + diffusion * noise # add impact of diffusion field g + + if not return_dict: + return (prev_sample, prev_sample_mean) + + return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean) + + def step_correct( + self, + model_output: torch.FloatTensor, + sample: torch.FloatTensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Correct the predicted sample based on the `model_output` of the network. This is often run repeatedly after + making the prediction for the previous timestep. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_sde_ve.SdeVeOutput`] is returned, otherwise a tuple + is returned where the first element is the sample tensor. + + """ + if self.timesteps is None: + raise ValueError( + "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" + ) + + # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. 
of z" + # sample noise for correction + noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device) + + # compute step size from the model_output, the noise, and the snr + grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean() + noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean() + step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 + step_size = step_size * torch.ones(sample.shape[0]).to(sample.device) + # self.repeat_scalar(step_size, sample.shape[0]) + + # compute corrected sample: model_output term and noise term + step_size = step_size.flatten() + while len(step_size.shape) < len(sample.shape): + step_size = step_size.unsqueeze(-1) + prev_sample_mean = sample + step_size * model_output + prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + timesteps = timesteps.to(original_samples.device) + sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps] + noise = ( + noise * sigmas[:, None, None, None] + if noise is not None + else torch.randn_like(original_samples) * sigmas[:, None, None, None] + ) + noisy_samples = noise + original_samples + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_sde_ve_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_sde_ve_flax.py new file mode 100644 index 000000000..0a8d45d4a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_sde_ve_flax.py @@ -0,0 +1,280 @@ +# Copyright 2024 Google Brain and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax +import jax.numpy as jnp +from jax import random + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left + + +@flax.struct.dataclass +class ScoreSdeVeSchedulerState: + # setable values + timesteps: Optional[jnp.ndarray] = None + discrete_sigmas: Optional[jnp.ndarray] = None + sigmas: Optional[jnp.ndarray] = None + + @classmethod + def create(cls): + return cls() + + +@dataclass +class FlaxSdeVeOutput(FlaxSchedulerOutput): + """ + Output class for the ScoreSdeVeScheduler's step function output. 
+ + Args: + state (`ScoreSdeVeSchedulerState`): + prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + prev_sample_mean (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): + Mean averaged `prev_sample`. Same as `prev_sample`, only mean-averaged over previous timesteps. + """ + + state: ScoreSdeVeSchedulerState + prev_sample: jnp.ndarray + prev_sample_mean: Optional[jnp.ndarray] = None + + +class FlaxScoreSdeVeScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + The variance exploding stochastic differential equation (SDE) scheduler. + + For more information, see the original paper: https://arxiv.org/abs/2011.13456 + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + snr (`float`): + coefficient weighting the step from the model_output sample (from the network) to the random noise. + sigma_min (`float`): + initial noise scale for sigma sequence in sampling procedure. The minimum sigma should mirror the + distribution of the data. + sigma_max (`float`): maximum value used for the range of continuous timesteps passed into the model. + sampling_eps (`float`): the end value of sampling, where timesteps decrease progressively from 1 to + epsilon. + correct_steps (`int`): number of correction steps performed on a produced sample. + """ + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 2000, + snr: float = 0.15, + sigma_min: float = 0.01, + sigma_max: float = 1348.0, + sampling_eps: float = 1e-5, + correct_steps: int = 1, + ): + pass + + def create_state(self): + state = ScoreSdeVeSchedulerState.create() + return self.set_sigmas( + state, + self.config.num_train_timesteps, + self.config.sigma_min, + self.config.sigma_max, + self.config.sampling_eps, + ) + + def set_timesteps( + self, state: ScoreSdeVeSchedulerState, num_inference_steps: int, shape: Tuple = (), sampling_eps: float = None + ) -> ScoreSdeVeSchedulerState: + """ + Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. + sampling_eps (`float`, optional): + final timestep value (overrides value given at Scheduler instantiation). + + """ + sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps + + timesteps = jnp.linspace(1, sampling_eps, num_inference_steps) + return state.replace(timesteps=timesteps) + + def set_sigmas( + self, + state: ScoreSdeVeSchedulerState, + num_inference_steps: int, + sigma_min: float = None, + sigma_max: float = None, + sampling_eps: float = None, + ) -> ScoreSdeVeSchedulerState: + """ + Sets the noise scales used for the diffusion chain. Supporting function to be run before inference. 
+ + The sigmas control the weight of the `drift` and `diffusion` components of sample update. + + Args: + state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. + sigma_min (`float`, optional): + initial noise scale value (overrides value given at Scheduler instantiation). + sigma_max (`float`, optional): + final noise scale value (overrides value given at Scheduler instantiation). + sampling_eps (`float`, optional): + final timestep value (overrides value given at Scheduler instantiation). + """ + sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min + sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max + sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps + if state.timesteps is None: + state = self.set_timesteps(state, num_inference_steps, sampling_eps) + + discrete_sigmas = jnp.exp(jnp.linspace(jnp.log(sigma_min), jnp.log(sigma_max), num_inference_steps)) + sigmas = jnp.array([sigma_min * (sigma_max / sigma_min) ** t for t in state.timesteps]) + + return state.replace(discrete_sigmas=discrete_sigmas, sigmas=sigmas) + + def get_adjacent_sigma(self, state, timesteps, t): + return jnp.where(timesteps == 0, jnp.zeros_like(t), state.discrete_sigmas[timesteps - 1]) + + def step_pred( + self, + state: ScoreSdeVeSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + key: jax.Array, + return_dict: bool = True, + ) -> Union[FlaxSdeVeOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + generator: random number generator. + return_dict (`bool`): option for returning tuple rather than FlaxSdeVeOutput class + + Returns: + [`FlaxSdeVeOutput`] or `tuple`: [`FlaxSdeVeOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is the sample tensor. 
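Unlike the PyTorch class above, the Flax variant keeps every mutable value in an explicit `ScoreSdeVeSchedulerState`, and each setter returns a new state instead of mutating the scheduler. A hedged sketch of that pattern (step count arbitrary; the sampling loop itself is elided):

from jax import random

scheduler = FlaxScoreSdeVeScheduler()            # class defined above
state = scheduler.create_state()                 # fills timesteps/sigmas for the training grid
state = scheduler.set_timesteps(state, num_inference_steps=100)
state = scheduler.set_sigmas(state, num_inference_steps=100)

key = random.PRNGKey(0)
# inside a sampling loop, `state` and a fresh RNG key are threaded through each call:
#   out = scheduler.step_pred(state, model_output, t, sample, key)
#   sample, state = out.prev_sample, out.state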
+ + """ + if state.timesteps is None: + raise ValueError( + "`state.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" + ) + + timestep = timestep * jnp.ones( + sample.shape[0], + ) + timesteps = (timestep * (len(state.timesteps) - 1)).long() + + sigma = state.discrete_sigmas[timesteps] + adjacent_sigma = self.get_adjacent_sigma(state, timesteps, timestep) + drift = jnp.zeros_like(sample) + diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5 + + # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) + # also equation 47 shows the analog from SDE models to ancestral sampling methods + diffusion = diffusion.flatten() + diffusion = broadcast_to_shape_from_left(diffusion, sample.shape) + drift = drift - diffusion**2 * model_output + + # equation 6: sample noise for the diffusion term of + key = random.split(key, num=1) + noise = random.normal(key=key, shape=sample.shape) + prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep + # TODO is the variable diffusion the correct scaling term for the noise? + prev_sample = prev_sample_mean + diffusion * noise # add impact of diffusion field g + + if not return_dict: + return (prev_sample, prev_sample_mean, state) + + return FlaxSdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean, state=state) + + def step_correct( + self, + state: ScoreSdeVeSchedulerState, + model_output: jnp.ndarray, + sample: jnp.ndarray, + key: jax.Array, + return_dict: bool = True, + ) -> Union[FlaxSdeVeOutput, Tuple]: + """ + Correct the predicted sample based on the output model_output of the network. This is often run repeatedly + after making the prediction for the previous timestep. + + Args: + state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + generator: random number generator. + return_dict (`bool`): option for returning tuple rather than FlaxSdeVeOutput class + + Returns: + [`FlaxSdeVeOutput`] or `tuple`: [`FlaxSdeVeOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is the sample tensor. + + """ + if state.timesteps is None: + raise ValueError( + "`state.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" + ) + + # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. 
of z" + # sample noise for correction + key = random.split(key, num=1) + noise = random.normal(key=key, shape=sample.shape) + + # compute step size from the model_output, the noise, and the snr + grad_norm = jnp.linalg.norm(model_output) + noise_norm = jnp.linalg.norm(noise) + step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 + step_size = step_size * jnp.ones(sample.shape[0]) + + # compute corrected sample: model_output term and noise term + step_size = step_size.flatten() + step_size = broadcast_to_shape_from_left(step_size, sample.shape) + prev_sample_mean = sample + step_size * model_output + prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise + + if not return_dict: + return (prev_sample, state) + + return FlaxSdeVeOutput(prev_sample=prev_sample, state=state) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_tcd.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_tcd.py new file mode 100644 index 000000000..7eb01b382 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_tcd.py @@ -0,0 +1,686 @@ +# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..schedulers.scheduling_utils import SchedulerMixin +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class TCDSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_noised_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted noised sample `(x_{s})` based on the model output from the current timestep. + """ + + prev_sample: torch.FloatTensor + pred_noised_sample: Optional[torch.FloatTensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. 
+ + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas: torch.FloatTensor) -> torch.FloatTensor: + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.FloatTensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.FloatTensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class TCDScheduler(SchedulerMixin, ConfigMixin): + """ + `TCDScheduler` incorporates the `Strategic Stochastic Sampling` introduced by the paper `Trajectory Consistency Distillation`, + extending the original Multistep Consistency Sampling to enable unrestricted trajectory traversal. + + This code is based on the official repo of TCD(https://github.com/jabir-zheng/TCD). + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. [`~ConfigMixin`] takes care of storing all config + attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be + accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving + functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. 
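A quick standalone check (an added example, not part of the patch) of the `rescale_zero_terminal_snr` helper above: after rescaling, the cumulative alpha product ends at numerically zero, so the terminal SNR is zero, while the first step is left essentially unchanged:

import torch

betas = torch.linspace(0.00085**0.5, 0.012**0.5, 1000) ** 2  # the "scaled_linear" schedule
rescaled = rescale_zero_terminal_snr(betas)                  # helper defined above

alphas_bar = torch.cumprod(1.0 - rescaled, dim=0)
print(alphas_bar[0])   # ~0.99915, i.e. 1 - beta_start, unchanged by the rescaling
print(alphas_bar[-1])  # ~0, hence SNR(T) = alphas_bar[-1] / (1 - alphas_bar[-1]) == 0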
Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + original_inference_steps (`int`, *optional*, defaults to 50): + The default number of inference steps used to generate a linearly-spaced timestep schedule, from which we + will ultimately take `num_inference_steps` evenly spaced timesteps to form the final timestep schedule. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, defaults to `True`): + Each diffusion step uses the alphas product value at that step and at the previous one. For the final step + there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the alpha value at step 0. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + timestep_scaling (`float`, defaults to 10.0): + The factor the timesteps will be multiplied by when calculating the consistency model boundary conditions + `c_skip` and `c_out`. Increasing this will decrease the approximation error (although the approximation + error at the default of `10.0` is already pretty small). + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
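In practice this scheduler is meant for few-step sampling with a TCD-distilled LoRA on top of a base checkpoint. A hedged usage sketch (model and LoRA ids follow the upstream TCD examples and are illustrative only):

import torch
from diffusers import StableDiffusionXLPipeline, TCDScheduler

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16"
).to("cuda")
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights("h1t/TCD-SDXL-LoRA")  # example TCD-distilled LoRA
pipe.fuse_lora()

image = pipe(
    "a photo of a corgi in a field",
    num_inference_steps=4,
    guidance_scale=0.0,
    eta=0.3,  # the stochasticity parameter (gamma in the paper) forwarded to `step`
).images[0]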
+ """ + + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, + beta_end: float = 0.012, + beta_schedule: str = "scaled_linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + original_inference_steps: int = 50, + clip_sample: bool = False, + clip_sample_range: float = 1.0, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + timestep_scaling: float = 10.0, + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) + self.custom_timesteps = False + + self._step_index = None + self._begin_index = None + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + @property + def step_index(self): + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. 
+ """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample + + # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler._get_variance + def _get_variance(self, timestep, prev_timestep): + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." + + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + original_inference_steps: Optional[int] = None, + timesteps: Optional[List[int]] = None, + strength: int = 1.0, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). 
+ + Args: + num_inference_steps (`int`, *optional*): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + original_inference_steps (`int`, *optional*): + The original number of inference steps, which will be used to generate a linearly-spaced timestep + schedule (which is different from the standard `diffusers` implementation). We will then take + `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as + our final timestep schedule. If not set, this will default to the `original_inference_steps` attribute. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of equal spacing between timesteps on the training/distillation timestep + schedule is used. If `timesteps` is passed, `num_inference_steps` must be `None`. + """ + # 0. Check inputs + if num_inference_steps is None and timesteps is None: + raise ValueError("Must pass exactly one of `num_inference_steps` or `custom_timesteps`.") + + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") + + # 1. Calculate the TCD original training/distillation timestep schedule. + original_steps = ( + original_inference_steps if original_inference_steps is not None else self.config.original_inference_steps + ) + + if original_inference_steps is None: + # default option, timesteps align with discrete inference steps + if original_steps > self.config.num_train_timesteps: + raise ValueError( + f"`original_steps`: {original_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + # TCD Timesteps Setting + # The skipping step parameter k from the paper. + k = self.config.num_train_timesteps // original_steps + # TCD Training/Distillation Steps Schedule + tcd_origin_timesteps = np.asarray(list(range(1, int(original_steps * strength) + 1))) * k - 1 + else: + # customised option, sampled timesteps can be any arbitrary value + tcd_origin_timesteps = np.asarray(list(range(0, int(self.config.num_train_timesteps * strength)))) + + # 2. Calculate the TCD inference timestep schedule. + if timesteps is not None: + # 2.1 Handle custom timestep schedules. + train_timesteps = set(tcd_origin_timesteps) + non_train_timesteps = [] + for i in range(1, len(timesteps)): + if timesteps[i] >= timesteps[i - 1]: + raise ValueError("`custom_timesteps` must be in descending order.") + + if timesteps[i] not in train_timesteps: + non_train_timesteps.append(timesteps[i]) + + if timesteps[0] >= self.config.num_train_timesteps: + raise ValueError( + f"`timesteps` must start before `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps}." + ) + + # Raise warning if timestep schedule does not start with self.config.num_train_timesteps - 1 + if strength == 1.0 and timesteps[0] != self.config.num_train_timesteps - 1: + logger.warning( + f"The first timestep on the custom timestep schedule is {timesteps[0]}, not" + f" `self.config.num_train_timesteps - 1`: {self.config.num_train_timesteps - 1}. 
You may get" + f" unexpected results when using this timestep schedule." + ) + + # Raise warning if custom timestep schedule contains timesteps not on original timestep schedule + if non_train_timesteps: + logger.warning( + f"The custom timestep schedule contains the following timesteps which are not on the original" + f" training/distillation timestep schedule: {non_train_timesteps}. You may get unexpected results" + f" when using this timestep schedule." + ) + + # Raise warning if custom timestep schedule is longer than original_steps + if original_steps is not None: + if len(timesteps) > original_steps: + logger.warning( + f"The number of timesteps in the custom timestep schedule is {len(timesteps)}, which exceeds the" + f" the length of the timestep schedule used for training: {original_steps}. You may get some" + f" unexpected results when using this timestep schedule." + ) + else: + if len(timesteps) > self.config.num_train_timesteps: + logger.warning( + f"The number of timesteps in the custom timestep schedule is {len(timesteps)}, which exceeds the" + f" the length of the timestep schedule used for training: {self.config.num_train_timesteps}. You may get some" + f" unexpected results when using this timestep schedule." + ) + + timesteps = np.array(timesteps, dtype=np.int64) + self.num_inference_steps = len(timesteps) + self.custom_timesteps = True + + # Apply strength (e.g. for img2img pipelines) (see StableDiffusionImg2ImgPipeline.get_timesteps) + init_timestep = min(int(self.num_inference_steps * strength), self.num_inference_steps) + t_start = max(self.num_inference_steps - init_timestep, 0) + timesteps = timesteps[t_start * self.order :] + # TODO: also reset self.num_inference_steps? + else: + # 2.2 Create the "standard" TCD inference timestep schedule. + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + if original_steps is not None: + skipping_step = len(tcd_origin_timesteps) // num_inference_steps + + if skipping_step < 1: + raise ValueError( + f"The combination of `original_steps x strength`: {original_steps} x {strength} is smaller than `num_inference_steps`: {num_inference_steps}. Make sure to either reduce `num_inference_steps` to a value smaller than {int(original_steps * strength)} or increase `strength` to a value higher than {float(num_inference_steps / original_steps)}." + ) + + self.num_inference_steps = num_inference_steps + + if original_steps is not None: + if num_inference_steps > original_steps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:" + f" {original_steps} because the final timestep schedule will be a subset of the" + f" `original_inference_steps`-sized initial timestep schedule." + ) + else: + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `num_train_timesteps`:" + f" {self.config.num_train_timesteps} because the final timestep schedule will be a subset of the" + f" `num_train_timesteps`-sized initial timestep schedule." 
+ ) + + # TCD Inference Steps Schedule + tcd_origin_timesteps = tcd_origin_timesteps[::-1].copy() + # Select (approximately) evenly spaced indices from tcd_origin_timesteps. + inference_indices = np.linspace(0, len(tcd_origin_timesteps), num=num_inference_steps, endpoint=False) + inference_indices = np.floor(inference_indices).astype(np.int64) + timesteps = tcd_origin_timesteps[inference_indices] + + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.long) + + self._step_index = None + self._begin_index = None + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + eta: float = 0.3, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[TCDSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + eta (`float`): + A stochastic parameter (referred to as `gamma` in the paper) used to control the stochasticity in every step. + When eta = 0, it represents deterministic sampling, whereas eta = 1 indicates full stochastic sampling. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_tcd.TCDSchedulerOutput`] or `tuple`. + Returns: + [`~schedulers.scheduling_utils.TCDSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_tcd.TCDSchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + assert 0 <= eta <= 1.0, "gamma must be less than or equal to 1.0" + + # 1. get previous step value + prev_step_index = self.step_index + 1 + if prev_step_index < len(self.timesteps): + prev_timestep = self.timesteps[prev_step_index] + else: + prev_timestep = torch.tensor(0) + + timestep_s = torch.floor((1 - eta) * prev_timestep).to(dtype=torch.long) + + # 2. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[timestep] + beta_prod_t = 1 - alpha_prod_t + + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + + alpha_prod_s = self.alphas_cumprod[timestep_s] + beta_prod_s = 1 - alpha_prod_s + + # 3. 
Compute the predicted noised sample x_s based on the model parameterization + if self.config.prediction_type == "epsilon": # noise-prediction + pred_original_sample = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt() + pred_epsilon = model_output + pred_noised_sample = alpha_prod_s.sqrt() * pred_original_sample + beta_prod_s.sqrt() * pred_epsilon + elif self.config.prediction_type == "sample": # x-prediction + pred_original_sample = model_output + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + pred_noised_sample = alpha_prod_s.sqrt() * pred_original_sample + beta_prod_s.sqrt() * pred_epsilon + elif self.config.prediction_type == "v_prediction": # v-prediction + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + pred_noised_sample = alpha_prod_s.sqrt() * pred_original_sample + beta_prod_s.sqrt() * pred_epsilon + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" + " `v_prediction` for `TCDScheduler`." + ) + + # 4. Sample and inject noise z ~ N(0, I) for MultiStep Inference + # Noise is not used on the final timestep of the timestep schedule. + # This also means that noise is not used for one-step sampling. + # Eta (referred to as "gamma" in the paper) was introduced to control the stochasticity in every step. + # When eta = 0, it represents deterministic sampling, whereas eta = 1 indicates full stochastic sampling. + if eta > 0: + if self.step_index != self.num_inference_steps - 1: + noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=pred_noised_sample.dtype + ) + prev_sample = (alpha_prod_t_prev / alpha_prod_s).sqrt() * pred_noised_sample + ( + 1 - alpha_prod_t_prev / alpha_prod_s + ).sqrt() * noise + else: + prev_sample = pred_noised_sample + else: + prev_sample = pred_noised_sample + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample, pred_noised_sample) + + return TCDSchedulerOutput(prev_sample=prev_sample, pred_noised_sample=pred_noised_sample) + + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + def get_velocity( + self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + alphas_cumprod = self.alphas_cumprod.to(device=sample.device, 
dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps + + def previous_timestep(self, timestep): + if self.custom_timesteps: + index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] + if index == self.timesteps.shape[0] - 1: + prev_t = torch.tensor(-1) + else: + prev_t = self.timesteps[index + 1] + else: + num_inference_steps = ( + self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps + ) + prev_t = timestep - self.config.num_train_timesteps // num_inference_steps + + return prev_t diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_unclip.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_unclip.py new file mode 100644 index 000000000..7bc0a0f9b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_unclip.py @@ -0,0 +1,352 @@ +# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP +class UnCLIPSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. 
+ """ + + prev_sample: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class UnCLIPScheduler(SchedulerMixin, ConfigMixin): + """ + NOTE: do not use this scheduler. The DDPM scheduler has been updated to support the changes made here. This + scheduler will be removed and replaced with DDPM. + + This is a modified DDPM Scheduler specifically for the karlo unCLIP model. + + This scheduler has some minor variations in how it calculates the learned range variance and dynamically + re-calculates betas based off the timesteps it is skipping. + + The scheduler also uses a slightly different step ratio when computing timesteps to use for inference. + + See [`~DDPMScheduler`] for more information on DDPM scheduling + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + variance_type (`str`): + options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small_log` + or `learned_range`. + clip_sample (`bool`, default `True`): + option to clip predicted sample between `-clip_sample_range` and `clip_sample_range` for numerical + stability. + clip_sample_range (`float`, default `1.0`): + The range to clip the sample between. See `clip_sample`. 
+ prediction_type (`str`, default `epsilon`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion process) + or `sample` (directly predicting the noisy sample`) + """ + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + variance_type: str = "fixed_small_log", + clip_sample: bool = True, + clip_sample_range: Optional[float] = 1.0, + prediction_type: str = "epsilon", + beta_schedule: str = "squaredcos_cap_v2", + ): + if beta_schedule != "squaredcos_cap_v2": + raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'") + + self.betas = betas_for_alpha_bar(num_train_timesteps) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + self.one = torch.tensor(1.0) + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) + + self.variance_type = variance_type + + def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): input sample + timestep (`int`, optional): current timestep + + Returns: + `torch.FloatTensor`: scaled input sample + """ + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + + Note that this scheduler uses a slightly different step ratio than the other diffusers schedulers. The + different step ratio is to mimic the original karlo implementation and does not affect the quality or accuracy + of the results. + + Args: + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. 
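+                Illustrative note (added for clarity, not part of the upstream docstring): with the
+                default `num_train_timesteps=1000` and `num_inference_steps=25`, the step ratio is
+                `999 / 24 ≈ 41.6`, so the resulting schedule is approximately
+                `[999, 957, 916, ..., 42, 0]`; the first and last training timesteps are always
+                included.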
+ """ + self.num_inference_steps = num_inference_steps + step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + self.timesteps = torch.from_numpy(timesteps).to(device) + + def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None): + if prev_timestep is None: + prev_timestep = t - 1 + + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + if prev_timestep == t - 1: + beta = self.betas[t] + else: + beta = 1 - alpha_prod_t / alpha_prod_t_prev + + # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) + # and sample from it to get previous sample + # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample + variance = beta_prod_t_prev / beta_prod_t * beta + + if variance_type is None: + variance_type = self.config.variance_type + + # hacks - were probably added for training stability + if variance_type == "fixed_small_log": + variance = torch.log(torch.clamp(variance, min=1e-20)) + variance = torch.exp(0.5 * variance) + elif variance_type == "learned_range": + # NOTE difference with DDPM scheduler + min_log = variance.log() + max_log = beta.log() + + frac = (predicted_variance + 1) / 2 + variance = frac * max_log + (1 - frac) * min_log + + return variance + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + prev_timestep: Optional[int] = None, + generator=None, + return_dict: bool = True, + ) -> Union[UnCLIPSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + current instance of sample being created by diffusion process. + prev_timestep (`int`, *optional*): The previous timestep to predict the previous sample at. + Used to dynamically compute beta. If not given, `t-1` is used and the pre-computed beta is used. + generator: random number generator. + return_dict (`bool`): option for returning tuple rather than UnCLIPSchedulerOutput class + + Returns: + [`~schedulers.scheduling_utils.UnCLIPSchedulerOutput`] or `tuple`: + [`~schedulers.scheduling_utils.UnCLIPSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is the sample tensor. + + """ + t = timestep + + if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": + model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) + else: + predicted_variance = None + + # 1. compute alphas, betas + if prev_timestep is None: + prev_timestep = t - 1 + + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + if prev_timestep == t - 1: + beta = self.betas[t] + alpha = self.alphas[t] + else: + beta = 1 - alpha_prod_t / alpha_prod_t_prev + alpha = 1 - beta + + # 2. 
compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`" + " for the UnCLIPScheduler." + ) + + # 3. Clip "predicted x_0" + if self.config.clip_sample: + pred_original_sample = torch.clamp( + pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * beta) / beta_prod_t + current_sample_coeff = alpha ** (0.5) * beta_prod_t_prev / beta_prod_t + + # 5. Compute predicted previous sample µ_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample + + # 6. Add noise + variance = 0 + if t > 0: + variance_noise = randn_tensor( + model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device + ) + + variance = self._get_variance( + t, + predicted_variance=predicted_variance, + prev_timestep=prev_timestep, + ) + + if self.variance_type == "fixed_small_log": + variance = variance + elif self.variance_type == "learned_range": + variance = (0.5 * variance).exp() + else: + raise ValueError( + f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`" + " for the UnCLIPScheduler." 
+ ) + + variance = variance * variance_noise + + pred_prev_sample = pred_prev_sample + variance + + if not return_dict: + return (pred_prev_sample,) + + return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_unipc_multistep.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_unipc_multistep.py new file mode 100644 index 000000000..8ba9a9c7d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_unipc_multistep.py @@ -0,0 +1,880 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: check https://arxiv.org/abs/2302.04867 and https://github.com/wl-zhao/UniPC for more info +# The codebase is modified based on https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import deprecate +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. 
+ + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin): + """ + `UniPCMultistepScheduler` is a training-free framework designed for the fast sampling of diffusion models. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + solver_order (`int`, default `2`): + The UniPC order which can be any positive integer. The effective order of accuracy is `solver_order + 1` + due to the UniC. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for + unconditional sampling. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `predict_x0=True`. + predict_x0 (`bool`, defaults to `True`): + Whether to use the updating algorithm on the predicted x0. + solver_type (`str`, default `bh2`): + Solver type for UniPC. It is recommended to use `bh1` for unconditional sampling when steps < 10, and `bh2` + otherwise. 
+ lower_order_final (`bool`, default `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can + stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. + disable_corrector (`list`, default `[]`): + Decides which step to disable the corrector to mitigate the misalignment between `epsilon_theta(x_t, c)` + and `epsilon_theta(x_t^c, c)` which can influence convergence for a large guidance scale. Corrector is + usually disabled during the first few steps. + solver_p (`SchedulerMixin`, default `None`): + Any other scheduler that if specified, the algorithm becomes `solver_p + UniC`. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {σi}. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + predict_x0: bool = True, + solver_type: str = "bh2", + lower_order_final: bool = True, + disable_corrector: List[int] = [], + solver_p: SchedulerMixin = None, + use_karras_sigmas: Optional[bool] = False, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
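+            # Clarifying comment (added here, not in upstream diffusers): "scaled_linear"
+            # interpolates sqrt(beta) linearly between sqrt(beta_start) and sqrt(beta_end)
+            # and then squares the result, so beta grows quadratically with the timestep
+            # index rather than linearly as in the plain "linear" schedule.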
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + if solver_type not in ["bh1", "bh2"]: + if solver_type in ["midpoint", "heun", "logrho"]: + self.register_to_config(solver_type="bh2") + else: + raise NotImplementedError(f"{solver_type} does is not implemented for {self.__class__}") + + self.predict_x0 = predict_x0 + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.model_outputs = [None] * solver_order + self.timestep_list = [None] * solver_order + self.lower_order_nums = 0 + self.disable_corrector = disable_corrector + self.solver_p = solver_p + self.last_sample = None + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def step_index(self): + """ + The index counter for current timestep. It will increae 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps + 1) + .round()[::-1][:-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.arange(self.config.num_train_timesteps, 0, -step_ratio).round().copy().astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + if self.config.use_karras_sigmas: + log_sigmas = np.log(sigmas) + sigmas = np.flip(sigmas).copy() + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas) + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + + self.num_inference_steps = len(timesteps) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + self.last_sample = None + if self.solver_p: + self.solver_p.set_timesteps(self.num_inference_steps, device=device) + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def convert_model_output( + self, + model_output: torch.FloatTensor, + *args, + sample: torch.FloatTensor = None, + **kwargs, + ) -> torch.FloatTensor: + r""" + Convert the model output to the corresponding type the UniPC algorithm needs. + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. 
+ + Returns: + `torch.FloatTensor`: + The converted model output. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + + if self.predict_x0: + if self.config.prediction_type == "epsilon": + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the UniPCMultistepScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + else: + if self.config.prediction_type == "epsilon": + return model_output + elif self.config.prediction_type == "sample": + epsilon = (sample - alpha_t * model_output) / sigma_t + return epsilon + elif self.config.prediction_type == "v_prediction": + epsilon = alpha_t * model_output + sigma_t * sample + return epsilon + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the UniPCMultistepScheduler." + ) + + def multistep_uni_p_bh_update( + self, + model_output: torch.FloatTensor, + *args, + sample: torch.FloatTensor = None, + order: int = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the UniP (B(h) version). Alternatively, `self.solver_p` is used if is specified. + + Args: + model_output (`torch.FloatTensor`): + The direct output from the learned diffusion model at the current timestep. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + order (`int`): + The order of UniP at this timestep (corresponds to the *p* in UniPC-p). + + Returns: + `torch.FloatTensor`: + The sample tensor at the previous timestep. 
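+            Note (added for clarity, not from the upstream docstring): when `order == 1` and
+            `predict_x0=True`, there are no difference terms `D1s` and the update below reduces to
+            the first-order DPM-Solver++ step,
+            `x_t = (sigma_t / sigma_s0) * x - alpha_t * (exp(-h) - 1) * m0`.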
+ """ + prev_timestep = args[0] if len(args) > 0 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if order is None: + if len(args) > 2: + order = args[2] + else: + raise ValueError(" missing `order` as a required keyward argument") + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + model_output_list = self.model_outputs + + s0 = self.timestep_list[-1] + m0 = model_output_list[-1] + x = sample + + if self.solver_p: + x_t = self.solver_p.step(model_output, s0, x).prev_sample + return x_t + + sigma_t, sigma_s0 = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + + h = lambda_t - lambda_s0 + device = sample.device + + rks = [] + D1s = [] + for i in range(1, order): + si = self.step_index - i + mi = model_output_list[-(i + 1)] + alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) + lambda_si = torch.log(alpha_si) - torch.log(sigma_si) + rk = (lambda_si - lambda_s0) / h + rks.append(rk) + D1s.append((mi - m0) / rk) + + rks.append(1.0) + rks = torch.tensor(rks, device=device) + + R = [] + b = [] + + hh = -h if self.predict_x0 else h + h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1 + h_phi_k = h_phi_1 / hh - 1 + + factorial_i = 1 + + if self.config.solver_type == "bh1": + B_h = hh + elif self.config.solver_type == "bh2": + B_h = torch.expm1(hh) + else: + raise NotImplementedError() + + for i in range(1, order + 1): + R.append(torch.pow(rks, i - 1)) + b.append(h_phi_k * factorial_i / B_h) + factorial_i *= i + 1 + h_phi_k = h_phi_k / hh - 1 / factorial_i + + R = torch.stack(R) + b = torch.tensor(b, device=device) + + if len(D1s) > 0: + D1s = torch.stack(D1s, dim=1) # (B, K) + # for order 2, we use a simplified version + if order == 2: + rhos_p = torch.tensor([0.5], dtype=x.dtype, device=device) + else: + rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1]) + else: + D1s = None + + if self.predict_x0: + x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 + if D1s is not None: + pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s) + else: + pred_res = 0 + x_t = x_t_ - alpha_t * B_h * pred_res + else: + x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 + if D1s is not None: + pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s) + else: + pred_res = 0 + x_t = x_t_ - sigma_t * B_h * pred_res + + x_t = x_t.to(x.dtype) + return x_t + + def multistep_uni_c_bh_update( + self, + this_model_output: torch.FloatTensor, + *args, + last_sample: torch.FloatTensor = None, + this_sample: torch.FloatTensor = None, + order: int = None, + **kwargs, + ) -> torch.FloatTensor: + """ + One step for the UniC (B(h) version). + + Args: + this_model_output (`torch.FloatTensor`): + The model outputs at `x_t`. + this_timestep (`int`): + The current timestep `t`. + last_sample (`torch.FloatTensor`): + The generated sample before the last predictor `x_{t-1}`. + this_sample (`torch.FloatTensor`): + The generated sample after the last predictor `x_{t}`. + order (`int`): + The `p` of UniC-p at this step. The effective order of accuracy should be `order + 1`. 
+ + Returns: + `torch.FloatTensor`: + The corrected sample tensor at the current timestep. + """ + this_timestep = args[0] if len(args) > 0 else kwargs.pop("this_timestep", None) + if last_sample is None: + if len(args) > 1: + last_sample = args[1] + else: + raise ValueError(" missing`last_sample` as a required keyward argument") + if this_sample is None: + if len(args) > 2: + this_sample = args[2] + else: + raise ValueError(" missing`this_sample` as a required keyward argument") + if order is None: + if len(args) > 3: + order = args[3] + else: + raise ValueError(" missing`order` as a required keyward argument") + if this_timestep is not None: + deprecate( + "this_timestep", + "1.0.0", + "Passing `this_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + model_output_list = self.model_outputs + + m0 = model_output_list[-1] + x = last_sample + x_t = this_sample + model_t = this_model_output + + sigma_t, sigma_s0 = self.sigmas[self.step_index], self.sigmas[self.step_index - 1] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + + h = lambda_t - lambda_s0 + device = this_sample.device + + rks = [] + D1s = [] + for i in range(1, order): + si = self.step_index - (i + 1) + mi = model_output_list[-(i + 1)] + alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) + lambda_si = torch.log(alpha_si) - torch.log(sigma_si) + rk = (lambda_si - lambda_s0) / h + rks.append(rk) + D1s.append((mi - m0) / rk) + + rks.append(1.0) + rks = torch.tensor(rks, device=device) + + R = [] + b = [] + + hh = -h if self.predict_x0 else h + h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1 + h_phi_k = h_phi_1 / hh - 1 + + factorial_i = 1 + + if self.config.solver_type == "bh1": + B_h = hh + elif self.config.solver_type == "bh2": + B_h = torch.expm1(hh) + else: + raise NotImplementedError() + + for i in range(1, order + 1): + R.append(torch.pow(rks, i - 1)) + b.append(h_phi_k * factorial_i / B_h) + factorial_i *= i + 1 + h_phi_k = h_phi_k / hh - 1 / factorial_i + + R = torch.stack(R) + b = torch.tensor(b, device=device) + + if len(D1s) > 0: + D1s = torch.stack(D1s, dim=1) + else: + D1s = None + + # for order 1, we use a simplified version + if order == 1: + rhos_c = torch.tensor([0.5], dtype=x.dtype, device=device) + else: + rhos_c = torch.linalg.solve(R, b) + + if self.predict_x0: + x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 + if D1s is not None: + corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s) + else: + corr_res = 0 + D1_t = model_t - m0 + x_t = x_t_ - alpha_t * B_h * (corr_res + rhos_c[-1] * D1_t) + else: + x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 + if D1s is not None: + corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s) + else: + corr_res = 0 + D1_t = model_t - m0 + x_t = x_t_ - sigma_t * B_h * (corr_res + rhos_c[-1] * D1_t) + x_t = x_t.to(x.dtype) + return x_t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** 
first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: int, + sample: torch.FloatTensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep UniPC. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + use_corrector = ( + self.step_index > 0 and self.step_index - 1 not in self.disable_corrector and self.last_sample is not None + ) + + model_output_convert = self.convert_model_output(model_output, sample=sample) + if use_corrector: + sample = self.multistep_uni_c_bh_update( + this_model_output=model_output_convert, + last_sample=self.last_sample, + this_sample=sample, + order=self.this_order, + ) + + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.timestep_list[i] = self.timestep_list[i + 1] + + self.model_outputs[-1] = model_output_convert + self.timestep_list[-1] = timestep + + if self.config.lower_order_final: + this_order = min(self.config.solver_order, len(self.timesteps) - self.step_index) + else: + this_order = self.config.solver_order + + self.this_order = min(this_order, self.lower_order_nums + 1) # warmup for multistep + assert self.this_order > 0 + + self.last_sample = sample + prev_sample = self.multistep_uni_p_bh_update( + model_output=model_output, # pass the original non-converted model output, in case solver-p is used + sample=sample, + order=self.this_order, + ) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: + """ + Ensures interchangeability 
with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): + The input sample. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + return sample + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # begin_index is None when the scheduler is used for training + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + else: + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_utils.py new file mode 100644 index 000000000..5dbdb8288 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_utils.py @@ -0,0 +1,186 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +import os +from dataclasses import dataclass +from enum import Enum +from typing import Optional, Union + +import torch +from huggingface_hub.utils import validate_hf_hub_args + +from ..utils import BaseOutput, PushToHubMixin + + +SCHEDULER_CONFIG_NAME = "scheduler_config.json" + + +# NOTE: We make this type an enum because it simplifies usage in docs and prevents +# circular imports when used for `_compatibles` within the schedulers module. +# When it's used as a type in pipelines, it really is a Union because the actual +# scheduler instance is passed in. 
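+# Illustrative usage (comment added for clarity, not in upstream diffusers): any scheduler listed
+# in the enum below can usually be swapped into a pipeline through its config, e.g.
+#   pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+# where `pipe` is a hypothetical, already-loaded diffusers pipeline.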
+class KarrasDiffusionSchedulers(Enum): + DDIMScheduler = 1 + DDPMScheduler = 2 + PNDMScheduler = 3 + LMSDiscreteScheduler = 4 + EulerDiscreteScheduler = 5 + HeunDiscreteScheduler = 6 + EulerAncestralDiscreteScheduler = 7 + DPMSolverMultistepScheduler = 8 + DPMSolverSinglestepScheduler = 9 + KDPM2DiscreteScheduler = 10 + KDPM2AncestralDiscreteScheduler = 11 + DEISMultistepScheduler = 12 + UniPCMultistepScheduler = 13 + DPMSolverSDEScheduler = 14 + EDMEulerScheduler = 15 + + +@dataclass +class SchedulerOutput(BaseOutput): + """ + Base class for the output of a scheduler's `step` function. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: torch.FloatTensor + + +class SchedulerMixin(PushToHubMixin): + """ + Base class for all schedulers. + + [`SchedulerMixin`] contains common functions shared by all schedulers such as general loading and saving + functionalities. + + [`ConfigMixin`] takes care of storing the configuration attributes (like `num_train_timesteps`) that are passed to + the scheduler's `__init__` function, and the attributes can be accessed by `scheduler.config.num_train_timesteps`. + + Class attributes: + - **_compatibles** (`List[str]`) -- A list of scheduler classes that are compatible with the parent scheduler + class. Use [`~ConfigMixin.from_config`] to load a different compatible scheduler class (should be overridden + by parent class). + """ + + config_name = SCHEDULER_CONFIG_NAME + _compatibles = [] + has_compatibles = True + + @classmethod + @validate_hf_hub_args + def from_pretrained( + cls, + pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, + subfolder: Optional[str] = None, + return_unused_kwargs=False, + **kwargs, + ): + r""" + Instantiate a scheduler from a pre-defined JSON configuration file in a local directory or Hub repository. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the scheduler + configuration saved with [`~SchedulerMixin.save_pretrained`]. + subfolder (`str`, *optional*): + The subfolder location of a model file within a larger model repository on the Hub or locally. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + Whether kwargs that are not consumed by the Python class should be returned or not. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to resume downloading the model weights and configuration files. If set to `False`, any + incompletely downloaded files are deleted. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
+ output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only(`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + + + + To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with + `huggingface-cli login`. You can also activate the special + ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a + firewalled environment. + + + + """ + config, kwargs, commit_hash = cls.load_config( + pretrained_model_name_or_path=pretrained_model_name_or_path, + subfolder=subfolder, + return_unused_kwargs=True, + return_commit_hash=True, + **kwargs, + ) + return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs) + + def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): + """ + Save a scheduler configuration object to a directory so that it can be reloaded using the + [`~SchedulerMixin.from_pretrained`] class method. + + Args: + save_directory (`str` or `os.PathLike`): + Directory where the configuration JSON file will be saved (will be created if it does not exist). + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. + """ + self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs) + + @property + def compatibles(self): + """ + Returns all schedulers that are compatible with this scheduler + + Returns: + `List[SchedulerMixin]`: List of compatible schedulers + """ + return self._get_compatibles() + + @classmethod + def _get_compatibles(cls): + compatible_classes_str = list(set([cls.__name__] + cls._compatibles)) + diffusers_library = importlib.import_module(__name__.split(".")[0]) + compatible_classes = [ + getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c) + ] + return compatible_classes diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_utils_flax.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_utils_flax.py new file mode 100644 index 000000000..a1d471f91 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_utils_flax.py @@ -0,0 +1,293 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +import math +import os +from dataclasses import dataclass +from enum import Enum +from typing import Optional, Tuple, Union + +import flax +import jax.numpy as jnp +from huggingface_hub.utils import validate_hf_hub_args + +from ..utils import BaseOutput, PushToHubMixin + + +SCHEDULER_CONFIG_NAME = "scheduler_config.json" + + +# NOTE: We make this type an enum because it simplifies usage in docs and prevents +# circular imports when used for `_compatibles` within the schedulers module. +# When it's used as a type in pipelines, it really is a Union because the actual +# scheduler instance is passed in. +class FlaxKarrasDiffusionSchedulers(Enum): + FlaxDDIMScheduler = 1 + FlaxDDPMScheduler = 2 + FlaxPNDMScheduler = 3 + FlaxLMSDiscreteScheduler = 4 + FlaxDPMSolverMultistepScheduler = 5 + FlaxEulerDiscreteScheduler = 6 + + +@dataclass +class FlaxSchedulerOutput(BaseOutput): + """ + Base class for the scheduler's step function output. + + Args: + prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: jnp.ndarray + + +class FlaxSchedulerMixin(PushToHubMixin): + """ + Mixin containing common functions for the schedulers. + + Class attributes: + - **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that + `from_config` can be used from a class different than the one used to save the config (should be overridden + by parent class). + """ + + config_name = SCHEDULER_CONFIG_NAME + ignore_for_config = ["dtype"] + _compatibles = [] + has_compatibles = True + + @classmethod + @validate_hf_hub_args + def from_pretrained( + cls, + pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, + subfolder: Optional[str] = None, + return_unused_kwargs=False, + **kwargs, + ): + r""" + Instantiate a Scheduler class from a pre-defined JSON-file. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an + organization name, like `google/ddpm-celebahq-256`. + - A path to a *directory* containing model weights saved using [`~SchedulerMixin.save_pretrained`], + e.g., `./my_model_directory/`. + subfolder (`str`, *optional*): + In case the relevant files are located inside a subfolder of the model repo (either remote in + huggingface.co or downloaded locally), you can specify the folder name here. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + Whether kwargs that are not consumed by the Python class should be returned or not. + + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. 
+ force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Will attempt to resume the download if such a + file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only(`bool`, *optional*, defaults to `False`): + Whether or not to only look at local files (i.e., do not try to download the model). + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `transformers-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + + + + It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated + models](https://huggingface.co/docs/hub/models-gated#gated-models). + + + + + + Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to + use this method in a firewalled environment. + + + + """ + config, kwargs = cls.load_config( + pretrained_model_name_or_path=pretrained_model_name_or_path, + subfolder=subfolder, + return_unused_kwargs=True, + **kwargs, + ) + scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs) + + if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False): + state = scheduler.create_state() + + if return_unused_kwargs: + return scheduler, state, unused_kwargs + + return scheduler, state + + def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): + """ + Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the + [`~FlaxSchedulerMixin.from_pretrained`] class method. + + Args: + save_directory (`str` or `os.PathLike`): + Directory where the configuration JSON file will be saved (will be created if it does not exist). + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
+ """ + self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs) + + @property + def compatibles(self): + """ + Returns all schedulers that are compatible with this scheduler + + Returns: + `List[SchedulerMixin]`: List of compatible schedulers + """ + return self._get_compatibles() + + @classmethod + def _get_compatibles(cls): + compatible_classes_str = list(set([cls.__name__] + cls._compatibles)) + diffusers_library = importlib.import_module(__name__.split(".")[0]) + compatible_classes = [ + getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c) + ] + return compatible_classes + + +def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray: + assert len(shape) >= x.ndim + return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape) + + +def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray: + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + + Returns: + betas (`jnp.ndarray`): the betas used by the scheduler to step the model outputs + """ + + def alpha_bar(time_step): + return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return jnp.array(betas, dtype=dtype) + + +@flax.struct.dataclass +class CommonSchedulerState: + alphas: jnp.ndarray + betas: jnp.ndarray + alphas_cumprod: jnp.ndarray + + @classmethod + def create(cls, scheduler): + config = scheduler.config + + if config.trained_betas is not None: + betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype) + elif config.beta_schedule == "linear": + betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype) + elif config.beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ betas = ( + jnp.linspace( + config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype + ) + ** 2 + ) + elif config.beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype) + else: + raise NotImplementedError( + f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" + ) + + alphas = 1.0 - betas + + alphas_cumprod = jnp.cumprod(alphas, axis=0) + + return cls( + alphas=alphas, + betas=betas, + alphas_cumprod=alphas_cumprod, + ) + + +def get_sqrt_alpha_prod( + state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray +): + alphas_cumprod = state.alphas_cumprod + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape) + + return sqrt_alpha_prod, sqrt_one_minus_alpha_prod + + +def add_noise_common( + state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray +): + sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps) + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + +def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray): + sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps) + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_vq_diffusion.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_vq_diffusion.py new file mode 100644 index 000000000..03ba95cad --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/schedulers/scheduling_vq_diffusion.py @@ -0,0 +1,467 @@ +# Copyright 2024 Microsoft and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from .scheduling_utils import SchedulerMixin + + +@dataclass +class VQDiffusionSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's step function output. 
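`add_noise_common` and `get_velocity_common` in the Flax utilities above express the DDPM forward process and the v-prediction target in terms of the cumulative alphas. A small numeric sketch of the same two formulas follows; it is written with torch purely for illustration (the file itself is JAX) and the schedule values are made up:

```python
import torch

alphas_cumprod = torch.linspace(0.99, 0.01, 10)   # toy 10-step schedule

def noise_and_velocity(x0: torch.Tensor, eps: torch.Tensor, t: torch.Tensor):
    shape = (-1,) + (1,) * (x0.dim() - 1)
    a = alphas_cumprod[t].sqrt().view(shape)           # sqrt(alpha_bar_t)
    b = (1 - alphas_cumprod[t]).sqrt().view(shape)     # sqrt(1 - alpha_bar_t)
    noisy = a * x0 + b * eps        # add_noise_common
    velocity = a * eps - b * x0     # get_velocity_common (v-prediction target)
    return noisy, velocity

x0 = torch.randn(2, 4, 8, 8)
eps = torch.randn_like(x0)
noisy, v = noise_and_velocity(x0, eps, torch.tensor([1, 7]))
```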
+ + Args: + prev_sample (`torch.LongTensor` of shape `(batch size, num latent pixels)`): + Computed sample x_{t-1} of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: torch.LongTensor + + +def index_to_log_onehot(x: torch.LongTensor, num_classes: int) -> torch.FloatTensor: + """ + Convert batch of vector of class indices into batch of log onehot vectors + + Args: + x (`torch.LongTensor` of shape `(batch size, vector length)`): + Batch of class indices + + num_classes (`int`): + number of classes to be used for the onehot vectors + + Returns: + `torch.FloatTensor` of shape `(batch size, num classes, vector length)`: + Log onehot vectors + """ + x_onehot = F.one_hot(x, num_classes) + x_onehot = x_onehot.permute(0, 2, 1) + log_x = torch.log(x_onehot.float().clamp(min=1e-30)) + return log_x + + +def gumbel_noised(logits: torch.FloatTensor, generator: Optional[torch.Generator]) -> torch.FloatTensor: + """ + Apply gumbel noise to `logits` + """ + uniform = torch.rand(logits.shape, device=logits.device, generator=generator) + gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30) + noised = gumbel_noise + logits + return noised + + +def alpha_schedules(num_diffusion_timesteps: int, alpha_cum_start=0.99999, alpha_cum_end=0.000009): + """ + Cumulative and non-cumulative alpha schedules. + + See section 4.1. + """ + att = ( + np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (alpha_cum_end - alpha_cum_start) + + alpha_cum_start + ) + att = np.concatenate(([1], att)) + at = att[1:] / att[:-1] + att = np.concatenate((att[1:], [1])) + return at, att + + +def gamma_schedules(num_diffusion_timesteps: int, gamma_cum_start=0.000009, gamma_cum_end=0.99999): + """ + Cumulative and non-cumulative gamma schedules. + + See section 4.1. + """ + ctt = ( + np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (gamma_cum_end - gamma_cum_start) + + gamma_cum_start + ) + ctt = np.concatenate(([0], ctt)) + one_minus_ctt = 1 - ctt + one_minus_ct = one_minus_ctt[1:] / one_minus_ctt[:-1] + ct = 1 - one_minus_ct + ctt = np.concatenate((ctt[1:], [0])) + return ct, ctt + + +class VQDiffusionScheduler(SchedulerMixin, ConfigMixin): + """ + A scheduler for vector quantized diffusion. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_vec_classes (`int`): + The number of classes of the vector embeddings of the latent pixels. Includes the class for the masked + latent pixel. + num_train_timesteps (`int`, defaults to 100): + The number of diffusion steps to train the model. + alpha_cum_start (`float`, defaults to 0.99999): + The starting cumulative alpha value. + alpha_cum_end (`float`, defaults to 0.00009): + The ending cumulative alpha value. + gamma_cum_start (`float`, defaults to 0.00009): + The starting cumulative gamma value. + gamma_cum_end (`float`, defaults to 0.99999): + The ending cumulative gamma value. 
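`index_to_log_onehot` and `gumbel_noised`, defined earlier in this file, are the two building blocks the VQ scheduler samples with: class indices become log one-hot vectors with the class axis on dim 1, and Gumbel noise turns an `argmax` over log-probabilities into a categorical sample. A short shape sketch with illustrative values only:

```python
import torch
import torch.nn.functional as F

x = torch.tensor([[0, 2, 1]])                            # (batch=1, num latent pixels=3)
onehot = F.one_hot(x, num_classes=4).permute(0, 2, 1)    # (1, num classes=4, 3)
log_x = torch.log(onehot.float().clamp(min=1e-30))       # 0 where the class matches, ~-69 elsewhere

# Gumbel trick: argmax(logits + g) with g ~ Gumbel(0, 1) samples from softmax(logits).
uniform = torch.rand(log_x.shape)
gumbel = -torch.log(-torch.log(uniform + 1e-30) + 1e-30)
sample = (log_x + gumbel).argmax(dim=1)                  # back to class indices, shape (1, 3)
```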
+ """ + + order = 1 + + @register_to_config + def __init__( + self, + num_vec_classes: int, + num_train_timesteps: int = 100, + alpha_cum_start: float = 0.99999, + alpha_cum_end: float = 0.000009, + gamma_cum_start: float = 0.000009, + gamma_cum_end: float = 0.99999, + ): + self.num_embed = num_vec_classes + + # By convention, the index for the mask class is the last class index + self.mask_class = self.num_embed - 1 + + at, att = alpha_schedules(num_train_timesteps, alpha_cum_start=alpha_cum_start, alpha_cum_end=alpha_cum_end) + ct, ctt = gamma_schedules(num_train_timesteps, gamma_cum_start=gamma_cum_start, gamma_cum_end=gamma_cum_end) + + num_non_mask_classes = self.num_embed - 1 + bt = (1 - at - ct) / num_non_mask_classes + btt = (1 - att - ctt) / num_non_mask_classes + + at = torch.tensor(at.astype("float64")) + bt = torch.tensor(bt.astype("float64")) + ct = torch.tensor(ct.astype("float64")) + log_at = torch.log(at) + log_bt = torch.log(bt) + log_ct = torch.log(ct) + + att = torch.tensor(att.astype("float64")) + btt = torch.tensor(btt.astype("float64")) + ctt = torch.tensor(ctt.astype("float64")) + log_cumprod_at = torch.log(att) + log_cumprod_bt = torch.log(btt) + log_cumprod_ct = torch.log(ctt) + + self.log_at = log_at.float() + self.log_bt = log_bt.float() + self.log_ct = log_ct.float() + self.log_cumprod_at = log_cumprod_at.float() + self.log_cumprod_bt = log_cumprod_bt.float() + self.log_cumprod_ct = log_cumprod_ct.float() + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps and diffusion process parameters (alpha, beta, gamma) should be moved + to. + """ + self.num_inference_steps = num_inference_steps + timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps).to(device) + + self.log_at = self.log_at.to(device) + self.log_bt = self.log_bt.to(device) + self.log_ct = self.log_ct.to(device) + self.log_cumprod_at = self.log_cumprod_at.to(device) + self.log_cumprod_bt = self.log_cumprod_bt.to(device) + self.log_cumprod_ct = self.log_cumprod_ct.to(device) + + def step( + self, + model_output: torch.FloatTensor, + timestep: torch.long, + sample: torch.LongTensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[VQDiffusionSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by the reverse transition distribution. See + [`~VQDiffusionScheduler.q_posterior`] for more details about how the distribution is computer. + + Args: + log_p_x_0: (`torch.FloatTensor` of shape `(batch size, num classes - 1, num latent pixels)`): + The log probabilities for the predicted classes of the initial latent pixels. Does not include a + prediction for the masked class as the initial unnoised image cannot be masked. + t (`torch.long`): + The timestep that determines which transition matrices are used. + x_t (`torch.LongTensor` of shape `(batch size, num latent pixels)`): + The classes of each latent pixel at time `t`. 
+ generator (`torch.Generator`, or `None`): + A random number generator for the noise applied to `p(x_{t-1} | x_t)` before it is sampled from. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_vq_diffusion.VQDiffusionSchedulerOutput`] or + `tuple`. + + Returns: + [`~schedulers.scheduling_vq_diffusion.VQDiffusionSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_vq_diffusion.VQDiffusionSchedulerOutput`] is + returned, otherwise a tuple is returned where the first element is the sample tensor. + """ + if timestep == 0: + log_p_x_t_min_1 = model_output + else: + log_p_x_t_min_1 = self.q_posterior(model_output, sample, timestep) + + log_p_x_t_min_1 = gumbel_noised(log_p_x_t_min_1, generator) + + x_t_min_1 = log_p_x_t_min_1.argmax(dim=1) + + if not return_dict: + return (x_t_min_1,) + + return VQDiffusionSchedulerOutput(prev_sample=x_t_min_1) + + def q_posterior(self, log_p_x_0, x_t, t): + """ + Calculates the log probabilities for the predicted classes of the image at timestep `t-1`: + + ``` + p(x_{t-1} | x_t) = sum( q(x_t | x_{t-1}) * q(x_{t-1} | x_0) * p(x_0) / q(x_t | x_0) ) + ``` + + Args: + log_p_x_0 (`torch.FloatTensor` of shape `(batch size, num classes - 1, num latent pixels)`): + The log probabilities for the predicted classes of the initial latent pixels. Does not include a + prediction for the masked class as the initial unnoised image cannot be masked. + x_t (`torch.LongTensor` of shape `(batch size, num latent pixels)`): + The classes of each latent pixel at time `t`. + t (`torch.Long`): + The timestep that determines which transition matrix is used. + + Returns: + `torch.FloatTensor` of shape `(batch size, num classes, num latent pixels)`: + The log probabilities for the predicted classes of the image at timestep `t-1`. + """ + log_onehot_x_t = index_to_log_onehot(x_t, self.num_embed) + + log_q_x_t_given_x_0 = self.log_Q_t_transitioning_to_known_class( + t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=True + ) + + log_q_t_given_x_t_min_1 = self.log_Q_t_transitioning_to_known_class( + t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=False + ) + + # p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) ... p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + # . . . + # . . . + # . . . + # p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) ... p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) + q = log_p_x_0 - log_q_x_t_given_x_0 + + # sum_0 = p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + ... + p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}), ... , + # sum_n = p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + ... + p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) + q_log_sum_exp = torch.logsumexp(q, dim=1, keepdim=True) + + # p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0 ... p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n + # . . . + # . . . + # . . . + # p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0 ... p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n + q = q - q_log_sum_exp + + # (p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1} ... (p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1} + # . . . + # . . . + # . . . + # (p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1} ... (p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1} + # c_cumulative_{t-1} ... 
c_cumulative_{t-1} + q = self.apply_cumulative_transitions(q, t - 1) + + # ((p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_0) * sum_0 ... ((p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_0) * sum_n + # . . . + # . . . + # . . . + # ((p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_{k-1}) * sum_0 ... ((p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_{k-1}) * sum_n + # c_cumulative_{t-1} * q(x_t | x_{t-1}=C_k) * sum_0 ... c_cumulative_{t-1} * q(x_t | x_{t-1}=C_k) * sum_0 + log_p_x_t_min_1 = q + log_q_t_given_x_t_min_1 + q_log_sum_exp + + # For each column, there are two possible cases. + # + # Where: + # - sum(p_n(x_0))) is summing over all classes for x_0 + # - C_i is the class transitioning from (not to be confused with c_t and c_cumulative_t being used for gamma's) + # - C_j is the class transitioning to + # + # 1. x_t is masked i.e. x_t = c_k + # + # Simplifying the expression, the column vector is: + # . + # . + # . + # (c_t / c_cumulative_t) * (a_cumulative_{t-1} * p_n(x_0 = C_i | x_t) + b_cumulative_{t-1} * sum(p_n(x_0))) + # . + # . + # . + # (c_cumulative_{t-1} / c_cumulative_t) * sum(p_n(x_0)) + # + # From equation (11) stated in terms of forward probabilities, the last row is trivially verified. + # + # For the other rows, we can state the equation as ... + # + # (c_t / c_cumulative_t) * [b_cumulative_{t-1} * p(x_0=c_0) + ... + (a_cumulative_{t-1} + b_cumulative_{t-1}) * p(x_0=C_i) + ... + b_cumulative_{k-1} * p(x_0=c_{k-1})] + # + # This verifies the other rows. + # + # 2. x_t is not masked + # + # Simplifying the expression, there are two cases for the rows of the column vector, where C_j = C_i and where C_j != C_i: + # . + # . + # . + # C_j != C_i: b_t * ((b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_0) + ... + ((a_cumulative_{t-1} + b_cumulative_{t-1}) / b_cumulative_t) * p_n(x_0 = C_i) + ... + (b_cumulative_{t-1} / (a_cumulative_t + b_cumulative_t)) * p_n(c_0=C_j) + ... + (b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_{k-1})) + # . + # . + # . + # C_j = C_i: (a_t + b_t) * ((b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_0) + ... + ((a_cumulative_{t-1} + b_cumulative_{t-1}) / (a_cumulative_t + b_cumulative_t)) * p_n(x_0 = C_i = C_j) + ... + (b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_{k-1})) + # . + # . + # . + # 0 + # + # The last row is trivially verified. The other rows can be verified by directly expanding equation (11) stated in terms of forward probabilities. + return log_p_x_t_min_1 + + def log_Q_t_transitioning_to_known_class( + self, *, t: torch.int, x_t: torch.LongTensor, log_onehot_x_t: torch.FloatTensor, cumulative: bool + ): + """ + Calculates the log probabilities of the rows from the (cumulative or non-cumulative) transition matrix for each + latent pixel in `x_t`. + + Args: + t (`torch.Long`): + The timestep that determines which transition matrix is used. + x_t (`torch.LongTensor` of shape `(batch size, num latent pixels)`): + The classes of each latent pixel at time `t`. + log_onehot_x_t (`torch.FloatTensor` of shape `(batch size, num classes, num latent pixels)`): + The log one-hot vectors of `x_t`. + cumulative (`bool`): + If cumulative is `False`, the single step transition matrix `t-1`->`t` is used. 
If cumulative is + `True`, the cumulative transition matrix `0`->`t` is used. + + Returns: + `torch.FloatTensor` of shape `(batch size, num classes - 1, num latent pixels)`: + Each _column_ of the returned matrix is a _row_ of log probabilities of the complete probability + transition matrix. + + When non cumulative, returns `self.num_classes - 1` rows because the initial latent pixel cannot be + masked. + + Where: + - `q_n` is the probability distribution for the forward process of the `n`th latent pixel. + - C_0 is a class of a latent pixel embedding + - C_k is the class of the masked latent pixel + + non-cumulative result (omitting logarithms): + ``` + q_0(x_t | x_{t-1} = C_0) ... q_n(x_t | x_{t-1} = C_0) + . . . + . . . + . . . + q_0(x_t | x_{t-1} = C_k) ... q_n(x_t | x_{t-1} = C_k) + ``` + + cumulative result (omitting logarithms): + ``` + q_0_cumulative(x_t | x_0 = C_0) ... q_n_cumulative(x_t | x_0 = C_0) + . . . + . . . + . . . + q_0_cumulative(x_t | x_0 = C_{k-1}) ... q_n_cumulative(x_t | x_0 = C_{k-1}) + ``` + """ + if cumulative: + a = self.log_cumprod_at[t] + b = self.log_cumprod_bt[t] + c = self.log_cumprod_ct[t] + else: + a = self.log_at[t] + b = self.log_bt[t] + c = self.log_ct[t] + + if not cumulative: + # The values in the onehot vector can also be used as the logprobs for transitioning + # from masked latent pixels. If we are not calculating the cumulative transitions, + # we need to save these vectors to be re-appended to the final matrix so the values + # aren't overwritten. + # + # `P(x_t!=mask|x_{t-1=mask}) = 0` and 0 will be the value of the last row of the onehot vector + # if x_t is not masked + # + # `P(x_t=mask|x_{t-1=mask}) = 1` and 1 will be the value of the last row of the onehot vector + # if x_t is masked + log_onehot_x_t_transitioning_from_masked = log_onehot_x_t[:, -1, :].unsqueeze(1) + + # `index_to_log_onehot` will add onehot vectors for masked pixels, + # so the default one hot matrix has one too many rows. See the doc string + # for an explanation of the dimensionality of the returned matrix. + log_onehot_x_t = log_onehot_x_t[:, :-1, :] + + # this is a cheeky trick to produce the transition probabilities using log one-hot vectors. + # + # Don't worry about what values this sets in the columns that mark transitions + # to masked latent pixels. They are overwrote later with the `mask_class_mask`. + # + # Looking at the below logspace formula in non-logspace, each value will evaluate to either + # `1 * a + b = a + b` where `log_Q_t` has the one hot value in the column + # or + # `0 * a + b = b` where `log_Q_t` has the 0 values in the column. + # + # See equation 7 for more details. 
+ log_Q_t = (log_onehot_x_t + a).logaddexp(b) + + # The whole column of each masked pixel is `c` + mask_class_mask = x_t == self.mask_class + mask_class_mask = mask_class_mask.unsqueeze(1).expand(-1, self.num_embed - 1, -1) + log_Q_t[mask_class_mask] = c + + if not cumulative: + log_Q_t = torch.cat((log_Q_t, log_onehot_x_t_transitioning_from_masked), dim=1) + + return log_Q_t + + def apply_cumulative_transitions(self, q, t): + bsz = q.shape[0] + a = self.log_cumprod_at[t] + b = self.log_cumprod_bt[t] + c = self.log_cumprod_ct[t] + + num_latent_pixels = q.shape[2] + c = c.expand(bsz, 1, num_latent_pixels) + + q = (q + a).logaddexp(b) + q = torch.cat((q, c), dim=1) + + return q diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/training_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/training_utils.py new file mode 100644 index 000000000..25e02a3d1 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/training_utils.py @@ -0,0 +1,453 @@ +import contextlib +import copy +import random +from typing import Any, Dict, Iterable, List, Optional, Union + +import numpy as np +import torch + +from .models import UNet2DConditionModel +from .utils import ( + convert_state_dict_to_diffusers, + convert_state_dict_to_peft, + deprecate, + is_peft_available, + is_torch_npu_available, + is_torchvision_available, + is_transformers_available, +) + + +if is_transformers_available(): + import transformers + +if is_peft_available(): + from peft import set_peft_model_state_dict + +if is_torchvision_available(): + from torchvision import transforms + +if is_torch_npu_available(): + import torch_npu # noqa: F401 + + +def set_seed(seed: int): + """ + Args: + Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`. + seed (`int`): The seed to set. + """ + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if is_torch_npu_available(): + torch.npu.manual_seed_all(seed) + else: + torch.cuda.manual_seed_all(seed) + # ^^ safe to call this function even if cuda is not available + + +def compute_snr(noise_scheduler, timesteps): + """ + Computes SNR as per + https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849 + """ + alphas_cumprod = noise_scheduler.alphas_cumprod + sqrt_alphas_cumprod = alphas_cumprod**0.5 + sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 + + # Expand the tensors. + # Adapted from https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L1026 + sqrt_alphas_cumprod = sqrt_alphas_cumprod.to(device=timesteps.device)[timesteps].float() + while len(sqrt_alphas_cumprod.shape) < len(timesteps.shape): + sqrt_alphas_cumprod = sqrt_alphas_cumprod[..., None] + alpha = sqrt_alphas_cumprod.expand(timesteps.shape) + + sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod.to(device=timesteps.device)[timesteps].float() + while len(sqrt_one_minus_alphas_cumprod.shape) < len(timesteps.shape): + sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod[..., None] + sigma = sqrt_one_minus_alphas_cumprod.expand(timesteps.shape) + + # Compute SNR. 
+ snr = (alpha / sigma) ** 2 + return snr + + +def resolve_interpolation_mode(interpolation_type: str): + """ + Maps a string describing an interpolation function to the corresponding torchvision `InterpolationMode` enum. The + full list of supported enums is documented at + https://pytorch.org/vision/0.9/transforms.html#torchvision.transforms.functional.InterpolationMode. + + Args: + interpolation_type (`str`): + A string describing an interpolation method. Currently, `bilinear`, `bicubic`, `box`, `nearest`, + `nearest_exact`, `hamming`, and `lanczos` are supported, corresponding to the supported interpolation modes + in torchvision. + + Returns: + `torchvision.transforms.InterpolationMode`: an `InterpolationMode` enum used by torchvision's `resize` + transform. + """ + if not is_torchvision_available(): + raise ImportError( + "Please make sure to install `torchvision` to be able to use the `resolve_interpolation_mode()` function." + ) + + if interpolation_type == "bilinear": + interpolation_mode = transforms.InterpolationMode.BILINEAR + elif interpolation_type == "bicubic": + interpolation_mode = transforms.InterpolationMode.BICUBIC + elif interpolation_type == "box": + interpolation_mode = transforms.InterpolationMode.BOX + elif interpolation_type == "nearest": + interpolation_mode = transforms.InterpolationMode.NEAREST + elif interpolation_type == "nearest_exact": + interpolation_mode = transforms.InterpolationMode.NEAREST_EXACT + elif interpolation_type == "hamming": + interpolation_mode = transforms.InterpolationMode.HAMMING + elif interpolation_type == "lanczos": + interpolation_mode = transforms.InterpolationMode.LANCZOS + else: + raise ValueError( + f"The given interpolation mode {interpolation_type} is not supported. Currently supported interpolation" + f" modes are `bilinear`, `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos`." + ) + + return interpolation_mode + + +def unet_lora_state_dict(unet: UNet2DConditionModel) -> Dict[str, torch.Tensor]: + r""" + Returns: + A state dict containing just the LoRA parameters. + """ + lora_state_dict = {} + + for name, module in unet.named_modules(): + if hasattr(module, "set_lora_layer"): + lora_layer = getattr(module, "lora_layer") + if lora_layer is not None: + current_lora_layer_sd = lora_layer.state_dict() + for lora_layer_matrix_name, lora_param in current_lora_layer_sd.items(): + # The matrix name can either be "down" or "up". + lora_state_dict[f"{name}.lora.{lora_layer_matrix_name}"] = lora_param + + return lora_state_dict + + +def cast_training_params(model: Union[torch.nn.Module, List[torch.nn.Module]], dtype=torch.float32): + if not isinstance(model, list): + model = [model] + for m in model: + for param in m.parameters(): + # only upcast trainable parameters into fp32 + if param.requires_grad: + param.data = param.to(dtype) + + +def _set_state_dict_into_text_encoder( + lora_state_dict: Dict[str, torch.Tensor], prefix: str, text_encoder: torch.nn.Module +): + """ + Sets the `lora_state_dict` into `text_encoder` coming from `transformers`. + + Args: + lora_state_dict: The state dictionary to be set. + prefix: String identifier to retrieve the portion of the state dict that belongs to `text_encoder`. + text_encoder: Where the `lora_state_dict` is to be set. 
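`compute_snr` above returns the per-timestep signal-to-noise ratio, i.e. `alphas_cumprod / (1 - alphas_cumprod)`. One common use, not part of this patch, is Min-SNR-gamma loss weighting in the training scripts; a hedged sketch of that weighting for epsilon-prediction, with made-up SNR values:

```python
import torch

def min_snr_weights(snr: torch.Tensor, gamma: float = 5.0) -> torch.Tensor:
    # Min-SNR-gamma: clamp the SNR at gamma, then divide by the SNR
    # (the epsilon-prediction form of the weighting).
    return torch.clamp(snr, max=gamma) / snr

snr = torch.tensor([0.1, 1.0, 25.0])      # e.g. the output of compute_snr(noise_scheduler, timesteps)
weights = min_snr_weights(snr)            # tensor([1.0, 1.0, 0.2])
```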
+ """ + + text_encoder_state_dict = { + f'{k.replace(prefix, "")}': v for k, v in lora_state_dict.items() if k.startswith(prefix) + } + text_encoder_state_dict = convert_state_dict_to_peft(convert_state_dict_to_diffusers(text_encoder_state_dict)) + set_peft_model_state_dict(text_encoder, text_encoder_state_dict, adapter_name="default") + + +# Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14 +class EMAModel: + """ + Exponential Moving Average of models weights + """ + + def __init__( + self, + parameters: Iterable[torch.nn.Parameter], + decay: float = 0.9999, + min_decay: float = 0.0, + update_after_step: int = 0, + use_ema_warmup: bool = False, + inv_gamma: Union[float, int] = 1.0, + power: Union[float, int] = 2 / 3, + model_cls: Optional[Any] = None, + model_config: Dict[str, Any] = None, + **kwargs, + ): + """ + Args: + parameters (Iterable[torch.nn.Parameter]): The parameters to track. + decay (float): The decay factor for the exponential moving average. + min_decay (float): The minimum decay factor for the exponential moving average. + update_after_step (int): The number of steps to wait before starting to update the EMA weights. + use_ema_warmup (bool): Whether to use EMA warmup. + inv_gamma (float): + Inverse multiplicative factor of EMA warmup. Default: 1. Only used if `use_ema_warmup` is True. + power (float): Exponential factor of EMA warmup. Default: 2/3. Only used if `use_ema_warmup` is True. + device (Optional[Union[str, torch.device]]): The device to store the EMA weights on. If None, the EMA + weights will be stored on CPU. + + @crowsonkb's notes on EMA Warmup: + If gamma=1 and power=1, implements a simple average. gamma=1, power=2/3 are good values for models you plan + to train for a million or more steps (reaches decay factor 0.999 at 31.6K steps, 0.9999 at 1M steps), + gamma=1, power=3/4 for models you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 + at 215.4k steps). + """ + + if isinstance(parameters, torch.nn.Module): + deprecation_message = ( + "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. " + "Please pass the parameters of the module instead." + ) + deprecate( + "passing a `torch.nn.Module` to `ExponentialMovingAverage`", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + parameters = parameters.parameters() + + # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility + use_ema_warmup = True + + if kwargs.get("max_value", None) is not None: + deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead." + deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False) + decay = kwargs["max_value"] + + if kwargs.get("min_value", None) is not None: + deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead." + deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False) + min_decay = kwargs["min_value"] + + parameters = list(parameters) + self.shadow_params = [p.clone().detach() for p in parameters] + + if kwargs.get("device", None) is not None: + deprecation_message = "The `device` argument is deprecated. Please use `to` instead." 
+ deprecate("device", "1.0.0", deprecation_message, standard_warn=False) + self.to(device=kwargs["device"]) + + self.temp_stored_params = None + + self.decay = decay + self.min_decay = min_decay + self.update_after_step = update_after_step + self.use_ema_warmup = use_ema_warmup + self.inv_gamma = inv_gamma + self.power = power + self.optimization_step = 0 + self.cur_decay_value = None # set in `step()` + + self.model_cls = model_cls + self.model_config = model_config + + @classmethod + def from_pretrained(cls, path, model_cls) -> "EMAModel": + _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True) + model = model_cls.from_pretrained(path) + + ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config) + + ema_model.load_state_dict(ema_kwargs) + return ema_model + + def save_pretrained(self, path): + if self.model_cls is None: + raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.") + + if self.model_config is None: + raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.") + + model = self.model_cls.from_config(self.model_config) + state_dict = self.state_dict() + state_dict.pop("shadow_params", None) + + model.register_to_config(**state_dict) + self.copy_to(model.parameters()) + model.save_pretrained(path) + + def get_decay(self, optimization_step: int) -> float: + """ + Compute the decay factor for the exponential moving average. + """ + step = max(0, optimization_step - self.update_after_step - 1) + + if step <= 0: + return 0.0 + + if self.use_ema_warmup: + cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power + else: + cur_decay_value = (1 + step) / (10 + step) + + cur_decay_value = min(cur_decay_value, self.decay) + # make sure decay is not smaller than min_decay + cur_decay_value = max(cur_decay_value, self.min_decay) + return cur_decay_value + + @torch.no_grad() + def step(self, parameters: Iterable[torch.nn.Parameter]): + if isinstance(parameters, torch.nn.Module): + deprecation_message = ( + "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. " + "Please pass the parameters of the module instead." + ) + deprecate( + "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + parameters = parameters.parameters() + + parameters = list(parameters) + + self.optimization_step += 1 + + # Compute the decay factor for the exponential moving average. + decay = self.get_decay(self.optimization_step) + self.cur_decay_value = decay + one_minus_decay = 1 - decay + + context_manager = contextlib.nullcontext + if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled(): + import deepspeed + + for s_param, param in zip(self.shadow_params, parameters): + if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled(): + context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None) + + with context_manager(): + if param.requires_grad: + s_param.sub_(one_minus_decay * (s_param - param)) + else: + s_param.copy_(param) + + def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None: + """ + Copy current averaged parameters into given collection of parameters. + + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + updated with the stored moving averages. If `None`, the parameters with which this + `ExponentialMovingAverage` was initialized will be used. 
+ """ + parameters = list(parameters) + for s_param, param in zip(self.shadow_params, parameters): + param.data.copy_(s_param.to(param.device).data) + + def to(self, device=None, dtype=None) -> None: + r"""Move internal buffers of the ExponentialMovingAverage to `device`. + + Args: + device: like `device` argument to `torch.Tensor.to` + """ + # .to() on the tensors handles None correctly + self.shadow_params = [ + p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device) + for p in self.shadow_params + ] + + def state_dict(self) -> dict: + r""" + Returns the state of the ExponentialMovingAverage as a dict. This method is used by accelerate during + checkpointing to save the ema state dict. + """ + # Following PyTorch conventions, references to tensors are returned: + # "returns a reference to the state and not its copy!" - + # https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict + return { + "decay": self.decay, + "min_decay": self.min_decay, + "optimization_step": self.optimization_step, + "update_after_step": self.update_after_step, + "use_ema_warmup": self.use_ema_warmup, + "inv_gamma": self.inv_gamma, + "power": self.power, + "shadow_params": self.shadow_params, + } + + def store(self, parameters: Iterable[torch.nn.Parameter]) -> None: + r""" + Args: + Save the current parameters for restoring later. + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + temporarily stored. + """ + self.temp_stored_params = [param.detach().cpu().clone() for param in parameters] + + def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None: + r""" + Args: + Restore the parameters stored with the `store` method. Useful to validate the model with EMA parameters without: + affecting the original optimization process. Store the parameters before the `copy_to()` method. After + validation (or model saving), use this to restore the former parameters. + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + updated with the stored parameters. If `None`, the parameters with which this + `ExponentialMovingAverage` was initialized will be used. + """ + if self.temp_stored_params is None: + raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`") + for c_param, param in zip(self.temp_stored_params, parameters): + param.data.copy_(c_param.data) + + # Better memory-wise. + self.temp_stored_params = None + + def load_state_dict(self, state_dict: dict) -> None: + r""" + Args: + Loads the ExponentialMovingAverage state. This method is used by accelerate during checkpointing to save the + ema state dict. + state_dict (dict): EMA state. Should be an object returned + from a call to :meth:`state_dict`. 
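The `store` / `copy_to` / `restore` trio above is what makes evaluation with EMA weights non-destructive. A sketch of the usual training-loop pattern, assuming the `EMAModel` defined in this file is importable as `diffusers.training_utils.EMAModel`; the model, optimizer, and step count are placeholders:

```python
import torch
from diffusers.training_utils import EMAModel  # the class added above

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
ema = EMAModel(model.parameters(), decay=0.999)

for _ in range(10):
    loss = model(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.step(model.parameters())       # update the shadow weights after each optimizer step

ema.store(model.parameters())          # stash the raw training weights
ema.copy_to(model.parameters())        # evaluate / save with the EMA weights
ema.restore(model.parameters())        # put the training weights back
```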
+ """ + # deepcopy, to be consistent with module API + state_dict = copy.deepcopy(state_dict) + + self.decay = state_dict.get("decay", self.decay) + if self.decay < 0.0 or self.decay > 1.0: + raise ValueError("Decay must be between 0 and 1") + + self.min_decay = state_dict.get("min_decay", self.min_decay) + if not isinstance(self.min_decay, float): + raise ValueError("Invalid min_decay") + + self.optimization_step = state_dict.get("optimization_step", self.optimization_step) + if not isinstance(self.optimization_step, int): + raise ValueError("Invalid optimization_step") + + self.update_after_step = state_dict.get("update_after_step", self.update_after_step) + if not isinstance(self.update_after_step, int): + raise ValueError("Invalid update_after_step") + + self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup) + if not isinstance(self.use_ema_warmup, bool): + raise ValueError("Invalid use_ema_warmup") + + self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma) + if not isinstance(self.inv_gamma, (float, int)): + raise ValueError("Invalid inv_gamma") + + self.power = state_dict.get("power", self.power) + if not isinstance(self.power, (float, int)): + raise ValueError("Invalid power") + + shadow_params = state_dict.get("shadow_params", None) + if shadow_params is not None: + self.shadow_params = shadow_params + if not isinstance(self.shadow_params, list): + raise ValueError("shadow_params must be a list") + if not all(isinstance(p, torch.Tensor) for p in self.shadow_params): + raise ValueError("shadow_params must all be Tensors") diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/__init__.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/__init__.py new file mode 100644 index 000000000..4e2f07f2b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/__init__.py @@ -0,0 +1,124 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os + +from packaging import version + +from .. 
import __version__ +from .constants import ( + CONFIG_NAME, + DEPRECATED_REVISION_ARGS, + DIFFUSERS_DYNAMIC_MODULE_NAME, + FLAX_WEIGHTS_NAME, + HF_MODULES_CACHE, + HUGGINGFACE_CO_RESOLVE_ENDPOINT, + MIN_PEFT_VERSION, + ONNX_EXTERNAL_WEIGHTS_NAME, + ONNX_WEIGHTS_NAME, + SAFETENSORS_FILE_EXTENSION, + SAFETENSORS_WEIGHTS_NAME, + USE_PEFT_BACKEND, + WEIGHTS_NAME, +) +from .deprecation_utils import deprecate +from .doc_utils import replace_example_docstring +from .dynamic_modules_utils import get_class_from_dynamic_module +from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video +from .hub_utils import ( + PushToHubMixin, + _add_variant, + _get_model_file, + extract_commit_hash, + http_user_agent, +) +from .import_utils import ( + BACKENDS_MAPPING, + DIFFUSERS_SLOW_IMPORT, + ENV_VARS_TRUE_AND_AUTO_VALUES, + ENV_VARS_TRUE_VALUES, + USE_JAX, + USE_TF, + USE_TORCH, + DummyObject, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_accelerate_available, + is_accelerate_version, + is_bs4_available, + is_flax_available, + is_ftfy_available, + is_inflect_available, + is_invisible_watermark_available, + is_k_diffusion_available, + is_k_diffusion_version, + is_librosa_available, + is_note_seq_available, + is_onnx_available, + is_peft_available, + is_scipy_available, + is_tensorboard_available, + is_torch_available, + is_torch_npu_available, + is_torch_version, + is_torch_xla_available, + is_torchsde_available, + is_torchvision_available, + is_transformers_available, + is_transformers_version, + is_unidecode_available, + is_wandb_available, + is_xformers_available, + requires_backends, +) +from .loading_utils import load_image +from .logging import get_logger +from .outputs import BaseOutput +from .peft_utils import ( + check_peft_version, + delete_adapter_layers, + get_adapter_name, + get_peft_kwargs, + recurse_remove_peft_layers, + scale_lora_layers, + set_adapter_layers, + set_weights_and_activate_adapters, + unscale_lora_layers, +) +from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil +from .state_dict_utils import ( + convert_all_state_dict_to_peft, + convert_state_dict_to_diffusers, + convert_state_dict_to_kohya, + convert_state_dict_to_peft, + convert_unet_state_dict_to_peft, +) + + +logger = get_logger(__name__) + + +def check_min_version(min_version): + if version.parse(__version__) < version.parse(min_version): + if "dev" in min_version: + error_message = ( + "This example requires a source install from HuggingFace diffusers (see " + "`https://huggingface.co/docs/diffusers/installation#install-from-source`)," + ) + else: + error_message = f"This example requires a minimum version of {min_version}," + error_message += f" but the version found is {__version__}.\n" + raise ImportError(error_message) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/accelerate_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/accelerate_utils.py new file mode 100644 index 000000000..99a8b3a47 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/accelerate_utils.py @@ -0,0 +1,48 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Accelerate utilities: Utilities related to accelerate +""" + +from packaging import version + +from .import_utils import is_accelerate_available + + +if is_accelerate_available(): + import accelerate + + +def apply_forward_hook(method): + """ + Decorator that applies a registered CpuOffload hook to an arbitrary function rather than `forward`. This is useful + for cases where a PyTorch module provides functions other than `forward` that should trigger a move to the + appropriate acceleration device. This is the case for `encode` and `decode` in [`AutoencoderKL`]. + + This decorator looks inside the internal `_hf_hook` property to find a registered offload hook. + + :param method: The method to decorate. This method should be a method of a PyTorch module. + """ + if not is_accelerate_available(): + return method + accelerate_version = version.parse(accelerate.__version__).base_version + if version.parse(accelerate_version) < version.parse("0.17.0"): + return method + + def wrapper(self, *args, **kwargs): + if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"): + self._hf_hook.pre_forward(self) + return method(self, *args, **kwargs) + + return wrapper diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/constants.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/constants.py new file mode 100644 index 000000000..bc4268a32 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/constants.py @@ -0,0 +1,55 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
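`apply_forward_hook` above lets accelerate's offload hook fire for entry points other than `forward`; in the library it decorates `encode` / `decode` on the autoencoder. A minimal sketch of decorating a custom method the same way, assuming the decorator is importable as `diffusers.utils.accelerate_utils.apply_forward_hook` (the module and method below are hypothetical):

```python
import torch
from diffusers.utils.accelerate_utils import apply_forward_hook  # decorator defined above

class TinyCodec(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(4, 4)

    @apply_forward_hook
    def encode(self, x: torch.Tensor) -> torch.Tensor:
        # If accelerate attached an offload hook (`_hf_hook`), its pre_forward
        # runs before this body, moving the module to the execution device.
        return self.proj(x)

codec = TinyCodec()
out = codec.encode(torch.randn(2, 4))
```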
+import importlib +import os + +from huggingface_hub.constants import HF_HOME +from packaging import version + +from ..dependency_versions_check import dep_version_check +from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available + + +MIN_PEFT_VERSION = "0.6.0" +MIN_TRANSFORMERS_VERSION = "4.34.0" +_CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES + + +CONFIG_NAME = "config.json" +WEIGHTS_NAME = "diffusion_pytorch_model.bin" +FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack" +ONNX_WEIGHTS_NAME = "model.onnx" +SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors" +SAFETENSORS_FILE_EXTENSION = "safetensors" +ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb" +HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") +DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules" +HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(HF_HOME, "modules")) +DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"] + +# Below should be `True` if the current version of `peft` and `transformers` are compatible with +# PEFT backend. Will automatically fall back to PEFT backend if the correct versions of the libraries are +# available. +# For PEFT it is has to be greater than or equal to 0.6.0 and for transformers it has to be greater than or equal to 4.34.0. +_required_peft_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("peft")).base_version +) >= version.parse(MIN_PEFT_VERSION) +_required_transformers_version = is_transformers_available() and version.parse( + version.parse(importlib.metadata.version("transformers")).base_version +) >= version.parse(MIN_TRANSFORMERS_VERSION) + +USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version + +if USE_PEFT_BACKEND and _CHECK_PEFT: + dep_version_check("peft") diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/deprecation_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/deprecation_utils.py new file mode 100644 index 000000000..f482deddd --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/deprecation_utils.py @@ -0,0 +1,49 @@ +import inspect +import warnings +from typing import Any, Dict, Optional, Union + +from packaging import version + + +def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2): + from .. import __version__ + + deprecated_kwargs = take_from + values = () + if not isinstance(args[0], tuple): + args = (args,) + + for attribute, version_name, message in args: + if version.parse(version.parse(__version__).base_version) >= version.parse(version_name): + raise ValueError( + f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" + f" version {__version__} is >= {version_name}" + ) + + warning = None + if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs: + values += (deprecated_kwargs.pop(attribute),) + warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}." + elif hasattr(deprecated_kwargs, attribute): + values += (getattr(deprecated_kwargs, attribute),) + warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." + elif deprecated_kwargs is None: + warning = f"`{attribute}` is deprecated and will be removed in version {version_name}." 
+ + if warning is not None: + warning = warning + " " if standard_warn else "" + warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel) + + if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0: + call_frame = inspect.getouterframes(inspect.currentframe())[1] + filename = call_frame.filename + line_number = call_frame.lineno + function = call_frame.function + key, value = next(iter(deprecated_kwargs.items())) + raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`") + + if len(values) == 0: + return + elif len(values) == 1: + return values[0] + return values diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/doc_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/doc_utils.py new file mode 100644 index 000000000..03b6b7a5a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/doc_utils.py @@ -0,0 +1,38 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Doc utilities: Utilities related to documentation +""" +import re + + +def replace_example_docstring(example_docstring): + def docstring_decorator(fn): + func_doc = fn.__doc__ + lines = func_doc.split("\n") + i = 0 + while i < len(lines) and re.search(r"^\s*Examples?:\s*$", lines[i]) is None: + i += 1 + if i < len(lines): + lines[i] = example_docstring + func_doc = "\n".join(lines) + else: + raise ValueError( + f"The function {fn} should have an empty 'Examples:' in its docstring as placeholder, " + f"current docstring is:\n{func_doc}" + ) + fn.__doc__ = func_doc + return fn + + return docstring_decorator diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_flax_and_transformers_objects.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_flax_and_transformers_objects.py new file mode 100644 index 000000000..5e65e5349 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_flax_and_transformers_objects.py @@ -0,0 +1,77 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
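`deprecate` above is the single helper the code base uses to phase out arguments and attributes: it pops the old keyword, emits a FutureWarning naming the removal version, and deliberately raises a ValueError once the library version reaches that removal version so stale shims cannot linger. A hedged sketch of a typical call site; `resize`, `size`, and `width` are made-up names for illustration, only `deprecate` itself comes from the file above:

    from diffusers.utils import deprecate

    def resize(image, size=None, **kwargs):
        # `width` is a hypothetical legacy keyword kept alive through `deprecate`:
        # if a caller still passes it, a FutureWarning fires and its value is
        # returned so it can be mapped onto the new `size` parameter.
        width = deprecate("width", "1.0.0", "Please use `size` instead of `width`.", take_from=kwargs)
        if width is not None:
            size = width
        return image, size  # a real helper would resize `image` to `size` here

Any other unrecognized keyword left in `kwargs` trips the TypeError built at the bottom of `deprecate`, which is how callers with a catch-all `**kwargs` still surface typos instead of silently ignoring them.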
+from ..utils import DummyObject, requires_backends + + +class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject): + _backends = ["flax", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + +class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject): + _backends = ["flax", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + +class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject): + _backends = ["flax", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + +class FlaxStableDiffusionPipeline(metaclass=DummyObject): + _backends = ["flax", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + +class FlaxStableDiffusionXLPipeline(metaclass=DummyObject): + _backends = ["flax", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_flax_objects.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_flax_objects.py new file mode 100644 index 000000000..5fa8dbc81 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_flax_objects.py @@ -0,0 +1,212 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
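All of the `dummy_*_objects.py` files in this patch repeat one autogenerated pattern: a placeholder class built on the `DummyObject` metaclass, so the public name still exists in the package namespace when its backend is missing and only raises a descriptive ImportError at the moment someone tries to use it. A condensed, self-contained sketch of that pattern; the two helpers are simplified re-implementations for illustration, the real `DummyObject` and `requires_backends` live in `import_utils.py`:

    def requires_backends(obj, backends):
        # Simplified: the real helper checks availability and builds a richer message.
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")

    class DummyObject(type):
        """Metaclass for placeholders: class-level attribute access re-raises the backend error."""
        def __getattr__(cls, name):
            requires_backends(cls, cls._backends)

    class FlaxStableDiffusionPipeline(metaclass=DummyObject):
        _backends = ["flax", "transformers"]

        def __init__(self, *args, **kwargs):
            requires_backends(self, self._backends)

    # Both failure modes give the same actionable message instead of an AttributeError:
    #   FlaxStableDiffusionPipeline()                        -> ImportError listing flax, transformers
    #   FlaxStableDiffusionPipeline.from_pretrained("repo")  -> ImportError via DummyObject.__getattr__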
+from ..utils import DummyObject, requires_backends + + +class FlaxControlNetModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxModelMixin(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxUNet2DConditionModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxAutoencoderKL(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxDiffusionPipeline(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxDDIMScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxDDPMScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxEulerDiscreteScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxKarrasVeScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class 
FlaxLMSDiscreteScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxPNDMScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxSchedulerMixin(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxScoreSdeVeScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_note_seq_objects.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_note_seq_objects.py new file mode 100644 index 000000000..c02d0b015 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_note_seq_objects.py @@ -0,0 +1,17 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class MidiProcessor(metaclass=DummyObject): + _backends = ["note_seq"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["note_seq"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["note_seq"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["note_seq"]) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_onnx_objects.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_onnx_objects.py new file mode 100644 index 000000000..bde5f6ad0 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_onnx_objects.py @@ -0,0 +1,17 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class OnnxRuntimeModel(metaclass=DummyObject): + _backends = ["onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["onnx"]) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_pt_objects.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_pt_objects.py new file mode 100644 index 000000000..14947848a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_pt_objects.py @@ -0,0 +1,1170 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class AsymmetricAutoencoderKL(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoencoderKL(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoencoderKLTemporalDecoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoencoderTiny(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ConsistencyDecoderVAE(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ControlNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class I2VGenXLUNet(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class Kandinsky3UNet(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def 
from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ModelMixin(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class MotionAdapter(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class MultiAdapter(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class PriorTransformer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class T2IAdapter(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class T5FilmDecoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class Transformer2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNet1DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNet2DConditionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNet2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, 
*args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNet3DConditionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNetMotionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNetSpatioTemporalConditionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UVit2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class VQModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +def get_constant_schedule(*args, **kwargs): + requires_backends(get_constant_schedule, ["torch"]) + + +def get_constant_schedule_with_warmup(*args, **kwargs): + requires_backends(get_constant_schedule_with_warmup, ["torch"]) + + +def get_cosine_schedule_with_warmup(*args, **kwargs): + requires_backends(get_cosine_schedule_with_warmup, ["torch"]) + + +def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs): + requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"]) + + +def get_linear_schedule_with_warmup(*args, **kwargs): + requires_backends(get_linear_schedule_with_warmup, ["torch"]) + + +def get_polynomial_decay_schedule_with_warmup(*args, **kwargs): + requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"]) + + +def get_scheduler(*args, **kwargs): + requires_backends(get_scheduler, ["torch"]) + + +class AudioPipelineOutput(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoPipelineForImage2Image(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoPipelineForInpainting(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def 
from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoPipelineForText2Image(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class BlipDiffusionControlNetPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class BlipDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class CLIPImageProjection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ConsistencyModelPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DanceDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDIMPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDPMPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DiffusionPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DiTPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, 
["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ImagePipelineOutput(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class KarrasVePipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class LDMPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class LDMSuperResolutionPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class PNDMPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class RePaintPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ScoreSdeVePipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class StableDiffusionMixin(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AmusedScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class CMStochasticIterativeScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): 
+ requires_backends(cls, ["torch"]) + + +class DDIMInverseScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDIMParallelScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDIMScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDPMParallelScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDPMScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDPMWuerstchenScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DEISMultistepScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DPMSolverMultistepInverseScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DPMSolverMultistepScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DPMSolverSinglestepScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, 
["torch"]) + + +class EDMDPMSolverMultistepScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class EDMEulerScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class EulerAncestralDiscreteScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class EulerDiscreteScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class HeunDiscreteScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class IPNDMScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class KarrasVeScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class KDPM2AncestralDiscreteScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class KDPM2DiscreteScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class LCMScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class 
PNDMScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class RePaintScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class SASolverScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class SchedulerMixin(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ScoreSdeVeScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class TCDScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UnCLIPScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UniPCMultistepScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class VQDiffusionScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class EMAModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) diff --git 
a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_librosa_objects.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_librosa_objects.py new file mode 100644 index 000000000..2088bc4a7 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_librosa_objects.py @@ -0,0 +1,32 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class AudioDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "librosa"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "librosa"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "librosa"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "librosa"]) + + +class Mel(metaclass=DummyObject): + _backends = ["torch", "librosa"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "librosa"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "librosa"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "librosa"]) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_scipy_objects.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_scipy_objects.py new file mode 100644 index 000000000..a1ff25863 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_scipy_objects.py @@ -0,0 +1,17 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class LMSDiscreteScheduler(metaclass=DummyObject): + _backends = ["torch", "scipy"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "scipy"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "scipy"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "scipy"]) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_torchsde_objects.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_torchsde_objects.py new file mode 100644 index 000000000..a81bbb316 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_torchsde_objects.py @@ -0,0 +1,17 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class DPMSolverSDEScheduler(metaclass=DummyObject): + _backends = ["torch", "torchsde"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "torchsde"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "torchsde"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "torchsde"]) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py new file mode 100644 index 000000000..2ab00c54c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py @@ -0,0 +1,32 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class StableDiffusionKDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "k_diffusion"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "k_diffusion"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "k_diffusion"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "k_diffusion"]) + + +class StableDiffusionXLKDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "k_diffusion"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "k_diffusion"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "k_diffusion"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "k_diffusion"]) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py new file mode 100644 index 000000000..b7afad822 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py @@ -0,0 +1,92 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + +class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + +class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject): + _backends = ["torch", "transformers", "onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + +class OnnxStableDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + +class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + +class StableDiffusionOnnxPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_transformers_objects.py new file mode 100644 index 000000000..f64c15702 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -0,0 +1,1607 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
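For context on how these placeholder modules get used: the package-level `__init__.py` (which relies on `OptionalDependencyNotAvailable` and the `is_*_available` checks imported at the top of `utils/__init__.py`) guards each optional import group and star-imports the matching dummy file when a backend check fails. The snippet below is a hedged sketch of that wiring in the upstream diffusers style, not a literal excerpt from this patch; `StableDiffusionPipeline` stands in for whichever real objects the group exposes:

    from .utils import (
        OptionalDependencyNotAvailable,
        is_torch_available,
        is_transformers_available,
    )

    try:
        if not (is_torch_available() and is_transformers_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        # Backend missing: expose the placeholders from the dummy module instead.
        from .utils.dummy_torch_and_transformers_objects import *  # noqa: F401,F403
    else:
        from .pipelines import StableDiffusionPipeline  # the real implementation

This keeps `import diffusers` cheap and failure-free on machines without torch or transformers, while deferring the hard error to the first real use of an object from the missing group.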
+from ..utils import DummyObject, requires_backends + + +class AltDiffusionImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AltDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AmusedImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AmusedInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AmusedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AnimateDiffPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AnimateDiffVideoToVideoPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AudioLDM2Pipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AudioLDM2ProjectionModel(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", 
"transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AudioLDM2UNet2DConditionModel(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AudioLDMPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class CLIPImageProjection(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class CycleDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class I2VGenXLPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class IFImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class IFImg2ImgSuperResolutionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class IFInpaintingPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", 
"transformers"]) + + +class IFInpaintingSuperResolutionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class IFPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class IFSuperResolutionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class ImageTextPipelineOutput(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class Kandinsky3Img2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class Kandinsky3Pipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyImg2ImgCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", 
"transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyInpaintCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyPriorPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22CombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22ControlnetImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22ControlnetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22Img2ImgCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, 
**kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22Img2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22InpaintCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22InpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22Pipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22PriorEmb2EmbPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22PriorPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class LatentConsistencyModelImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class LatentConsistencyModelPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class LDMTextToImagePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def 
__init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class LEditsPPPipelineStableDiffusion(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class LEditsPPPipelineStableDiffusionXL(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class MusicLDMPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class PaintByExamplePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class PIAPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class PixArtAlphaPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class SemanticStableDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class ShapEImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def 
from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class ShapEPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableCascadeCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableCascadeDecoderPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableCascadePriorPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionAdapterPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionAttendAndExcitePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionControlNetImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionControlNetInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class 
StableDiffusionControlNetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionDepth2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionDiffEditPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionGLIGENPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionGLIGENTextImagePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionImageVariationPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionInpaintPipelineLegacy(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): 
+ requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionInstructPix2PixPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionLatentUpscalePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionLDM3DPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionModelEditingPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionPanoramaPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionParadigmsPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionPipelineSafe(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", 
"transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionPix2PixZeroPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionSAGPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionUpscalePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLAdapterPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLControlNetImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLControlNetInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLControlNetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", 
"transformers"]) + + +class StableDiffusionXLInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLInstructPix2PixPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableUnCLIPImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableUnCLIPPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableVideoDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class TextToVideoSDPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class TextToVideoZeroPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class TextToVideoZeroSDXLPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, 
["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class UnCLIPImageVariationPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class UnCLIPPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class UniDiffuserModel(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class UniDiffuserPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class UniDiffuserTextDecoder(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class VersatileDiffusionDualGuidedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class VersatileDiffusionImageVariationPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class VersatileDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + 
requires_backends(cls, ["torch", "transformers"]) + + +class VersatileDiffusionTextToImagePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class VideoToVideoSDPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class VQDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class WuerstchenCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class WuerstchenDecoderPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class WuerstchenPriorPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py new file mode 100644 index 000000000..fbde04e33 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py @@ -0,0 +1,17 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class SpectrogramDiffusionPipeline(metaclass=DummyObject): + _backends = ["transformers", "torch", "note_seq"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["transformers", "torch", "note_seq"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["transformers", "torch", "note_seq"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["transformers", "torch", "note_seq"]) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dynamic_modules_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dynamic_modules_utils.py new file mode 100644 index 000000000..a4c704a91 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/dynamic_modules_utils.py @@ -0,0 +1,452 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities to dynamically load objects from the Hub.""" + +import importlib +import inspect +import json +import os +import re +import shutil +import sys +from pathlib import Path +from typing import Dict, Optional, Union +from urllib import request + +from huggingface_hub import cached_download, hf_hub_download, model_info +from huggingface_hub.utils import validate_hf_hub_args +from packaging import version + +from .. import __version__ +from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging + + +COMMUNITY_PIPELINES_URL = ( + "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py" +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def get_diffusers_versions(): + url = "https://pypi.org/pypi/diffusers/json" + releases = json.loads(request.urlopen(url).read())["releases"].keys() + return sorted(releases, key=lambda x: version.Version(x)) + + +def init_hf_modules(): + """ + Creates the cache directory for modules with an init, and adds it to the Python path. + """ + # This function has already been executed if HF_MODULES_CACHE already is in the Python path. + if HF_MODULES_CACHE in sys.path: + return + + sys.path.append(HF_MODULES_CACHE) + os.makedirs(HF_MODULES_CACHE, exist_ok=True) + init_path = Path(HF_MODULES_CACHE) / "__init__.py" + if not init_path.exists(): + init_path.touch() + + +def create_dynamic_module(name: Union[str, os.PathLike]): + """ + Creates a dynamic module in the cache directory for modules. + """ + init_hf_modules() + dynamic_module_path = Path(HF_MODULES_CACHE) / name + # If the parent module does not exist yet, recursively create it. 
+ if not dynamic_module_path.parent.exists(): + create_dynamic_module(dynamic_module_path.parent) + os.makedirs(dynamic_module_path, exist_ok=True) + init_path = dynamic_module_path / "__init__.py" + if not init_path.exists(): + init_path.touch() + + +def get_relative_imports(module_file): + """ + Get the list of modules that are relatively imported in a module file. + + Args: + module_file (`str` or `os.PathLike`): The module file to inspect. + """ + with open(module_file, "r", encoding="utf-8") as f: + content = f.read() + + # Imports of the form `import .xxx` + relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from .xxx import yyy` + relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE) + # Unique-ify + return list(set(relative_imports)) + + +def get_relative_import_files(module_file): + """ + Get the list of all files that are needed for a given module. Note that this function recurses through the relative + imports (if a imports b and b imports c, it will return module files for b and c). + + Args: + module_file (`str` or `os.PathLike`): The module file to inspect. + """ + no_change = False + files_to_check = [module_file] + all_relative_imports = [] + + # Let's recurse through all relative imports + while not no_change: + new_imports = [] + for f in files_to_check: + new_imports.extend(get_relative_imports(f)) + + module_path = Path(module_file).parent + new_import_files = [str(module_path / m) for m in new_imports] + new_import_files = [f for f in new_import_files if f not in all_relative_imports] + files_to_check = [f"{f}.py" for f in new_import_files] + + no_change = len(new_import_files) == 0 + all_relative_imports.extend(files_to_check) + + return all_relative_imports + + +def check_imports(filename): + """ + Check if the current Python environment contains all the libraries that are imported in a file. + """ + with open(filename, "r", encoding="utf-8") as f: + content = f.read() + + # Imports of the form `import xxx` + imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from xxx import yyy` + imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE) + # Only keep the top-level module + imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")] + + # Unique-ify and test we got them all + imports = list(set(imports)) + missing_packages = [] + for imp in imports: + try: + importlib.import_module(imp) + except ImportError: + missing_packages.append(imp) + + if len(missing_packages) > 0: + raise ImportError( + "This modeling file requires the following packages that were not found in your environment: " + f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`" + ) + + return get_relative_imports(filename) + + +def get_class_in_module(class_name, module_path): + """ + Import a module on the cache directory for modules and extract a class from it. + """ + module_path = module_path.replace(os.path.sep, ".") + module = importlib.import_module(module_path) + + if class_name is None: + return find_pipeline_class(module) + return getattr(module, class_name) + + +def find_pipeline_class(loaded_module): + """ + Retrieve pipeline class that inherits from `DiffusionPipeline`. Note that there has to be exactly one class + inheriting from `DiffusionPipeline`. 
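To see what the two regular expressions in `get_relative_imports` actually match, here is a standalone check against a made-up module source (the import targets are invented purely for illustration; only the relative `from .x import y` / `import .x` forms are captured, absolute imports are ignored):

```python
import re

# Invented example source for a custom pipeline module.
content = """
import os
from diffusers import DiffusionPipeline
from .pipeline_utils import load_config
from .schedulers import ExampleScheduler
"""

# Same two patterns used by get_relative_imports above.
relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)

print(sorted(set(relative_imports)))  # ['pipeline_utils', 'schedulers']
```

`get_relative_import_files` then recurses over these names, and `check_imports` applies the analogous absolute-import patterns to report any packages missing from the current environment.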
+ """ + from ..pipelines import DiffusionPipeline + + cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass)) + + pipeline_class = None + for cls_name, cls in cls_members.items(): + if ( + cls_name != DiffusionPipeline.__name__ + and issubclass(cls, DiffusionPipeline) + and cls.__module__.split(".")[0] != "diffusers" + ): + if pipeline_class is not None: + raise ValueError( + f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:" + f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in" + f" {loaded_module}." + ) + pipeline_class = cls + + return pipeline_class + + +@validate_hf_hub_args +def get_cached_module_file( + pretrained_model_name_or_path: Union[str, os.PathLike], + module_file: str, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, +): + """ + Prepares Downloads a module from a local folder or a distant repo and returns its path inside the cached + Transformers module. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced + under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + + module_file (`str`): + The name of the module file containing the class to look for. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `transformers-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + + + + You may pass a token in `token` if you are not logged in (`huggingface-cli login`) and want to use private + or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models). + + + + Returns: + `str`: The path to the module inside the cache. 
+ """ + # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file. + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + + module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file) + + if os.path.isfile(module_file_or_url): + resolved_module_file = module_file_or_url + submodule = "local" + elif pretrained_model_name_or_path.count("/") == 0: + available_versions = get_diffusers_versions() + # cut ".dev0" + latest_version = "v" + ".".join(__version__.split(".")[:3]) + + # retrieve github version that matches + if revision is None: + revision = latest_version if latest_version[1:] in available_versions else "main" + logger.info(f"Defaulting to latest_version: {revision}.") + elif revision in available_versions: + revision = f"v{revision}" + elif revision == "main": + revision = revision + else: + raise ValueError( + f"`custom_revision`: {revision} does not exist. Please make sure to choose one of" + f" {', '.join(available_versions + ['main'])}." + ) + + # community pipeline on GitHub + github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path) + try: + resolved_module_file = cached_download( + github_url, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=False, + ) + submodule = "git" + module_file = pretrained_model_name_or_path + ".py" + except EnvironmentError: + logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.") + raise + else: + try: + # Load from URL or cache if already cached + resolved_module_file = hf_hub_download( + pretrained_model_name_or_path, + module_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + ) + submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/"))) + except EnvironmentError: + logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.") + raise + + # Check we have all the requirements in our environment + modules_needed = check_imports(resolved_module_file) + + # Now we move the module inside our cached dynamic modules. + full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule + create_dynamic_module(full_submodule) + submodule_path = Path(HF_MODULES_CACHE) / full_submodule + if submodule == "local" or submodule == "git": + # We always copy local files (we could hash the file to see if there was a change, and give them the name of + # that hash, to only copy when there is a modification but it seems overkill for now). + # The only reason we do the copy is to avoid putting too many folders in sys.path. + shutil.copy(resolved_module_file, submodule_path / module_file) + for module_needed in modules_needed: + module_needed = f"{module_needed}.py" + shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed) + else: + # Get the commit hash + # TODO: we will get this info in the etag soon, so retrieve it from there and not here. + commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha + + # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the + # benefit of versioning. 
+ submodule_path = submodule_path / commit_hash + full_submodule = full_submodule + os.path.sep + commit_hash + create_dynamic_module(full_submodule) + + if not (submodule_path / module_file).exists(): + shutil.copy(resolved_module_file, submodule_path / module_file) + # Make sure we also have every file with relative + for module_needed in modules_needed: + if not (submodule_path / module_needed).exists(): + get_cached_module_file( + pretrained_model_name_or_path, + f"{module_needed}.py", + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + ) + return os.path.join(full_submodule, module_file) + + +@validate_hf_hub_args +def get_class_from_dynamic_module( + pretrained_model_name_or_path: Union[str, os.PathLike], + module_file: str, + class_name: Optional[str] = None, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + **kwargs, +): + """ + Extracts a class from a module file, present in the local folder or repository of a model. + + + + Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should + therefore only be called on trusted repos. + + + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced + under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + + module_file (`str`): + The name of the module file containing the class to look for. + class_name (`str`): + The name of the class to import in the module. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `transformers-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. 
+ local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + + + + You may pass a token in `token` if you are not logged in (`huggingface-cli login`) and want to use private + or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models). + + + + Returns: + `type`: The class, dynamically imported from the module. + + Examples: + + ```python + # Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this + # module. + cls = get_class_from_dynamic_module("sgugger/my-bert-model", "modeling.py", "MyBertModel") + ```""" + # And lastly we get the class inside our newly created module + final_module = get_cached_module_file( + pretrained_model_name_or_path, + module_file, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + ) + return get_class_in_module(class_name, final_module.replace(".py", "")) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/export_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/export_utils.py new file mode 100644 index 000000000..bb5307756 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/export_utils.py @@ -0,0 +1,140 @@ +import io +import random +import struct +import tempfile +from contextlib import contextmanager +from typing import List, Union + +import numpy as np +import PIL.Image +import PIL.ImageOps + +from .import_utils import ( + BACKENDS_MAPPING, + is_opencv_available, +) +from .logging import get_logger + + +global_rng = random.Random() + +logger = get_logger(__name__) + + +@contextmanager +def buffered_writer(raw_f): + f = io.BufferedWriter(raw_f) + yield f + f.flush() + + +def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None, fps: int = 10) -> str: + if output_gif_path is None: + output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name + + image[0].save( + output_gif_path, + save_all=True, + append_images=image[1:], + optimize=False, + duration=1000 // fps, + loop=0, + ) + return output_gif_path + + +def export_to_ply(mesh, output_ply_path: str = None): + """ + Write a PLY file for a mesh. 
+    """
+    if output_ply_path is None:
+        output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name
+
+    coords = mesh.verts.detach().cpu().numpy()
+    faces = mesh.faces.cpu().numpy()
+    rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)
+
+    with buffered_writer(open(output_ply_path, "wb")) as f:
+        f.write(b"ply\n")
+        f.write(b"format binary_little_endian 1.0\n")
+        f.write(bytes(f"element vertex {len(coords)}\n", "ascii"))
+        f.write(b"property float x\n")
+        f.write(b"property float y\n")
+        f.write(b"property float z\n")
+        if rgb is not None:
+            f.write(b"property uchar red\n")
+            f.write(b"property uchar green\n")
+            f.write(b"property uchar blue\n")
+        if faces is not None:
+            f.write(bytes(f"element face {len(faces)}\n", "ascii"))
+            f.write(b"property list uchar int vertex_index\n")
+        f.write(b"end_header\n")
+
+        if rgb is not None:
+            rgb = (rgb * 255.499).round().astype(int)
+            vertices = [
+                (*coord, *rgb)
+                for coord, rgb in zip(
+                    coords.tolist(),
+                    rgb.tolist(),
+                )
+            ]
+            format = struct.Struct("<3f3B")
+            for item in vertices:
+                f.write(format.pack(*item))
+        else:
+            format = struct.Struct("<3f")
+            for vertex in coords.tolist():
+                f.write(format.pack(*vertex))
+
+        if faces is not None:
+            format = struct.Struct("<3I")
+            for tri in faces.tolist():
+                f.write(format.pack(len(tri), *tri))
+
+    return output_ply_path
+
+
+def export_to_obj(mesh, output_obj_path: str = None):
+    if output_obj_path is None:
+        output_obj_path = tempfile.NamedTemporaryFile(suffix=".obj").name
+
+    verts = mesh.verts.detach().cpu().numpy()
+    faces = mesh.faces.cpu().numpy()
+
+    vertex_colors = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)
+    vertices = [
+        "{} {} {} {} {} {}".format(*coord, *color) for coord, color in zip(verts.tolist(), vertex_colors.tolist())
+    ]
+
+    faces = ["f {} {} {}".format(str(tri[0] + 1), str(tri[1] + 1), str(tri[2] + 1)) for tri in faces]
+
+    combined_data = ["v " + vertex for vertex in vertices] + faces
+
+    with open(output_obj_path, "w") as f:
+        f.writelines("\n".join(combined_data))
+
+
+def export_to_video(
+    video_frames: Union[List[np.ndarray], List[PIL.Image.Image]], output_video_path: str = None, fps: int = 10
+) -> str:
+    if is_opencv_available():
+        import cv2
+    else:
+        raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video"))
+    if output_video_path is None:
+        output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name
+
+    if isinstance(video_frames[0], np.ndarray):
+        video_frames = [(frame * 255).astype(np.uint8) for frame in video_frames]
+
+    elif isinstance(video_frames[0], PIL.Image.Image):
+        video_frames = [np.array(frame) for frame in video_frames]
+
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+    h, w, c = video_frames[0].shape
+    video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=fps, frameSize=(w, h))
+    for i in range(len(video_frames)):
+        img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR)
+        video_writer.write(img)
+    return output_video_path
diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/hub_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/hub_utils.py
new file mode 100644
index 000000000..e554b42dd
--- /dev/null
+++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/hub_utils.py
@@ -0,0 +1,493 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
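For reference, a minimal usage sketch of the `export_to_gif` / `export_to_video` helpers added in `export_utils.py` above. The random frames, file names, and fps value are placeholders; `opencv-python` is assumed for the MP4 path, and the vendored `src/` tree is assumed to be importable as `diffusers`:

```python
import numpy as np
import PIL.Image

from diffusers.utils.export_utils import export_to_gif, export_to_video

# 16 random RGB frames stand in for real generated images (placeholder data).
frames = [
    PIL.Image.fromarray(np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8))
    for _ in range(16)
]

gif_path = export_to_gif(frames, "sample.gif", fps=8)
mp4_path = export_to_video(frames, "sample.mp4", fps=8)  # needs opencv-python
print(gif_path, mp4_path)
```

Passing PIL frames avoids the float-to-uint8 rescaling branch that `export_to_video` applies to NumPy inputs.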
+ + +import os +import re +import sys +import tempfile +import traceback +import warnings +from pathlib import Path +from typing import Dict, List, Optional, Union +from uuid import uuid4 + +from huggingface_hub import ( + ModelCard, + ModelCardData, + create_repo, + hf_hub_download, + upload_folder, +) +from huggingface_hub.constants import HF_HUB_CACHE, HF_HUB_DISABLE_TELEMETRY, HF_HUB_OFFLINE +from huggingface_hub.file_download import REGEX_COMMIT_HASH +from huggingface_hub.utils import ( + EntryNotFoundError, + RepositoryNotFoundError, + RevisionNotFoundError, + is_jinja_available, + validate_hf_hub_args, +) +from packaging import version +from requests import HTTPError + +from .. import __version__ +from .constants import ( + DEPRECATED_REVISION_ARGS, + HUGGINGFACE_CO_RESOLVE_ENDPOINT, + SAFETENSORS_WEIGHTS_NAME, + WEIGHTS_NAME, +) +from .import_utils import ( + ENV_VARS_TRUE_VALUES, + _flax_version, + _jax_version, + _onnxruntime_version, + _torch_version, + is_flax_available, + is_onnx_available, + is_torch_available, +) +from .logging import get_logger + + +logger = get_logger(__name__) + +MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md" +SESSION_ID = uuid4().hex + + +def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: + """ + Formats a user-agent string with basic info about a request. + """ + ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" + if HF_HUB_DISABLE_TELEMETRY or HF_HUB_OFFLINE: + return ua + "; telemetry/off" + if is_torch_available(): + ua += f"; torch/{_torch_version}" + if is_flax_available(): + ua += f"; jax/{_jax_version}" + ua += f"; flax/{_flax_version}" + if is_onnx_available(): + ua += f"; onnxruntime/{_onnxruntime_version}" + # CI will set this value to True + if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: + ua += "; is_ci/true" + if isinstance(user_agent, dict): + ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) + elif isinstance(user_agent, str): + ua += "; " + user_agent + return ua + + +def load_or_create_model_card( + repo_id_or_path: str = None, + token: Optional[str] = None, + is_pipeline: bool = False, + from_training: bool = False, + model_description: Optional[str] = None, + base_model: str = None, + prompt: Optional[str] = None, + license: Optional[str] = None, + widget: Optional[List[dict]] = None, + inference: Optional[bool] = None, +) -> ModelCard: + """ + Loads or creates a model card. + + Args: + repo_id_or_path (`str`): + The repo id (e.g., "runwayml/stable-diffusion-v1-5") or local path where to look for the model card. + token (`str`, *optional*): + Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more details. + is_pipeline (`bool`): + Boolean to indicate if we're adding tag to a [`DiffusionPipeline`]. + from_training: (`bool`): Boolean flag to denote if the model card is being created from a training script. + model_description (`str`, *optional*): Model description to add to the model card. Helpful when using + `load_or_create_model_card` from a training script. + base_model (`str`): Base model identifier (e.g., "stabilityai/stable-diffusion-xl-base-1.0"). Useful + for DreamBooth-like training. + prompt (`str`, *optional*): Prompt used for training. Useful for DreamBooth-like training. + license: (`str`, *optional*): License of the output artifact. Helpful when using + `load_or_create_model_card` from a training script. 
+ widget (`List[dict]`, *optional*): Widget to accompany a gallery template. + inference: (`bool`, optional): Whether to turn on inference widget. Helpful when using + `load_or_create_model_card` from a training script. + """ + if not is_jinja_available(): + raise ValueError( + "Modelcard rendering is based on Jinja templates." + " Please make sure to have `jinja` installed before using `load_or_create_model_card`." + " To install it, please run `pip install Jinja2`." + ) + + try: + # Check if the model card is present on the remote repo + model_card = ModelCard.load(repo_id_or_path, token=token) + except (EntryNotFoundError, RepositoryNotFoundError): + # Otherwise create a model card from template + if from_training: + model_card = ModelCard.from_template( + card_data=ModelCardData( # Card metadata object that will be converted to YAML block + license=license, + library_name="diffusers", + inference=inference, + base_model=base_model, + instance_prompt=prompt, + widget=widget, + ), + template_path=MODEL_CARD_TEMPLATE_PATH, + model_description=model_description, + ) + else: + card_data = ModelCardData() + component = "pipeline" if is_pipeline else "model" + if model_description is None: + model_description = f"This is the model card of a 🧨 diffusers {component} that has been pushed on the Hub. This model card has been automatically generated." + model_card = ModelCard.from_template(card_data, model_description=model_description) + + return model_card + + +def populate_model_card(model_card: ModelCard, tags: Union[str, List[str]] = None) -> ModelCard: + """Populates the `model_card` with library name and optional tags.""" + if model_card.data.library_name is None: + model_card.data.library_name = "diffusers" + + if tags is not None: + if isinstance(tags, str): + tags = [tags] + if model_card.data.tags is None: + model_card.data.tags = [] + for tag in tags: + model_card.data.tags.append(tag) + + return model_card + + +def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None): + """ + Extracts the commit hash from a resolved filename toward a cache file. + """ + if resolved_file is None or commit_hash is not None: + return commit_hash + resolved_file = str(Path(resolved_file).as_posix()) + search = re.search(r"snapshots/([^/]+)/", resolved_file) + if search is None: + return None + commit_hash = search.groups()[0] + return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None + + +# Old default cache path, potentially to be migrated. +# This logic was more or less taken from `transformers`, with the following differences: +# - Diffusers doesn't use custom environment variables to specify the cache path. +# - There is no need to migrate the cache format, just move the files to the new location. 
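A hedged sketch of how a training script might drive `load_or_create_model_card` and `populate_model_card` defined above. The repo id, base model, prompt, and tags are invented for illustration; `jinja2` must be installed, and the initial lookup needs Hub access (a missing repo falls through to the template path):

```python
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card

# Build a card from the bundled template as a training script would.
card = load_or_create_model_card(
    repo_id_or_path="your-user/sd21-pokemon-lora",   # hypothetical repo id
    from_training=True,
    base_model="stabilityai/stable-diffusion-2-1",
    prompt="a cute pokemon creature",
    model_description="Fine-tuned on a small Pokemon caption dataset.",
    license="creativeml-openrail-m",
    inference=True,
)
card = populate_model_card(card, tags=["stable-diffusion", "text-to-image"])
card.save("README.md")  # written locally; pushing is a separate step
```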
+hf_cache_home = os.path.expanduser( + os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) +) +old_diffusers_cache = os.path.join(hf_cache_home, "diffusers") + + +def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None: + if new_cache_dir is None: + new_cache_dir = HF_HUB_CACHE + if old_cache_dir is None: + old_cache_dir = old_diffusers_cache + + old_cache_dir = Path(old_cache_dir).expanduser() + new_cache_dir = Path(new_cache_dir).expanduser() + for old_blob_path in old_cache_dir.glob("**/blobs/*"): + if old_blob_path.is_file() and not old_blob_path.is_symlink(): + new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir) + new_blob_path.parent.mkdir(parents=True, exist_ok=True) + os.replace(old_blob_path, new_blob_path) + try: + os.symlink(new_blob_path, old_blob_path) + except OSError: + logger.warning( + "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." + ) + # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). + + +cache_version_file = os.path.join(HF_HUB_CACHE, "version_diffusers_cache.txt") +if not os.path.isfile(cache_version_file): + cache_version = 0 +else: + with open(cache_version_file) as f: + try: + cache_version = int(f.read()) + except ValueError: + cache_version = 0 + +if cache_version < 1: + old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 + if old_cache_is_not_empty: + logger.warning( + "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your " + "existing cached models. This is a one-time operation, you can interrupt it or run it " + "later by calling `diffusers.utils.hub_utils.move_cache()`." + ) + try: + move_cache() + except Exception as e: + trace = "\n".join(traceback.format_tb(e.__traceback__)) + logger.error( + f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " + "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole " + "message and we will do our best to help." + ) + +if cache_version < 1: + try: + os.makedirs(HF_HUB_CACHE, exist_ok=True) + with open(cache_version_file, "w") as f: + f.write("1") + except Exception: + logger.warning( + f"There was a problem when trying to write in your cache folder ({HF_HUB_CACHE}). Please, ensure " + "the directory exists and can be written to." 
+ ) + + +def _add_variant(weights_name: str, variant: Optional[str] = None) -> str: + if variant is not None: + splits = weights_name.split(".") + splits = splits[:-1] + [variant] + splits[-1:] + weights_name = ".".join(splits) + + return weights_name + + +@validate_hf_hub_args +def _get_model_file( + pretrained_model_name_or_path: Union[str, Path], + *, + weights_name: str, + subfolder: Optional[str] = None, + cache_dir: Optional[str] = None, + force_download: bool = False, + proxies: Optional[Dict] = None, + resume_download: bool = False, + local_files_only: bool = False, + token: Optional[str] = None, + user_agent: Optional[Union[Dict, str]] = None, + revision: Optional[str] = None, + commit_hash: Optional[str] = None, +): + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + if os.path.isfile(pretrained_model_name_or_path): + return pretrained_model_name_or_path + elif os.path.isdir(pretrained_model_name_or_path): + if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)): + # Load from a PyTorch checkpoint + model_file = os.path.join(pretrained_model_name_or_path, weights_name) + return model_file + elif subfolder is not None and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, weights_name) + ): + model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name) + return model_file + else: + raise EnvironmentError( + f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." + ) + else: + # 1. First check if deprecated way of loading from branches is used + if ( + revision in DEPRECATED_REVISION_ARGS + and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) + and version.parse(version.parse(__version__).base_version) >= version.parse("0.22.0") + ): + try: + model_file = hf_hub_download( + pretrained_model_name_or_path, + filename=_add_variant(weights_name, revision), + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + subfolder=subfolder, + revision=revision or commit_hash, + ) + warnings.warn( + f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", + FutureWarning, + ) + return model_file + except: # noqa: E722 + warnings.warn( + f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", + FutureWarning, + ) + try: + # 2. 
Load model file as usual + model_file = hf_hub_download( + pretrained_model_name_or_path, + filename=weights_name, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + subfolder=subfolder, + revision=revision or commit_hash, + ) + return model_file + + except RepositoryNotFoundError: + raise EnvironmentError( + f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " + "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " + "token having permission to this repo with `token` or log in with `huggingface-cli " + "login`." + ) + except RevisionNotFoundError: + raise EnvironmentError( + f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " + "this model name. Check the model page at " + f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." + ) + except EntryNotFoundError: + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." + ) + except HTTPError as err: + raise EnvironmentError( + f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" + ) + except ValueError: + raise EnvironmentError( + f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" + f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" + f" directory containing a file named {weights_name} or" + " \nCheckout your internet connection or see how to run the library in" + " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." + ) + except EnvironmentError: + raise EnvironmentError( + f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " + "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " + f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " + f"containing a file named {weights_name}" + ) + + +class PushToHubMixin: + """ + A Mixin to push a model, scheduler, or pipeline to the Hugging Face Hub. + """ + + def _upload_folder( + self, + working_dir: Union[str, os.PathLike], + repo_id: str, + token: Optional[str] = None, + commit_message: Optional[str] = None, + create_pr: bool = False, + ): + """ + Uploads all files in `working_dir` to `repo_id`. + """ + if commit_message is None: + if "Model" in self.__class__.__name__: + commit_message = "Upload model" + elif "Scheduler" in self.__class__.__name__: + commit_message = "Upload scheduler" + else: + commit_message = f"Upload {self.__class__.__name__}" + + logger.info(f"Uploading the files of {working_dir} to {repo_id}.") + return upload_folder( + repo_id=repo_id, folder_path=working_dir, token=token, commit_message=commit_message, create_pr=create_pr + ) + + def push_to_hub( + self, + repo_id: str, + commit_message: Optional[str] = None, + private: Optional[bool] = None, + token: Optional[str] = None, + create_pr: bool = False, + safe_serialization: bool = True, + variant: Optional[str] = None, + ) -> str: + """ + Upload model, scheduler, or pipeline files to the 🤗 Hugging Face Hub. + + Parameters: + repo_id (`str`): + The name of the repository you want to push your model, scheduler, or pipeline files to. 
It should + contain your organization name when pushing to an organization. `repo_id` can also be a path to a local + directory. + commit_message (`str`, *optional*): + Message to commit while pushing. Default to `"Upload {object}"`. + private (`bool`, *optional*): + Whether or not the repository created should be private. + token (`str`, *optional*): + The token to use as HTTP bearer authorization for remote files. The token generated when running + `huggingface-cli login` (stored in `~/.huggingface`). + create_pr (`bool`, *optional*, defaults to `False`): + Whether or not to create a PR with the uploaded files or directly commit. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether or not to convert the model weights to the `safetensors` format. + variant (`str`, *optional*): + If specified, weights are saved in the format `pytorch_model..bin`. + + Examples: + + ```python + from diffusers import UNet2DConditionModel + + unet = UNet2DConditionModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="unet") + + # Push the `unet` to your namespace with the name "my-finetuned-unet". + unet.push_to_hub("my-finetuned-unet") + + # Push the `unet` to an organization with the name "my-finetuned-unet". + unet.push_to_hub("your-org/my-finetuned-unet") + ``` + """ + repo_id = create_repo(repo_id, private=private, token=token, exist_ok=True).repo_id + + # Create a new empty model card and eventually tag it + model_card = load_or_create_model_card(repo_id, token=token) + model_card = populate_model_card(model_card) + + # Save all files. + save_kwargs = {"safe_serialization": safe_serialization} + if "Scheduler" not in self.__class__.__name__: + save_kwargs.update({"variant": variant}) + + with tempfile.TemporaryDirectory() as tmpdir: + self.save_pretrained(tmpdir, **save_kwargs) + + # Update model card if needed: + model_card.save(os.path.join(tmpdir, "README.md")) + + return self._upload_folder( + tmpdir, + repo_id, + token=token, + commit_message=commit_message, + create_pr=create_pr, + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/import_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/import_utils.py new file mode 100644 index 000000000..a3ee31c91 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/import_utils.py @@ -0,0 +1,726 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Import utilities: Utilities related to imports and our lazy inits. +""" + +import importlib.util +import operator as op +import os +import sys +from collections import OrderedDict +from itertools import chain +from types import ModuleType +from typing import Any, Union + +from huggingface_hub.utils import is_jinja_available # noqa: F401 +from packaging import version +from packaging.version import Version, parse + +from . 
import logging + + +# The package importlib_metadata is in a different place, depending on the python version. +if sys.version_info < (3, 8): + import importlib_metadata +else: + import importlib.metadata as importlib_metadata + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} +ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) + +USE_TF = os.environ.get("USE_TF", "AUTO").upper() +USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() +USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper() +USE_SAFETENSORS = os.environ.get("USE_SAFETENSORS", "AUTO").upper() +DIFFUSERS_SLOW_IMPORT = os.environ.get("DIFFUSERS_SLOW_IMPORT", "FALSE").upper() +DIFFUSERS_SLOW_IMPORT = DIFFUSERS_SLOW_IMPORT in ENV_VARS_TRUE_VALUES + +STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt} + +_torch_version = "N/A" +if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: + _torch_available = importlib.util.find_spec("torch") is not None + if _torch_available: + try: + _torch_version = importlib_metadata.version("torch") + logger.info(f"PyTorch version {_torch_version} available.") + except importlib_metadata.PackageNotFoundError: + _torch_available = False +else: + logger.info("Disabling PyTorch because USE_TORCH is set") + _torch_available = False + +_torch_xla_available = importlib.util.find_spec("torch_xla") is not None +if _torch_xla_available: + try: + _torch_xla_version = importlib_metadata.version("torch_xla") + logger.info(f"PyTorch XLA version {_torch_xla_version} available.") + except ImportError: + _torch_xla_available = False + +# check whether torch_npu is available +_torch_npu_available = importlib.util.find_spec("torch_npu") is not None +if _torch_npu_available: + try: + _torch_npu_version = importlib_metadata.version("torch_npu") + logger.info(f"torch_npu version {_torch_npu_version} available.") + except ImportError: + _torch_npu_available = False + +_jax_version = "N/A" +_flax_version = "N/A" +if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: + _flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None + if _flax_available: + try: + _jax_version = importlib_metadata.version("jax") + _flax_version = importlib_metadata.version("flax") + logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.") + except importlib_metadata.PackageNotFoundError: + _flax_available = False +else: + _flax_available = False + +if USE_SAFETENSORS in ENV_VARS_TRUE_AND_AUTO_VALUES: + _safetensors_available = importlib.util.find_spec("safetensors") is not None + if _safetensors_available: + try: + _safetensors_version = importlib_metadata.version("safetensors") + logger.info(f"Safetensors version {_safetensors_version} available.") + except importlib_metadata.PackageNotFoundError: + _safetensors_available = False +else: + logger.info("Disabling Safetensors because USE_TF is set") + _safetensors_available = False + +_transformers_available = importlib.util.find_spec("transformers") is not None +try: + _transformers_version = importlib_metadata.version("transformers") + logger.debug(f"Successfully imported transformers version {_transformers_version}") +except importlib_metadata.PackageNotFoundError: + _transformers_available = False + + +_inflect_available = importlib.util.find_spec("inflect") is not None +try: + _inflect_version = importlib_metadata.version("inflect") + 
logger.debug(f"Successfully imported inflect version {_inflect_version}") +except importlib_metadata.PackageNotFoundError: + _inflect_available = False + + +_unidecode_available = importlib.util.find_spec("unidecode") is not None +try: + _unidecode_version = importlib_metadata.version("unidecode") + logger.debug(f"Successfully imported unidecode version {_unidecode_version}") +except importlib_metadata.PackageNotFoundError: + _unidecode_available = False + + +_onnxruntime_version = "N/A" +_onnx_available = importlib.util.find_spec("onnxruntime") is not None +if _onnx_available: + candidates = ( + "onnxruntime", + "onnxruntime-gpu", + "ort_nightly_gpu", + "onnxruntime-directml", + "onnxruntime-openvino", + "ort_nightly_directml", + "onnxruntime-rocm", + "onnxruntime-training", + ) + _onnxruntime_version = None + # For the metadata, we have to look for both onnxruntime and onnxruntime-gpu + for pkg in candidates: + try: + _onnxruntime_version = importlib_metadata.version(pkg) + break + except importlib_metadata.PackageNotFoundError: + pass + _onnx_available = _onnxruntime_version is not None + if _onnx_available: + logger.debug(f"Successfully imported onnxruntime version {_onnxruntime_version}") + +# (sayakpaul): importlib.util.find_spec("opencv-python") returns None even when it's installed. +# _opencv_available = importlib.util.find_spec("opencv-python") is not None +try: + candidates = ( + "opencv-python", + "opencv-contrib-python", + "opencv-python-headless", + "opencv-contrib-python-headless", + ) + _opencv_version = None + for pkg in candidates: + try: + _opencv_version = importlib_metadata.version(pkg) + break + except importlib_metadata.PackageNotFoundError: + pass + _opencv_available = _opencv_version is not None + if _opencv_available: + logger.debug(f"Successfully imported cv2 version {_opencv_version}") +except importlib_metadata.PackageNotFoundError: + _opencv_available = False + +_scipy_available = importlib.util.find_spec("scipy") is not None +try: + _scipy_version = importlib_metadata.version("scipy") + logger.debug(f"Successfully imported scipy version {_scipy_version}") +except importlib_metadata.PackageNotFoundError: + _scipy_available = False + +_librosa_available = importlib.util.find_spec("librosa") is not None +try: + _librosa_version = importlib_metadata.version("librosa") + logger.debug(f"Successfully imported librosa version {_librosa_version}") +except importlib_metadata.PackageNotFoundError: + _librosa_available = False + +_accelerate_available = importlib.util.find_spec("accelerate") is not None +try: + _accelerate_version = importlib_metadata.version("accelerate") + logger.debug(f"Successfully imported accelerate version {_accelerate_version}") +except importlib_metadata.PackageNotFoundError: + _accelerate_available = False + +_xformers_available = importlib.util.find_spec("xformers") is not None +try: + _xformers_version = importlib_metadata.version("xformers") + if _torch_available: + _torch_version = importlib_metadata.version("torch") + if version.Version(_torch_version) < version.Version("1.12"): + raise ValueError("xformers is installed in your environment and requires PyTorch >= 1.12") + + logger.debug(f"Successfully imported xformers version {_xformers_version}") +except importlib_metadata.PackageNotFoundError: + _xformers_available = False + +_k_diffusion_available = importlib.util.find_spec("k_diffusion") is not None +try: + _k_diffusion_version = importlib_metadata.version("k_diffusion") + logger.debug(f"Successfully imported k-diffusion version 
{_k_diffusion_version}") +except importlib_metadata.PackageNotFoundError: + _k_diffusion_available = False + +_note_seq_available = importlib.util.find_spec("note_seq") is not None +try: + _note_seq_version = importlib_metadata.version("note_seq") + logger.debug(f"Successfully imported note-seq version {_note_seq_version}") +except importlib_metadata.PackageNotFoundError: + _note_seq_available = False + +_wandb_available = importlib.util.find_spec("wandb") is not None +try: + _wandb_version = importlib_metadata.version("wandb") + logger.debug(f"Successfully imported wandb version {_wandb_version }") +except importlib_metadata.PackageNotFoundError: + _wandb_available = False + + +_tensorboard_available = importlib.util.find_spec("tensorboard") +try: + _tensorboard_version = importlib_metadata.version("tensorboard") + logger.debug(f"Successfully imported tensorboard version {_tensorboard_version}") +except importlib_metadata.PackageNotFoundError: + _tensorboard_available = False + + +_compel_available = importlib.util.find_spec("compel") +try: + _compel_version = importlib_metadata.version("compel") + logger.debug(f"Successfully imported compel version {_compel_version}") +except importlib_metadata.PackageNotFoundError: + _compel_available = False + + +_ftfy_available = importlib.util.find_spec("ftfy") is not None +try: + _ftfy_version = importlib_metadata.version("ftfy") + logger.debug(f"Successfully imported ftfy version {_ftfy_version}") +except importlib_metadata.PackageNotFoundError: + _ftfy_available = False + + +_bs4_available = importlib.util.find_spec("bs4") is not None +try: + # importlib metadata under different name + _bs4_version = importlib_metadata.version("beautifulsoup4") + logger.debug(f"Successfully imported ftfy version {_bs4_version}") +except importlib_metadata.PackageNotFoundError: + _bs4_available = False + +_torchsde_available = importlib.util.find_spec("torchsde") is not None +try: + _torchsde_version = importlib_metadata.version("torchsde") + logger.debug(f"Successfully imported torchsde version {_torchsde_version}") +except importlib_metadata.PackageNotFoundError: + _torchsde_available = False + +_invisible_watermark_available = importlib.util.find_spec("imwatermark") is not None +try: + _invisible_watermark_version = importlib_metadata.version("invisible-watermark") + logger.debug(f"Successfully imported invisible-watermark version {_invisible_watermark_version}") +except importlib_metadata.PackageNotFoundError: + _invisible_watermark_available = False + + +_peft_available = importlib.util.find_spec("peft") is not None +try: + _peft_version = importlib_metadata.version("peft") + logger.debug(f"Successfully imported peft version {_peft_version}") +except importlib_metadata.PackageNotFoundError: + _peft_available = False + +_torchvision_available = importlib.util.find_spec("torchvision") is not None +try: + _torchvision_version = importlib_metadata.version("torchvision") + logger.debug(f"Successfully imported torchvision version {_torchvision_version}") +except importlib_metadata.PackageNotFoundError: + _torchvision_available = False + + +def is_torch_available(): + return _torch_available + + +def is_torch_xla_available(): + return _torch_xla_available + + +def is_torch_npu_available(): + return _torch_npu_available + + +def is_flax_available(): + return _flax_available + + +def is_transformers_available(): + return _transformers_available + + +def is_inflect_available(): + return _inflect_available + + +def is_unidecode_available(): + return _unidecode_available 
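As an illustration of how the availability flags and `is_*_available()` helpers above are typically consumed downstream, a small sketch; `describe_runtime` is a hypothetical helper, not part of the module:

```python
from diffusers.utils.import_utils import is_torch_available, is_transformers_available


def describe_runtime() -> str:
    # Report which optional backends the current environment provides.
    parts = []
    parts.append("torch" if is_torch_available() else "no-torch")
    parts.append("transformers" if is_transformers_available() else "no-transformers")
    return "+".join(parts)


print(describe_runtime())
```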
+ + +def is_onnx_available(): + return _onnx_available + + +def is_opencv_available(): + return _opencv_available + + +def is_scipy_available(): + return _scipy_available + + +def is_librosa_available(): + return _librosa_available + + +def is_xformers_available(): + return _xformers_available + + +def is_accelerate_available(): + return _accelerate_available + + +def is_k_diffusion_available(): + return _k_diffusion_available + + +def is_note_seq_available(): + return _note_seq_available + + +def is_wandb_available(): + return _wandb_available + + +def is_tensorboard_available(): + return _tensorboard_available + + +def is_compel_available(): + return _compel_available + + +def is_ftfy_available(): + return _ftfy_available + + +def is_bs4_available(): + return _bs4_available + + +def is_torchsde_available(): + return _torchsde_available + + +def is_invisible_watermark_available(): + return _invisible_watermark_available + + +def is_peft_available(): + return _peft_available + + +def is_torchvision_available(): + return _torchvision_available + + +# docstyle-ignore +FLAX_IMPORT_ERROR = """ +{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the +installation page: https://github.com/google/flax and follow the ones that match your environment. +""" + +# docstyle-ignore +INFLECT_IMPORT_ERROR = """ +{0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install +inflect` +""" + +# docstyle-ignore +PYTORCH_IMPORT_ERROR = """ +{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the +installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. +""" + +# docstyle-ignore +ONNX_IMPORT_ERROR = """ +{0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip +install onnxruntime` +""" + +# docstyle-ignore +OPENCV_IMPORT_ERROR = """ +{0} requires the OpenCV library but it was not found in your environment. You can install it with pip: `pip +install opencv-python` +""" + +# docstyle-ignore +SCIPY_IMPORT_ERROR = """ +{0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install +scipy` +""" + +# docstyle-ignore +LIBROSA_IMPORT_ERROR = """ +{0} requires the librosa library but it was not found in your environment. Checkout the instructions on the +installation page: https://librosa.org/doc/latest/install.html and follow the ones that match your environment. +""" + +# docstyle-ignore +TRANSFORMERS_IMPORT_ERROR = """ +{0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip +install transformers` +""" + +# docstyle-ignore +UNIDECODE_IMPORT_ERROR = """ +{0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install +Unidecode` +""" + +# docstyle-ignore +K_DIFFUSION_IMPORT_ERROR = """ +{0} requires the k-diffusion library but it was not found in your environment. You can install it with pip: `pip +install k-diffusion` +""" + +# docstyle-ignore +NOTE_SEQ_IMPORT_ERROR = """ +{0} requires the note-seq library but it was not found in your environment. You can install it with pip: `pip +install note-seq` +""" + +# docstyle-ignore +WANDB_IMPORT_ERROR = """ +{0} requires the wandb library but it was not found in your environment. 
You can install it with pip: `pip +install wandb` +""" + +# docstyle-ignore +TENSORBOARD_IMPORT_ERROR = """ +{0} requires the tensorboard library but it was not found in your environment. You can install it with pip: `pip +install tensorboard` +""" + + +# docstyle-ignore +COMPEL_IMPORT_ERROR = """ +{0} requires the compel library but it was not found in your environment. You can install it with pip: `pip install compel` +""" + +# docstyle-ignore +BS4_IMPORT_ERROR = """ +{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip: +`pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +FTFY_IMPORT_ERROR = """ +{0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the +installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones +that match your environment. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +TORCHSDE_IMPORT_ERROR = """ +{0} requires the torchsde library but it was not found in your environment. You can install it with pip: `pip install torchsde` +""" + +# docstyle-ignore +INVISIBLE_WATERMARK_IMPORT_ERROR = """ +{0} requires the invisible-watermark library but it was not found in your environment. You can install it with pip: `pip install invisible-watermark>=0.2.0` +""" + + +BACKENDS_MAPPING = OrderedDict( + [ + ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)), + ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), + ("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)), + ("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)), + ("opencv", (is_opencv_available, OPENCV_IMPORT_ERROR)), + ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), + ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), + ("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)), + ("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)), + ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)), + ("k_diffusion", (is_k_diffusion_available, K_DIFFUSION_IMPORT_ERROR)), + ("note_seq", (is_note_seq_available, NOTE_SEQ_IMPORT_ERROR)), + ("wandb", (is_wandb_available, WANDB_IMPORT_ERROR)), + ("tensorboard", (is_tensorboard_available, TENSORBOARD_IMPORT_ERROR)), + ("compel", (is_compel_available, COMPEL_IMPORT_ERROR)), + ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)), + ("torchsde", (is_torchsde_available, TORCHSDE_IMPORT_ERROR)), + ("invisible_watermark", (is_invisible_watermark_available, INVISIBLE_WATERMARK_IMPORT_ERROR)), + ] +) + + +def requires_backends(obj, backends): + if not isinstance(backends, (list, tuple)): + backends = [backends] + + name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ + checks = (BACKENDS_MAPPING[backend] for backend in backends) + failed = [msg.format(name) for available, msg in checks if not available()] + if failed: + raise ImportError("".join(failed)) + + if name in [ + "VersatileDiffusionTextToImagePipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionDualGuidedPipeline", + "StableDiffusionImageVariationPipeline", + "UnCLIPPipeline", + ] and is_transformers_version("<", "4.25.0"): + raise ImportError( + f"You need to install `transformers>=4.25` in order to use {name}: \n```\n pip install" + " --upgrade transformers \n```" + ) + + if name in ["StableDiffusionDepth2ImgPipeline", "StableDiffusionPix2PixZeroPipeline"] and is_transformers_version( + "<", "4.26.0" 
+ ): + raise ImportError( + f"You need to install `transformers>=4.26` in order to use {name}: \n```\n pip install" + " --upgrade transformers \n```" + ) + + +class DummyObject(type): + """ + Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by + `requires_backend` each time a user tries to access any method of that class. + """ + + def __getattr__(cls, key): + if key.startswith("_") and key not in ["_load_connected_pipes", "_is_onnx"]: + return super().__getattr__(cls, key) + requires_backends(cls, cls._backends) + + +# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L319 +def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): + """ + Args: + Compares a library version to some requirement using a given operation. + library_or_version (`str` or `packaging.version.Version`): + A library name or a version to check. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="`. + requirement_version (`str`): + The version to compare the library version against + """ + if operation not in STR_OPERATION_TO_FUNC.keys(): + raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}") + operation = STR_OPERATION_TO_FUNC[operation] + if isinstance(library_or_version, str): + library_or_version = parse(importlib_metadata.version(library_or_version)) + return operation(library_or_version, parse(requirement_version)) + + +# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L338 +def is_torch_version(operation: str, version: str): + """ + Args: + Compares the current PyTorch version to a given reference with an operation. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A string version of PyTorch + """ + return compare_versions(parse(_torch_version), operation, version) + + +def is_transformers_version(operation: str, version: str): + """ + Args: + Compares the current Transformers version to a given reference with an operation. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _transformers_available: + return False + return compare_versions(parse(_transformers_version), operation, version) + + +def is_accelerate_version(operation: str, version: str): + """ + Args: + Compares the current Accelerate version to a given reference with an operation. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _accelerate_available: + return False + return compare_versions(parse(_accelerate_version), operation, version) + + +def is_k_diffusion_version(operation: str, version: str): + """ + Args: + Compares the current k-diffusion version to a given reference with an operation. 
+ operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _k_diffusion_available: + return False + return compare_versions(parse(_k_diffusion_version), operation, version) + + +def get_objects_from_module(module): + """ + Args: + Returns a dict of object names and values in a module, while skipping private/internal objects + module (ModuleType): + Module to extract the objects from. + + Returns: + dict: Dictionary of object names and corresponding values + """ + + objects = {} + for name in dir(module): + if name.startswith("_"): + continue + objects[name] = getattr(module, name) + + return objects + + +class OptionalDependencyNotAvailable(BaseException): + """An error indicating that an optional dependency of Diffusers was not found in the environment.""" + + +class _LazyModule(ModuleType): + """ + Module class that surfaces all objects but only performs associated imports when the objects are requested. + """ + + # Very heavily inspired by optuna.integration._IntegrationModule + # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py + def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): + super().__init__(name) + self._modules = set(import_structure.keys()) + self._class_to_module = {} + for key, values in import_structure.items(): + for value in values: + self._class_to_module[value] = key + # Needed for autocompletion in an IDE + self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) + self.__file__ = module_file + self.__spec__ = module_spec + self.__path__ = [os.path.dirname(module_file)] + self._objects = {} if extra_objects is None else extra_objects + self._name = name + self._import_structure = import_structure + + # Needed for autocompletion in an IDE + def __dir__(self): + result = super().__dir__() + # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether + # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir. + for attr in self.__all__: + if attr not in result: + result.append(attr) + return result + + def __getattr__(self, name: str) -> Any: + if name in self._objects: + return self._objects[name] + if name in self._modules: + value = self._get_module(name) + elif name in self._class_to_module.keys(): + module = self._get_module(self._class_to_module[name]) + value = getattr(module, name) + else: + raise AttributeError(f"module {self.__name__} has no attribute {name}") + + setattr(self, name, value) + return value + + def _get_module(self, module_name: str): + try: + return importlib.import_module("." 
+ module_name, self.__name__) + except Exception as e: + raise RuntimeError( + f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its" + f" traceback):\n{e}" + ) from e + + def __reduce__(self): + return (self.__class__, (self._name, self.__file__, self._import_structure)) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/loading_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/loading_utils.py new file mode 100644 index 000000000..18f6ead64 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/loading_utils.py @@ -0,0 +1,49 @@ +import os +from typing import Callable, Union + +import PIL.Image +import PIL.ImageOps +import requests + + +def load_image( + image: Union[str, PIL.Image.Image], convert_method: Callable[[PIL.Image.Image], PIL.Image.Image] = None +) -> PIL.Image.Image: + """ + Loads `image` to a PIL Image. + + Args: + image (`str` or `PIL.Image.Image`): + The image to convert to the PIL Image format. + convert_method (Callable[[PIL.Image.Image], PIL.Image.Image], optional): + A conversion method to apply to the image after loading it. + When set to `None` the image will be converted "RGB". + + Returns: + `PIL.Image.Image`: + A PIL Image. + """ + if isinstance(image, str): + if image.startswith("http://") or image.startswith("https://"): + image = PIL.Image.open(requests.get(image, stream=True).raw) + elif os.path.isfile(image): + image = PIL.Image.open(image) + else: + raise ValueError( + f"Incorrect path or URL. URLs must start with `http://` or `https://`, and {image} is not a valid path." + ) + elif isinstance(image, PIL.Image.Image): + image = image + else: + raise ValueError( + "Incorrect format used for the image. Should be a URL linking to an image, a local path, or a PIL image." + ) + + image = PIL.ImageOps.exif_transpose(image) + + if convert_method is not None: + image = convert_method(image) + else: + image = image.convert("RGB") + + return image diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/logging.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/logging.py new file mode 100644 index 000000000..37bc05d0c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/logging.py @@ -0,0 +1,339 @@ +# coding=utf-8 +# Copyright 2024 Optuna, Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
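Before the logging utilities, a short usage sketch for `load_image` from `loading_utils.py` above; the URL and local file name are placeholders, and the lambda shows the optional `convert_method` hook:

```python
from diffusers.utils.loading_utils import load_image

remote = load_image("https://example.com/sample.png")  # fetched with requests (placeholder URL)
local_gray = load_image("sample.png", convert_method=lambda im: im.convert("L"))
print(remote.size, local_gray.mode)
```

When `convert_method` is omitted, the helper falls back to `convert("RGB")` after EXIF transposition.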
+""" Logging utilities.""" + +import logging +import os +import sys +import threading +from logging import ( + CRITICAL, # NOQA + DEBUG, # NOQA + ERROR, # NOQA + FATAL, # NOQA + INFO, # NOQA + NOTSET, # NOQA + WARN, # NOQA + WARNING, # NOQA +) +from typing import Dict, Optional + +from tqdm import auto as tqdm_lib + + +_lock = threading.Lock() +_default_handler: Optional[logging.Handler] = None + +log_levels = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + +_default_log_level = logging.WARNING + +_tqdm_active = True + + +def _get_default_logging_level() -> int: + """ + If DIFFUSERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is + not - fall back to `_default_log_level` + """ + env_level_str = os.getenv("DIFFUSERS_VERBOSITY", None) + if env_level_str: + if env_level_str in log_levels: + return log_levels[env_level_str] + else: + logging.getLogger().warning( + f"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, " + f"has to be one of: { ', '.join(log_levels.keys()) }" + ) + return _default_log_level + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _configure_library_root_logger() -> None: + global _default_handler + + with _lock: + if _default_handler: + # This library has already configured the library root logger. + return + _default_handler = logging.StreamHandler() # Set sys.stderr as stream. + _default_handler.flush = sys.stderr.flush + + # Apply our default configuration to the library root logger. + library_root_logger = _get_library_root_logger() + library_root_logger.addHandler(_default_handler) + library_root_logger.setLevel(_get_default_logging_level()) + library_root_logger.propagate = False + + +def _reset_library_root_logger() -> None: + global _default_handler + + with _lock: + if not _default_handler: + return + + library_root_logger = _get_library_root_logger() + library_root_logger.removeHandler(_default_handler) + library_root_logger.setLevel(logging.NOTSET) + _default_handler = None + + +def get_log_levels_dict() -> Dict[str, int]: + return log_levels + + +def get_logger(name: Optional[str] = None) -> logging.Logger: + """ + Return a logger with the specified name. + + This function is not supposed to be directly accessed unless you are writing a custom diffusers module. + """ + + if name is None: + name = _get_library_name() + + _configure_library_root_logger() + return logging.getLogger(name) + + +def get_verbosity() -> int: + """ + Return the current level for the 🤗 Diffusers' root logger as an `int`. + + Returns: + `int`: + Logging level integers which can be one of: + + - `50`: `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` + - `40`: `diffusers.logging.ERROR` + - `30`: `diffusers.logging.WARNING` or `diffusers.logging.WARN` + - `20`: `diffusers.logging.INFO` + - `10`: `diffusers.logging.DEBUG` + + """ + + _configure_library_root_logger() + return _get_library_root_logger().getEffectiveLevel() + + +def set_verbosity(verbosity: int) -> None: + """ + Set the verbosity level for the 🤗 Diffusers' root logger. 
+ + Args: + verbosity (`int`): + Logging level which can be one of: + + - `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` + - `diffusers.logging.ERROR` + - `diffusers.logging.WARNING` or `diffusers.logging.WARN` + - `diffusers.logging.INFO` + - `diffusers.logging.DEBUG` + """ + + _configure_library_root_logger() + _get_library_root_logger().setLevel(verbosity) + + +def set_verbosity_info() -> None: + """Set the verbosity to the `INFO` level.""" + return set_verbosity(INFO) + + +def set_verbosity_warning() -> None: + """Set the verbosity to the `WARNING` level.""" + return set_verbosity(WARNING) + + +def set_verbosity_debug() -> None: + """Set the verbosity to the `DEBUG` level.""" + return set_verbosity(DEBUG) + + +def set_verbosity_error() -> None: + """Set the verbosity to the `ERROR` level.""" + return set_verbosity(ERROR) + + +def disable_default_handler() -> None: + """Disable the default handler of the 🤗 Diffusers' root logger.""" + + _configure_library_root_logger() + + assert _default_handler is not None + _get_library_root_logger().removeHandler(_default_handler) + + +def enable_default_handler() -> None: + """Enable the default handler of the 🤗 Diffusers' root logger.""" + + _configure_library_root_logger() + + assert _default_handler is not None + _get_library_root_logger().addHandler(_default_handler) + + +def add_handler(handler: logging.Handler) -> None: + """adds a handler to the HuggingFace Diffusers' root logger.""" + + _configure_library_root_logger() + + assert handler is not None + _get_library_root_logger().addHandler(handler) + + +def remove_handler(handler: logging.Handler) -> None: + """removes given handler from the HuggingFace Diffusers' root logger.""" + + _configure_library_root_logger() + + assert handler is not None and handler in _get_library_root_logger().handlers + _get_library_root_logger().removeHandler(handler) + + +def disable_propagation() -> None: + """ + Disable propagation of the library log outputs. Note that log propagation is disabled by default. + """ + + _configure_library_root_logger() + _get_library_root_logger().propagate = False + + +def enable_propagation() -> None: + """ + Enable propagation of the library log outputs. Please disable the HuggingFace Diffusers' default handler to prevent + double logging if the root logger has been configured. + """ + + _configure_library_root_logger() + _get_library_root_logger().propagate = True + + +def enable_explicit_format() -> None: + """ + Enable explicit formatting for every 🤗 Diffusers' logger. The explicit formatter is as follows: + ``` + [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE + ``` + All handlers currently bound to the root logger are affected by this method. + """ + handlers = _get_library_root_logger().handlers + + for handler in handlers: + formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s") + handler.setFormatter(formatter) + + +def reset_format() -> None: + """ + Resets the formatting for 🤗 Diffusers' loggers. + + All handlers currently bound to the root logger are affected by this method. 
+ """ + handlers = _get_library_root_logger().handlers + + for handler in handlers: + handler.setFormatter(None) + + +def warning_advice(self, *args, **kwargs) -> None: + """ + This method is identical to `logger.warning()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this + warning will not be printed + """ + no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False) + if no_advisory_warnings: + return + self.warning(*args, **kwargs) + + +logging.Logger.warning_advice = warning_advice + + +class EmptyTqdm: + """Dummy tqdm which doesn't do anything.""" + + def __init__(self, *args, **kwargs): # pylint: disable=unused-argument + self._iterator = args[0] if args else None + + def __iter__(self): + return iter(self._iterator) + + def __getattr__(self, _): + """Return empty function.""" + + def empty_fn(*args, **kwargs): # pylint: disable=unused-argument + return + + return empty_fn + + def __enter__(self): + return self + + def __exit__(self, type_, value, traceback): + return + + +class _tqdm_cls: + def __call__(self, *args, **kwargs): + if _tqdm_active: + return tqdm_lib.tqdm(*args, **kwargs) + else: + return EmptyTqdm(*args, **kwargs) + + def set_lock(self, *args, **kwargs): + self._lock = None + if _tqdm_active: + return tqdm_lib.tqdm.set_lock(*args, **kwargs) + + def get_lock(self): + if _tqdm_active: + return tqdm_lib.tqdm.get_lock() + + +tqdm = _tqdm_cls() + + +def is_progress_bar_enabled() -> bool: + """Return a boolean indicating whether tqdm progress bars are enabled.""" + global _tqdm_active + return bool(_tqdm_active) + + +def enable_progress_bar() -> None: + """Enable tqdm progress bar.""" + global _tqdm_active + _tqdm_active = True + + +def disable_progress_bar() -> None: + """Disable tqdm progress bar.""" + global _tqdm_active + _tqdm_active = False diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/model_card_template.md b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/model_card_template.md new file mode 100644 index 000000000..f41b71e24 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/model_card_template.md @@ -0,0 +1,24 @@ +--- +{{ card_data }} +--- + + + +{{ model_description }} + +## Intended uses & limitations + +#### How to use + +```python +# TODO: add an example code snippet for running this diffusion pipeline +``` + +#### Limitations and bias + +[TODO: provide examples of latent issues and potential remediations] + +## Training details + +[TODO: describe the data used to train the model] diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/outputs.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/outputs.py new file mode 100644 index 000000000..6080a86b8 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/outputs.py @@ -0,0 +1,137 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +Generic utilities +""" + +from collections import OrderedDict +from dataclasses import fields, is_dataclass +from typing import Any, Tuple + +import numpy as np + +from .import_utils import is_torch_available, is_torch_version + + +def is_tensor(x) -> bool: + """ + Tests if `x` is a `torch.Tensor` or `np.ndarray`. + """ + if is_torch_available(): + import torch + + if isinstance(x, torch.Tensor): + return True + + return isinstance(x, np.ndarray) + + +class BaseOutput(OrderedDict): + """ + Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a + tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular + Python dictionary. + + + + You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple + first. + + + """ + + def __init_subclass__(cls) -> None: + """Register subclasses as pytree nodes. + + This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with + `static_graph=True` with modules that output `ModelOutput` subclasses. + """ + if is_torch_available(): + import torch.utils._pytree + + if is_torch_version("<", "2.2"): + torch.utils._pytree._register_pytree_node( + cls, + torch.utils._pytree._dict_flatten, + lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)), + ) + else: + torch.utils._pytree.register_pytree_node( + cls, + torch.utils._pytree._dict_flatten, + lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)), + ) + + def __post_init__(self) -> None: + class_fields = fields(self) + + # Safety and consistency checks + if not len(class_fields): + raise ValueError(f"{self.__class__.__name__} has no fields.") + + first_field = getattr(self, class_fields[0].name) + other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) + + if other_fields_are_none and isinstance(first_field, dict): + for key, value in first_field.items(): + self[key] = value + else: + for field in class_fields: + v = getattr(self, field.name) + if v is not None: + self[field.name] = v + + def __delitem__(self, *args, **kwargs): + raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") + + def setdefault(self, *args, **kwargs): + raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") + + def pop(self, *args, **kwargs): + raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") + + def update(self, *args, **kwargs): + raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") + + def __getitem__(self, k: Any) -> Any: + if isinstance(k, str): + inner_dict = dict(self.items()) + return inner_dict[k] + else: + return self.to_tuple()[k] + + def __setattr__(self, name: Any, value: Any) -> None: + if name in self.keys() and value is not None: + # Don't call self.__setitem__ to avoid recursion errors + super().__setitem__(name, value) + super().__setattr__(name, value) + + def __setitem__(self, key, value): + # Will raise a KeyException if needed + super().__setitem__(key, value) + # Don't call self.__setattr__ to avoid recursion errors + super().__setattr__(key, value) + + def __reduce__(self): + if not is_dataclass(self): + return super().__reduce__() + callable, _args, *remaining = 
super().__reduce__() + args = tuple(getattr(self, field.name) for field in fields(self)) + return callable, args, *remaining + + def to_tuple(self) -> Tuple[Any, ...]: + """ + Convert self to a tuple containing all the attributes/keys that are not `None`. + """ + return tuple(self[k] for k in self.keys()) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/peft_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/peft_utils.py new file mode 100644 index 000000000..85d16c7b5 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/peft_utils.py @@ -0,0 +1,268 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +PEFT utilities: Utilities related to peft library +""" +import collections +import importlib +from typing import Optional + +from packaging import version + +from .import_utils import is_peft_available, is_torch_available + + +if is_torch_available(): + import torch + + +def recurse_remove_peft_layers(model): + r""" + Recursively replace all instances of `LoraLayer` with corresponding new layers in `model`. + """ + from peft.tuners.tuners_utils import BaseTunerLayer + + has_base_layer_pattern = False + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + has_base_layer_pattern = hasattr(module, "base_layer") + break + + if has_base_layer_pattern: + from peft.utils import _get_submodules + + key_list = [key for key, _ in model.named_modules() if "lora" not in key] + for key in key_list: + try: + parent, target, target_name = _get_submodules(model, key) + except AttributeError: + continue + if hasattr(target, "base_layer"): + setattr(parent, target_name, target.get_base_layer()) + else: + # This is for backwards compatibility with PEFT <= 0.6.2. + # TODO can be removed once that PEFT version is no longer supported. 
+ from peft.tuners.lora import LoraLayer + + for name, module in model.named_children(): + if len(list(module.children())) > 0: + ## compound module, go inside it + recurse_remove_peft_layers(module) + + module_replaced = False + + if isinstance(module, LoraLayer) and isinstance(module, torch.nn.Linear): + new_module = torch.nn.Linear(module.in_features, module.out_features, bias=module.bias is not None).to( + module.weight.device + ) + new_module.weight = module.weight + if module.bias is not None: + new_module.bias = module.bias + + module_replaced = True + elif isinstance(module, LoraLayer) and isinstance(module, torch.nn.Conv2d): + new_module = torch.nn.Conv2d( + module.in_channels, + module.out_channels, + module.kernel_size, + module.stride, + module.padding, + module.dilation, + module.groups, + ).to(module.weight.device) + + new_module.weight = module.weight + if module.bias is not None: + new_module.bias = module.bias + + module_replaced = True + + if module_replaced: + setattr(model, name, new_module) + del module + + if torch.cuda.is_available(): + torch.cuda.empty_cache() + return model + + +def scale_lora_layers(model, weight): + """ + Adjust the weightage given to the LoRA layers of the model. + + Args: + model (`torch.nn.Module`): + The model to scale. + weight (`float`): + The weight to be given to the LoRA layers. + """ + from peft.tuners.tuners_utils import BaseTunerLayer + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + module.scale_layer(weight) + + +def unscale_lora_layers(model, weight: Optional[float] = None): + """ + Removes the previously passed weight given to the LoRA layers of the model. + + Args: + model (`torch.nn.Module`): + The model to scale. + weight (`float`, *optional*): + The weight to be given to the LoRA layers. If no scale is passed the scale of the lora layer will be + re-initialized to the correct value. If 0.0 is passed, we will re-initialize the scale with the correct + value. + """ + from peft.tuners.tuners_utils import BaseTunerLayer + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + if weight is not None and weight != 0: + module.unscale_layer(weight) + elif weight is not None and weight == 0: + for adapter_name in module.active_adapters: + # if weight == 0 unscale should re-set the scale to the original value. 
+ module.set_scale(adapter_name, 1.0) + + +def get_peft_kwargs(rank_dict, network_alpha_dict, peft_state_dict, is_unet=True): + rank_pattern = {} + alpha_pattern = {} + r = lora_alpha = list(rank_dict.values())[0] + + if len(set(rank_dict.values())) > 1: + # get the rank occuring the most number of times + r = collections.Counter(rank_dict.values()).most_common()[0][0] + + # for modules with rank different from the most occuring rank, add it to the `rank_pattern` + rank_pattern = dict(filter(lambda x: x[1] != r, rank_dict.items())) + rank_pattern = {k.split(".lora_B.")[0]: v for k, v in rank_pattern.items()} + + if network_alpha_dict is not None and len(network_alpha_dict) > 0: + if len(set(network_alpha_dict.values())) > 1: + # get the alpha occuring the most number of times + lora_alpha = collections.Counter(network_alpha_dict.values()).most_common()[0][0] + + # for modules with alpha different from the most occuring alpha, add it to the `alpha_pattern` + alpha_pattern = dict(filter(lambda x: x[1] != lora_alpha, network_alpha_dict.items())) + if is_unet: + alpha_pattern = { + ".".join(k.split(".lora_A.")[0].split(".")).replace(".alpha", ""): v + for k, v in alpha_pattern.items() + } + else: + alpha_pattern = {".".join(k.split(".down.")[0].split(".")[:-1]): v for k, v in alpha_pattern.items()} + else: + lora_alpha = set(network_alpha_dict.values()).pop() + + # layer names without the Diffusers specific + target_modules = list({name.split(".lora")[0] for name in peft_state_dict.keys()}) + + lora_config_kwargs = { + "r": r, + "lora_alpha": lora_alpha, + "rank_pattern": rank_pattern, + "alpha_pattern": alpha_pattern, + "target_modules": target_modules, + } + return lora_config_kwargs + + +def get_adapter_name(model): + from peft.tuners.tuners_utils import BaseTunerLayer + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + return f"default_{len(module.r)}" + return "default_0" + + +def set_adapter_layers(model, enabled=True): + from peft.tuners.tuners_utils import BaseTunerLayer + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + # The recent version of PEFT needs to call `enable_adapters` instead + if hasattr(module, "enable_adapters"): + module.enable_adapters(enabled=enabled) + else: + module.disable_adapters = not enabled + + +def delete_adapter_layers(model, adapter_name): + from peft.tuners.tuners_utils import BaseTunerLayer + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + if hasattr(module, "delete_adapter"): + module.delete_adapter(adapter_name) + else: + raise ValueError( + "The version of PEFT you are using is not compatible, please use a version that is greater than 0.6.1" + ) + + # For transformers integration - we need to pop the adapter from the config + if getattr(model, "_hf_peft_config_loaded", False) and hasattr(model, "peft_config"): + model.peft_config.pop(adapter_name, None) + # In case all adapters are deleted, we need to delete the config + # and make sure to set the flag to False + if len(model.peft_config) == 0: + del model.peft_config + model._hf_peft_config_loaded = None + + +def set_weights_and_activate_adapters(model, adapter_names, weights): + from peft.tuners.tuners_utils import BaseTunerLayer + + # iterate over each adapter, make it active and set the corresponding scaling weight + for adapter_name, weight in zip(adapter_names, weights): + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + # For backward compatbility with previous PEFT versions + if 
hasattr(module, "set_adapter"): + module.set_adapter(adapter_name) + else: + module.active_adapter = adapter_name + module.set_scale(adapter_name, weight) + + # set multiple active adapters + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + # For backward compatbility with previous PEFT versions + if hasattr(module, "set_adapter"): + module.set_adapter(adapter_names) + else: + module.active_adapter = adapter_names + + +def check_peft_version(min_version: str) -> None: + r""" + Checks if the version of PEFT is compatible. + + Args: + version (`str`): + The version of PEFT to check against. + """ + if not is_peft_available(): + raise ValueError("PEFT is not installed. Please install it with `pip install peft`") + + is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) > version.parse(min_version) + + if not is_peft_version_compatible: + raise ValueError( + f"The version of PEFT you are using is not compatible, please use a version that is greater" + f" than {min_version}" + ) diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/pil_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/pil_utils.py new file mode 100644 index 000000000..76678070b --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/pil_utils.py @@ -0,0 +1,67 @@ +from typing import List + +import PIL.Image +import PIL.ImageOps +from packaging import version +from PIL import Image + + +if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): + PIL_INTERPOLATION = { + "linear": PIL.Image.Resampling.BILINEAR, + "bilinear": PIL.Image.Resampling.BILINEAR, + "bicubic": PIL.Image.Resampling.BICUBIC, + "lanczos": PIL.Image.Resampling.LANCZOS, + "nearest": PIL.Image.Resampling.NEAREST, + } +else: + PIL_INTERPOLATION = { + "linear": PIL.Image.LINEAR, + "bilinear": PIL.Image.BILINEAR, + "bicubic": PIL.Image.BICUBIC, + "lanczos": PIL.Image.LANCZOS, + "nearest": PIL.Image.NEAREST, + } + + +def pt_to_pil(images): + """ + Convert a torch image to a PIL image. + """ + images = (images / 2 + 0.5).clamp(0, 1) + images = images.cpu().permute(0, 2, 3, 1).float().numpy() + images = numpy_to_pil(images) + return images + + +def numpy_to_pil(images): + """ + Convert a numpy image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] + images = (images * 255).round().astype("uint8") + if images.shape[-1] == 1: + # special case for grayscale (single channel) images + pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] + else: + pil_images = [Image.fromarray(image) for image in images] + + return pil_images + + +def make_image_grid(images: List[PIL.Image.Image], rows: int, cols: int, resize: int = None) -> PIL.Image.Image: + """ + Prepares a single grid of images. Useful for visualization purposes. 
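+
+    A minimal usage sketch (illustrative only; any list of `rows * cols` PIL images works):
+
+    ```python
+    from PIL import Image
+
+    imgs = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
+    grid = make_image_grid(imgs, rows=2, cols=2, resize=32)  # one 64x64 image made of 2x2 tiles of 32x32
+    ```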
+ """ + assert len(images) == rows * cols + + if resize is not None: + images = [img.resize((resize, resize)) for img in images] + + w, h = images[0].size + grid = Image.new("RGB", size=(cols * w, rows * h)) + + for i, img in enumerate(images): + grid.paste(img, box=(i % cols * w, i // cols * h)) + return grid diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/state_dict_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/state_dict_utils.py new file mode 100644 index 000000000..c4566636d --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/state_dict_utils.py @@ -0,0 +1,324 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +State dict utilities: utility methods for converting state dicts easily +""" +import enum + +from .logging import get_logger + + +logger = get_logger(__name__) + + +class StateDictType(enum.Enum): + """ + The mode to use when converting state dicts. + """ + + DIFFUSERS_OLD = "diffusers_old" + KOHYA_SS = "kohya_ss" + PEFT = "peft" + DIFFUSERS = "diffusers" + + +# We need to define a proper mapping for Unet since it uses different output keys than text encoder +# e.g. 
to_q_lora -> q_proj / to_q +UNET_TO_DIFFUSERS = { + ".to_out_lora.up": ".to_out.0.lora_B", + ".to_out_lora.down": ".to_out.0.lora_A", + ".to_q_lora.down": ".to_q.lora_A", + ".to_q_lora.up": ".to_q.lora_B", + ".to_k_lora.down": ".to_k.lora_A", + ".to_k_lora.up": ".to_k.lora_B", + ".to_v_lora.down": ".to_v.lora_A", + ".to_v_lora.up": ".to_v.lora_B", + ".lora.up": ".lora_B", + ".lora.down": ".lora_A", +} + + +DIFFUSERS_TO_PEFT = { + ".q_proj.lora_linear_layer.up": ".q_proj.lora_B", + ".q_proj.lora_linear_layer.down": ".q_proj.lora_A", + ".k_proj.lora_linear_layer.up": ".k_proj.lora_B", + ".k_proj.lora_linear_layer.down": ".k_proj.lora_A", + ".v_proj.lora_linear_layer.up": ".v_proj.lora_B", + ".v_proj.lora_linear_layer.down": ".v_proj.lora_A", + ".out_proj.lora_linear_layer.up": ".out_proj.lora_B", + ".out_proj.lora_linear_layer.down": ".out_proj.lora_A", + ".lora_linear_layer.up": ".lora_B", + ".lora_linear_layer.down": ".lora_A", +} + +DIFFUSERS_OLD_TO_PEFT = { + ".to_q_lora.up": ".q_proj.lora_B", + ".to_q_lora.down": ".q_proj.lora_A", + ".to_k_lora.up": ".k_proj.lora_B", + ".to_k_lora.down": ".k_proj.lora_A", + ".to_v_lora.up": ".v_proj.lora_B", + ".to_v_lora.down": ".v_proj.lora_A", + ".to_out_lora.up": ".out_proj.lora_B", + ".to_out_lora.down": ".out_proj.lora_A", + ".lora_linear_layer.up": ".lora_B", + ".lora_linear_layer.down": ".lora_A", +} + +PEFT_TO_DIFFUSERS = { + ".q_proj.lora_B": ".q_proj.lora_linear_layer.up", + ".q_proj.lora_A": ".q_proj.lora_linear_layer.down", + ".k_proj.lora_B": ".k_proj.lora_linear_layer.up", + ".k_proj.lora_A": ".k_proj.lora_linear_layer.down", + ".v_proj.lora_B": ".v_proj.lora_linear_layer.up", + ".v_proj.lora_A": ".v_proj.lora_linear_layer.down", + ".out_proj.lora_B": ".out_proj.lora_linear_layer.up", + ".out_proj.lora_A": ".out_proj.lora_linear_layer.down", + "to_k.lora_A": "to_k.lora.down", + "to_k.lora_B": "to_k.lora.up", + "to_q.lora_A": "to_q.lora.down", + "to_q.lora_B": "to_q.lora.up", + "to_v.lora_A": "to_v.lora.down", + "to_v.lora_B": "to_v.lora.up", + "to_out.0.lora_A": "to_out.0.lora.down", + "to_out.0.lora_B": "to_out.0.lora.up", +} + +DIFFUSERS_OLD_TO_DIFFUSERS = { + ".to_q_lora.up": ".q_proj.lora_linear_layer.up", + ".to_q_lora.down": ".q_proj.lora_linear_layer.down", + ".to_k_lora.up": ".k_proj.lora_linear_layer.up", + ".to_k_lora.down": ".k_proj.lora_linear_layer.down", + ".to_v_lora.up": ".v_proj.lora_linear_layer.up", + ".to_v_lora.down": ".v_proj.lora_linear_layer.down", + ".to_out_lora.up": ".out_proj.lora_linear_layer.up", + ".to_out_lora.down": ".out_proj.lora_linear_layer.down", +} + +PEFT_TO_KOHYA_SS = { + "lora_A": "lora_down", + "lora_B": "lora_up", + # This is not a comprehensive dict as kohya format requires replacing `.` with `_` in keys, + # adding prefixes and adding alpha values + # Check `convert_state_dict_to_kohya` for more +} + +PEFT_STATE_DICT_MAPPINGS = { + StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_PEFT, + StateDictType.DIFFUSERS: DIFFUSERS_TO_PEFT, +} + +DIFFUSERS_STATE_DICT_MAPPINGS = { + StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_DIFFUSERS, + StateDictType.PEFT: PEFT_TO_DIFFUSERS, +} + +KOHYA_STATE_DICT_MAPPINGS = {StateDictType.PEFT: PEFT_TO_KOHYA_SS} + +KEYS_TO_ALWAYS_REPLACE = { + ".processor.": ".", +} + + +def convert_state_dict(state_dict, mapping): + r""" + Simply iterates over the state dict and replaces the patterns in `mapping` with the corresponding values. + + Args: + state_dict (`dict[str, torch.Tensor]`): + The state dict to convert. 
+ mapping (`dict[str, str]`): + The mapping to use for conversion, the mapping should be a dictionary with the following structure: + - key: the pattern to replace + - value: the pattern to replace with + + Returns: + converted_state_dict (`dict`) + The converted state dict. + """ + converted_state_dict = {} + for k, v in state_dict.items(): + # First, filter out the keys that we always want to replace + for pattern in KEYS_TO_ALWAYS_REPLACE.keys(): + if pattern in k: + new_pattern = KEYS_TO_ALWAYS_REPLACE[pattern] + k = k.replace(pattern, new_pattern) + + for pattern in mapping.keys(): + if pattern in k: + new_pattern = mapping[pattern] + k = k.replace(pattern, new_pattern) + break + converted_state_dict[k] = v + return converted_state_dict + + +def convert_state_dict_to_peft(state_dict, original_type=None, **kwargs): + r""" + Converts a state dict to the PEFT format The state dict can be from previous diffusers format (`OLD_DIFFUSERS`), or + new diffusers format (`DIFFUSERS`). The method only supports the conversion from diffusers old/new to PEFT for now. + + Args: + state_dict (`dict[str, torch.Tensor]`): + The state dict to convert. + original_type (`StateDictType`, *optional*): + The original type of the state dict, if not provided, the method will try to infer it automatically. + """ + if original_type is None: + # Old diffusers to PEFT + if any("to_out_lora" in k for k in state_dict.keys()): + original_type = StateDictType.DIFFUSERS_OLD + elif any("lora_linear_layer" in k for k in state_dict.keys()): + original_type = StateDictType.DIFFUSERS + else: + raise ValueError("Could not automatically infer state dict type") + + if original_type not in PEFT_STATE_DICT_MAPPINGS.keys(): + raise ValueError(f"Original type {original_type} is not supported") + + mapping = PEFT_STATE_DICT_MAPPINGS[original_type] + return convert_state_dict(state_dict, mapping) + + +def convert_state_dict_to_diffusers(state_dict, original_type=None, **kwargs): + r""" + Converts a state dict to new diffusers format. The state dict can be from previous diffusers format + (`OLD_DIFFUSERS`), or PEFT format (`PEFT`) or new diffusers format (`DIFFUSERS`). In the last case the method will + return the state dict as is. + + The method only supports the conversion from diffusers old, PEFT to diffusers new for now. + + Args: + state_dict (`dict[str, torch.Tensor]`): + The state dict to convert. + original_type (`StateDictType`, *optional*): + The original type of the state dict, if not provided, the method will try to infer it automatically. + kwargs (`dict`, *args*): + Additional arguments to pass to the method. + + - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended + with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in + `get_peft_model_state_dict` method: + https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 + but we add it here in case we don't want to rely on that method. + """ + peft_adapter_name = kwargs.pop("adapter_name", None) + if peft_adapter_name is not None: + peft_adapter_name = "." 
+ peft_adapter_name + else: + peft_adapter_name = "" + + if original_type is None: + # Old diffusers to PEFT + if any("to_out_lora" in k for k in state_dict.keys()): + original_type = StateDictType.DIFFUSERS_OLD + elif any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()): + original_type = StateDictType.PEFT + elif any("lora_linear_layer" in k for k in state_dict.keys()): + # nothing to do + return state_dict + else: + raise ValueError("Could not automatically infer state dict type") + + if original_type not in DIFFUSERS_STATE_DICT_MAPPINGS.keys(): + raise ValueError(f"Original type {original_type} is not supported") + + mapping = DIFFUSERS_STATE_DICT_MAPPINGS[original_type] + return convert_state_dict(state_dict, mapping) + + +def convert_unet_state_dict_to_peft(state_dict): + r""" + Converts a state dict from UNet format to diffusers format - i.e. by removing some keys + """ + mapping = UNET_TO_DIFFUSERS + return convert_state_dict(state_dict, mapping) + + +def convert_all_state_dict_to_peft(state_dict): + r""" + Attempts to first `convert_state_dict_to_peft`, and if it doesn't detect `lora_linear_layer` + for a valid `DIFFUSERS` LoRA for example, attempts to exclusively convert the Unet `convert_unet_state_dict_to_peft` + """ + try: + peft_dict = convert_state_dict_to_peft(state_dict) + except Exception as e: + if str(e) == "Could not automatically infer state dict type": + peft_dict = convert_unet_state_dict_to_peft(state_dict) + else: + raise + + if not any("lora_A" in key or "lora_B" in key for key in peft_dict.keys()): + raise ValueError("Your LoRA was not converted to PEFT") + + return peft_dict + + +def convert_state_dict_to_kohya(state_dict, original_type=None, **kwargs): + r""" + Converts a `PEFT` state dict to `Kohya` format that can be used in AUTOMATIC1111, ComfyUI, SD.Next, InvokeAI, etc. + The method only supports the conversion from PEFT to Kohya for now. + + Args: + state_dict (`dict[str, torch.Tensor]`): + The state dict to convert. + original_type (`StateDictType`, *optional*): + The original type of the state dict, if not provided, the method will try to infer it automatically. + kwargs (`dict`, *args*): + Additional arguments to pass to the method. + + - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended + with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in + `get_peft_model_state_dict` method: + https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 + but we add it here in case we don't want to rely on that method. + """ + try: + import torch + except ImportError: + logger.error("Converting PEFT state dicts to Kohya requires torch to be installed.") + raise + + peft_adapter_name = kwargs.pop("adapter_name", None) + if peft_adapter_name is not None: + peft_adapter_name = "." 
+ peft_adapter_name + else: + peft_adapter_name = "" + + if original_type is None: + if any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()): + original_type = StateDictType.PEFT + + if original_type not in KOHYA_STATE_DICT_MAPPINGS.keys(): + raise ValueError(f"Original type {original_type} is not supported") + + # Use the convert_state_dict function with the appropriate mapping + kohya_ss_partial_state_dict = convert_state_dict(state_dict, KOHYA_STATE_DICT_MAPPINGS[StateDictType.PEFT]) + kohya_ss_state_dict = {} + + # Additional logic for replacing header, alpha parameters `.` with `_` in all keys + for kohya_key, weight in kohya_ss_partial_state_dict.items(): + if "text_encoder_2." in kohya_key: + kohya_key = kohya_key.replace("text_encoder_2.", "lora_te2.") + elif "text_encoder." in kohya_key: + kohya_key = kohya_key.replace("text_encoder.", "lora_te1.") + elif "unet" in kohya_key: + kohya_key = kohya_key.replace("unet", "lora_unet") + kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2) + kohya_key = kohya_key.replace(peft_adapter_name, "") # Kohya doesn't take names + kohya_ss_state_dict[kohya_key] = weight + if "lora_down" in kohya_key: + alpha_key = f'{kohya_key.split(".")[0]}.alpha' + kohya_ss_state_dict[alpha_key] = torch.tensor(len(weight)) + + return kohya_ss_state_dict diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/testing_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/testing_utils.py new file mode 100644 index 000000000..edbf6f31a --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/testing_utils.py @@ -0,0 +1,967 @@ +import functools +import importlib +import inspect +import io +import logging +import multiprocessing +import os +import random +import re +import struct +import sys +import tempfile +import time +import unittest +import urllib.parse +from contextlib import contextmanager +from distutils.util import strtobool +from io import BytesIO, StringIO +from pathlib import Path +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import PIL.ImageOps +import requests +from numpy.linalg import norm +from packaging import version + +from .import_utils import ( + BACKENDS_MAPPING, + is_compel_available, + is_flax_available, + is_note_seq_available, + is_onnx_available, + is_opencv_available, + is_peft_available, + is_torch_available, + is_torch_version, + is_torchsde_available, + is_transformers_available, +) +from .logging import get_logger + + +global_rng = random.Random() + +logger = get_logger(__name__) + +_required_peft_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("peft")).base_version +) > version.parse("0.5") +_required_transformers_version = is_transformers_available() and version.parse( + version.parse(importlib.metadata.version("transformers")).base_version +) > version.parse("4.33") + +USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version + +if is_torch_available(): + import torch + + # Set a backend environment variable for any extra module import required for a custom accelerator + if "DIFFUSERS_TEST_BACKEND" in os.environ: + backend = os.environ["DIFFUSERS_TEST_BACKEND"] + try: + _ = importlib.import_module(backend) + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"Failed to import `DIFFUSERS_TEST_BACKEND` '{backend}'! 
This should be the name of an installed module \ + to enable a specified backend.):\n{e}" + ) from e + + if "DIFFUSERS_TEST_DEVICE" in os.environ: + torch_device = os.environ["DIFFUSERS_TEST_DEVICE"] + try: + # try creating device to see if provided device is valid + _ = torch.device(torch_device) + except RuntimeError as e: + raise RuntimeError( + f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}" + ) from e + logger.info(f"torch_device overrode to {torch_device}") + else: + torch_device = "cuda" if torch.cuda.is_available() else "cpu" + is_torch_higher_equal_than_1_12 = version.parse( + version.parse(torch.__version__).base_version + ) >= version.parse("1.12") + + if is_torch_higher_equal_than_1_12: + # Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details + mps_backend_registered = hasattr(torch.backends, "mps") + torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device + + +def torch_all_close(a, b, *args, **kwargs): + if not is_torch_available(): + raise ValueError("PyTorch needs to be installed to use this function.") + if not torch.allclose(a, b, *args, **kwargs): + assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}." + return True + + +def numpy_cosine_similarity_distance(a, b): + similarity = np.dot(a, b) / (norm(a) * norm(b)) + distance = 1.0 - similarity.mean() + + return distance + + +def print_tensor_test(tensor, filename="test_corrections.txt", expected_tensor_name="expected_slice"): + test_name = os.environ.get("PYTEST_CURRENT_TEST") + if not torch.is_tensor(tensor): + tensor = torch.from_numpy(tensor) + + tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "") + # format is usually: + # expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161]) + output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array") + test_file, test_class, test_fn = test_name.split("::") + test_fn = test_fn.split()[0] + with open(filename, "a") as f: + print(";".join([test_file, test_class, test_fn, output_str]), file=f) + + +def get_tests_dir(append_path=None): + """ + Args: + append_path: optional path to append to the tests dir path + Return: + The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is + joined after the `tests` dir the former is provided. + """ + # this function caller's __file__ + caller__file__ = inspect.stack()[1][1] + tests_dir = os.path.abspath(os.path.dirname(caller__file__)) + + while not tests_dir.endswith("tests"): + tests_dir = os.path.dirname(tests_dir) + + if append_path: + return Path(tests_dir, append_path).as_posix() + else: + return tests_dir + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = strtobool(value) + except ValueError: + # More values are supported, but let's keep the message simple. 
+ raise ValueError(f"If set, {key} must be yes or no.") + return _value + + +_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) +_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False) + + +def floats_tensor(shape, scale=1.0, rng=None, name=None): + """Creates a random float32 tensor""" + if rng is None: + rng = global_rng + + total_dims = 1 + for dim in shape: + total_dims *= dim + + values = [] + for _ in range(total_dims): + values.append(rng.random() * scale) + + return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous() + + +def slow(test_case): + """ + Decorator marking a test as slow. + + Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case) + + +def nightly(test_case): + """ + Decorator marking a test that runs nightly in the diffusers CI. + + Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case) + + +def require_torch(test_case): + """ + Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed. + """ + return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case) + + +def require_torch_2(test_case): + """ + Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed. + """ + return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")( + test_case + ) + + +def require_torch_gpu(test_case): + """Decorator marking a test that requires CUDA and PyTorch.""" + return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")( + test_case + ) + + +# These decorators are for accelerator-specific behaviours that are not GPU-specific +def require_torch_accelerator(test_case): + """Decorator marking a test that requires an accelerator backend and PyTorch.""" + return unittest.skipUnless(is_torch_available() and torch_device != "cpu", "test requires accelerator+PyTorch")( + test_case + ) + + +def require_torch_accelerator_with_fp16(test_case): + """Decorator marking a test that requires an accelerator with support for the FP16 data type.""" + return unittest.skipUnless(_is_torch_fp16_available(torch_device), "test requires accelerator with fp16 support")( + test_case + ) + + +def require_torch_accelerator_with_fp64(test_case): + """Decorator marking a test that requires an accelerator with support for the FP64 data type.""" + return unittest.skipUnless(_is_torch_fp64_available(torch_device), "test requires accelerator with fp64 support")( + test_case + ) + + +def require_torch_accelerator_with_training(test_case): + """Decorator marking a test that requires an accelerator with support for training.""" + return unittest.skipUnless( + is_torch_available() and backend_supports_training(torch_device), + "test requires accelerator with training support", + )(test_case) + + +def skip_mps(test_case): + """Decorator marking a test to skip if torch_device is 'mps'""" + return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case) + + +def require_flax(test_case): + """ + Decorator marking a test that requires JAX & Flax. 
These tests are skipped when one / both are not installed + """ + return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case) + + +def require_compel(test_case): + """ + Decorator marking a test that requires compel: https://github.com/damian0815/compel. These tests are skipped when + the library is not installed. + """ + return unittest.skipUnless(is_compel_available(), "test requires compel")(test_case) + + +def require_onnxruntime(test_case): + """ + Decorator marking a test that requires onnxruntime. These tests are skipped when onnxruntime isn't installed. + """ + return unittest.skipUnless(is_onnx_available(), "test requires onnxruntime")(test_case) + + +def require_note_seq(test_case): + """ + Decorator marking a test that requires note_seq. These tests are skipped when note_seq isn't installed. + """ + return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case) + + +def require_torchsde(test_case): + """ + Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed. + """ + return unittest.skipUnless(is_torchsde_available(), "test requires torchsde")(test_case) + + +def require_peft_backend(test_case): + """ + Decorator marking a test that requires PEFT backend, this would require some specific versions of PEFT and + transformers. + """ + return unittest.skipUnless(USE_PEFT_BACKEND, "test requires PEFT backend")(test_case) + + +def require_peft_version_greater(peft_version): + """ + Decorator marking a test that requires PEFT backend with a specific version, this would require some specific + versions of PEFT and transformers. + """ + + def decorator(test_case): + correct_peft_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("peft")).base_version + ) > version.parse(peft_version) + return unittest.skipUnless( + correct_peft_version, f"test requires PEFT backend with the version greater than {peft_version}" + )(test_case) + + return decorator + + +def deprecate_after_peft_backend(test_case): + """ + Decorator marking a test that will be skipped after PEFT backend + """ + return unittest.skipUnless(not USE_PEFT_BACKEND, "test skipped in favor of PEFT backend")(test_case) + + +def require_python39_or_higher(test_case): + def python39_available(): + sys_info = sys.version_info + major, minor = sys_info.major, sys_info.minor + return major == 3 and minor >= 9 + + return unittest.skipUnless(python39_available(), "test requires Python 3.9 or higher")(test_case) + + +def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray: + if isinstance(arry, str): + if local_path is not None: + # local_path can be passed to correct images of tests + return Path(local_path, arry.split("/")[-5], arry.split("/")[-2], arry.split("/")[-1]).as_posix() + elif arry.startswith("http://") or arry.startswith("https://"): + response = requests.get(arry) + response.raise_for_status() + arry = np.load(BytesIO(response.content)) + elif os.path.isfile(arry): + arry = np.load(arry) + else: + raise ValueError( + f"Incorrect path or url, URLs must start with `http://` or `https://`, and {arry} is not a valid path" + ) + elif isinstance(arry, np.ndarray): + pass + else: + raise ValueError( + "Incorrect format used for numpy ndarray. Should be an url linking to an image, a local path, or a" + " ndarray." 
+ ) + + return arry + + +def load_pt(url: str): + response = requests.get(url) + response.raise_for_status() + arry = torch.load(BytesIO(response.content)) + return arry + + +def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image: + """ + Loads `image` to a PIL Image. + + Args: + image (`str` or `PIL.Image.Image`): + The image to convert to the PIL Image format. + Returns: + `PIL.Image.Image`: + A PIL Image. + """ + if isinstance(image, str): + if image.startswith("http://") or image.startswith("https://"): + image = PIL.Image.open(requests.get(image, stream=True).raw) + elif os.path.isfile(image): + image = PIL.Image.open(image) + else: + raise ValueError( + f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path" + ) + elif isinstance(image, PIL.Image.Image): + image = image + else: + raise ValueError( + "Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image." + ) + image = PIL.ImageOps.exif_transpose(image) + image = image.convert("RGB") + return image + + +def preprocess_image(image: PIL.Image, batch_size: int): + w, h = image.size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + image = image.resize((w, h), resample=PIL.Image.LANCZOS) + image = np.array(image).astype(np.float32) / 255.0 + image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) + image = torch.from_numpy(image) + return 2.0 * image - 1.0 + + +def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) -> str: + if output_gif_path is None: + output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name + + image[0].save( + output_gif_path, + save_all=True, + append_images=image[1:], + optimize=False, + duration=100, + loop=0, + ) + return output_gif_path + + +@contextmanager +def buffered_writer(raw_f): + f = io.BufferedWriter(raw_f) + yield f + f.flush() + + +def export_to_ply(mesh, output_ply_path: str = None): + """ + Write a PLY file for a mesh. 
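+
+    A minimal usage sketch (illustrative only; the mesh below is a hypothetical stand-in, not a real pipeline
+    output — any object exposing `verts`, `faces` and RGB `vertex_channels` tensors should work):
+
+    ```python
+    import types
+
+    import torch
+
+    mesh = types.SimpleNamespace(
+        verts=torch.rand(3, 3),                             # xyz coordinates in [0, 1)
+        faces=torch.tensor([[0, 1, 2]]),                    # a single triangle
+        vertex_channels={c: torch.rand(3) for c in "RGB"},  # per-vertex colors in [0, 1)
+    )
+    ply_path = export_to_ply(mesh)  # writes a binary little-endian PLY file and returns its path
+    ```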
+    """
+    if output_ply_path is None:
+        output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name
+
+    coords = mesh.verts.detach().cpu().numpy()
+    faces = mesh.faces.cpu().numpy()
+    rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)
+
+    with buffered_writer(open(output_ply_path, "wb")) as f:
+        f.write(b"ply\n")
+        f.write(b"format binary_little_endian 1.0\n")
+        f.write(bytes(f"element vertex {len(coords)}\n", "ascii"))
+        f.write(b"property float x\n")
+        f.write(b"property float y\n")
+        f.write(b"property float z\n")
+        if rgb is not None:
+            f.write(b"property uchar red\n")
+            f.write(b"property uchar green\n")
+            f.write(b"property uchar blue\n")
+        if faces is not None:
+            f.write(bytes(f"element face {len(faces)}\n", "ascii"))
+            f.write(b"property list uchar int vertex_index\n")
+        f.write(b"end_header\n")
+
+        if rgb is not None:
+            rgb = (rgb * 255.499).round().astype(int)
+            vertices = [
+                (*coord, *rgb)
+                for coord, rgb in zip(
+                    coords.tolist(),
+                    rgb.tolist(),
+                )
+            ]
+            format = struct.Struct("<3f3B")
+            for item in vertices:
+                f.write(format.pack(*item))
+        else:
+            format = struct.Struct("<3f")
+            for vertex in coords.tolist():
+                f.write(format.pack(*vertex))
+
+        if faces is not None:
+            format = struct.Struct("<B3I")
+            for tri in faces.tolist():
+                f.write(format.pack(len(tri), *tri))
+
+    return output_ply_path
+
+
+def export_to_video(video_frames: List[np.ndarray], output_video_path: str = None) -> str:
+    if is_opencv_available():
+        import cv2
+    else:
+        raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video"))
+    if output_video_path is None:
+        output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name
+
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+    h, w, c = video_frames[0].shape
+    video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=8, frameSize=(w, h))
+    for i in range(len(video_frames)):
+        img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR)
+        video_writer.write(img)
+    return output_video_path
+
+
+def load_hf_numpy(path) -> np.ndarray:
+    base_url = "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main"
+
+    if not path.startswith("http://") and not path.startswith("https://"):
+        path = os.path.join(base_url, urllib.parse.quote(path))
+
+    return load_numpy(path)
+
+
+# --- pytest conf functions --- #
+
+# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
+pytest_opt_registered = {}
+
+
+def pytest_addoption_shared(parser):
+    """
+    This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
+
+    It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
+    option.
+
+    """
+    option = "--make-reports"
+    if option not in pytest_opt_registered:
+        parser.addoption(
+            option,
+            action="store",
+            default=False,
+            help="generate report files. The value of this option is used as a prefix to report names",
+        )
+        pytest_opt_registered[option] = 1
+
+
+def pytest_terminal_summary_main(tr, id):
+    """
+    Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
+    directory. The report files are prefixed with the test suite name.
+
+    This function emulates --duration and -rA pytest arguments.
+
+    This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
+    there.
+
+    Args:
+    - tr: `terminalreporter` passed from `conftest.py`
+    - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
+      needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
+ + NB: this functions taps into a private _pytest API and while unlikely, it could break should + pytest do internal changes - also it calls default internal methods of terminalreporter which + can be hijacked by various `pytest-` plugins and interfere. + + """ + from _pytest.config import create_terminal_writer + + if not len(id): + id = "tests" + + config = tr.config + orig_writer = config.get_terminal_writer() + orig_tbstyle = config.option.tbstyle + orig_reportchars = tr.reportchars + + dir = "reports" + Path(dir).mkdir(parents=True, exist_ok=True) + report_files = { + k: f"{dir}/{id}_{k}.txt" + for k in [ + "durations", + "errors", + "failures_long", + "failures_short", + "failures_line", + "passes", + "stats", + "summary_short", + "warnings", + ] + } + + # custom durations report + # note: there is no need to call pytest --durations=XX to get this separate report + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66 + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if dlist: + dlist.sort(key=lambda x: x.duration, reverse=True) + with open(report_files["durations"], "w") as f: + durations_min = 0.05 # sec + f.write("slowest durations\n") + for i, rep in enumerate(dlist): + if rep.duration < durations_min: + f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted") + break + f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n") + + def summary_failures_short(tr): + # expecting that the reports were --tb=long (default) so we chop them off here to the last frame + reports = tr.getreports("failed") + if not reports: + return + tr.write_sep("=", "FAILURES SHORT STACK") + for rep in reports: + msg = tr._getfailureheadline(rep) + tr.write_sep("_", msg, red=True, bold=True) + # chop off the optional leading extra frames, leaving only the last one + longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S) + tr._tw.line(longrepr) + # note: not printing out any rep.sections to keep the report short + + # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814 + # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g. 
+ # pytest-instafail does that) + + # report failures with line/short/long styles + config.option.tbstyle = "auto" # full tb + with open(report_files["failures_long"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + # config.option.tbstyle = "short" # short tb + with open(report_files["failures_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + summary_failures_short(tr) + + config.option.tbstyle = "line" # one line per error + with open(report_files["failures_line"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + with open(report_files["errors"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_errors() + + with open(report_files["warnings"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_warnings() # normal warnings + tr.summary_warnings() # final warnings + + tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary()) + with open(report_files["passes"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_passes() + + with open(report_files["summary_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.short_test_summary() + + with open(report_files["stats"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_stats() + + # restore: + tr._tw = orig_writer + tr.reportchars = orig_reportchars + config.option.tbstyle = orig_tbstyle + + +# Copied from https://github.com/huggingface/transformers/blob/000e52aec8850d3fe2f360adc6fd256e5b47fe4c/src/transformers/testing_utils.py#L1905 +def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None): + """ + To decorate flaky tests. They will be retried on failures. + + Args: + max_attempts (`int`, *optional*, defaults to 5): + The maximum number of attempts to retry the flaky test. + wait_before_retry (`float`, *optional*): + If provided, will wait that number of seconds before retrying the test. + description (`str`, *optional*): + A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors, + etc.) + """ + + def decorator(test_func_ref): + @functools.wraps(test_func_ref) + def wrapper(*args, **kwargs): + retry_count = 1 + + while retry_count < max_attempts: + try: + return test_func_ref(*args, **kwargs) + + except Exception as err: + print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr) + if wait_before_retry is not None: + time.sleep(wait_before_retry) + retry_count += 1 + + return test_func_ref(*args, **kwargs) + + return wrapper + + return decorator + + +# Taken from: https://github.com/huggingface/transformers/blob/3658488ff77ff8d45101293e749263acf437f4d5/src/transformers/testing_utils.py#L1787 +def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None): + """ + To run a test in a subprocess. In particular, this can avoid (GPU) memory issue. + + Args: + test_case (`unittest.TestCase`): + The test that will run `target_func`. + target_func (`Callable`): + The function implementing the actual testing logic. + inputs (`dict`, *optional*, defaults to `None`): + The inputs that will be passed to `target_func` through an (input) queue. + timeout (`int`, *optional*, defaults to `None`): + The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env. + variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`. 
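+
+    A minimal sketch of a compatible `target_func` (illustrative only): it reads its inputs from the input
+    queue, runs the actual checks, and always reports a dict with an `"error"` key through the output queue
+    so the parent process can fail the test when needed.
+
+    ```python
+    def _target_func(in_queue, out_queue, timeout):
+        error = None
+        try:
+            inputs = in_queue.get(timeout=timeout)  # the `inputs` dict passed to `run_test_in_subprocess`
+            # ... run the actual assertions on `inputs` here ...
+        except Exception as e:
+            error = str(e)
+        out_queue.put({"error": error}, timeout=timeout)
+        out_queue.join()
+    ```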
+ """ + if timeout is None: + timeout = int(os.environ.get("PYTEST_TIMEOUT", 600)) + + start_methohd = "spawn" + ctx = multiprocessing.get_context(start_methohd) + + input_queue = ctx.Queue(1) + output_queue = ctx.JoinableQueue(1) + + # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle. + input_queue.put(inputs, timeout=timeout) + + process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout)) + process.start() + # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents + # the test to exit properly. + try: + results = output_queue.get(timeout=timeout) + output_queue.task_done() + except Exception as e: + process.terminate() + test_case.fail(e) + process.join(timeout=timeout) + + if results["error"] is not None: + test_case.fail(f'{results["error"]}') + + +class CaptureLogger: + """ + Args: + Context manager to capture `logging` streams + logger: 'logging` logger object + Returns: + The captured output is available via `self.out` + Example: + ```python + >>> from diffusers import logging + >>> from diffusers.testing_utils import CaptureLogger + + >>> msg = "Testing 1, 2, 3" + >>> logging.set_verbosity_info() + >>> logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py") + >>> with CaptureLogger(logger) as cl: + ... logger.info(msg) + >>> assert cl.out, msg + "\n" + ``` + """ + + def __init__(self, logger): + self.logger = logger + self.io = StringIO() + self.sh = logging.StreamHandler(self.io) + self.out = "" + + def __enter__(self): + self.logger.addHandler(self.sh) + return self + + def __exit__(self, *exc): + self.logger.removeHandler(self.sh) + self.out = self.io.getvalue() + + def __repr__(self): + return f"captured: {self.out}\n" + + +def enable_full_determinism(): + """ + Helper function for reproducible behavior during distributed training. See + - https://pytorch.org/docs/stable/notes/randomness.html for pytorch + """ + # Enable PyTorch deterministic mode. 
This potentially requires either the environment + # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, + # depending on the CUDA version, so we set them both here + os.environ["CUDA_LAUNCH_BLOCKING"] = "1" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" + torch.use_deterministic_algorithms(True) + + # Enable CUDNN deterministic mode + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + torch.backends.cuda.matmul.allow_tf32 = False + + +def disable_full_determinism(): + os.environ["CUDA_LAUNCH_BLOCKING"] = "0" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = "" + torch.use_deterministic_algorithms(False) + + +# Utils for custom and alternative accelerator devices +def _is_torch_fp16_available(device): + if not is_torch_available(): + return False + + import torch + + device = torch.device(device) + + try: + x = torch.zeros((2, 2), dtype=torch.float16).to(device) + _ = torch.mul(x, x) + return True + + except Exception as e: + if device.type == "cuda": + raise ValueError( + f"You have passed a device of type 'cuda' which should work with 'fp16', but 'cuda' does not seem to be correctly installed on your machine: {e}" + ) + + return False + + +def _is_torch_fp64_available(device): + if not is_torch_available(): + return False + + import torch + + device = torch.device(device) + + try: + x = torch.zeros((2, 2), dtype=torch.float64).to(device) + _ = torch.mul(x, x) + return True + + except Exception as e: + if device.type == "cuda": + raise ValueError( + f"You have passed a device of type 'cuda' which should work with 'fp64', but 'cuda' does not seem to be correctly installed on your machine: {e}" + ) + + return False + + +# Guard these lookups for when Torch is not used - alternative accelerator support is for PyTorch +if is_torch_available(): + # Behaviour flags + BACKEND_SUPPORTS_TRAINING = {"cuda": True, "cpu": True, "mps": False, "default": True} + + # Function definitions + BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "mps": None, "default": None} + BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "mps": lambda: 0, "default": 0} + BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed} + + +# This dispatches a defined function according to the accelerator from the function definitions. +def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs): + if device not in dispatch_table: + return dispatch_table["default"](*args, **kwargs) + + fn = dispatch_table[device] + + # Some device agnostic functions return values. Need to guard against 'None' instead at + # user level + if fn is None: + return None + + return fn(*args, **kwargs) + + +# These are callables which automatically dispatch the function specific to the accelerator +def backend_manual_seed(device: str, seed: int): + return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed) + + +def backend_empty_cache(device: str): + return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE) + + +def backend_device_count(device: str): + return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT) + + +# These are callables which return boolean behaviour flags and can be used to specify some +# device agnostic alternative where the feature is unsupported. 
+def backend_supports_training(device: str): + if not is_torch_available(): + return False + + if device not in BACKEND_SUPPORTS_TRAINING: + device = "default" + + return BACKEND_SUPPORTS_TRAINING[device] + + +# Guard for when Torch is not available +if is_torch_available(): + # Update device function dict mapping + def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str): + try: + # Try to import the function directly + spec_fn = getattr(device_spec_module, attribute_name) + device_fn_dict[torch_device] = spec_fn + except AttributeError as e: + # If the function doesn't exist, and there is no default, throw an error + if "default" not in device_fn_dict: + raise AttributeError( + f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found." + ) from e + + if "DIFFUSERS_TEST_DEVICE_SPEC" in os.environ: + device_spec_path = os.environ["DIFFUSERS_TEST_DEVICE_SPEC"] + if not Path(device_spec_path).is_file(): + raise ValueError(f"Specified path to device specification file is not found. Received {device_spec_path}") + + try: + import_name = device_spec_path[: device_spec_path.index(".py")] + except ValueError as e: + raise ValueError(f"Provided device spec file is not a Python file! Received {device_spec_path}") from e + + device_spec_module = importlib.import_module(import_name) + + try: + device_name = device_spec_module.DEVICE_NAME + except AttributeError: + raise AttributeError("Device spec file did not contain `DEVICE_NAME`") + + if "DIFFUSERS_TEST_DEVICE" in os.environ and torch_device != device_name: + msg = f"Mismatch between environment variable `DIFFUSERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n" + msg += "Either unset `DIFFUSERS_TEST_DEVICE` or ensure it matches device spec name." + raise ValueError(msg) + + torch_device = device_name + + # Add one entry here for each `BACKEND_*` dictionary. + update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN") + update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN") + update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN") + update_mapping_from_spec(BACKEND_SUPPORTS_TRAINING, "SUPPORTS_TRAINING") diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/torch_utils.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/torch_utils.py new file mode 100644 index 000000000..cc9c050a7 --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/torch_utils.py @@ -0,0 +1,147 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +PyTorch utilities: Utilities related to PyTorch +""" +from typing import List, Optional, Tuple, Union + +from . 
import logging +from .import_utils import is_torch_available, is_torch_version + + +if is_torch_available(): + import torch + from torch.fft import fftn, fftshift, ifftn, ifftshift + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +try: + from torch._dynamo import allow_in_graph as maybe_allow_in_graph +except (ImportError, ModuleNotFoundError): + + def maybe_allow_in_graph(cls): + return cls + + +def randn_tensor( + shape: Union[Tuple, List], + generator: Optional[Union[List["torch.Generator"], "torch.Generator"]] = None, + device: Optional["torch.device"] = None, + dtype: Optional["torch.dtype"] = None, + layout: Optional["torch.layout"] = None, +): + """A helper function to create random tensors on the desired `device` with the desired `dtype`. When + passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor + is always created on the CPU. + """ + # device on which tensor is created defaults to device + rand_device = device + batch_size = shape[0] + + layout = layout or torch.strided + device = device or torch.device("cpu") + + if generator is not None: + gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type + if gen_device_type != device.type and gen_device_type == "cpu": + rand_device = "cpu" + if device != "mps": + logger.info( + f"The passed generator was created on 'cpu' even though a tensor on {device} was expected." + f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably" + f" slighly speed up this function by passing a generator that was created on the {device} device." + ) + elif gen_device_type != device.type and gen_device_type == "cuda": + raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.") + + # make sure generator list of length 1 is treated like a non-list + if isinstance(generator, list) and len(generator) == 1: + generator = generator[0] + + if isinstance(generator, list): + shape = (1,) + shape[1:] + latents = [ + torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout) + for i in range(batch_size) + ] + latents = torch.cat(latents, dim=0).to(device) + else: + latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device) + + return latents + + +def is_compiled_module(module) -> bool: + """Check whether the module was compiled with torch.compile()""" + if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"): + return False + return isinstance(module, torch._dynamo.eval_frame.OptimizedModule) + + +def fourier_filter(x_in: "torch.Tensor", threshold: int, scale: int) -> "torch.Tensor": + """Fourier filter as introduced in FreeU (https://arxiv.org/abs/2309.11497). 
+ + This version of the method comes from here: + https://github.com/huggingface/diffusers/pull/5164#issuecomment-1732638706 + """ + x = x_in + B, C, H, W = x.shape + + # Non-power of 2 images must be float32 + if (W & (W - 1)) != 0 or (H & (H - 1)) != 0: + x = x.to(dtype=torch.float32) + + # FFT + x_freq = fftn(x, dim=(-2, -1)) + x_freq = fftshift(x_freq, dim=(-2, -1)) + + B, C, H, W = x_freq.shape + mask = torch.ones((B, C, H, W), device=x.device) + + crow, ccol = H // 2, W // 2 + mask[..., crow - threshold : crow + threshold, ccol - threshold : ccol + threshold] = scale + x_freq = x_freq * mask + + # IFFT + x_freq = ifftshift(x_freq, dim=(-2, -1)) + x_filtered = ifftn(x_freq, dim=(-2, -1)).real + + return x_filtered.to(dtype=x_in.dtype) + + +def apply_freeu( + resolution_idx: int, hidden_states: "torch.Tensor", res_hidden_states: "torch.Tensor", **freeu_kwargs +) -> Tuple["torch.Tensor", "torch.Tensor"]: + """Applies the FreeU mechanism as introduced in https: + //arxiv.org/abs/2309.11497. Adapted from the official code repository: https://github.com/ChenyangSi/FreeU. + + Args: + resolution_idx (`int`): Integer denoting the UNet block where FreeU is being applied. + hidden_states (`torch.Tensor`): Inputs to the underlying block. + res_hidden_states (`torch.Tensor`): Features from the skip block corresponding to the underlying block. + s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. + s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. + b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. + b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. + """ + if resolution_idx == 0: + num_half_channels = hidden_states.shape[1] // 2 + hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b1"] + res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s1"]) + if resolution_idx == 1: + num_half_channels = hidden_states.shape[1] // 2 + hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b2"] + res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s2"]) + + return hidden_states, res_hidden_states diff --git a/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/versions.py b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/versions.py new file mode 100644 index 000000000..945a3977c --- /dev/null +++ b/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/src/diffusers/utils/versions.py @@ -0,0 +1,117 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
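As the randn_tensor docstring above notes, passing a list of generators seeds each batch element individually, so a given sample is reproducible regardless of the batch it is drawn in. A minimal usage sketch, assuming the diffusers package built by this patch is installed; the seeds and latent shape are illustrative:

# Hedged usage sketch: per-sample seeding with randn_tensor (defined above).
import torch
from diffusers.utils.torch_utils import randn_tensor

shape = (2, 4, 64, 64)  # (batch, channels, height, width)
gens = [torch.Generator("cpu").manual_seed(s) for s in (0, 1)]  # one generator per sample
latents = randn_tensor(shape, generator=gens, device=torch.device("cpu"))

# Sample 0 depends only on its own generator, not on the rest of the batch:
single = randn_tensor((1, 4, 64, 64),
                      generator=torch.Generator("cpu").manual_seed(0),
                      device=torch.device("cpu"))
assert torch.equal(latents[:1], single)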
+""" +Utilities for working with package versions +""" + +import importlib.metadata +import operator +import re +import sys +from typing import Optional + +from packaging import version + + +ops = { + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint): + if got_ver is None or want_ver is None: + raise ValueError( + f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" + f" reinstalling {pkg}." + ) + if not ops[op](version.parse(got_ver), version.parse(want_ver)): + raise ImportError( + f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" + ) + + +def require_version(requirement: str, hint: Optional[str] = None) -> None: + """ + Perform a runtime check of the dependency versions, using the exact same syntax used by pip. + + The installed module version comes from the *site-packages* dir via *importlib.metadata*. + + Args: + requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy" + hint (`str`, *optional*): what suggestion to print in case of requirements not being met + + Example: + + ```python + require_version("pandas>1.1.2") + require_version("numpy>1.18.5", "this is important to have for whatever reason") + ```""" + + hint = f"\n{hint}" if hint is not None else "" + + # non-versioned check + if re.match(r"^[\w_\-\d]+$", requirement): + pkg, op, want_ver = requirement, None, None + else: + match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement) + if not match: + raise ValueError( + "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" + f" got {requirement}" + ) + pkg, want_full = match[0] + want_range = want_full.split(",") # there could be multiple requirements + wanted = {} + for w in want_range: + match = re.findall(r"^([\s!=<>]{1,2})(.+)", w) + if not match: + raise ValueError( + "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," + f" but got {requirement}" + ) + op, want_ver = match[0] + wanted[op] = want_ver + if op not in ops: + raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}") + + # special case + if pkg == "python": + got_ver = ".".join([str(x) for x in sys.version_info[:3]]) + for op, want_ver in wanted.items(): + _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) + return + + # check if any version is installed + try: + got_ver = importlib.metadata.version(pkg) + except importlib.metadata.PackageNotFoundError: + raise importlib.metadata.PackageNotFoundError( + f"The '{requirement}' distribution was not found and is required by this application. 
{hint}" + ) + + # check that the right version is installed if version number or a range was provided + if want_ver is not None: + for op, want_ver in wanted.items(): + _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) + + +def require_version_core(requirement): + """require_version wrapper which emits a core-specific hint on failure""" + hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main" + return require_version(requirement, hint) diff --git a/tests/executables/stable-diffusion/init_torch.sh b/tests/executables/stable-diffusion/init_torch.sh new file mode 100644 index 000000000..37ed859e4 --- /dev/null +++ b/tests/executables/stable-diffusion/init_torch.sh @@ -0,0 +1,26 @@ +#!/bin/bash +ROOT_DIR="$(cd "$(dirname "$0")/../.."; pwd)" +SRC_DIR=$ROOT_DIR/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch +DATA_DIR=$ROOT_DIR/data + +# install packages +pip3 install --no-index --find-links=$DATA_DIR/packages IXPyLogger==1.0.0 +pip3 install --upgrade pillow +pip3 install huggingface_hub==0.25.1 +pip3 install $DATA_DIR/packages/addons/transformers-4.38.1-py3-none-any.whl +pip3 install -r $SRC_DIR/examples/text_to_image/requirements.txt --cache-dir=$DATA_DIR/packages +bash $SRC_DIR/build_diffusers.sh && bash $SRC_DIR/install_diffusers.sh + +# unzip dataset and checkpoints +if [[ ! -d "$DATA_DIR/datasets/pokemon-blip-captions" ]]; then + echo "Unarchive pokemon-blip-captions.tar" + tar -xvf $DATA_DIR/datasets/pokemon-blip-captions.tar -C $DATA_DIR/datasets +fi +if [[ ! -d "$DATA_DIR/model_zoo/stabilityai" ]]; then + echo "Unarchive stabilityai.tar" + tar -xvf $DATA_DIR/model_zoo/stabilityai.tar -C $DATA_DIR/model_zoo +fi +if [[ ! -d "$DATA_DIR/model_zoo/stable-diffusion-v1-5" ]]; then + echo "Unarchive stable-diffusion-v1-5.zip" + unzip $DATA_DIR/model_zoo/stable-diffusion-v1-5.zip -d $DATA_DIR/model_zoo +fi diff --git a/tests/executables/stable-diffusion/train_sd2.1_pokemon_dist_1x8_torch.sh b/tests/executables/stable-diffusion/train_sd2.1_pokemon_dist_1x8_torch.sh new file mode 100644 index 000000000..36ce85c69 --- /dev/null +++ b/tests/executables/stable-diffusion/train_sd2.1_pokemon_dist_1x8_torch.sh @@ -0,0 +1,63 @@ +source ../_utils/global_environment_variables.sh +export ENABLE_FLASH_ATTENTION_WITH_IXDNN=1 +export ENABLE_FLASH_ATTENTION_WITH_IXATTNBKD=0 + + +: ${BATCH_SIZE:=8} + +ROOT_DIR="$(cd "$(dirname "$0")/../.."; pwd)" +SRC_DIR=$ROOT_DIR/multimodal/diffusion_model/stable-diffusion-2.1-pokemon/pytorch/examples/text_to_image +DATASET_NAME=$ROOT_DIR/data/datasets/pokemon-blip-captions +MODEL_NAME=$ROOT_DIR/data/model_zoo/stabilityai/stable-diffusion-2-1 +export DRT_MEMCPYUSEKERNEL=20000000000 + +EXIT_STATUS=0 +check_status() +{ + if ((${PIPESTATUS[0]} != 0)); then + EXIT_STATUS=1 + fi +} + +# 训练用的一些环境变量,可以提高性能 +export CLIP_FLASH_ATTN=1 +export USE_NHWC_GN=1 +export USE_IXFORMER_GEGLU=0 +export USE_APEX_LN=1 +export ENABLE_FLASH_ATTENTION_WITH_IXDNN=1 +echo $ENABLE_FLASH_ATTENTION_WITH_IXDNN + + +# 根据BI环境调整卡数 +cd $SRC_DIR +actual_num_devices=$(ixsmi --list-gpus | wc -l) +num_devices=$(awk '/num_processes:/ {print $2}' $SRC_DIR/default_config.yaml) +echo $num_devices +echo $actual_num_devices +if [ "$num_devices" != "$actual_num_devices" ]; then + echo "num_devices not matches actual_num_devices." 
+ sed -i "s/^num_processes:.*/num_processes: $actual_num_devices/" $SRC_DIR/default_config.yaml +fi + +# 开始训练 +accelerate launch --config_file default_config.yaml --mixed_precision="fp16" train_text_to_image.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --dataset_name=$DATASET_NAME \ + --resolution=512 \ + --seed 42 \ + --center_crop \ + --random_flip \ + --train_batch_size=1 \ + --gradient_accumulation_steps=4 \ + --gradient_checkpointing \ + --learning_rate=1e-05 \ + --max_grad_norm=1 \ + --lr_scheduler="constant" \ + --lr_warmup_steps=0 \ + --output_dir="sd-pokemon-model-3" \ + --max_train_steps=100 \ + --NHWC \ + --dataloader_num_workers=32 \ + --apex_fused_adam "$@"; check_status + + exit ${EXIT_STATUS} -- Gitee From 954dd9256b5e3689025a4e51865dc87a7e6a9e29 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 14:52:09 +0800 Subject: [PATCH 14/18] sync resnet50 paddle all --- .../resnet50/paddlepaddle/.gitignore | 16 + .../resnet50/paddlepaddle/LICENSE | 201 +++ .../resnet50/paddlepaddle/MANIFEST.in | 7 + .../resnet50/paddlepaddle/README_ch.md | 151 ++ .../resnet50/paddlepaddle/README_en.md | 135 ++ .../resnet50/paddlepaddle/__init__.py | 17 + .../resnet50/paddlepaddle/hubconf.py | 788 +++++++++ .../resnet50/paddlepaddle/ppcls/__init__.py | 20 + .../paddlepaddle/ppcls/arch/__init__.py | 134 ++ .../ppcls/arch/backbone/__init__.py | 34 + .../ppcls/arch/backbone/base/__init__.py | 0 .../ppcls/arch/backbone/base/theseus_layer.py | 301 ++++ .../backbone/legendary_models/__init__.py | 8 + .../arch/backbone/legendary_models/resnet.py | 591 +++++++ .../ppcls/arch/backbone/model_zoo/__init__.py | 0 .../arch/backbone/model_zoo/resnet_vc.py | 309 ++++ .../arch/backbone/variant_models/__init__.py | 3 + .../backbone/variant_models/resnet_variant.py | 23 + .../paddlepaddle/ppcls/arch/gears/__init__.py | 32 + .../ppcls/arch/gears/arcmargin.py | 72 + .../ppcls/arch/gears/circlemargin.py | 59 + .../ppcls/arch/gears/cosmargin.py | 55 + .../paddlepaddle/ppcls/arch/gears/fc.py | 35 + .../ppcls/arch/gears/identity_head.py | 9 + .../ppcls/arch/gears/vehicle_neck.py | 52 + .../paddlepaddle/ppcls/arch/slim/__init__.py | 16 + .../paddlepaddle/ppcls/arch/slim/prune.py | 65 + .../paddlepaddle/ppcls/arch/slim/quant.py | 55 + .../resnet50/paddlepaddle/ppcls/arch/utils.py | 53 + .../DataAugment/ResNet50_AutoAugment.yaml | 129 ++ .../DataAugment/ResNet50_Baseline.yaml | 128 ++ .../ImageNet/DataAugment/ResNet50_Cutmix.yaml | 128 ++ .../ImageNet/DataAugment/ResNet50_Cutout.yaml | 131 ++ .../DataAugment/ResNet50_GridMask.yaml | 134 ++ .../DataAugment/ResNet50_HideAndSeek.yaml | 129 ++ .../ImageNet/DataAugment/ResNet50_Mixup.yaml | 128 ++ .../DataAugment/ResNet50_RandAugment.yaml | 131 ++ .../DataAugment/ResNet50_RandomErasing.yaml | 134 ++ .../configs/ImageNet/ResNet/ResNet50.yaml | 132 ++ .../ImageNet/ResNet/ResNet50_amp_4x8.yaml | 140 ++ .../configs/ImageNet/ResNet/ResNet50_vd.yaml | 130 ++ .../configs/quick_start/ResNet50_vd.yaml | 107 ++ .../paddlepaddle/ppcls/engine/__init__.py | 0 .../paddlepaddle/ppcls/engine/engine.py | 468 ++++++ .../ppcls/engine/evaluation/__init__.py | 16 + .../ppcls/engine/evaluation/classification.py | 174 ++ .../ppcls/engine/evaluation/retrieval.py | 179 ++ .../ppcls/engine/train/__init__.py | 14 + .../paddlepaddle/ppcls/engine/train/train.py | 82 + .../paddlepaddle/ppcls/engine/train/utils.py | 72 + .../paddlepaddle/ppcls/loss/__init__.py | 47 + .../paddlepaddle/ppcls/loss/celoss.py | 67 + .../paddlepaddle/ppcls/loss/comfunc.py | 45 + 
.../paddlepaddle/ppcls/loss/distanceloss.py | 43 + .../paddlepaddle/ppcls/metric/__init__.py | 51 + .../paddlepaddle/ppcls/metric/metrics.py | 306 ++++ .../paddlepaddle/ppcls/optimizer/__init__.py | 72 + .../ppcls/optimizer/learning_rate.py | 326 ++++ .../paddlepaddle/ppcls/optimizer/optimizer.py | 217 +++ .../paddlepaddle/ppcls/static/program.py | 449 +++++ .../paddlepaddle/ppcls/static/save_load.py | 139 ++ .../paddlepaddle/ppcls/static/train.py | 212 +++ .../paddlepaddle/ppcls/utils/__init__.py | 27 + .../paddlepaddle/ppcls/utils/check.py | 151 ++ .../paddlepaddle/ppcls/utils/config.py | 210 +++ .../paddlepaddle/ppcls/utils/download.py | 319 ++++ .../resnet50/paddlepaddle/ppcls/utils/ema.py | 63 + .../feature_maps_visualization/fm_vis.py | 97 ++ .../feature_maps_visualization/resnet.py | 535 ++++++ .../utils/feature_maps_visualization/utils.py | 85 + .../paddlepaddle/ppcls/utils/gallery2fc.py | 119 ++ .../ppcls/utils/imagenet1k_label_list.txt | 1000 ++++++++++++ .../paddlepaddle/ppcls/utils/logger.py | 138 ++ .../paddlepaddle/ppcls/utils/metrics.py | 107 ++ .../resnet50/paddlepaddle/ppcls/utils/misc.py | 63 + .../paddlepaddle/ppcls/utils/model_zoo.py | 213 +++ .../paddlepaddle/ppcls/utils/pretrained.list | 121 ++ .../paddlepaddle/ppcls/utils/profiler.py | 111 ++ .../paddlepaddle/ppcls/utils/save_load.py | 136 ++ .../paddlepaddle/ppcls_2.5/__init__.py | 20 + .../paddlepaddle/ppcls_2.5/arch/__init__.py | 134 ++ .../ppcls_2.5/arch/backbone/__init__.py | 34 + .../ppcls_2.5/arch/backbone/base/__init__.py | 0 .../arch/backbone/base/theseus_layer.py | 301 ++++ .../backbone/legendary_models/__init__.py | 8 + .../arch/backbone/legendary_models/resnet.py | 591 +++++++ .../arch/backbone/model_zoo/__init__.py | 0 .../arch/backbone/model_zoo/resnet_vc.py | 309 ++++ .../arch/backbone/variant_models/__init__.py | 3 + .../backbone/variant_models/resnet_variant.py | 23 + .../ppcls_2.5/arch/gears/__init__.py | 32 + .../ppcls_2.5/arch/gears/arcmargin.py | 72 + .../ppcls_2.5/arch/gears/circlemargin.py | 59 + .../ppcls_2.5/arch/gears/cosmargin.py | 55 + .../paddlepaddle/ppcls_2.5/arch/gears/fc.py | 35 + .../ppcls_2.5/arch/gears/identity_head.py | 9 + .../ppcls_2.5/arch/gears/vehicle_neck.py | 52 + .../ppcls_2.5/arch/slim/__init__.py | 16 + .../paddlepaddle/ppcls_2.5/arch/slim/prune.py | 65 + .../paddlepaddle/ppcls_2.5/arch/slim/quant.py | 55 + .../paddlepaddle/ppcls_2.5/arch/utils.py | 53 + .../DataAugment/ResNet50_AutoAugment.yaml | 129 ++ .../DataAugment/ResNet50_Baseline.yaml | 128 ++ .../ImageNet/DataAugment/ResNet50_Cutmix.yaml | 128 ++ .../ImageNet/DataAugment/ResNet50_Cutout.yaml | 131 ++ .../DataAugment/ResNet50_GridMask.yaml | 134 ++ .../DataAugment/ResNet50_HideAndSeek.yaml | 129 ++ .../ImageNet/DataAugment/ResNet50_Mixup.yaml | 128 ++ .../DataAugment/ResNet50_RandAugment.yaml | 131 ++ .../DataAugment/ResNet50_RandomErasing.yaml | 134 ++ .../configs/ImageNet/ResNet/ResNet50.yaml | 132 ++ .../ImageNet/ResNet/ResNet50_amp_4x8.yaml | 140 ++ .../configs/ImageNet/ResNet/ResNet50_vd.yaml | 130 ++ .../configs/quick_start/ResNet50_vd.yaml | 107 ++ .../paddlepaddle/ppcls_2.5/engine/__init__.py | 0 .../paddlepaddle/ppcls_2.5/engine/engine.py | 468 ++++++ .../ppcls_2.5/engine/evaluation/__init__.py | 16 + .../engine/evaluation/classification.py | 174 ++ .../ppcls_2.5/engine/evaluation/retrieval.py | 179 ++ .../ppcls_2.5/engine/train/__init__.py | 14 + .../ppcls_2.5/engine/train/train.py | 82 + .../ppcls_2.5/engine/train/utils.py | 72 + .../paddlepaddle/ppcls_2.5/loss/__init__.py | 47 + 
.../paddlepaddle/ppcls_2.5/loss/celoss.py | 67 + .../paddlepaddle/ppcls_2.5/loss/comfunc.py | 45 + .../ppcls_2.5/loss/distanceloss.py | 43 + .../paddlepaddle/ppcls_2.5/metric/__init__.py | 51 + .../paddlepaddle/ppcls_2.5/metric/metrics.py | 306 ++++ .../ppcls_2.5/optimizer/__init__.py | 72 + .../ppcls_2.5/optimizer/learning_rate.py | 326 ++++ .../ppcls_2.5/optimizer/optimizer.py | 217 +++ .../paddlepaddle/ppcls_2.5/static/program.py | 449 +++++ .../ppcls_2.5/static/save_load.py | 139 ++ .../paddlepaddle/ppcls_2.5/static/train.py | 212 +++ .../paddlepaddle/ppcls_2.5/utils/__init__.py | 27 + .../paddlepaddle/ppcls_2.5/utils/check.py | 151 ++ .../paddlepaddle/ppcls_2.5/utils/config.py | 210 +++ .../paddlepaddle/ppcls_2.5/utils/download.py | 319 ++++ .../paddlepaddle/ppcls_2.5/utils/ema.py | 63 + .../feature_maps_visualization/fm_vis.py | 97 ++ .../feature_maps_visualization/resnet.py | 535 ++++++ .../utils/feature_maps_visualization/utils.py | 85 + .../ppcls_2.5/utils/gallery2fc.py | 119 ++ .../ppcls_2.5/utils/imagenet1k_label_list.txt | 1000 ++++++++++++ .../paddlepaddle/ppcls_2.5/utils/logger.py | 138 ++ .../paddlepaddle/ppcls_2.5/utils/metrics.py | 107 ++ .../paddlepaddle/ppcls_2.5/utils/misc.py | 63 + .../paddlepaddle/ppcls_2.5/utils/model_zoo.py | 213 +++ .../ppcls_2.5/utils/pretrained.list | 121 ++ .../paddlepaddle/ppcls_2.5/utils/profiler.py | 111 ++ .../paddlepaddle/ppcls_2.5/utils/save_load.py | 136 ++ .../paddlepaddle/ppcls_2.6/__init__.py | 20 + .../paddlepaddle/ppcls_2.6/arch/__init__.py | 177 ++ .../ppcls_2.6/arch/backbone/__init__.py | 118 ++ .../ppcls_2.6/arch/backbone/base/__init__.py | 0 .../arch/backbone/base/dbb/dbb_block.py | 365 +++++ .../arch/backbone/base/dbb/dbb_transforms.py | 73 + .../arch/backbone/base/theseus_layer.py | 398 +++++ .../backbone/legendary_models/__init__.py | 8 + .../legendary_models/custom_devices_layers.py | 37 + .../arch/backbone/legendary_models/esnet.py | 369 +++++ .../arch/backbone/legendary_models/hrnet.py | 797 +++++++++ .../backbone/legendary_models/inception_v3.py | 559 +++++++ .../backbone/legendary_models/mobilenet_v1.py | 259 +++ .../backbone/legendary_models/mobilenet_v3.py | 591 +++++++ .../backbone/legendary_models/mobilenet_v4.py | 836 ++++++++++ .../backbone/legendary_models/pp_hgnet.py | 376 +++++ .../backbone/legendary_models/pp_hgnet_v2.py | 706 ++++++++ .../backbone/legendary_models/pp_lcnet.py | 520 ++++++ .../backbone/legendary_models/pp_lcnet_v2.py | 417 +++++ .../arch/backbone/legendary_models/resnet.py | 653 ++++++++ .../legendary_models/swin_transformer.py | 1002 ++++++++++++ .../arch/backbone/legendary_models/vgg.py | 261 +++ .../arch/backbone/model_zoo/__init__.py | 0 .../arch/backbone/model_zoo/adaface_ir_net.py | 529 ++++++ .../arch/backbone/model_zoo/alexnet.py | 170 ++ .../ppcls_2.6/arch/backbone/model_zoo/cae.py | 860 ++++++++++ .../arch/backbone/model_zoo/convnext.py | 282 ++++ .../arch/backbone/model_zoo/cspnet.py | 377 +++++ .../backbone/model_zoo/cswin_transformer.py | 651 ++++++++ .../ppcls_2.6/arch/backbone/model_zoo/cvt.py | 723 +++++++++ .../arch/backbone/model_zoo/darknet.py | 199 +++ .../arch/backbone/model_zoo/densenet.py | 346 ++++ .../model_zoo/distilled_vision_transformer.py | 273 ++++ .../ppcls_2.6/arch/backbone/model_zoo/dla.py | 529 ++++++ .../ppcls_2.6/arch/backbone/model_zoo/dpn.py | 453 ++++++ .../arch/backbone/model_zoo/dsnet.py | 701 ++++++++ .../arch/backbone/model_zoo/efficientnet.py | 1028 ++++++++++++ .../backbone/model_zoo/efficientnet_v2.py | 994 ++++++++++++ 
.../arch/backbone/model_zoo/fasternet.py | 399 +++++ .../arch/backbone/model_zoo/foundation_vit.py | 1261 ++++++++++++++ .../arch/backbone/model_zoo/ghostnet.py | 364 +++++ .../arch/backbone/model_zoo/googlenet.py | 365 +++++ .../arch/backbone/model_zoo/hardnet.py | 294 ++++ .../arch/backbone/model_zoo/inception_v4.py | 479 ++++++ .../arch/backbone/model_zoo/levit.py | 590 +++++++ .../arch/backbone/model_zoo/micronet.py | 618 +++++++ .../arch/backbone/model_zoo/mixnet.py | 812 +++++++++ .../arch/backbone/model_zoo/mobilefacenet.py | 166 ++ .../arch/backbone/model_zoo/mobilenet_v2.py | 316 ++++ .../arch/backbone/model_zoo/mobilenext.py | 262 +++ .../arch/backbone/model_zoo/mobilevit.py | 479 ++++++ .../arch/backbone/model_zoo/mobilevit_v2.py | 593 +++++++ .../arch/backbone/model_zoo/mobilevit_v3.py | 1445 +++++++++++++++++ .../arch/backbone/model_zoo/nextvit.py | 643 ++++++++ .../arch/backbone/model_zoo/peleenet.py | 264 +++ .../arch/backbone/model_zoo/pvt_v2.py | 493 ++++++ .../arch/backbone/model_zoo/rednet.py | 204 +++ .../arch/backbone/model_zoo/regnet.py | 531 ++++++ .../arch/backbone/model_zoo/repvgg.py | 451 +++++ .../arch/backbone/model_zoo/res2net.py | 266 +++ .../arch/backbone/model_zoo/res2net_vd.py | 308 ++++ .../arch/backbone/model_zoo/resnest.py | 780 +++++++++ .../arch/backbone/model_zoo/resnet_vc.py | 311 ++++ .../arch/backbone/model_zoo/resnext.py | 303 ++++ .../arch/backbone/model_zoo/resnext101_wsl.py | 506 ++++++ .../arch/backbone/model_zoo/resnext_vd.py | 319 ++++ .../arch/backbone/model_zoo/rexnet.py | 283 ++++ .../arch/backbone/model_zoo/se_resnet_vd.py | 392 +++++ .../arch/backbone/model_zoo/se_resnext.py | 369 +++++ .../arch/backbone/model_zoo/se_resnext_vd.py | 311 ++++ .../arch/backbone/model_zoo/shufflenet_v2.py | 364 +++++ .../arch/backbone/model_zoo/squeezenet.py | 196 +++ .../arch/backbone/model_zoo/starnet.py | 197 +++ .../arch/backbone/model_zoo/svtrnet.py | 699 ++++++++ .../backbone/model_zoo/swin_transformer_v2.py | 1061 ++++++++++++ .../arch/backbone/model_zoo/tinynet.py | 196 +++ .../ppcls_2.6/arch/backbone/model_zoo/tnt.py | 410 +++++ .../arch/backbone/model_zoo/twins.py | 692 ++++++++ .../arch/backbone/model_zoo/uniformer.py | 552 +++++++ .../ppcls_2.6/arch/backbone/model_zoo/van.py | 362 +++++ .../backbone/model_zoo/vision_transformer.py | 459 ++++++ .../arch/backbone/model_zoo/wideresnet.py | 236 +++ .../arch/backbone/model_zoo/xception.py | 393 +++++ .../backbone/model_zoo/xception_deeplab.py | 423 +++++ .../arch/backbone/variant_models/__init__.py | 5 + .../variant_models/efficientnet_variant.py | 44 + .../variant_models/foundation_vit_variant.py | 52 + .../variant_models/pp_lcnet_variant.py | 29 + .../variant_models/pp_lcnetv2_variant.py | 56 + .../backbone/variant_models/resnet_variant.py | 203 +++ .../swin_transformer_variant.py | 355 ++++ .../backbone/variant_models/vgg_variant.py | 28 + .../ppcls_2.6/arch/distill/afd_attention.py | 123 ++ .../ppcls_2.6/arch/gears/__init__.py | 74 + .../ppcls_2.6/arch/gears/adamargin.py | 113 ++ .../ppcls_2.6/arch/gears/arcmargin.py | 74 + .../ppcls_2.6/arch/gears/bnneck.py | 56 + .../ppcls_2.6/arch/gears/circlemargin.py | 61 + .../ppcls_2.6/arch/gears/cosmargin.py | 57 + .../paddlepaddle/ppcls_2.6/arch/gears/fc.py | 48 + .../ppcls_2.6/arch/gears/frfn_neck.py | 32 + .../ppcls_2.6/arch/gears/identity_head.py | 9 + .../ppcls_2.6/arch/gears/metabnneck.py | 122 ++ .../ppcls_2.6/arch/gears/ml_decoder.py | 124 ++ .../ppcls_2.6/arch/gears/vehicle_neck.py | 52 + .../ppcls_2.6/arch/slim/__init__.py | 16 + 
.../paddlepaddle/ppcls_2.6/arch/slim/prune.py | 64 + .../paddlepaddle/ppcls_2.6/arch/slim/quant.py | 63 + .../paddlepaddle/ppcls_2.6/arch/utils.py | 99 ++ .../PPLCNet_x1_0_pedestrian_attribute.yaml | 148 ++ .../Attr/PPLCNet_x1_0_vehicle_attribute.yaml | 149 ++ .../configs/Attr/StrongBaselineAttr.yaml | 113 ++ .../CAE/cae_base_patch16_224_finetune.yaml | 169 ++ .../CAE/cae_large_patch16_224_finetune.yaml | 169 ++ .../CLIP_vit_base_patch16_224_finetune.yaml | 162 ++ .../CLIP_vit_large_patch14_224_finetune.yaml | 162 ++ .../Cartoonface/ResNet50_icartoon.yaml | 149 ++ .../ppcls_2.6/configs/DeepHash/DCH.yaml | 141 ++ .../ppcls_2.6/configs/DeepHash/DSHSD.yaml | 142 ++ .../ppcls_2.6/configs/DeepHash/LCDSH.yaml | 138 ++ ...FaceRecognition_ArcFace_MobileFaceNet.yaml | 128 ++ .../FaceRecognition_ArcFace_ResNet50.yaml | 131 ++ .../Gallery2FC_PPLCNet_x2_5.yaml | 51 + .../GeneralRecognition_PPLCNet_x2_5.yaml | 148 ++ ...eneralRecognition_PPLCNet_x2_5_binary.yaml | 145 ++ .../GeneralRecognition_PPLCNet_x2_5_dml.yaml | 188 +++ .../GeneralRecognition_PPLCNet_x2_5_udml.yaml | 193 +++ .../GeneralRecognitionV2_CLIP_vit_base.yaml | 169 ++ .../GeneralRecognitionV2_CLIP_vit_large.yaml | 169 ++ .../GeneralRecognitionV2_PPLCNetV2_base.yaml | 209 +++ .../configs/ImageNet/CSPNet/CSPDarkNet53.yaml | 143 ++ .../CSWinTransformer_base_224.yaml | 174 ++ .../CSWinTransformer_base_384.yaml | 173 ++ .../CSWinTransformer_large_224.yaml | 173 ++ .../CSWinTransformer_large_384.yaml | 173 ++ .../CSWinTransformer_small_224.yaml | 173 ++ .../CSWinTransformer_tiny_224.yaml | 173 ++ .../ImageNet/ConvNeXt/ConvNeXt_base_224.yaml | 182 +++ .../ImageNet/ConvNeXt/ConvNeXt_base_384.yaml | 182 +++ .../ImageNet/ConvNeXt/ConvNeXt_large_224.yaml | 182 +++ .../ImageNet/ConvNeXt/ConvNeXt_large_384.yaml | 182 +++ .../ImageNet/ConvNeXt/ConvNeXt_small.yaml | 182 +++ .../ImageNet/ConvNeXt/ConvNeXt_tiny.yaml | 182 +++ .../configs/ImageNet/CvT/CvT_13_224.yaml | 174 ++ .../configs/ImageNet/CvT/CvT_13_384.yaml | 170 ++ .../configs/ImageNet/CvT/CvT_21_224.yaml | 154 ++ .../configs/ImageNet/CvT/CvT_21_384.yaml | 170 ++ .../configs/ImageNet/CvT/CvT_W24_384.yaml | 170 ++ .../configs/ImageNet/DLA/DLA102.yaml | 142 ++ .../configs/ImageNet/DLA/DLA102x.yaml | 142 ++ .../configs/ImageNet/DLA/DLA102x2.yaml | 142 ++ .../configs/ImageNet/DLA/DLA169.yaml | 142 ++ .../ppcls_2.6/configs/ImageNet/DLA/DLA34.yaml | 142 ++ .../configs/ImageNet/DLA/DLA46_c.yaml | 142 ++ .../configs/ImageNet/DLA/DLA46x_c.yaml | 142 ++ .../ppcls_2.6/configs/ImageNet/DLA/DLA60.yaml | 142 ++ .../configs/ImageNet/DLA/DLA60x.yaml | 142 ++ .../configs/ImageNet/DLA/DLA60x_c.yaml | 142 ++ .../configs/ImageNet/DPN/DPN107.yaml | 142 ++ .../configs/ImageNet/DPN/DPN131.yaml | 142 ++ .../ppcls_2.6/configs/ImageNet/DPN/DPN68.yaml | 142 ++ .../ppcls_2.6/configs/ImageNet/DPN/DPN92.yaml | 142 ++ .../ppcls_2.6/configs/ImageNet/DPN/DPN98.yaml | 142 ++ .../configs/ImageNet/DSNet/DSNet_base.yaml | 169 ++ .../configs/ImageNet/DSNet/DSNet_small.yaml | 170 ++ .../configs/ImageNet/DSNet/DSNet_tiny.yaml | 169 ++ .../configs/ImageNet/DarkNet/DarkNet53.yaml | 142 ++ .../DataAugment/ResNet50_AutoAugment.yaml | 141 ++ .../DataAugment/ResNet50_Baseline.yaml | 140 ++ .../ImageNet/DataAugment/ResNet50_Cutmix.yaml | 140 ++ .../ImageNet/DataAugment/ResNet50_Cutout.yaml | 143 ++ .../DataAugment/ResNet50_GridMask.yaml | 146 ++ .../DataAugment/ResNet50_HideAndSeek.yaml | 141 ++ .../ImageNet/DataAugment/ResNet50_Mixup.yaml | 140 ++ .../DataAugment/ResNet50_RandAugment.yaml | 143 ++ .../DataAugment/ResNet50_RandomErasing.yaml 
| 146 ++ .../DeiT/DeiT_base_distilled_patch16_224.yaml | 169 ++ .../DeiT/DeiT_base_distilled_patch16_384.yaml | 169 ++ .../ImageNet/DeiT/DeiT_base_patch16_224.yaml | 169 ++ .../ImageNet/DeiT/DeiT_base_patch16_384.yaml | 169 ++ .../DeiT_small_distilled_patch16_224.yaml | 168 ++ .../ImageNet/DeiT/DeiT_small_patch16_224.yaml | 169 ++ .../DeiT/DeiT_tiny_distilled_patch16_224.yaml | 169 ++ .../ImageNet/DeiT/DeiT_tiny_patch16_224.yaml | 169 ++ .../ImageNet/DenseNet/DenseNet121.yaml | 142 ++ .../ImageNet/DenseNet/DenseNet161.yaml | 142 ++ .../ImageNet/DenseNet/DenseNet169.yaml | 142 ++ .../ImageNet/DenseNet/DenseNet201.yaml | 142 ++ .../ImageNet/DenseNet/DenseNet264.yaml | 142 ++ .../Distillation/PPLCNet_x1_0_ssld.yaml | 161 ++ .../Distillation/PPLCNet_x2_5_dml.yaml | 161 ++ .../Distillation/PPLCNet_x2_5_ssld.yaml | 160 ++ .../Distillation/PPLCNet_x2_5_udml.yaml | 171 ++ ...mv3_large_x1_0_distill_mv3_small_x1_0.yaml | 167 ++ .../res2net200_vd_distill_pphgnet_base.yaml | 171 ++ .../resnet34_distill_resnet18_afd.yaml | 211 +++ .../resnet34_distill_resnet18_dist.yaml | 164 ++ .../resnet34_distill_resnet18_dkd.yaml | 166 ++ .../resnet34_distill_resnet18_mgd.yaml | 171 ++ .../resnet34_distill_resnet18_pefd.yaml | 171 ++ .../resnet34_distill_resnet18_skd.yaml | 163 ++ .../resnet34_distill_resnet18_wsl.yaml | 164 ++ .../configs/ImageNet/ESNet/ESNet_x0_25.yaml | 141 ++ .../configs/ImageNet/ESNet/ESNet_x0_5.yaml | 141 ++ .../configs/ImageNet/ESNet/ESNet_x0_75.yaml | 141 ++ .../configs/ImageNet/ESNet/ESNet_x1_0.yaml | 141 ++ .../ImageNet/EfficientNet/EfficientNetB0.yaml | 145 ++ .../ImageNet/EfficientNet/EfficientNetB1.yaml | 145 ++ .../ImageNet/EfficientNet/EfficientNetB2.yaml | 145 ++ .../ImageNet/EfficientNet/EfficientNetB3.yaml | 145 ++ .../ImageNet/EfficientNet/EfficientNetB4.yaml | 145 ++ .../ImageNet/EfficientNet/EfficientNetB5.yaml | 145 ++ .../ImageNet/EfficientNet/EfficientNetB6.yaml | 145 ++ .../ImageNet/EfficientNet/EfficientNetB7.yaml | 145 ++ .../EfficientNetV2/EfficientNetV2_S.yaml | 147 ++ .../ImageNet/FasterNet/FasterNet_L.yaml | 163 ++ .../ImageNet/FasterNet/FasterNet_M.yaml | 163 ++ .../ImageNet/FasterNet/FasterNet_S.yaml | 163 ++ .../ImageNet/FasterNet/FasterNet_T0.yaml | 163 ++ .../ImageNet/FasterNet/FasterNet_T1.yaml | 163 ++ .../ImageNet/FasterNet/FasterNet_T2.yaml | 162 ++ .../ImageNet/GhostNet/GhostNet_x0_5.yaml | 142 ++ .../ImageNet/GhostNet/GhostNet_x1_0.yaml | 142 ++ .../ImageNet/GhostNet/GhostNet_x1_3.yaml | 142 ++ .../ImageNet/HarDNet/HarDNet39_ds.yaml | 142 ++ .../configs/ImageNet/HarDNet/HarDNet68.yaml | 142 ++ .../ImageNet/HarDNet/HarDNet68_ds.yaml | 142 ++ .../configs/ImageNet/HarDNet/HarDNet85.yaml | 142 ++ .../configs/ImageNet/Inception/GoogLeNet.yaml | 141 ++ .../ImageNet/Inception/InceptionV3.yaml | 142 ++ .../ImageNet/Inception/InceptionV4.yaml | 142 ++ .../configs/ImageNet/LeViT/LeViT_128.yaml | 142 ++ .../configs/ImageNet/LeViT/LeViT_128S.yaml | 142 ++ .../configs/ImageNet/LeViT/LeViT_192.yaml | 142 ++ .../configs/ImageNet/LeViT/LeViT_256.yaml | 142 ++ .../configs/ImageNet/LeViT/LeViT_384.yaml | 142 ++ .../ImageNet/MicroNet/MicroNet_M0.yaml | 147 ++ .../ImageNet/MicroNet/MicroNet_M1.yaml | 147 ++ .../ImageNet/MicroNet/MicroNet_M2.yaml | 147 ++ .../ImageNet/MicroNet/MicroNet_M3.yaml | 152 ++ .../configs/ImageNet/MixNet/MixNet_L.yaml | 144 ++ .../configs/ImageNet/MixNet/MixNet_M.yaml | 144 ++ .../configs/ImageNet/MixNet/MixNet_S.yaml | 144 ++ .../ImageNet/MobileNeXt/MobileNeXt_x1_0.yaml | 160 ++ .../ImageNet/MobileNetV1/MobileNetV1.yaml | 144 ++ 
.../MobileNetV1/MobileNetV1_x0_25.yaml | 142 ++ .../MobileNetV1/MobileNetV1_x0_5.yaml | 142 ++ .../MobileNetV1/MobileNetV1_x0_75.yaml | 142 ++ .../ImageNet/MobileNetV2/MobileNetV2.yaml | 142 ++ .../MobileNetV2/MobileNetV2_x0_25.yaml | 140 ++ .../MobileNetV2/MobileNetV2_x0_5.yaml | 140 ++ .../MobileNetV2/MobileNetV2_x0_75.yaml | 140 ++ .../MobileNetV2/MobileNetV2_x1_5.yaml | 140 ++ .../MobileNetV2/MobileNetV2_x2_0.yaml | 140 ++ .../MobileNetV3/MobileNetV3_large_x0_35.yaml | 142 ++ .../MobileNetV3/MobileNetV3_large_x0_5.yaml | 142 ++ .../MobileNetV3/MobileNetV3_large_x0_75.yaml | 142 ++ .../MobileNetV3/MobileNetV3_large_x1_0.yaml | 143 ++ .../MobileNetV3/MobileNetV3_large_x1_25.yaml | 142 ++ .../MobileNetV3/MobileNetV3_small_x0_35.yaml | 142 ++ .../MobileNetV3/MobileNetV3_small_x0_5.yaml | 142 ++ .../MobileNetV3/MobileNetV3_small_x0_75.yaml | 142 ++ .../MobileNetV3/MobileNetV3_small_x1_0.yaml | 143 ++ .../MobileNetV3_small_x1_0_ampo2_ultra.yaml | 141 ++ .../MobileNetV3_small_x1_0_fp32_ultra.yaml | 143 ++ .../MobileNetV3/MobileNetV3_small_x1_25.yaml | 142 ++ .../MobileNetV4/MobileNetV4_conv_large.yaml | 181 +++ .../MobileNetV4/MobileNetV4_conv_medium.yaml | 181 +++ .../MobileNetV4/MobileNetV4_conv_small.yaml | 181 +++ .../MobileNetV4/MobileNetV4_hybrid_large.yaml | 182 +++ .../MobileNetV4_hybrid_medium.yaml | 176 ++ .../ImageNet/MobileViT/MobileViT_S.yaml | 151 ++ .../ImageNet/MobileViT/MobileViT_XS.yaml | 151 ++ .../ImageNet/MobileViT/MobileViT_XXS.yaml | 151 ++ .../MobileViTV2/MobileViTV2_x0_5.yaml | 174 ++ .../MobileViTV2/MobileViTV2_x1_0.yaml | 174 ++ .../MobileViTV2/MobileViTV2_x1_5.yaml | 174 ++ .../MobileViTV2/MobileViTV2_x2_0.yaml | 174 ++ .../ImageNet/MobileViTV3/MobileViTV3_S.yaml | 153 ++ .../MobileViTV3/MobileViTV3_S_L2.yaml | 153 ++ .../ImageNet/MobileViTV3/MobileViTV3_XS.yaml | 153 ++ .../MobileViTV3/MobileViTV3_XS_L2.yaml | 153 ++ .../ImageNet/MobileViTV3/MobileViTV3_XXS.yaml | 153 ++ .../MobileViTV3/MobileViTV3_XXS_L2.yaml | 153 ++ .../MobileViTV3/MobileViTV3_x0_5.yaml | 175 ++ .../MobileViTV3/MobileViTV3_x0_75.yaml | 175 ++ .../MobileViTV3/MobileViTV3_x1_0.yaml | 175 ++ .../ImageNet/NextViT/NextViT_base_224.yaml | 172 ++ .../ImageNet/NextViT/NextViT_base_384.yaml | 172 ++ .../ImageNet/NextViT/NextViT_large_224.yaml | 172 ++ .../ImageNet/NextViT/NextViT_large_384.yaml | 172 ++ .../ImageNet/NextViT/NextViT_small_224.yaml | 172 ++ .../ImageNet/NextViT/NextViT_small_384.yaml | 172 ++ .../ImageNet/PPHGNet/PPHGNet_base.yaml | 169 ++ .../ImageNet/PPHGNet/PPHGNet_small.yaml | 170 ++ .../ImageNet/PPHGNet/PPHGNet_tiny.yaml | 170 ++ .../ImageNet/PPHGNetV2/PPHGNetV2_B0.yaml | 164 ++ .../ImageNet/PPHGNetV2/PPHGNetV2_B1.yaml | 164 ++ .../ImageNet/PPHGNetV2/PPHGNetV2_B2.yaml | 164 ++ .../ImageNet/PPHGNetV2/PPHGNetV2_B3.yaml | 164 ++ .../ImageNet/PPHGNetV2/PPHGNetV2_B4.yaml | 164 ++ .../PPHGNetV2/PPHGNetV2_B4_ssld_stage1.yaml | 172 ++ .../PPHGNetV2/PPHGNetV2_B4_ssld_stage2.yaml | 173 ++ .../ImageNet/PPHGNetV2/PPHGNetV2_B5.yaml | 164 ++ .../ImageNet/PPHGNetV2/PPHGNetV2_B6.yaml | 164 ++ .../ImageNet/PPLCNet/PPLCNet_x0_25.yaml | 141 ++ .../ImageNet/PPLCNet/PPLCNet_x0_35.yaml | 141 ++ .../ImageNet/PPLCNet/PPLCNet_x0_5.yaml | 141 ++ .../ImageNet/PPLCNet/PPLCNet_x0_75.yaml | 141 ++ .../ImageNet/PPLCNet/PPLCNet_x1_0.yaml | 141 ++ .../PPLCNet/PPLCNet_x1_0_ampo2_ultra.yaml | 141 ++ .../PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml | 143 ++ .../ImageNet/PPLCNet/PPLCNet_x1_5.yaml | 141 ++ .../ImageNet/PPLCNet/PPLCNet_x2_0.yaml | 140 ++ .../ImageNet/PPLCNet/PPLCNet_x2_5.yaml | 142 ++ 
.../ImageNet/PPLCNetV2/PPLCNetV2_base.yaml | 145 ++ .../ImageNet/PPLCNetV2/PPLCNetV2_large.yaml | 145 ++ .../ImageNet/PPLCNetV2/PPLCNetV2_small.yaml | 145 ++ .../configs/ImageNet/PVTV2/PVT_V2_B0.yaml | 174 ++ .../configs/ImageNet/PVTV2/PVT_V2_B1.yaml | 174 ++ .../configs/ImageNet/PVTV2/PVT_V2_B2.yaml | 174 ++ .../ImageNet/PVTV2/PVT_V2_B2_Linear.yaml | 174 ++ .../configs/ImageNet/PVTV2/PVT_V2_B3.yaml | 175 ++ .../configs/ImageNet/PVTV2/PVT_V2_B4.yaml | 175 ++ .../configs/ImageNet/PVTV2/PVT_V2_B5.yaml | 175 ++ .../configs/ImageNet/PeleeNet/PeleeNet.yaml | 148 ++ .../configs/ImageNet/ReXNet/ReXNet_1_0.yaml | 144 ++ .../configs/ImageNet/ReXNet/ReXNet_1_3.yaml | 144 ++ .../configs/ImageNet/ReXNet/ReXNet_1_5.yaml | 144 ++ .../configs/ImageNet/ReXNet/ReXNet_2_0.yaml | 144 ++ .../configs/ImageNet/ReXNet/ReXNet_3_0.yaml | 144 ++ .../configs/ImageNet/RedNet/RedNet101.yaml | 142 ++ .../configs/ImageNet/RedNet/RedNet152.yaml | 142 ++ .../configs/ImageNet/RedNet/RedNet26.yaml | 142 ++ .../configs/ImageNet/RedNet/RedNet38.yaml | 142 ++ .../configs/ImageNet/RedNet/RedNet50.yaml | 142 ++ .../configs/ImageNet/RegNet/RegNetX_12GF.yaml | 142 ++ .../ImageNet/RegNet/RegNetX_1600MF.yaml | 142 ++ .../configs/ImageNet/RegNet/RegNetX_16GF.yaml | 142 ++ .../ImageNet/RegNet/RegNetX_200MF.yaml | 142 ++ .../ImageNet/RegNet/RegNetX_3200MF.yaml | 142 ++ .../configs/ImageNet/RegNet/RegNetX_32GF.yaml | 142 ++ .../ImageNet/RegNet/RegNetX_400MF.yaml | 142 ++ .../ImageNet/RegNet/RegNetX_600MF.yaml | 142 ++ .../ImageNet/RegNet/RegNetX_6400MF.yaml | 142 ++ .../ImageNet/RegNet/RegNetX_800MF.yaml | 142 ++ .../configs/ImageNet/RegNet/RegNetX_8GF.yaml | 142 ++ .../configs/ImageNet/RepVGG/RepVGG_A0.yaml | 140 ++ .../configs/ImageNet/RepVGG/RepVGG_A1.yaml | 140 ++ .../configs/ImageNet/RepVGG/RepVGG_A2.yaml | 140 ++ .../configs/ImageNet/RepVGG/RepVGG_B0.yaml | 140 ++ .../configs/ImageNet/RepVGG/RepVGG_B1.yaml | 140 ++ .../configs/ImageNet/RepVGG/RepVGG_B1g2.yaml | 140 ++ .../configs/ImageNet/RepVGG/RepVGG_B1g4.yaml | 140 ++ .../configs/ImageNet/RepVGG/RepVGG_B2.yaml | 145 ++ .../configs/ImageNet/RepVGG/RepVGG_B2g4.yaml | 145 ++ .../configs/ImageNet/RepVGG/RepVGG_B3.yaml | 149 ++ .../configs/ImageNet/RepVGG/RepVGG_B3g4.yaml | 149 ++ .../configs/ImageNet/RepVGG/RepVGG_D2se.yaml | 149 ++ .../Res2Net/Res2Net101_vd_26w_4s.yaml | 142 ++ .../Res2Net/Res2Net200_vd_26w_4s.yaml | 142 ++ .../ImageNet/Res2Net/Res2Net50_14w_8s.yaml | 142 ++ .../ImageNet/Res2Net/Res2Net50_26w_4s.yaml | 142 ++ .../ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml | 142 ++ .../configs/ImageNet/ResNeSt/ResNeSt101.yaml | 143 ++ .../configs/ImageNet/ResNeSt/ResNeSt200.yaml | 143 ++ .../configs/ImageNet/ResNeSt/ResNeSt269.yaml | 143 ++ .../configs/ImageNet/ResNeSt/ResNeSt50.yaml | 143 ++ .../ResNeSt/ResNeSt50_fast_1s1x64d.yaml | 143 ++ .../ResNeXt101_wsl/ResNeXt101_32x16d_wsl.yaml | 142 ++ .../ResNeXt101_wsl/ResNeXt101_32x32d_wsl.yaml | 142 ++ .../ResNeXt101_wsl/ResNeXt101_32x48d_wsl.yaml | 142 ++ .../ResNeXt101_wsl/ResNeXt101_32x8d_wsl.yaml | 142 ++ .../configs/ImageNet/ResNet/ResNet101.yaml | 144 ++ .../configs/ImageNet/ResNet/ResNet101_vd.yaml | 142 ++ .../configs/ImageNet/ResNet/ResNet152.yaml | 144 ++ .../configs/ImageNet/ResNet/ResNet152_vd.yaml | 142 ++ .../configs/ImageNet/ResNet/ResNet18.yaml | 142 ++ .../configs/ImageNet/ResNet/ResNet18_dbb.yaml | 153 ++ .../configs/ImageNet/ResNet/ResNet18_vd.yaml | 142 ++ .../configs/ImageNet/ResNet/ResNet200_vd.yaml | 142 ++ .../configs/ImageNet/ResNet/ResNet34.yaml | 142 ++ .../configs/ImageNet/ResNet/ResNet34_vd.yaml | 142 ++ 
.../configs/ImageNet/ResNet/ResNet50.yaml | 145 ++ .../ImageNet/ResNet/ResNet50_amp_O1.yaml | 144 ++ .../ResNet/ResNet50_amp_O1_ultra.yaml | 150 ++ .../ResNet/ResNet50_amp_O2_ultra.yaml | 150 ++ .../ImageNet/ResNet/ResNet50_ampo2_ultra.yaml | 144 ++ .../ImageNet/ResNet/ResNet50_fp32_ultra.yaml | 146 ++ .../configs/ImageNet/ResNet/ResNet50_vd.yaml | 142 ++ .../configs/ImageNet/SENet/SENet154_vd.yaml | 142 ++ .../ImageNet/SENet/SE_ResNeXt101_32x4d.yaml | 142 ++ .../SE_ResNeXt101_32x4d_amp_O2_ultra.yaml | 144 ++ .../ImageNet/SENet/SE_ResNeXt50_32x4d.yaml | 142 ++ .../ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml | 142 ++ .../ImageNet/SENet/SE_ResNet18_vd.yaml | 142 ++ .../ImageNet/SENet/SE_ResNet34_vd.yaml | 142 ++ .../ImageNet/SENet/SE_ResNet50_vd.yaml | 142 ++ .../ShuffleNet/ShuffleNetV2_swish.yaml | 141 ++ .../ShuffleNet/ShuffleNetV2_x0_25.yaml | 141 ++ .../ShuffleNet/ShuffleNetV2_x0_33.yaml | 141 ++ .../ShuffleNet/ShuffleNetV2_x0_5.yaml | 141 ++ .../ShuffleNet/ShuffleNetV2_x1_0.yaml | 141 ++ .../ShuffleNet/ShuffleNetV2_x1_5.yaml | 141 ++ .../ShuffleNet/ShuffleNetV2_x2_0.yaml | 141 ++ .../ImageNet/SqueezeNet/SqueezeNet1_0.yaml | 140 ++ .../ImageNet/SqueezeNet/SqueezeNet1_1.yaml | 140 ++ .../configs/ImageNet/StarNet/StarNet_S1.yaml | 165 ++ .../configs/ImageNet/StarNet/StarNet_S2.yaml | 165 ++ .../configs/ImageNet/StarNet/StarNet_S3.yaml | 166 ++ .../configs/ImageNet/StarNet/StarNet_S4.yaml | 165 ++ ...nTransformer_base_patch4_window12_384.yaml | 175 ++ ...inTransformer_base_patch4_window7_224.yaml | 176 ++ ...Transformer_large_patch4_window12_384.yaml | 175 ++ ...nTransformer_large_patch4_window7_224.yaml | 175 ++ ...nTransformer_small_patch4_window7_224.yaml | 175 ++ ...inTransformer_tiny_patch4_window7_224.yaml | 175 ++ ...ransformerV2_base_patch4_window16_256.yaml | 172 ++ ...ransformerV2_base_patch4_window24_384.yaml | 172 ++ ...TransformerV2_base_patch4_window8_256.yaml | 172 ++ ...ansformerV2_large_patch4_window16_256.yaml | 172 ++ ...ansformerV2_large_patch4_window24_384.yaml | 172 ++ ...ansformerV2_small_patch4_window16_256.yaml | 172 ++ ...ransformerV2_small_patch4_window8_256.yaml | 172 ++ ...ransformerV2_tiny_patch4_window16_256.yaml | 172 ++ ...TransformerV2_tiny_patch4_window8_256.yaml | 172 ++ .../configs/ImageNet/TNT/TNT_base.yaml | 146 ++ .../configs/ImageNet/TNT/TNT_small.yaml | 146 ++ .../configs/ImageNet/TinyNet/TinyNet_A.yaml | 167 ++ .../configs/ImageNet/TinyNet/TinyNet_B.yaml | 167 ++ .../configs/ImageNet/TinyNet/TinyNet_C.yaml | 167 ++ .../configs/ImageNet/TinyNet/TinyNet_D.yaml | 167 ++ .../configs/ImageNet/TinyNet/TinyNet_E.yaml | 167 ++ .../configs/ImageNet/Twins/alt_gvt_base.yaml | 174 ++ .../configs/ImageNet/Twins/alt_gvt_large.yaml | 174 ++ .../configs/ImageNet/Twins/alt_gvt_small.yaml | 174 ++ .../configs/ImageNet/Twins/pcpvt_base.yaml | 174 ++ .../configs/ImageNet/Twins/pcpvt_large.yaml | 174 ++ .../configs/ImageNet/Twins/pcpvt_small.yaml | 174 ++ .../ImageNet/UniFormer/UniFormer_base.yaml | 174 ++ .../ImageNet/UniFormer/UniFormer_base_ls.yaml | 174 ++ .../ImageNet/UniFormer/UniFormer_small.yaml | 174 ++ .../UniFormer/UniFormer_small_plus.yaml | 174 ++ .../UniFormer/UniFormer_small_plus_dim64.yaml | 174 ++ .../configs/ImageNet/VAN/VAN_B0.yaml | 170 ++ .../configs/ImageNet/VAN/VAN_B1.yaml | 170 ++ .../configs/ImageNet/VAN/VAN_B2.yaml | 170 ++ .../configs/ImageNet/VAN/VAN_B3.yaml | 170 ++ .../ViT_base_patch16_224.yaml | 142 ++ .../ViT_base_patch16_384.yaml | 142 ++ .../ViT_base_patch32_384.yaml | 142 ++ .../ViT_large_patch16_224.yaml | 142 ++ 
.../ViT_large_patch16_384.yaml | 142 ++ .../ViT_large_patch32_384.yaml | 142 ++ .../ViT_small_patch16_224.yaml | 142 ++ .../configs/ImageNet/Xception/Xception41.yaml | 141 ++ .../ImageNet/Xception/Xception41_deeplab.yaml | 141 ++ .../configs/ImageNet/Xception/Xception65.yaml | 142 ++ .../ImageNet/Xception/Xception65_deeplab.yaml | 141 ++ .../configs/ImageNet/Xception/Xception71.yaml | 142 ++ .../ppcls_2.6/configs/Logo/ResNet50_ReID.yaml | 151 ++ ...P_vit_base_patch16_448_ml_decoder_448.yaml | 172 ++ .../PP-HGNetV2-B0_ml_decoder_448.yaml | 168 ++ .../PP-HGNetV2-B4_ml_decoder_448.yaml | 168 ++ .../PP-HGNetV2-B6_ml_decoder_448.yaml | 168 ++ .../PP-LCNet_x1_0_ml_decoder_448.yaml | 170 ++ .../MultiLabelCOCO/MLDecoder/README.md | 272 ++++ .../MLDecoder/ResNet101_ml_decoder_448.yaml | 168 ++ .../MLDecoder/ResNet50_ml_decoder_448.yaml | 168 ++ .../car_exists/MobileNetV3_small_x0_35.yaml | 139 ++ .../configs/PULC/car_exists/PPLCNet_x1_0.yaml | 152 ++ .../car_exists/PPLCNet_x1_0_distillation.yaml | 169 ++ .../PULC/car_exists/PPLCNet_x1_0_search.yaml | 152 ++ ...inTransformer_tiny_patch4_window7_224.yaml | 169 ++ .../configs/PULC/car_exists/search.yaml | 40 + .../PULC/clarity_assessment/PPLCNet_x1_0.yaml | 133 ++ .../code_exists/MobileNetV3_small_x0_35.yaml | 137 ++ .../PULC/code_exists/PPLCNet_x1_0.yaml | 145 ++ .../PPLCNet_x1_0_distillation.yaml | 167 ++ .../PULC/code_exists/PPLCNet_x1_0_search.yaml | 150 ++ ...inTransformer_tiny_patch4_window7_224.yaml | 167 ++ .../configs/PULC/code_exists/search.yaml | 40 + .../PULC/image_orientation/PPLCNet_x1_0.yaml | 144 ++ .../MobileNetV3_small_x0_35.yaml | 132 ++ .../language_classification/PPLCNet_x1_0.yaml | 143 ++ .../PPLCNet_x1_0_distillation.yaml | 164 ++ .../PPLCNet_x1_0_search.yaml | 142 ++ ...inTransformer_tiny_patch4_window7_224.yaml | 160 ++ .../PULC/language_classification/search.yaml | 40 + .../MobileNetV3_small_x0_35.yaml | 135 ++ .../PULC/person_attribute/PPLCNet_x1_0.yaml | 149 ++ .../PPLCNet_x1_0_Distillation.yaml | 172 ++ .../person_attribute/PPLCNet_x1_0_search.yaml | 149 ++ .../Res2Net200_vd_26w_4s.yaml | 134 ++ ...inTransformer_tiny_patch4_window7_224.yaml | 135 ++ .../configs/PULC/person_attribute/search.yaml | 41 + .../MobileNetV3_small_x0_35.yaml | 138 ++ .../PULC/person_exists/PPLCNet_x1_0.yaml | 151 ++ .../PPLCNet_x1_0_distillation.yaml | 168 ++ .../person_exists/PPLCNet_x1_0_search.yaml | 151 ++ ...inTransformer_tiny_patch4_window7_224.yaml | 168 ++ .../configs/PULC/person_exists/search.yaml | 40 + .../MobileNetV3_small_x0_35.yaml | 134 ++ .../PULC/safety_helmet/PPLCNet_x1_0.yaml | 148 ++ .../PPLCNet_x1_0_distillation.yaml | 185 +++ .../safety_helmet/PPLCNet_x1_0_search.yaml | 148 ++ .../safety_helmet/Res2Net200_vd_26w_4s.yaml | 137 ++ ...inTransformer_tiny_patch4_window7_224.yaml | 159 ++ .../configs/PULC/safety_helmet/search.yaml | 36 + .../PULC/table_attribute/PPLCNet_x1_0.yaml | 133 ++ .../PPLCNet_x1_0_distillation.yaml | 155 ++ .../MobileNetV3_small_x0_35.yaml | 132 ++ .../text_image_orientation/PPLCNet_x1_0.yaml | 143 ++ .../PPLCNet_x1_0_distillation.yaml | 164 ++ .../PPLCNet_x1_0_search.yaml | 146 ++ ...inTransformer_tiny_patch4_window7_224.yaml | 157 ++ .../PULC/text_image_orientation/search.yaml | 41 + .../MobileNetV3_small_x0_35.yaml | 134 ++ .../textline_orientation/PPLCNet_x1_0.yaml | 143 ++ .../PPLCNet_x1_0_224x224.yaml | 132 ++ .../PPLCNet_x1_0_distillation.yaml | 162 ++ .../PPLCNet_x1_0_search.yaml | 144 ++ ...inTransformer_tiny_patch4_window7_224.yaml | 164 ++ .../PULC/textline_orientation/search.yaml | 41 + 
.../traffic_sign/MobileNetV3_samll_x0_35.yaml | 132 ++ .../PULC/traffic_sign/PPLCNet_x1_0.yaml | 148 ++ .../PPLCNet_x1_0_distillation.yaml | 172 ++ .../traffic_sign/PPLCNet_x1_0_search.yaml | 148 ++ ...inTransformer_tiny_patch4_window7_224.yaml | 170 ++ .../configs/PULC/traffic_sign/search.yaml | 41 + .../MobileNetV3_small_x0_35.yaml | 115 ++ .../PULC/vehicle_attribute/PPLCNet_x1_0.yaml | 149 ++ .../PPLCNet_x1_0_distillation.yaml | 171 ++ .../PPLCNet_x1_0_search.yaml | 129 ++ .../Res2Net200_vd_26w_4s.yaml | 122 ++ .../PULC/vehicle_attribute/ResNet50.yaml | 116 ++ .../PULC/vehicle_attribute/search.yaml | 35 + .../MV3_Large_1x_Aliproduct_DLBHC.yaml | 149 ++ .../Products/ResNet50_vd_Aliproduct.yaml | 119 ++ .../configs/Products/ResNet50_vd_Inshop.yaml | 157 ++ .../configs/Products/ResNet50_vd_SOP.yaml | 156 ++ .../configs/ResNet50_UReID_infer.yaml | 152 ++ .../ppcls_2.6/configs/SVTR/svtr_base.yml | 146 ++ .../ppcls_2.6/configs/SVTR/svtr_large.yml | 146 ++ .../ppcls_2.6/configs/SVTR/svtr_tiny.yml | 146 ++ .../configs/StrategySearch/person.yaml | 40 + .../configs/Vehicle/PPLCNet_2.5x_ReID.yaml | 158 ++ .../ppcls_2.6/configs/Vehicle/ResNet50.yaml | 130 ++ .../configs/Vehicle/ResNet50_ReID.yaml | 155 ++ .../configs/metric_learning/adaface_ir18.yaml | 105 ++ .../configs/metric_learning/xbm_resnet50.yaml | 170 ++ .../multi_scale/MobileNetV1_multi_scale.yaml | 138 ++ .../configs/practical_models/.gitkeep | 1 + .../CLIP_large_patch14_224_aesthetic.yaml | 78 + .../EfficientNetB3_watermark.yaml | 81 + .../PPHGNet_tiny_calling_halfbody.yaml | 150 ++ .../quick_start/MobileNetV1_retrieval.yaml | 157 ++ .../quick_start/MobileNetV3_large_x1_0.yaml | 130 ++ .../configs/quick_start/ResNet50_vd.yaml | 129 ++ .../kunlun/HRNet_W18_C_finetune_kunlun.yaml | 68 + .../kunlun/ResNet50_vd_finetune_kunlun.yaml | 69 + .../kunlun/VGG16_finetune_kunlun.yaml | 70 + .../kunlun/VGG19_finetune_kunlun.yaml | 70 + .../new_user/ShuffleNetV2_x0_25.yaml | 129 ++ .../professional/MobileNetV1_multilabel.yaml | 130 ++ ...ileNetV3_large_x1_0_CIFAR100_finetune.yaml | 127 ++ ...50_vd_distill_MV3_large_x1_0_CIFAR100.yaml | 151 ++ .../professional/ResNet50_vd_CIFAR100.yaml | 127 ++ .../ResNet50_vd_mixup_CIFAR100_finetune.yaml | 127 ++ .../professional/VGG19_CIFAR10_DeepHash.yaml | 147 ++ .../reid/MetaBIN_ResNet50_cross_domain.yaml | 277 ++++ .../reid/strong_baseline/baseline.yaml | 158 ++ .../reid/strong_baseline/softmax_triplet.yaml | 176 ++ .../softmax_triplet_with_center.yaml | 187 +++ ...Recognition_PPLCNet_x2_5_quantization.yaml | 154 ++ .../slim/MobileNetV3_large_x1_0_prune.yaml | 139 ++ .../MobileNetV3_large_x1_0_quantization.yaml | 138 ++ .../slim/PPLCNet_x1_0_quantization.yaml | 138 ++ .../configs/slim/ResNet50_vd_prune.yaml | 138 ++ .../slim/ResNet50_vd_quantization.yaml | 137 ++ .../slim/ResNet50_vehicle_cls_prune.yaml | 135 ++ .../ResNet50_vehicle_cls_quantization.yaml | 134 ++ .../slim/ResNet50_vehicle_reid_prune.yaml | 162 ++ .../ResNet50_vehicle_reid_quantization.yaml | 161 ++ .../FixMatchCCSSL_cifar100_10000_4gpu.yaml | 209 +++ .../FixMatchCCSSL_cifar10_4000_4gpu.yaml | 208 +++ .../ssl/FixMatch/FixMatch_cifar10_250.yaml | 175 ++ .../ssl/FixMatch/FixMatch_cifar10_40.yaml | 175 ++ .../ssl/FixMatch/FixMatch_cifar10_4000.yaml | 175 ++ .../FixMatch/FixMatch_cifar10_40_4gpu.yaml | 176 ++ .../paddlepaddle/ppcls_2.6/engine/__init__.py | 0 .../paddlepaddle/ppcls_2.6/engine/engine.py | 717 ++++++++ .../ppcls_2.6/engine/evaluation/__init__.py | 18 + .../ppcls_2.6/engine/evaluation/adaface.py | 260 +++ 
.../engine/evaluation/classification.py | 175 ++ .../engine/evaluation/face_recognition.py | 152 ++ .../ppcls_2.6/engine/evaluation/retrieval.py | 327 ++++ .../ppcls_2.6/engine/train/__init__.py | 18 + .../ppcls_2.6/engine/train/train.py | 100 ++ .../ppcls_2.6/engine/train/train_fixmatch.py | 152 ++ .../engine/train/train_fixmatch_ccssl.py | 125 ++ .../ppcls_2.6/engine/train/train_metabin.py | 251 +++ .../engine/train/train_progressive.py | 72 + .../ppcls_2.6/engine/train/utils.py | 94 ++ .../paddlepaddle/ppcls_2.6/loss/__init__.py | 91 ++ .../paddlepaddle/ppcls_2.6/loss/afdloss.py | 130 ++ .../paddlepaddle/ppcls_2.6/loss/ccssl_loss.py | 19 + .../paddlepaddle/ppcls_2.6/loss/celoss.py | 76 + .../paddlepaddle/ppcls_2.6/loss/centerloss.py | 80 + .../paddlepaddle/ppcls_2.6/loss/comfunc.py | 45 + .../ppcls_2.6/loss/contrasiveloss.py | 152 ++ .../ppcls_2.6/loss/deephashloss.py | 149 ++ .../paddlepaddle/ppcls_2.6/loss/dist_loss.py | 52 + .../ppcls_2.6/loss/distanceloss.py | 43 + .../ppcls_2.6/loss/distillationloss.py | 426 +++++ .../paddlepaddle/ppcls_2.6/loss/dkdloss.py | 68 + .../paddlepaddle/ppcls_2.6/loss/dmlloss.py | 62 + .../paddlepaddle/ppcls_2.6/loss/emlloss.py | 102 ++ .../ppcls_2.6/loss/googlenetloss.py | 43 + .../paddlepaddle/ppcls_2.6/loss/kldivloss.py | 33 + .../ppcls_2.6/loss/metabinloss.py | 206 +++ .../paddlepaddle/ppcls_2.6/loss/mgd_loss.py | 84 + .../paddlepaddle/ppcls_2.6/loss/msmloss.py | 80 + .../ppcls_2.6/loss/multilabelloss.py | 119 ++ .../paddlepaddle/ppcls_2.6/loss/npairsloss.py | 43 + .../ppcls_2.6/loss/pairwisecosface.py | 64 + .../paddlepaddle/ppcls_2.6/loss/pefdloss.py | 83 + .../paddlepaddle/ppcls_2.6/loss/rkdloss.py | 99 ++ .../paddlepaddle/ppcls_2.6/loss/skdloss.py | 72 + .../ppcls_2.6/loss/softsuploss.py | 75 + .../ppcls_2.6/loss/softtargetceloss.py | 16 + .../paddlepaddle/ppcls_2.6/loss/supconloss.py | 109 ++ .../ppcls_2.6/loss/trihardloss.py | 84 + .../paddlepaddle/ppcls_2.6/loss/triplet.py | 157 ++ .../loss/tripletangularmarginloss.py | 241 +++ .../paddlepaddle/ppcls_2.6/loss/wslloss.py | 66 + .../paddlepaddle/ppcls_2.6/loss/xbm.py | 89 + .../paddlepaddle/ppcls_2.6/metric/__init__.py | 72 + .../ppcls_2.6/metric/avg_metrics.py | 20 + .../ppcls_2.6/metric/face_metrics.py | 201 +++ .../paddlepaddle/ppcls_2.6/metric/metrics.py | 661 ++++++++ .../ppcls_2.6/optimizer/__init__.py | 137 ++ .../ppcls_2.6/optimizer/learning_rate.py | 687 ++++++++ .../ppcls_2.6/optimizer/optimizer.py | 518 ++++++ .../paddlepaddle/ppcls_2.6/static/README.md | 19 + .../paddlepaddle/ppcls_2.6/static/program.py | 445 +++++ .../paddlepaddle/ppcls_2.6/static/run_dali.sh | 8 + .../ppcls_2.6/static/save_load.py | 139 ++ .../paddlepaddle/ppcls_2.6/static/train.py | 227 +++ .../ppcls_2.6/utils/COCO2017_label_list.txt | 80 + .../utils/NUS-WIDE-SCENE_label_list.txt | 33 + .../image_orientation_label_list.txt | 4 + .../language_classification_label_list.txt | 10 + .../text_image_orientation_label_list.txt | 4 + .../textline_orientation_label_list.txt | 2 + .../traffic_sign_label_list.txt | 232 +++ .../paddlepaddle/ppcls_2.6/utils/__init__.py | 29 + .../paddlepaddle/ppcls_2.6/utils/amp.py | 61 + .../paddlepaddle/ppcls_2.6/utils/check.py | 149 ++ .../paddlepaddle/ppcls_2.6/utils/config.py | 326 ++++ .../utils/create_cls_trainval_lists.py | 111 ++ .../utils/create_coco_multilabel_lists.py | 126 ++ .../ppcls_2.6/utils/dist_utils.py | 36 + .../paddlepaddle/ppcls_2.6/utils/download.py | 304 ++++ .../paddlepaddle/ppcls_2.6/utils/ema.py | 45 + .../feature_maps_visualization/fm_vis.py | 97 ++ 
.../feature_maps_visualization/resnet.py | 535 ++++++ .../utils/feature_maps_visualization/utils.py | 85 + .../ppcls_2.6/utils/imagenet1k_label_list.txt | 1000 ++++++++++++ .../ppcls_2.6/utils/initializer.py | 318 ++++ .../paddlepaddle/ppcls_2.6/utils/logger.py | 173 ++ .../paddlepaddle/ppcls_2.6/utils/metrics.py | 107 ++ .../paddlepaddle/ppcls_2.6/utils/misc.py | 155 ++ .../paddlepaddle/ppcls_2.6/utils/model_zoo.py | 213 +++ .../utils/pedestrian_attribute_label_list.txt | 26 + .../ppcls_2.6/utils/pretrained.list | 121 ++ .../paddlepaddle/ppcls_2.6/utils/profiler.py | 129 ++ .../paddlepaddle/ppcls_2.6/utils/save_load.py | 225 +++ .../ppcls_2.6/utils/save_result.py | 102 ++ .../utils/vehicle_attribute_label_list.txt | 19 + .../resnet50/paddlepaddle/requirements.txt | 11 + .../resnet50/paddlepaddle/run_resnet50.sh | 8 + .../paddlepaddle/run_resnet50_dist.sh | 8 + .../resnet50/paddlepaddle/train.py | 40 + tests/executables/resnet/init_paddle.sh | 2 +- .../resnet/train_resnet50_dist_paddle.sh | 2 +- 837 files changed, 147131 insertions(+), 2 deletions(-) create mode 100644 cv/classification/resnet50/paddlepaddle/.gitignore create mode 100644 cv/classification/resnet50/paddlepaddle/LICENSE create mode 100644 cv/classification/resnet50/paddlepaddle/MANIFEST.in create mode 100644 cv/classification/resnet50/paddlepaddle/README_ch.md create mode 100644 cv/classification/resnet50/paddlepaddle/README_en.md create mode 100644 cv/classification/resnet50/paddlepaddle/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/hubconf.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/base/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/base/theseus_layer.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/legendary_models/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/legendary_models/resnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/model_zoo/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/model_zoo/resnet_vc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/variant_models/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/variant_models/resnet_variant.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/arcmargin.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/circlemargin.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/cosmargin.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/fc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/identity_head.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/vehicle_neck.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/slim/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/slim/prune.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/arch/slim/quant.py create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls/arch/utils.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/ResNet/ResNet50.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/ResNet/ResNet50_amp_4x8.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/configs/quick_start/ResNet50_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/engine/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/engine/engine.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/engine/evaluation/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/engine/evaluation/classification.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/engine/evaluation/retrieval.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/engine/train/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/engine/train/train.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/engine/train/utils.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/loss/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/loss/celoss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/loss/comfunc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/loss/distanceloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/metric/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/metric/metrics.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/optimizer/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/optimizer/learning_rate.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/optimizer/optimizer.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/static/program.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/static/save_load.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/static/train.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/check.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/config.py create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls/utils/download.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/ema.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/feature_maps_visualization/fm_vis.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/feature_maps_visualization/resnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/feature_maps_visualization/utils.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/gallery2fc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/imagenet1k_label_list.txt create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/logger.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/metrics.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/misc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/model_zoo.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/pretrained.list create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/profiler.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls/utils/save_load.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/base/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/base/theseus_layer.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/legendary_models/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/legendary_models/resnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/model_zoo/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/model_zoo/resnet_vc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/variant_models/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/variant_models/resnet_variant.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/arcmargin.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/circlemargin.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/cosmargin.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/fc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/identity_head.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/vehicle_neck.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/slim/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/slim/prune.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/slim/quant.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/utils.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/ResNet/ResNet50.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/ResNet/ResNet50_amp_4x8.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/ResNet/ResNet50_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/quick_start/ResNet50_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/engine.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/evaluation/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/evaluation/classification.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/evaluation/retrieval.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/train/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/train/train.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/train/utils.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/celoss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/comfunc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/distanceloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/metric/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/metric/metrics.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/optimizer/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/optimizer/learning_rate.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/optimizer/optimizer.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/static/program.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/static/save_load.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/static/train.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/check.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/config.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/download.py create 
mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/ema.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/feature_maps_visualization/fm_vis.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/feature_maps_visualization/resnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/feature_maps_visualization/utils.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/gallery2fc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/imagenet1k_label_list.txt create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/logger.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/metrics.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/misc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/model_zoo.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/pretrained.list create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/profiler.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/save_load.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/dbb/dbb_block.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/dbb/dbb_transforms.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/theseus_layer.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/custom_devices_layers.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/esnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/hrnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/inception_v3.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/mobilenet_v1.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/mobilenet_v3.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/mobilenet_v4.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_hgnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_hgnet_v2.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_lcnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_lcnet_v2.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/resnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/swin_transformer.py create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/vgg.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/adaface_ir_net.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/alexnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cae.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/convnext.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cspnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cswin_transformer.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cvt.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/darknet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/densenet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/distilled_vision_transformer.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/dla.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/dpn.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/dsnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/efficientnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/efficientnet_v2.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/fasternet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/foundation_vit.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/ghostnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/googlenet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/hardnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/inception_v4.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/levit.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/micronet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mixnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilefacenet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilenet_v2.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilenext.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilevit.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilevit_v2.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilevit_v3.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/nextvit.py create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/peleenet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/pvt_v2.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/rednet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/regnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/repvgg.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/res2net.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/res2net_vd.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnest.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnet_vc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnext.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnext101_wsl.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnext_vd.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/rexnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/se_resnet_vd.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/se_resnext.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/se_resnext_vd.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/shufflenet_v2.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/squeezenet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/starnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/svtrnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/swin_transformer_v2.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/tinynet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/tnt.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/twins.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/uniformer.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/van.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/vision_transformer.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/wideresnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/xception.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/xception_deeplab.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/efficientnet_variant.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/foundation_vit_variant.py create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/pp_lcnet_variant.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/pp_lcnetv2_variant.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/resnet_variant.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/swin_transformer_variant.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/vgg_variant.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/distill/afd_attention.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/adamargin.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/arcmargin.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/bnneck.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/circlemargin.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/cosmargin.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/fc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/frfn_neck.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/identity_head.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/metabnneck.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/ml_decoder.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/vehicle_neck.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/slim/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/slim/prune.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/slim/quant.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/utils.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Attr/PPLCNet_x1_0_pedestrian_attribute.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Attr/PPLCNet_x1_0_vehicle_attribute.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Attr/StrongBaselineAttr.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CAE/cae_base_patch16_224_finetune.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CAE/cae_large_patch16_224_finetune.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CLIP/CLIP_vit_base_patch16_224_finetune.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CLIP/CLIP_vit_large_patch14_224_finetune.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Cartoonface/ResNet50_icartoon.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/DeepHash/DCH.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/DeepHash/DSHSD.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/DeepHash/LCDSH.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Face_Recognition/FaceRecognition_ArcFace_MobileFaceNet.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Face_Recognition/FaceRecognition_ArcFace_ResNet50.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/Gallery2FC_PPLCNet_x2_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_binary.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_dml.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_udml.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognitionV2/GeneralRecognitionV2_CLIP_vit_base.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognitionV2/GeneralRecognitionV2_CLIP_vit_large.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSPNet/CSPDarkNet53.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_base_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_base_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_large_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_large_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_small.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_tiny.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_13_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_13_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_21_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_21_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_W24_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA102.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA102x.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA102x2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA169.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA34.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA46_c.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA46x_c.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA60.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA60x.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA60x_c.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN107.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN131.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN68.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN92.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN98.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DSNet/DSNet_base.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DSNet/DSNet_small.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DSNet/DSNet_tiny.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DarkNet/DarkNet53.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_small_distilled_patch16_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_tiny_distilled_patch16_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet121.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet161.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet169.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet201.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet264.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x1_0_ssld.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x2_5_dml.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x2_5_ssld.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x2_5_udml.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/mv3_large_x1_0_distill_mv3_small_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/res2net200_vd_distill_pphgnet_base.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_afd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_dist.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_mgd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_pefd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_skd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_wsl.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x0_25.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x0_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x0_75.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB2.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB3.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB4.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB6.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB7.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNetV2/EfficientNetV2_S.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_L.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_M.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_S.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_T0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_T1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_T2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet39_ds.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet68.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet68_ds.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet85.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Inception/GoogLeNet.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Inception/InceptionV3.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Inception/InceptionV4.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_128.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_128S.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_192.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_256.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M3.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MixNet/MixNet_L.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MixNet/MixNet_M.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MixNet/MixNet_S.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNeXt/MobileNeXt_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_ampo2_ultra.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_conv_large.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_conv_medium.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_conv_small.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_hybrid_large.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_hybrid_medium.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViT/MobileViT_S.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViT/MobileViT_XS.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViT/MobileViT_XXS.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x0_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x1_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x2_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_S.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_S_L2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XS.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XS_L2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XXS.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XXS_L2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_x0_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_x0_75.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_base_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_base_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_large_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_large_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_small_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_small_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNet/PPHGNet_base.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNet/PPHGNet_small.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B2.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B3.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B4.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B4_ssld_stage1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B4_ssld_stage2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B6.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_0_ampo2_ultra.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNetV2/PPLCNetV2_large.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNetV2/PPLCNetV2_small.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B3.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B4.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PeleeNet/PeleeNet.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_1_3.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_1_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_2_0.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_3_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet101.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet152.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet26.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet38.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet50.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_12GF.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_1600MF.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_16GF.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_200MF.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_3200MF.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_32GF.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_400MF.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_600MF.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_6400MF.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_800MF.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_8GF.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_A0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_A1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_A2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B1g2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B1g4.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B2g4.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B3.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B3g4.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_D2se.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt101.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt200.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt269.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt50.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x16d_wsl.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x32d_wsl.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x48d_wsl.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x8d_wsl.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet101.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet101_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet152.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet152_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet18.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet18_dbb.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet18_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet200_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet34.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet34_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_amp_O1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_amp_O1_ultra.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_ampo2_ultra.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_fp32_ultra.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SENet154_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml create 
mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNet18_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNet34_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNet50_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S3.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S4.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_base_patch4_window16_256.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_base_patch4_window24_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_base_patch4_window8_256.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_large_patch4_window16_256.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_large_patch4_window24_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_small_patch4_window16_256.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_small_patch4_window8_256.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_tiny_patch4_window16_256.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_tiny_patch4_window8_256.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TNT/TNT_base.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TNT/TNT_small.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_A.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_B.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_C.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_D.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_E.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/alt_gvt_base.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/alt_gvt_large.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/alt_gvt_small.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/pcpvt_base.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/pcpvt_large.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/pcpvt_small.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_base.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_base_ls.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_small.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_small_plus.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_small_plus_dim64.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B1.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B2.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B3.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception41.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception41_deeplab.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception65.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception65_deeplab.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception71.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Logo/ResNet50_ReID.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/CLIP_vit_base_patch16_448_ml_decoder_448.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-HGNetV2-B0_ml_decoder_448.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-HGNetV2-B4_ml_decoder_448.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-HGNetV2-B6_ml_decoder_448.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-LCNet_x1_0_ml_decoder_448.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/README.md create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/ResNet101_ml_decoder_448.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/ResNet50_ml_decoder_448.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/MobileNetV3_small_x0_35.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/PPLCNet_x1_0_distillation.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/PPLCNet_x1_0_search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/SwinTransformer_tiny_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/clarity_assessment/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/MobileNetV3_small_x0_35.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/PPLCNet_x1_0_distillation.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/PPLCNet_x1_0_search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/SwinTransformer_tiny_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/image_orientation/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/MobileNetV3_small_x0_35.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/PPLCNet_x1_0_search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/SwinTransformer_tiny_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/MobileNetV3_small_x0_35.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/PPLCNet_x1_0_search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/Res2Net200_vd_26w_4s.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/SwinTransformer_tiny_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/MobileNetV3_small_x0_35.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/PPLCNet_x1_0_search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/SwinTransformer_tiny_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/MobileNetV3_small_x0_35.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/PPLCNet_x1_0_search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/Res2Net200_vd_26w_4s.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/SwinTransformer_tiny_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/table_attribute/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/table_attribute/PPLCNet_x1_0_distillation.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/MobileNetV3_small_x0_35.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/PPLCNet_x1_0_search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/SwinTransformer_tiny_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/MobileNetV3_small_x0_35.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0_224x224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0_distillation.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0_search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/SwinTransformer_tiny_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/MobileNetV3_samll_x0_35.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/PPLCNet_x1_0_search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/SwinTransformer_tiny_patch4_window7_224.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/MobileNetV3_small_x0_35.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/PPLCNet_x1_0_distillation.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/PPLCNet_x1_0_search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/Res2Net200_vd_26w_4s.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/ResNet50.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/search.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/MV3_Large_1x_Aliproduct_DLBHC.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/ResNet50_vd_Aliproduct.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/ResNet50_vd_Inshop.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/ResNet50_vd_SOP.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ResNet50_UReID_infer.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/SVTR/svtr_base.yml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/SVTR/svtr_large.yml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/SVTR/svtr_tiny.yml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/StrategySearch/person.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Vehicle/PPLCNet_2.5x_ReID.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Vehicle/ResNet50.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Vehicle/ResNet50_ReID.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/metric_learning/adaface_ir18.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/metric_learning/xbm_resnet50.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/multi_scale/MobileNetV1_multi_scale.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/.gitkeep create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/CLIP_large_patch14_224_aesthetic.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/EfficientNetB3_watermark.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/MobileNetV1_retrieval.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/MobileNetV3_large_x1_0.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/ResNet50_vd.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/HRNet_W18_C_finetune_kunlun.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/ResNet50_vd_finetune_kunlun.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/VGG16_finetune_kunlun.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/VGG19_finetune_kunlun.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/new_user/ShuffleNetV2_x0_25.yaml create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/MobileNetV1_multilabel.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/MobileNetV3_large_x1_0_CIFAR100_finetune.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/R50_vd_distill_MV3_large_x1_0_CIFAR100.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/ResNet50_vd_CIFAR100.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/ResNet50_vd_mixup_CIFAR100_finetune.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/VGG19_CIFAR10_DeepHash.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/MetaBIN_ResNet50_cross_domain.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/strong_baseline/baseline.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/strong_baseline/softmax_triplet.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/strong_baseline/softmax_triplet_with_center.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/GeneralRecognition_PPLCNet_x2_5_quantization.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/MobileNetV3_large_x1_0_prune.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/MobileNetV3_large_x1_0_quantization.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/PPLCNet_x1_0_quantization.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vd_prune.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vd_quantization.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_cls_prune.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_cls_quantization.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_reid_prune.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_reid_quantization.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/CCSSL/FixMatchCCSSL_cifar100_10000_4gpu.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/CCSSL/FixMatchCCSSL_cifar10_4000_4gpu.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_250.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_40.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_4000.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_40_4gpu.yaml create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/engine.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/adaface.py create 
mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/classification.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/face_recognition.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/retrieval.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_fixmatch.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_fixmatch_ccssl.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_metabin.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_progressive.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/utils.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/afdloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/ccssl_loss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/celoss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/centerloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/comfunc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/contrasiveloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/deephashloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/dist_loss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/distanceloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/distillationloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/dkdloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/dmlloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/emlloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/googlenetloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/kldivloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/metabinloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/mgd_loss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/msmloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/multilabelloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/npairsloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/pairwisecosface.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/pefdloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/rkdloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/skdloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/softsuploss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/softtargetceloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/supconloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/trihardloss.py create mode 
100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/triplet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/tripletangularmarginloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/wslloss.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/xbm.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/avg_metrics.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/face_metrics.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/metrics.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/optimizer/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/optimizer/learning_rate.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/optimizer/optimizer.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/README.md create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/program.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/run_dali.sh create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/save_load.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/train.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/COCO2017_label_list.txt create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/NUS-WIDE-SCENE_label_list.txt create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/image_orientation_label_list.txt create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/language_classification_label_list.txt create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/text_image_orientation_label_list.txt create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/textline_orientation_label_list.txt create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/traffic_sign_label_list.txt create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/__init__.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/amp.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/check.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/config.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/create_cls_trainval_lists.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/create_coco_multilabel_lists.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/dist_utils.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/download.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/ema.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/feature_maps_visualization/fm_vis.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/feature_maps_visualization/resnet.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/feature_maps_visualization/utils.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/imagenet1k_label_list.txt create mode 100644 
cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/initializer.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/logger.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/metrics.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/misc.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/model_zoo.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/pedestrian_attribute_label_list.txt create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/pretrained.list create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/profiler.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/save_load.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/save_result.py create mode 100644 cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/vehicle_attribute_label_list.txt create mode 100644 cv/classification/resnet50/paddlepaddle/requirements.txt create mode 100644 cv/classification/resnet50/paddlepaddle/run_resnet50.sh create mode 100644 cv/classification/resnet50/paddlepaddle/run_resnet50_dist.sh create mode 100644 cv/classification/resnet50/paddlepaddle/train.py diff --git a/cv/classification/resnet50/paddlepaddle/.gitignore b/cv/classification/resnet50/paddlepaddle/.gitignore new file mode 100644 index 000000000..dcf07c225 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/.gitignore @@ -0,0 +1,16 @@ +data/ +dataset/ +__pycache__/ +*.pyc +*.sw* +*/workerlog* +checkpoints/ +output*/ +pretrained/ +.ipynb_checkpoints/ +*.ipynb* +_build/ +build/ +log/ +nohup.out +.DS_Store diff --git a/cv/classification/resnet50/paddlepaddle/LICENSE b/cv/classification/resnet50/paddlepaddle/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/cv/classification/resnet50/paddlepaddle/MANIFEST.in b/cv/classification/resnet50/paddlepaddle/MANIFEST.in new file mode 100644 index 000000000..b0a4f6dc1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/MANIFEST.in @@ -0,0 +1,7 @@ +include LICENSE.txt +include README.md +include docs/en/whl_en.md +recursive-include deploy/python predict_cls.py preprocess.py postprocess.py det_preprocess.py +recursive-include deploy/utils get_image_list.py config.py logger.py predictor.py + +recursive-include ppcls/ *.py *.txt \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/README_ch.md b/cv/classification/resnet50/paddlepaddle/README_ch.md new file mode 100644 index 000000000..9219857fd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/README_ch.md @@ -0,0 +1,151 @@ +简体中文 | [English](README_en.md) + +# PaddleClas + +## 简介 + +飞桨图像识别套件PaddleClas是飞桨为工业界和学术界所准备的一个图像识别任务的工具集,助力使用者训练出更好的视觉模型和应用落地。 + +**近期更新** +- 2022.4.21 新增 CVPR2022 oral论文 [MixFormer](https://arxiv.org/pdf/2204.02557.pdf) 相关[代码](https://github.com/PaddlePaddle/PaddleClas/pull/1820/files)。 +- 2022.1.27 全面升级文档;新增[PaddleServing C++ pipeline部署方式](./deploy/paddleserving)和[18M图像识别安卓部署Demo](./deploy/lite_shitu)。 +- 2021.11.1 发布[PP-ShiTu技术报告](https://arxiv.org/pdf/2111.00775.pdf),新增饮料识别demo +- 2021.10.23 发布轻量级图像识别系统PP-ShiTu,CPU上0.2s即可完成在10w+库的图像识别。 +[点击这里](./docs/zh_CN/quick_start/quick_start_recognition.md)立即体验 +- 2021.09.17 发布PP-LCNet系列超轻量骨干网络模型, 在Intel CPU上,单张图像预测速度约5ms,ImageNet-1K数据集上Top1识别准确率达到80.82%,超越ResNet152的模型效果。PP-LCNet的介绍可以参考[论文](https://arxiv.org/pdf/2109.15099.pdf), 或者[PP-LCNet模型介绍](docs/zh_CN/models/PP-LCNet.md),相关指标和预训练权重可以从 [这里](docs/zh_CN/algorithm_introduction/ImageNet_models.md)下载。 +- [more](./docs/zh_CN/others/update_history.md) + +## 特性 + +- PP-ShiTu轻量图像识别系统:集成了目标检测、特征学习、图像检索等模块,广泛适用于各类图像识别任务。cpu上0.2s即可完成在10w+库的图像识别。 + +- PP-LCNet轻量级CPU骨干网络:专门为CPU设备打造轻量级骨干网络,速度、精度均远超竞品。 + +- 丰富的预训练模型库:提供了36个系列共175个ImageNet预训练模型,其中7个精选系列模型支持结构快速修改。 + +- 全面易用的特征学习组件:集成arcmargin, triplet loss等12度量学习方法,通过配置文件即可随意组合切换。 + +- SSLD知识蒸馏:14个分类预训练模型,精度普遍提升3%以上;其中ResNet50_vd模型在ImageNet-1k数据集上的Top-1精度达到了84.0%, +Res2Net200_vd预训练模型Top-1精度高达85.1%。 + +
+ +
+ + +## 欢迎加入技术交流群 + +* 您可以扫描下面的QQ/微信二维码(添加小助手微信并回复“C”),加入PaddleClas微信交流群,获得更高效的问题答疑,与各行各业开发者充分交流,期待您的加入。 + +
+ + +
+ +## 快速体验 + +PP-ShiTu图像识别快速体验:[点击这里](./docs/zh_CN/quick_start/quick_start_recognition.md) + +## 文档教程 +- 安装说明 + - [安装Paddle](./docs/zh_CN/installation/install_paddle.md) + - [安装PaddleClas](./docs/zh_CN/installation/install_paddleclas.md) +- 快速体验 + - [PP-ShiTu图像识别快速体验](./docs/zh_CN/quick_start/quick_start_recognition.md) + - 图像分类快速体验 + - [尝鲜版](./docs/zh_CN/quick_start/quick_start_classification_new_user.md) + - [进阶版](./docs/zh_CN/quick_start/quick_start_classification_professional.md) + - [多标签分类](./docs/zh_CN/quick_start/quick_start_multilabel_classification.md) +- [PP-ShiTu图像识别系统介绍](#图像识别系统介绍) + - [主体检测](./docs/zh_CN/image_recognition_pipeline/mainbody_detection.md) + - [特征提取](./docs/zh_CN/image_recognition_pipeline/feature_extraction.md) + - [向量检索](./docs/zh_CN/image_recognition_pipeline/vector_search.md) +- [骨干网络和预训练模型库](./docs/zh_CN/algorithm_introduction/ImageNet_models.md) +- 数据准备 + - [图像分类数据集介绍](./docs/zh_CN/data_preparation/classification_dataset.md) + - [图像识别数据集介绍](./docs/zh_CN/data_preparation/recognition_dataset.md) +- 模型训练 + - [图像分类任务](./docs/zh_CN/models_training/classification.md) + - [图像识别任务](./docs/zh_CN/models_training/recognition.md) + - [训练参数调整策略](./docs/zh_CN/models_training/train_strategy.md) + - [配置文件说明](./docs/zh_CN/models_training/config_description.md) +- 模型预测部署 + - [模型导出](./docs/zh_CN/inference_deployment/export_model.md) + - Python/C++ 预测引擎 + - [基于Python预测引擎预测推理](./docs/zh_CN/inference_deployment/python_deploy.md) + - [基于C++分类预测引擎预测推理](./docs/zh_CN/inference_deployment/cpp_deploy.md)、[基于C++的PP-ShiTu预测引擎预测推理](deploy/cpp_shitu/readme.md) + - 服务化部署 + - [Paddle Serving服务化部署(推荐)](./docs/zh_CN/inference_deployment/paddle_serving_deploy.md) + - [Hub serving服务化部署](./docs/zh_CN/inference_deployment/paddle_hub_serving_deploy.md) + - [端侧部署](./deploy/lite/readme.md) + - [whl包预测](./docs/zh_CN/inference_deployment/whl_deploy.md) +- 算法介绍 + - [图像分类任务介绍](./docs/zh_CN/algorithm_introduction/image_classification.md) + - [度量学习介绍](./docs/zh_CN/algorithm_introduction/metric_learning.md) +- 高阶使用 + - [数据增广](./docs/zh_CN/advanced_tutorials/DataAugmentation.md) + - [模型量化](./docs/zh_CN/advanced_tutorials/model_prune_quantization.md) + - [知识蒸馏](./docs/zh_CN/advanced_tutorials/knowledge_distillation.md) + - [PaddleClas结构解析](./docs/zh_CN/advanced_tutorials/code_overview.md) + - [社区贡献指南](./docs/zh_CN/advanced_tutorials/how_to_contribute.md) +- FAQ + - [图像识别精选问题](docs/zh_CN/faq_series/faq_2021_s2.md) + - [图像分类精选问题](docs/zh_CN/faq_series/faq_selected_30.md) + - [图像分类FAQ第一季](docs/zh_CN/faq_series/faq_2020_s1.md) + - [图像分类FAQ第二季](docs/zh_CN/faq_series/faq_2021_s1.md) +- [许可证书](#许可证书) +- [贡献代码](#贡献代码) + + +## PP-ShiTu图像识别系统介绍 + +
+ +
+ +PP-ShiTu是一个实用的轻量级通用图像识别系统,主要由主体检测、特征学习和向量检索三个模块组成。该系统从骨干网络选择和调整、损失函数的选择、数据增强、学习率变换策略、正则化参数选择、预训练模型使用以及模型裁剪量化8个方面,采用多种策略,对各个模块的模型进行优化,最终得到在CPU上仅0.2s即可完成10w+库的图像识别的系统。更多细节请参考[PP-ShiTu技术方案](https://arxiv.org/pdf/2111.00775.pdf)。 + + + +## PP-ShiTu图像识别系统效果展示 +- 瓶装饮料识别 +
+ +
+ +- 商品识别 +
+ +
+ +- 动漫人物识别 +
+ +
+ +- logo识别 +
+ +
+ + +- 车辆识别 +
+ +
+ + + + +## 许可证书 +本项目的发布受Apache 2.0 license许可认证。 + + + +## 贡献代码 +我们非常欢迎你为PaddleClas贡献代码,也十分感谢你的反馈。 +如果想为PaddleCLas贡献代码,可以参考[贡献指南](./docs/zh_CN/advanced_tutorials/how_to_contribute.md)。 + +- 非常感谢[nblib](https://github.com/nblib)修正了PaddleClas中RandErasing的数据增广配置文件。 +- 非常感谢[chenpy228](https://github.com/chenpy228)修正了PaddleClas文档中的部分错别字。 +- 非常感谢[jm12138](https://github.com/jm12138)为PaddleClas添加ViT,DeiT系列模型和RepVGG系列模型。 diff --git a/cv/classification/resnet50/paddlepaddle/README_en.md b/cv/classification/resnet50/paddlepaddle/README_en.md new file mode 100644 index 000000000..9b0d7c85d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/README_en.md @@ -0,0 +1,135 @@ +[简体中文](README_ch.md) | English + +# PaddleClas + +## Introduction + +PaddleClas is an image recognition toolset for industry and academia, helping users train better computer vision models and apply them in real scenarios. + +**Recent updates** + +- 2022.4.21 Added the related [code](https://github.com/PaddlePaddle/PaddleClas/pull/1820/files) of the CVPR2022 oral paper [MixFormer](https://arxiv.org/pdf/2204.02557.pdf). + +- 2021.09.17 Add PP-LCNet series model developed by PaddleClas, these models show strong competitiveness on Intel CPUs. +For the introduction of PP-LCNet, please refer to [paper](https://arxiv.org/pdf/2109.15099.pdf) or [PP-LCNet model introduction](docs/en/models/PP-LCNet_en.md). The metrics and pretrained model are available [here](docs/en/ImageNet_models_en.md). + +- 2021.06.29 Add Swin-transformer series model,Highest top1 acc on ImageNet1k dataset reaches 87.2%, training, evaluation and inference are all supported. Pretrained models can be downloaded [here](docs/en/models/models_intro_en.md). +- 2021.06.16 PaddleClas release/2.2. Add metric learning and vector search modules. Add product recognition, animation character recognition, vehicle recognition and logo recognition. Added 30 pretrained models of LeViT, Twins, TNT, DLA, HarDNet, and RedNet, and the accuracy is roughly the same as that of the paper. +- [more](./docs/en/update_history_en.md) + +## Features + +- A practical image recognition system consist of detection, feature learning and retrieval modules, widely applicable to all types of image recognition tasks. +Four sample solutions are provided, including product recognition, vehicle recognition, logo recognition and animation character recognition. + +- Rich library of pre-trained models: Provide a total of 164 ImageNet pre-trained models in 35 series, among which 6 selected series of models support fast structural modification. + +- Comprehensive and easy-to-use feature learning components: 12 metric learning methods are integrated and can be combined and switched at will through configuration files. + +- SSLD knowledge distillation: The 14 classification pre-training models generally improved their accuracy by more than 3%; among them, the ResNet50_vd model achieved a Top-1 accuracy of 84.0% on the Image-Net-1k dataset and the Res2Net200_vd pre-training model achieved a Top-1 accuracy of 85.1%. + +- Data augmentation: Provide 8 data augmentation algorithms such as AutoAugment, Cutout, Cutmix, etc. with detailed introduction, code replication and evaluation of effectiveness in a unified experimental environment. + + + + +
+ +
+ + +## Welcome to Join the Technical Exchange Group + +* You can also scan the QR code below to join the PaddleClas QQ group and WeChat group (add the assistant's WeChat and reply "C") to get more efficient answers to your questions and to communicate with developers from all walks of life. We look forward to hearing from you. + +
+ + +
+ +## Quick Start +Quick experience of image recognition:[Link](./docs/en/tutorials/quick_start_recognition_en.md) + +## Tutorials + +- [Quick Installation](./docs/en/tutorials/install_en.md) +- [Quick Start of Recognition](./docs/en/tutorials/quick_start_recognition_en.md) +- [Introduction to Image Recognition Systems](#Introduction_to_Image_Recognition_Systems) +- [Demo images](#Demo_images) +- Algorithms Introduction + - [Backbone Network and Pre-trained Model Library](./docs/en/ImageNet_models_en.md) + - [Mainbody Detection](./docs/en/application/mainbody_detection_en.md) + - [Image Classification](./docs/en/tutorials/image_classification_en.md) + - [Feature Learning](./docs/en/application/feature_learning_en.md) + - [Product Recognition](./docs/en/application/product_recognition_en.md) + - [Vehicle Recognition](./docs/en/application/vehicle_recognition_en.md) + - [Logo Recognition](./docs/en/application/logo_recognition_en.md) + - [Animation Character Recognition](./docs/en/application/cartoon_character_recognition_en.md) + - [Vector Search](./deploy/vector_search/README.md) +- Models Training/Evaluation + - [Image Classification](./docs/en/tutorials/getting_started_en.md) + - [Feature Learning](./docs/en/tutorials/getting_started_retrieval_en.md) +- Inference Model Prediction + - [Python Inference](./docs/en/inference.md) + - [C++ Classfication Inference](./deploy/cpp/readme_en.md), [C++ PP-ShiTu Inference](deploy/cpp_shitu/readme_en.md) +- Model Deploy (only support classification for now, recognition coming soon) + - [Hub Serving Deployment](./deploy/hubserving/readme_en.md) + - [Mobile Deployment](./deploy/lite/readme_en.md) + - [Inference Using whl](./docs/en/whl_en.md) +- Advanced Tutorial + - [Knowledge Distillation](./docs/en/advanced_tutorials/distillation/distillation_en.md) + - [Model Quantization](./docs/en/extension/paddle_quantization_en.md) + - [Data Augmentation](./docs/en/advanced_tutorials/image_augmentation/ImageAugment_en.md) +- [License](#License) +- [Contribution](#Contribution) + + +## Introduction to Image Recognition Systems + +
+ +
+ +Image recognition can be divided into three steps: +- (1) Identify region proposals for target objects through a detection model; +- (2) Extract features for each region proposal; +- (3) Search the features against the retrieval database and output the results. + +For a new, unseen category there is no need to retrain the model: just prepare images of the new category, extract their features, and update the retrieval database; the new category can then be recognised (see the sketch after the demo images below). + + +## Demo images [more](https://github.com/PaddlePaddle/PaddleClas/tree/release/2.2/docs/images/recognition/more_demo_images) +- Product recognition +
+ +
+ +- Cartoon character recognition +
+ +
+ +- Logo recognition +
+ +
+ +- Car recognition +
+ +
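The three-step flow described under "Introduction to Image Recognition Systems" (detect regions, extract features, search a retrieval database) can be illustrated with a small self-contained sketch. This is not PaddleClas code: `detect_regions`, `extract_feature`, and `SimpleGallery` are hypothetical NumPy-only stand-ins for the PP-ShiTu mainbody detector, feature extractor, and vector-search index, shown only to make clear why registering a new category is a gallery update rather than a retraining step.

```python
# Minimal sketch of the retrieval-based recognition flow; all names are
# illustrative placeholders, not PaddleClas APIs.
import numpy as np


def detect_regions(image: np.ndarray) -> list:
    """Step 1: return cropped region proposals (here, the whole image as a stand-in)."""
    return [image]


def extract_feature(region: np.ndarray) -> np.ndarray:
    """Step 2: map a region to an L2-normalized embedding (random projection as a stand-in)."""
    rng = np.random.default_rng(0)
    proj = rng.standard_normal((region.size, 128))
    feat = region.reshape(-1) @ proj
    return feat / (np.linalg.norm(feat) + 1e-12)


class SimpleGallery:
    """Step 3: brute-force cosine-similarity index standing in for the vector-search module."""

    def __init__(self) -> None:
        self.labels = []
        self.feats = []

    def add(self, label: str, images: list) -> None:
        # Registering a new category only updates the gallery; the model is not retrained.
        for img in images:
            for region in detect_regions(img):
                self.labels.append(label)
                self.feats.append(extract_feature(region))

    def recognize(self, image: np.ndarray) -> str:
        query = extract_feature(detect_regions(image)[0])
        sims = np.stack(self.feats) @ query  # cosine similarity, since features are normalized
        return self.labels[int(np.argmax(sims))]


gallery = SimpleGallery()
gallery.add("bottled_drink", [np.random.rand(64, 64, 3)])
gallery.add("logo", [np.random.rand(64, 64, 3)])
print(gallery.recognize(np.random.rand(64, 64, 3)))
```

In a real deployment each stand-in is replaced by the corresponding PP-ShiTu module, but the control flow (detect, embed, nearest-neighbour lookup) stays the same.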
+ + +## License +PaddleClas is released under the Apache 2.0 license Apache 2.0 license + + + +## Contribution +Contributions are highly welcomed and we would really appreciate your feedback!! + + +- Thank [nblib](https://github.com/nblib) to fix bug of RandErasing. +- Thank [chenpy228](https://github.com/chenpy228) to fix some typos PaddleClas. +- Thank [jm12138](https://github.com/jm12138) to add ViT, DeiT models and RepVGG models into PaddleClas. +- Thank [FutureSI](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/76563) to parse and summarize the PaddleClas code. diff --git a/cv/classification/resnet50/paddlepaddle/__init__.py b/cv/classification/resnet50/paddlepaddle/__init__.py new file mode 100644 index 000000000..2128a6cc7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = ['PaddleClas'] +from .paddleclas import PaddleClas +from ppcls.arch.backbone import * diff --git a/cv/classification/resnet50/paddlepaddle/hubconf.py b/cv/classification/resnet50/paddlepaddle/hubconf.py new file mode 100644 index 000000000..b7f76745a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/hubconf.py @@ -0,0 +1,788 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +dependencies = ['paddle'] + +import paddle +import os +import sys + + +class _SysPathG(object): + """ + _SysPathG used to add/clean path for sys.path. Making sure minimal pkgs dependents by skiping parent dirs. 
+ + __enter__ + add path into sys.path + __exit__ + clean user's sys.path to avoid unexpect behaviors + """ + + def __init__(self, path): + self.path = path + + def __enter__(self, ): + sys.path.insert(0, self.path) + + def __exit__(self, type, value, traceback): + _p = sys.path.pop(0) + assert _p == self.path, 'Make sure sys.path cleaning {} correctly.'.format( + self.path) + + +with _SysPathG(os.path.dirname(os.path.abspath(__file__)), ): + import ppcls + import ppcls.arch.backbone as backbone + + def ppclas_init(): + if ppcls.utils.logger._logger is None: + ppcls.utils.logger.init_logger() + + ppclas_init() + + def _load_pretrained_parameters(model, name): + url = 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/{}_pretrained.pdparams'.format( + name) + path = paddle.utils.download.get_weights_path_from_url(url) + model.set_state_dict(paddle.load(path)) + return model + + def alexnet(pretrained=False, **kwargs): + """ + AlexNet + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `AlexNet` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.AlexNet(**kwargs) + + return model + + def vgg11(pretrained=False, **kwargs): + """ + VGG11 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False` + Returns: + model: nn.Layer. Specific `VGG11` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.VGG11(**kwargs) + + return model + + def vgg13(pretrained=False, **kwargs): + """ + VGG13 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False` + Returns: + model: nn.Layer. Specific `VGG13` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.VGG13(**kwargs) + + return model + + def vgg16(pretrained=False, **kwargs): + """ + VGG16 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False` + Returns: + model: nn.Layer. Specific `VGG16` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.VGG16(**kwargs) + + return model + + def vgg19(pretrained=False, **kwargs): + """ + VGG19 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False` + Returns: + model: nn.Layer. Specific `VGG19` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.VGG19(**kwargs) + + return model + + def resnet18(pretrained=False, **kwargs): + """ + ResNet18 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. 
Output dim of last fc layer. + input_image_channel: int=3. The number of input image channels + data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC') + Returns: + model: nn.Layer. Specific `ResNet18` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNet18(**kwargs) + + return model + + def resnet34(pretrained=False, **kwargs): + """ + ResNet34 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + input_image_channel: int=3. The number of input image channels + data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC') + Returns: + model: nn.Layer. Specific `ResNet34` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNet34(**kwargs) + + return model + + def resnet50(pretrained=False, **kwargs): + """ + ResNet50 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + input_image_channel: int=3. The number of input image channels + data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC') + Returns: + model: nn.Layer. Specific `ResNet50` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNet50(**kwargs) + + return model + + def resnet101(pretrained=False, **kwargs): + """ + ResNet101 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + input_image_channel: int=3. The number of input image channels + data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC') + Returns: + model: nn.Layer. Specific `ResNet101` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNet101(**kwargs) + + return model + + def resnet152(pretrained=False, **kwargs): + """ + ResNet152 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + input_image_channel: int=3. The number of input image channels + data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC') + Returns: + model: nn.Layer. Specific `ResNet152` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNet152(**kwargs) + + return model + + def squeezenet1_0(pretrained=False, **kwargs): + """ + SqueezeNet1_0 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `SqueezeNet1_0` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.SqueezeNet1_0(**kwargs) + + return model + + def squeezenet1_1(pretrained=False, **kwargs): + """ + SqueezeNet1_1 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `SqueezeNet1_1` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.SqueezeNet1_1(**kwargs) + + return model + + def densenet121(pretrained=False, **kwargs): + """ + DenseNet121 + Args: + pretrained: bool=False. 
If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + dropout: float=0. Probability of setting units to zero. + bn_size: int=4. The number of channals per group + Returns: + model: nn.Layer. Specific `DenseNet121` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.DenseNet121(**kwargs) + + return model + + def densenet161(pretrained=False, **kwargs): + """ + DenseNet161 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + dropout: float=0. Probability of setting units to zero. + bn_size: int=4. The number of channals per group + Returns: + model: nn.Layer. Specific `DenseNet161` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.DenseNet161(**kwargs) + + return model + + def densenet169(pretrained=False, **kwargs): + """ + DenseNet169 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + dropout: float=0. Probability of setting units to zero. + bn_size: int=4. The number of channals per group + Returns: + model: nn.Layer. Specific `DenseNet169` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.DenseNet169(**kwargs) + + return model + + def densenet201(pretrained=False, **kwargs): + """ + DenseNet201 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + dropout: float=0. Probability of setting units to zero. + bn_size: int=4. The number of channals per group + Returns: + model: nn.Layer. Specific `DenseNet201` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.DenseNet201(**kwargs) + + return model + + def densenet264(pretrained=False, **kwargs): + """ + DenseNet264 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + dropout: float=0. Probability of setting units to zero. + bn_size: int=4. The number of channals per group + Returns: + model: nn.Layer. Specific `DenseNet264` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.DenseNet264(**kwargs) + + return model + + def inceptionv3(pretrained=False, **kwargs): + """ + InceptionV3 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `InceptionV3` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.InceptionV3(**kwargs) + + return model + + def inceptionv4(pretrained=False, **kwargs): + """ + InceptionV4 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `InceptionV4` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.InceptionV4(**kwargs) + + return model + + def googlenet(pretrained=False, **kwargs): + """ + GoogLeNet + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. 
Specific `GoogLeNet` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.GoogLeNet(**kwargs) + + return model + + def shufflenetv2_x0_25(pretrained=False, **kwargs): + """ + ShuffleNetV2_x0_25 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ShuffleNetV2_x0_25` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ShuffleNetV2_x0_25(**kwargs) + + return model + + def mobilenetv1(pretrained=False, **kwargs): + """ + MobileNetV1 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV1` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV1(**kwargs) + + return model + + def mobilenetv1_x0_25(pretrained=False, **kwargs): + """ + MobileNetV1_x0_25 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV1_x0_25` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV1_x0_25(**kwargs) + + return model + + def mobilenetv1_x0_5(pretrained=False, **kwargs): + """ + MobileNetV1_x0_5 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV1_x0_5` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV1_x0_5(**kwargs) + + return model + + def mobilenetv1_x0_75(pretrained=False, **kwargs): + """ + MobileNetV1_x0_75 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV1_x0_75` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV1_x0_75(**kwargs) + + return model + + def mobilenetv2_x0_25(pretrained=False, **kwargs): + """ + MobileNetV2_x0_25 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV2_x0_25` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV2_x0_25(**kwargs) + + return model + + def mobilenetv2_x0_5(pretrained=False, **kwargs): + """ + MobileNetV2_x0_5 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV2_x0_5` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV2_x0_5(**kwargs) + + return model + + def mobilenetv2_x0_75(pretrained=False, **kwargs): + """ + MobileNetV2_x0_75 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV2_x0_75` model depends on args. 
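
The hubconf entry points above (and the remaining MobileNet/ResNeXt variants that follow) all share the same shape: they forward `pretrained` plus any extra keyword arguments to the matching `ppcls.arch.backbone` constructor and return an `nn.Layer`. Below is a minimal usage sketch, assuming PaddlePaddle is installed and this `hubconf.py` (with its bundled `ppcls` package) is importable; the commented `paddle.hub.load` call is shown only as the usual way Paddle consumes a `hubconf.py`, not as something added by this patch, and the repo path in it is illustrative.

```python
import paddle

# Direct call: each entry point returns a randomly initialised backbone when
# pretrained=False; pretrained=True would download the official weights.
from hubconf import resnet50  # assumes hubconf.py is on the Python path

model = resnet50(pretrained=False)
logits = model(paddle.randn([1, 3, 224, 224]))
print(logits.shape)  # [1, 1000] with the default class dimension

# Assumed alternative through Paddle's hub loader (path is a placeholder):
# model = paddle.hub.load('cv/classification/resnet50/paddlepaddle',
#                         model='resnet50', source='local', pretrained=False)
```
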
+ """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV2_x0_75(**kwargs) + + return model + + def mobilenetv2_x1_5(pretrained=False, **kwargs): + """ + MobileNetV2_x1_5 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV2_x1_5` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV2_x1_5(**kwargs) + + return model + + def mobilenetv2_x2_0(pretrained=False, **kwargs): + """ + MobileNetV2_x2_0 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV2_x2_0` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV2_x2_0(**kwargs) + + return model + + def mobilenetv3_large_x0_35(pretrained=False, **kwargs): + """ + MobileNetV3_large_x0_35 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x0_35` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_large_x0_35(**kwargs) + + return model + + def mobilenetv3_large_x0_5(pretrained=False, **kwargs): + """ + MobileNetV3_large_x0_5 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x0_5` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_large_x0_5(**kwargs) + + return model + + def mobilenetv3_large_x0_75(pretrained=False, **kwargs): + """ + MobileNetV3_large_x0_75 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x0_75` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_large_x0_75(**kwargs) + + return model + + def mobilenetv3_large_x1_0(pretrained=False, **kwargs): + """ + MobileNetV3_large_x1_0 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x1_0` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_large_x1_0(**kwargs) + + return model + + def mobilenetv3_large_x1_25(pretrained=False, **kwargs): + """ + MobileNetV3_large_x1_25 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x1_25` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_large_x1_25(**kwargs) + + return model + + def mobilenetv3_small_x0_35(pretrained=False, **kwargs): + """ + MobileNetV3_small_x0_35 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. 
Specific `MobileNetV3_small_x0_35` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_small_x0_35(**kwargs) + + return model + + def mobilenetv3_small_x0_5(pretrained=False, **kwargs): + """ + MobileNetV3_small_x0_5 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x0_5` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_small_x0_5(**kwargs) + + return model + + def mobilenetv3_small_x0_75(pretrained=False, **kwargs): + """ + MobileNetV3_small_x0_75 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x0_75` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_small_x0_75(**kwargs) + + return model + + def mobilenetv3_small_x1_0(pretrained=False, **kwargs): + """ + MobileNetV3_small_x1_0 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x1_0` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_small_x1_0(**kwargs) + + return model + + def mobilenetv3_small_x1_25(pretrained=False, **kwargs): + """ + MobileNetV3_small_x1_25 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x1_25` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.MobileNetV3_small_x1_25(**kwargs) + + return model + + def resnext101_32x4d(pretrained=False, **kwargs): + """ + ResNeXt101_32x4d + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt101_32x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNeXt101_32x4d(**kwargs) + + return model + + def resnext101_64x4d(pretrained=False, **kwargs): + """ + ResNeXt101_64x4d + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt101_64x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNeXt101_64x4d(**kwargs) + + return model + + def resnext152_32x4d(pretrained=False, **kwargs): + """ + ResNeXt152_32x4d + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt152_32x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNeXt152_32x4d(**kwargs) + + return model + + def resnext152_64x4d(pretrained=False, **kwargs): + """ + ResNeXt152_64x4d + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. 
Specific `ResNeXt152_64x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNeXt152_64x4d(**kwargs) + + return model + + def resnext50_32x4d(pretrained=False, **kwargs): + """ + ResNeXt50_32x4d + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt50_32x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNeXt50_32x4d(**kwargs) + + return model + + def resnext50_64x4d(pretrained=False, **kwargs): + """ + ResNeXt50_64x4d + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt50_64x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.ResNeXt50_64x4d(**kwargs) + + return model + + def darknet53(pretrained=False, **kwargs): + """ + DarkNet53 + Args: + pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise. + kwargs: + class_dim: int=1000. Output dim of last fc layer. + Returns: + model: nn.Layer. Specific `ResNeXt50_64x4d` model depends on args. + """ + kwargs.update({'pretrained': pretrained}) + model = backbone.DarkNet53(**kwargs) + + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/__init__.py new file mode 100644 index 000000000..d6cdb6f8f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import optimizer + +from .arch import * +from .optimizer import * +from .data import * +from .utils import * diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/__init__.py new file mode 100644 index 000000000..2d5e29db8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/__init__.py @@ -0,0 +1,134 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import copy +import importlib + +import paddle.nn as nn +from paddle.jit import to_static +from paddle.static import InputSpec + +from . 
import backbone, gears +from .backbone import * +from .gears import build_gear +from .utils import * +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils import logger +from ppcls.utils.save_load import load_dygraph_pretrain +from ppcls.arch.slim import prune_model, quantize_model + +__all__ = ["build_model", "RecModel", "DistillationModel"] + + +def build_model(config): + arch_config = copy.deepcopy(config["Arch"]) + model_type = arch_config.pop("name") + mod = importlib.import_module(__name__) + arch = getattr(mod, model_type)(**arch_config) + if isinstance(arch, TheseusLayer): + prune_model(config, arch) + quantize_model(config, arch) + return arch + + +def apply_to_static(config, model): + support_to_static = config['Global'].get('to_static', False) + + if support_to_static: + specs = None + if 'image_shape' in config['Global']: + specs = [InputSpec([None] + config['Global']['image_shape'])] + model = to_static(model, input_spec=specs) + logger.info("Successfully to apply @to_static with specs: {}".format( + specs)) + return model + + +class RecModel(TheseusLayer): + def __init__(self, **config): + super().__init__() + backbone_config = config["Backbone"] + backbone_name = backbone_config.pop("name") + self.backbone = eval(backbone_name)(**backbone_config) + if "BackboneStopLayer" in config: + backbone_stop_layer = config["BackboneStopLayer"]["name"] + self.backbone.stop_after(backbone_stop_layer) + + if "Neck" in config: + self.neck = build_gear(config["Neck"]) + else: + self.neck = None + + if "Head" in config: + self.head = build_gear(config["Head"]) + else: + self.head = None + + def forward(self, x, label=None): + out = dict() + x = self.backbone(x) + out["backbone"] = x + if self.neck is not None: + x = self.neck(x) + out["neck"] = x + out["features"] = x + if self.head is not None: + y = self.head(x, label) + out["logits"] = y + return out + + +class DistillationModel(nn.Layer): + def __init__(self, + models=None, + pretrained_list=None, + freeze_params_list=None, + **kargs): + super().__init__() + assert isinstance(models, list) + self.model_list = [] + self.model_name_list = [] + if pretrained_list is not None: + assert len(pretrained_list) == len(models) + + if freeze_params_list is None: + freeze_params_list = [False] * len(models) + assert len(freeze_params_list) == len(models) + for idx, model_config in enumerate(models): + assert len(model_config) == 1 + key = list(model_config.keys())[0] + model_config = model_config[key] + model_name = model_config.pop("name") + model = eval(model_name)(**model_config) + + if freeze_params_list[idx]: + for param in model.parameters(): + param.trainable = False + self.model_list.append(self.add_sublayer(key, model)) + self.model_name_list.append(key) + + if pretrained_list is not None: + for idx, pretrained in enumerate(pretrained_list): + if pretrained is not None: + load_dygraph_pretrain( + self.model_name_list[idx], path=pretrained) + + def forward(self, x, label=None): + result_dict = dict() + for idx, model_name in enumerate(self.model_name_list): + if label is None: + result_dict[model_name] = self.model_list[idx](x) + else: + result_dict[model_name] = self.model_list[idx](x, label) + return result_dict diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/__init__.py new file mode 100644 index 000000000..74d266414 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/__init__.py @@ 
-0,0 +1,34 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import inspect + +from ppcls.arch.backbone.legendary_models.resnet import ResNet50, ResNet50_vd +from ppcls.arch.backbone.model_zoo.resnet_vc import ResNet50_vc + +# help whl get all the models' api (class type) and components' api (func type) +def get_apis(): + current_func = sys._getframe().f_code.co_name + current_module = sys.modules[__name__] + api = [] + for _, obj in inspect.getmembers(current_module, + inspect.isclass) + inspect.getmembers( + current_module, inspect.isfunction): + api.append(obj.__name__) + api.remove(current_func) + return api + + +__all__ = get_apis() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/base/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/base/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/base/theseus_layer.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/base/theseus_layer.py new file mode 100644 index 000000000..908d94445 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/base/theseus_layer.py @@ -0,0 +1,301 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple, List, Dict, Union, Callable, Any + +from paddle import nn +from ppcls.utils import logger + + +class Identity(nn.Layer): + def __init__(self): + super(Identity, self).__init__() + + def forward(self, inputs): + return inputs + + +class TheseusLayer(nn.Layer): + def __init__(self, *args, **kwargs): + super(TheseusLayer, self).__init__() + self.res_dict = {} + self.res_name = self.full_name() + self.pruner = None + self.quanter = None + + def _return_dict_hook(self, layer, input, output): + res_dict = {"output": output} + # 'list' is needed to avoid error raised by popping self.res_dict + for res_key in list(self.res_dict): + # clear the res_dict because the forward process may change according to input + res_dict[res_key] = self.res_dict.pop(res_key) + return res_dict + + def init_res(self, + stages_pattern, + return_patterns=None, + return_stages=None): + if return_patterns and return_stages: + msg = f"The 'return_patterns' would be ignored when 'return_stages' is set." 
+ logger.warning(msg) + return_stages = None + + if return_stages is True: + return_patterns = stages_pattern + # return_stages is int or bool + if type(return_stages) is int: + return_stages = [return_stages] + if isinstance(return_stages, list): + if max(return_stages) > len(stages_pattern) or min( + return_stages) < 0: + msg = f"The 'return_stages' set error. Illegal value(s) have been ignored. The stages' pattern list is {stages_pattern}." + logger.warning(msg) + return_stages = [ + val for val in return_stages + if val >= 0 and val < len(stages_pattern) + ] + return_patterns = [stages_pattern[i] for i in return_stages] + + if return_patterns: + self.update_res(return_patterns) + + def replace_sub(self, *args, **kwargs) -> None: + msg = "The function 'replace_sub()' is deprecated, please use 'upgrade_sublayer()' instead." + logger.error(DeprecationWarning(msg)) + raise DeprecationWarning(msg) + + def upgrade_sublayer(self, + layer_name_pattern: Union[str, List[str]], + handle_func: Callable[[nn.Layer, str], nn.Layer] + ) -> Dict[str, nn.Layer]: + """use 'handle_func' to modify the sub-layer(s) specified by 'layer_name_pattern'. + + Args: + layer_name_pattern (Union[str, List[str]]): The name of layer to be modified by 'handle_func'. + handle_func (Callable[[nn.Layer, str], nn.Layer]): The function to modify target layer specified by 'layer_name_pattern'. The formal params are the layer(nn.Layer) and pattern(str) that is (a member of) layer_name_pattern (when layer_name_pattern is List type). And the return is the layer processed. + + Returns: + Dict[str, nn.Layer]: The key is the pattern and corresponding value is the result returned by 'handle_func()'. + + Examples: + + from paddle import nn + import paddleclas + + def rep_func(layer: nn.Layer, pattern: str): + new_layer = nn.Conv2D( + in_channels=layer._in_channels, + out_channels=layer._out_channels, + kernel_size=5, + padding=2 + ) + return new_layer + + net = paddleclas.MobileNetV1() + res = net.replace_sub(layer_name_pattern=["blocks[11].depthwise_conv.conv", "blocks[12].depthwise_conv.conv"], handle_func=rep_func) + print(res) + # {'blocks[11].depthwise_conv.conv': the corresponding new_layer, 'blocks[12].depthwise_conv.conv': the corresponding new_layer} + """ + + if not isinstance(layer_name_pattern, list): + layer_name_pattern = [layer_name_pattern] + + hit_layer_pattern_list = [] + for pattern in layer_name_pattern: + # parse pattern to find target layer and its parent + layer_list = parse_pattern_str(pattern=pattern, parent_layer=self) + if not layer_list: + continue + sub_layer_parent = layer_list[-2]["layer"] if len( + layer_list) > 1 else self + + sub_layer = layer_list[-1]["layer"] + sub_layer_name = layer_list[-1]["name"] + sub_layer_index = layer_list[-1]["index"] + + new_sub_layer = handle_func(sub_layer, pattern) + + if sub_layer_index: + getattr(sub_layer_parent, + sub_layer_name)[sub_layer_index] = new_sub_layer + else: + setattr(sub_layer_parent, sub_layer_name, new_sub_layer) + + hit_layer_pattern_list.append(pattern) + return hit_layer_pattern_list + + def stop_after(self, stop_layer_name: str) -> bool: + """stop forward and backward after 'stop_layer_name'. + + Args: + stop_layer_name (str): The name of layer that stop forward and backward after this layer. + + Returns: + bool: 'True' if successful, 'False' otherwise. 
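
`stop_after()` above is what `RecModel` relies on when a `BackboneStopLayer` is configured: every sub-layer that comes after the named pattern is swapped for `Identity`, turning a classifier into a feature extractor. A hedged sketch, assuming the `ppcls` package from this patch is importable and using the `ResNet50` backbone defined later in the same patch:

```python
import paddle
from ppcls.arch.backbone import ResNet50  # exported further down in this patch

net = ResNet50(pretrained=False)
# "blocks[15]" is the last residual block of ResNet50 (see MODEL_STAGES_PATTERN);
# avg_pool, flatten and fc after it are replaced by Identity.
assert net.stop_after("blocks[15]")

feats = net(paddle.randn([1, 3, 224, 224]))
print(feats.shape)  # [1, 2048, 7, 7] once the classification head is gone
```
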
+ """ + + layer_list = parse_pattern_str(stop_layer_name, self) + if not layer_list: + return False + + parent_layer = self + for layer_dict in layer_list: + name, index = layer_dict["name"], layer_dict["index"] + if not set_identity(parent_layer, name, index): + msg = f"Failed to set the layers that after stop_layer_name('{stop_layer_name}') to IdentityLayer. The error layer's name is '{name}'." + logger.warning(msg) + return False + parent_layer = layer_dict["layer"] + + return True + + def update_res( + self, + return_patterns: Union[str, List[str]]) -> Dict[str, nn.Layer]: + """update the result(s) to be returned. + + Args: + return_patterns (Union[str, List[str]]): The name of layer to return output. + + Returns: + Dict[str, nn.Layer]: The pattern(str) and corresponding layer(nn.Layer) that have been set successfully. + """ + + # clear res_dict that could have been set + self.res_dict = {} + + class Handler(object): + def __init__(self, res_dict): + # res_dict is a reference + self.res_dict = res_dict + + def __call__(self, layer, pattern): + layer.res_dict = self.res_dict + layer.res_name = pattern + if hasattr(layer, "hook_remove_helper"): + layer.hook_remove_helper.remove() + layer.hook_remove_helper = layer.register_forward_post_hook( + save_sub_res_hook) + return layer + + handle_func = Handler(self.res_dict) + + hit_layer_pattern_list = self.upgrade_sublayer( + return_patterns, handle_func=handle_func) + + if hasattr(self, "hook_remove_helper"): + self.hook_remove_helper.remove() + self.hook_remove_helper = self.register_forward_post_hook( + self._return_dict_hook) + + return hit_layer_pattern_list + + +def save_sub_res_hook(layer, input, output): + layer.res_dict[layer.res_name] = output + + +def set_identity(parent_layer: nn.Layer, + layer_name: str, + layer_index: str=None) -> bool: + """set the layer specified by layer_name and layer_index to Indentity. + + Args: + parent_layer (nn.Layer): The parent layer of target layer specified by layer_name and layer_index. + layer_name (str): The name of target layer to be set to Indentity. + layer_index (str, optional): The index of target layer to be set to Indentity in parent_layer. Defaults to None. + + Returns: + bool: True if successfully, False otherwise. + """ + + stop_after = False + for sub_layer_name in parent_layer._sub_layers: + if stop_after: + parent_layer._sub_layers[sub_layer_name] = Identity() + continue + if sub_layer_name == layer_name: + stop_after = True + + if layer_index and stop_after: + stop_after = False + for sub_layer_index in parent_layer._sub_layers[ + layer_name]._sub_layers: + if stop_after: + parent_layer._sub_layers[layer_name][ + sub_layer_index] = Identity() + continue + if layer_index == sub_layer_index: + stop_after = True + + return stop_after + + +def parse_pattern_str(pattern: str, parent_layer: nn.Layer) -> Union[ + None, List[Dict[str, Union[nn.Layer, str, None]]]]: + """parse the string type pattern. + + Args: + pattern (str): The pattern to discribe layer. + parent_layer (nn.Layer): The root layer relative to the pattern. + + Returns: + Union[None, List[Dict[str, Union[nn.Layer, str, None]]]]: None if failed. If successfully, the members are layers parsed in order: + [ + {"layer": first layer, "name": first layer's name parsed, "index": first layer's index parsed if exist}, + {"layer": second layer, "name": second layer's name parsed, "index": second layer's index parsed if exist}, + ... 
+ ] + """ + + pattern_list = pattern.split(".") + if not pattern_list: + msg = f"The pattern('{pattern}') is illegal. Please check and retry." + logger.warning(msg) + return None + + layer_list = [] + while len(pattern_list) > 0: + if '[' in pattern_list[0]: + target_layer_name = pattern_list[0].split('[')[0] + target_layer_index = pattern_list[0].split('[')[1].split(']')[0] + else: + target_layer_name = pattern_list[0] + target_layer_index = None + + target_layer = getattr(parent_layer, target_layer_name, None) + + if target_layer is None: + msg = f"Not found layer named('{target_layer_name}') specifed in pattern('{pattern}')." + logger.warning(msg) + return None + + if target_layer_index and target_layer: + if int(target_layer_index) < 0 or int(target_layer_index) >= len( + target_layer): + msg = f"Not found layer by index('{target_layer_index}') specifed in pattern('{pattern}'). The index should < {len(target_layer)} and > 0." + logger.warning(msg) + return None + + target_layer = target_layer[target_layer_index] + + layer_list.append({ + "layer": target_layer, + "name": target_layer_name, + "index": target_layer_index + }) + + pattern_list = pattern_list[1:] + parent_layer = target_layer + return layer_list diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/legendary_models/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/legendary_models/__init__.py new file mode 100644 index 000000000..550e5544c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/legendary_models/__init__.py @@ -0,0 +1,8 @@ +from .resnet import ResNet50 + +# from .resnet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152, ResNet18_vd, ResNet34_vd, ResNet50_vd, ResNet101_vd, ResNet152_vd +# from .hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W64_C +# from .mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75, MobileNetV1 +# from .mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25 +# from .inception_v3 import InceptionV3 +# from .vgg import VGG11, VGG13, VGG16, VGG19 \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/legendary_models/resnet.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/legendary_models/resnet.py new file mode 100644 index 000000000..74c5c5fa6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/legendary_models/resnet.py @@ -0,0 +1,591 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
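
Before the ResNet definition below, here is how the `TheseusLayer` pattern API above fits together: `update_res()` parses dotted/indexed patterns with `parse_pattern_str()`, hooks the matched sub-layers via `upgrade_sublayer()`, and makes `forward()` return a dict of intermediate results through `_return_dict_hook`. A hedged sketch, again assuming the patch's `ppcls` package is importable:

```python
import paddle
from ppcls.arch.backbone import ResNet50

net = ResNet50(pretrained=False)
# Patterns are attribute names, optionally indexed, joined by dots,
# e.g. "blocks[2]" or "blocks[2].conv1".
net.update_res(["blocks[2]", "blocks[12]"])

out = net(paddle.randn([1, 3, 224, 224]))
print(sorted(out.keys()))      # ['blocks[12]', 'blocks[2]', 'output']
print(out["blocks[2]"].shape)  # [1, 256, 56, 56] (end of the first stage)
```
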
+ +from __future__ import absolute_import, division, print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math + +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "ResNet18": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams", + "ResNet18_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_vd_pretrained.pdparams", + "ResNet34": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_pretrained.pdparams", + "ResNet34_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_vd_pretrained.pdparams", + "ResNet50": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams", + "ResNet50_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams", + "ResNet101": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_pretrained.pdparams", + "ResNet101_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_pretrained.pdparams", + "ResNet152": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_pretrained.pdparams", + "ResNet152_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_vd_pretrained.pdparams", + "ResNet200_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet200_vd_pretrained.pdparams", +} + +MODEL_STAGES_PATTERN = { + "ResNet18": ["blocks[1]", "blocks[3]", "blocks[5]", "blocks[7]"], + "ResNet34": ["blocks[2]", "blocks[6]", "blocks[12]", "blocks[15]"], + "ResNet50": ["blocks[2]", "blocks[6]", "blocks[12]", "blocks[15]"], + "ResNet101": ["blocks[2]", "blocks[6]", "blocks[29]", "blocks[32]"], + "ResNet152": ["blocks[2]", "blocks[10]", "blocks[46]", "blocks[49]"], + "ResNet200": ["blocks[2]", "blocks[14]", "blocks[62]", "blocks[65]"] +} + +__all__ = MODEL_URLS.keys() +''' +ResNet config: dict. + key: depth of ResNet. + values: config's dict of specific model. + keys: + block_type: Two different blocks in ResNet, BasicBlock and BottleneckBlock are optional. + block_depth: The number of blocks in different stages in ResNet. + num_channels: The number of channels to enter the next stage. 
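
As a quick sanity check of the naming (illustrative arithmetic only, not part of the patch): for the "50" entry defined just below, the usual ResNet-50 layer count falls out of `block_depth` directly, since a vb-style network has one stem convolution, three convolutions per bottleneck block, and one final fc layer.

```python
# Mirrors NET_CONFIG["50"] defined just below.
block_depth = [3, 4, 6, 3]     # bottleneck blocks per stage
convs_per_block = 3            # BottleneckBlock = 1x1 + 3x3 + 1x1 convolutions
stem_convs, fc_layers = 1, 1   # "vb" stem is a single 7x7 conv

print(stem_convs + convs_per_block * sum(block_depth) + fc_layers)  # 50
```
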
+''' +NET_CONFIG = { + "18": { + "block_type": "BasicBlock", + "block_depth": [2, 2, 2, 2], + "num_channels": [64, 64, 128, 256] + }, + "34": { + "block_type": "BasicBlock", + "block_depth": [3, 4, 6, 3], + "num_channels": [64, 64, 128, 256] + }, + "50": { + "block_type": "BottleneckBlock", + "block_depth": [3, 4, 6, 3], + "num_channels": [64, 256, 512, 1024] + }, + "101": { + "block_type": "BottleneckBlock", + "block_depth": [3, 4, 23, 3], + "num_channels": [64, 256, 512, 1024] + }, + "152": { + "block_type": "BottleneckBlock", + "block_depth": [3, 8, 36, 3], + "num_channels": [64, 256, 512, 1024] + }, + "200": { + "block_type": "BottleneckBlock", + "block_depth": [3, 12, 48, 3], + "num_channels": [64, 256, 512, 1024] + }, +} + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + self.is_vd_mode = is_vd_mode + self.act = act + self.avg_pool = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=False, + data_format=data_format) + self.bn = BatchNorm( + num_filters, + param_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=ParamAttr(learning_rate=lr_mult), + data_layout=data_format) + self.relu = nn.ReLU() + + def forward(self, x): + if self.is_vd_mode: + x = self.avg_pool(x) + x = self.conv(x) + x = self.bn(x) + if self.act: + x = self.relu(x) + return x + + +class BottleneckBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + lr_mult=lr_mult, + data_format=data_format) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride if if_first else 1, + is_vd_mode=False if if_first else True, + lr_mult=lr_mult, + data_format=data_format) + self.relu = nn.ReLU() + self.shortcut = shortcut + + def forward(self, x): + identity = x + x = self.conv0(x) + x = self.conv1(x) + x = self.conv2(x) + + if self.shortcut: + short = identity + else: + short = self.short(identity) + x = paddle.add(x=x, y=short) + x = self.relu(x) + return x + + +class BasicBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + + self.stride = stride + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + act=None, + lr_mult=lr_mult, + data_format=data_format) + if not shortcut: + self.short = ConvBNLayer( + 
num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=stride if if_first else 1, + is_vd_mode=False if if_first else True, + lr_mult=lr_mult, + data_format=data_format) + self.shortcut = shortcut + self.relu = nn.ReLU() + + def forward(self, x): + identity = x + x = self.conv0(x) + x = self.conv1(x) + if self.shortcut: + short = identity + else: + short = self.short(identity) + x = paddle.add(x=x, y=short) + x = self.relu(x) + return x + + +class ResNet(TheseusLayer): + """ + ResNet + Args: + config: dict. config of ResNet. + version: str="vb". Different version of ResNet, version vd can perform better. + class_num: int=1000. The number of classes. + lr_mult_list: list. Control the learning rate of different stages. + Returns: + model: nn.Layer. Specific ResNet model depends on args. + """ + + def __init__(self, + config, + stages_pattern, + version="vb", + class_num=1000, + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0], + data_format="NCHW", + input_image_channel=3, + return_patterns=None, + return_stages=None): + super().__init__() + + self.cfg = config + self.lr_mult_list = lr_mult_list + self.is_vd_mode = version == "vd" + self.class_num = class_num + self.num_filters = [64, 128, 256, 512] + self.block_depth = self.cfg["block_depth"] + self.block_type = self.cfg["block_type"] + self.num_channels = self.cfg["num_channels"] + self.channels_mult = 1 if self.num_channels[-1] == 256 else 4 + + assert isinstance(self.lr_mult_list, ( + list, tuple + )), "lr_mult_list should be in (list, tuple) but got {}".format( + type(self.lr_mult_list)) + assert len(self.lr_mult_list + ) == 5, "lr_mult_list length should be 5 but got {}".format( + len(self.lr_mult_list)) + + self.stem_cfg = { + #num_channels, num_filters, filter_size, stride + "vb": [[input_image_channel, 64, 7, 2]], + "vd": + [[input_image_channel, 32, 3, 2], [32, 32, 3, 1], [32, 64, 3, 1]] + } + + self.stem = nn.Sequential(* [ + ConvBNLayer( + num_channels=in_c, + num_filters=out_c, + filter_size=k, + stride=s, + act="relu", + lr_mult=self.lr_mult_list[0], + data_format=data_format) + for in_c, out_c, k, s in self.stem_cfg[version] + ]) + + self.max_pool = MaxPool2D( + kernel_size=3, stride=2, padding=1, data_format=data_format) + block_list = [] + for block_idx in range(len(self.block_depth)): + shortcut = False + for i in range(self.block_depth[block_idx]): + block_list.append(globals()[self.block_type]( + num_channels=self.num_channels[block_idx] if i == 0 else + self.num_filters[block_idx] * self.channels_mult, + num_filters=self.num_filters[block_idx], + stride=2 if i == 0 and block_idx != 0 else 1, + shortcut=shortcut, + if_first=block_idx == i == 0 if version == "vd" else True, + lr_mult=self.lr_mult_list[block_idx + 1], + data_format=data_format)) + shortcut = True + self.blocks = nn.Sequential(*block_list) + + self.avg_pool = AdaptiveAvgPool2D(1, data_format=data_format) + self.flatten = nn.Flatten() + self.avg_pool_channels = self.num_channels[-1] * 2 + stdv = 1.0 / math.sqrt(self.avg_pool_channels * 1.0) + self.fc = Linear( + self.avg_pool_channels, + self.class_num, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + + self.data_format = data_format + + super().init_res( + stages_pattern, + return_patterns=return_patterns, + return_stages=return_stages) + + def forward(self, x): + with paddle.static.amp.fp16_guard(): + if self.data_format == "NHWC": + x = paddle.transpose(x, [0, 2, 3, 1]) + x.stop_gradient = True + x = self.stem(x) + x = self.max_pool(x) + x = self.blocks(x) + x = self.avg_pool(x) 
+ x = self.flatten(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNet18(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet18 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet18` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["18"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet18"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18"], use_ssld) + return model + + +def ResNet18_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet18_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet18_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["18"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet18"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18_vd"], use_ssld) + return model + + +def ResNet34(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet34 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet34` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["34"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet34"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet34"], use_ssld) + return model + + +def ResNet34_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet34_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet34_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["34"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet34"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet34_vd"], use_ssld) + return model + + +def ResNet50(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet50 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet50` model depends on args. 
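
`_load_pretrained()` above accepts three forms: `False` keeps the random initialisation, `True` pulls the official weights listed in `MODEL_URLS`, and a string is treated as a local checkpoint handed to `load_dygraph_pretrain`. A hedged usage sketch follows; the local path in the last line is a placeholder, not a real file, and the `ppcls` package from this patch is assumed importable.

```python
import paddle
from ppcls.arch.backbone import ResNet50

# Random init; extra kwargs such as class_num are forwarded to ResNet.__init__.
model = ResNet50(pretrained=False, class_num=10)
print(model(paddle.randn([1, 3, 224, 224])).shape)  # [1, 10]

# model = ResNet50(pretrained=True)                # download official weights
# model = ResNet50(pretrained="./ResNet50_local")  # placeholder local checkpoint
```
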
+ """ + model = ResNet( + config=NET_CONFIG["50"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet50"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50"], use_ssld) + return model + + +def ResNet50_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet50_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet50_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["50"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet50"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50_vd"], use_ssld) + return model + + +def ResNet101(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet101 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet101` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["101"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet101"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet101"], use_ssld) + return model + + +def ResNet101_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet101_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet101_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["101"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet101"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet101_vd"], use_ssld) + return model + + +def ResNet152(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet152 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet152` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["152"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet152"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet152"], use_ssld) + return model + + +def ResNet152_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet152_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet152_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["152"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet152"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet152_vd"], use_ssld) + return model + + +def ResNet200_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet200_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. 
+ use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet200_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["200"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet200"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet200_vd"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/model_zoo/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/model_zoo/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/model_zoo/resnet_vc.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/model_zoo/resnet_vc.py new file mode 100644 index 000000000..6b972dc7b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/model_zoo/resnet_vc.py @@ -0,0 +1,309 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "ResNet50_vc": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vc_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + 
act='relu', + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride, + name=name + "_branch1") + + self.shortcut = shortcut + + self._num_channels_out = num_filters * 4 + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class BasicBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + name=None): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + act=None, + name=name + "_branch2b") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=stride, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=conv1) + y = F.relu(y) + return y + + +class ResNet_vc(nn.Layer): + def __init__(self, layers=50, class_num=1000): + super(ResNet_vc, self).__init__() + + self.layers = layers + supported_layers = [18, 34, 50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_channels = [64, 256, 512, + 1024] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512] + + self.conv1_1 = ConvBNLayer( + num_channels=3, + num_filters=32, + filter_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + num_channels=32, + num_filters=32, + filter_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + num_channels=32, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name="conv1_3") + + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_list = [] + if layers >= 50: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + name=conv_name)) + self.block_list.append(bottleneck_block) + shortcut = True + else: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BasicBlock( + 
num_channels=num_channels[block] + if i == 0 else num_filters[block], + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + name=conv_name)) + self.block_list.append(basic_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_0.w_0"), + bias_attr=ParamAttr(name="fc_0.b_0")) + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNet50_vc(pretrained=False, use_ssld=False, **kwargs): + model = ResNet_vc(layers=50, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNet50_vc"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/variant_models/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/variant_models/__init__.py new file mode 100644 index 000000000..ae9549246 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/variant_models/__init__.py @@ -0,0 +1,3 @@ +from .resnet_variant import ResNet50_last_stage_stride1 +# from .vgg_variant import VGG19Sigmoid +# from .pp_lcnet_variant import PPLCNet_x2_5_Tanh \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/variant_models/resnet_variant.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/variant_models/resnet_variant.py new file mode 100644 index 000000000..0219344b1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/backbone/variant_models/resnet_variant.py @@ -0,0 +1,23 @@ +from paddle.nn import Conv2D +from ppcls.arch.backbone.legendary_models.resnet import ResNet50, MODEL_URLS, _load_pretrained + +__all__ = ["ResNet50_last_stage_stride1"] + + +def ResNet50_last_stage_stride1(pretrained=False, use_ssld=False, **kwargs): + def replace_function(conv, pattern): + new_conv = Conv2D( + in_channels=conv._in_channels, + out_channels=conv._out_channels, + kernel_size=conv._kernel_size, + stride=1, + padding=conv._padding, + groups=conv._groups, + bias_attr=conv._bias_attr) + return new_conv + + pattern = ["blocks[13].conv1.conv", "blocks[13].short.conv"] + model = ResNet50(pretrained=False, use_ssld=use_ssld, **kwargs) + model.upgrade_sublayer(pattern, replace_function) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/__init__.py new file mode 100644 index 000000000..75ca41d8a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/__init__.py @@ -0,0 +1,32 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .arcmargin import ArcMargin +from .cosmargin import CosMargin +from .circlemargin import CircleMargin +from .fc import FC +from .vehicle_neck import VehicleNeck + +__all__ = ['build_gear'] + + +def build_gear(config): + support_dict = [ + 'ArcMargin', 'CosMargin', 'CircleMargin', 'FC', 'VehicleNeck' + ] + module_name = config.pop('name') + assert module_name in support_dict, Exception( + 'head only support {}'.format(support_dict)) + module_class = eval(module_name)(**config) + return module_class diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/arcmargin.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/arcmargin.py new file mode 100644 index 000000000..22cc76e1d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/arcmargin.py @@ -0,0 +1,72 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
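build_gear() above constructs a metric-learning head purely from a config dict: it pops the "name" key and forwards the remaining keys to that class's constructor. A minimal sketch of a call, assuming the package is imported from the cv/classification/resnet50/paddlepaddle directory; the keyword values are illustrative and must match the chosen head's __init__ (ArcMargin's signature appears in the file added just below):

from ppcls.arch.gears import build_gear

# Illustrative values only; build_gear pops "name" (mutating the dict) and then
# calls ArcMargin(embedding_size=512, class_num=1000, margin=0.5, scale=80.0).
head_cfg = {"name": "ArcMargin", "embedding_size": 512, "class_num": 1000,
            "margin": 0.5, "scale": 80.0}
head = build_gear(head_cfg)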
+ +import paddle +import paddle.nn as nn +import math + + +class ArcMargin(nn.Layer): + def __init__(self, + embedding_size, + class_num, + margin=0.5, + scale=80.0, + easy_margin=False): + super().__init__() + self.embedding_size = embedding_size + self.class_num = class_num + self.margin = margin + self.scale = scale + self.easy_margin = easy_margin + self.weight = self.create_parameter( + shape=[self.embedding_size, self.class_num], + is_bias=False, + default_initializer=paddle.nn.initializer.XavierNormal()) + + def forward(self, input, label=None): + input_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + input = paddle.divide(input, input_norm) + + weight_norm = paddle.sqrt( + paddle.sum(paddle.square(self.weight), axis=0, keepdim=True)) + weight = paddle.divide(self.weight, weight_norm) + + cos = paddle.matmul(input, weight) + if not self.training or label is None: + return cos + sin = paddle.sqrt(1.0 - paddle.square(cos) + 1e-6) + cos_m = math.cos(self.margin) + sin_m = math.sin(self.margin) + phi = cos * cos_m - sin * sin_m + + th = math.cos(self.margin) * (-1) + mm = math.sin(self.margin) * self.margin + if self.easy_margin: + phi = self._paddle_where_more_than(cos, 0, phi, cos) + else: + phi = self._paddle_where_more_than(cos, th, phi, cos - mm) + + one_hot = paddle.nn.functional.one_hot(label, self.class_num) + one_hot = paddle.squeeze(one_hot, axis=[1]) + output = paddle.multiply(one_hot, phi) + paddle.multiply( + (1.0 - one_hot), cos) + output = output * self.scale + return output + + def _paddle_where_more_than(self, target, limit, x, y): + mask = paddle.cast(x=(target > limit), dtype='float32') + output = paddle.multiply(mask, x) + paddle.multiply((1.0 - mask), y) + return output diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/circlemargin.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/circlemargin.py new file mode 100644 index 000000000..d1bce83cb --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/circlemargin.py @@ -0,0 +1,59 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
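ArcMargin above is an ArcFace-style head: features and class weights are L2-normalized so their product is cos(theta), an additive angular margin is applied to the target class during training via cos(theta + m) = cos(theta)*cos(m) - sin(theta)*sin(m), and the result is multiplied by `scale` before the cross-entropy loss. A minimal usage sketch with illustrative shapes, assuming it is run from the cv/classification/resnet50/paddlepaddle directory so the ppcls package resolves:

import paddle
from ppcls.arch.gears.arcmargin import ArcMargin

head = ArcMargin(embedding_size=512, class_num=1000)  # margin=0.5, scale=80.0 defaults
feat = paddle.randn([8, 512])                         # a batch of embeddings
label = paddle.randint(0, 1000, shape=[8, 1])         # int64 ids; the one_hot result is squeezed on axis 1
logits = head(feat, label)   # training mode: margin applied to the target class, then scaled
cosine = head(feat)          # label=None: plain cosine similarities are returned unchanged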
+ +import math +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class CircleMargin(nn.Layer): + def __init__(self, embedding_size, class_num, margin, scale): + super(CircleMargin, self).__init__() + self.scale = scale + self.margin = margin + self.embedding_size = embedding_size + self.class_num = class_num + + self.weight = self.create_parameter( + shape=[self.embedding_size, self.class_num], + is_bias=False, + default_initializer=paddle.nn.initializer.XavierNormal()) + + def forward(self, input, label): + feat_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + input = paddle.divide(input, feat_norm) + + weight_norm = paddle.sqrt( + paddle.sum(paddle.square(self.weight), axis=0, keepdim=True)) + weight = paddle.divide(self.weight, weight_norm) + + logits = paddle.matmul(input, weight) + if not self.training or label is None: + return logits + + alpha_p = paddle.clip(-logits.detach() + 1 + self.margin, min=0.) + alpha_n = paddle.clip(logits.detach() + self.margin, min=0.) + delta_p = 1 - self.margin + delta_n = self.margin + + m_hot = F.one_hot(label.reshape([-1]), num_classes=logits.shape[1]) + + logits_p = alpha_p * (logits - delta_p) + logits_n = alpha_n * (logits - delta_n) + pre_logits = logits_p * m_hot + logits_n * (1 - m_hot) + pre_logits = self.scale * pre_logits + + return pre_logits diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/cosmargin.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/cosmargin.py new file mode 100644 index 000000000..578b64c2b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/cosmargin.py @@ -0,0 +1,55 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
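CircleMargin differs from the ArcFace-style head in two details worth noting: the label is flattened with reshape([-1]) before one-hot encoding (so [N] and [N, 1] labels both work), and instead of a fixed angular margin each logit is re-weighted by the clipped, data-dependent factors alpha_p / alpha_n from the Circle Loss formulation. A short sketch; the margin and scale values below are illustrative and not taken from any config in this patch:

import paddle
from ppcls.arch.gears.circlemargin import CircleMargin

head = CircleMargin(embedding_size=512, class_num=1000, margin=0.25, scale=256)
feat = paddle.randn([8, 512])
label = paddle.randint(0, 1000, shape=[8])   # 1-D labels; a [8, 1] tensor would be flattened anyway
pre_logits = head(feat, label)               # Circle-loss re-weighted, scaled logits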
+ +import paddle +import math +import paddle.nn as nn + + +class CosMargin(paddle.nn.Layer): + def __init__(self, embedding_size, class_num, margin=0.35, scale=64.0): + super(CosMargin, self).__init__() + self.scale = scale + self.margin = margin + self.embedding_size = embedding_size + self.class_num = class_num + + self.weight = self.create_parameter( + shape=[self.embedding_size, self.class_num], + is_bias=False, + default_initializer=paddle.nn.initializer.XavierNormal()) + + def forward(self, input, label): + label.stop_gradient = True + + input_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + input = paddle.divide(input, input_norm) + + weight_norm = paddle.sqrt( + paddle.sum(paddle.square(self.weight), axis=0, keepdim=True)) + weight = paddle.divide(self.weight, weight_norm) + + cos = paddle.matmul(input, weight) + if not self.training or label is None: + return cos + + cos_m = cos - self.margin + + one_hot = paddle.nn.functional.one_hot(label, self.class_num) + one_hot = paddle.squeeze(one_hot, axis=[1]) + output = paddle.multiply(one_hot, cos_m) + paddle.multiply( + (1.0 - one_hot), cos) + output = output * self.scale + return output diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/fc.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/fc.py new file mode 100644 index 000000000..b32474195 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/fc.py @@ -0,0 +1,35 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
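CosMargin is the additive cosine-margin (CosFace-style) variant: it subtracts a fixed margin from the target-class cosine before scaling, and it follows the same [N, 1] label and one-hot-then-squeeze convention as ArcMargin. All three margin heads fall back to plain cosine logits outside training, which is what retrieval-style evaluation relies on; a small sketch of that eval-time behaviour with illustrative shapes:

import paddle
from ppcls.arch.gears.cosmargin import CosMargin

head = CosMargin(embedding_size=512, class_num=1000)  # margin=0.35, scale=64.0 defaults
feat = paddle.randn([4, 512])
label = paddle.randint(0, 1000, shape=[4, 1])

head.eval()                 # Layer.eval() clears self.training ...
cosine = head(feat, label)  # ... so the margin branch is skipped and raw cosines come back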
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +import paddle.nn as nn + + +class FC(nn.Layer): + def __init__(self, embedding_size, class_num): + super(FC, self).__init__() + self.embedding_size = embedding_size + self.class_num = class_num + weight_attr = paddle.ParamAttr( + initializer=paddle.nn.initializer.XavierNormal()) + self.fc = paddle.nn.Linear( + self.embedding_size, self.class_num, weight_attr=weight_attr) + + def forward(self, input, label=None): + out = self.fc(input) + return out diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/identity_head.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/identity_head.py new file mode 100644 index 000000000..7d11e5742 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/identity_head.py @@ -0,0 +1,9 @@ +from paddle import nn + + +class IdentityHead(nn.Layer): + def __init__(self): + super(IdentityHead, self).__init__() + + def forward(self, x, label=None): + return {"features": x, "logits": None} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/vehicle_neck.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/vehicle_neck.py new file mode 100644 index 000000000..05f4e333f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/gears/vehicle_neck.py @@ -0,0 +1,52 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +import paddle +import paddle.nn as nn + + +class VehicleNeck(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros', + weight_attr=None, + bias_attr=None, + data_format='NCHW'): + super().__init__() + self.conv = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + padding_mode=padding_mode, + weight_attr=weight_attr, + bias_attr=weight_attr, + data_format=data_format) + self.flatten = nn.Flatten() + + def forward(self, x): + x = self.conv(x) + x = self.flatten(x) + return x diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/slim/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/slim/__init__.py new file mode 100644 index 000000000..3733059ce --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/slim/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ppcls.arch.slim.prune import prune_model +from ppcls.arch.slim.quant import quantize_model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/slim/prune.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/slim/prune.py new file mode 100644 index 000000000..c0c9d220b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/slim/prune.py @@ -0,0 +1,65 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +import paddle +from ppcls.utils import logger + + +def prune_model(config, model): + if config.get("Slim", False) and config["Slim"].get("prune", False): + import paddleslim + prune_method_name = config["Slim"]["prune"]["name"].lower() + assert prune_method_name in [ + "fpgm", "l1_norm" + ], "The prune methods only support 'fpgm' and 'l1_norm'" + if prune_method_name == "fpgm": + model.pruner = paddleslim.dygraph.FPGMFilterPruner( + model, [1] + config["Global"]["image_shape"]) + else: + model.pruner = paddleslim.dygraph.L1NormFilterPruner( + model, [1] + config["Global"]["image_shape"]) + + # prune model + _prune_model(config, model) + else: + model.pruner = None + + + +def _prune_model(config, model): + from paddleslim.analysis import dygraph_flops as flops + logger.info("FLOPs before pruning: {}GFLOPs".format( + flops(model, [1] + config["Global"]["image_shape"]) / 1e9)) + model.eval() + + params = [] + for sublayer in model.sublayers(): + for param in sublayer.parameters(include_sublayers=False): + if isinstance(sublayer, paddle.nn.Conv2D): + params.append(param.name) + ratios = {} + for param in params: + ratios[param] = config["Slim"]["prune"]["pruned_ratio"] + plan = model.pruner.prune_vars(ratios, [0]) + + logger.info("FLOPs after pruning: {}GFLOPs; pruned ratio: {}".format( + flops(model, [1] + config["Global"]["image_shape"]) / 1e9, + plan.pruned_flops)) + + for param in model.parameters(): + if "conv2d" in param.name: + logger.info("{}\t{}".format(param.name, param.shape)) + + model.train() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/slim/quant.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/slim/quant.py new file mode 100644 index 000000000..b8f59a78f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/slim/quant.py @@ -0,0 +1,55 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +import paddle +from ppcls.utils import logger + +QUANT_CONFIG = { + # weight preprocess type, default is None and no preprocessing is performed. + 'weight_preprocess_type': None, + # activation preprocess type, default is None and no preprocessing is performed. + 'activation_preprocess_type': None, + # weight quantize type, default is 'channel_wise_abs_max' + 'weight_quantize_type': 'channel_wise_abs_max', + # activation quantize type, default is 'moving_average_abs_max' + 'activation_quantize_type': 'moving_average_abs_max', + # weight quantize bit num, default is 8 + 'weight_bits': 8, + # activation quantize bit num, default is 8 + 'activation_bits': 8, + # data type after quantization, such as 'uint8', 'int8', etc. default is 'int8' + 'dtype': 'int8', + # window size for 'range_abs_max' quantization. default is 10000 + 'window_size': 10000, + # The decay coefficient of moving average, default is 0.9 + 'moving_rate': 0.9, + # for dygraph quantization, layers of type in quantizable_layer_type will be quantized + 'quantizable_layer_type': ['Conv2D', 'Linear'], +} + + +def quantize_model(config, model): + if config.get("Slim", False) and config["Slim"].get("quant", False): + from paddleslim.dygraph.quant import QAT + assert config["Slim"]["quant"]["name"].lower( + ) == 'pact', 'Only PACT quantization method is supported now' + QUANT_CONFIG["activation_preprocess_type"] = "PACT" + model.quanter = QAT(config=QUANT_CONFIG) + model.quanter.quantize(model) + logger.info("QAT model summary:") + paddle.summary(model, (1, 3, 224, 224)) + else: + model.quanter = None + return diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/arch/utils.py b/cv/classification/resnet50/paddlepaddle/ppcls/arch/utils.py new file mode 100644 index 000000000..308475d7d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/arch/utils.py @@ -0,0 +1,53 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import types +from difflib import SequenceMatcher + +from . 
import backbone + + +def get_architectures(): + """ + get all of model architectures + """ + names = [] + for k, v in backbone.__dict__.items(): + if isinstance(v, (types.FunctionType, six.class_types)): + names.append(k) + return names + + +def get_blacklist_model_in_static_mode(): + from ppcls.arch.backbone import distilled_vision_transformer + from ppcls.arch.backbone import vision_transformer + blacklist = distilled_vision_transformer.__all__ + vision_transformer.__all__ + return blacklist + + +def similar_architectures(name='', names=[], thresh=0.1, topk=10): + """ + inferred similar architectures + """ + scores = [] + for idx, n in enumerate(names): + if n.startswith('__'): + continue + score = SequenceMatcher(None, n.lower(), name.lower()).quick_ratio() + if score > thresh: + scores.append((idx, score)) + scores.sort(key=lambda x: x[1], reverse=True) + similar_names = [names[s[0]] for s in scores[:min(topk, len(scores))]] + return similar_names diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml new file mode 100644 index 000000000..ab4c29c30 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + 
class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml new file mode 100644 index 000000000..d75fede9e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml new file mode 100644 index 000000000..2fefb9f4b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: 
./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - CutmixOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml new file mode 100644 index 000000000..4bf530664 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml @@ -0,0 +1,131 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - Cutout: + n_holes: 1 + length: 112 + + sampler: + name: 
DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml new file mode 100644 index 000000000..c0016aa00 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - GridMask: + d1: 96 + d2: 224 + rotate: 1 + ratio: 0.5 + mode: 0 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True 
+ channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml new file mode 100644 index 000000000..12e4ac8db --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - HideAndSeek: + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml new file mode 100644 index 000000000..3434cab5a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + 
pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml new file mode 100644 index 000000000..153451e13 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml @@ -0,0 +1,131 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - 
DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - RandAugment: + num_layers: 2 + magnitude: 5 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml new file mode 100644 index 000000000..8e89c5ca1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] 
+ + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/ResNet/ResNet50.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/ResNet/ResNet50.yaml new file mode 100644 index 000000000..c2da23fb3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/ResNet/ResNet50.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + 
transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/ResNet/ResNet50_amp_4x8.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/ResNet/ResNet50_amp_4x8.yaml new file mode 100644 index 000000000..2d6de3b21 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/ResNet/ResNet50_amp_4x8.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 90 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: False + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.875 + multi_precision: True + lr: + name: Cosine + warmup_epoch: 8 + learning_rate: 8.192 + regularizer: + name: 'L2' + coeff: 2.5e-05 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml new file 
mode 100644 index 000000000..be7b2d9db --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/configs/quick_start/ResNet50_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls/configs/quick_start/ResNet50_vd.yaml new file mode 100644 index 000000000..30d745599 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/configs/quick_start/ResNet50_vd.yaml @@ -0,0 +1,107 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 5 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50_vd + class_num: 102 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0125 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + 
+# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./data/datasets/flowers102/ + cls_label_path: ./data/datasets/flowers102/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./data/datasets/flowers102/ + cls_label_path: ./data/datasets/flowers102/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/engine/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/engine/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/engine/engine.py b/cv/classification/resnet50/paddlepaddle/ppcls/engine/engine.py new file mode 100644 index 000000000..019cf1650 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/engine/engine.py @@ -0,0 +1,468 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
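The YAML files above all share the same top-level layout (Global, Arch, Loss, Optimizer, DataLoader, Infer, Metric). As a minimal sketch of how such a file can be loaded and inspected before training, assuming PyYAML is available and using the quick_start config path added by this patch (adjust to the local checkout):

```python
import yaml

# Hypothetical local path; adjust to wherever the repository is checked out.
cfg_path = "cv/classification/resnet50/paddlepaddle/ppcls/configs/quick_start/ResNet50_vd.yaml"
with open(cfg_path) as f:
    config = yaml.safe_load(f)

print(config["Global"]["epochs"])                               # 5
print(config["Arch"]["name"], config["Arch"]["class_num"])      # ResNet50_vd 102
print(config["DataLoader"]["Train"]["sampler"]["batch_size"])   # 32
```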
+from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import platform +import paddle +import paddle.distributed as dist +from visualdl import LogWriter +from paddle import nn +import numpy as np +import random + +from ppcls.utils.check import check_gpu +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger +from ppcls.utils.logger import init_logger +from ppcls.utils.config import print_config +from ppcls.data import build_dataloader +from ppcls.arch import build_model, RecModel, DistillationModel, TheseusLayer +from ppcls.arch import apply_to_static +from ppcls.loss import build_loss +from ppcls.metric import build_metrics +from ppcls.optimizer import build_optimizer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url +from ppcls.utils.save_load import init_model +from ppcls.utils import save_load + +from ppcls.data.utils.get_image_list import get_image_list +from ppcls.data.postprocess import build_postprocess +from ppcls.data import create_operators +from ppcls.engine.train import train_epoch +from ppcls.engine import evaluation +from ppcls.arch.gears.identity_head import IdentityHead + + +class Engine(object): + def __init__(self, config, mode="train"): + assert mode in ["train", "eval", "infer", "export"] + self.mode = mode + self.config = config + self.eval_mode = self.config["Global"].get("eval_mode", + "classification") + if "Head" in self.config["Arch"] or self.config["Arch"].get("is_rec", + False): + self.is_rec = True + else: + self.is_rec = False + + # set seed + seed = self.config["Global"].get("seed", False) + if seed or seed == 0: + assert isinstance(seed, int), "The 'seed' must be a integer!" + paddle.seed(seed) + np.random.seed(seed) + random.seed(seed) + + # init logger + self.output_dir = self.config['Global']['output_dir'] + log_file = os.path.join(self.output_dir, self.config["Arch"]["name"], + f"{mode}.log") + init_logger(log_file=log_file) + print_config(config) + + # init train_func and eval_func + assert self.eval_mode in ["classification", "retrieval"], logger.error( + "Invalid eval mode: {}".format(self.eval_mode)) + self.train_epoch_func = train_epoch + self.eval_func = getattr(evaluation, self.eval_mode + "_eval") + + self.use_dali = self.config['Global'].get("use_dali", False) + + # for visualdl + self.vdl_writer = None + if self.config['Global'][ + 'use_visualdl'] and mode == "train" and dist.get_rank() == 0: + vdl_writer_path = os.path.join(self.output_dir, "vdl") + if not os.path.exists(vdl_writer_path): + os.makedirs(vdl_writer_path) + self.vdl_writer = LogWriter(logdir=vdl_writer_path) + + # set device + assert self.config["Global"]["device"] in ["cpu", "gpu", "xpu", "npu", "mlu"] + self.device = paddle.set_device(self.config["Global"]["device"]) + logger.info('train with paddle {} and device {}'.format( + paddle.__version__, self.device)) + + # AMP training + self.amp = True if "AMP" in self.config and self.mode == "train" else False + if self.amp and self.config["AMP"] is not None: + self.scale_loss = self.config["AMP"].get("scale_loss", 1.0) + self.use_dynamic_loss_scaling = self.config["AMP"].get( + "use_dynamic_loss_scaling", False) + else: + self.scale_loss = 1.0 + self.use_dynamic_loss_scaling = False + if self.amp: + AMP_RELATED_FLAGS_SETTING = { + 'FLAGS_max_inplace_grad_add': 8, + } + if paddle.is_compiled_with_cuda(): + AMP_RELATED_FLAGS_SETTING.update({ + 'FLAGS_cudnn_batchnorm_spatial_persistent': 1 + }) + 
paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING) + + if "class_num" in config["Global"]: + global_class_num = config["Global"]["class_num"] + if "class_num" not in config["Arch"]: + config["Arch"]["class_num"] = global_class_num + msg = f"The Global.class_num will be deprecated. Please use Arch.class_num instead. Arch.class_num has been set to {global_class_num}." + else: + msg = "The Global.class_num will be deprecated. Please use Arch.class_num instead. The Global.class_num has been ignored." + logger.warning(msg) + #TODO(gaotingquan): support rec + class_num = config["Arch"].get("class_num", None) + self.config["DataLoader"].update({"class_num": class_num}) + # build dataloader + if self.mode == 'train': + self.train_dataloader = build_dataloader( + self.config["DataLoader"], "Train", self.device, self.use_dali) + if self.mode == "eval" or (self.mode == "train" and + self.config["Global"]["eval_during_train"]): + if self.eval_mode == "classification": + self.eval_dataloader = build_dataloader( + self.config["DataLoader"], "Eval", self.device, + self.use_dali) + elif self.eval_mode == "retrieval": + self.gallery_query_dataloader = None + if len(self.config["DataLoader"]["Eval"].keys()) == 1: + key = list(self.config["DataLoader"]["Eval"].keys())[0] + self.gallery_query_dataloader = build_dataloader( + self.config["DataLoader"]["Eval"], key, self.device, + self.use_dali) + else: + self.gallery_dataloader = build_dataloader( + self.config["DataLoader"]["Eval"], "Gallery", + self.device, self.use_dali) + self.query_dataloader = build_dataloader( + self.config["DataLoader"]["Eval"], "Query", + self.device, self.use_dali) + + # build loss + if self.mode == "train": + loss_info = self.config["Loss"]["Train"] + self.train_loss_func = build_loss(loss_info) + if self.mode == "eval" or (self.mode == "train" and + self.config["Global"]["eval_during_train"]): + loss_config = self.config.get("Loss", None) + if loss_config is not None: + loss_config = loss_config.get("Eval") + if loss_config is not None: + self.eval_loss_func = build_loss(loss_config) + else: + self.eval_loss_func = None + else: + self.eval_loss_func = None + + # build metric + if self.mode == 'train': + metric_config = self.config.get("Metric") + if metric_config is not None: + metric_config = metric_config.get("Train") + if metric_config is not None: + if hasattr( + self.train_dataloader, "collate_fn" + ) and self.train_dataloader.collate_fn is not None: + for m_idx, m in enumerate(metric_config): + if "TopkAcc" in m: + msg = f"'TopkAcc' metric can not be used when setting 'batch_transform_ops' in config. The 'TopkAcc' metric has been removed." 
+ logger.warning(msg) + break + metric_config.pop(m_idx) + self.train_metric_func = build_metrics(metric_config) + else: + self.train_metric_func = None + else: + self.train_metric_func = None + + if self.mode == "eval" or (self.mode == "train" and + self.config["Global"]["eval_during_train"]): + metric_config = self.config.get("Metric") + if self.eval_mode == "classification": + if metric_config is not None: + metric_config = metric_config.get("Eval") + if metric_config is not None: + self.eval_metric_func = build_metrics(metric_config) + elif self.eval_mode == "retrieval": + if metric_config is None: + metric_config = [{"name": "Recallk", "topk": (1, 5)}] + else: + metric_config = metric_config["Eval"] + self.eval_metric_func = build_metrics(metric_config) + else: + self.eval_metric_func = None + + # build model + self.model = build_model(self.config) + # set @to_static for benchmark, skip this by default. + apply_to_static(self.config, self.model) + + # load_pretrain + if self.config["Global"]["pretrained_model"] is not None: + if self.config["Global"]["pretrained_model"].startswith("http"): + load_dygraph_pretrain_from_url( + self.model, self.config["Global"]["pretrained_model"]) + else: + load_dygraph_pretrain( + self.model, self.config["Global"]["pretrained_model"]) + + # build optimizer + if self.mode == 'train': + self.optimizer, self.lr_sch = build_optimizer( + self.config["Optimizer"], self.config["Global"]["epochs"], + len(self.train_dataloader), [self.model]) + + # for amp training + if self.amp: + self.scaler = paddle.amp.GradScaler( + init_loss_scaling=self.scale_loss, + use_dynamic_loss_scaling=self.use_dynamic_loss_scaling) + amp_level = self.config['AMP'].get("level", "O1") + if amp_level not in ["O1", "O2"]: + msg = "[Parameter Error]: The optimize level of AMP only support 'O1' and 'O2'. The level has been set 'O1'." + logger.warning(msg) + self.config['AMP']["level"] = "O1" + amp_level = "O1" + self.model, self.optimizer = paddle.amp.decorate( + models=self.model, + optimizers=self.optimizer, + level=amp_level, + save_dtype='float32') + + # for distributed + world_size = dist.get_world_size() + self.config["Global"]["distributed"] = world_size != 1 + if world_size != 4 and self.mode == "train": + msg = f"The training strategy in config files provided by PaddleClas is based on 4 gpus. But the number of gpus is {world_size} in current training. Please modify the stategy (learning rate, batch size and so on) if use config files in PaddleClas to train." 
+ logger.warning(msg) + if self.config["Global"]["distributed"]: + dist.init_parallel_env() + self.model = paddle.DataParallel(self.model) + + # build postprocess for infer + if self.mode == 'infer': + self.preprocess_func = create_operators(self.config["Infer"][ + "transforms"]) + self.postprocess_func = build_postprocess(self.config["Infer"][ + "PostProcess"]) + + def train(self): + assert self.mode == "train" + print_batch_step = self.config['Global']['print_batch_step'] + save_interval = self.config["Global"]["save_interval"] + best_metric = { + "metric": 0.0, + "epoch": 0, + } + # key: + # val: metrics list word + self.output_info = dict() + self.time_info = { + "batch_cost": AverageMeter( + "batch_cost", '.5f', postfix=" s,"), + "reader_cost": AverageMeter( + "reader_cost", ".5f", postfix=" s,"), + } + # global iter counter + self.global_step = 0 + + if self.config["Global"]["checkpoints"] is not None: + metric_info = init_model(self.config["Global"], self.model, + self.optimizer) + if metric_info is not None: + best_metric.update(metric_info) + + self.max_iter = len(self.train_dataloader) - 1 if platform.system( + ) == "Windows" else len(self.train_dataloader) + for epoch_id in range(best_metric["epoch"] + 1, + self.config["Global"]["epochs"] + 1): + acc = 0.0 + # for one epoch train + self.train_epoch_func(self, epoch_id, print_batch_step) + + if self.use_dali: + self.train_dataloader.reset() + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, self.output_info[key].avg) + for key in self.output_info + ]) + logger.info("[Train][Epoch {}/{}][Avg]{}".format( + epoch_id, self.config["Global"]["epochs"], metric_msg)) + self.output_info.clear() + + # eval model and save model if possible + if self.config["Global"][ + "eval_during_train"] and epoch_id % self.config["Global"][ + "eval_interval"] == 0: + acc = self.eval(epoch_id) + if acc > best_metric["metric"]: + best_metric["metric"] = acc + best_metric["epoch"] = epoch_id + save_load.save_model( + self.model, + self.optimizer, + best_metric, + self.output_dir, + model_name=self.config["Arch"]["name"], + prefix="best_model") + logger.info("[Eval][Epoch {}][best metric: {}]".format( + epoch_id, best_metric["metric"])) + logger.scaler( + name="eval_acc", + value=acc, + step=epoch_id, + writer=self.vdl_writer) + + self.model.train() + + # save model + if epoch_id % save_interval == 0: + save_load.save_model( + self.model, + self.optimizer, {"metric": acc, + "epoch": epoch_id}, + self.output_dir, + model_name=self.config["Arch"]["name"], + prefix="epoch_{}".format(epoch_id)) + # save the latest model + save_load.save_model( + self.model, + self.optimizer, {"metric": acc, + "epoch": epoch_id}, + self.output_dir, + model_name=self.config["Arch"]["name"], + prefix="latest") + + if self.vdl_writer is not None: + self.vdl_writer.close() + + @paddle.no_grad() + def eval(self, epoch_id=0): + assert self.mode in ["train", "eval"] + self.model.eval() + eval_result = self.eval_func(self, epoch_id) + self.model.train() + return eval_result + + @paddle.no_grad() + def infer(self): + assert self.mode == "infer" and self.eval_mode == "classification" + total_trainer = dist.get_world_size() + local_rank = dist.get_rank() + image_list = get_image_list(self.config["Infer"]["infer_imgs"]) + # data split + image_list = image_list[local_rank::total_trainer] + + batch_size = self.config["Infer"]["batch_size"] + self.model.eval() + batch_data = [] + image_file_list = [] + for idx, image_file in enumerate(image_list): + with open(image_file, 'rb') as f: + x = 
f.read() + for process in self.preprocess_func: + x = process(x) + batch_data.append(x) + image_file_list.append(image_file) + if len(batch_data) >= batch_size or idx == len(image_list) - 1: + batch_tensor = paddle.to_tensor(batch_data) + out = self.model(batch_tensor) + if isinstance(out, list): + out = out[0] + if isinstance(out, dict) and "logits" in out: + out = out["logits"] + if isinstance(out, dict) and "output" in out: + out = out["output"] + result = self.postprocess_func(out, image_file_list) + print(result) + batch_data.clear() + image_file_list.clear() + + def export(self): + assert self.mode == "export" + use_multilabel = self.config["Global"].get("use_multilabel", False) + model = ExportModel(self.config["Arch"], self.model, use_multilabel) + if self.config["Global"]["pretrained_model"] is not None: + load_dygraph_pretrain(model.base_model, + self.config["Global"]["pretrained_model"]) + + model.eval() + save_path = os.path.join(self.config["Global"]["save_inference_dir"], + "inference") + if model.quanter: + model.quanter.save_quantized_model( + model.base_model, + save_path, + input_spec=[ + paddle.static.InputSpec( + shape=[None] + self.config["Global"]["image_shape"], + dtype='float32') + ]) + else: + model = paddle.jit.to_static( + model, + input_spec=[ + paddle.static.InputSpec( + shape=[None] + self.config["Global"]["image_shape"], + dtype='float32') + ]) + paddle.jit.save(model, save_path) + + +class ExportModel(TheseusLayer): + """ + ExportModel: add softmax onto the model + """ + + def __init__(self, config, model, use_multilabel): + super().__init__() + self.base_model = model + # we should choose a final model to export + if isinstance(self.base_model, DistillationModel): + self.infer_model_name = config["infer_model_name"] + else: + self.infer_model_name = None + + self.infer_output_key = config.get("infer_output_key", None) + if self.infer_output_key == "features" and isinstance(self.base_model, + RecModel): + self.base_model.head = IdentityHead() + if use_multilabel: + self.out_act = nn.Sigmoid() + else: + if config.get("infer_add_softmax", True): + self.out_act = nn.Softmax(axis=-1) + else: + self.out_act = None + + def eval(self): + self.training = False + for layer in self.sublayers(): + layer.training = False + layer.eval() + + def forward(self, x): + x = self.base_model(x) + if isinstance(x, list): + x = x[0] + if self.infer_model_name is not None: + x = x[self.infer_model_name] + if self.infer_output_key is not None: + x = x[self.infer_output_key] + if self.out_act is not None: + x = self.out_act(x) + return x diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/engine/evaluation/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/engine/evaluation/__init__.py new file mode 100644 index 000000000..e0cd77888 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/engine/evaluation/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
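The Engine defined in engine.py above is the single entry object for training, evaluation, inference and export. A minimal usage sketch, assuming the config dict has been loaded from one of the YAML files in this patch (for example with the loading snippet shown earlier):

```python
from ppcls.engine.engine import Engine

def run_training(config):
    # mode="train" builds the dataloaders, loss, metrics and optimizer in
    # __init__, then train() loops over Global.epochs and evaluates every
    # Global.eval_interval epochs when Global.eval_during_train is set.
    engine = Engine(config, mode="train")
    engine.train()

def run_standalone_eval(config):
    # mode="eval" only builds the Eval dataloader, loss and metrics;
    # eval() returns the first metric in the Metric.Eval list (Top-1 here).
    engine = Engine(config, mode="eval")
    return engine.eval()
```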
+ +from ppcls.engine.evaluation.classification import classification_eval +from ppcls.engine.evaluation.retrieval import retrieval_eval diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/engine/evaluation/classification.py b/cv/classification/resnet50/paddlepaddle/ppcls/engine/evaluation/classification.py new file mode 100644 index 000000000..79fb1d692 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/engine/evaluation/classification.py @@ -0,0 +1,174 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import time +import platform +import paddle + +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger + + +def classification_eval(engine, epoch_id=0): + output_info = dict() + time_info = { + "batch_cost": AverageMeter( + "batch_cost", '.5f', postfix=" s,"), + "reader_cost": AverageMeter( + "reader_cost", ".5f", postfix=" s,"), + } + print_batch_step = engine.config["Global"]["print_batch_step"] + + metric_key = None + tic = time.time() + accum_samples = 0 + total_samples = len( + engine.eval_dataloader. + dataset) if not engine.use_dali else engine.eval_dataloader.size + max_iter = len(engine.eval_dataloader) - 1 if platform.system( + ) == "Windows" else len(engine.eval_dataloader) + for iter_id, batch in enumerate(engine.eval_dataloader): + if iter_id >= max_iter: + break + if iter_id == 5: + for key in time_info: + time_info[key].reset() + if engine.use_dali: + batch = [ + paddle.to_tensor(batch[0]['data']), + paddle.to_tensor(batch[0]['label']) + ] + time_info["reader_cost"].update(time.time() - tic) + batch_size = batch[0].shape[0] + batch[0] = paddle.to_tensor(batch[0]).astype("float32") + if not engine.config["Global"].get("use_multilabel", False): + batch[1] = batch[1].reshape([-1, 1]).astype("int64") + + # image input + if engine.amp: + amp_level = engine.config['AMP'].get("level", "O1").upper() + with paddle.amp.auto_cast( + custom_black_list={ + "flatten_contiguous_range", "greater_than" + }, + level=amp_level): + out = engine.model(batch[0]) + else: + out = engine.model(batch[0]) + + # just for DistributedBatchSampler issue: repeat sampling + current_samples = batch_size * paddle.distributed.get_world_size() + accum_samples += current_samples + + # gather Tensor when distributed + if paddle.distributed.get_world_size() > 1: + label_list = [] + paddle.distributed.all_gather(label_list, batch[1]) + labels = paddle.concat(label_list, 0) + + if isinstance(out, dict): + if "Student" in out: + out = out["Student"] + if isinstance(out, dict): + out = out["logits"] + elif "logits" in out: + out = out["logits"] + else: + msg = "Error: Wrong key in out!" 
+ raise Exception(msg) + if isinstance(out, list): + preds = [] + for x in out: + pred_list = [] + paddle.distributed.all_gather(pred_list, x) + pred_x = paddle.concat(pred_list, 0) + preds.append(pred_x) + else: + pred_list = [] + paddle.distributed.all_gather(pred_list, out) + preds = paddle.concat(pred_list, 0) + + if accum_samples > total_samples and not engine.use_dali: + preds = preds[:total_samples + current_samples - accum_samples] + labels = labels[:total_samples + current_samples - + accum_samples] + current_samples = total_samples + current_samples - accum_samples + else: + labels = batch[1] + preds = out + + # calc loss + if engine.eval_loss_func is not None: + if engine.amp and engine.config["AMP"].get("use_fp16_test", False): + amp_level = engine.config['AMP'].get("level", "O1").upper() + with paddle.amp.auto_cast( + custom_black_list={ + "flatten_contiguous_range", "greater_than" + }, + level=amp_level): + loss_dict = engine.eval_loss_func(preds, labels) + else: + loss_dict = engine.eval_loss_func(preds, labels) + + for key in loss_dict: + if key not in output_info: + output_info[key] = AverageMeter(key, '7.5f') + output_info[key].update(loss_dict[key].numpy()[0], + current_samples) + # calc metric + if engine.eval_metric_func is not None: + metric_dict = engine.eval_metric_func(preds, labels) + for key in metric_dict: + if metric_key is None: + metric_key = key + if key not in output_info: + output_info[key] = AverageMeter(key, '7.5f') + + output_info[key].update(metric_dict[key].numpy()[0], + current_samples) + + time_info["batch_cost"].update(time.time() - tic) + + if iter_id % print_batch_step == 0: + time_msg = "s, ".join([ + "{}: {:.5f}".format(key, time_info[key].avg) + for key in time_info + ]) + + ips_msg = "ips: {:.5f} images/sec".format( + batch_size / time_info["batch_cost"].avg) + + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].val) + for key in output_info + ]) + logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format( + epoch_id, iter_id, + len(engine.eval_dataloader), metric_msg, time_msg, ips_msg)) + + tic = time.time() + if engine.use_dali: + engine.eval_dataloader.reset() + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].avg) for key in output_info + ]) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg)) + + # do not try to save best eval.model + if engine.eval_metric_func is None: + return -1 + # return 1st metric in the dict + return output_info[metric_key].avg diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/engine/evaluation/retrieval.py b/cv/classification/resnet50/paddlepaddle/ppcls/engine/evaluation/retrieval.py new file mode 100644 index 000000000..b481efae1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/engine/evaluation/retrieval.py @@ -0,0 +1,179 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
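classification_eval above truncates the gathered predictions because DistributedBatchSampler pads the last batch so that every rank sees the same number of samples. A plain-Python illustration of that bookkeeping, with made-up sizes:

```python
total_samples = 20                   # len(eval_dataloader.dataset)
world_size, batch_size = 4, 3
per_step = world_size * batch_size   # 12 predictions gathered per iteration

accum_samples = per_step             # after iteration 1: 12 <= 20, keep all 12
accum_samples += per_step            # after iteration 2: 24 > 20, overshoot of 4
keep = total_samples + per_step - accum_samples
print(keep)                          # 8: only 8 of the last 12 gathered rows are new
```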
+from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import platform +import paddle +from ppcls.utils import logger + + +def retrieval_eval(engine, epoch_id=0): + engine.model.eval() + # step1. build gallery + if engine.gallery_query_dataloader is not None: + gallery_feas, gallery_img_id, gallery_unique_id = cal_feature( + engine, name='gallery_query') + query_feas, query_img_id, query_query_id = gallery_feas, gallery_img_id, gallery_unique_id + else: + gallery_feas, gallery_img_id, gallery_unique_id = cal_feature( + engine, name='gallery') + query_feas, query_img_id, query_query_id = cal_feature( + engine, name='query') + + # step2. do evaluation + sim_block_size = engine.config["Global"].get("sim_block_size", 64) + sections = [sim_block_size] * (len(query_feas) // sim_block_size) + if len(query_feas) % sim_block_size: + sections.append(len(query_feas) % sim_block_size) + fea_blocks = paddle.split(query_feas, num_or_sections=sections) + if query_query_id is not None: + query_id_blocks = paddle.split( + query_query_id, num_or_sections=sections) + image_id_blocks = paddle.split(query_img_id, num_or_sections=sections) + metric_key = None + + if engine.eval_loss_func is None: + metric_dict = {metric_key: 0.} + else: + metric_dict = dict() + for block_idx, block_fea in enumerate(fea_blocks): + similarity_matrix = paddle.matmul( + block_fea, gallery_feas, transpose_y=True) + if query_query_id is not None: + query_id_block = query_id_blocks[block_idx] + query_id_mask = (query_id_block != gallery_unique_id.t()) + + image_id_block = image_id_blocks[block_idx] + image_id_mask = (image_id_block != gallery_img_id.t()) + + keep_mask = paddle.logical_or(query_id_mask, image_id_mask) + similarity_matrix = similarity_matrix * keep_mask.astype( + "float32") + else: + keep_mask = None + + metric_tmp = engine.eval_metric_func(similarity_matrix, + image_id_blocks[block_idx], + gallery_img_id, keep_mask) + + for key in metric_tmp: + if key not in metric_dict: + metric_dict[key] = metric_tmp[key] * block_fea.shape[ + 0] / len(query_feas) + else: + metric_dict[key] += metric_tmp[key] * block_fea.shape[ + 0] / len(query_feas) + + metric_info_list = [] + for key in metric_dict: + if metric_key is None: + metric_key = key + metric_info_list.append("{}: {:.5f}".format(key, metric_dict[key])) + metric_msg = ", ".join(metric_info_list) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg)) + + return metric_dict[metric_key] + + +def cal_feature(engine, name='gallery'): + has_unique_id = False + all_unique_id = None + + if name == 'gallery': + dataloader = engine.gallery_dataloader + elif name == 'query': + dataloader = engine.query_dataloader + elif name == 'gallery_query': + dataloader = engine.gallery_query_dataloader + else: + raise RuntimeError("Only support gallery or query dataset") + + batch_feas_list = [] + img_id_list = [] + unique_id_list = [] + max_iter = len(dataloader) - 1 if platform.system() == "Windows" else len( + dataloader) + for idx, batch in enumerate(dataloader): # load is very time-consuming + if idx >= max_iter: + break + if idx % engine.config["Global"]["print_batch_step"] == 0: + logger.info( + f"{name} feature calculation process: [{idx}/{len(dataloader)}]" + ) + if engine.use_dali: + batch = [ + paddle.to_tensor(batch[0]['data']), + paddle.to_tensor(batch[0]['label']) + ] + batch = [paddle.to_tensor(x) for x in batch] + batch[1] = batch[1].reshape([-1, 1]).astype("int64") + if len(batch) == 3: + has_unique_id = 
True + batch[2] = batch[2].reshape([-1, 1]).astype("int64") + out = engine.model(batch[0], batch[1]) + if "Student" in out: + out = out["Student"] + batch_feas = out["features"] + + # do norm + if engine.config["Global"].get("feature_normalize", True): + feas_norm = paddle.sqrt( + paddle.sum(paddle.square(batch_feas), axis=1, keepdim=True)) + batch_feas = paddle.divide(batch_feas, feas_norm) + + # do binarize + if engine.config["Global"].get("feature_binarize") == "round": + batch_feas = paddle.round(batch_feas).astype("float32") * 2.0 - 1.0 + + if engine.config["Global"].get("feature_binarize") == "sign": + batch_feas = paddle.sign(batch_feas).astype("float32") + + if paddle.distributed.get_world_size() > 1: + batch_feas_gather = [] + img_id_gather = [] + unique_id_gather = [] + paddle.distributed.all_gather(batch_feas_gather, batch_feas) + paddle.distributed.all_gather(img_id_gather, batch[1]) + batch_feas_list.append(paddle.concat(batch_feas_gather)) + img_id_list.append(paddle.concat(img_id_gather)) + if has_unique_id: + paddle.distributed.all_gather(unique_id_gather, batch[2]) + unique_id_list.append(paddle.concat(unique_id_gather)) + else: + batch_feas_list.append(batch_feas) + img_id_list.append(batch[1]) + if has_unique_id: + unique_id_list.append(batch[2]) + + if engine.use_dali: + dataloader.reset() + + all_feas = paddle.concat(batch_feas_list) + all_img_id = paddle.concat(img_id_list) + if has_unique_id: + all_unique_id = paddle.concat(unique_id_list) + + # just for DistributedBatchSampler issue: repeat sampling + total_samples = len( + dataloader.dataset) if not engine.use_dali else dataloader.size + all_feas = all_feas[:total_samples] + all_img_id = all_img_id[:total_samples] + if has_unique_id: + all_unique_id = all_unique_id[:total_samples] + + logger.info("Build {} done, all feat shape: {}, begin to eval..".format( + name, all_feas.shape)) + return all_feas, all_img_id, all_unique_id diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/engine/train/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/engine/train/__init__.py new file mode 100644 index 000000000..800d3a41e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/engine/train/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from ppcls.engine.train.train import train_epoch diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/engine/train/train.py b/cv/classification/resnet50/paddlepaddle/ppcls/engine/train/train.py new file mode 100644 index 000000000..b15c1088a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/engine/train/train.py @@ -0,0 +1,82 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import, division, print_function + +import time +import paddle +from ppcls.engine.train.utils import update_loss, update_metric, log_info +from ppcls.utils import profiler + + +def train_epoch(engine, epoch_id, print_batch_step): + tic = time.time() + for iter_id, batch in enumerate(engine.train_dataloader): + if iter_id >= engine.max_iter: + break + profiler.add_profiler_step(engine.config["profiler_options"]) + if iter_id == 5: + for key in engine.time_info: + engine.time_info[key].reset() + engine.time_info["reader_cost"].update(time.time() - tic) + if engine.use_dali: + batch = [ + paddle.to_tensor(batch[0]['data']), + paddle.to_tensor(batch[0]['label']) + ] + batch_size = batch[0].shape[0] + if not engine.config["Global"].get("use_multilabel", False): + batch[1] = batch[1].reshape([batch_size, -1]) + engine.global_step += 1 + + # image input + if engine.amp: + amp_level = engine.config['AMP'].get("level", "O1").upper() + with paddle.amp.auto_cast( + custom_black_list={ + "flatten_contiguous_range", "greater_than" + }, + level=amp_level): + out = forward(engine, batch) + loss_dict = engine.train_loss_func(out, batch[1]) + else: + out = forward(engine, batch) + loss_dict = engine.train_loss_func(out, batch[1]) + + # step opt and lr + if engine.amp: + scaled = engine.scaler.scale(loss_dict["loss"]) + scaled.backward() + engine.scaler.minimize(engine.optimizer, scaled) + else: + loss_dict["loss"].backward() + engine.optimizer.step() + engine.optimizer.clear_grad() + engine.lr_sch.step() + + # below code just for logging + # update metric_for_logger + update_metric(engine, out, batch, batch_size) + # update_loss_for_logger + update_loss(engine, loss_dict, batch_size) + engine.time_info["batch_cost"].update(time.time() - tic) + if iter_id % print_batch_step == 0: + log_info(engine, batch_size, epoch_id, iter_id) + tic = time.time() + + +def forward(engine, batch): + if not engine.is_rec: + return engine.model(batch[0]) + else: + return engine.model(batch[0], batch[1]) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/engine/train/utils.py b/cv/classification/resnet50/paddlepaddle/ppcls/engine/train/utils.py new file mode 100644 index 000000000..92eb35d75 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/engine/train/utils.py @@ -0,0 +1,72 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import, division, print_function + +import datetime +from ppcls.utils import logger +from ppcls.utils.misc import AverageMeter + + +def update_metric(trainer, out, batch, batch_size): + # calc metric + if trainer.train_metric_func is not None: + metric_dict = trainer.train_metric_func(out, batch[-1]) + for key in metric_dict: + if key not in trainer.output_info: + trainer.output_info[key] = AverageMeter(key, '7.5f') + trainer.output_info[key].update(metric_dict[key].numpy()[0], + batch_size) + + +def update_loss(trainer, loss_dict, batch_size): + # update_output_info + for key in loss_dict: + if key not in trainer.output_info: + trainer.output_info[key] = AverageMeter(key, '7.5f') + trainer.output_info[key].update(loss_dict[key].numpy()[0], batch_size) + + +def log_info(trainer, batch_size, epoch_id, iter_id): + lr_msg = "lr: {:.5f}".format(trainer.lr_sch.get_lr()) + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, trainer.output_info[key].avg) + for key in trainer.output_info + ]) + time_msg = "s, ".join([ + "{}: {:.5f}".format(key, trainer.time_info[key].avg) + for key in trainer.time_info + ]) + + ips_msg = "ips: {:.5f} images/sec".format( + batch_size / trainer.time_info["batch_cost"].avg) + eta_sec = ((trainer.config["Global"]["epochs"] - epoch_id + 1 + ) * len(trainer.train_dataloader) - iter_id + ) * trainer.time_info["batch_cost"].avg + eta_msg = "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec)))) + logger.info("[Train][Epoch {}/{}][Iter: {}/{}]{}, {}, {}, {}, {}".format( + epoch_id, trainer.config["Global"]["epochs"], iter_id, + len(trainer.train_dataloader), lr_msg, metric_msg, time_msg, ips_msg, + eta_msg)) + + logger.scaler( + name="lr", + value=trainer.lr_sch.get_lr(), + step=trainer.global_step, + writer=trainer.vdl_writer) + for key in trainer.output_info: + logger.scaler( + name="train_{}".format(key), + value=trainer.output_info[key].avg, + step=trainer.global_step, + writer=trainer.vdl_writer) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/loss/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/loss/__init__.py new file mode 100644 index 000000000..7c50ff76f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/loss/__init__.py @@ -0,0 +1,47 @@ +import copy + +import paddle +import paddle.nn as nn +from ppcls.utils import logger + +from .celoss import CELoss, MixCELoss +from .distanceloss import DistanceLoss + +class CombinedLoss(nn.Layer): + def __init__(self, config_list): + super().__init__() + self.loss_func = [] + self.loss_weight = [] + assert isinstance(config_list, list), ( + 'operator config should be a list') + for config in config_list: + assert isinstance(config, + dict) and len(config) == 1, "yaml format error" + name = list(config)[0] + param = config[name] + assert "weight" in param, "weight must be in param, but param just contains {}".format( + param.keys()) + self.loss_weight.append(param.pop("weight")) + self.loss_func.append(eval(name)(**param)) + + def __call__(self, input, batch): + loss_dict = {} + # just for accelerate classification traing speed + if len(self.loss_func) == 1: + loss = self.loss_func[0](input, batch) + loss_dict.update(loss) + loss_dict["loss"] = list(loss.values())[0] + else: + for idx, loss_func in enumerate(self.loss_func): + loss = loss_func(input, batch) + weight = self.loss_weight[idx] + loss = {key: loss[key] * weight for key in loss} + loss_dict.update(loss) + loss_dict["loss"] = paddle.add_n(list(loss_dict.values())) + return loss_dict + + 
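CombinedLoss above (and build_loss just below) consume exactly the list-of-dicts shape used in the Loss sections of the configs. A sketch of building and calling it on random stand-in tensors, assuming paddle is installed:

```python
import paddle
from ppcls.loss import build_loss

loss_cfg = [{"CELoss": {"weight": 1.0, "epsilon": 0.1}}]   # same shape as Loss.Train
loss_func = build_loss(loss_cfg)

logits = paddle.randn([8, 1000])                  # stand-in model output
labels = paddle.randint(0, 1000, shape=[8, 1])    # stand-in int64 labels
out = loss_func(logits, labels)
print(out["CELoss"], out["loss"])                 # single-loss case: same tensor under both keys
```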
+def build_loss(config): + module_class = CombinedLoss(copy.deepcopy(config)) + logger.debug("build loss {} success.".format(module_class)) + return module_class diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/loss/celoss.py b/cv/classification/resnet50/paddlepaddle/ppcls/loss/celoss.py new file mode 100644 index 000000000..a78926170 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/loss/celoss.py @@ -0,0 +1,67 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from ppcls.utils import logger + + +class CELoss(nn.Layer): + """ + Cross entropy loss + """ + + def __init__(self, epsilon=None): + super().__init__() + if epsilon is not None and (epsilon <= 0 or epsilon >= 1): + epsilon = None + self.epsilon = epsilon + + def _labelsmoothing(self, target, class_num): + if len(target.shape) == 1 or target.shape[-1] != class_num: + one_hot_target = F.one_hot(target, class_num) + else: + one_hot_target = target + soft_target = F.label_smooth(one_hot_target, epsilon=self.epsilon) + soft_target = paddle.reshape(soft_target, shape=[-1, class_num]) + return soft_target + + def forward(self, x, label): + if isinstance(x, dict): + x = x["logits"] + if self.epsilon is not None: + class_num = x.shape[-1] + label = self._labelsmoothing(label, class_num) + x = -F.log_softmax(x, axis=-1) + loss = paddle.sum(x * label, axis=-1) + else: + if label.shape[-1] == x.shape[-1]: + label = F.softmax(label, axis=-1) + soft_label = True + else: + soft_label = False + loss = F.cross_entropy(x, label=label, soft_label=soft_label) + loss = loss.mean() + return {"CELoss": loss} + + +class MixCELoss(object): + def __init__(self, *args, **kwargs): + msg = "\"MixCELos\" is deprecated, please use \"CELoss\" instead." + logger.error(DeprecationWarning(msg)) + raise DeprecationWarning(msg) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/loss/comfunc.py b/cv/classification/resnet50/paddlepaddle/ppcls/loss/comfunc.py new file mode 100644 index 000000000..277bdd6b5 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/loss/comfunc.py @@ -0,0 +1,45 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
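CELoss above delegates label smoothing to paddle.nn.functional.label_smooth, which mixes each one-hot target with a uniform prior: soft_target = (1 - epsilon) * one_hot + epsilon / class_num. A tiny numeric check of that formula:

```python
import numpy as np

eps, class_num, target = 0.1, 5, 2
one_hot = np.eye(class_num)[target]
soft = (1.0 - eps) * one_hot + eps / class_num
print(soft)   # [0.02 0.02 0.92 0.02 0.02]
```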
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + + +def rerange_index(batch_size, samples_each_class): + tmp = np.arange(0, batch_size * batch_size) + tmp = tmp.reshape(-1, batch_size) + rerange_index = [] + + for i in range(batch_size): + step = i // samples_each_class + start = step * samples_each_class + end = (step + 1) * samples_each_class + + pos_idx = [] + neg_idx = [] + for j, k in enumerate(tmp[i]): + if j >= start and j < end: + if j == i: + pos_idx.insert(0, k) + else: + pos_idx.append(k) + else: + neg_idx.append(k) + rerange_index += (pos_idx + neg_idx) + + rerange_index = np.array(rerange_index).astype(np.int32) + return rerange_index diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/loss/distanceloss.py b/cv/classification/resnet50/paddlepaddle/ppcls/loss/distanceloss.py new file mode 100644 index 000000000..0a09f0cb2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/loss/distanceloss.py @@ -0,0 +1,43 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from paddle.nn import L1Loss +from paddle.nn import MSELoss as L2Loss +from paddle.nn import SmoothL1Loss + + +class DistanceLoss(nn.Layer): + """ + DistanceLoss: + mode: loss mode + """ + + def __init__(self, mode="l2", **kargs): + super().__init__() + assert mode in ["l1", "l2", "smooth_l1"] + if mode == "l1": + self.loss_func = nn.L1Loss(**kargs) + elif mode == "l2": + self.loss_func = nn.MSELoss(**kargs) + elif mode == "smooth_l1": + self.loss_func = nn.SmoothL1Loss(**kargs) + self.mode = mode + + def forward(self, x, y): + loss = self.loss_func(x, y) + return {"loss_{}".format(self.mode): loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/metric/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/metric/__init__.py new file mode 100644 index 000000000..94721235b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/metric/__init__.py @@ -0,0 +1,51 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
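rerange_index above is easiest to read from a concrete run: for every anchor row it puts the anchor's own index first, then the remaining indices of its class, then all the negatives. Assuming the ppcls package from this patch is importable:

```python
from ppcls.loss.comfunc import rerange_index

print(rerange_index(4, 2))   # batch_size=4, samples_each_class=2
# [ 0  1  2  3  5  4  6  7 10 11  8  9 15 14 12 13]
```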
+ +from paddle import nn +import copy +from collections import OrderedDict + +from .metrics import TopkAcc, mAP, mINP, Recallk, Precisionk +from .metrics import DistillationTopkAcc +from .metrics import GoogLeNetTopkAcc +from .metrics import HammingDistance, AccuracyScore + + +class CombinedMetrics(nn.Layer): + def __init__(self, config_list): + super().__init__() + self.metric_func_list = [] + assert isinstance(config_list, list), ( + 'operator config should be a list') + for config in config_list: + assert isinstance(config, + dict) and len(config) == 1, "yaml format error" + metric_name = list(config)[0] + metric_params = config[metric_name] + if metric_params is not None: + self.metric_func_list.append( + eval(metric_name)(**metric_params)) + else: + self.metric_func_list.append(eval(metric_name)()) + + def __call__(self, *args, **kwargs): + metric_dict = OrderedDict() + for idx, metric_func in enumerate(self.metric_func_list): + metric_dict.update(metric_func(*args, **kwargs)) + return metric_dict + + +def build_metrics(config): + metrics_list = CombinedMetrics(copy.deepcopy(config)) + return metrics_list diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/metric/metrics.py b/cv/classification/resnet50/paddlepaddle/ppcls/metric/metrics.py new file mode 100644 index 000000000..03e742082 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/metric/metrics.py @@ -0,0 +1,306 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
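build_metrics above mirrors build_loss: it takes the list-of-dicts from the Metric section of a config and returns a callable that merges every metric's output into one dict. A sketch with the TopkAcc entry used throughout the configs, again assuming paddle is installed:

```python
import paddle
from ppcls.metric import build_metrics

metric_cfg = [{"TopkAcc": {"topk": [1, 5]}}]      # same shape as Metric.Eval
metric_func = build_metrics(metric_cfg)

logits = paddle.randn([8, 1000])
labels = paddle.randint(0, 1000, shape=[8, 1])
print(metric_func(logits, labels))                # OrderedDict with top1 and top5
```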
+ +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from sklearn.metrics import hamming_loss +from sklearn.metrics import accuracy_score as accuracy_metric +from sklearn.metrics import multilabel_confusion_matrix +from sklearn.preprocessing import binarize + + +class TopkAcc(nn.Layer): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + + def forward(self, x, label): + if isinstance(x, dict): + x = x["logits"] + + metric_dict = dict() + for k in self.topk: + metric_dict["top{}".format(k)] = paddle.metric.accuracy( + x, label, k=k) + return metric_dict + + +class mAP(nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=True) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + + num_rel = paddle.sum(equal_flag, axis=1) + num_rel = paddle.greater_than(num_rel, paddle.to_tensor(0.)) + num_rel_index = paddle.nonzero(num_rel.astype("int")) + num_rel_index = paddle.reshape(num_rel_index, [num_rel_index.shape[0]]) + equal_flag = paddle.index_select(equal_flag, num_rel_index, axis=0) + + acc_sum = paddle.cumsum(equal_flag, axis=1) + div = paddle.arange(acc_sum.shape[1]).astype("float32") + 1 + precision = paddle.divide(acc_sum, div) + + #calc map + precision_mask = paddle.multiply(equal_flag, precision) + ap = paddle.sum(precision_mask, axis=1) / paddle.sum(equal_flag, + axis=1) + metric_dict["mAP"] = paddle.mean(ap).numpy()[0] + return metric_dict + + +class mINP(nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=True) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + + num_rel = paddle.sum(equal_flag, axis=1) + num_rel = paddle.greater_than(num_rel, paddle.to_tensor(0.)) + num_rel_index = paddle.nonzero(num_rel.astype("int")) + num_rel_index = paddle.reshape(num_rel_index, [num_rel_index.shape[0]]) + equal_flag = paddle.index_select(equal_flag, num_rel_index, axis=0) + + #do accumulative sum + div = paddle.arange(equal_flag.shape[1]).astype("float32") + 2 + minus = 
paddle.divide(equal_flag, div) + auxilary = paddle.subtract(equal_flag, minus) + hard_index = paddle.argmax(auxilary, axis=1).astype("float32") + all_INP = paddle.divide(paddle.sum(equal_flag, axis=1), hard_index) + mINP = paddle.mean(all_INP) + metric_dict["mINP"] = mINP.numpy()[0] + return metric_dict + + +class Recallk(nn.Layer): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + #get cmc + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=True) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + real_query_num = paddle.sum(equal_flag, axis=1) + real_query_num = paddle.sum( + paddle.greater_than(real_query_num, paddle.to_tensor(0.)).astype( + "float32")) + + acc_sum = paddle.cumsum(equal_flag, axis=1) + mask = paddle.greater_than(acc_sum, + paddle.to_tensor(0.)).astype("float32") + all_cmc = (paddle.sum(mask, axis=0) / real_query_num).numpy() + + for k in self.topk: + metric_dict["recall{}".format(k)] = all_cmc[k - 1] + return metric_dict + + +class Precisionk(nn.Layer): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + #get cmc + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=True) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + + Ns = paddle.arange(gallery_img_id.shape[0]) + 1 + equal_flag_cumsum = paddle.cumsum(equal_flag, axis=1) + Precision_at_k = (paddle.mean(equal_flag_cumsum, axis=0) / Ns).numpy() + + for k in self.topk: + metric_dict["precision@{}".format(k)] = Precision_at_k[k - 1] + + return metric_dict + + +class DistillationTopkAcc(TopkAcc): + def __init__(self, model_key, feature_key=None, topk=(1, 5)): + super().__init__(topk=topk) + self.model_key = model_key + self.feature_key = feature_key + + def forward(self, x, label): + if isinstance(x, dict): + x = x[self.model_key] + if self.feature_key is not None: + x = x[self.feature_key] + return super().forward(x, label) + + +class GoogLeNetTopkAcc(TopkAcc): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, 
(int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + + def forward(self, x, label): + return super().forward(x[0], label) + + +class MutiLabelMetric(object): + def __init__(self): + pass + + def _multi_hot_encode(self, logits, threshold=0.5): + return binarize(logits, threshold=threshold) + + def __call__(self, output): + output = F.sigmoid(output) + preds = self._multi_hot_encode(logits=output.numpy(), threshold=0.5) + return preds + + +class HammingDistance(MutiLabelMetric): + """ + Soft metric based label for multilabel classification + Returns: + The smaller the return value is, the better model is. + """ + + def __init__(self): + super().__init__() + + def __call__(self, output, target): + preds = super().__call__(output) + metric_dict = dict() + metric_dict["HammingDistance"] = paddle.to_tensor( + hamming_loss(target, preds)) + return metric_dict + + +class AccuracyScore(MutiLabelMetric): + """ + Hard metric for multilabel classification + Args: + base: ["sample", "label"], default="sample" + if "sample", return metric score based sample, + if "label", return metric score based label. + Returns: + accuracy: + """ + + def __init__(self, base="label"): + super().__init__() + assert base in ["sample", "label" + ], 'must be one of ["sample", "label"]' + self.base = base + + def __call__(self, output, target): + preds = super().__call__(output) + metric_dict = dict() + if self.base == "sample": + accuracy = accuracy_metric(target, preds) + elif self.base == "label": + mcm = multilabel_confusion_matrix(target, preds) + tns = mcm[:, 0, 0] + fns = mcm[:, 1, 0] + tps = mcm[:, 1, 1] + fps = mcm[:, 0, 1] + accuracy = (sum(tps) + sum(tns)) / ( + sum(tps) + sum(tns) + sum(fns) + sum(fps)) + metric_dict["AccuracyScore"] = paddle.to_tensor(accuracy) + return metric_dict diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/optimizer/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/optimizer/__init__.py new file mode 100644 index 000000000..61db39f89 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/optimizer/__init__.py @@ -0,0 +1,72 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import paddle + +from ppcls.utils import logger + +from . import optimizer + +__all__ = ['build_optimizer'] + + +def build_lr_scheduler(lr_config, epochs, step_each_epoch): + from . 
import learning_rate + lr_config.update({'epochs': epochs, 'step_each_epoch': step_each_epoch}) + if 'name' in lr_config: + lr_name = lr_config.pop('name') + lr = getattr(learning_rate, lr_name)(**lr_config) + if isinstance(lr, paddle.optimizer.lr.LRScheduler): + return lr + else: + return lr() + else: + lr = lr_config['learning_rate'] + return lr + + +# model_list is None in static graph +def build_optimizer(config, epochs, step_each_epoch, model_list=None): + config = copy.deepcopy(config) + # step1 build lr + lr = build_lr_scheduler(config.pop('lr'), epochs, step_each_epoch) + logger.debug("build lr ({}) success..".format(lr)) + # step2 build regularization + if 'regularizer' in config and config['regularizer'] is not None: + if 'weight_decay' in config: + logger.warning( + "ConfigError: Only one of regularizer and weight_decay can be set in Optimizer Config. \"weight_decay\" has been ignored." + ) + reg_config = config.pop('regularizer') + reg_name = reg_config.pop('name') + 'Decay' + reg = getattr(paddle.regularizer, reg_name)(**reg_config) + config["weight_decay"] = reg + logger.debug("build regularizer ({}) success..".format(reg)) + # step3 build optimizer + optim_name = config.pop('name') + if 'clip_norm' in config: + clip_norm = config.pop('clip_norm') + grad_clip = paddle.nn.ClipGradByNorm(clip_norm=clip_norm) + else: + grad_clip = None + optim = getattr(optimizer, optim_name)(learning_rate=lr, + grad_clip=grad_clip, + **config)(model_list=model_list) + logger.debug("build optimizer ({}) success..".format(optim)) + return optim, lr diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/optimizer/learning_rate.py b/cv/classification/resnet50/paddlepaddle/ppcls/optimizer/learning_rate.py new file mode 100644 index 000000000..b59387dd9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/optimizer/learning_rate.py @@ -0,0 +1,326 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from paddle.optimizer import lr +from paddle.optimizer.lr import LRScheduler + +from ppcls.utils import logger + + +class Linear(object): + """ + Linear learning rate decay + Args: + lr (float): The initial learning rate. It is a python float number. + epochs(int): The decay step size. It determines the decay cycle. + end_lr(float, optional): The minimum final learning rate. Default: 0.0001. + power(float, optional): Power of polynomial. Default: 1.0. + warmup_epoch(int): The epoch numbers for LinearWarmup. Default: 0. + warmup_start_lr(float): Initial learning rate of warm up. Default: 0.0. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. 
+ """ + + def __init__(self, + learning_rate, + epochs, + step_each_epoch, + end_lr=0.0, + power=1.0, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + **kwargs): + super().__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.learning_rate = learning_rate + self.steps = (epochs - warmup_epoch) * step_each_epoch + self.end_lr = end_lr + self.power = power + self.last_epoch = last_epoch + self.warmup_steps = round(warmup_epoch * step_each_epoch) + self.warmup_start_lr = warmup_start_lr + + def __call__(self): + learning_rate = lr.PolynomialDecay( + learning_rate=self.learning_rate, + decay_steps=self.steps, + end_lr=self.end_lr, + power=self.power, + last_epoch=self. + last_epoch) if self.steps > 0 else self.learning_rate + if self.warmup_steps > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.learning_rate, + last_epoch=self.last_epoch) + return learning_rate + + +class Cosine(object): + """ + Cosine learning rate decay + lr = 0.05 * (math.cos(epoch * (math.pi / epochs)) + 1) + Args: + lr(float): initial learning rate + step_each_epoch(int): steps each epoch + epochs(int): total training epochs + eta_min(float): Minimum learning rate. Default: 0.0. + warmup_epoch(int): The epoch numbers for LinearWarmup. Default: 0. + warmup_start_lr(float): Initial learning rate of warm up. Default: 0.0. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. + """ + + def __init__(self, + learning_rate, + step_each_epoch, + epochs, + eta_min=0.0, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + **kwargs): + super().__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.learning_rate = learning_rate + self.T_max = (epochs - warmup_epoch) * step_each_epoch + self.eta_min = eta_min + self.last_epoch = last_epoch + self.warmup_steps = round(warmup_epoch * step_each_epoch) + self.warmup_start_lr = warmup_start_lr + + def __call__(self): + learning_rate = lr.CosineAnnealingDecay( + learning_rate=self.learning_rate, + T_max=self.T_max, + eta_min=self.eta_min, + last_epoch=self. + last_epoch) if self.T_max > 0 else self.learning_rate + if self.warmup_steps > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.learning_rate, + last_epoch=self.last_epoch) + return learning_rate + + +class Step(object): + """ + Piecewise learning rate decay + Args: + step_each_epoch(int): steps each epoch + learning_rate (float): The initial learning rate. It is a python float number. + step_size (int): the interval to update. + gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` . + It should be less than 1.0. Default: 0.1. + warmup_epoch(int): The epoch numbers for LinearWarmup. Default: 0. + warmup_start_lr(float): Initial learning rate of warm up. Default: 0.0. + last_epoch (int, optional): The index of last epoch. 
Can be set to restart training. Default: -1, means initial learning rate. + """ + + def __init__(self, + learning_rate, + step_size, + step_each_epoch, + epochs, + gamma, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + **kwargs): + super().__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.step_size = step_each_epoch * step_size + self.learning_rate = learning_rate + self.gamma = gamma + self.last_epoch = last_epoch + self.warmup_steps = round(warmup_epoch * step_each_epoch) + self.warmup_start_lr = warmup_start_lr + + def __call__(self): + learning_rate = lr.StepDecay( + learning_rate=self.learning_rate, + step_size=self.step_size, + gamma=self.gamma, + last_epoch=self.last_epoch) + if self.warmup_steps > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.learning_rate, + last_epoch=self.last_epoch) + return learning_rate + + +class Piecewise(object): + """ + Piecewise learning rate decay + Args: + boundaries(list): A list of steps numbers. The type of element in the list is python int. + values(list): A list of learning rate values that will be picked during different epoch boundaries. + The type of element in the list is python float. + warmup_epoch(int): The epoch numbers for LinearWarmup. Default: 0. + warmup_start_lr(float): Initial learning rate of warm up. Default: 0.0. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. + """ + + def __init__(self, + step_each_epoch, + decay_epochs, + values, + epochs, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + **kwargs): + super().__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.boundaries = [step_each_epoch * e for e in decay_epochs] + self.values = values + self.last_epoch = last_epoch + self.warmup_steps = round(warmup_epoch * step_each_epoch) + self.warmup_start_lr = warmup_start_lr + + def __call__(self): + learning_rate = lr.PiecewiseDecay( + boundaries=self.boundaries, + values=self.values, + last_epoch=self.last_epoch) + if self.warmup_steps > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.values[0], + last_epoch=self.last_epoch) + return learning_rate + + +class MultiStepDecay(LRScheduler): + """ + Update the learning rate by ``gamma`` once ``epoch`` reaches one of the milestones. + The algorithm can be described as the code below. + .. code-block:: text + learning_rate = 0.5 + milestones = [30, 50] + gamma = 0.1 + if epoch < 30: + learning_rate = 0.5 + elif epoch < 50: + learning_rate = 0.05 + else: + learning_rate = 0.005 + Args: + learning_rate (float): The initial learning rate. It is a python float number. + milestones (tuple|list): List or tuple of each boundaries. Must be increasing. + gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` . + It should be less than 1.0. Default: 0.1. 
+ last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. + verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` . + + Returns: + ``MultiStepDecay`` instance to schedule learning rate. + Examples: + + .. code-block:: python + import paddle + import numpy as np + # train on default dynamic graph mode + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) + for epoch in range(20): + for batch_id in range(5): + x = paddle.uniform([10, 10]) + out = linear(x) + loss = paddle.mean(out) + loss.backward() + sgd.step() + sgd.clear_gradients() + scheduler.step() # If you update learning rate each step + # scheduler.step() # If you update learning rate each epoch + # train on static graph mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(5): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() # If you update learning rate each step + # scheduler.step() # If you update learning rate each epoch + """ + + def __init__(self, + learning_rate, + milestones, + epochs, + step_each_epoch, + gamma=0.1, + last_epoch=-1, + verbose=False): + if not isinstance(milestones, (tuple, list)): + raise TypeError( + "The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received %s." + % type(milestones)) + if not all([ + milestones[i] < milestones[i + 1] + for i in range(len(milestones) - 1) + ]): + raise ValueError('The elements of milestones must be incremented') + if gamma >= 1.0: + raise ValueError('gamma should be < 1.0.') + self.milestones = [x * step_each_epoch for x in milestones] + self.gamma = gamma + super().__init__(learning_rate, last_epoch, verbose) + + def get_lr(self): + for i in range(len(self.milestones)): + if self.last_epoch < self.milestones[i]: + return self.base_lr * (self.gamma**i) + return self.base_lr * (self.gamma**len(self.milestones)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/optimizer/optimizer.py b/cv/classification/resnet50/paddlepaddle/ppcls/optimizer/optimizer.py new file mode 100644 index 000000000..4422ea70d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/optimizer/optimizer.py @@ -0,0 +1,217 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from paddle import optimizer as optim +import paddle + +from ppcls.utils import logger + + +class Momentum(object): + """ + Simple Momentum optimizer with velocity state. + Args: + learning_rate (float|Variable) - The learning rate used to update parameters. + Can be a float value or a Variable with one float value as data element. + momentum (float) - Momentum factor. + regularization (WeightDecayRegularizer, optional) - The strategy of regularization. + """ + + def __init__(self, + learning_rate, + momentum, + weight_decay=None, + grad_clip=None, + multi_precision=True): + super().__init__() + self.learning_rate = learning_rate + self.momentum = momentum + self.weight_decay = weight_decay + self.grad_clip = grad_clip + self.multi_precision = multi_precision + + def __call__(self, model_list): + # model_list is None in static graph + parameters = sum([m.parameters() for m in model_list], + []) if model_list else None + opt = optim.Momentum( + learning_rate=self.learning_rate, + momentum=self.momentum, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + multi_precision=self.multi_precision, + parameters=parameters) + if hasattr(opt, '_use_multi_tensor'): + opt = optim.Momentum( + learning_rate=self.learning_rate, + momentum=self.momentum, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + multi_precision=self.multi_precision, + parameters=parameters, + use_multi_tensor=True) + return opt + + +class Adam(object): + def __init__(self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + parameter_list=None, + weight_decay=None, + grad_clip=None, + name=None, + lazy_mode=False, + multi_precision=False): + self.learning_rate = learning_rate + self.beta1 = beta1 + self.beta2 = beta2 + self.epsilon = epsilon + self.parameter_list = parameter_list + self.learning_rate = learning_rate + self.weight_decay = weight_decay + self.grad_clip = grad_clip + self.name = name + self.lazy_mode = lazy_mode + self.multi_precision = multi_precision + + def __call__(self, model_list): + # model_list is None in static graph + parameters = sum([m.parameters() for m in model_list], + []) if model_list else None + opt = optim.Adam( + learning_rate=self.learning_rate, + beta1=self.beta1, + beta2=self.beta2, + epsilon=self.epsilon, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + name=self.name, + lazy_mode=self.lazy_mode, + multi_precision=self.multi_precision, + parameters=parameters) + return opt + + +class RMSProp(object): + """ + Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning rate method. + Args: + learning_rate (float|Variable) - The learning rate used to update parameters. + Can be a float value or a Variable with one float value as data element. + momentum (float) - Momentum factor. + rho (float) - rho value in equation. + epsilon (float) - avoid division by zero, default is 1e-6. + regularization (WeightDecayRegularizer, optional) - The strategy of regularization. 
+ """ + + def __init__(self, + learning_rate, + momentum=0.0, + rho=0.95, + epsilon=1e-6, + weight_decay=None, + grad_clip=None, + multi_precision=False): + super().__init__() + self.learning_rate = learning_rate + self.momentum = momentum + self.rho = rho + self.epsilon = epsilon + self.weight_decay = weight_decay + self.grad_clip = grad_clip + + def __call__(self, model_list): + # model_list is None in static graph + parameters = sum([m.parameters() for m in model_list], + []) if model_list else None + opt = optim.RMSProp( + learning_rate=self.learning_rate, + momentum=self.momentum, + rho=self.rho, + epsilon=self.epsilon, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + parameters=parameters) + return opt + + +class AdamW(object): + def __init__(self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + weight_decay=None, + multi_precision=False, + grad_clip=None, + no_weight_decay_name=None, + one_dim_param_no_weight_decay=False, + **args): + super().__init__() + self.learning_rate = learning_rate + self.beta1 = beta1 + self.beta2 = beta2 + self.epsilon = epsilon + self.grad_clip = grad_clip + self.weight_decay = weight_decay + self.multi_precision = multi_precision + self.no_weight_decay_name_list = no_weight_decay_name.split( + ) if no_weight_decay_name else [] + self.one_dim_param_no_weight_decay = one_dim_param_no_weight_decay + + def __call__(self, model_list): + # model_list is None in static graph + parameters = sum([m.parameters() for m in model_list], + []) if model_list else None + + # TODO(gaotingquan): model_list is None when in static graph, "no_weight_decay" not work. + if model_list is None: + if self.one_dim_param_no_weight_decay or len( + self.no_weight_decay_name_list) != 0: + msg = "\"AdamW\" does not support setting \"no_weight_decay\" in static graph. Please use dynamic graph." + logger.error(Exception(msg)) + raise Exception(msg) + + self.no_weight_decay_param_name_list = [ + p.name for model in model_list for n, p in model.named_parameters() + if any(nd in n for nd in self.no_weight_decay_name_list) + ] if model_list else [] + + if self.one_dim_param_no_weight_decay: + self.no_weight_decay_param_name_list += [ + p.name for model in model_list + for n, p in model.named_parameters() if len(p.shape) == 1 + ] if model_list else [] + + opt = optim.AdamW( + learning_rate=self.learning_rate, + beta1=self.beta1, + beta2=self.beta2, + epsilon=self.epsilon, + parameters=parameters, + weight_decay=self.weight_decay, + multi_precision=self.multi_precision, + grad_clip=self.grad_clip, + apply_decay_param_fun=self._apply_decay_param_fun) + return opt + + def _apply_decay_param_fun(self, name): + return name not in self.no_weight_decay_param_name_list diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/static/program.py b/cv/classification/resnet50/paddlepaddle/ppcls/static/program.py new file mode 100644 index 000000000..b3534a2cf --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/static/program.py @@ -0,0 +1,449 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time +import numpy as np + +from collections import OrderedDict + +import paddle +import paddle.nn.functional as F + +from paddle.distributed import fleet +from paddle.distributed.fleet import DistributedStrategy + +# from ppcls.optimizer import OptimizerBuilder +# from ppcls.optimizer.learning_rate import LearningRateBuilder + +from ppcls.arch import build_model +from ppcls.loss import build_loss +from ppcls.metric import build_metrics +from ppcls.optimizer import build_optimizer +from ppcls.optimizer import build_lr_scheduler + +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger, profiler + + +def create_feeds(image_shape, use_mix=False, class_num=None, dtype="float32"): + """ + Create feeds as model input + + Args: + image_shape(list[int]): model input shape, such as [3, 224, 224] + use_mix(bool): whether to use mix(include mixup, cutmix, fmix) + class_num(int): the class number of network, required if use_mix + + Returns: + feeds(dict): dict of model input variables + """ + feeds = OrderedDict() + feeds['data'] = paddle.static.data( + name="data", shape=[None] + image_shape, dtype=dtype) + + if use_mix: + if class_num is None: + msg = "When use MixUp, CutMix and so on, you must set class_num." + logger.error(msg) + raise Exception(msg) + feeds['target'] = paddle.static.data( + name="target", shape=[None, class_num], dtype="float32") + else: + feeds['label'] = paddle.static.data( + name="label", shape=[None, 1], dtype="int64") + + return feeds + + +def create_fetchs(out, + feeds, + architecture, + topk=5, + epsilon=None, + class_num=None, + use_mix=False, + config=None, + mode="Train"): + """ + Create fetchs as model outputs(included loss and measures), + will call create_loss and create_metric(if use_mix). + Args: + out(variable): model output variable + feeds(dict): dict of model input variables. + If use mix_up, it will not include label. + architecture(dict): architecture information, + name(such as ResNet50) is needed + topk(int): usually top5 + epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0 + class_num(int): the class number of network, required if use_mix + use_mix(bool): whether to use mix(include mixup, cutmix, fmix) + config(dict): model config + + Returns: + fetchs(dict): dict of model outputs(included loss and measures) + """ + fetchs = OrderedDict() + # build loss + if use_mix: + if class_num is None: + msg = "When use MixUp, CutMix and so on, you must set class_num." 
+ logger.error(msg) + raise Exception(msg) + target = paddle.reshape(feeds['target'], [-1, class_num]) + else: + target = paddle.reshape(feeds['label'], [-1, 1]) + + loss_func = build_loss(config["Loss"][mode]) + loss_dict = loss_func(out, target) + + loss_out = loss_dict["loss"] + fetchs['loss'] = (loss_out, AverageMeter('loss', '7.4f', need_avg=True)) + + # build metric + if not use_mix: + metric_func = build_metrics(config["Metric"][mode]) + + metric_dict = metric_func(out, target) + + for key in metric_dict: + if mode != "Train" and paddle.distributed.get_world_size() > 1: + paddle.distributed.all_reduce( + metric_dict[key], op=paddle.distributed.ReduceOp.SUM) + metric_dict[key] = metric_dict[ + key] / paddle.distributed.get_world_size() + + fetchs[key] = (metric_dict[key], AverageMeter( + key, '7.4f', need_avg=True)) + + return fetchs + + +def create_optimizer(config, step_each_epoch): + # create learning_rate instance + optimizer, lr_sch = build_optimizer( + config["Optimizer"], config["Global"]["epochs"], step_each_epoch) + return optimizer, lr_sch + + +def create_strategy(config): + """ + Create build strategy and exec strategy. + + Args: + config(dict): config + + Returns: + build_strategy: build strategy + exec_strategy: exec strategy + """ + build_strategy = paddle.static.BuildStrategy() + exec_strategy = paddle.static.ExecutionStrategy() + + exec_strategy.num_threads = 1 + exec_strategy.num_iteration_per_drop_scope = ( + 10000 + if 'AMP' in config and config.AMP.get("level", "O1") == "O2" else 10) + + fuse_op = True if 'AMP' in config else False + + fuse_bn_act_ops = config.get('fuse_bn_act_ops', fuse_op) + fuse_elewise_add_act_ops = config.get('fuse_elewise_add_act_ops', fuse_op) + fuse_bn_add_act_ops = config.get('fuse_bn_add_act_ops', fuse_op) + enable_addto = config.get('enable_addto', fuse_op) + + build_strategy.fuse_bn_act_ops = fuse_bn_act_ops + build_strategy.fuse_elewise_add_act_ops = fuse_elewise_add_act_ops + build_strategy.fuse_bn_add_act_ops = fuse_bn_add_act_ops + build_strategy.enable_addto = enable_addto + + return build_strategy, exec_strategy + + +def dist_optimizer(config, optimizer): + """ + Create a distributed optimizer based on a normal optimizer + + Args: + config(dict): + optimizer(): a normal optimizer + + Returns: + optimizer: a distributed optimizer + """ + build_strategy, exec_strategy = create_strategy(config) + + dist_strategy = DistributedStrategy() + dist_strategy.execution_strategy = exec_strategy + dist_strategy.build_strategy = build_strategy + + dist_strategy.nccl_comm_num = 1 + dist_strategy.fuse_all_reduce_ops = True + dist_strategy.fuse_grad_size_in_MB = 16 + optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy) + + return optimizer + + +def mixed_precision_optimizer(config, optimizer): + if 'AMP' in config: + amp_cfg = config.AMP if config.AMP else dict() + scale_loss = amp_cfg.get('scale_loss', 1.0) + use_dynamic_loss_scaling = amp_cfg.get('use_dynamic_loss_scaling', + False) + use_pure_fp16 = amp_cfg.get("level", "O1") == "O2" + optimizer = paddle.static.amp.decorate( + optimizer, + init_loss_scaling=scale_loss, + use_dynamic_loss_scaling=use_dynamic_loss_scaling, + use_pure_fp16=use_pure_fp16, + use_fp16_guard=True) + + return optimizer + + +def build(config, + main_prog, + startup_prog, + class_num=None, + step_each_epoch=100, + is_train=True, + is_distributed=True): + """ + Build a program using a model and an optimizer + 1. create feeds + 2. create a dataloader + 3. create a model + 4. create fetchs + 5. 
create an optimizer + + Args: + config(dict): config + main_prog(): main program + startup_prog(): startup program + class_num(int): the class number of network, required if use_mix + is_train(bool): train or eval + is_distributed(bool): whether to use distributed training method + + Returns: + dataloader(): a bridge between the model and the data + fetchs(dict): dict of model outputs(included loss and measures) + """ + with paddle.static.program_guard(main_prog, startup_prog): + with paddle.utils.unique_name.guard(): + mode = "Train" if is_train else "Eval" + use_mix = "batch_transform_ops" in config["DataLoader"][mode][ + "dataset"] + feeds = create_feeds( + config["Global"]["image_shape"], + use_mix, + class_num=class_num, + dtype="float32") + + # build model + # data_format should be assigned in arch-dict + input_image_channel = config["Global"]["image_shape"][ + 0] # default as [3, 224, 224] + model = build_model(config) + out = model(feeds["data"]) + # end of build model + + fetchs = create_fetchs( + out, + feeds, + config["Arch"], + epsilon=config.get('ls_epsilon'), + class_num=class_num, + use_mix=use_mix, + config=config, + mode=mode) + lr_scheduler = None + optimizer = None + if is_train: + optimizer, lr_scheduler = build_optimizer( + config["Optimizer"], config["Global"]["epochs"], + step_each_epoch) + optimizer = mixed_precision_optimizer(config, optimizer) + if is_distributed: + optimizer = dist_optimizer(config, optimizer) + optimizer.minimize(fetchs['loss'][0]) + return fetchs, lr_scheduler, feeds, optimizer + + +def compile(config, program, loss_name=None, share_prog=None): + """ + Compile the program + + Args: + config(dict): config + program(): the program which is wrapped by + loss_name(str): loss name + share_prog(): the shared program, used for evaluation during training + + Returns: + compiled_program(): a compiled program + """ + build_strategy, exec_strategy = create_strategy(config) + + compiled_program = paddle.static.CompiledProgram( + program).with_data_parallel( + share_vars_from=share_prog, + loss_name=loss_name, + build_strategy=build_strategy, + exec_strategy=exec_strategy) + + return compiled_program + + +total_step = 0 + + +def run(dataloader, + exe, + program, + feeds, + fetchs, + epoch=0, + mode='train', + config=None, + vdl_writer=None, + lr_scheduler=None, + profiler_options=None): + """ + Feed data to the model and fetch the measures and loss + + Args: + dataloader(paddle io dataloader): + exe(): + program(): + fetchs(dict): dict of measures and the loss + epoch(int): epoch of training or evaluation + model(str): log only + + Returns: + """ + fetch_list = [f[0] for f in fetchs.values()] + metric_dict = OrderedDict([("lr", AverageMeter( + 'lr', 'f', postfix=",", need_avg=False))]) + + for k in fetchs: + metric_dict[k] = fetchs[k][1] + + metric_dict["batch_time"] = AverageMeter( + 'batch_cost', '.5f', postfix=" s,") + metric_dict["reader_time"] = AverageMeter( + 'reader_cost', '.5f', postfix=" s,") + + for m in metric_dict.values(): + m.reset() + + use_dali = config["Global"].get('use_dali', False) + tic = time.time() + + if not use_dali: + dataloader = dataloader() + + idx = 0 + batch_size = None + while True: + # The DALI maybe raise RuntimeError for some particular images, such as ImageNet1k/n04418357_26036.JPEG + try: + batch = next(dataloader) + except StopIteration: + break + except RuntimeError: + logger.warning( + "Except RuntimeError when reading data from dataloader, try to read once again..." 
+ ) + continue + idx += 1 + # ignore the warmup iters + if idx == 5: + metric_dict["batch_time"].reset() + metric_dict["reader_time"].reset() + + metric_dict['reader_time'].update(time.time() - tic) + + profiler.add_profiler_step(profiler_options) + + if use_dali: + batch_size = batch[0]["data"].shape()[0] + feed_dict = batch[0] + else: + batch_size = batch[0].shape()[0] + feed_dict = { + key.name: batch[idx] + for idx, key in enumerate(feeds.values()) + } + + metrics = exe.run(program=program, + feed=feed_dict, + fetch_list=fetch_list) + + for name, m in zip(fetchs.keys(), metrics): + metric_dict[name].update(np.mean(m), batch_size) + metric_dict["batch_time"].update(time.time() - tic) + if mode == "train": + metric_dict['lr'].update(lr_scheduler.get_lr()) + + fetchs_str = ' '.join([ + str(metric_dict[key].mean) + if "time" in key else str(metric_dict[key].value) + for key in metric_dict + ]) + ips_info = " ips: {:.5f} images/sec.".format( + batch_size / metric_dict["batch_time"].avg) + fetchs_str += ips_info + + if lr_scheduler is not None: + lr_scheduler.step() + + if vdl_writer: + global total_step + logger.scaler('loss', metrics[0][0], total_step, vdl_writer) + total_step += 1 + if mode == 'eval': + if idx % config.get('print_interval', 10) == 0: + logger.info("{:s} step:{:<4d} {:s}".format(mode, idx, + fetchs_str)) + else: + epoch_str = "epoch:{:<3d}".format(epoch) + step_str = "{:s} step:{:<4d}".format(mode, idx) + + if idx % config.get('print_interval', 10) == 0: + logger.info("{:s} {:s} {:s}".format(epoch_str, step_str, + fetchs_str)) + + tic = time.time() + + end_str = ' '.join([str(m.mean) for m in metric_dict.values()] + + [metric_dict["batch_time"].total]) + ips_info = "ips: {:.5f} images/sec.".format(batch_size / + metric_dict["batch_time"].avg) + if mode == 'eval': + logger.info("END {:s} {:s} {:s}".format(mode, end_str, ips_info)) + else: + end_epoch_str = "END epoch:{:<3d}".format(epoch) + logger.info("{:s} {:s} {:s} {:s}".format(end_epoch_str, mode, end_str, + ips_info)) + if use_dali: + dataloader.reset() + + # return top1_acc in order to save the best model + if mode == 'eval': + return fetchs["top1"][1].avg diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/static/save_load.py b/cv/classification/resnet50/paddlepaddle/ppcls/static/save_load.py new file mode 100644 index 000000000..13badfddc --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/static/save_load.py @@ -0,0 +1,139 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
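For reference, program.build above hands optimizer construction to build_optimizer from ppcls/optimizer/__init__.py. A minimal sketch of the config shape it expects and what it returns, assuming the ppcls package in this patch is importable; the key names follow the PaddleClas YAML convention, while the concrete values and the toy Linear model are illustrative only:

import paddle
from ppcls.optimizer import build_optimizer
from ppcls.utils.logger import init_logger

init_logger()  # build_optimizer logs through the ppcls logger

# "lr" becomes a warmup + cosine schedule sized by epochs * step_each_epoch,
# and "regularizer" is mapped to paddle.regularizer.L2Decay and passed on as
# the optimizer's weight_decay.
opt_cfg = {
    "name": "Momentum",
    "momentum": 0.9,
    "lr": {"name": "Cosine", "learning_rate": 0.1, "warmup_epoch": 5},
    "regularizer": {"name": "L2", "coeff": 1e-4},
}

model = paddle.nn.Linear(8, 4)
optimizer, lr_sch = build_optimizer(
    opt_cfg, epochs=90, step_each_epoch=100, model_list=[model])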
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import errno +import os +import re +import shutil +import tempfile + +import paddle + +from ppcls.utils import logger + +__all__ = ['init_model', 'save_model'] + + +def _mkdir_if_not_exist(path): + """ + mkdir if not exists, ignore the exception when multiprocess mkdir together + """ + if not os.path.exists(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno == errno.EEXIST and os.path.isdir(path): + logger.warning( + 'be happy if some process has already created {}'.format( + path)) + else: + raise OSError('Failed to mkdir {}'.format(path)) + + +def _load_state(path): + if os.path.exists(path + '.pdopt'): + # XXX another hack to ignore the optimizer state + tmp = tempfile.mkdtemp() + dst = os.path.join(tmp, os.path.basename(os.path.normpath(path))) + shutil.copy(path + '.pdparams', dst + '.pdparams') + state = paddle.static.load_program_state(dst) + shutil.rmtree(tmp) + else: + state = paddle.static.load_program_state(path) + return state + + +def load_params(exe, prog, path, ignore_params=None): + """ + Load model from the given path. + Args: + exe (fluid.Executor): The fluid.Executor object. + prog (fluid.Program): load weight to which Program object. + path (string): URL string or loca model path. + ignore_params (list): ignore variable to load when finetuning. + It can be specified by finetune_exclude_pretrained_params + and the usage can refer to the document + docs/advanced_tutorials/TRANSFER_LEARNING.md + """ + if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')): + raise ValueError("Model pretrain path {} does not " + "exists.".format(path)) + + logger.info("Loading parameters from {}...".format(path)) + + ignore_set = set() + state = _load_state(path) + + # ignore the parameter which mismatch the shape + # between the model and pretrain weight. 
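+ # (collect every parameter shape in the program first, then drop any pretrained entry whose shape disagrees, so set_program_state below is only fed compatible weights)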
+ all_var_shape = {} + for block in prog.blocks: + for param in block.all_parameters(): + all_var_shape[param.name] = param.shape + ignore_set.update([ + name for name, shape in all_var_shape.items() + if name in state and shape != state[name].shape + ]) + + if ignore_params: + all_var_names = [var.name for var in prog.list_vars()] + ignore_list = filter( + lambda var: any([re.match(name, var) for name in ignore_params]), + all_var_names) + ignore_set.update(list(ignore_list)) + + if len(ignore_set) > 0: + for k in ignore_set: + if k in state: + logger.warning( + 'variable {} is already excluded automatically'.format(k)) + del state[k] + + paddle.static.set_program_state(prog, state) + + +def init_model(config, program, exe): + """ + load model from checkpoint or pretrained_model + """ + checkpoints = config.get('checkpoints') + if checkpoints: + paddle.static.load(program, checkpoints, exe) + logger.info("Finish initing model from {}".format(checkpoints)) + return + + pretrained_model = config.get('pretrained_model') + if pretrained_model: + if not isinstance(pretrained_model, list): + pretrained_model = [pretrained_model] + for pretrain in pretrained_model: + load_params(exe, program, pretrain) + logger.info("Finish initing model from {}".format(pretrained_model)) + + +def save_model(program, model_path, epoch_id, prefix='ppcls'): + """ + save model to the target path + """ + if paddle.distributed.get_rank() != 0: + return + model_path = os.path.join(model_path, str(epoch_id)) + _mkdir_if_not_exist(model_path) + model_prefix = os.path.join(model_path, prefix) + paddle.static.save(program, model_prefix) + logger.info("Already save model in {}".format(model_path)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/static/train.py b/cv/classification/resnet50/paddlepaddle/ppcls/static/train.py new file mode 100644 index 000000000..dd16cdb4c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/static/train.py @@ -0,0 +1,212 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
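The static training entry point that follows drives the init_model/save_model helpers defined above. A minimal round-trip sketch of those helpers on a toy program, assuming the ppcls package in this patch is importable; the fc network, output path and epoch id are illustrative only:

import paddle
from ppcls.static.save_load import init_model, save_model
from ppcls.utils.logger import init_logger

init_logger()
paddle.enable_static()

startup = paddle.static.Program()
main = paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.static.data(name="x", shape=[None, 8], dtype="float32")
    y = paddle.static.nn.fc(x, 4)

exe = paddle.static.Executor(paddle.set_device("cpu"))
exe.run(startup)

# save_model writes <output_dir>/<epoch_id>/ppcls.*; init_model reloads it when
# the config carries a "checkpoints" entry pointing at that prefix.
save_model(main, "./output/demo", 0)
init_model({"checkpoints": "./output/demo/0/ppcls"}, main, exe)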
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(__dir__) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../../'))) + +import paddle +from paddle.distributed import fleet +from visualdl import LogWriter + +from ppcls.data import build_dataloader +from ppcls.utils.config import get_config, print_config +from ppcls.utils import logger +from ppcls.utils.logger import init_logger +from ppcls.static.save_load import init_model, save_model +from ppcls.static import program + + +def parse_args(): + parser = argparse.ArgumentParser("PaddleClas train script") + parser.add_argument( + '-c', + '--config', + type=str, + default='configs/ResNet/ResNet50.yaml', + help='config file path') + parser.add_argument( + '-p', + '--profiler_options', + type=str, + default=None, + help='The option of profiler, which should be in format \"key1=value1;key2=value2;key3=value3\".' + ) + parser.add_argument( + '-o', + '--override', + action='append', + default=[], + help='config options to be overridden') + args = parser.parse_args() + return args + + +def main(args): + """ + all the config of training paradigm should be in config["Global"] + """ + config = get_config(args.config, overrides=args.override, show=False) + global_config = config["Global"] + + mode = "train" + + log_file = os.path.join(global_config['output_dir'], + config["Arch"]["name"], f"{mode}.log") + init_logger(log_file=log_file) + print_config(config) + + if global_config.get("is_distributed", True): + fleet.init(is_collective=True) + # assign the device + use_gpu = global_config.get("use_gpu", True) + # amp related config + if 'AMP' in config: + AMP_RELATED_FLAGS_SETTING = { + 'FLAGS_cudnn_exhaustive_search': 1, + 'FLAGS_conv_workspace_size_limit': 1500, + 'FLAGS_cudnn_batchnorm_spatial_persistent': 1, + 'FLAGS_max_inplace_grad_add': 8, + } + os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1' + paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING) + + use_xpu = global_config.get("use_xpu", False) + use_npu = global_config.get("use_npu", False) + use_mlu = global_config.get("use_mlu", False) + assert ( + use_gpu and use_xpu and use_npu and use_mlu + ) is not True, "gpu, xpu, npu and mlu can not be true in the same time in static mode!" 
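+ # Backend selection: the first enabled flag wins (gpu > xpu > npu > mlu), otherwise fall back to CPU; use_gpu defaults to True above.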
+ + if use_gpu: + device = paddle.set_device('gpu') + elif use_xpu: + device = paddle.set_device('xpu') + elif use_npu: + device = paddle.set_device('npu') + elif use_mlu: + device = paddle.set_device('mlu') + else: + device = paddle.set_device('cpu') + + # visualDL + vdl_writer = None + if global_config["use_visualdl"]: + vdl_dir = os.path.join(global_config["output_dir"], "vdl") + vdl_writer = LogWriter(vdl_dir) + + # build dataloader + eval_dataloader = None + use_dali = global_config.get('use_dali', False) + + class_num = config["Arch"].get("class_num", None) + config["DataLoader"].update({"class_num": class_num}) + train_dataloader = build_dataloader( + config["DataLoader"], "Train", device=device, use_dali=use_dali) + if global_config["eval_during_train"]: + eval_dataloader = build_dataloader( + config["DataLoader"], "Eval", device=device, use_dali=use_dali) + + step_each_epoch = len(train_dataloader) + + # startup_prog is used to do some parameter init work, + # and train prog is used to hold the network + startup_prog = paddle.static.Program() + train_prog = paddle.static.Program() + + best_top1_acc = 0.0 # best top1 acc record + + train_fetchs, lr_scheduler, train_feeds, optimizer = program.build( + config, + train_prog, + startup_prog, + class_num, + step_each_epoch=step_each_epoch, + is_train=True, + is_distributed=global_config.get("is_distributed", True)) + + if global_config["eval_during_train"]: + eval_prog = paddle.static.Program() + eval_fetchs, _, eval_feeds, _ = program.build( + config, + eval_prog, + startup_prog, + is_train=False, + is_distributed=global_config.get("is_distributed", True)) + # clone to prune some content which is irrelevant in eval_prog + eval_prog = eval_prog.clone(for_test=True) + + # create the "Executor" with the statement of which device + exe = paddle.static.Executor(device) + # Parameter initialization + exe.run(startup_prog) + # load pretrained models or checkpoints + init_model(global_config, train_prog, exe) + + if 'AMP' in config and config.AMP.get("level", "O1") == "O2": + optimizer.amp_init( + device, + scope=paddle.static.global_scope(), + test_program=eval_prog + if global_config["eval_during_train"] else None) + + if not global_config.get("is_distributed", True): + compiled_train_prog = program.compile( + config, train_prog, loss_name=train_fetchs["loss"][0].name) + else: + compiled_train_prog = train_prog + + if eval_dataloader is not None: + compiled_eval_prog = program.compile(config, eval_prog) + + for epoch_id in range(global_config["epochs"]): + # 1. train with train dataset + program.run(train_dataloader, exe, compiled_train_prog, train_feeds, + train_fetchs, epoch_id, 'train', config, vdl_writer, + lr_scheduler, args.profiler_options) + # 2. evaate with eval dataset + if global_config["eval_during_train"] and epoch_id % global_config[ + "eval_interval"] == 0: + top1_acc = program.run(eval_dataloader, exe, compiled_eval_prog, + eval_feeds, eval_fetchs, epoch_id, "eval", + config) + if top1_acc > best_top1_acc: + best_top1_acc = top1_acc + message = "The best top1 acc {:.5f}, in epoch: {:d}".format( + best_top1_acc, epoch_id) + logger.info(message) + if epoch_id % global_config["save_interval"] == 0: + + model_path = os.path.join(global_config["output_dir"], + config["Arch"]["name"]) + save_model(train_prog, model_path, "best_model") + + # 3. 
save the persistable model + if epoch_id % global_config["save_interval"] == 0: + model_path = os.path.join(global_config["output_dir"], + config["Arch"]["name"]) + save_model(train_prog, model_path, epoch_id) + + +if __name__ == '__main__': + paddle.enable_static() + args = parse_args() + main(args) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/__init__.py new file mode 100644 index 000000000..632cc7882 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import logger +from . import misc +from . import model_zoo +from . import metrics + +from .save_load import init_model, save_model +from .config import get_config +from .misc import AverageMeter +from .metrics import multi_hot_encode +from .metrics import hamming_distance +from .metrics import accuracy_score +from .metrics import precision_recall_fscore +from .metrics import mean_average_precision diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/check.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/check.py new file mode 100644 index 000000000..bc7030818 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/check.py @@ -0,0 +1,151 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys + +import paddle +from paddle import is_compiled_with_cuda + +from ppcls.arch import get_architectures +from ppcls.arch import similar_architectures +from ppcls.arch import get_blacklist_model_in_static_mode +from ppcls.utils import logger + + +def check_version(): + """ + Log error and exit when the installed version of paddlepaddle is + not satisfied. + """ + err = "PaddlePaddle version 1.8.0 or higher is required, " \ + "or a suitable develop version is satisfied as well. \n" \ + "Please make sure the version is good with your code." + try: + pass + # paddle.utils.require_version('0.0.0') + except Exception: + logger.error(err) + sys.exit(1) + + +def check_gpu(): + """ + Log error and exit when using paddlepaddle cpu version. + """ + err = "You are using paddlepaddle cpu version! Please try to " \ + "install paddlepaddle-gpu to run model on GPU." 
+ + try: + assert is_compiled_with_cuda() + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_architecture(architecture): + """ + check architecture and recommend similar architectures + """ + assert isinstance(architecture, dict), \ + ("the type of architecture({}) should be dict". format(architecture)) + assert "name" in architecture, \ + ("name must be in the architecture keys, just contains: {}". format( + architecture.keys())) + + similar_names = similar_architectures(architecture["name"], + get_architectures()) + model_list = ', '.join(similar_names) + err = "Architecture [{}] is not exist! Maybe you want: [{}]" \ + "".format(architecture["name"], model_list) + try: + assert architecture["name"] in similar_names + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_model_with_running_mode(architecture): + """ + check whether the model is consistent with the operating mode + """ + # some model are not supported in the static mode + blacklist = get_blacklist_model_in_static_mode() + if not paddle.in_dynamic_mode() and architecture["name"] in blacklist: + logger.error("Model: {} is not supported in the staic mode.".format( + architecture["name"])) + sys.exit(1) + return + + +def check_mix(architecture, use_mix=False): + """ + check mix parameter + """ + err = "Cannot use mix processing in GoogLeNet, " \ + "please set use_mix = False." + try: + if architecture["name"] == "GoogLeNet": + assert use_mix is not True + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_classes_num(classes_num): + """ + check classes_num + """ + err = "classes_num({}) should be a positive integer" \ + "and larger than 1".format(classes_num) + try: + assert isinstance(classes_num, int) + assert classes_num > 1 + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_data_dir(path): + """ + check cata_dir + """ + err = "Data path is not exist, please given a right path" \ + "".format(path) + try: + assert os.isdir(path) + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_function_params(config, key): + """ + check specify config + """ + k_config = config.get(key) + assert k_config is not None, \ + ('{} is required in config'.format(key)) + + assert k_config.get('function'), \ + ('function is required {} config'.format(key)) + params = k_config.get('params') + assert params is not None, \ + ('params is required in {} config'.format(key)) + assert isinstance(params, dict), \ + ('the params in {} config should be a dict'.format(key)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/config.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/config.py new file mode 100644 index 000000000..e3277c480 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/config.py @@ -0,0 +1,210 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
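The config module that follows loads YAML into an AttrDict and applies "-o key.subkey=value" overrides by walking the nested structure. A simplified, self-contained sketch of that dotted-key convention (it uses literal_eval rather than eval and skips list indices; all names and values are illustrative):

from ast import literal_eval

def apply_override(cfg, dotted_key, value):
    """Walk a nested dict along "a.b.c" and set the leaf to value."""
    keys = dotted_key.split(".")
    node = cfg
    for k in keys[:-1]:
        node = node[k]
    try:
        value = literal_eval(value)   # "0.05" -> 0.05, "True" -> True
    except (ValueError, SyntaxError):
        pass                          # keep plain strings as-is
    node[keys[-1]] = value

cfg = {"Global": {"epochs": 120}, "Optimizer": {"lr": {"learning_rate": 0.1}}}
apply_override(cfg, "Optimizer.lr.learning_rate", "0.05")
assert cfg["Optimizer"]["lr"]["learning_rate"] == 0.05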
+ +import os +import copy +import argparse +import yaml +from ppcls.utils import logger +from ppcls.utils import check +__all__ = ['get_config'] + + +class AttrDict(dict): + def __getattr__(self, key): + return self[key] + + def __setattr__(self, key, value): + if key in self.__dict__: + self.__dict__[key] = value + else: + self[key] = value + + def __deepcopy__(self, content): + return copy.deepcopy(dict(self)) + + +def create_attr_dict(yaml_config): + from ast import literal_eval + for key, value in yaml_config.items(): + if type(value) is dict: + yaml_config[key] = value = AttrDict(value) + if isinstance(value, str): + try: + value = literal_eval(value) + except BaseException: + pass + if isinstance(value, AttrDict): + create_attr_dict(yaml_config[key]) + else: + yaml_config[key] = value + + +def parse_config(cfg_file): + """Load a config file into AttrDict""" + with open(cfg_file, 'r') as fopen: + yaml_config = AttrDict(yaml.load(fopen, Loader=yaml.SafeLoader)) + create_attr_dict(yaml_config) + return yaml_config + + +def print_dict(d, delimiter=0): + """ + Recursively visualize a dict and + indenting acrrording by the relationship of keys. + """ + placeholder = "-" * 60 + for k, v in sorted(d.items()): + if isinstance(v, dict): + logger.info("{}{} : ".format(delimiter * " ", k)) + print_dict(v, delimiter + 4) + elif isinstance(v, list) and len(v) >= 1 and isinstance(v[0], dict): + logger.info("{}{} : ".format(delimiter * " ", k)) + for value in v: + print_dict(value, delimiter + 4) + else: + logger.info("{}{} : {}".format(delimiter * " ", k, v)) + if k.isupper(): + logger.info(placeholder) + + +def print_config(config): + """ + visualize configs + Arguments: + config: configs + """ + logger.advertise() + print_dict(config) + + +def check_config(config): + """ + Check config + """ + check.check_version() + use_gpu = config.get('use_gpu', True) + if use_gpu: + check.check_gpu() + architecture = config.get('ARCHITECTURE') + #check.check_architecture(architecture) + use_mix = config.get('use_mix', False) + check.check_mix(architecture, use_mix) + classes_num = config.get('classes_num') + check.check_classes_num(classes_num) + mode = config.get('mode', 'train') + if mode.lower() == 'train': + check.check_function_params(config, 'LEARNING_RATE') + check.check_function_params(config, 'OPTIMIZER') + + +def override(dl, ks, v): + """ + Recursively replace dict of list + Args: + dl(dict or list): dict or list to be replaced + ks(list): list of keys + v(str): value to be replaced + """ + + def str2num(v): + try: + return eval(v) + except Exception: + return v + + assert isinstance(dl, (list, dict)), ("{} should be a list or a dict") + assert len(ks) > 0, ('lenght of keys should larger than 0') + if isinstance(dl, list): + k = str2num(ks[0]) + if len(ks) == 1: + assert k < len(dl), ('index({}) out of range({})'.format(k, dl)) + dl[k] = str2num(v) + else: + override(dl[k], ks[1:], v) + else: + if len(ks) == 1: + # assert ks[0] in dl, ('{} is not exist in {}'.format(ks[0], dl)) + if not ks[0] in dl: + print('A new filed ({}) detected!'.format(ks[0], dl)) + dl[ks[0]] = str2num(v) + else: + override(dl[ks[0]], ks[1:], v) + + +def override_config(config, options=None): + """ + Recursively override the config + Args: + config(dict): dict to be replaced + options(list): list of pairs(key0.key1.idx.key2=value) + such as: [ + 'topk=2', + 'VALID.transforms.1.ResizeImage.resize_short=300' + ] + Returns: + config(dict): replaced config + """ + if options is not None: + for opt in options: + assert 
isinstance(opt, str), ( + "option({}) should be a str".format(opt)) + assert "=" in opt, ( + "option({}) should contain a =" + "to distinguish between key and value".format(opt)) + pair = opt.split('=') + assert len(pair) == 2, ("there can be only a = in the option") + key, value = pair + keys = key.split('.') + override(config, keys, value) + return config + + +def get_config(fname, overrides=None, show=False): + """ + Read config from file + """ + assert os.path.exists(fname), ( + 'config file({}) is not exist'.format(fname)) + config = parse_config(fname) + override_config(config, overrides) + if show: + print_config(config) + # check_config(config) + return config + + +def parse_args(): + parser = argparse.ArgumentParser("generic-image-rec train script") + parser.add_argument( + '-c', + '--config', + type=str, + default='configs/config.yaml', + help='config file path') + parser.add_argument( + '-o', + '--override', + action='append', + default=[], + help='config options to be overridden') + parser.add_argument( + '-p', + '--profiler_options', + type=str, + default=None, + help='The option of profiler, which should be in format \"key1=value1;key2=value2;key3=value3\".' + ) + args = parser.parse_args() + return args diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/download.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/download.py new file mode 100644 index 000000000..9c4575048 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/download.py @@ -0,0 +1,319 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys +import os.path as osp +import shutil +import requests +import hashlib +import tarfile +import zipfile +import time +from collections import OrderedDict +from tqdm import tqdm + +from ppcls.utils import logger + +__all__ = ['get_weights_path_from_url'] + +WEIGHTS_HOME = osp.expanduser("~/.paddleclas/weights") + +DOWNLOAD_RETRY_LIMIT = 3 + + +def is_url(path): + """ + Whether path is URL. + Args: + path (string): URL string or not. + """ + return path.startswith('http://') or path.startswith('https://') + + +def get_weights_path_from_url(url, md5sum=None): + """Get weights path from WEIGHT_HOME, if not exists, + download it from url. + + Args: + url (str): download url + md5sum (str): md5 sum of download package + + Returns: + str: a local path to save downloaded weights. + + Examples: + .. 
code-block:: python + + from paddle.utils.download import get_weights_path_from_url + + resnet18_pretrained_weight_url = 'https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams' + local_weight_path = get_weights_path_from_url(resnet18_pretrained_weight_url) + + """ + path = get_path_from_url(url, WEIGHTS_HOME, md5sum) + return path + + +def _map_path(url, root_dir): + # parse path after download under root_dir + fname = osp.split(url)[-1] + fpath = fname + return osp.join(root_dir, fpath) + + +def _get_unique_endpoints(trainer_endpoints): + # Sorting is to avoid different environmental variables for each card + trainer_endpoints.sort() + ips = set() + unique_endpoints = set() + for endpoint in trainer_endpoints: + ip = endpoint.split(":")[0] + if ip in ips: + continue + ips.add(ip) + unique_endpoints.add(endpoint) + logger.info("unique_endpoints {}".format(unique_endpoints)) + return unique_endpoints + + +def get_path_from_url(url, + root_dir, + md5sum=None, + check_exist=True, + decompress=True): + """ Download from given url to root_dir. + if file or directory specified by url is exists under + root_dir, return the path directly, otherwise download + from url and decompress it, return the path. + + Args: + url (str): download url + root_dir (str): root dir for downloading, it should be + WEIGHTS_HOME or DATASET_HOME + md5sum (str): md5 sum of download package + + Returns: + str: a local path to save downloaded models & weights & datasets. + """ + + from paddle.fluid.dygraph.parallel import ParallelEnv + + assert is_url(url), "downloading from {} not a url".format(url) + # parse path after download to decompress under root_dir + fullpath = _map_path(url, root_dir) + # Mainly used to solve the problem of downloading data from different + # machines in the case of multiple machines. Different ips will download + # data, and the same ip will only download data once. + unique_endpoints = _get_unique_endpoints(ParallelEnv() + .trainer_endpoints[:]) + if osp.exists(fullpath) and check_exist and _md5check(fullpath, md5sum): + logger.info("Found {}".format(fullpath)) + else: + if ParallelEnv().current_endpoint in unique_endpoints: + fullpath = _download(url, root_dir, md5sum) + else: + while not os.path.exists(fullpath): + time.sleep(1) + + if ParallelEnv().current_endpoint in unique_endpoints: + if decompress and (tarfile.is_tarfile(fullpath) or + zipfile.is_zipfile(fullpath)): + fullpath = _decompress(fullpath) + + return fullpath + + +def _download(url, path, md5sum=None): + """ + Download from url, save to path. + + url (str): download url + path (str): download to given path + """ + if not osp.exists(path): + os.makedirs(path) + + fname = osp.split(url)[-1] + fullname = osp.join(path, fname) + retry_cnt = 0 + + while not (osp.exists(fullname) and _md5check(fullname, md5sum)): + if retry_cnt < DOWNLOAD_RETRY_LIMIT: + retry_cnt += 1 + else: + raise RuntimeError("Download from {} failed. " + "Retry limit reached".format(url)) + + logger.info("Downloading {} from {}".format(fname, url)) + + try: + req = requests.get(url, stream=True) + except Exception as e: # requests.exceptions.ConnectionError + logger.info( + "Downloading {} from {} failed {} times with exception {}". 
+ format(fname, url, retry_cnt + 1, str(e))) + time.sleep(1) + continue + + if req.status_code != 200: + raise RuntimeError("Downloading from {} failed with code " + "{}!".format(url, req.status_code)) + + # For protecting download interupted, download to + # tmp_fullname firstly, move tmp_fullname to fullname + # after download finished + tmp_fullname = fullname + "_tmp" + total_size = req.headers.get('content-length') + with open(tmp_fullname, 'wb') as f: + if total_size: + with tqdm(total=(int(total_size) + 1023) // 1024) as pbar: + for chunk in req.iter_content(chunk_size=1024): + f.write(chunk) + pbar.update(1) + else: + for chunk in req.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + shutil.move(tmp_fullname, fullname) + + return fullname + + +def _md5check(fullname, md5sum=None): + if md5sum is None: + return True + + logger.info("File {} md5 checking...".format(fullname)) + md5 = hashlib.md5() + with open(fullname, 'rb') as f: + for chunk in iter(lambda: f.read(4096), b""): + md5.update(chunk) + calc_md5sum = md5.hexdigest() + + if calc_md5sum != md5sum: + logger.info("File {} md5 check failed, {}(calc) != " + "{}(base)".format(fullname, calc_md5sum, md5sum)) + return False + return True + + +def _decompress(fname): + """ + Decompress for zip and tar file + """ + logger.info("Decompressing {}...".format(fname)) + + # For protecting decompressing interupted, + # decompress to fpath_tmp directory firstly, if decompress + # successed, move decompress files to fpath and delete + # fpath_tmp and remove download compress file. + + if tarfile.is_tarfile(fname): + uncompressed_path = _uncompress_file_tar(fname) + elif zipfile.is_zipfile(fname): + uncompressed_path = _uncompress_file_zip(fname) + else: + raise TypeError("Unsupport compress file type {}".format(fname)) + + return uncompressed_path + + +def _uncompress_file_zip(filepath): + files = zipfile.ZipFile(filepath, 'r') + file_list = files.namelist() + + file_dir = os.path.dirname(filepath) + + if _is_a_single_file(file_list): + rootpath = file_list[0] + uncompressed_path = os.path.join(file_dir, rootpath) + + for item in file_list: + files.extract(item, file_dir) + + elif _is_a_single_dir(file_list): + rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + + for item in file_list: + files.extract(item, file_dir) + + else: + rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + if not os.path.exists(uncompressed_path): + os.makedirs(uncompressed_path) + for item in file_list: + files.extract(item, os.path.join(file_dir, rootpath)) + + files.close() + + return uncompressed_path + + +def _uncompress_file_tar(filepath, mode="r:*"): + files = tarfile.open(filepath, mode) + file_list = files.getnames() + + file_dir = os.path.dirname(filepath) + + if _is_a_single_file(file_list): + rootpath = file_list[0] + uncompressed_path = os.path.join(file_dir, rootpath) + for item in file_list: + files.extract(item, file_dir) + elif _is_a_single_dir(file_list): + rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + for item in file_list: + files.extract(item, file_dir) + else: + rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + if not os.path.exists(uncompressed_path): + os.makedirs(uncompressed_path) + + for item in file_list: + files.extract(item, os.path.join(file_dir, 
rootpath)) + + files.close() + + return uncompressed_path + + +def _is_a_single_file(file_list): + if len(file_list) == 1 and file_list[0].find(os.sep) < -1: + return True + return False + + +def _is_a_single_dir(file_list): + new_file_list = [] + for file_path in file_list: + if '/' in file_path: + file_path = file_path.replace('/', os.sep) + elif '\\' in file_path: + file_path = file_path.replace('\\', os.sep) + new_file_list.append(file_path) + + file_name = new_file_list[0].split(os.sep)[0] + for i in range(1, len(new_file_list)): + if file_name != new_file_list[i].split(os.sep)[0]: + return False + return True diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/ema.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/ema.py new file mode 100644 index 000000000..b54cdb1b2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/ema.py @@ -0,0 +1,63 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import numpy as np + + +class ExponentialMovingAverage(): + """ + Exponential Moving Average + Code was heavily based on https://github.com/Wanger-SJTU/SegToolbox.Pytorch/blob/master/lib/utils/ema.py + """ + + def __init__(self, model, decay, thres_steps=True): + self._model = model + self._decay = decay + self._thres_steps = thres_steps + self._shadow = {} + self._backup = {} + + def register(self): + self._update_step = 0 + for name, param in self._model.named_parameters(): + if param.stop_gradient is False: + self._shadow[name] = param.numpy().copy() + + def update(self): + decay = min(self._decay, (1 + self._update_step) / ( + 10 + self._update_step)) if self._thres_steps else self._decay + for name, param in self._model.named_parameters(): + if param.stop_gradient is False: + assert name in self._shadow + new_val = np.array(param.numpy().copy()) + old_val = np.array(self._shadow[name]) + new_average = decay * old_val + (1 - decay) * new_val + self._shadow[name] = new_average + self._update_step += 1 + return decay + + def apply(self): + for name, param in self._model.named_parameters(): + if param.stop_gradient is False: + assert name in self._shadow + self._backup[name] = np.array(param.numpy().copy()) + param.set_value(np.array(self._shadow[name])) + + def restore(self): + for name, param in self._model.named_parameters(): + if param.stop_gradient is False: + assert name in self._backup + param.set_value(self._backup[name]) + self._backup = {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/feature_maps_visualization/fm_vis.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/feature_maps_visualization/fm_vis.py new file mode 100644 index 000000000..a5368b10e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/feature_maps_visualization/fm_vis.py @@ -0,0 +1,97 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy as np +import cv2 +import utils +import argparse +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(__dir__) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../../..'))) + +import paddle +from paddle.distributed import ParallelEnv + +from resnet import ResNet50 +from ppcls.utils.save_load import load_dygraph_pretrain + + +def parse_args(): + def str2bool(v): + return v.lower() in ("true", "t", "1") + + parser = argparse.ArgumentParser() + parser.add_argument("-i", "--image_file", required=True, type=str) + parser.add_argument("-c", "--channel_num", type=int) + parser.add_argument("-p", "--pretrained_model", type=str) + parser.add_argument("--show", type=str2bool, default=False) + parser.add_argument("--interpolation", type=int, default=1) + parser.add_argument("--save_path", type=str, default=None) + parser.add_argument("--use_gpu", type=str2bool, default=True) + + return parser.parse_args() + + +def create_operators(interpolation=1): + size = 224 + img_mean = [0.485, 0.456, 0.406] + img_std = [0.229, 0.224, 0.225] + img_scale = 1.0 / 255.0 + + resize_op = utils.ResizeImage( + resize_short=256, interpolation=interpolation) + crop_op = utils.CropImage(size=(size, size)) + normalize_op = utils.NormalizeImage( + scale=img_scale, mean=img_mean, std=img_std) + totensor_op = utils.ToTensor() + + return [resize_op, crop_op, normalize_op, totensor_op] + + +def preprocess(data, ops): + for op in ops: + data = op(data) + return data + + +def main(): + args = parse_args() + operators = create_operators(args.interpolation) + # assign the place + place = 'gpu:{}'.format(ParallelEnv().dev_id) if args.use_gpu else 'cpu' + place = paddle.set_device(place) + + net = ResNet50() + load_dygraph_pretrain(net, args.pretrained_model) + + img = cv2.imread(args.image_file, cv2.IMREAD_COLOR) + data = preprocess(img, operators) + data = np.expand_dims(data, axis=0) + data = paddle.to_tensor(data) + net.eval() + _, fm = net(data) + assert args.channel_num >= 0 and args.channel_num <= fm.shape[ + 1], "the channel is out of the range, should be in {} but got {}".format( + [0, fm.shape[1]], args.channel_num) + + fm = (np.squeeze(fm[0][args.channel_num].numpy()) * 255).astype(np.uint8) + fm = cv2.resize(fm, (img.shape[1], img.shape[0])) + if args.save_path is not None: + print("the feature map is saved in path: {}".format(args.save_path)) + cv2.imwrite(args.save_path, fm) + + +if __name__ == "__main__": + main() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/feature_maps_visualization/resnet.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/feature_maps_visualization/resnet.py new file mode 100644 index 000000000..b75881414 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/feature_maps_visualization/resnet.py @@ -0,0 +1,535 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math + +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "ResNet18": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams", + "ResNet18_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_vd_pretrained.pdparams", + "ResNet34": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_pretrained.pdparams", + "ResNet34_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_vd_pretrained.pdparams", + "ResNet50": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams", + "ResNet50_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams", + "ResNet101": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_pretrained.pdparams", + "ResNet101_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_pretrained.pdparams", + "ResNet152": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_pretrained.pdparams", + "ResNet152_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_vd_pretrained.pdparams", + "ResNet200_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet200_vd_pretrained.pdparams", +} + +__all__ = MODEL_URLS.keys() +''' +ResNet config: dict. + key: depth of ResNet. + values: config's dict of specific model. + keys: + block_type: Two different blocks in ResNet, BasicBlock and BottleneckBlock are optional. + block_depth: The number of blocks in different stages in ResNet. + num_channels: The number of channels to enter the next stage. 
+''' +NET_CONFIG = { + "18": { + "block_type": "BasicBlock", + "block_depth": [2, 2, 2, 2], + "num_channels": [64, 64, 128, 256] + }, + "34": { + "block_type": "BasicBlock", + "block_depth": [3, 4, 6, 3], + "num_channels": [64, 64, 128, 256] + }, + "50": { + "block_type": "BottleneckBlock", + "block_depth": [3, 4, 6, 3], + "num_channels": [64, 256, 512, 1024] + }, + "101": { + "block_type": "BottleneckBlock", + "block_depth": [3, 4, 23, 3], + "num_channels": [64, 256, 512, 1024] + }, + "152": { + "block_type": "BottleneckBlock", + "block_depth": [3, 8, 36, 3], + "num_channels": [64, 256, 512, 1024] + }, + "200": { + "block_type": "BottleneckBlock", + "block_depth": [3, 12, 48, 3], + "num_channels": [64, 256, 512, 1024] + }, +} + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + self.is_vd_mode = is_vd_mode + self.act = act + self.avg_pool = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=False, + data_format=data_format) + self.bn = BatchNorm( + num_filters, + param_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=ParamAttr(learning_rate=lr_mult), + data_layout=data_format) + self.relu = nn.ReLU() + + def forward(self, x): + if self.is_vd_mode: + x = self.avg_pool(x) + x = self.conv(x) + x = self.bn(x) + if self.act: + x = self.relu(x) + return x + + +class BottleneckBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + lr_mult=lr_mult, + data_format=data_format) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride if if_first else 1, + is_vd_mode=False if if_first else True, + lr_mult=lr_mult, + data_format=data_format) + self.relu = nn.ReLU() + self.shortcut = shortcut + + def forward(self, x): + identity = x + x = self.conv0(x) + x = self.conv1(x) + x = self.conv2(x) + + if self.shortcut: + short = identity + else: + short = self.short(identity) + x = paddle.add(x=x, y=short) + x = self.relu(x) + return x + + +class BasicBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + + self.stride = stride + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + act=None, + lr_mult=lr_mult, + data_format=data_format) + if not shortcut: + self.short = ConvBNLayer( + 
num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=stride if if_first else 1, + is_vd_mode=False if if_first else True, + lr_mult=lr_mult, + data_format=data_format) + self.shortcut = shortcut + self.relu = nn.ReLU() + + def forward(self, x): + identity = x + x = self.conv0(x) + x = self.conv1(x) + if self.shortcut: + short = identity + else: + short = self.short(identity) + x = paddle.add(x=x, y=short) + x = self.relu(x) + return x + + +class ResNet(TheseusLayer): + """ + ResNet + Args: + config: dict. config of ResNet. + version: str="vb". Different version of ResNet, version vd can perform better. + class_num: int=1000. The number of classes. + lr_mult_list: list. Control the learning rate of different stages. + Returns: + model: nn.Layer. Specific ResNet model depends on args. + """ + + def __init__(self, + config, + version="vb", + class_num=1000, + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0], + data_format="NCHW", + input_image_channel=3, + return_patterns=None): + super().__init__() + + self.cfg = config + self.lr_mult_list = lr_mult_list + self.is_vd_mode = version == "vd" + self.class_num = class_num + self.num_filters = [64, 128, 256, 512] + self.block_depth = self.cfg["block_depth"] + self.block_type = self.cfg["block_type"] + self.num_channels = self.cfg["num_channels"] + self.channels_mult = 1 if self.num_channels[-1] == 256 else 4 + + assert isinstance(self.lr_mult_list, ( + list, tuple + )), "lr_mult_list should be in (list, tuple) but got {}".format( + type(self.lr_mult_list)) + assert len(self.lr_mult_list + ) == 5, "lr_mult_list length should be 5 but got {}".format( + len(self.lr_mult_list)) + + self.stem_cfg = { + #num_channels, num_filters, filter_size, stride + "vb": [[input_image_channel, 64, 7, 2]], + "vd": + [[input_image_channel, 32, 3, 2], [32, 32, 3, 1], [32, 64, 3, 1]] + } + + self.stem = nn.Sequential(* [ + ConvBNLayer( + num_channels=in_c, + num_filters=out_c, + filter_size=k, + stride=s, + act="relu", + lr_mult=self.lr_mult_list[0], + data_format=data_format) + for in_c, out_c, k, s in self.stem_cfg[version] + ]) + + self.max_pool = MaxPool2D( + kernel_size=3, stride=2, padding=1, data_format=data_format) + block_list = [] + for block_idx in range(len(self.block_depth)): + shortcut = False + for i in range(self.block_depth[block_idx]): + block_list.append(globals()[self.block_type]( + num_channels=self.num_channels[block_idx] if i == 0 else + self.num_filters[block_idx] * self.channels_mult, + num_filters=self.num_filters[block_idx], + stride=2 if i == 0 and block_idx != 0 else 1, + shortcut=shortcut, + if_first=block_idx == i == 0 if version == "vd" else True, + lr_mult=self.lr_mult_list[block_idx + 1], + data_format=data_format)) + shortcut = True + self.blocks = nn.Sequential(*block_list) + + self.avg_pool = AdaptiveAvgPool2D(1, data_format=data_format) + self.flatten = nn.Flatten() + self.avg_pool_channels = self.num_channels[-1] * 2 + stdv = 1.0 / math.sqrt(self.avg_pool_channels * 1.0) + self.fc = Linear( + self.avg_pool_channels, + self.class_num, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + + self.data_format = data_format + if return_patterns is not None: + self.update_res(return_patterns) + self.register_forward_post_hook(self._return_dict_hook) + + def forward(self, x): + with paddle.static.amp.fp16_guard(): + if self.data_format == "NHWC": + x = paddle.transpose(x, [0, 2, 3, 1]) + x.stop_gradient = True + x = self.stem(x) + fm = x + x = self.max_pool(x) + x = self.blocks(x) + x = self.avg_pool(x) + x = 
self.flatten(x) + x = self.fc(x) + return x, fm + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNet18(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet18 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet18` model depends on args. + """ + model = ResNet(config=NET_CONFIG["18"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18"], use_ssld) + return model + + +def ResNet18_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet18_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet18_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["18"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18_vd"], use_ssld) + return model + + +def ResNet34(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet34 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet34` model depends on args. + """ + model = ResNet(config=NET_CONFIG["34"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet34"], use_ssld) + return model + + +def ResNet34_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet34_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet34_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["34"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet34_vd"], use_ssld) + return model + + +def ResNet50(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet50 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet50` model depends on args. + """ + model = ResNet(config=NET_CONFIG["50"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50"], use_ssld) + return model + + +def ResNet50_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet50_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. 
+ Returns: + model: nn.Layer. Specific `ResNet50_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["50"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50_vd"], use_ssld) + return model + + +def ResNet101(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet101 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet101` model depends on args. + """ + model = ResNet(config=NET_CONFIG["101"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet101"], use_ssld) + return model + + +def ResNet101_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet101_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet101_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["101"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet101_vd"], use_ssld) + return model + + +def ResNet152(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet152 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet152` model depends on args. + """ + model = ResNet(config=NET_CONFIG["152"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet152"], use_ssld) + return model + + +def ResNet152_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet152_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet152_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["152"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet152_vd"], use_ssld) + return model + + +def ResNet200_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet200_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet200_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["200"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet200_vd"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/feature_maps_visualization/utils.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/feature_maps_visualization/utils.py new file mode 100644 index 000000000..7c7014932 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/feature_maps_visualization/utils.py @@ -0,0 +1,85 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
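As a brief aside before utils.py, here is a hedged usage sketch for the ResNet factory functions above; the local weights path is a placeholder, and note that this feature-map-visualization variant of ResNet returns both the logits and the stem feature map from forward().

import paddle

# Sketch only: construct ResNet50 variants via the factories defined above.
net = ResNet50(pretrained=False, class_num=1000)    # random init, "vb" version
# net = ResNet50_vd(pretrained=True)                # downloads MODEL_URLS["ResNet50_vd"]
# net = ResNet50(pretrained="ResNet50.pdparams")    # placeholder path to local weights
net.eval()

x = paddle.rand([1, 3, 224, 224])
logits, stem_fm = net(x)                            # [1, class_num] logits plus the stem feature map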
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import cv2 +import numpy as np + + +class DecodeImage(object): + def __init__(self, to_rgb=True): + self.to_rgb = to_rgb + + def __call__(self, img): + data = np.frombuffer(img, dtype='uint8') + img = cv2.imdecode(data, 1) + if self.to_rgb: + assert img.shape[2] == 3, 'invalid shape of image[%s]' % ( + img.shape) + img = img[:, :, ::-1] + + return img + + +class ResizeImage(object): + def __init__(self, resize_short=None, interpolation=1): + self.resize_short = resize_short + self.interpolation = interpolation + + def __call__(self, img): + img_h, img_w = img.shape[:2] + percent = float(self.resize_short) / min(img_w, img_h) + w = int(round(img_w * percent)) + h = int(round(img_h * percent)) + return cv2.resize(img, (w, h), interpolation=self.interpolation) + + +class CropImage(object): + def __init__(self, size): + if type(size) is int: + self.size = (size, size) + else: + self.size = size + + def __call__(self, img): + w, h = self.size + img_h, img_w = img.shape[:2] + w_start = (img_w - w) // 2 + h_start = (img_h - h) // 2 + + w_end = w_start + w + h_end = h_start + h + return img[h_start:h_end, w_start:w_end, :] + + +class NormalizeImage(object): + def __init__(self, scale=None, mean=None, std=None): + self.scale = np.float32(scale if scale is not None else 1.0 / 255.0) + mean = mean if mean is not None else [0.485, 0.456, 0.406] + std = std if std is not None else [0.229, 0.224, 0.225] + + shape = (1, 1, 3) + self.mean = np.array(mean).reshape(shape).astype('float32') + self.std = np.array(std).reshape(shape).astype('float32') + + def __call__(self, img): + return (img.astype('float32') * self.scale - self.mean) / self.std + + +class ToTensor(object): + def __init__(self): + pass + + def __call__(self, img): + img = img.transpose((2, 0, 1)) + return img diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/gallery2fc.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/gallery2fc.py new file mode 100644 index 000000000..67b08529e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/gallery2fc.py @@ -0,0 +1,119 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
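For orientation, a minimal sketch of how the preprocessing operators defined in feature_maps_visualization/utils.py above compose, mirroring create_operators/preprocess in fm_vis.py earlier in this patch (the image path is a placeholder).

import cv2

# Sketch: resize short side, center crop, normalize, then HWC -> CHW.
ops = [
    ResizeImage(resize_short=256, interpolation=1),
    CropImage(size=(224, 224)),
    NormalizeImage(scale=1.0 / 255.0,
                   mean=[0.485, 0.456, 0.406],
                   std=[0.229, 0.224, 0.225]),
    ToTensor(),
]

img = cv2.imread("demo.jpg", cv2.IMREAD_COLOR)  # placeholder image path
for op in ops:
    img = op(img)
# img is now a float32 CHW array, ready for np.expand_dims + paddle.to_tensor.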
+ +import os +import paddle +import cv2 + +from ppcls.arch import build_model +from ppcls.utils.config import parse_config, parse_args +from ppcls.utils.save_load import load_dygraph_pretrain +from ppcls.utils.logger import init_logger +from ppcls.data import create_operators +from ppcls.arch.slim import quantize_model + + +class GalleryLayer(paddle.nn.Layer): + def __init__(self, configs): + super().__init__() + self.configs = configs + embedding_size = self.configs["Arch"]["Head"]["embedding_size"] + self.batch_size = self.configs["IndexProcess"]["batch_size"] + self.image_shape = self.configs["Global"]["image_shape"].copy() + self.image_shape.insert(0, self.batch_size) + + image_root = self.configs["IndexProcess"]["image_root"] + data_file = self.configs["IndexProcess"]["data_file"] + delimiter = self.configs["IndexProcess"]["delimiter"] + self.gallery_images = [] + gallery_docs = [] + gallery_labels = [] + + with open(data_file, 'r', encoding='utf-8') as f: + lines = f.readlines() + for ori_line in lines: + line = ori_line.strip().split(delimiter) + text_num = len(line) + assert text_num >= 2, f"line({ori_line}) must be splitted into at least 2 parts, but got {text_num}" + image_file = os.path.join(image_root, line[0]) + + self.gallery_images.append(image_file) + gallery_docs.append(ori_line.strip()) + gallery_labels.append(line[1].strip()) + self.gallery_layer = paddle.nn.Linear(embedding_size, len(self.gallery_images), bias_attr=False) + self.gallery_layer.skip_quant = True + output_label_str = "" + for i, label_i in enumerate(gallery_labels): + output_label_str += "{} {}\n".format(i, label_i) + output_path = configs["Global"]["save_inference_dir"] + "_label.txt" + with open(output_path, "w") as f: + f.write(output_label_str) + + def forward(self, x, label=None): + x = paddle.nn.functional.normalize(x) + x = self.gallery_layer(x) + return x + + def build_gallery_layer(self, feature_extractor): + transform_configs = self.configs["IndexProcess"]["transform_ops"] + preprocess_ops = create_operators(transform_configs) + embedding_size = self.configs["Arch"]["Head"]["embedding_size"] + batch_index = 0 + input_tensor = paddle.zeros(self.image_shape) + gallery_feature = paddle.zeros((len(self.gallery_images), embedding_size)) + for i, image_path in enumerate(self.gallery_images): + image = cv2.imread(image_path)[:, :, ::-1] + for op in preprocess_ops: + image = op(image) + input_tensor[batch_index] = image + batch_index += 1 + if batch_index == self.batch_size or i == len(self.gallery_images) - 1: + batch_feature = feature_extractor(input_tensor)["features"] + for j in range(batch_index): + feature = batch_feature[j] + norm_feature = paddle.nn.functional.normalize(feature, axis=0) + gallery_feature[i - batch_index + j + 1] = norm_feature + self.gallery_layer.set_state_dict({"_layer.weight": gallery_feature.T}) + + +def export_fuse_model(configs): + slim_config = configs["Slim"].copy() + configs["Slim"] = None + fuse_model = build_model(configs) + fuse_model.head = GalleryLayer(configs) + configs["Slim"] = slim_config + quantize_model(configs, fuse_model) + load_dygraph_pretrain(fuse_model, configs["Global"]["pretrained_model"]) + fuse_model.eval() + fuse_model.head.build_gallery_layer(fuse_model) + save_path = configs["Global"]["save_inference_dir"] + fuse_model.quanter.save_quantized_model( + fuse_model, + save_path, + input_spec=[ + paddle.static.InputSpec( + shape=[None] + configs["Global"]["image_shape"], + dtype='float32') + ]) + + +def main(): + args = parse_args() + configs = 
parse_config(args.config) + init_logger(name='gallery2fc') + export_fuse_model(configs) + + +if __name__ == '__main__': + main() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/imagenet1k_label_list.txt b/cv/classification/resnet50/paddlepaddle/ppcls/utils/imagenet1k_label_list.txt new file mode 100644 index 000000000..376e18021 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/imagenet1k_label_list.txt @@ -0,0 +1,1000 @@ +0 tench, Tinca tinca +1 goldfish, Carassius auratus +2 great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias +3 tiger shark, Galeocerdo cuvieri +4 hammerhead, hammerhead shark +5 electric ray, crampfish, numbfish, torpedo +6 stingray +7 cock +8 hen +9 ostrich, Struthio camelus +10 brambling, Fringilla montifringilla +11 goldfinch, Carduelis carduelis +12 house finch, linnet, Carpodacus mexicanus +13 junco, snowbird +14 indigo bunting, indigo finch, indigo bird, Passerina cyanea +15 robin, American robin, Turdus migratorius +16 bulbul +17 jay +18 magpie +19 chickadee +20 water ouzel, dipper +21 kite +22 bald eagle, American eagle, Haliaeetus leucocephalus +23 vulture +24 great grey owl, great gray owl, Strix nebulosa +25 European fire salamander, Salamandra salamandra +26 common newt, Triturus vulgaris +27 eft +28 spotted salamander, Ambystoma maculatum +29 axolotl, mud puppy, Ambystoma mexicanum +30 bullfrog, Rana catesbeiana +31 tree frog, tree-frog +32 tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui +33 loggerhead, loggerhead turtle, Caretta caretta +34 leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea +35 mud turtle +36 terrapin +37 box turtle, box tortoise +38 banded gecko +39 common iguana, iguana, Iguana iguana +40 American chameleon, anole, Anolis carolinensis +41 whiptail, whiptail lizard +42 agama +43 frilled lizard, Chlamydosaurus kingi +44 alligator lizard +45 Gila monster, Heloderma suspectum +46 green lizard, Lacerta viridis +47 African chameleon, Chamaeleo chamaeleon +48 Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis +49 African crocodile, Nile crocodile, Crocodylus niloticus +50 American alligator, Alligator mississipiensis +51 triceratops +52 thunder snake, worm snake, Carphophis amoenus +53 ringneck snake, ring-necked snake, ring snake +54 hognose snake, puff adder, sand viper +55 green snake, grass snake +56 king snake, kingsnake +57 garter snake, grass snake +58 water snake +59 vine snake +60 night snake, Hypsiglena torquata +61 boa constrictor, Constrictor constrictor +62 rock python, rock snake, Python sebae +63 Indian cobra, Naja naja +64 green mamba +65 sea snake +66 horned viper, cerastes, sand viper, horned asp, Cerastes cornutus +67 diamondback, diamondback rattlesnake, Crotalus adamanteus +68 sidewinder, horned rattlesnake, Crotalus cerastes +69 trilobite +70 harvestman, daddy longlegs, Phalangium opilio +71 scorpion +72 black and gold garden spider, Argiope aurantia +73 barn spider, Araneus cavaticus +74 garden spider, Aranea diademata +75 black widow, Latrodectus mactans +76 tarantula +77 wolf spider, hunting spider +78 tick +79 centipede +80 black grouse +81 ptarmigan +82 ruffed grouse, partridge, Bonasa umbellus +83 prairie chicken, prairie grouse, prairie fowl +84 peacock +85 quail +86 partridge +87 African grey, African gray, Psittacus erithacus +88 macaw +89 sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita +90 lorikeet +91 coucal +92 bee eater +93 hornbill +94 hummingbird +95 jacamar +96 
toucan +97 drake +98 red-breasted merganser, Mergus serrator +99 goose +100 black swan, Cygnus atratus +101 tusker +102 echidna, spiny anteater, anteater +103 platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus +104 wallaby, brush kangaroo +105 koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus +106 wombat +107 jellyfish +108 sea anemone, anemone +109 brain coral +110 flatworm, platyhelminth +111 nematode, nematode worm, roundworm +112 conch +113 snail +114 slug +115 sea slug, nudibranch +116 chiton, coat-of-mail shell, sea cradle, polyplacophore +117 chambered nautilus, pearly nautilus, nautilus +118 Dungeness crab, Cancer magister +119 rock crab, Cancer irroratus +120 fiddler crab +121 king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica +122 American lobster, Northern lobster, Maine lobster, Homarus americanus +123 spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish +124 crayfish, crawfish, crawdad, crawdaddy +125 hermit crab +126 isopod +127 white stork, Ciconia ciconia +128 black stork, Ciconia nigra +129 spoonbill +130 flamingo +131 little blue heron, Egretta caerulea +132 American egret, great white heron, Egretta albus +133 bittern +134 crane +135 limpkin, Aramus pictus +136 European gallinule, Porphyrio porphyrio +137 American coot, marsh hen, mud hen, water hen, Fulica americana +138 bustard +139 ruddy turnstone, Arenaria interpres +140 red-backed sandpiper, dunlin, Erolia alpina +141 redshank, Tringa totanus +142 dowitcher +143 oystercatcher, oyster catcher +144 pelican +145 king penguin, Aptenodytes patagonica +146 albatross, mollymawk +147 grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus +148 killer whale, killer, orca, grampus, sea wolf, Orcinus orca +149 dugong, Dugong dugon +150 sea lion +151 Chihuahua +152 Japanese spaniel +153 Maltese dog, Maltese terrier, Maltese +154 Pekinese, Pekingese, Peke +155 Shih-Tzu +156 Blenheim spaniel +157 papillon +158 toy terrier +159 Rhodesian ridgeback +160 Afghan hound, Afghan +161 basset, basset hound +162 beagle +163 bloodhound, sleuthhound +164 bluetick +165 black-and-tan coonhound +166 Walker hound, Walker foxhound +167 English foxhound +168 redbone +169 borzoi, Russian wolfhound +170 Irish wolfhound +171 Italian greyhound +172 whippet +173 Ibizan hound, Ibizan Podenco +174 Norwegian elkhound, elkhound +175 otterhound, otter hound +176 Saluki, gazelle hound +177 Scottish deerhound, deerhound +178 Weimaraner +179 Staffordshire bullterrier, Staffordshire bull terrier +180 American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier +181 Bedlington terrier +182 Border terrier +183 Kerry blue terrier +184 Irish terrier +185 Norfolk terrier +186 Norwich terrier +187 Yorkshire terrier +188 wire-haired fox terrier +189 Lakeland terrier +190 Sealyham terrier, Sealyham +191 Airedale, Airedale terrier +192 cairn, cairn terrier +193 Australian terrier +194 Dandie Dinmont, Dandie Dinmont terrier +195 Boston bull, Boston terrier +196 miniature schnauzer +197 giant schnauzer +198 standard schnauzer +199 Scotch terrier, Scottish terrier, Scottie +200 Tibetan terrier, chrysanthemum dog +201 silky terrier, Sydney silky +202 soft-coated wheaten terrier +203 West Highland white terrier +204 Lhasa, Lhasa apso +205 flat-coated retriever +206 curly-coated retriever +207 golden retriever +208 Labrador retriever +209 Chesapeake Bay retriever +210 German short-haired pointer +211 
vizsla, Hungarian pointer +212 English setter +213 Irish setter, red setter +214 Gordon setter +215 Brittany spaniel +216 clumber, clumber spaniel +217 English springer, English springer spaniel +218 Welsh springer spaniel +219 cocker spaniel, English cocker spaniel, cocker +220 Sussex spaniel +221 Irish water spaniel +222 kuvasz +223 schipperke +224 groenendael +225 malinois +226 briard +227 kelpie +228 komondor +229 Old English sheepdog, bobtail +230 Shetland sheepdog, Shetland sheep dog, Shetland +231 collie +232 Border collie +233 Bouvier des Flandres, Bouviers des Flandres +234 Rottweiler +235 German shepherd, German shepherd dog, German police dog, alsatian +236 Doberman, Doberman pinscher +237 miniature pinscher +238 Greater Swiss Mountain dog +239 Bernese mountain dog +240 Appenzeller +241 EntleBucher +242 boxer +243 bull mastiff +244 Tibetan mastiff +245 French bulldog +246 Great Dane +247 Saint Bernard, St Bernard +248 Eskimo dog, husky +249 malamute, malemute, Alaskan malamute +250 Siberian husky +251 dalmatian, coach dog, carriage dog +252 affenpinscher, monkey pinscher, monkey dog +253 basenji +254 pug, pug-dog +255 Leonberg +256 Newfoundland, Newfoundland dog +257 Great Pyrenees +258 Samoyed, Samoyede +259 Pomeranian +260 chow, chow chow +261 keeshond +262 Brabancon griffon +263 Pembroke, Pembroke Welsh corgi +264 Cardigan, Cardigan Welsh corgi +265 toy poodle +266 miniature poodle +267 standard poodle +268 Mexican hairless +269 timber wolf, grey wolf, gray wolf, Canis lupus +270 white wolf, Arctic wolf, Canis lupus tundrarum +271 red wolf, maned wolf, Canis rufus, Canis niger +272 coyote, prairie wolf, brush wolf, Canis latrans +273 dingo, warrigal, warragal, Canis dingo +274 dhole, Cuon alpinus +275 African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus +276 hyena, hyaena +277 red fox, Vulpes vulpes +278 kit fox, Vulpes macrotis +279 Arctic fox, white fox, Alopex lagopus +280 grey fox, gray fox, Urocyon cinereoargenteus +281 tabby, tabby cat +282 tiger cat +283 Persian cat +284 Siamese cat, Siamese +285 Egyptian cat +286 cougar, puma, catamount, mountain lion, painter, panther, Felis concolor +287 lynx, catamount +288 leopard, Panthera pardus +289 snow leopard, ounce, Panthera uncia +290 jaguar, panther, Panthera onca, Felis onca +291 lion, king of beasts, Panthera leo +292 tiger, Panthera tigris +293 cheetah, chetah, Acinonyx jubatus +294 brown bear, bruin, Ursus arctos +295 American black bear, black bear, Ursus americanus, Euarctos americanus +296 ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus +297 sloth bear, Melursus ursinus, Ursus ursinus +298 mongoose +299 meerkat, mierkat +300 tiger beetle +301 ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle +302 ground beetle, carabid beetle +303 long-horned beetle, longicorn, longicorn beetle +304 leaf beetle, chrysomelid +305 dung beetle +306 rhinoceros beetle +307 weevil +308 fly +309 bee +310 ant, emmet, pismire +311 grasshopper, hopper +312 cricket +313 walking stick, walkingstick, stick insect +314 cockroach, roach +315 mantis, mantid +316 cicada, cicala +317 leafhopper +318 lacewing, lacewing fly +319 dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk +320 damselfly +321 admiral +322 ringlet, ringlet butterfly +323 monarch, monarch butterfly, milkweed butterfly, Danaus plexippus +324 cabbage butterfly +325 sulphur butterfly, sulfur butterfly +326 lycaenid, lycaenid butterfly +327 starfish, sea star +328 sea urchin +329 
sea cucumber, holothurian +330 wood rabbit, cottontail, cottontail rabbit +331 hare +332 Angora, Angora rabbit +333 hamster +334 porcupine, hedgehog +335 fox squirrel, eastern fox squirrel, Sciurus niger +336 marmot +337 beaver +338 guinea pig, Cavia cobaya +339 sorrel +340 zebra +341 hog, pig, grunter, squealer, Sus scrofa +342 wild boar, boar, Sus scrofa +343 warthog +344 hippopotamus, hippo, river horse, Hippopotamus amphibius +345 ox +346 water buffalo, water ox, Asiatic buffalo, Bubalus bubalis +347 bison +348 ram, tup +349 bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis +350 ibex, Capra ibex +351 hartebeest +352 impala, Aepyceros melampus +353 gazelle +354 Arabian camel, dromedary, Camelus dromedarius +355 llama +356 weasel +357 mink +358 polecat, fitch, foulmart, foumart, Mustela putorius +359 black-footed ferret, ferret, Mustela nigripes +360 otter +361 skunk, polecat, wood pussy +362 badger +363 armadillo +364 three-toed sloth, ai, Bradypus tridactylus +365 orangutan, orang, orangutang, Pongo pygmaeus +366 gorilla, Gorilla gorilla +367 chimpanzee, chimp, Pan troglodytes +368 gibbon, Hylobates lar +369 siamang, Hylobates syndactylus, Symphalangus syndactylus +370 guenon, guenon monkey +371 patas, hussar monkey, Erythrocebus patas +372 baboon +373 macaque +374 langur +375 colobus, colobus monkey +376 proboscis monkey, Nasalis larvatus +377 marmoset +378 capuchin, ringtail, Cebus capucinus +379 howler monkey, howler +380 titi, titi monkey +381 spider monkey, Ateles geoffroyi +382 squirrel monkey, Saimiri sciureus +383 Madagascar cat, ring-tailed lemur, Lemur catta +384 indri, indris, Indri indri, Indri brevicaudatus +385 Indian elephant, Elephas maximus +386 African elephant, Loxodonta africana +387 lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens +388 giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca +389 barracouta, snoek +390 eel +391 coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch +392 rock beauty, Holocanthus tricolor +393 anemone fish +394 sturgeon +395 gar, garfish, garpike, billfish, Lepisosteus osseus +396 lionfish +397 puffer, pufferfish, blowfish, globefish +398 abacus +399 abaya +400 academic gown, academic robe, judge's robe +401 accordion, piano accordion, squeeze box +402 acoustic guitar +403 aircraft carrier, carrier, flattop, attack aircraft carrier +404 airliner +405 airship, dirigible +406 altar +407 ambulance +408 amphibian, amphibious vehicle +409 analog clock +410 apiary, bee house +411 apron +412 ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin +413 assault rifle, assault gun +414 backpack, back pack, knapsack, packsack, rucksack, haversack +415 bakery, bakeshop, bakehouse +416 balance beam, beam +417 balloon +418 ballpoint, ballpoint pen, ballpen, Biro +419 Band Aid +420 banjo +421 bannister, banister, balustrade, balusters, handrail +422 barbell +423 barber chair +424 barbershop +425 barn +426 barometer +427 barrel, cask +428 barrow, garden cart, lawn cart, wheelbarrow +429 baseball +430 basketball +431 bassinet +432 bassoon +433 bathing cap, swimming cap +434 bath towel +435 bathtub, bathing tub, bath, tub +436 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon +437 beacon, lighthouse, beacon light, pharos +438 beaker +439 bearskin, busby, shako +440 beer bottle +441 beer glass +442 bell cote, bell cot +443 bib +444 bicycle-built-for-two, tandem bicycle, tandem +445 
bikini, two-piece +446 binder, ring-binder +447 binoculars, field glasses, opera glasses +448 birdhouse +449 boathouse +450 bobsled, bobsleigh, bob +451 bolo tie, bolo, bola tie, bola +452 bonnet, poke bonnet +453 bookcase +454 bookshop, bookstore, bookstall +455 bottlecap +456 bow +457 bow tie, bow-tie, bowtie +458 brass, memorial tablet, plaque +459 brassiere, bra, bandeau +460 breakwater, groin, groyne, mole, bulwark, seawall, jetty +461 breastplate, aegis, egis +462 broom +463 bucket, pail +464 buckle +465 bulletproof vest +466 bullet train, bullet +467 butcher shop, meat market +468 cab, hack, taxi, taxicab +469 caldron, cauldron +470 candle, taper, wax light +471 cannon +472 canoe +473 can opener, tin opener +474 cardigan +475 car mirror +476 carousel, carrousel, merry-go-round, roundabout, whirligig +477 carpenter's kit, tool kit +478 carton +479 car wheel +480 cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM +481 cassette +482 cassette player +483 castle +484 catamaran +485 CD player +486 cello, violoncello +487 cellular telephone, cellular phone, cellphone, cell, mobile phone +488 chain +489 chainlink fence +490 chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour +491 chain saw, chainsaw +492 chest +493 chiffonier, commode +494 chime, bell, gong +495 china cabinet, china closet +496 Christmas stocking +497 church, church building +498 cinema, movie theater, movie theatre, movie house, picture palace +499 cleaver, meat cleaver, chopper +500 cliff dwelling +501 cloak +502 clog, geta, patten, sabot +503 cocktail shaker +504 coffee mug +505 coffeepot +506 coil, spiral, volute, whorl, helix +507 combination lock +508 computer keyboard, keypad +509 confectionery, confectionary, candy store +510 container ship, containership, container vessel +511 convertible +512 corkscrew, bottle screw +513 cornet, horn, trumpet, trump +514 cowboy boot +515 cowboy hat, ten-gallon hat +516 cradle +517 crane +518 crash helmet +519 crate +520 crib, cot +521 Crock Pot +522 croquet ball +523 crutch +524 cuirass +525 dam, dike, dyke +526 desk +527 desktop computer +528 dial telephone, dial phone +529 diaper, nappy, napkin +530 digital clock +531 digital watch +532 dining table, board +533 dishrag, dishcloth +534 dishwasher, dish washer, dishwashing machine +535 disk brake, disc brake +536 dock, dockage, docking facility +537 dogsled, dog sled, dog sleigh +538 dome +539 doormat, welcome mat +540 drilling platform, offshore rig +541 drum, membranophone, tympan +542 drumstick +543 dumbbell +544 Dutch oven +545 electric fan, blower +546 electric guitar +547 electric locomotive +548 entertainment center +549 envelope +550 espresso maker +551 face powder +552 feather boa, boa +553 file, file cabinet, filing cabinet +554 fireboat +555 fire engine, fire truck +556 fire screen, fireguard +557 flagpole, flagstaff +558 flute, transverse flute +559 folding chair +560 football helmet +561 forklift +562 fountain +563 fountain pen +564 four-poster +565 freight car +566 French horn, horn +567 frying pan, frypan, skillet +568 fur coat +569 garbage truck, dustcart +570 gasmask, respirator, gas helmet +571 gas pump, gasoline pump, petrol pump, island dispenser +572 goblet +573 go-kart +574 golf ball +575 golfcart, golf cart +576 gondola +577 gong, tam-tam +578 gown +579 grand piano, grand +580 greenhouse, nursery, glasshouse +581 grille, radiator grille +582 grocery store, grocery, food market, market +583 guillotine +584 hair 
slide +585 hair spray +586 half track +587 hammer +588 hamper +589 hand blower, blow dryer, blow drier, hair dryer, hair drier +590 hand-held computer, hand-held microcomputer +591 handkerchief, hankie, hanky, hankey +592 hard disc, hard disk, fixed disk +593 harmonica, mouth organ, harp, mouth harp +594 harp +595 harvester, reaper +596 hatchet +597 holster +598 home theater, home theatre +599 honeycomb +600 hook, claw +601 hoopskirt, crinoline +602 horizontal bar, high bar +603 horse cart, horse-cart +604 hourglass +605 iPod +606 iron, smoothing iron +607 jack-o'-lantern +608 jean, blue jean, denim +609 jeep, landrover +610 jersey, T-shirt, tee shirt +611 jigsaw puzzle +612 jinrikisha, ricksha, rickshaw +613 joystick +614 kimono +615 knee pad +616 knot +617 lab coat, laboratory coat +618 ladle +619 lampshade, lamp shade +620 laptop, laptop computer +621 lawn mower, mower +622 lens cap, lens cover +623 letter opener, paper knife, paperknife +624 library +625 lifeboat +626 lighter, light, igniter, ignitor +627 limousine, limo +628 liner, ocean liner +629 lipstick, lip rouge +630 Loafer +631 lotion +632 loudspeaker, speaker, speaker unit, loudspeaker system, speaker system +633 loupe, jeweler's loupe +634 lumbermill, sawmill +635 magnetic compass +636 mailbag, postbag +637 mailbox, letter box +638 maillot +639 maillot, tank suit +640 manhole cover +641 maraca +642 marimba, xylophone +643 mask +644 matchstick +645 maypole +646 maze, labyrinth +647 measuring cup +648 medicine chest, medicine cabinet +649 megalith, megalithic structure +650 microphone, mike +651 microwave, microwave oven +652 military uniform +653 milk can +654 minibus +655 miniskirt, mini +656 minivan +657 missile +658 mitten +659 mixing bowl +660 mobile home, manufactured home +661 Model T +662 modem +663 monastery +664 monitor +665 moped +666 mortar +667 mortarboard +668 mosque +669 mosquito net +670 motor scooter, scooter +671 mountain bike, all-terrain bike, off-roader +672 mountain tent +673 mouse, computer mouse +674 mousetrap +675 moving van +676 muzzle +677 nail +678 neck brace +679 necklace +680 nipple +681 notebook, notebook computer +682 obelisk +683 oboe, hautboy, hautbois +684 ocarina, sweet potato +685 odometer, hodometer, mileometer, milometer +686 oil filter +687 organ, pipe organ +688 oscilloscope, scope, cathode-ray oscilloscope, CRO +689 overskirt +690 oxcart +691 oxygen mask +692 packet +693 paddle, boat paddle +694 paddlewheel, paddle wheel +695 padlock +696 paintbrush +697 pajama, pyjama, pj's, jammies +698 palace +699 panpipe, pandean pipe, syrinx +700 paper towel +701 parachute, chute +702 parallel bars, bars +703 park bench +704 parking meter +705 passenger car, coach, carriage +706 patio, terrace +707 pay-phone, pay-station +708 pedestal, plinth, footstall +709 pencil box, pencil case +710 pencil sharpener +711 perfume, essence +712 Petri dish +713 photocopier +714 pick, plectrum, plectron +715 pickelhaube +716 picket fence, paling +717 pickup, pickup truck +718 pier +719 piggy bank, penny bank +720 pill bottle +721 pillow +722 ping-pong ball +723 pinwheel +724 pirate, pirate ship +725 pitcher, ewer +726 plane, carpenter's plane, woodworking plane +727 planetarium +728 plastic bag +729 plate rack +730 plow, plough +731 plunger, plumber's helper +732 Polaroid camera, Polaroid Land camera +733 pole +734 police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria +735 poncho +736 pool table, billiard table, snooker table +737 pop bottle, soda bottle +738 pot, flowerpot +739 potter's wheel 
+740 power drill +741 prayer rug, prayer mat +742 printer +743 prison, prison house +744 projectile, missile +745 projector +746 puck, hockey puck +747 punching bag, punch bag, punching ball, punchball +748 purse +749 quill, quill pen +750 quilt, comforter, comfort, puff +751 racer, race car, racing car +752 racket, racquet +753 radiator +754 radio, wireless +755 radio telescope, radio reflector +756 rain barrel +757 recreational vehicle, RV, R.V. +758 reel +759 reflex camera +760 refrigerator, icebox +761 remote control, remote +762 restaurant, eating house, eating place, eatery +763 revolver, six-gun, six-shooter +764 rifle +765 rocking chair, rocker +766 rotisserie +767 rubber eraser, rubber, pencil eraser +768 rugby ball +769 rule, ruler +770 running shoe +771 safe +772 safety pin +773 saltshaker, salt shaker +774 sandal +775 sarong +776 sax, saxophone +777 scabbard +778 scale, weighing machine +779 school bus +780 schooner +781 scoreboard +782 screen, CRT screen +783 screw +784 screwdriver +785 seat belt, seatbelt +786 sewing machine +787 shield, buckler +788 shoe shop, shoe-shop, shoe store +789 shoji +790 shopping basket +791 shopping cart +792 shovel +793 shower cap +794 shower curtain +795 ski +796 ski mask +797 sleeping bag +798 slide rule, slipstick +799 sliding door +800 slot, one-armed bandit +801 snorkel +802 snowmobile +803 snowplow, snowplough +804 soap dispenser +805 soccer ball +806 sock +807 solar dish, solar collector, solar furnace +808 sombrero +809 soup bowl +810 space bar +811 space heater +812 space shuttle +813 spatula +814 speedboat +815 spider web, spider's web +816 spindle +817 sports car, sport car +818 spotlight, spot +819 stage +820 steam locomotive +821 steel arch bridge +822 steel drum +823 stethoscope +824 stole +825 stone wall +826 stopwatch, stop watch +827 stove +828 strainer +829 streetcar, tram, tramcar, trolley, trolley car +830 stretcher +831 studio couch, day bed +832 stupa, tope +833 submarine, pigboat, sub, U-boat +834 suit, suit of clothes +835 sundial +836 sunglass +837 sunglasses, dark glasses, shades +838 sunscreen, sunblock, sun blocker +839 suspension bridge +840 swab, swob, mop +841 sweatshirt +842 swimming trunks, bathing trunks +843 swing +844 switch, electric switch, electrical switch +845 syringe +846 table lamp +847 tank, army tank, armored combat vehicle, armoured combat vehicle +848 tape player +849 teapot +850 teddy, teddy bear +851 television, television system +852 tennis ball +853 thatch, thatched roof +854 theater curtain, theatre curtain +855 thimble +856 thresher, thrasher, threshing machine +857 throne +858 tile roof +859 toaster +860 tobacco shop, tobacconist shop, tobacconist +861 toilet seat +862 torch +863 totem pole +864 tow truck, tow car, wrecker +865 toyshop +866 tractor +867 trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi +868 tray +869 trench coat +870 tricycle, trike, velocipede +871 trimaran +872 tripod +873 triumphal arch +874 trolleybus, trolley coach, trackless trolley +875 trombone +876 tub, vat +877 turnstile +878 typewriter keyboard +879 umbrella +880 unicycle, monocycle +881 upright, upright piano +882 vacuum, vacuum cleaner +883 vase +884 vault +885 velvet +886 vending machine +887 vestment +888 viaduct +889 violin, fiddle +890 volleyball +891 waffle iron +892 wall clock +893 wallet, billfold, notecase, pocketbook +894 wardrobe, closet, press +895 warplane, military plane +896 washbasin, handbasin, washbowl, lavabo, wash-hand basin +897 washer, automatic washer, washing machine 
+898 water bottle +899 water jug +900 water tower +901 whiskey jug +902 whistle +903 wig +904 window screen +905 window shade +906 Windsor tie +907 wine bottle +908 wing +909 wok +910 wooden spoon +911 wool, woolen, woollen +912 worm fence, snake fence, snake-rail fence, Virginia fence +913 wreck +914 yawl +915 yurt +916 web site, website, internet site, site +917 comic book +918 crossword puzzle, crossword +919 street sign +920 traffic light, traffic signal, stoplight +921 book jacket, dust cover, dust jacket, dust wrapper +922 menu +923 plate +924 guacamole +925 consomme +926 hot pot, hotpot +927 trifle +928 ice cream, icecream +929 ice lolly, lolly, lollipop, popsicle +930 French loaf +931 bagel, beigel +932 pretzel +933 cheeseburger +934 hotdog, hot dog, red hot +935 mashed potato +936 head cabbage +937 broccoli +938 cauliflower +939 zucchini, courgette +940 spaghetti squash +941 acorn squash +942 butternut squash +943 cucumber, cuke +944 artichoke, globe artichoke +945 bell pepper +946 cardoon +947 mushroom +948 Granny Smith +949 strawberry +950 orange +951 lemon +952 fig +953 pineapple, ananas +954 banana +955 jackfruit, jak, jack +956 custard apple +957 pomegranate +958 hay +959 carbonara +960 chocolate sauce, chocolate syrup +961 dough +962 meat loaf, meatloaf +963 pizza, pizza pie +964 potpie +965 burrito +966 red wine +967 espresso +968 cup +969 eggnog +970 alp +971 bubble +972 cliff, drop, drop-off +973 coral reef +974 geyser +975 lakeside, lakeshore +976 promontory, headland, head, foreland +977 sandbar, sand bar +978 seashore, coast, seacoast, sea-coast +979 valley, vale +980 volcano +981 ballplayer, baseball player +982 groom, bridegroom +983 scuba diver +984 rapeseed +985 daisy +986 yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum +987 corn +988 acorn +989 hip, rose hip, rosehip +990 buckeye, horse chestnut, conker +991 coral fungus +992 agaric +993 gyromitra +994 stinkhorn, carrion fungus +995 earthstar +996 hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa +997 bolete +998 ear, spike, capitulum +999 toilet tissue, toilet paper, bathroom tissue diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/logger.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/logger.py new file mode 100644 index 000000000..bc8de3640 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/logger.py @@ -0,0 +1,138 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +import logging +import datetime +import paddle.distributed as dist + +_logger = None + + +def init_logger(name='ppcls', log_file=None, log_level=logging.INFO): + """Initialize and get a logger by name. + If the logger has not been initialized, this method will initialize the + logger by adding one or two handlers, otherwise the initialized logger will + be directly returned. During initialization, a StreamHandler will always be + added. 
If `log_file` is specified a FileHandler will also be added. + Args: + name (str): Logger name. + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the logger. + log_level (int): The logger level. Note that only the process of + rank 0 is affected, and other processes will set the level to + "Error" thus be silent most of the time. + Returns: + logging.Logger: The expected logger. + """ + global _logger + assert _logger is None, "logger should not be initialized twice or more." + _logger = logging.getLogger(name) + + formatter = logging.Formatter( + '[%(asctime)s] %(name)s %(levelname)s: %(message)s', + datefmt="%Y/%m/%d %H:%M:%S") + + stream_handler = logging.StreamHandler(stream=sys.stdout) + stream_handler.setFormatter(formatter) + _logger.addHandler(stream_handler) + if log_file is not None and dist.get_rank() == 0: + log_file_folder = os.path.split(log_file)[0] + os.makedirs(log_file_folder, exist_ok=True) + file_handler = logging.FileHandler(log_file, 'a') + file_handler.setFormatter(formatter) + _logger.addHandler(file_handler) + if dist.get_rank() == 0: + _logger.setLevel(log_level) + else: + _logger.setLevel(logging.ERROR) + _logger.propagate = False + + +def log_at_trainer0(log): + """ + logs will print multi-times when calling Fleet API. + Only display single log and ignore the others. + """ + + def wrapper(fmt, *args): + if dist.get_rank() == 0: + log(fmt, *args) + + return wrapper + + +@log_at_trainer0 +def info(fmt, *args): + _logger.info(fmt, *args) + + +@log_at_trainer0 +def debug(fmt, *args): + _logger.debug(fmt, *args) + + +@log_at_trainer0 +def warning(fmt, *args): + _logger.warning(fmt, *args) + + +@log_at_trainer0 +def error(fmt, *args): + _logger.error(fmt, *args) + + +def scaler(name, value, step, writer): + """ + This function will draw a scalar curve generated by the visualdl. + Usage: Install visualdl: pip3 install visualdl==2.0.0b4 + and then: + visualdl --logdir ./scalar --host 0.0.0.0 --port 8830 + to preview loss corve in real time. + """ + if writer is None: + return + writer.add_scalar(tag=name, step=step, value=value) + + +def advertise(): + """ + Show the advertising message like the following: + + =========================================================== + == PaddleClas is powered by PaddlePaddle ! == + =========================================================== + == == + == For more info please go to the following website. == + == == + == https://github.com/PaddlePaddle/PaddleClas == + =========================================================== + + """ + copyright = "PaddleClas is powered by PaddlePaddle !" + ad = "For more info please go to the following website." + website = "https://github.com/PaddlePaddle/PaddleClas" + AD_LEN = 6 + len(max([copyright, ad, website], key=len)) + + info("\n{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}\n".format( + "=" * (AD_LEN + 4), + "=={}==".format(copyright.center(AD_LEN)), + "=" * (AD_LEN + 4), + "=={}==".format(' ' * AD_LEN), + "=={}==".format(ad.center(AD_LEN)), + "=={}==".format(' ' * AD_LEN), + "=={}==".format(website.center(AD_LEN)), + "=" * (AD_LEN + 4), )) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/metrics.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/metrics.py new file mode 100644 index 000000000..b0db68a75 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/metrics.py @@ -0,0 +1,107 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. 
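As a quick orientation for the logger added above, here is a minimal usage sketch (assuming the vendored ppcls package is importable; the log path is an arbitrary example). Only trainer rank 0 logs at the configured level, the other ranks are dropped to ERROR so multi-card runs are not flooded:

from ppcls.utils import logger

# One-time setup: always attaches a stdout handler, plus a FileHandler on rank 0 when log_file is given.
logger.init_logger(name='ppcls', log_file='./output/train.log')

# The module-level wrappers (info/debug/warning/error) only emit on trainer rank 0.
logger.info("epoch [%d/%d] finished, top1=%.4f", 1, 120, 0.7321)
logger.warning("pretrained weights not found, training from scratch")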
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from sklearn.metrics import hamming_loss +from sklearn.metrics import accuracy_score as accuracy_metric +from sklearn.metrics import multilabel_confusion_matrix +from sklearn.metrics import precision_recall_fscore_support +from sklearn.metrics import average_precision_score +from sklearn.preprocessing import binarize + +import numpy as np + +__all__ = ["multi_hot_encode", "hamming_distance", "accuracy_score", "precision_recall_fscore", "mean_average_precision"] + + +def multi_hot_encode(logits, threshold=0.5): + """ + Encode logits to multi-hot by elementwise for multilabel + """ + + return binarize(logits, threshold=threshold) + + +def hamming_distance(output, target): + """ + Soft metric based label for multilabel classification + Returns: + The smaller the return value is, the better model is. + """ + + return hamming_loss(target, output) + + +def accuracy_score(output, target, base="sample"): + """ + Hard metric for multilabel classification + Args: + output: + target: + base: ["sample", "label"], default="sample" + if "sample", return metric score based sample, + if "label", return metric score based label. + Returns: + accuracy: + """ + + assert base in ["sample", "label"], 'must be one of ["sample", "label"]' + + if base == "sample": + accuracy = accuracy_metric(target, output) + elif base == "label": + mcm = multilabel_confusion_matrix(target, output) + tns = mcm[:, 0, 0] + fns = mcm[:, 1, 0] + tps = mcm[:, 1, 1] + fps = mcm[:, 0, 1] + + accuracy = (sum(tps) + sum(tns)) / (sum(tps) + sum(tns) + sum(fns) + sum(fps)) + + return accuracy + + +def precision_recall_fscore(output, target): + """ + Metric based label for multilabel classification + Returns: + precisions: + recalls: + fscores: + """ + + precisions, recalls, fscores, _ = precision_recall_fscore_support(target, output) + + return precisions, recalls, fscores + + +def mean_average_precision(logits, target): + """ + Calculate average precision + Args: + logits: probability from network before sigmoid or softmax + target: ground truth, 0 or 1 + """ + if not (isinstance(logits, np.ndarray) and isinstance(target, np.ndarray)): + raise TypeError("logits and target should be np.ndarray.") + + aps = [] + for i in range(target.shape[1]): + ap = average_precision_score(target[:, i], logits[:, i]) + aps.append(ap) + + return np.mean(aps) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/misc.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/misc.py new file mode 100644 index 000000000..08ab7b6f7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/misc.py @@ -0,0 +1,63 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
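A small, self-contained sketch of the multilabel helpers above on toy numpy inputs (scikit-learn is assumed to be installed, since the module imports it):

import numpy as np
from ppcls.utils.metrics import (multi_hot_encode, hamming_distance,
                                 accuracy_score, mean_average_precision)

probs = np.array([[0.9, 0.2, 0.7],
                  [0.1, 0.8, 0.4]])            # 2 samples x 3 labels, post-sigmoid scores
target = np.array([[1, 0, 1],
                   [0, 1, 1]])

preds = multi_hot_encode(probs, threshold=0.5)        # -> [[1, 0, 1], [0, 1, 0]]
print(hamming_distance(preds, target))                # fraction of wrong labels, lower is better
print(accuracy_score(preds, target, base="label"))    # label-wise accuracy from the confusion matrices
print(mean_average_precision(probs, target))          # takes the raw scores, not the binarized preds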
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = ['AverageMeter'] + + +class AverageMeter(object): + """ + Computes and stores the average and current value + Code was based on https://github.com/pytorch/examples/blob/master/imagenet/main.py + """ + + def __init__(self, name='', fmt='f', postfix="", need_avg=True): + self.name = name + self.fmt = fmt + self.postfix = postfix + self.need_avg = need_avg + self.reset() + + def reset(self): + """ reset """ + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + """ update """ + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + @property + def total(self): + return '{self.name}_sum: {self.sum:{self.fmt}}{self.postfix}'.format( + self=self) + + @property + def total_minute(self): + return '{self.name} {s:{self.fmt}}{self.postfix} min'.format( + s=self.sum / 60, self=self) + + @property + def mean(self): + return '{self.name}: {self.avg:{self.fmt}}{self.postfix}'.format( + self=self) if self.need_avg else '' + + @property + def value(self): + return '{self.name}: {self.val:{self.fmt}}{self.postfix}'.format( + self=self) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/model_zoo.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/model_zoo.py new file mode 100644 index 000000000..fc527f6a1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/model_zoo.py @@ -0,0 +1,213 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import requests +import shutil +import tarfile +import tqdm +import zipfile + +from ppcls.arch import similar_architectures +from ppcls.utils import logger + +__all__ = ['get'] + +DOWNLOAD_RETRY_LIMIT = 3 + + +class UrlError(Exception): + """ UrlError + """ + + def __init__(self, url='', code=''): + message = "Downloading from {} failed with code {}!".format(url, code) + super(UrlError, self).__init__(message) + + +class ModelNameError(Exception): + """ ModelNameError + """ + + def __init__(self, message=''): + super(ModelNameError, self).__init__(message) + + +class RetryError(Exception): + """ RetryError + """ + + def __init__(self, url='', times=''): + message = "Download from {} failed. Retry({}) limit reached".format( + url, times) + super(RetryError, self).__init__(message) + + +def _get_url(architecture, postfix="pdparams"): + prefix = "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/" + fname = architecture + "_pretrained." 
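To illustrate how the AverageMeter above is meant to be driven from a training loop (the loss values below are stand-ins for real per-batch losses):

from ppcls.utils.misc import AverageMeter

loss_meter = AverageMeter(name='loss', fmt='.4f')
for batch_loss in [0.92, 0.81, 0.77]:      # stand-in for real per-batch losses
    loss_meter.update(batch_loss, n=32)    # n = number of samples in the batch
    print(loss_meter.value)                # current value, e.g. "loss: 0.7700"
print(loss_meter.mean)                     # sample-weighted running average
loss_meter.reset()                         # typically called at the start of each epoch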
+ postfix + return prefix + fname + + +def _move_and_merge_tree(src, dst): + """ + Move src directory to dst, if dst is already exists, + merge src to dst + """ + if not os.path.exists(dst): + shutil.move(src, dst) + elif os.path.isfile(src): + shutil.move(src, dst) + else: + for fp in os.listdir(src): + src_fp = os.path.join(src, fp) + dst_fp = os.path.join(dst, fp) + if os.path.isdir(src_fp): + if os.path.isdir(dst_fp): + _move_and_merge_tree(src_fp, dst_fp) + else: + shutil.move(src_fp, dst_fp) + elif os.path.isfile(src_fp) and \ + not os.path.isfile(dst_fp): + shutil.move(src_fp, dst_fp) + + +def _download(url, path): + """ + Download from url, save to path. + url (str): download url + path (str): download to given path + """ + if not os.path.exists(path): + os.makedirs(path) + + fname = os.path.split(url)[-1] + fullname = os.path.join(path, fname) + retry_cnt = 0 + + while not os.path.exists(fullname): + if retry_cnt < DOWNLOAD_RETRY_LIMIT: + retry_cnt += 1 + else: + raise RetryError(url, DOWNLOAD_RETRY_LIMIT) + + logger.info("Downloading {} from {}".format(fname, url)) + + req = requests.get(url, stream=True) + if req.status_code != 200: + raise UrlError(url, req.status_code) + + # For protecting download interupted, download to + # tmp_fullname firstly, move tmp_fullname to fullname + # after download finished + tmp_fullname = fullname + "_tmp" + total_size = req.headers.get('content-length') + with open(tmp_fullname, 'wb') as f: + if total_size: + for chunk in tqdm.tqdm( + req.iter_content(chunk_size=1024), + total=(int(total_size) + 1023) // 1024, + unit='KB'): + f.write(chunk) + else: + for chunk in req.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + shutil.move(tmp_fullname, fullname) + + return fullname + + +def _decompress(fname): + """ + Decompress for zip and tar file + """ + logger.info("Decompressing {}...".format(fname)) + + # For protecting decompressing interupted, + # decompress to fpath_tmp directory firstly, if decompress + # successed, move decompress files to fpath and delete + # fpath_tmp and remove download compress file. + fpath = os.path.split(fname)[0] + fpath_tmp = os.path.join(fpath, 'tmp') + if os.path.isdir(fpath_tmp): + shutil.rmtree(fpath_tmp) + os.makedirs(fpath_tmp) + + if fname.find('tar') >= 0: + with tarfile.open(fname) as tf: + tf.extractall(path=fpath_tmp) + elif fname.find('zip') >= 0: + with zipfile.ZipFile(fname) as zf: + zf.extractall(path=fpath_tmp) + else: + raise TypeError("Unsupport compress file type {}".format(fname)) + + fs = os.listdir(fpath_tmp) + assert len( + fs + ) == 1, "There should just be 1 pretrained path in an archive file but got {}.".format( + len(fs)) + + f = fs[0] + src_dir = os.path.join(fpath_tmp, f) + dst_dir = os.path.join(fpath, f) + _move_and_merge_tree(src_dir, dst_dir) + + shutil.rmtree(fpath_tmp) + os.remove(fname) + + return f + + +def _get_pretrained(): + with open('./ppcls/utils/pretrained.list') as flist: + pretrained = [line.strip() for line in flist] + return pretrained + + +def _check_pretrained_name(architecture): + assert isinstance(architecture, str), \ + ("the type of architecture({}) should be str". format(architecture)) + pretrained = _get_pretrained() + similar_names = similar_architectures(architecture, pretrained) + model_list = ', '.join(similar_names) + err = "{} is not exist! 
Maybe you want: [{}]" \ + "".format(architecture, model_list) + if architecture not in similar_names: + raise ModelNameError(err) + + +def list_models(): + pretrained = _get_pretrained() + msg = "All avialable pretrained models are as follows: {}".format( + pretrained) + logger.info(msg) + return + + +def get(architecture, path, decompress=False, postfix="pdparams"): + """ + Get the pretrained model. + """ + _check_pretrained_name(architecture) + url = _get_url(architecture, postfix=postfix) + fname = _download(url, path) + if postfix == "tar" and decompress: + _decompress(fname) + logger.info("download {} finished ".format(fname)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/pretrained.list b/cv/classification/resnet50/paddlepaddle/ppcls/utils/pretrained.list new file mode 100644 index 000000000..36d70f5a2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/pretrained.list @@ -0,0 +1,121 @@ +ResNet18 +ResNet34 +ResNet50 +ResNet101 +ResNet152 +ResNet50_vc +ResNet18_vd +ResNet34_vd +ResNet50_vd +ResNet50_vd_v2 +ResNet101_vd +ResNet152_vd +ResNet200_vd +ResNet50_vd_ssld +ResNet50_vd_ssld_v2 +Fix_ResNet50_vd_ssld_v2 +ResNet101_vd_ssld +MobileNetV3_large_x0_35 +MobileNetV3_large_x0_5 +MobileNetV3_large_x0_75 +MobileNetV3_large_x1_0 +MobileNetV3_large_x1_25 +MobileNetV3_small_x0_35 +MobileNetV3_small_x0_5 +MobileNetV3_small_x0_75 +MobileNetV3_small_x1_0 +MobileNetV3_small_x1_25 +MobileNetV3_large_x1_0_ssld +MobileNetV3_large_x1_0_ssld_int8 +MobileNetV3_small_x1_0_ssld +MobileNetV2_x0_25 +MobileNetV2_x0_5 +MobileNetV2_x0_75 +MobileNetV2 +MobileNetV2_x1_5 +MobileNetV2_x2_0 +MobileNetV2_ssld +MobileNetV1_x0_25 +MobileNetV1_x0_5 +MobileNetV1_x0_75 +MobileNetV1 +MobileNetV1_ssld +ShuffleNetV2_x0_25 +ShuffleNetV2_x0_33 +ShuffleNetV2_x0_5 +ShuffleNetV2 +ShuffleNetV2_x1_5 +ShuffleNetV2_x2_0 +ShuffleNetV2_swish +ResNeXt50_32x4d +ResNeXt50_64x4d +ResNeXt101_32x4d +ResNeXt101_64x4d +ResNeXt152_32x4d +ResNeXt152_64x4d +ResNeXt50_vd_32x4d +ResNeXt50_vd_64x4d +ResNeXt101_vd_32x4d +ResNeXt101_vd_64x4d +ResNeXt152_vd_32x4d +ResNeXt152_vd_64x4d +SE_ResNet18_vd +SE_ResNet34_vd +SE_ResNet50_vd +SE_ResNeXt50_32x4d +SE_ResNeXt101_32x4d +SE_ResNeXt50_vd_32x4d +SENet154_vd +Res2Net50_26w_4s +Res2Net50_vd_26w_4s +Res2Net50_14w_8s +Res2Net101_vd_26w_4s +Res2Net200_vd_26w_4s +GoogLeNet +InceptionV4 +Xception41 +Xception41_deeplab +Xception65 +Xception65_deeplab +Xception71 +HRNet_W18_C +HRNet_W30_C +HRNet_W32_C +HRNet_W40_C +HRNet_W44_C +HRNet_W48_C +HRNet_W64_C +DPN68 +DPN92 +DPN98 +DPN107 +DPN131 +DenseNet121 +DenseNet161 +DenseNet169 +DenseNet201 +DenseNet264 +EfficientNetB0_small +EfficientNetB0 +EfficientNetB1 +EfficientNetB2 +EfficientNetB3 +EfficientNetB4 +EfficientNetB5 +EfficientNetB6 +EfficientNetB7 +ResNeXt101_32x8d_wsl +ResNeXt101_32x16d_wsl +ResNeXt101_32x32d_wsl +ResNeXt101_32x48d_wsl +Fix_ResNeXt101_32x48d_wsl +AlexNet +SqueezeNet1_0 +SqueezeNet1_1 +VGG11 +VGG13 +VGG16 +VGG19 +DarkNet53_ImageNet1k +ResNet50_ACNet_deploy +CSPResNet50_leaky diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/profiler.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/profiler.py new file mode 100644 index 000000000..7cf945a26 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/profiler.py @@ -0,0 +1,111 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
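A hedged sketch of fetching pretrained weights with the helper above. Network access is assumed; the helper reads ./ppcls/utils/pretrained.list, so it expects to run from the repository root, and it logs through the ppcls logger, which must be initialized first:

from ppcls.utils import logger, model_zoo

logger.init_logger()
model_zoo.list_models()                    # logs every name accepted by _check_pretrained_name
# Downloads ResNet50_pretrained.pdparams from the bcebos mirror into ./pretrained/
model_zoo.get("ResNet50", "./pretrained")
# For archive checkpoints the call would look like this (whether a .tar exists depends on the server):
# model_zoo.get("ResNet50", "./pretrained", decompress=True, postfix="tar")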
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import paddle + +# A global variable to record the number of calling times for profiler +# functions. It is used to specify the tracing range of training steps. +_profiler_step_id = 0 + +# A global variable to avoid parsing from string every time. +_profiler_options = None + + +class ProfilerOptions(object): + ''' + Use a string to initialize a ProfilerOptions. + The string should be in the format: "key1=value1;key2=value;key3=value3". + For example: + "profile_path=model.profile" + "batch_range=[50, 60]; profile_path=model.profile" + "batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile" + + ProfilerOptions supports following key-value pair: + batch_range - a integer list, e.g. [100, 110]. + state - a string, the optional values are 'CPU', 'GPU' or 'All'. + sorted_key - a string, the optional values are 'calls', 'total', + 'max', 'min' or 'ave. + tracer_option - a string, the optional values are 'Default', 'OpDetail', + 'AllOpDetail'. + profile_path - a string, the path to save the serialized profile data, + which can be used to generate a timeline. + exit_on_finished - a boolean. + ''' + + def __init__(self, options_str): + assert isinstance(options_str, str) + + self._options = { + 'batch_range': [10, 20], + 'state': 'All', + 'sorted_key': 'total', + 'tracer_option': 'Default', + 'profile_path': '/tmp/profile', + 'exit_on_finished': True + } + self._parse_from_string(options_str) + + def _parse_from_string(self, options_str): + for kv in options_str.replace(' ', '').split(';'): + key, value = kv.split('=') + if key == 'batch_range': + value_list = value.replace('[', '').replace(']', '').split(',') + value_list = list(map(int, value_list)) + if len(value_list) >= 2 and value_list[0] >= 0 and value_list[ + 1] > value_list[0]: + self._options[key] = value_list + elif key == 'exit_on_finished': + self._options[key] = value.lower() in ("yes", "true", "t", "1") + elif key in [ + 'state', 'sorted_key', 'tracer_option', 'profile_path' + ]: + self._options[key] = value + + def __getitem__(self, name): + if self._options.get(name, None) is None: + raise ValueError( + "ProfilerOptions does not have an option named %s." % name) + return self._options[name] + + +def add_profiler_step(options_str=None): + ''' + Enable the operator-level timing using PaddlePaddle's profiler. + The profiler uses a independent variable to count the profiler steps. + One call of this function is treated as a profiler step. + + Args: + profiler_options - a string to initialize the ProfilerOptions. + Default is None, and the profiler is disabled. 
+ ''' + if options_str is None: + return + + global _profiler_step_id + global _profiler_options + + if _profiler_options is None: + _profiler_options = ProfilerOptions(options_str) + + if _profiler_step_id == _profiler_options['batch_range'][0]: + paddle.utils.profiler.start_profiler( + _profiler_options['state'], _profiler_options['tracer_option']) + elif _profiler_step_id == _profiler_options['batch_range'][1]: + paddle.utils.profiler.stop_profiler(_profiler_options['sorted_key'], + _profiler_options['profile_path']) + if _profiler_options['exit_on_finished']: + sys.exit(0) + + _profiler_step_id += 1 diff --git a/cv/classification/resnet50/paddlepaddle/ppcls/utils/save_load.py b/cv/classification/resnet50/paddlepaddle/ppcls/utils/save_load.py new file mode 100644 index 000000000..625a28483 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls/utils/save_load.py @@ -0,0 +1,136 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import errno +import os +import re +import shutil +import tempfile + +import paddle +from ppcls.utils import logger +from .download import get_weights_path_from_url + +__all__ = ['init_model', 'save_model', 'load_dygraph_pretrain'] + + +def _mkdir_if_not_exist(path): + """ + mkdir if not exists, ignore the exception when multiprocess mkdir together + """ + if not os.path.exists(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno == errno.EEXIST and os.path.isdir(path): + logger.warning( + 'be happy if some process has already created {}'.format( + path)) + else: + raise OSError('Failed to mkdir {}'.format(path)) + + +def load_dygraph_pretrain(model, path=None): + if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')): + raise ValueError("Model pretrain path {} does not " + "exists.".format(path)) + param_state_dict = paddle.load(path + ".pdparams") + model.set_dict(param_state_dict) + return + + +def load_dygraph_pretrain_from_url(model, pretrained_url, use_ssld=False): + if use_ssld: + pretrained_url = pretrained_url.replace("_pretrained", + "_ssld_pretrained") + local_weight_path = get_weights_path_from_url(pretrained_url).replace( + ".pdparams", "") + load_dygraph_pretrain(model, path=local_weight_path) + return + + +def load_distillation_model(model, pretrained_model): + logger.info("In distillation mode, teacher model will be " + "loaded firstly before student model.") + + if not isinstance(pretrained_model, list): + pretrained_model = [pretrained_model] + + teacher = model.teacher if hasattr(model, + "teacher") else model._layers.teacher + student = model.student if hasattr(model, + "student") else model._layers.student + load_dygraph_pretrain(teacher, path=pretrained_model[0]) + logger.info("Finish initing teacher model from {}".format( + pretrained_model)) + # load student model + if len(pretrained_model) >= 2: + 
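A sketch of the intended call pattern for add_profiler_step; the options string follows the key=value;... grammar documented in ProfilerOptions, and the loop body is a stand-in for a real training step:

import paddle
from ppcls.utils import profiler

# Trace steps 50..60, dump a timeline to ./model.profile, then exit once tracing stops.
options = "batch_range=[50, 60]; profile_path=./model.profile; exit_on_finished=true"

for step in range(100):
    profiler.add_profiler_step(options)          # counts its own calls; passing None disables profiling
    loss = paddle.rand([8, 3, 224, 224]).mean()  # stand-in for the real forward/backward pass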
load_dygraph_pretrain(student, path=pretrained_model[1]) + logger.info("Finish initing student model from {}".format( + pretrained_model)) + + +def init_model(config, net, optimizer=None): + """ + load model from checkpoint or pretrained_model + """ + checkpoints = config.get('checkpoints') + if checkpoints and optimizer is not None: + assert os.path.exists(checkpoints + ".pdparams"), \ + "Given dir {}.pdparams not exist.".format(checkpoints) + assert os.path.exists(checkpoints + ".pdopt"), \ + "Given dir {}.pdopt not exist.".format(checkpoints) + para_dict = paddle.load(checkpoints + ".pdparams") + opti_dict = paddle.load(checkpoints + ".pdopt") + metric_dict = paddle.load(checkpoints + ".pdstates") + net.set_dict(para_dict) + optimizer.set_state_dict(opti_dict) + logger.info("Finish load checkpoints from {}".format(checkpoints)) + return metric_dict + + pretrained_model = config.get('pretrained_model') + use_distillation = config.get('use_distillation', False) + if pretrained_model: + if use_distillation: + load_distillation_model(net, pretrained_model) + else: # common load + load_dygraph_pretrain(net, path=pretrained_model) + logger.info( + logger.coloring("Finish load pretrained model from {}".format( + pretrained_model), "HEADER")) + + +def save_model(net, + optimizer, + metric_info, + model_path, + model_name="", + prefix='ppcls'): + """ + save model to the target path + """ + if paddle.distributed.get_rank() != 0: + return + model_path = os.path.join(model_path, model_name) + _mkdir_if_not_exist(model_path) + model_path = os.path.join(model_path, prefix) + + paddle.save(net.state_dict(), model_path + ".pdparams") + paddle.save(optimizer.state_dict(), model_path + ".pdopt") + paddle.save(metric_info, model_path + ".pdstates") + logger.info("Already save model in {}".format(model_path)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/__init__.py new file mode 100644 index 000000000..d6cdb6f8f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import optimizer + +from .arch import * +from .optimizer import * +from .data import * +from .utils import * diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/__init__.py new file mode 100644 index 000000000..2d5e29db8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/__init__.py @@ -0,0 +1,134 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
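Roughly how the save/restore entry points above fit together; the tiny Linear layer stands in for a real backbone and the paths are arbitrary examples:

import paddle
from ppcls.utils import logger, save_load

logger.init_logger()                       # save_load logs through the ppcls logger

net = paddle.nn.Linear(8, 2)               # stand-in for a real model
optimizer = paddle.optimizer.SGD(parameters=net.parameters())

# Writes ./output/ResNet50/epoch_10.pdparams / .pdopt / .pdstates (rank 0 only).
save_load.save_model(net, optimizer, {"top1": 0.76, "epoch": 10},
                     model_path="./output", model_name="ResNet50", prefix="epoch_10")

# Resuming later: init_model takes the checkpoint branch when "checkpoints" is set.
config = {"checkpoints": "./output/ResNet50/epoch_10"}
metric_info = save_load.init_model(config, net, optimizer)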
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import copy +import importlib + +import paddle.nn as nn +from paddle.jit import to_static +from paddle.static import InputSpec + +from . import backbone, gears +from .backbone import * +from .gears import build_gear +from .utils import * +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils import logger +from ppcls.utils.save_load import load_dygraph_pretrain +from ppcls.arch.slim import prune_model, quantize_model + +__all__ = ["build_model", "RecModel", "DistillationModel"] + + +def build_model(config): + arch_config = copy.deepcopy(config["Arch"]) + model_type = arch_config.pop("name") + mod = importlib.import_module(__name__) + arch = getattr(mod, model_type)(**arch_config) + if isinstance(arch, TheseusLayer): + prune_model(config, arch) + quantize_model(config, arch) + return arch + + +def apply_to_static(config, model): + support_to_static = config['Global'].get('to_static', False) + + if support_to_static: + specs = None + if 'image_shape' in config['Global']: + specs = [InputSpec([None] + config['Global']['image_shape'])] + model = to_static(model, input_spec=specs) + logger.info("Successfully to apply @to_static with specs: {}".format( + specs)) + return model + + +class RecModel(TheseusLayer): + def __init__(self, **config): + super().__init__() + backbone_config = config["Backbone"] + backbone_name = backbone_config.pop("name") + self.backbone = eval(backbone_name)(**backbone_config) + if "BackboneStopLayer" in config: + backbone_stop_layer = config["BackboneStopLayer"]["name"] + self.backbone.stop_after(backbone_stop_layer) + + if "Neck" in config: + self.neck = build_gear(config["Neck"]) + else: + self.neck = None + + if "Head" in config: + self.head = build_gear(config["Head"]) + else: + self.head = None + + def forward(self, x, label=None): + out = dict() + x = self.backbone(x) + out["backbone"] = x + if self.neck is not None: + x = self.neck(x) + out["neck"] = x + out["features"] = x + if self.head is not None: + y = self.head(x, label) + out["logits"] = y + return out + + +class DistillationModel(nn.Layer): + def __init__(self, + models=None, + pretrained_list=None, + freeze_params_list=None, + **kargs): + super().__init__() + assert isinstance(models, list) + self.model_list = [] + self.model_name_list = [] + if pretrained_list is not None: + assert len(pretrained_list) == len(models) + + if freeze_params_list is None: + freeze_params_list = [False] * len(models) + assert len(freeze_params_list) == len(models) + for idx, model_config in enumerate(models): + assert len(model_config) == 1 + key = list(model_config.keys())[0] + model_config = model_config[key] + model_name = model_config.pop("name") + model = eval(model_name)(**model_config) + + if freeze_params_list[idx]: + for param in model.parameters(): + param.trainable = False + self.model_list.append(self.add_sublayer(key, model)) + self.model_name_list.append(key) + + if pretrained_list is not None: + for idx, pretrained in enumerate(pretrained_list): + if pretrained is not None: + load_dygraph_pretrain( + self.model_name_list[idx], path=pretrained) + + def forward(self, x, 
label=None): + result_dict = dict() + for idx, model_name in enumerate(self.model_name_list): + if label is None: + result_dict[model_name] = self.model_list[idx](x) + else: + result_dict[model_name] = self.model_list[idx](x, label) + return result_dict diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/__init__.py new file mode 100644 index 000000000..74d266414 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/__init__.py @@ -0,0 +1,34 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import inspect + +from ppcls.arch.backbone.legendary_models.resnet import ResNet50, ResNet50_vd +from ppcls.arch.backbone.model_zoo.resnet_vc import ResNet50_vc + +# help whl get all the models' api (class type) and components' api (func type) +def get_apis(): + current_func = sys._getframe().f_code.co_name + current_module = sys.modules[__name__] + api = [] + for _, obj in inspect.getmembers(current_module, + inspect.isclass) + inspect.getmembers( + current_module, inspect.isfunction): + api.append(obj.__name__) + api.remove(current_func) + return api + + +__all__ = get_apis() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/base/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/base/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/base/theseus_layer.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/base/theseus_layer.py new file mode 100644 index 000000000..908d94445 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/base/theseus_layer.py @@ -0,0 +1,301 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
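For orientation, a minimal sketch of how build_model consumes the "Arch" block of a config. This assumes the vendored ppcls package, including its slim helpers (not part of this excerpt), is importable, and that those helpers no-op when no pruning/quantization section is present, as in the default configs:

import paddle
from ppcls.arch import build_model

config = {
    "Arch": {"name": "ResNet50", "class_num": 1000},   # "name" selects the class, the rest become kwargs
}
model = build_model(config)
logits = model(paddle.rand([1, 3, 224, 224]))          # plain tensor of shape [1, 1000]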
+ +from typing import Tuple, List, Dict, Union, Callable, Any + +from paddle import nn +from ppcls.utils import logger + + +class Identity(nn.Layer): + def __init__(self): + super(Identity, self).__init__() + + def forward(self, inputs): + return inputs + + +class TheseusLayer(nn.Layer): + def __init__(self, *args, **kwargs): + super(TheseusLayer, self).__init__() + self.res_dict = {} + self.res_name = self.full_name() + self.pruner = None + self.quanter = None + + def _return_dict_hook(self, layer, input, output): + res_dict = {"output": output} + # 'list' is needed to avoid error raised by popping self.res_dict + for res_key in list(self.res_dict): + # clear the res_dict because the forward process may change according to input + res_dict[res_key] = self.res_dict.pop(res_key) + return res_dict + + def init_res(self, + stages_pattern, + return_patterns=None, + return_stages=None): + if return_patterns and return_stages: + msg = f"The 'return_patterns' would be ignored when 'return_stages' is set." + logger.warning(msg) + return_stages = None + + if return_stages is True: + return_patterns = stages_pattern + # return_stages is int or bool + if type(return_stages) is int: + return_stages = [return_stages] + if isinstance(return_stages, list): + if max(return_stages) > len(stages_pattern) or min( + return_stages) < 0: + msg = f"The 'return_stages' set error. Illegal value(s) have been ignored. The stages' pattern list is {stages_pattern}." + logger.warning(msg) + return_stages = [ + val for val in return_stages + if val >= 0 and val < len(stages_pattern) + ] + return_patterns = [stages_pattern[i] for i in return_stages] + + if return_patterns: + self.update_res(return_patterns) + + def replace_sub(self, *args, **kwargs) -> None: + msg = "The function 'replace_sub()' is deprecated, please use 'upgrade_sublayer()' instead." + logger.error(DeprecationWarning(msg)) + raise DeprecationWarning(msg) + + def upgrade_sublayer(self, + layer_name_pattern: Union[str, List[str]], + handle_func: Callable[[nn.Layer, str], nn.Layer] + ) -> Dict[str, nn.Layer]: + """use 'handle_func' to modify the sub-layer(s) specified by 'layer_name_pattern'. + + Args: + layer_name_pattern (Union[str, List[str]]): The name of layer to be modified by 'handle_func'. + handle_func (Callable[[nn.Layer, str], nn.Layer]): The function to modify target layer specified by 'layer_name_pattern'. The formal params are the layer(nn.Layer) and pattern(str) that is (a member of) layer_name_pattern (when layer_name_pattern is List type). And the return is the layer processed. + + Returns: + Dict[str, nn.Layer]: The key is the pattern and corresponding value is the result returned by 'handle_func()'. 
+ + Examples: + + from paddle import nn + import paddleclas + + def rep_func(layer: nn.Layer, pattern: str): + new_layer = nn.Conv2D( + in_channels=layer._in_channels, + out_channels=layer._out_channels, + kernel_size=5, + padding=2 + ) + return new_layer + + net = paddleclas.MobileNetV1() + res = net.replace_sub(layer_name_pattern=["blocks[11].depthwise_conv.conv", "blocks[12].depthwise_conv.conv"], handle_func=rep_func) + print(res) + # {'blocks[11].depthwise_conv.conv': the corresponding new_layer, 'blocks[12].depthwise_conv.conv': the corresponding new_layer} + """ + + if not isinstance(layer_name_pattern, list): + layer_name_pattern = [layer_name_pattern] + + hit_layer_pattern_list = [] + for pattern in layer_name_pattern: + # parse pattern to find target layer and its parent + layer_list = parse_pattern_str(pattern=pattern, parent_layer=self) + if not layer_list: + continue + sub_layer_parent = layer_list[-2]["layer"] if len( + layer_list) > 1 else self + + sub_layer = layer_list[-1]["layer"] + sub_layer_name = layer_list[-1]["name"] + sub_layer_index = layer_list[-1]["index"] + + new_sub_layer = handle_func(sub_layer, pattern) + + if sub_layer_index: + getattr(sub_layer_parent, + sub_layer_name)[sub_layer_index] = new_sub_layer + else: + setattr(sub_layer_parent, sub_layer_name, new_sub_layer) + + hit_layer_pattern_list.append(pattern) + return hit_layer_pattern_list + + def stop_after(self, stop_layer_name: str) -> bool: + """stop forward and backward after 'stop_layer_name'. + + Args: + stop_layer_name (str): The name of layer that stop forward and backward after this layer. + + Returns: + bool: 'True' if successful, 'False' otherwise. + """ + + layer_list = parse_pattern_str(stop_layer_name, self) + if not layer_list: + return False + + parent_layer = self + for layer_dict in layer_list: + name, index = layer_dict["name"], layer_dict["index"] + if not set_identity(parent_layer, name, index): + msg = f"Failed to set the layers that after stop_layer_name('{stop_layer_name}') to IdentityLayer. The error layer's name is '{name}'." + logger.warning(msg) + return False + parent_layer = layer_dict["layer"] + + return True + + def update_res( + self, + return_patterns: Union[str, List[str]]) -> Dict[str, nn.Layer]: + """update the result(s) to be returned. + + Args: + return_patterns (Union[str, List[str]]): The name of layer to return output. + + Returns: + Dict[str, nn.Layer]: The pattern(str) and corresponding layer(nn.Layer) that have been set successfully. 
+ """ + + # clear res_dict that could have been set + self.res_dict = {} + + class Handler(object): + def __init__(self, res_dict): + # res_dict is a reference + self.res_dict = res_dict + + def __call__(self, layer, pattern): + layer.res_dict = self.res_dict + layer.res_name = pattern + if hasattr(layer, "hook_remove_helper"): + layer.hook_remove_helper.remove() + layer.hook_remove_helper = layer.register_forward_post_hook( + save_sub_res_hook) + return layer + + handle_func = Handler(self.res_dict) + + hit_layer_pattern_list = self.upgrade_sublayer( + return_patterns, handle_func=handle_func) + + if hasattr(self, "hook_remove_helper"): + self.hook_remove_helper.remove() + self.hook_remove_helper = self.register_forward_post_hook( + self._return_dict_hook) + + return hit_layer_pattern_list + + +def save_sub_res_hook(layer, input, output): + layer.res_dict[layer.res_name] = output + + +def set_identity(parent_layer: nn.Layer, + layer_name: str, + layer_index: str=None) -> bool: + """set the layer specified by layer_name and layer_index to Indentity. + + Args: + parent_layer (nn.Layer): The parent layer of target layer specified by layer_name and layer_index. + layer_name (str): The name of target layer to be set to Indentity. + layer_index (str, optional): The index of target layer to be set to Indentity in parent_layer. Defaults to None. + + Returns: + bool: True if successfully, False otherwise. + """ + + stop_after = False + for sub_layer_name in parent_layer._sub_layers: + if stop_after: + parent_layer._sub_layers[sub_layer_name] = Identity() + continue + if sub_layer_name == layer_name: + stop_after = True + + if layer_index and stop_after: + stop_after = False + for sub_layer_index in parent_layer._sub_layers[ + layer_name]._sub_layers: + if stop_after: + parent_layer._sub_layers[layer_name][ + sub_layer_index] = Identity() + continue + if layer_index == sub_layer_index: + stop_after = True + + return stop_after + + +def parse_pattern_str(pattern: str, parent_layer: nn.Layer) -> Union[ + None, List[Dict[str, Union[nn.Layer, str, None]]]]: + """parse the string type pattern. + + Args: + pattern (str): The pattern to discribe layer. + parent_layer (nn.Layer): The root layer relative to the pattern. + + Returns: + Union[None, List[Dict[str, Union[nn.Layer, str, None]]]]: None if failed. If successfully, the members are layers parsed in order: + [ + {"layer": first layer, "name": first layer's name parsed, "index": first layer's index parsed if exist}, + {"layer": second layer, "name": second layer's name parsed, "index": second layer's index parsed if exist}, + ... + ] + """ + + pattern_list = pattern.split(".") + if not pattern_list: + msg = f"The pattern('{pattern}') is illegal. Please check and retry." + logger.warning(msg) + return None + + layer_list = [] + while len(pattern_list) > 0: + if '[' in pattern_list[0]: + target_layer_name = pattern_list[0].split('[')[0] + target_layer_index = pattern_list[0].split('[')[1].split(']')[0] + else: + target_layer_name = pattern_list[0] + target_layer_index = None + + target_layer = getattr(parent_layer, target_layer_name, None) + + if target_layer is None: + msg = f"Not found layer named('{target_layer_name}') specifed in pattern('{pattern}')." + logger.warning(msg) + return None + + if target_layer_index and target_layer: + if int(target_layer_index) < 0 or int(target_layer_index) >= len( + target_layer): + msg = f"Not found layer by index('{target_layer_index}') specifed in pattern('{pattern}'). 
The index should < {len(target_layer)} and > 0." + logger.warning(msg) + return None + + target_layer = target_layer[target_layer_index] + + layer_list.append({ + "layer": target_layer, + "name": target_layer_name, + "index": target_layer_index + }) + + pattern_list = pattern_list[1:] + parent_layer = target_layer + return layer_list diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/legendary_models/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/legendary_models/__init__.py new file mode 100644 index 000000000..550e5544c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/legendary_models/__init__.py @@ -0,0 +1,8 @@ +from .resnet import ResNet50 + +# from .resnet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152, ResNet18_vd, ResNet34_vd, ResNet50_vd, ResNet101_vd, ResNet152_vd +# from .hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W64_C +# from .mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75, MobileNetV1 +# from .mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25 +# from .inception_v3 import InceptionV3 +# from .vgg import VGG11, VGG13, VGG16, VGG19 \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/legendary_models/resnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/legendary_models/resnet.py new file mode 100644 index 000000000..74c5c5fa6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/legendary_models/resnet.py @@ -0,0 +1,591 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
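To make the TheseusLayer pattern utilities concrete, a hedged sketch against the ResNet backbone defined below; the layer patterns follow the MODEL_STAGES_PATTERN naming and are illustrative, and no pretrained weights are loaded:

from paddle import nn
from ppcls.arch.backbone.legendary_models.resnet import ResNet50

net = ResNet50()

# Capture intermediate feature maps: forward() then returns a dict keyed by these patterns plus "output".
net.update_res(["blocks[2]", "blocks[6]"])
# out = net(paddle.rand([1, 3, 224, 224]))   # {"output": ..., "blocks[2]": ..., "blocks[6]": ...}

# Replace a sub-layer in place with upgrade_sublayer() (replace_sub() above is deprecated):
def widen_kernel(layer: nn.Layer, pattern: str) -> nn.Layer:
    return nn.Conv2D(layer._in_channels, layer._out_channels, kernel_size=5, padding=2)

net.upgrade_sublayer("blocks[0].conv1.conv", handle_func=widen_kernel)

# Truncate after a named layer: everything that follows becomes Identity.
net.stop_after("blocks[6]")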
+ +from __future__ import absolute_import, division, print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math + +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "ResNet18": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams", + "ResNet18_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_vd_pretrained.pdparams", + "ResNet34": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_pretrained.pdparams", + "ResNet34_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_vd_pretrained.pdparams", + "ResNet50": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams", + "ResNet50_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams", + "ResNet101": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_pretrained.pdparams", + "ResNet101_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_pretrained.pdparams", + "ResNet152": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_pretrained.pdparams", + "ResNet152_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_vd_pretrained.pdparams", + "ResNet200_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet200_vd_pretrained.pdparams", +} + +MODEL_STAGES_PATTERN = { + "ResNet18": ["blocks[1]", "blocks[3]", "blocks[5]", "blocks[7]"], + "ResNet34": ["blocks[2]", "blocks[6]", "blocks[12]", "blocks[15]"], + "ResNet50": ["blocks[2]", "blocks[6]", "blocks[12]", "blocks[15]"], + "ResNet101": ["blocks[2]", "blocks[6]", "blocks[29]", "blocks[32]"], + "ResNet152": ["blocks[2]", "blocks[10]", "blocks[46]", "blocks[49]"], + "ResNet200": ["blocks[2]", "blocks[14]", "blocks[62]", "blocks[65]"] +} + +__all__ = MODEL_URLS.keys() +''' +ResNet config: dict. + key: depth of ResNet. + values: config's dict of specific model. + keys: + block_type: Two different blocks in ResNet, BasicBlock and BottleneckBlock are optional. + block_depth: The number of blocks in different stages in ResNet. + num_channels: The number of channels to enter the next stage. 
+''' +NET_CONFIG = { + "18": { + "block_type": "BasicBlock", + "block_depth": [2, 2, 2, 2], + "num_channels": [64, 64, 128, 256] + }, + "34": { + "block_type": "BasicBlock", + "block_depth": [3, 4, 6, 3], + "num_channels": [64, 64, 128, 256] + }, + "50": { + "block_type": "BottleneckBlock", + "block_depth": [3, 4, 6, 3], + "num_channels": [64, 256, 512, 1024] + }, + "101": { + "block_type": "BottleneckBlock", + "block_depth": [3, 4, 23, 3], + "num_channels": [64, 256, 512, 1024] + }, + "152": { + "block_type": "BottleneckBlock", + "block_depth": [3, 8, 36, 3], + "num_channels": [64, 256, 512, 1024] + }, + "200": { + "block_type": "BottleneckBlock", + "block_depth": [3, 12, 48, 3], + "num_channels": [64, 256, 512, 1024] + }, +} + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + self.is_vd_mode = is_vd_mode + self.act = act + self.avg_pool = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=False, + data_format=data_format) + self.bn = BatchNorm( + num_filters, + param_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=ParamAttr(learning_rate=lr_mult), + data_layout=data_format) + self.relu = nn.ReLU() + + def forward(self, x): + if self.is_vd_mode: + x = self.avg_pool(x) + x = self.conv(x) + x = self.bn(x) + if self.act: + x = self.relu(x) + return x + + +class BottleneckBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + lr_mult=lr_mult, + data_format=data_format) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride if if_first else 1, + is_vd_mode=False if if_first else True, + lr_mult=lr_mult, + data_format=data_format) + self.relu = nn.ReLU() + self.shortcut = shortcut + + def forward(self, x): + identity = x + x = self.conv0(x) + x = self.conv1(x) + x = self.conv2(x) + + if self.shortcut: + short = identity + else: + short = self.short(identity) + x = paddle.add(x=x, y=short) + x = self.relu(x) + return x + + +class BasicBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + + self.stride = stride + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + act=None, + lr_mult=lr_mult, + data_format=data_format) + if not shortcut: + self.short = ConvBNLayer( + 
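+                # 1x1 projection shortcut, used when the identity cannot be added
+                # directly; in vd mode the stride is taken by an avg-pool instead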
num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=stride if if_first else 1, + is_vd_mode=False if if_first else True, + lr_mult=lr_mult, + data_format=data_format) + self.shortcut = shortcut + self.relu = nn.ReLU() + + def forward(self, x): + identity = x + x = self.conv0(x) + x = self.conv1(x) + if self.shortcut: + short = identity + else: + short = self.short(identity) + x = paddle.add(x=x, y=short) + x = self.relu(x) + return x + + +class ResNet(TheseusLayer): + """ + ResNet + Args: + config: dict. config of ResNet. + version: str="vb". Different version of ResNet, version vd can perform better. + class_num: int=1000. The number of classes. + lr_mult_list: list. Control the learning rate of different stages. + Returns: + model: nn.Layer. Specific ResNet model depends on args. + """ + + def __init__(self, + config, + stages_pattern, + version="vb", + class_num=1000, + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0], + data_format="NCHW", + input_image_channel=3, + return_patterns=None, + return_stages=None): + super().__init__() + + self.cfg = config + self.lr_mult_list = lr_mult_list + self.is_vd_mode = version == "vd" + self.class_num = class_num + self.num_filters = [64, 128, 256, 512] + self.block_depth = self.cfg["block_depth"] + self.block_type = self.cfg["block_type"] + self.num_channels = self.cfg["num_channels"] + self.channels_mult = 1 if self.num_channels[-1] == 256 else 4 + + assert isinstance(self.lr_mult_list, ( + list, tuple + )), "lr_mult_list should be in (list, tuple) but got {}".format( + type(self.lr_mult_list)) + assert len(self.lr_mult_list + ) == 5, "lr_mult_list length should be 5 but got {}".format( + len(self.lr_mult_list)) + + self.stem_cfg = { + #num_channels, num_filters, filter_size, stride + "vb": [[input_image_channel, 64, 7, 2]], + "vd": + [[input_image_channel, 32, 3, 2], [32, 32, 3, 1], [32, 64, 3, 1]] + } + + self.stem = nn.Sequential(* [ + ConvBNLayer( + num_channels=in_c, + num_filters=out_c, + filter_size=k, + stride=s, + act="relu", + lr_mult=self.lr_mult_list[0], + data_format=data_format) + for in_c, out_c, k, s in self.stem_cfg[version] + ]) + + self.max_pool = MaxPool2D( + kernel_size=3, stride=2, padding=1, data_format=data_format) + block_list = [] + for block_idx in range(len(self.block_depth)): + shortcut = False + for i in range(self.block_depth[block_idx]): + block_list.append(globals()[self.block_type]( + num_channels=self.num_channels[block_idx] if i == 0 else + self.num_filters[block_idx] * self.channels_mult, + num_filters=self.num_filters[block_idx], + stride=2 if i == 0 and block_idx != 0 else 1, + shortcut=shortcut, + if_first=block_idx == i == 0 if version == "vd" else True, + lr_mult=self.lr_mult_list[block_idx + 1], + data_format=data_format)) + shortcut = True + self.blocks = nn.Sequential(*block_list) + + self.avg_pool = AdaptiveAvgPool2D(1, data_format=data_format) + self.flatten = nn.Flatten() + self.avg_pool_channels = self.num_channels[-1] * 2 + stdv = 1.0 / math.sqrt(self.avg_pool_channels * 1.0) + self.fc = Linear( + self.avg_pool_channels, + self.class_num, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + + self.data_format = data_format + + super().init_res( + stages_pattern, + return_patterns=return_patterns, + return_stages=return_stages) + + def forward(self, x): + with paddle.static.amp.fp16_guard(): + if self.data_format == "NHWC": + x = paddle.transpose(x, [0, 2, 3, 1]) + x.stop_gradient = True + x = self.stem(x) + x = self.max_pool(x) + x = self.blocks(x) + x = self.avg_pool(x) 
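+            # pooled [N, C, 1, 1] features are flattened to [N, C] and projected
+            # to class_num logits by the fc head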
+ x = self.flatten(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNet18(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet18 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet18` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["18"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet18"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18"], use_ssld) + return model + + +def ResNet18_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet18_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet18_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["18"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet18"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18_vd"], use_ssld) + return model + + +def ResNet34(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet34 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet34` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["34"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet34"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet34"], use_ssld) + return model + + +def ResNet34_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet34_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet34_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["34"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet34"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet34_vd"], use_ssld) + return model + + +def ResNet50(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet50 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet50` model depends on args. 
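+        Example (illustrative sketch; assumes a 224x224 RGB input):
+            import paddle
+            model = ResNet50(pretrained=False, class_num=1000)
+            logits = model(paddle.rand([1, 3, 224, 224]))   # shape: [1, 1000]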
+ """ + model = ResNet( + config=NET_CONFIG["50"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet50"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50"], use_ssld) + return model + + +def ResNet50_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet50_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet50_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["50"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet50"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50_vd"], use_ssld) + return model + + +def ResNet101(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet101 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet101` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["101"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet101"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet101"], use_ssld) + return model + + +def ResNet101_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet101_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet101_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["101"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet101"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet101_vd"], use_ssld) + return model + + +def ResNet152(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet152 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet152` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["152"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet152"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet152"], use_ssld) + return model + + +def ResNet152_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet152_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet152_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["152"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet152"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet152_vd"], use_ssld) + return model + + +def ResNet200_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet200_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. 
+ use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet200_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["200"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet200"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet200_vd"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/model_zoo/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/model_zoo/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/model_zoo/resnet_vc.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/model_zoo/resnet_vc.py new file mode 100644 index 000000000..6b972dc7b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/model_zoo/resnet_vc.py @@ -0,0 +1,309 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "ResNet50_vc": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vc_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + 
stride=stride, + act='relu', + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride, + name=name + "_branch1") + + self.shortcut = shortcut + + self._num_channels_out = num_filters * 4 + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class BasicBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + name=None): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + act=None, + name=name + "_branch2b") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=stride, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=conv1) + y = F.relu(y) + return y + + +class ResNet_vc(nn.Layer): + def __init__(self, layers=50, class_num=1000): + super(ResNet_vc, self).__init__() + + self.layers = layers + supported_layers = [18, 34, 50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_channels = [64, 256, 512, + 1024] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512] + + self.conv1_1 = ConvBNLayer( + num_channels=3, + num_filters=32, + filter_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + num_channels=32, + num_filters=32, + filter_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + num_channels=32, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name="conv1_3") + + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_list = [] + if layers >= 50: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + name=conv_name)) + self.block_list.append(bottleneck_block) + shortcut = True + else: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), 
+ BasicBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block], + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + name=conv_name)) + self.block_list.append(basic_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_0.w_0"), + bias_attr=ParamAttr(name="fc_0.b_0")) + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNet50_vc(pretrained=False, use_ssld=False, **kwargs): + model = ResNet_vc(layers=50, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNet50_vc"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/variant_models/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/variant_models/__init__.py new file mode 100644 index 000000000..ae9549246 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/variant_models/__init__.py @@ -0,0 +1,3 @@ +from .resnet_variant import ResNet50_last_stage_stride1 +# from .vgg_variant import VGG19Sigmoid +# from .pp_lcnet_variant import PPLCNet_x2_5_Tanh \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/variant_models/resnet_variant.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/variant_models/resnet_variant.py new file mode 100644 index 000000000..0219344b1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/backbone/variant_models/resnet_variant.py @@ -0,0 +1,23 @@ +from paddle.nn import Conv2D +from ppcls.arch.backbone.legendary_models.resnet import ResNet50, MODEL_URLS, _load_pretrained + +__all__ = ["ResNet50_last_stage_stride1"] + + +def ResNet50_last_stage_stride1(pretrained=False, use_ssld=False, **kwargs): + def replace_function(conv, pattern): + new_conv = Conv2D( + in_channels=conv._in_channels, + out_channels=conv._out_channels, + kernel_size=conv._kernel_size, + stride=1, + padding=conv._padding, + groups=conv._groups, + bias_attr=conv._bias_attr) + return new_conv + + pattern = ["blocks[13].conv1.conv", "blocks[13].short.conv"] + model = ResNet50(pretrained=False, use_ssld=use_ssld, **kwargs) + model.upgrade_sublayer(pattern, replace_function) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/__init__.py new file mode 100644 index 000000000..75ca41d8a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/__init__.py @@ -0,0 +1,32 @@ +# Copyright 
(c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .arcmargin import ArcMargin +from .cosmargin import CosMargin +from .circlemargin import CircleMargin +from .fc import FC +from .vehicle_neck import VehicleNeck + +__all__ = ['build_gear'] + + +def build_gear(config): + support_dict = [ + 'ArcMargin', 'CosMargin', 'CircleMargin', 'FC', 'VehicleNeck' + ] + module_name = config.pop('name') + assert module_name in support_dict, Exception( + 'head only support {}'.format(support_dict)) + module_class = eval(module_name)(**config) + return module_class diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/arcmargin.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/arcmargin.py new file mode 100644 index 000000000..22cc76e1d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/arcmargin.py @@ -0,0 +1,72 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
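+# A minimal usage sketch for the build_gear() factory in gears/__init__.py above
+# (the embedding_size/class_num values are illustrative, not taken from this patch):
+#
+#     head = build_gear({"name": "ArcMargin",
+#                        "embedding_size": 512,
+#                        "class_num": 1000})
+#     logits = head(features, labels)   # the margin is applied only in training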
+ +import paddle +import paddle.nn as nn +import math + + +class ArcMargin(nn.Layer): + def __init__(self, + embedding_size, + class_num, + margin=0.5, + scale=80.0, + easy_margin=False): + super().__init__() + self.embedding_size = embedding_size + self.class_num = class_num + self.margin = margin + self.scale = scale + self.easy_margin = easy_margin + self.weight = self.create_parameter( + shape=[self.embedding_size, self.class_num], + is_bias=False, + default_initializer=paddle.nn.initializer.XavierNormal()) + + def forward(self, input, label=None): + input_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + input = paddle.divide(input, input_norm) + + weight_norm = paddle.sqrt( + paddle.sum(paddle.square(self.weight), axis=0, keepdim=True)) + weight = paddle.divide(self.weight, weight_norm) + + cos = paddle.matmul(input, weight) + if not self.training or label is None: + return cos + sin = paddle.sqrt(1.0 - paddle.square(cos) + 1e-6) + cos_m = math.cos(self.margin) + sin_m = math.sin(self.margin) + phi = cos * cos_m - sin * sin_m + + th = math.cos(self.margin) * (-1) + mm = math.sin(self.margin) * self.margin + if self.easy_margin: + phi = self._paddle_where_more_than(cos, 0, phi, cos) + else: + phi = self._paddle_where_more_than(cos, th, phi, cos - mm) + + one_hot = paddle.nn.functional.one_hot(label, self.class_num) + one_hot = paddle.squeeze(one_hot, axis=[1]) + output = paddle.multiply(one_hot, phi) + paddle.multiply( + (1.0 - one_hot), cos) + output = output * self.scale + return output + + def _paddle_where_more_than(self, target, limit, x, y): + mask = paddle.cast(x=(target > limit), dtype='float32') + output = paddle.multiply(mask, x) + paddle.multiply((1.0 - mask), y) + return output diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/circlemargin.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/circlemargin.py new file mode 100644 index 000000000..d1bce83cb --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/circlemargin.py @@ -0,0 +1,59 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
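+# Note on the ArcMargin head defined above: with L2-normalized features and
+# weights, cos = cos(theta), and phi = cos*cos(m) - sin*sin(m) = cos(theta + m),
+# i.e. an additive angular margin (ArcFace). During training the target-class
+# logit is replaced by cos(theta + m) (with the usual fallback once theta + m
+# would exceed pi) and the result is multiplied by `scale`.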
+ +import math +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class CircleMargin(nn.Layer): + def __init__(self, embedding_size, class_num, margin, scale): + super(CircleMargin, self).__init__() + self.scale = scale + self.margin = margin + self.embedding_size = embedding_size + self.class_num = class_num + + self.weight = self.create_parameter( + shape=[self.embedding_size, self.class_num], + is_bias=False, + default_initializer=paddle.nn.initializer.XavierNormal()) + + def forward(self, input, label): + feat_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + input = paddle.divide(input, feat_norm) + + weight_norm = paddle.sqrt( + paddle.sum(paddle.square(self.weight), axis=0, keepdim=True)) + weight = paddle.divide(self.weight, weight_norm) + + logits = paddle.matmul(input, weight) + if not self.training or label is None: + return logits + + alpha_p = paddle.clip(-logits.detach() + 1 + self.margin, min=0.) + alpha_n = paddle.clip(logits.detach() + self.margin, min=0.) + delta_p = 1 - self.margin + delta_n = self.margin + + m_hot = F.one_hot(label.reshape([-1]), num_classes=logits.shape[1]) + + logits_p = alpha_p * (logits - delta_p) + logits_n = alpha_n * (logits - delta_n) + pre_logits = logits_p * m_hot + logits_n * (1 - m_hot) + pre_logits = self.scale * pre_logits + + return pre_logits diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/cosmargin.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/cosmargin.py new file mode 100644 index 000000000..578b64c2b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/cosmargin.py @@ -0,0 +1,55 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
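+# Note on the CircleMargin head defined above (Circle Loss, Sun et al., CVPR 2020):
+# the target logit is re-weighted by alpha_p = [1 + margin - logit]_+ around
+# delta_p = 1 - margin, non-target logits by alpha_n = [logit + margin]_+ around
+# delta_n = margin, and the result is multiplied by `scale`.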
+ +import paddle +import math +import paddle.nn as nn + + +class CosMargin(paddle.nn.Layer): + def __init__(self, embedding_size, class_num, margin=0.35, scale=64.0): + super(CosMargin, self).__init__() + self.scale = scale + self.margin = margin + self.embedding_size = embedding_size + self.class_num = class_num + + self.weight = self.create_parameter( + shape=[self.embedding_size, self.class_num], + is_bias=False, + default_initializer=paddle.nn.initializer.XavierNormal()) + + def forward(self, input, label): + label.stop_gradient = True + + input_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + input = paddle.divide(input, input_norm) + + weight_norm = paddle.sqrt( + paddle.sum(paddle.square(self.weight), axis=0, keepdim=True)) + weight = paddle.divide(self.weight, weight_norm) + + cos = paddle.matmul(input, weight) + if not self.training or label is None: + return cos + + cos_m = cos - self.margin + + one_hot = paddle.nn.functional.one_hot(label, self.class_num) + one_hot = paddle.squeeze(one_hot, axis=[1]) + output = paddle.multiply(one_hot, cos_m) + paddle.multiply( + (1.0 - one_hot), cos) + output = output * self.scale + return output diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/fc.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/fc.py new file mode 100644 index 000000000..b32474195 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/fc.py @@ -0,0 +1,35 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
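+# The margin heads above differ only in how the margin enters the cosine logits:
+# ArcMargin adds it to the angle (cos(theta + m)), CosMargin subtracts it from the
+# target cosine (cos(theta) - m), and CircleMargin re-weights positive/negative
+# logits; the plain FC head below applies no margin at all.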
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +import paddle.nn as nn + + +class FC(nn.Layer): + def __init__(self, embedding_size, class_num): + super(FC, self).__init__() + self.embedding_size = embedding_size + self.class_num = class_num + weight_attr = paddle.ParamAttr( + initializer=paddle.nn.initializer.XavierNormal()) + self.fc = paddle.nn.Linear( + self.embedding_size, self.class_num, weight_attr=weight_attr) + + def forward(self, input, label=None): + out = self.fc(input) + return out diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/identity_head.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/identity_head.py new file mode 100644 index 000000000..7d11e5742 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/identity_head.py @@ -0,0 +1,9 @@ +from paddle import nn + + +class IdentityHead(nn.Layer): + def __init__(self): + super(IdentityHead, self).__init__() + + def forward(self, x, label=None): + return {"features": x, "logits": None} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/vehicle_neck.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/vehicle_neck.py new file mode 100644 index 000000000..05f4e333f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/gears/vehicle_neck.py @@ -0,0 +1,52 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +import paddle +import paddle.nn as nn + + +class VehicleNeck(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros', + weight_attr=None, + bias_attr=None, + data_format='NCHW'): + super().__init__() + self.conv = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + padding_mode=padding_mode, + weight_attr=weight_attr, + bias_attr=weight_attr, + data_format=data_format) + self.flatten = nn.Flatten() + + def forward(self, x): + x = self.conv(x) + x = self.flatten(x) + return x diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/slim/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/slim/__init__.py new file mode 100644 index 000000000..3733059ce --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/slim/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ppcls.arch.slim.prune import prune_model +from ppcls.arch.slim.quant import quantize_model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/slim/prune.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/slim/prune.py new file mode 100644 index 000000000..c0c9d220b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/slim/prune.py @@ -0,0 +1,65 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +import paddle +from ppcls.utils import logger + + +def prune_model(config, model): + if config.get("Slim", False) and config["Slim"].get("prune", False): + import paddleslim + prune_method_name = config["Slim"]["prune"]["name"].lower() + assert prune_method_name in [ + "fpgm", "l1_norm" + ], "The prune methods only support 'fpgm' and 'l1_norm'" + if prune_method_name == "fpgm": + model.pruner = paddleslim.dygraph.FPGMFilterPruner( + model, [1] + config["Global"]["image_shape"]) + else: + model.pruner = paddleslim.dygraph.L1NormFilterPruner( + model, [1] + config["Global"]["image_shape"]) + + # prune model + _prune_model(config, model) + else: + model.pruner = None + + + +def _prune_model(config, model): + from paddleslim.analysis import dygraph_flops as flops + logger.info("FLOPs before pruning: {}GFLOPs".format( + flops(model, [1] + config["Global"]["image_shape"]) / 1e9)) + model.eval() + + params = [] + for sublayer in model.sublayers(): + for param in sublayer.parameters(include_sublayers=False): + if isinstance(sublayer, paddle.nn.Conv2D): + params.append(param.name) + ratios = {} + for param in params: + ratios[param] = config["Slim"]["prune"]["pruned_ratio"] + plan = model.pruner.prune_vars(ratios, [0]) + + logger.info("FLOPs after pruning: {}GFLOPs; pruned ratio: {}".format( + flops(model, [1] + config["Global"]["image_shape"]) / 1e9, + plan.pruned_flops)) + + for param in model.parameters(): + if "conv2d" in param.name: + logger.info("{}\t{}".format(param.name, param.shape)) + + model.train() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/slim/quant.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/slim/quant.py new file mode 100644 index 000000000..b8f59a78f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/slim/quant.py @@ -0,0 +1,55 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +import paddle +from ppcls.utils import logger + +QUANT_CONFIG = { + # weight preprocess type, default is None and no preprocessing is performed. + 'weight_preprocess_type': None, + # activation preprocess type, default is None and no preprocessing is performed. + 'activation_preprocess_type': None, + # weight quantize type, default is 'channel_wise_abs_max' + 'weight_quantize_type': 'channel_wise_abs_max', + # activation quantize type, default is 'moving_average_abs_max' + 'activation_quantize_type': 'moving_average_abs_max', + # weight quantize bit num, default is 8 + 'weight_bits': 8, + # activation quantize bit num, default is 8 + 'activation_bits': 8, + # data type after quantization, such as 'uint8', 'int8', etc. default is 'int8' + 'dtype': 'int8', + # window size for 'range_abs_max' quantization. default is 10000 + 'window_size': 10000, + # The decay coefficient of moving average, default is 0.9 + 'moving_rate': 0.9, + # for dygraph quantization, layers of type in quantizable_layer_type will be quantized + 'quantizable_layer_type': ['Conv2D', 'Linear'], +} + + +def quantize_model(config, model): + if config.get("Slim", False) and config["Slim"].get("quant", False): + from paddleslim.dygraph.quant import QAT + assert config["Slim"]["quant"]["name"].lower( + ) == 'pact', 'Only PACT quantization method is supported now' + QUANT_CONFIG["activation_preprocess_type"] = "PACT" + model.quanter = QAT(config=QUANT_CONFIG) + model.quanter.quantize(model) + logger.info("QAT model summary:") + paddle.summary(model, (1, 3, 224, 224)) + else: + model.quanter = None + return diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/utils.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/utils.py new file mode 100644 index 000000000..308475d7d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/arch/utils.py @@ -0,0 +1,53 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import types +from difflib import SequenceMatcher + +from . 
import backbone + + +def get_architectures(): + """ + get all of model architectures + """ + names = [] + for k, v in backbone.__dict__.items(): + if isinstance(v, (types.FunctionType, six.class_types)): + names.append(k) + return names + + +def get_blacklist_model_in_static_mode(): + from ppcls.arch.backbone import distilled_vision_transformer + from ppcls.arch.backbone import vision_transformer + blacklist = distilled_vision_transformer.__all__ + vision_transformer.__all__ + return blacklist + + +def similar_architectures(name='', names=[], thresh=0.1, topk=10): + """ + inferred similar architectures + """ + scores = [] + for idx, n in enumerate(names): + if n.startswith('__'): + continue + score = SequenceMatcher(None, n.lower(), name.lower()).quick_ratio() + if score > thresh: + scores.append((idx, score)) + scores.sort(key=lambda x: x[1], reverse=True) + similar_names = [names[s[0]] for s in scores[:min(topk, len(scores))]] + return similar_names diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml new file mode 100644 index 000000000..ab4c29c30 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + 
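+    # The ResNet50_* DataAugment configs in this directory share this skeleton and
+    # differ mainly in the Train.dataset transform_ops / batch_transform_ops
+    # entries (AutoAugment, Cutmix, Cutout, GridMask, HideAndSeek, Mixup). They are
+    # typically consumed by a PaddleClas-style `tools/train.py -c <config>` entry
+    # point; the exact launcher in this repo may differ.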
topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml new file mode 100644 index 000000000..d75fede9e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml new file mode 100644 index 000000000..2fefb9f4b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 
224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - CutmixOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml new file mode 100644 index 000000000..4bf530664 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml @@ -0,0 +1,131 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - Cutout: + n_holes: 1 + 
length: 112 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml new file mode 100644 index 000000000..c0016aa00 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - GridMask: + d1: 96 + d2: 224 + rotate: 1 + ratio: 0.5 + mode: 0 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 
+ transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml new file mode 100644 index 000000000..12e4ac8db --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - HideAndSeek: + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml new file mode 100644 index 000000000..3434cab5a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml 
@@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml new file mode 100644 index 000000000..153451e13 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml @@ -0,0 +1,131 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + 
cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - RandAugment: + num_layers: 2 + magnitude: 5 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml new file mode 100644 index 000000000..8e89c5ca1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] 
+ + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/ResNet/ResNet50.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/ResNet/ResNet50.yaml new file mode 100644 index 000000000..c2da23fb3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/ResNet/ResNet50.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 
10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/ResNet/ResNet50_amp_4x8.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/ResNet/ResNet50_amp_4x8.yaml new file mode 100644 index 000000000..2d6de3b21 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/ResNet/ResNet50_amp_4x8.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 90 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: False + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.875 + multi_precision: True + lr: + name: Cosine + warmup_epoch: 8 + learning_rate: 8.192 + regularizer: + name: 'L2' + coeff: 2.5e-05 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/ResNet/ResNet50_vd.yaml 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/ResNet/ResNet50_vd.yaml new file mode 100644 index 000000000..be7b2d9db --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/ImageNet/ResNet/ResNet50_vd.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/quick_start/ResNet50_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/quick_start/ResNet50_vd.yaml new file mode 100644 index 000000000..30d745599 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/configs/quick_start/ResNet50_vd.yaml @@ -0,0 +1,107 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 5 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50_vd + class_num: 102 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 
0.9 + lr: + name: Cosine + learning_rate: 0.0125 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./data/datasets/flowers102/ + cls_label_path: ./data/datasets/flowers102/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./data/datasets/flowers102/ + cls_label_path: ./data/datasets/flowers102/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/engine.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/engine.py new file mode 100644 index 000000000..019cf1650 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/engine.py @@ -0,0 +1,468 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
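The Engine class defined in engine.py below is the single runtime entry point that consumes the YAML configs added above; train, eval, infer and export all dispatch through it. As a minimal usage sketch — assuming the standard ppcls config loader get_config and a relative path to the ResNet50 config from this patch (both the paths and the override are illustrative) — a single-process training run looks roughly like:

    # Minimal sketch, not part of the patched files. For multi-GPU runs the same
    # call is normally wrapped with `python -m paddle.distributed.launch`.
    from ppcls.utils import config
    from ppcls.engine.engine import Engine

    cfg = config.get_config(
        "configs/ImageNet/ResNet/ResNet50.yaml",   # illustrative path from this patch
        overrides=["Global.epochs=1"],             # optional key=value overrides
        show=False)

    engine = Engine(cfg, mode="train")             # also: "eval", "infer", "export"
    engine.train()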
+from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import platform +import paddle +import paddle.distributed as dist +from visualdl import LogWriter +from paddle import nn +import numpy as np +import random + +from ppcls.utils.check import check_gpu +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger +from ppcls.utils.logger import init_logger +from ppcls.utils.config import print_config +from ppcls.data import build_dataloader +from ppcls.arch import build_model, RecModel, DistillationModel, TheseusLayer +from ppcls.arch import apply_to_static +from ppcls.loss import build_loss +from ppcls.metric import build_metrics +from ppcls.optimizer import build_optimizer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url +from ppcls.utils.save_load import init_model +from ppcls.utils import save_load + +from ppcls.data.utils.get_image_list import get_image_list +from ppcls.data.postprocess import build_postprocess +from ppcls.data import create_operators +from ppcls.engine.train import train_epoch +from ppcls.engine import evaluation +from ppcls.arch.gears.identity_head import IdentityHead + + +class Engine(object): + def __init__(self, config, mode="train"): + assert mode in ["train", "eval", "infer", "export"] + self.mode = mode + self.config = config + self.eval_mode = self.config["Global"].get("eval_mode", + "classification") + if "Head" in self.config["Arch"] or self.config["Arch"].get("is_rec", + False): + self.is_rec = True + else: + self.is_rec = False + + # set seed + seed = self.config["Global"].get("seed", False) + if seed or seed == 0: + assert isinstance(seed, int), "The 'seed' must be a integer!" + paddle.seed(seed) + np.random.seed(seed) + random.seed(seed) + + # init logger + self.output_dir = self.config['Global']['output_dir'] + log_file = os.path.join(self.output_dir, self.config["Arch"]["name"], + f"{mode}.log") + init_logger(log_file=log_file) + print_config(config) + + # init train_func and eval_func + assert self.eval_mode in ["classification", "retrieval"], logger.error( + "Invalid eval mode: {}".format(self.eval_mode)) + self.train_epoch_func = train_epoch + self.eval_func = getattr(evaluation, self.eval_mode + "_eval") + + self.use_dali = self.config['Global'].get("use_dali", False) + + # for visualdl + self.vdl_writer = None + if self.config['Global'][ + 'use_visualdl'] and mode == "train" and dist.get_rank() == 0: + vdl_writer_path = os.path.join(self.output_dir, "vdl") + if not os.path.exists(vdl_writer_path): + os.makedirs(vdl_writer_path) + self.vdl_writer = LogWriter(logdir=vdl_writer_path) + + # set device + assert self.config["Global"]["device"] in ["cpu", "gpu", "xpu", "npu", "mlu"] + self.device = paddle.set_device(self.config["Global"]["device"]) + logger.info('train with paddle {} and device {}'.format( + paddle.__version__, self.device)) + + # AMP training + self.amp = True if "AMP" in self.config and self.mode == "train" else False + if self.amp and self.config["AMP"] is not None: + self.scale_loss = self.config["AMP"].get("scale_loss", 1.0) + self.use_dynamic_loss_scaling = self.config["AMP"].get( + "use_dynamic_loss_scaling", False) + else: + self.scale_loss = 1.0 + self.use_dynamic_loss_scaling = False + if self.amp: + AMP_RELATED_FLAGS_SETTING = { + 'FLAGS_max_inplace_grad_add': 8, + } + if paddle.is_compiled_with_cuda(): + AMP_RELATED_FLAGS_SETTING.update({ + 'FLAGS_cudnn_batchnorm_spatial_persistent': 1 + }) + 
paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING) + + if "class_num" in config["Global"]: + global_class_num = config["Global"]["class_num"] + if "class_num" not in config["Arch"]: + config["Arch"]["class_num"] = global_class_num + msg = f"The Global.class_num will be deprecated. Please use Arch.class_num instead. Arch.class_num has been set to {global_class_num}." + else: + msg = "The Global.class_num will be deprecated. Please use Arch.class_num instead. The Global.class_num has been ignored." + logger.warning(msg) + #TODO(gaotingquan): support rec + class_num = config["Arch"].get("class_num", None) + self.config["DataLoader"].update({"class_num": class_num}) + # build dataloader + if self.mode == 'train': + self.train_dataloader = build_dataloader( + self.config["DataLoader"], "Train", self.device, self.use_dali) + if self.mode == "eval" or (self.mode == "train" and + self.config["Global"]["eval_during_train"]): + if self.eval_mode == "classification": + self.eval_dataloader = build_dataloader( + self.config["DataLoader"], "Eval", self.device, + self.use_dali) + elif self.eval_mode == "retrieval": + self.gallery_query_dataloader = None + if len(self.config["DataLoader"]["Eval"].keys()) == 1: + key = list(self.config["DataLoader"]["Eval"].keys())[0] + self.gallery_query_dataloader = build_dataloader( + self.config["DataLoader"]["Eval"], key, self.device, + self.use_dali) + else: + self.gallery_dataloader = build_dataloader( + self.config["DataLoader"]["Eval"], "Gallery", + self.device, self.use_dali) + self.query_dataloader = build_dataloader( + self.config["DataLoader"]["Eval"], "Query", + self.device, self.use_dali) + + # build loss + if self.mode == "train": + loss_info = self.config["Loss"]["Train"] + self.train_loss_func = build_loss(loss_info) + if self.mode == "eval" or (self.mode == "train" and + self.config["Global"]["eval_during_train"]): + loss_config = self.config.get("Loss", None) + if loss_config is not None: + loss_config = loss_config.get("Eval") + if loss_config is not None: + self.eval_loss_func = build_loss(loss_config) + else: + self.eval_loss_func = None + else: + self.eval_loss_func = None + + # build metric + if self.mode == 'train': + metric_config = self.config.get("Metric") + if metric_config is not None: + metric_config = metric_config.get("Train") + if metric_config is not None: + if hasattr( + self.train_dataloader, "collate_fn" + ) and self.train_dataloader.collate_fn is not None: + for m_idx, m in enumerate(metric_config): + if "TopkAcc" in m: + msg = f"'TopkAcc' metric can not be used when setting 'batch_transform_ops' in config. The 'TopkAcc' metric has been removed." 
+ logger.warning(msg) + break + metric_config.pop(m_idx) + self.train_metric_func = build_metrics(metric_config) + else: + self.train_metric_func = None + else: + self.train_metric_func = None + + if self.mode == "eval" or (self.mode == "train" and + self.config["Global"]["eval_during_train"]): + metric_config = self.config.get("Metric") + if self.eval_mode == "classification": + if metric_config is not None: + metric_config = metric_config.get("Eval") + if metric_config is not None: + self.eval_metric_func = build_metrics(metric_config) + elif self.eval_mode == "retrieval": + if metric_config is None: + metric_config = [{"name": "Recallk", "topk": (1, 5)}] + else: + metric_config = metric_config["Eval"] + self.eval_metric_func = build_metrics(metric_config) + else: + self.eval_metric_func = None + + # build model + self.model = build_model(self.config) + # set @to_static for benchmark, skip this by default. + apply_to_static(self.config, self.model) + + # load_pretrain + if self.config["Global"]["pretrained_model"] is not None: + if self.config["Global"]["pretrained_model"].startswith("http"): + load_dygraph_pretrain_from_url( + self.model, self.config["Global"]["pretrained_model"]) + else: + load_dygraph_pretrain( + self.model, self.config["Global"]["pretrained_model"]) + + # build optimizer + if self.mode == 'train': + self.optimizer, self.lr_sch = build_optimizer( + self.config["Optimizer"], self.config["Global"]["epochs"], + len(self.train_dataloader), [self.model]) + + # for amp training + if self.amp: + self.scaler = paddle.amp.GradScaler( + init_loss_scaling=self.scale_loss, + use_dynamic_loss_scaling=self.use_dynamic_loss_scaling) + amp_level = self.config['AMP'].get("level", "O1") + if amp_level not in ["O1", "O2"]: + msg = "[Parameter Error]: The optimize level of AMP only support 'O1' and 'O2'. The level has been set 'O1'." + logger.warning(msg) + self.config['AMP']["level"] = "O1" + amp_level = "O1" + self.model, self.optimizer = paddle.amp.decorate( + models=self.model, + optimizers=self.optimizer, + level=amp_level, + save_dtype='float32') + + # for distributed + world_size = dist.get_world_size() + self.config["Global"]["distributed"] = world_size != 1 + if world_size != 4 and self.mode == "train": + msg = f"The training strategy in config files provided by PaddleClas is based on 4 gpus. But the number of gpus is {world_size} in current training. Please modify the stategy (learning rate, batch size and so on) if use config files in PaddleClas to train." 
+ logger.warning(msg) + if self.config["Global"]["distributed"]: + dist.init_parallel_env() + self.model = paddle.DataParallel(self.model) + + # build postprocess for infer + if self.mode == 'infer': + self.preprocess_func = create_operators(self.config["Infer"][ + "transforms"]) + self.postprocess_func = build_postprocess(self.config["Infer"][ + "PostProcess"]) + + def train(self): + assert self.mode == "train" + print_batch_step = self.config['Global']['print_batch_step'] + save_interval = self.config["Global"]["save_interval"] + best_metric = { + "metric": 0.0, + "epoch": 0, + } + # key: + # val: metrics list word + self.output_info = dict() + self.time_info = { + "batch_cost": AverageMeter( + "batch_cost", '.5f', postfix=" s,"), + "reader_cost": AverageMeter( + "reader_cost", ".5f", postfix=" s,"), + } + # global iter counter + self.global_step = 0 + + if self.config["Global"]["checkpoints"] is not None: + metric_info = init_model(self.config["Global"], self.model, + self.optimizer) + if metric_info is not None: + best_metric.update(metric_info) + + self.max_iter = len(self.train_dataloader) - 1 if platform.system( + ) == "Windows" else len(self.train_dataloader) + for epoch_id in range(best_metric["epoch"] + 1, + self.config["Global"]["epochs"] + 1): + acc = 0.0 + # for one epoch train + self.train_epoch_func(self, epoch_id, print_batch_step) + + if self.use_dali: + self.train_dataloader.reset() + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, self.output_info[key].avg) + for key in self.output_info + ]) + logger.info("[Train][Epoch {}/{}][Avg]{}".format( + epoch_id, self.config["Global"]["epochs"], metric_msg)) + self.output_info.clear() + + # eval model and save model if possible + if self.config["Global"][ + "eval_during_train"] and epoch_id % self.config["Global"][ + "eval_interval"] == 0: + acc = self.eval(epoch_id) + if acc > best_metric["metric"]: + best_metric["metric"] = acc + best_metric["epoch"] = epoch_id + save_load.save_model( + self.model, + self.optimizer, + best_metric, + self.output_dir, + model_name=self.config["Arch"]["name"], + prefix="best_model") + logger.info("[Eval][Epoch {}][best metric: {}]".format( + epoch_id, best_metric["metric"])) + logger.scaler( + name="eval_acc", + value=acc, + step=epoch_id, + writer=self.vdl_writer) + + self.model.train() + + # save model + if epoch_id % save_interval == 0: + save_load.save_model( + self.model, + self.optimizer, {"metric": acc, + "epoch": epoch_id}, + self.output_dir, + model_name=self.config["Arch"]["name"], + prefix="epoch_{}".format(epoch_id)) + # save the latest model + save_load.save_model( + self.model, + self.optimizer, {"metric": acc, + "epoch": epoch_id}, + self.output_dir, + model_name=self.config["Arch"]["name"], + prefix="latest") + + if self.vdl_writer is not None: + self.vdl_writer.close() + + @paddle.no_grad() + def eval(self, epoch_id=0): + assert self.mode in ["train", "eval"] + self.model.eval() + eval_result = self.eval_func(self, epoch_id) + self.model.train() + return eval_result + + @paddle.no_grad() + def infer(self): + assert self.mode == "infer" and self.eval_mode == "classification" + total_trainer = dist.get_world_size() + local_rank = dist.get_rank() + image_list = get_image_list(self.config["Infer"]["infer_imgs"]) + # data split + image_list = image_list[local_rank::total_trainer] + + batch_size = self.config["Infer"]["batch_size"] + self.model.eval() + batch_data = [] + image_file_list = [] + for idx, image_file in enumerate(image_list): + with open(image_file, 'rb') as f: + x = 
f.read() + for process in self.preprocess_func: + x = process(x) + batch_data.append(x) + image_file_list.append(image_file) + if len(batch_data) >= batch_size or idx == len(image_list) - 1: + batch_tensor = paddle.to_tensor(batch_data) + out = self.model(batch_tensor) + if isinstance(out, list): + out = out[0] + if isinstance(out, dict) and "logits" in out: + out = out["logits"] + if isinstance(out, dict) and "output" in out: + out = out["output"] + result = self.postprocess_func(out, image_file_list) + print(result) + batch_data.clear() + image_file_list.clear() + + def export(self): + assert self.mode == "export" + use_multilabel = self.config["Global"].get("use_multilabel", False) + model = ExportModel(self.config["Arch"], self.model, use_multilabel) + if self.config["Global"]["pretrained_model"] is not None: + load_dygraph_pretrain(model.base_model, + self.config["Global"]["pretrained_model"]) + + model.eval() + save_path = os.path.join(self.config["Global"]["save_inference_dir"], + "inference") + if model.quanter: + model.quanter.save_quantized_model( + model.base_model, + save_path, + input_spec=[ + paddle.static.InputSpec( + shape=[None] + self.config["Global"]["image_shape"], + dtype='float32') + ]) + else: + model = paddle.jit.to_static( + model, + input_spec=[ + paddle.static.InputSpec( + shape=[None] + self.config["Global"]["image_shape"], + dtype='float32') + ]) + paddle.jit.save(model, save_path) + + +class ExportModel(TheseusLayer): + """ + ExportModel: add softmax onto the model + """ + + def __init__(self, config, model, use_multilabel): + super().__init__() + self.base_model = model + # we should choose a final model to export + if isinstance(self.base_model, DistillationModel): + self.infer_model_name = config["infer_model_name"] + else: + self.infer_model_name = None + + self.infer_output_key = config.get("infer_output_key", None) + if self.infer_output_key == "features" and isinstance(self.base_model, + RecModel): + self.base_model.head = IdentityHead() + if use_multilabel: + self.out_act = nn.Sigmoid() + else: + if config.get("infer_add_softmax", True): + self.out_act = nn.Softmax(axis=-1) + else: + self.out_act = None + + def eval(self): + self.training = False + for layer in self.sublayers(): + layer.training = False + layer.eval() + + def forward(self, x): + x = self.base_model(x) + if isinstance(x, list): + x = x[0] + if self.infer_model_name is not None: + x = x[self.infer_model_name] + if self.infer_output_key is not None: + x = x[self.infer_output_key] + if self.out_act is not None: + x = self.out_act(x) + return x diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/evaluation/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/evaluation/__init__.py new file mode 100644 index 000000000..e0cd77888 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/evaluation/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ppcls.engine.evaluation.classification import classification_eval +from ppcls.engine.evaluation.retrieval import retrieval_eval diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/evaluation/classification.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/evaluation/classification.py new file mode 100644 index 000000000..79fb1d692 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/evaluation/classification.py @@ -0,0 +1,174 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import time +import platform +import paddle + +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger + + +def classification_eval(engine, epoch_id=0): + output_info = dict() + time_info = { + "batch_cost": AverageMeter( + "batch_cost", '.5f', postfix=" s,"), + "reader_cost": AverageMeter( + "reader_cost", ".5f", postfix=" s,"), + } + print_batch_step = engine.config["Global"]["print_batch_step"] + + metric_key = None + tic = time.time() + accum_samples = 0 + total_samples = len( + engine.eval_dataloader. + dataset) if not engine.use_dali else engine.eval_dataloader.size + max_iter = len(engine.eval_dataloader) - 1 if platform.system( + ) == "Windows" else len(engine.eval_dataloader) + for iter_id, batch in enumerate(engine.eval_dataloader): + if iter_id >= max_iter: + break + if iter_id == 5: + for key in time_info: + time_info[key].reset() + if engine.use_dali: + batch = [ + paddle.to_tensor(batch[0]['data']), + paddle.to_tensor(batch[0]['label']) + ] + time_info["reader_cost"].update(time.time() - tic) + batch_size = batch[0].shape[0] + batch[0] = paddle.to_tensor(batch[0]).astype("float32") + if not engine.config["Global"].get("use_multilabel", False): + batch[1] = batch[1].reshape([-1, 1]).astype("int64") + + # image input + if engine.amp: + amp_level = engine.config['AMP'].get("level", "O1").upper() + with paddle.amp.auto_cast( + custom_black_list={ + "flatten_contiguous_range", "greater_than" + }, + level=amp_level): + out = engine.model(batch[0]) + else: + out = engine.model(batch[0]) + + # just for DistributedBatchSampler issue: repeat sampling + current_samples = batch_size * paddle.distributed.get_world_size() + accum_samples += current_samples + + # gather Tensor when distributed + if paddle.distributed.get_world_size() > 1: + label_list = [] + paddle.distributed.all_gather(label_list, batch[1]) + labels = paddle.concat(label_list, 0) + + if isinstance(out, dict): + if "Student" in out: + out = out["Student"] + if isinstance(out, dict): + out = out["logits"] + elif "logits" in out: + out = out["logits"] + else: + msg = "Error: Wrong key in out!" 
+ raise Exception(msg) + if isinstance(out, list): + preds = [] + for x in out: + pred_list = [] + paddle.distributed.all_gather(pred_list, x) + pred_x = paddle.concat(pred_list, 0) + preds.append(pred_x) + else: + pred_list = [] + paddle.distributed.all_gather(pred_list, out) + preds = paddle.concat(pred_list, 0) + + if accum_samples > total_samples and not engine.use_dali: + preds = preds[:total_samples + current_samples - accum_samples] + labels = labels[:total_samples + current_samples - + accum_samples] + current_samples = total_samples + current_samples - accum_samples + else: + labels = batch[1] + preds = out + + # calc loss + if engine.eval_loss_func is not None: + if engine.amp and engine.config["AMP"].get("use_fp16_test", False): + amp_level = engine.config['AMP'].get("level", "O1").upper() + with paddle.amp.auto_cast( + custom_black_list={ + "flatten_contiguous_range", "greater_than" + }, + level=amp_level): + loss_dict = engine.eval_loss_func(preds, labels) + else: + loss_dict = engine.eval_loss_func(preds, labels) + + for key in loss_dict: + if key not in output_info: + output_info[key] = AverageMeter(key, '7.5f') + output_info[key].update(loss_dict[key].numpy()[0], + current_samples) + # calc metric + if engine.eval_metric_func is not None: + metric_dict = engine.eval_metric_func(preds, labels) + for key in metric_dict: + if metric_key is None: + metric_key = key + if key not in output_info: + output_info[key] = AverageMeter(key, '7.5f') + + output_info[key].update(metric_dict[key].numpy()[0], + current_samples) + + time_info["batch_cost"].update(time.time() - tic) + + if iter_id % print_batch_step == 0: + time_msg = "s, ".join([ + "{}: {:.5f}".format(key, time_info[key].avg) + for key in time_info + ]) + + ips_msg = "ips: {:.5f} images/sec".format( + batch_size / time_info["batch_cost"].avg) + + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].val) + for key in output_info + ]) + logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format( + epoch_id, iter_id, + len(engine.eval_dataloader), metric_msg, time_msg, ips_msg)) + + tic = time.time() + if engine.use_dali: + engine.eval_dataloader.reset() + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].avg) for key in output_info + ]) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg)) + + # do not try to save best eval.model + if engine.eval_metric_func is None: + return -1 + # return 1st metric in the dict + return output_info[metric_key].avg diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/evaluation/retrieval.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/evaluation/retrieval.py new file mode 100644 index 000000000..b481efae1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/evaluation/retrieval.py @@ -0,0 +1,179 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import platform +import paddle +from ppcls.utils import logger + + +def retrieval_eval(engine, epoch_id=0): + engine.model.eval() + # step1. build gallery + if engine.gallery_query_dataloader is not None: + gallery_feas, gallery_img_id, gallery_unique_id = cal_feature( + engine, name='gallery_query') + query_feas, query_img_id, query_query_id = gallery_feas, gallery_img_id, gallery_unique_id + else: + gallery_feas, gallery_img_id, gallery_unique_id = cal_feature( + engine, name='gallery') + query_feas, query_img_id, query_query_id = cal_feature( + engine, name='query') + + # step2. do evaluation + sim_block_size = engine.config["Global"].get("sim_block_size", 64) + sections = [sim_block_size] * (len(query_feas) // sim_block_size) + if len(query_feas) % sim_block_size: + sections.append(len(query_feas) % sim_block_size) + fea_blocks = paddle.split(query_feas, num_or_sections=sections) + if query_query_id is not None: + query_id_blocks = paddle.split( + query_query_id, num_or_sections=sections) + image_id_blocks = paddle.split(query_img_id, num_or_sections=sections) + metric_key = None + + if engine.eval_loss_func is None: + metric_dict = {metric_key: 0.} + else: + metric_dict = dict() + for block_idx, block_fea in enumerate(fea_blocks): + similarity_matrix = paddle.matmul( + block_fea, gallery_feas, transpose_y=True) + if query_query_id is not None: + query_id_block = query_id_blocks[block_idx] + query_id_mask = (query_id_block != gallery_unique_id.t()) + + image_id_block = image_id_blocks[block_idx] + image_id_mask = (image_id_block != gallery_img_id.t()) + + keep_mask = paddle.logical_or(query_id_mask, image_id_mask) + similarity_matrix = similarity_matrix * keep_mask.astype( + "float32") + else: + keep_mask = None + + metric_tmp = engine.eval_metric_func(similarity_matrix, + image_id_blocks[block_idx], + gallery_img_id, keep_mask) + + for key in metric_tmp: + if key not in metric_dict: + metric_dict[key] = metric_tmp[key] * block_fea.shape[ + 0] / len(query_feas) + else: + metric_dict[key] += metric_tmp[key] * block_fea.shape[ + 0] / len(query_feas) + + metric_info_list = [] + for key in metric_dict: + if metric_key is None: + metric_key = key + metric_info_list.append("{}: {:.5f}".format(key, metric_dict[key])) + metric_msg = ", ".join(metric_info_list) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg)) + + return metric_dict[metric_key] + + +def cal_feature(engine, name='gallery'): + has_unique_id = False + all_unique_id = None + + if name == 'gallery': + dataloader = engine.gallery_dataloader + elif name == 'query': + dataloader = engine.query_dataloader + elif name == 'gallery_query': + dataloader = engine.gallery_query_dataloader + else: + raise RuntimeError("Only support gallery or query dataset") + + batch_feas_list = [] + img_id_list = [] + unique_id_list = [] + max_iter = len(dataloader) - 1 if platform.system() == "Windows" else len( + dataloader) + for idx, batch in enumerate(dataloader): # load is very time-consuming + if idx >= max_iter: + break + if idx % engine.config["Global"]["print_batch_step"] == 0: + logger.info( + f"{name} feature calculation process: [{idx}/{len(dataloader)}]" + ) + if engine.use_dali: + batch = [ + paddle.to_tensor(batch[0]['data']), + paddle.to_tensor(batch[0]['label']) + ] + batch = [paddle.to_tensor(x) for x in batch] + batch[1] = batch[1].reshape([-1, 1]).astype("int64") + if len(batch) == 3: + has_unique_id = 
True + batch[2] = batch[2].reshape([-1, 1]).astype("int64") + out = engine.model(batch[0], batch[1]) + if "Student" in out: + out = out["Student"] + batch_feas = out["features"] + + # do norm + if engine.config["Global"].get("feature_normalize", True): + feas_norm = paddle.sqrt( + paddle.sum(paddle.square(batch_feas), axis=1, keepdim=True)) + batch_feas = paddle.divide(batch_feas, feas_norm) + + # do binarize + if engine.config["Global"].get("feature_binarize") == "round": + batch_feas = paddle.round(batch_feas).astype("float32") * 2.0 - 1.0 + + if engine.config["Global"].get("feature_binarize") == "sign": + batch_feas = paddle.sign(batch_feas).astype("float32") + + if paddle.distributed.get_world_size() > 1: + batch_feas_gather = [] + img_id_gather = [] + unique_id_gather = [] + paddle.distributed.all_gather(batch_feas_gather, batch_feas) + paddle.distributed.all_gather(img_id_gather, batch[1]) + batch_feas_list.append(paddle.concat(batch_feas_gather)) + img_id_list.append(paddle.concat(img_id_gather)) + if has_unique_id: + paddle.distributed.all_gather(unique_id_gather, batch[2]) + unique_id_list.append(paddle.concat(unique_id_gather)) + else: + batch_feas_list.append(batch_feas) + img_id_list.append(batch[1]) + if has_unique_id: + unique_id_list.append(batch[2]) + + if engine.use_dali: + dataloader.reset() + + all_feas = paddle.concat(batch_feas_list) + all_img_id = paddle.concat(img_id_list) + if has_unique_id: + all_unique_id = paddle.concat(unique_id_list) + + # just for DistributedBatchSampler issue: repeat sampling + total_samples = len( + dataloader.dataset) if not engine.use_dali else dataloader.size + all_feas = all_feas[:total_samples] + all_img_id = all_img_id[:total_samples] + if has_unique_id: + all_unique_id = all_unique_id[:total_samples] + + logger.info("Build {} done, all feat shape: {}, begin to eval..".format( + name, all_feas.shape)) + return all_feas, all_img_id, all_unique_id diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/train/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/train/__init__.py new file mode 100644 index 000000000..800d3a41e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/train/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from ppcls.engine.train.train import train_epoch diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/train/train.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/train/train.py new file mode 100644 index 000000000..b15c1088a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/train/train.py @@ -0,0 +1,82 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import, division, print_function + +import time +import paddle +from ppcls.engine.train.utils import update_loss, update_metric, log_info +from ppcls.utils import profiler + + +def train_epoch(engine, epoch_id, print_batch_step): + tic = time.time() + for iter_id, batch in enumerate(engine.train_dataloader): + if iter_id >= engine.max_iter: + break + profiler.add_profiler_step(engine.config["profiler_options"]) + if iter_id == 5: + for key in engine.time_info: + engine.time_info[key].reset() + engine.time_info["reader_cost"].update(time.time() - tic) + if engine.use_dali: + batch = [ + paddle.to_tensor(batch[0]['data']), + paddle.to_tensor(batch[0]['label']) + ] + batch_size = batch[0].shape[0] + if not engine.config["Global"].get("use_multilabel", False): + batch[1] = batch[1].reshape([batch_size, -1]) + engine.global_step += 1 + + # image input + if engine.amp: + amp_level = engine.config['AMP'].get("level", "O1").upper() + with paddle.amp.auto_cast( + custom_black_list={ + "flatten_contiguous_range", "greater_than" + }, + level=amp_level): + out = forward(engine, batch) + loss_dict = engine.train_loss_func(out, batch[1]) + else: + out = forward(engine, batch) + loss_dict = engine.train_loss_func(out, batch[1]) + + # step opt and lr + if engine.amp: + scaled = engine.scaler.scale(loss_dict["loss"]) + scaled.backward() + engine.scaler.minimize(engine.optimizer, scaled) + else: + loss_dict["loss"].backward() + engine.optimizer.step() + engine.optimizer.clear_grad() + engine.lr_sch.step() + + # below code just for logging + # update metric_for_logger + update_metric(engine, out, batch, batch_size) + # update_loss_for_logger + update_loss(engine, loss_dict, batch_size) + engine.time_info["batch_cost"].update(time.time() - tic) + if iter_id % print_batch_step == 0: + log_info(engine, batch_size, epoch_id, iter_id) + tic = time.time() + + +def forward(engine, batch): + if not engine.is_rec: + return engine.model(batch[0]) + else: + return engine.model(batch[0], batch[1]) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/train/utils.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/train/utils.py new file mode 100644 index 000000000..92eb35d75 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/engine/train/utils.py @@ -0,0 +1,72 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
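For reference, the log_info helper defined just below derives its ETA purely from the measured average batch cost: remaining iterations across all epochs multiplied by seconds per batch. A tiny worked example with made-up numbers:

    # Illustrative only (numbers are invented); mirrors the eta_sec expression in log_info.
    import datetime

    epochs, epoch_id = 120, 1             # total epochs, current epoch (1-based)
    steps_per_epoch, iter_id = 5000, 100  # len(train_dataloader), current iteration
    avg_batch_cost = 0.3                  # seconds, time_info["batch_cost"].avg

    eta_sec = ((epochs - epoch_id + 1) * steps_per_epoch - iter_id) * avg_batch_cost
    print(datetime.timedelta(seconds=int(eta_sec)))   # 2 days, 1:59:30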
+from __future__ import absolute_import, division, print_function + +import datetime +from ppcls.utils import logger +from ppcls.utils.misc import AverageMeter + + +def update_metric(trainer, out, batch, batch_size): + # calc metric + if trainer.train_metric_func is not None: + metric_dict = trainer.train_metric_func(out, batch[-1]) + for key in metric_dict: + if key not in trainer.output_info: + trainer.output_info[key] = AverageMeter(key, '7.5f') + trainer.output_info[key].update(metric_dict[key].numpy()[0], + batch_size) + + +def update_loss(trainer, loss_dict, batch_size): + # update_output_info + for key in loss_dict: + if key not in trainer.output_info: + trainer.output_info[key] = AverageMeter(key, '7.5f') + trainer.output_info[key].update(loss_dict[key].numpy()[0], batch_size) + + +def log_info(trainer, batch_size, epoch_id, iter_id): + lr_msg = "lr: {:.5f}".format(trainer.lr_sch.get_lr()) + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, trainer.output_info[key].avg) + for key in trainer.output_info + ]) + time_msg = "s, ".join([ + "{}: {:.5f}".format(key, trainer.time_info[key].avg) + for key in trainer.time_info + ]) + + ips_msg = "ips: {:.5f} images/sec".format( + batch_size / trainer.time_info["batch_cost"].avg) + eta_sec = ((trainer.config["Global"]["epochs"] - epoch_id + 1 + ) * len(trainer.train_dataloader) - iter_id + ) * trainer.time_info["batch_cost"].avg + eta_msg = "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec)))) + logger.info("[Train][Epoch {}/{}][Iter: {}/{}]{}, {}, {}, {}, {}".format( + epoch_id, trainer.config["Global"]["epochs"], iter_id, + len(trainer.train_dataloader), lr_msg, metric_msg, time_msg, ips_msg, + eta_msg)) + + logger.scaler( + name="lr", + value=trainer.lr_sch.get_lr(), + step=trainer.global_step, + writer=trainer.vdl_writer) + for key in trainer.output_info: + logger.scaler( + name="train_{}".format(key), + value=trainer.output_info[key].avg, + step=trainer.global_step, + writer=trainer.vdl_writer) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/__init__.py new file mode 100644 index 000000000..7c50ff76f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/__init__.py @@ -0,0 +1,47 @@ +import copy + +import paddle +import paddle.nn as nn +from ppcls.utils import logger + +from .celoss import CELoss, MixCELoss +from .distanceloss import DistanceLoss + +class CombinedLoss(nn.Layer): + def __init__(self, config_list): + super().__init__() + self.loss_func = [] + self.loss_weight = [] + assert isinstance(config_list, list), ( + 'operator config should be a list') + for config in config_list: + assert isinstance(config, + dict) and len(config) == 1, "yaml format error" + name = list(config)[0] + param = config[name] + assert "weight" in param, "weight must be in param, but param just contains {}".format( + param.keys()) + self.loss_weight.append(param.pop("weight")) + self.loss_func.append(eval(name)(**param)) + + def __call__(self, input, batch): + loss_dict = {} + # just for accelerate classification traing speed + if len(self.loss_func) == 1: + loss = self.loss_func[0](input, batch) + loss_dict.update(loss) + loss_dict["loss"] = list(loss.values())[0] + else: + for idx, loss_func in enumerate(self.loss_func): + loss = loss_func(input, batch) + weight = self.loss_weight[idx] + loss = {key: loss[key] * weight for key in loss} + loss_dict.update(loss) + loss_dict["loss"] = paddle.add_n(list(loss_dict.values())) + return 
loss_dict + + +def build_loss(config): + module_class = CombinedLoss(copy.deepcopy(config)) + logger.debug("build loss {} success.".format(module_class)) + return module_class diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/celoss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/celoss.py new file mode 100644 index 000000000..a78926170 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/celoss.py @@ -0,0 +1,67 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from ppcls.utils import logger + + +class CELoss(nn.Layer): + """ + Cross entropy loss + """ + + def __init__(self, epsilon=None): + super().__init__() + if epsilon is not None and (epsilon <= 0 or epsilon >= 1): + epsilon = None + self.epsilon = epsilon + + def _labelsmoothing(self, target, class_num): + if len(target.shape) == 1 or target.shape[-1] != class_num: + one_hot_target = F.one_hot(target, class_num) + else: + one_hot_target = target + soft_target = F.label_smooth(one_hot_target, epsilon=self.epsilon) + soft_target = paddle.reshape(soft_target, shape=[-1, class_num]) + return soft_target + + def forward(self, x, label): + if isinstance(x, dict): + x = x["logits"] + if self.epsilon is not None: + class_num = x.shape[-1] + label = self._labelsmoothing(label, class_num) + x = -F.log_softmax(x, axis=-1) + loss = paddle.sum(x * label, axis=-1) + else: + if label.shape[-1] == x.shape[-1]: + label = F.softmax(label, axis=-1) + soft_label = True + else: + soft_label = False + loss = F.cross_entropy(x, label=label, soft_label=soft_label) + loss = loss.mean() + return {"CELoss": loss} + + +class MixCELoss(object): + def __init__(self, *args, **kwargs): + msg = "\"MixCELos\" is deprecated, please use \"CELoss\" instead." + logger.error(DeprecationWarning(msg)) + raise DeprecationWarning(msg) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/comfunc.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/comfunc.py new file mode 100644 index 000000000..277bdd6b5 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/comfunc.py @@ -0,0 +1,45 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
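# Illustrative sketch (not part of the patch): the label-smoothing branch of
# CELoss.forward() above one-hot encodes the integer label, smooths it with
# F.label_smooth, and takes sum(-log_softmax(logits) * soft_target) over the
# class axis. The toy shapes and epsilon value here are assumptions.
import paddle
import paddle.nn.functional as F

epsilon, class_num = 0.1, 5
logits = paddle.randn([4, class_num])
label = paddle.randint(0, class_num, [4])

one_hot = F.one_hot(label, class_num)                     # [4, 5]
soft_target = F.label_smooth(one_hot, epsilon=epsilon)    # mix with a uniform prior
loss = paddle.sum(-F.log_softmax(logits, axis=-1) * soft_target, axis=-1).mean()
print(float(loss))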
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + + +def rerange_index(batch_size, samples_each_class): + tmp = np.arange(0, batch_size * batch_size) + tmp = tmp.reshape(-1, batch_size) + rerange_index = [] + + for i in range(batch_size): + step = i // samples_each_class + start = step * samples_each_class + end = (step + 1) * samples_each_class + + pos_idx = [] + neg_idx = [] + for j, k in enumerate(tmp[i]): + if j >= start and j < end: + if j == i: + pos_idx.insert(0, k) + else: + pos_idx.append(k) + else: + neg_idx.append(k) + rerange_index += (pos_idx + neg_idx) + + rerange_index = np.array(rerange_index).astype(np.int32) + return rerange_index diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/distanceloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/distanceloss.py new file mode 100644 index 000000000..0a09f0cb2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/loss/distanceloss.py @@ -0,0 +1,43 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from paddle.nn import L1Loss +from paddle.nn import MSELoss as L2Loss +from paddle.nn import SmoothL1Loss + + +class DistanceLoss(nn.Layer): + """ + DistanceLoss: + mode: loss mode + """ + + def __init__(self, mode="l2", **kargs): + super().__init__() + assert mode in ["l1", "l2", "smooth_l1"] + if mode == "l1": + self.loss_func = nn.L1Loss(**kargs) + elif mode == "l2": + self.loss_func = nn.MSELoss(**kargs) + elif mode == "smooth_l1": + self.loss_func = nn.SmoothL1Loss(**kargs) + self.mode = mode + + def forward(self, x, y): + loss = self.loss_func(x, y) + return {"loss_{}".format(self.mode): loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/metric/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/metric/__init__.py new file mode 100644 index 000000000..94721235b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/metric/__init__.py @@ -0,0 +1,51 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
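# Illustrative sketch (not part of the patch): a tiny demo of what
# rerange_index() above produces. For batch_size=4 and samples_each_class=2,
# each row lists the indices of its same-class block first (the anchor index
# leading), followed by the remaining indices. The logic is restated here so
# the snippet runs standalone.
import numpy as np

def rerange_index_demo(batch_size, samples_each_class):
    tmp = np.arange(0, batch_size * batch_size).reshape(-1, batch_size)
    out = []
    for i in range(batch_size):
        step = i // samples_each_class
        start, end = step * samples_each_class, (step + 1) * samples_each_class
        pos, neg = [], []
        for j, k in enumerate(tmp[i]):
            if start <= j < end:
                if j == i:
                    pos.insert(0, k)        # the anchor index goes first
                else:
                    pos.append(k)
            else:
                neg.append(k)
        out += pos + neg
    return np.array(out, dtype=np.int32)

print(rerange_index_demo(4, 2).reshape(4, 4))
# [[ 0  1  2  3]
#  [ 5  4  6  7]
#  [10 11  8  9]
#  [15 14 12 13]]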
+ +from paddle import nn +import copy +from collections import OrderedDict + +from .metrics import TopkAcc, mAP, mINP, Recallk, Precisionk +from .metrics import DistillationTopkAcc +from .metrics import GoogLeNetTopkAcc +from .metrics import HammingDistance, AccuracyScore + + +class CombinedMetrics(nn.Layer): + def __init__(self, config_list): + super().__init__() + self.metric_func_list = [] + assert isinstance(config_list, list), ( + 'operator config should be a list') + for config in config_list: + assert isinstance(config, + dict) and len(config) == 1, "yaml format error" + metric_name = list(config)[0] + metric_params = config[metric_name] + if metric_params is not None: + self.metric_func_list.append( + eval(metric_name)(**metric_params)) + else: + self.metric_func_list.append(eval(metric_name)()) + + def __call__(self, *args, **kwargs): + metric_dict = OrderedDict() + for idx, metric_func in enumerate(self.metric_func_list): + metric_dict.update(metric_func(*args, **kwargs)) + return metric_dict + + +def build_metrics(config): + metrics_list = CombinedMetrics(copy.deepcopy(config)) + return metrics_list diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/metric/metrics.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/metric/metrics.py new file mode 100644 index 000000000..03e742082 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/metric/metrics.py @@ -0,0 +1,306 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
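# Illustrative sketch (not part of the patch): CombinedMetrics above dispatches
# to metric classes such as TopkAcc (defined in metrics.py below), which wraps
# paddle.metric.accuracy. This shows that computation with assumed toy shapes.
import paddle

logits = paddle.randn([16, 10])
label = paddle.randint(0, 10, [16, 1])

metric_dict = {
    "top{}".format(k): paddle.metric.accuracy(logits, label, k=k)
    for k in (1, 5)
}
print({name: float(v) for name, v in metric_dict.items()})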
+ +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from sklearn.metrics import hamming_loss +from sklearn.metrics import accuracy_score as accuracy_metric +from sklearn.metrics import multilabel_confusion_matrix +from sklearn.preprocessing import binarize + + +class TopkAcc(nn.Layer): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + + def forward(self, x, label): + if isinstance(x, dict): + x = x["logits"] + + metric_dict = dict() + for k in self.topk: + metric_dict["top{}".format(k)] = paddle.metric.accuracy( + x, label, k=k) + return metric_dict + + +class mAP(nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=True) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + + num_rel = paddle.sum(equal_flag, axis=1) + num_rel = paddle.greater_than(num_rel, paddle.to_tensor(0.)) + num_rel_index = paddle.nonzero(num_rel.astype("int")) + num_rel_index = paddle.reshape(num_rel_index, [num_rel_index.shape[0]]) + equal_flag = paddle.index_select(equal_flag, num_rel_index, axis=0) + + acc_sum = paddle.cumsum(equal_flag, axis=1) + div = paddle.arange(acc_sum.shape[1]).astype("float32") + 1 + precision = paddle.divide(acc_sum, div) + + #calc map + precision_mask = paddle.multiply(equal_flag, precision) + ap = paddle.sum(precision_mask, axis=1) / paddle.sum(equal_flag, + axis=1) + metric_dict["mAP"] = paddle.mean(ap).numpy()[0] + return metric_dict + + +class mINP(nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=True) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + + num_rel = paddle.sum(equal_flag, axis=1) + num_rel = paddle.greater_than(num_rel, paddle.to_tensor(0.)) + num_rel_index = paddle.nonzero(num_rel.astype("int")) + num_rel_index = paddle.reshape(num_rel_index, [num_rel_index.shape[0]]) + equal_flag = paddle.index_select(equal_flag, num_rel_index, axis=0) + + #do accumulative sum + div = paddle.arange(equal_flag.shape[1]).astype("float32") + 2 + minus = 
paddle.divide(equal_flag, div) + auxilary = paddle.subtract(equal_flag, minus) + hard_index = paddle.argmax(auxilary, axis=1).astype("float32") + all_INP = paddle.divide(paddle.sum(equal_flag, axis=1), hard_index) + mINP = paddle.mean(all_INP) + metric_dict["mINP"] = mINP.numpy()[0] + return metric_dict + + +class Recallk(nn.Layer): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + #get cmc + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=True) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + real_query_num = paddle.sum(equal_flag, axis=1) + real_query_num = paddle.sum( + paddle.greater_than(real_query_num, paddle.to_tensor(0.)).astype( + "float32")) + + acc_sum = paddle.cumsum(equal_flag, axis=1) + mask = paddle.greater_than(acc_sum, + paddle.to_tensor(0.)).astype("float32") + all_cmc = (paddle.sum(mask, axis=0) / real_query_num).numpy() + + for k in self.topk: + metric_dict["recall{}".format(k)] = all_cmc[k - 1] + return metric_dict + + +class Precisionk(nn.Layer): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + #get cmc + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=True) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + + Ns = paddle.arange(gallery_img_id.shape[0]) + 1 + equal_flag_cumsum = paddle.cumsum(equal_flag, axis=1) + Precision_at_k = (paddle.mean(equal_flag_cumsum, axis=0) / Ns).numpy() + + for k in self.topk: + metric_dict["precision@{}".format(k)] = Precision_at_k[k - 1] + + return metric_dict + + +class DistillationTopkAcc(TopkAcc): + def __init__(self, model_key, feature_key=None, topk=(1, 5)): + super().__init__(topk=topk) + self.model_key = model_key + self.feature_key = feature_key + + def forward(self, x, label): + if isinstance(x, dict): + x = x[self.model_key] + if self.feature_key is not None: + x = x[self.feature_key] + return super().forward(x, label) + + +class GoogLeNetTopkAcc(TopkAcc): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, 
(int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + + def forward(self, x, label): + return super().forward(x[0], label) + + +class MutiLabelMetric(object): + def __init__(self): + pass + + def _multi_hot_encode(self, logits, threshold=0.5): + return binarize(logits, threshold=threshold) + + def __call__(self, output): + output = F.sigmoid(output) + preds = self._multi_hot_encode(logits=output.numpy(), threshold=0.5) + return preds + + +class HammingDistance(MutiLabelMetric): + """ + Soft metric based label for multilabel classification + Returns: + The smaller the return value is, the better model is. + """ + + def __init__(self): + super().__init__() + + def __call__(self, output, target): + preds = super().__call__(output) + metric_dict = dict() + metric_dict["HammingDistance"] = paddle.to_tensor( + hamming_loss(target, preds)) + return metric_dict + + +class AccuracyScore(MutiLabelMetric): + """ + Hard metric for multilabel classification + Args: + base: ["sample", "label"], default="sample" + if "sample", return metric score based sample, + if "label", return metric score based label. + Returns: + accuracy: + """ + + def __init__(self, base="label"): + super().__init__() + assert base in ["sample", "label" + ], 'must be one of ["sample", "label"]' + self.base = base + + def __call__(self, output, target): + preds = super().__call__(output) + metric_dict = dict() + if self.base == "sample": + accuracy = accuracy_metric(target, preds) + elif self.base == "label": + mcm = multilabel_confusion_matrix(target, preds) + tns = mcm[:, 0, 0] + fns = mcm[:, 1, 0] + tps = mcm[:, 1, 1] + fps = mcm[:, 0, 1] + accuracy = (sum(tps) + sum(tns)) / ( + sum(tps) + sum(tns) + sum(fns) + sum(fps)) + metric_dict["AccuracyScore"] = paddle.to_tensor(accuracy) + return metric_dict diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/optimizer/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/optimizer/__init__.py new file mode 100644 index 000000000..61db39f89 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/optimizer/__init__.py @@ -0,0 +1,72 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import paddle + +from ppcls.utils import logger + +from . import optimizer + +__all__ = ['build_optimizer'] + + +def build_lr_scheduler(lr_config, epochs, step_each_epoch): + from . 
import learning_rate + lr_config.update({'epochs': epochs, 'step_each_epoch': step_each_epoch}) + if 'name' in lr_config: + lr_name = lr_config.pop('name') + lr = getattr(learning_rate, lr_name)(**lr_config) + if isinstance(lr, paddle.optimizer.lr.LRScheduler): + return lr + else: + return lr() + else: + lr = lr_config['learning_rate'] + return lr + + +# model_list is None in static graph +def build_optimizer(config, epochs, step_each_epoch, model_list=None): + config = copy.deepcopy(config) + # step1 build lr + lr = build_lr_scheduler(config.pop('lr'), epochs, step_each_epoch) + logger.debug("build lr ({}) success..".format(lr)) + # step2 build regularization + if 'regularizer' in config and config['regularizer'] is not None: + if 'weight_decay' in config: + logger.warning( + "ConfigError: Only one of regularizer and weight_decay can be set in Optimizer Config. \"weight_decay\" has been ignored." + ) + reg_config = config.pop('regularizer') + reg_name = reg_config.pop('name') + 'Decay' + reg = getattr(paddle.regularizer, reg_name)(**reg_config) + config["weight_decay"] = reg + logger.debug("build regularizer ({}) success..".format(reg)) + # step3 build optimizer + optim_name = config.pop('name') + if 'clip_norm' in config: + clip_norm = config.pop('clip_norm') + grad_clip = paddle.nn.ClipGradByNorm(clip_norm=clip_norm) + else: + grad_clip = None + optim = getattr(optimizer, optim_name)(learning_rate=lr, + grad_clip=grad_clip, + **config)(model_list=model_list) + logger.debug("build optimizer ({}) success..".format(optim)) + return optim, lr diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/optimizer/learning_rate.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/optimizer/learning_rate.py new file mode 100644 index 000000000..b59387dd9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/optimizer/learning_rate.py @@ -0,0 +1,326 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from paddle.optimizer import lr +from paddle.optimizer.lr import LRScheduler + +from ppcls.utils import logger + + +class Linear(object): + """ + Linear learning rate decay + Args: + lr (float): The initial learning rate. It is a python float number. + epochs(int): The decay step size. It determines the decay cycle. + end_lr(float, optional): The minimum final learning rate. Default: 0.0001. + power(float, optional): Power of polynomial. Default: 1.0. + warmup_epoch(int): The epoch numbers for LinearWarmup. Default: 0. + warmup_start_lr(float): Initial learning rate of warm up. Default: 0.0. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. 
+ """ + + def __init__(self, + learning_rate, + epochs, + step_each_epoch, + end_lr=0.0, + power=1.0, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + **kwargs): + super().__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.learning_rate = learning_rate + self.steps = (epochs - warmup_epoch) * step_each_epoch + self.end_lr = end_lr + self.power = power + self.last_epoch = last_epoch + self.warmup_steps = round(warmup_epoch * step_each_epoch) + self.warmup_start_lr = warmup_start_lr + + def __call__(self): + learning_rate = lr.PolynomialDecay( + learning_rate=self.learning_rate, + decay_steps=self.steps, + end_lr=self.end_lr, + power=self.power, + last_epoch=self. + last_epoch) if self.steps > 0 else self.learning_rate + if self.warmup_steps > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.learning_rate, + last_epoch=self.last_epoch) + return learning_rate + + +class Cosine(object): + """ + Cosine learning rate decay + lr = 0.05 * (math.cos(epoch * (math.pi / epochs)) + 1) + Args: + lr(float): initial learning rate + step_each_epoch(int): steps each epoch + epochs(int): total training epochs + eta_min(float): Minimum learning rate. Default: 0.0. + warmup_epoch(int): The epoch numbers for LinearWarmup. Default: 0. + warmup_start_lr(float): Initial learning rate of warm up. Default: 0.0. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. + """ + + def __init__(self, + learning_rate, + step_each_epoch, + epochs, + eta_min=0.0, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + **kwargs): + super().__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.learning_rate = learning_rate + self.T_max = (epochs - warmup_epoch) * step_each_epoch + self.eta_min = eta_min + self.last_epoch = last_epoch + self.warmup_steps = round(warmup_epoch * step_each_epoch) + self.warmup_start_lr = warmup_start_lr + + def __call__(self): + learning_rate = lr.CosineAnnealingDecay( + learning_rate=self.learning_rate, + T_max=self.T_max, + eta_min=self.eta_min, + last_epoch=self. + last_epoch) if self.T_max > 0 else self.learning_rate + if self.warmup_steps > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.learning_rate, + last_epoch=self.last_epoch) + return learning_rate + + +class Step(object): + """ + Piecewise learning rate decay + Args: + step_each_epoch(int): steps each epoch + learning_rate (float): The initial learning rate. It is a python float number. + step_size (int): the interval to update. + gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` . + It should be less than 1.0. Default: 0.1. + warmup_epoch(int): The epoch numbers for LinearWarmup. Default: 0. + warmup_start_lr(float): Initial learning rate of warm up. Default: 0.0. + last_epoch (int, optional): The index of last epoch. 
Can be set to restart training. Default: -1, means initial learning rate. + """ + + def __init__(self, + learning_rate, + step_size, + step_each_epoch, + epochs, + gamma, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + **kwargs): + super().__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.step_size = step_each_epoch * step_size + self.learning_rate = learning_rate + self.gamma = gamma + self.last_epoch = last_epoch + self.warmup_steps = round(warmup_epoch * step_each_epoch) + self.warmup_start_lr = warmup_start_lr + + def __call__(self): + learning_rate = lr.StepDecay( + learning_rate=self.learning_rate, + step_size=self.step_size, + gamma=self.gamma, + last_epoch=self.last_epoch) + if self.warmup_steps > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.learning_rate, + last_epoch=self.last_epoch) + return learning_rate + + +class Piecewise(object): + """ + Piecewise learning rate decay + Args: + boundaries(list): A list of steps numbers. The type of element in the list is python int. + values(list): A list of learning rate values that will be picked during different epoch boundaries. + The type of element in the list is python float. + warmup_epoch(int): The epoch numbers for LinearWarmup. Default: 0. + warmup_start_lr(float): Initial learning rate of warm up. Default: 0.0. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. + """ + + def __init__(self, + step_each_epoch, + decay_epochs, + values, + epochs, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + **kwargs): + super().__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.boundaries = [step_each_epoch * e for e in decay_epochs] + self.values = values + self.last_epoch = last_epoch + self.warmup_steps = round(warmup_epoch * step_each_epoch) + self.warmup_start_lr = warmup_start_lr + + def __call__(self): + learning_rate = lr.PiecewiseDecay( + boundaries=self.boundaries, + values=self.values, + last_epoch=self.last_epoch) + if self.warmup_steps > 0: + learning_rate = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.values[0], + last_epoch=self.last_epoch) + return learning_rate + + +class MultiStepDecay(LRScheduler): + """ + Update the learning rate by ``gamma`` once ``epoch`` reaches one of the milestones. + The algorithm can be described as the code below. + .. code-block:: text + learning_rate = 0.5 + milestones = [30, 50] + gamma = 0.1 + if epoch < 30: + learning_rate = 0.5 + elif epoch < 50: + learning_rate = 0.05 + else: + learning_rate = 0.005 + Args: + learning_rate (float): The initial learning rate. It is a python float number. + milestones (tuple|list): List or tuple of each boundaries. Must be increasing. + gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` . + It should be less than 1.0. Default: 0.1. 
+ last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. + verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` . + + Returns: + ``MultiStepDecay`` instance to schedule learning rate. + Examples: + + .. code-block:: python + import paddle + import numpy as np + # train on default dynamic graph mode + linear = paddle.nn.Linear(10, 10) + scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) + for epoch in range(20): + for batch_id in range(5): + x = paddle.uniform([10, 10]) + out = linear(x) + loss = paddle.mean(out) + loss.backward() + sgd.step() + sgd.clear_gradients() + scheduler.step() # If you update learning rate each step + # scheduler.step() # If you update learning rate each epoch + # train on static graph mode + paddle.enable_static() + main_prog = paddle.static.Program() + start_prog = paddle.static.Program() + with paddle.static.program_guard(main_prog, start_prog): + x = paddle.static.data(name='x', shape=[None, 4, 5]) + y = paddle.static.data(name='y', shape=[None, 4, 5]) + z = paddle.static.nn.fc(x, 100) + loss = paddle.mean(z) + scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True) + sgd = paddle.optimizer.SGD(learning_rate=scheduler) + sgd.minimize(loss) + exe = paddle.static.Executor() + exe.run(start_prog) + for epoch in range(20): + for batch_id in range(5): + out = exe.run( + main_prog, + feed={ + 'x': np.random.randn(3, 4, 5).astype('float32'), + 'y': np.random.randn(3, 4, 5).astype('float32') + }, + fetch_list=loss.name) + scheduler.step() # If you update learning rate each step + # scheduler.step() # If you update learning rate each epoch + """ + + def __init__(self, + learning_rate, + milestones, + epochs, + step_each_epoch, + gamma=0.1, + last_epoch=-1, + verbose=False): + if not isinstance(milestones, (tuple, list)): + raise TypeError( + "The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received %s." + % type(milestones)) + if not all([ + milestones[i] < milestones[i + 1] + for i in range(len(milestones) - 1) + ]): + raise ValueError('The elements of milestones must be incremented') + if gamma >= 1.0: + raise ValueError('gamma should be < 1.0.') + self.milestones = [x * step_each_epoch for x in milestones] + self.gamma = gamma + super().__init__(learning_rate, last_epoch, verbose) + + def get_lr(self): + for i in range(len(self.milestones)): + if self.last_epoch < self.milestones[i]: + return self.base_lr * (self.gamma**i) + return self.base_lr * (self.gamma**len(self.milestones)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/optimizer/optimizer.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/optimizer/optimizer.py new file mode 100644 index 000000000..4422ea70d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/optimizer/optimizer.py @@ -0,0 +1,217 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from paddle import optimizer as optim +import paddle + +from ppcls.utils import logger + + +class Momentum(object): + """ + Simple Momentum optimizer with velocity state. + Args: + learning_rate (float|Variable) - The learning rate used to update parameters. + Can be a float value or a Variable with one float value as data element. + momentum (float) - Momentum factor. + regularization (WeightDecayRegularizer, optional) - The strategy of regularization. + """ + + def __init__(self, + learning_rate, + momentum, + weight_decay=None, + grad_clip=None, + multi_precision=True): + super().__init__() + self.learning_rate = learning_rate + self.momentum = momentum + self.weight_decay = weight_decay + self.grad_clip = grad_clip + self.multi_precision = multi_precision + + def __call__(self, model_list): + # model_list is None in static graph + parameters = sum([m.parameters() for m in model_list], + []) if model_list else None + opt = optim.Momentum( + learning_rate=self.learning_rate, + momentum=self.momentum, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + multi_precision=self.multi_precision, + parameters=parameters) + if hasattr(opt, '_use_multi_tensor'): + opt = optim.Momentum( + learning_rate=self.learning_rate, + momentum=self.momentum, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + multi_precision=self.multi_precision, + parameters=parameters, + use_multi_tensor=True) + return opt + + +class Adam(object): + def __init__(self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + parameter_list=None, + weight_decay=None, + grad_clip=None, + name=None, + lazy_mode=False, + multi_precision=False): + self.learning_rate = learning_rate + self.beta1 = beta1 + self.beta2 = beta2 + self.epsilon = epsilon + self.parameter_list = parameter_list + self.learning_rate = learning_rate + self.weight_decay = weight_decay + self.grad_clip = grad_clip + self.name = name + self.lazy_mode = lazy_mode + self.multi_precision = multi_precision + + def __call__(self, model_list): + # model_list is None in static graph + parameters = sum([m.parameters() for m in model_list], + []) if model_list else None + opt = optim.Adam( + learning_rate=self.learning_rate, + beta1=self.beta1, + beta2=self.beta2, + epsilon=self.epsilon, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + name=self.name, + lazy_mode=self.lazy_mode, + multi_precision=self.multi_precision, + parameters=parameters) + return opt + + +class RMSProp(object): + """ + Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning rate method. + Args: + learning_rate (float|Variable) - The learning rate used to update parameters. + Can be a float value or a Variable with one float value as data element. + momentum (float) - Momentum factor. + rho (float) - rho value in equation. + epsilon (float) - avoid division by zero, default is 1e-6. + regularization (WeightDecayRegularizer, optional) - The strategy of regularization. 
+ """ + + def __init__(self, + learning_rate, + momentum=0.0, + rho=0.95, + epsilon=1e-6, + weight_decay=None, + grad_clip=None, + multi_precision=False): + super().__init__() + self.learning_rate = learning_rate + self.momentum = momentum + self.rho = rho + self.epsilon = epsilon + self.weight_decay = weight_decay + self.grad_clip = grad_clip + + def __call__(self, model_list): + # model_list is None in static graph + parameters = sum([m.parameters() for m in model_list], + []) if model_list else None + opt = optim.RMSProp( + learning_rate=self.learning_rate, + momentum=self.momentum, + rho=self.rho, + epsilon=self.epsilon, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + parameters=parameters) + return opt + + +class AdamW(object): + def __init__(self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + weight_decay=None, + multi_precision=False, + grad_clip=None, + no_weight_decay_name=None, + one_dim_param_no_weight_decay=False, + **args): + super().__init__() + self.learning_rate = learning_rate + self.beta1 = beta1 + self.beta2 = beta2 + self.epsilon = epsilon + self.grad_clip = grad_clip + self.weight_decay = weight_decay + self.multi_precision = multi_precision + self.no_weight_decay_name_list = no_weight_decay_name.split( + ) if no_weight_decay_name else [] + self.one_dim_param_no_weight_decay = one_dim_param_no_weight_decay + + def __call__(self, model_list): + # model_list is None in static graph + parameters = sum([m.parameters() for m in model_list], + []) if model_list else None + + # TODO(gaotingquan): model_list is None when in static graph, "no_weight_decay" not work. + if model_list is None: + if self.one_dim_param_no_weight_decay or len( + self.no_weight_decay_name_list) != 0: + msg = "\"AdamW\" does not support setting \"no_weight_decay\" in static graph. Please use dynamic graph." + logger.error(Exception(msg)) + raise Exception(msg) + + self.no_weight_decay_param_name_list = [ + p.name for model in model_list for n, p in model.named_parameters() + if any(nd in n for nd in self.no_weight_decay_name_list) + ] if model_list else [] + + if self.one_dim_param_no_weight_decay: + self.no_weight_decay_param_name_list += [ + p.name for model in model_list + for n, p in model.named_parameters() if len(p.shape) == 1 + ] if model_list else [] + + opt = optim.AdamW( + learning_rate=self.learning_rate, + beta1=self.beta1, + beta2=self.beta2, + epsilon=self.epsilon, + parameters=parameters, + weight_decay=self.weight_decay, + multi_precision=self.multi_precision, + grad_clip=self.grad_clip, + apply_decay_param_fun=self._apply_decay_param_fun) + return opt + + def _apply_decay_param_fun(self, name): + return name not in self.no_weight_decay_param_name_list diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/static/program.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/static/program.py new file mode 100644 index 000000000..b3534a2cf --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/static/program.py @@ -0,0 +1,449 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time +import numpy as np + +from collections import OrderedDict + +import paddle +import paddle.nn.functional as F + +from paddle.distributed import fleet +from paddle.distributed.fleet import DistributedStrategy + +# from ppcls.optimizer import OptimizerBuilder +# from ppcls.optimizer.learning_rate import LearningRateBuilder + +from ppcls.arch import build_model +from ppcls.loss import build_loss +from ppcls.metric import build_metrics +from ppcls.optimizer import build_optimizer +from ppcls.optimizer import build_lr_scheduler + +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger, profiler + + +def create_feeds(image_shape, use_mix=False, class_num=None, dtype="float32"): + """ + Create feeds as model input + + Args: + image_shape(list[int]): model input shape, such as [3, 224, 224] + use_mix(bool): whether to use mix(include mixup, cutmix, fmix) + class_num(int): the class number of network, required if use_mix + + Returns: + feeds(dict): dict of model input variables + """ + feeds = OrderedDict() + feeds['data'] = paddle.static.data( + name="data", shape=[None] + image_shape, dtype=dtype) + + if use_mix: + if class_num is None: + msg = "When use MixUp, CutMix and so on, you must set class_num." + logger.error(msg) + raise Exception(msg) + feeds['target'] = paddle.static.data( + name="target", shape=[None, class_num], dtype="float32") + else: + feeds['label'] = paddle.static.data( + name="label", shape=[None, 1], dtype="int64") + + return feeds + + +def create_fetchs(out, + feeds, + architecture, + topk=5, + epsilon=None, + class_num=None, + use_mix=False, + config=None, + mode="Train"): + """ + Create fetchs as model outputs(included loss and measures), + will call create_loss and create_metric(if use_mix). + Args: + out(variable): model output variable + feeds(dict): dict of model input variables. + If use mix_up, it will not include label. + architecture(dict): architecture information, + name(such as ResNet50) is needed + topk(int): usually top5 + epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0 + class_num(int): the class number of network, required if use_mix + use_mix(bool): whether to use mix(include mixup, cutmix, fmix) + config(dict): model config + + Returns: + fetchs(dict): dict of model outputs(included loss and measures) + """ + fetchs = OrderedDict() + # build loss + if use_mix: + if class_num is None: + msg = "When use MixUp, CutMix and so on, you must set class_num." 
+ logger.error(msg) + raise Exception(msg) + target = paddle.reshape(feeds['target'], [-1, class_num]) + else: + target = paddle.reshape(feeds['label'], [-1, 1]) + + loss_func = build_loss(config["Loss"][mode]) + loss_dict = loss_func(out, target) + + loss_out = loss_dict["loss"] + fetchs['loss'] = (loss_out, AverageMeter('loss', '7.4f', need_avg=True)) + + # build metric + if not use_mix: + metric_func = build_metrics(config["Metric"][mode]) + + metric_dict = metric_func(out, target) + + for key in metric_dict: + if mode != "Train" and paddle.distributed.get_world_size() > 1: + paddle.distributed.all_reduce( + metric_dict[key], op=paddle.distributed.ReduceOp.SUM) + metric_dict[key] = metric_dict[ + key] / paddle.distributed.get_world_size() + + fetchs[key] = (metric_dict[key], AverageMeter( + key, '7.4f', need_avg=True)) + + return fetchs + + +def create_optimizer(config, step_each_epoch): + # create learning_rate instance + optimizer, lr_sch = build_optimizer( + config["Optimizer"], config["Global"]["epochs"], step_each_epoch) + return optimizer, lr_sch + + +def create_strategy(config): + """ + Create build strategy and exec strategy. + + Args: + config(dict): config + + Returns: + build_strategy: build strategy + exec_strategy: exec strategy + """ + build_strategy = paddle.static.BuildStrategy() + exec_strategy = paddle.static.ExecutionStrategy() + + exec_strategy.num_threads = 1 + exec_strategy.num_iteration_per_drop_scope = ( + 10000 + if 'AMP' in config and config.AMP.get("level", "O1") == "O2" else 10) + + fuse_op = True if 'AMP' in config else False + + fuse_bn_act_ops = config.get('fuse_bn_act_ops', fuse_op) + fuse_elewise_add_act_ops = config.get('fuse_elewise_add_act_ops', fuse_op) + fuse_bn_add_act_ops = config.get('fuse_bn_add_act_ops', fuse_op) + enable_addto = config.get('enable_addto', fuse_op) + + build_strategy.fuse_bn_act_ops = fuse_bn_act_ops + build_strategy.fuse_elewise_add_act_ops = fuse_elewise_add_act_ops + build_strategy.fuse_bn_add_act_ops = fuse_bn_add_act_ops + build_strategy.enable_addto = enable_addto + + return build_strategy, exec_strategy + + +def dist_optimizer(config, optimizer): + """ + Create a distributed optimizer based on a normal optimizer + + Args: + config(dict): + optimizer(): a normal optimizer + + Returns: + optimizer: a distributed optimizer + """ + build_strategy, exec_strategy = create_strategy(config) + + dist_strategy = DistributedStrategy() + dist_strategy.execution_strategy = exec_strategy + dist_strategy.build_strategy = build_strategy + + dist_strategy.nccl_comm_num = 1 + dist_strategy.fuse_all_reduce_ops = True + dist_strategy.fuse_grad_size_in_MB = 16 + optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy) + + return optimizer + + +def mixed_precision_optimizer(config, optimizer): + if 'AMP' in config: + amp_cfg = config.AMP if config.AMP else dict() + scale_loss = amp_cfg.get('scale_loss', 1.0) + use_dynamic_loss_scaling = amp_cfg.get('use_dynamic_loss_scaling', + False) + use_pure_fp16 = amp_cfg.get("level", "O1") == "O2" + optimizer = paddle.static.amp.decorate( + optimizer, + init_loss_scaling=scale_loss, + use_dynamic_loss_scaling=use_dynamic_loss_scaling, + use_pure_fp16=use_pure_fp16, + use_fp16_guard=True) + + return optimizer + + +def build(config, + main_prog, + startup_prog, + class_num=None, + step_each_epoch=100, + is_train=True, + is_distributed=True): + """ + Build a program using a model and an optimizer + 1. create feeds + 2. create a dataloader + 3. create a model + 4. create fetchs + 5. 
create an optimizer + + Args: + config(dict): config + main_prog(): main program + startup_prog(): startup program + class_num(int): the class number of network, required if use_mix + is_train(bool): train or eval + is_distributed(bool): whether to use distributed training method + + Returns: + dataloader(): a bridge between the model and the data + fetchs(dict): dict of model outputs(included loss and measures) + """ + with paddle.static.program_guard(main_prog, startup_prog): + with paddle.utils.unique_name.guard(): + mode = "Train" if is_train else "Eval" + use_mix = "batch_transform_ops" in config["DataLoader"][mode][ + "dataset"] + feeds = create_feeds( + config["Global"]["image_shape"], + use_mix, + class_num=class_num, + dtype="float32") + + # build model + # data_format should be assigned in arch-dict + input_image_channel = config["Global"]["image_shape"][ + 0] # default as [3, 224, 224] + model = build_model(config) + out = model(feeds["data"]) + # end of build model + + fetchs = create_fetchs( + out, + feeds, + config["Arch"], + epsilon=config.get('ls_epsilon'), + class_num=class_num, + use_mix=use_mix, + config=config, + mode=mode) + lr_scheduler = None + optimizer = None + if is_train: + optimizer, lr_scheduler = build_optimizer( + config["Optimizer"], config["Global"]["epochs"], + step_each_epoch) + optimizer = mixed_precision_optimizer(config, optimizer) + if is_distributed: + optimizer = dist_optimizer(config, optimizer) + optimizer.minimize(fetchs['loss'][0]) + return fetchs, lr_scheduler, feeds, optimizer + + +def compile(config, program, loss_name=None, share_prog=None): + """ + Compile the program + + Args: + config(dict): config + program(): the program which is wrapped by + loss_name(str): loss name + share_prog(): the shared program, used for evaluation during training + + Returns: + compiled_program(): a compiled program + """ + build_strategy, exec_strategy = create_strategy(config) + + compiled_program = paddle.static.CompiledProgram( + program).with_data_parallel( + share_vars_from=share_prog, + loss_name=loss_name, + build_strategy=build_strategy, + exec_strategy=exec_strategy) + + return compiled_program + + +total_step = 0 + + +def run(dataloader, + exe, + program, + feeds, + fetchs, + epoch=0, + mode='train', + config=None, + vdl_writer=None, + lr_scheduler=None, + profiler_options=None): + """ + Feed data to the model and fetch the measures and loss + + Args: + dataloader(paddle io dataloader): + exe(): + program(): + fetchs(dict): dict of measures and the loss + epoch(int): epoch of training or evaluation + model(str): log only + + Returns: + """ + fetch_list = [f[0] for f in fetchs.values()] + metric_dict = OrderedDict([("lr", AverageMeter( + 'lr', 'f', postfix=",", need_avg=False))]) + + for k in fetchs: + metric_dict[k] = fetchs[k][1] + + metric_dict["batch_time"] = AverageMeter( + 'batch_cost', '.5f', postfix=" s,") + metric_dict["reader_time"] = AverageMeter( + 'reader_cost', '.5f', postfix=" s,") + + for m in metric_dict.values(): + m.reset() + + use_dali = config["Global"].get('use_dali', False) + tic = time.time() + + if not use_dali: + dataloader = dataloader() + + idx = 0 + batch_size = None + while True: + # The DALI maybe raise RuntimeError for some particular images, such as ImageNet1k/n04418357_26036.JPEG + try: + batch = next(dataloader) + except StopIteration: + break + except RuntimeError: + logger.warning( + "Except RuntimeError when reading data from dataloader, try to read once again..." 
+ ) + continue + idx += 1 + # ignore the warmup iters + if idx == 5: + metric_dict["batch_time"].reset() + metric_dict["reader_time"].reset() + + metric_dict['reader_time'].update(time.time() - tic) + + profiler.add_profiler_step(profiler_options) + + if use_dali: + batch_size = batch[0]["data"].shape()[0] + feed_dict = batch[0] + else: + batch_size = batch[0].shape()[0] + feed_dict = { + key.name: batch[idx] + for idx, key in enumerate(feeds.values()) + } + + metrics = exe.run(program=program, + feed=feed_dict, + fetch_list=fetch_list) + + for name, m in zip(fetchs.keys(), metrics): + metric_dict[name].update(np.mean(m), batch_size) + metric_dict["batch_time"].update(time.time() - tic) + if mode == "train": + metric_dict['lr'].update(lr_scheduler.get_lr()) + + fetchs_str = ' '.join([ + str(metric_dict[key].mean) + if "time" in key else str(metric_dict[key].value) + for key in metric_dict + ]) + ips_info = " ips: {:.5f} images/sec.".format( + batch_size / metric_dict["batch_time"].avg) + fetchs_str += ips_info + + if lr_scheduler is not None: + lr_scheduler.step() + + if vdl_writer: + global total_step + logger.scaler('loss', metrics[0][0], total_step, vdl_writer) + total_step += 1 + if mode == 'eval': + if idx % config.get('print_interval', 10) == 0: + logger.info("{:s} step:{:<4d} {:s}".format(mode, idx, + fetchs_str)) + else: + epoch_str = "epoch:{:<3d}".format(epoch) + step_str = "{:s} step:{:<4d}".format(mode, idx) + + if idx % config.get('print_interval', 10) == 0: + logger.info("{:s} {:s} {:s}".format(epoch_str, step_str, + fetchs_str)) + + tic = time.time() + + end_str = ' '.join([str(m.mean) for m in metric_dict.values()] + + [metric_dict["batch_time"].total]) + ips_info = "ips: {:.5f} images/sec.".format(batch_size / + metric_dict["batch_time"].avg) + if mode == 'eval': + logger.info("END {:s} {:s} {:s}".format(mode, end_str, ips_info)) + else: + end_epoch_str = "END epoch:{:<3d}".format(epoch) + logger.info("{:s} {:s} {:s} {:s}".format(end_epoch_str, mode, end_str, + ips_info)) + if use_dali: + dataloader.reset() + + # return top1_acc in order to save the best model + if mode == 'eval': + return fetchs["top1"][1].avg diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/static/save_load.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/static/save_load.py new file mode 100644 index 000000000..13badfddc --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/static/save_load.py @@ -0,0 +1,139 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
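# Illustrative sketch (not part of the patch): a minimal example of the
# static-graph feed/fetch pattern that program.run() above relies on -- build a
# program with paddle.static.data placeholders, then call Executor.run with a
# feed dict keyed by those names and a fetch_list. The toy network and random
# numpy batch are assumptions.
import numpy as np
import paddle

paddle.enable_static()
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    data = paddle.static.data(name="data", shape=[None, 10], dtype="float32")
    label = paddle.static.data(name="label", shape=[None, 1], dtype="int64")
    logits = paddle.static.nn.fc(data, 2)
    loss = paddle.nn.functional.cross_entropy(logits, label)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)

feed_dict = {"data": np.random.randn(8, 10).astype("float32"),
             "label": np.random.randint(0, 2, (8, 1)).astype("int64")}
fetched = exe.run(main_prog, feed=feed_dict, fetch_list=[loss])
print("loss:", float(np.mean(fetched[0])))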
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import errno +import os +import re +import shutil +import tempfile + +import paddle + +from ppcls.utils import logger + +__all__ = ['init_model', 'save_model'] + + +def _mkdir_if_not_exist(path): + """ + mkdir if not exists, ignore the exception when multiprocess mkdir together + """ + if not os.path.exists(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno == errno.EEXIST and os.path.isdir(path): + logger.warning( + 'be happy if some process has already created {}'.format( + path)) + else: + raise OSError('Failed to mkdir {}'.format(path)) + + +def _load_state(path): + if os.path.exists(path + '.pdopt'): + # XXX another hack to ignore the optimizer state + tmp = tempfile.mkdtemp() + dst = os.path.join(tmp, os.path.basename(os.path.normpath(path))) + shutil.copy(path + '.pdparams', dst + '.pdparams') + state = paddle.static.load_program_state(dst) + shutil.rmtree(tmp) + else: + state = paddle.static.load_program_state(path) + return state + + +def load_params(exe, prog, path, ignore_params=None): + """ + Load model from the given path. + Args: + exe (fluid.Executor): The fluid.Executor object. + prog (fluid.Program): load weight to which Program object. + path (string): URL string or loca model path. + ignore_params (list): ignore variable to load when finetuning. + It can be specified by finetune_exclude_pretrained_params + and the usage can refer to the document + docs/advanced_tutorials/TRANSFER_LEARNING.md + """ + if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')): + raise ValueError("Model pretrain path {} does not " + "exists.".format(path)) + + logger.info("Loading parameters from {}...".format(path)) + + ignore_set = set() + state = _load_state(path) + + # ignore the parameter which mismatch the shape + # between the model and pretrain weight. 
+ all_var_shape = {} + for block in prog.blocks: + for param in block.all_parameters(): + all_var_shape[param.name] = param.shape + ignore_set.update([ + name for name, shape in all_var_shape.items() + if name in state and shape != state[name].shape + ]) + + if ignore_params: + all_var_names = [var.name for var in prog.list_vars()] + ignore_list = filter( + lambda var: any([re.match(name, var) for name in ignore_params]), + all_var_names) + ignore_set.update(list(ignore_list)) + + if len(ignore_set) > 0: + for k in ignore_set: + if k in state: + logger.warning( + 'variable {} is already excluded automatically'.format(k)) + del state[k] + + paddle.static.set_program_state(prog, state) + + +def init_model(config, program, exe): + """ + load model from checkpoint or pretrained_model + """ + checkpoints = config.get('checkpoints') + if checkpoints: + paddle.static.load(program, checkpoints, exe) + logger.info("Finish initing model from {}".format(checkpoints)) + return + + pretrained_model = config.get('pretrained_model') + if pretrained_model: + if not isinstance(pretrained_model, list): + pretrained_model = [pretrained_model] + for pretrain in pretrained_model: + load_params(exe, program, pretrain) + logger.info("Finish initing model from {}".format(pretrained_model)) + + +def save_model(program, model_path, epoch_id, prefix='ppcls'): + """ + save model to the target path + """ + if paddle.distributed.get_rank() != 0: + return + model_path = os.path.join(model_path, str(epoch_id)) + _mkdir_if_not_exist(model_path) + model_prefix = os.path.join(model_path, prefix) + paddle.static.save(program, model_prefix) + logger.info("Already save model in {}".format(model_path)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/static/train.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/static/train.py new file mode 100644 index 000000000..dd16cdb4c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/static/train.py @@ -0,0 +1,212 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
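# Illustrative sketch (not part of the patch): save_model() and init_model()
# above ultimately call paddle.static.save / paddle.static.load on a
# "<output_dir>/<epoch_id>/<prefix>" path. The toy program and the "output/0"
# directory used here are placeholders.
import os
import paddle

paddle.enable_static()
prog, startup = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(prog, startup):
    x = paddle.static.data(name="x", shape=[None, 4], dtype="float32")
    y = paddle.static.nn.fc(x, 2)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup)

model_prefix = os.path.join("output", "0", "ppcls")   # hypothetical location
os.makedirs(os.path.dirname(model_prefix), exist_ok=True)
paddle.static.save(prog, model_prefix)        # writes ppcls.pdparams / .pdopt / .pdmodel
paddle.static.load(prog, model_prefix, exe)   # what init_model() does when "checkpoints" is set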
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(__dir__) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../../'))) + +import paddle +from paddle.distributed import fleet +from visualdl import LogWriter + +from ppcls.data import build_dataloader +from ppcls.utils.config import get_config, print_config +from ppcls.utils import logger +from ppcls.utils.logger import init_logger +from ppcls.static.save_load import init_model, save_model +from ppcls.static import program + + +def parse_args(): + parser = argparse.ArgumentParser("PaddleClas train script") + parser.add_argument( + '-c', + '--config', + type=str, + default='configs/ResNet/ResNet50.yaml', + help='config file path') + parser.add_argument( + '-p', + '--profiler_options', + type=str, + default=None, + help='The option of profiler, which should be in format \"key1=value1;key2=value2;key3=value3\".' + ) + parser.add_argument( + '-o', + '--override', + action='append', + default=[], + help='config options to be overridden') + args = parser.parse_args() + return args + + +def main(args): + """ + all the config of training paradigm should be in config["Global"] + """ + config = get_config(args.config, overrides=args.override, show=False) + global_config = config["Global"] + + mode = "train" + + log_file = os.path.join(global_config['output_dir'], + config["Arch"]["name"], f"{mode}.log") + init_logger(log_file=log_file) + print_config(config) + + if global_config.get("is_distributed", True): + fleet.init(is_collective=True) + # assign the device + use_gpu = global_config.get("use_gpu", True) + # amp related config + if 'AMP' in config: + AMP_RELATED_FLAGS_SETTING = { + 'FLAGS_cudnn_exhaustive_search': 1, + 'FLAGS_conv_workspace_size_limit': 1500, + 'FLAGS_cudnn_batchnorm_spatial_persistent': 1, + 'FLAGS_max_inplace_grad_add': 8, + } + os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1' + paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING) + + use_xpu = global_config.get("use_xpu", False) + use_npu = global_config.get("use_npu", False) + use_mlu = global_config.get("use_mlu", False) + assert ( + use_gpu and use_xpu and use_npu and use_mlu + ) is not True, "gpu, xpu, npu and mlu can not be true in the same time in static mode!" 
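+    # Note: the assert above only trips when all four device flags are enabled at
+    # once; the branch below assumes at most one of gpu/xpu/npu/mlu is set and
+    # otherwise falls back to CPU.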
+ + if use_gpu: + device = paddle.set_device('gpu') + elif use_xpu: + device = paddle.set_device('xpu') + elif use_npu: + device = paddle.set_device('npu') + elif use_mlu: + device = paddle.set_device('mlu') + else: + device = paddle.set_device('cpu') + + # visualDL + vdl_writer = None + if global_config["use_visualdl"]: + vdl_dir = os.path.join(global_config["output_dir"], "vdl") + vdl_writer = LogWriter(vdl_dir) + + # build dataloader + eval_dataloader = None + use_dali = global_config.get('use_dali', False) + + class_num = config["Arch"].get("class_num", None) + config["DataLoader"].update({"class_num": class_num}) + train_dataloader = build_dataloader( + config["DataLoader"], "Train", device=device, use_dali=use_dali) + if global_config["eval_during_train"]: + eval_dataloader = build_dataloader( + config["DataLoader"], "Eval", device=device, use_dali=use_dali) + + step_each_epoch = len(train_dataloader) + + # startup_prog is used to do some parameter init work, + # and train prog is used to hold the network + startup_prog = paddle.static.Program() + train_prog = paddle.static.Program() + + best_top1_acc = 0.0 # best top1 acc record + + train_fetchs, lr_scheduler, train_feeds, optimizer = program.build( + config, + train_prog, + startup_prog, + class_num, + step_each_epoch=step_each_epoch, + is_train=True, + is_distributed=global_config.get("is_distributed", True)) + + if global_config["eval_during_train"]: + eval_prog = paddle.static.Program() + eval_fetchs, _, eval_feeds, _ = program.build( + config, + eval_prog, + startup_prog, + is_train=False, + is_distributed=global_config.get("is_distributed", True)) + # clone to prune some content which is irrelevant in eval_prog + eval_prog = eval_prog.clone(for_test=True) + + # create the "Executor" with the statement of which device + exe = paddle.static.Executor(device) + # Parameter initialization + exe.run(startup_prog) + # load pretrained models or checkpoints + init_model(global_config, train_prog, exe) + + if 'AMP' in config and config.AMP.get("level", "O1") == "O2": + optimizer.amp_init( + device, + scope=paddle.static.global_scope(), + test_program=eval_prog + if global_config["eval_during_train"] else None) + + if not global_config.get("is_distributed", True): + compiled_train_prog = program.compile( + config, train_prog, loss_name=train_fetchs["loss"][0].name) + else: + compiled_train_prog = train_prog + + if eval_dataloader is not None: + compiled_eval_prog = program.compile(config, eval_prog) + + for epoch_id in range(global_config["epochs"]): + # 1. train with train dataset + program.run(train_dataloader, exe, compiled_train_prog, train_feeds, + train_fetchs, epoch_id, 'train', config, vdl_writer, + lr_scheduler, args.profiler_options) + # 2. evaate with eval dataset + if global_config["eval_during_train"] and epoch_id % global_config[ + "eval_interval"] == 0: + top1_acc = program.run(eval_dataloader, exe, compiled_eval_prog, + eval_feeds, eval_fetchs, epoch_id, "eval", + config) + if top1_acc > best_top1_acc: + best_top1_acc = top1_acc + message = "The best top1 acc {:.5f}, in epoch: {:d}".format( + best_top1_acc, epoch_id) + logger.info(message) + if epoch_id % global_config["save_interval"] == 0: + + model_path = os.path.join(global_config["output_dir"], + config["Arch"]["name"]) + save_model(train_prog, model_path, "best_model") + + # 3. 
save the persistable model + if epoch_id % global_config["save_interval"] == 0: + model_path = os.path.join(global_config["output_dir"], + config["Arch"]["name"]) + save_model(train_prog, model_path, epoch_id) + + +if __name__ == '__main__': + paddle.enable_static() + args = parse_args() + main(args) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/__init__.py new file mode 100644 index 000000000..632cc7882 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import logger +from . import misc +from . import model_zoo +from . import metrics + +from .save_load import init_model, save_model +from .config import get_config +from .misc import AverageMeter +from .metrics import multi_hot_encode +from .metrics import hamming_distance +from .metrics import accuracy_score +from .metrics import precision_recall_fscore +from .metrics import mean_average_precision diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/check.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/check.py new file mode 100644 index 000000000..bc7030818 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/check.py @@ -0,0 +1,151 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys + +import paddle +from paddle import is_compiled_with_cuda + +from ppcls.arch import get_architectures +from ppcls.arch import similar_architectures +from ppcls.arch import get_blacklist_model_in_static_mode +from ppcls.utils import logger + + +def check_version(): + """ + Log error and exit when the installed version of paddlepaddle is + not satisfied. + """ + err = "PaddlePaddle version 1.8.0 or higher is required, " \ + "or a suitable develop version is satisfied as well. \n" \ + "Please make sure the version is good with your code." + try: + pass + # paddle.utils.require_version('0.0.0') + except Exception: + logger.error(err) + sys.exit(1) + + +def check_gpu(): + """ + Log error and exit when using paddlepaddle cpu version. + """ + err = "You are using paddlepaddle cpu version! Please try to " \ + "install paddlepaddle-gpu to run model on GPU." 
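+    # is_compiled_with_cuda() is a build-time check: it reports whether this Paddle
+    # wheel was compiled with CUDA, not whether a GPU is actually visible at runtime.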
+
+    try:
+        assert is_compiled_with_cuda()
+    except AssertionError:
+        logger.error(err)
+        sys.exit(1)
+
+
+def check_architecture(architecture):
+    """
+    check architecture and recommend similar architectures
+    """
+    assert isinstance(architecture, dict), \
+        ("the type of architecture({}) should be dict".format(architecture))
+    assert "name" in architecture, \
+        ("name must be in the architecture keys, just contains: {}".format(
+            architecture.keys()))
+
+    similar_names = similar_architectures(architecture["name"],
+                                          get_architectures())
+    model_list = ', '.join(similar_names)
+    err = "Architecture [{}] does not exist! Maybe you want: [{}]" \
+          "".format(architecture["name"], model_list)
+    try:
+        assert architecture["name"] in similar_names
+    except AssertionError:
+        logger.error(err)
+        sys.exit(1)
+
+
+def check_model_with_running_mode(architecture):
+    """
+    check whether the model is consistent with the operating mode
+    """
+    # some models are not supported in the static mode
+    blacklist = get_blacklist_model_in_static_mode()
+    if not paddle.in_dynamic_mode() and architecture["name"] in blacklist:
+        logger.error("Model: {} is not supported in the static mode.".format(
+            architecture["name"]))
+        sys.exit(1)
+    return
+
+
+def check_mix(architecture, use_mix=False):
+    """
+    check mix parameter
+    """
+    err = "Cannot use mix processing in GoogLeNet, " \
+          "please set use_mix = False."
+    try:
+        if architecture["name"] == "GoogLeNet":
+            assert use_mix is not True
+    except AssertionError:
+        logger.error(err)
+        sys.exit(1)
+
+
+def check_classes_num(classes_num):
+    """
+    check classes_num
+    """
+    err = "classes_num({}) should be a positive integer " \
+          "and larger than 1".format(classes_num)
+    try:
+        assert isinstance(classes_num, int)
+        assert classes_num > 1
+    except AssertionError:
+        logger.error(err)
+        sys.exit(1)
+
+
+def check_data_dir(path):
+    """
+    check data_dir
+    """
+    err = "Data path {} does not exist, please provide a valid path" \
+          "".format(path)
+    try:
+        assert os.path.isdir(path)
+    except AssertionError:
+        logger.error(err)
+        sys.exit(1)
+
+
+def check_function_params(config, key):
+    """
+    check the specified config section
+    """
+    k_config = config.get(key)
+    assert k_config is not None, \
+        ('{} is required in config'.format(key))
+
+    assert k_config.get('function'), \
+        ('function is required in {} config'.format(key))
+    params = k_config.get('params')
+    assert params is not None, \
+        ('params is required in {} config'.format(key))
+    assert isinstance(params, dict), \
+        ('the params in {} config should be a dict'.format(key))
diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/config.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/config.py
new file mode 100644
index 000000000..e3277c480
--- /dev/null
+++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/config.py
@@ -0,0 +1,210 @@
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
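For reference, a minimal sketch of calling the helpers defined in check.py above; the dataset path is hypothetical, and these calls exit the process on failure rather than raising.

    from ppcls.utils import check

    check.check_gpu()                      # exits unless Paddle was built with CUDA
    check.check_classes_num(1000)          # must be an int strictly greater than 1
    check.check_mix({"name": "ResNet50"})  # mix is only rejected for GoogLeNet
    check.check_data_dir("./dataset/ILSVRC2012/")   # hypothetical path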
+ +import os +import copy +import argparse +import yaml +from ppcls.utils import logger +from ppcls.utils import check +__all__ = ['get_config'] + + +class AttrDict(dict): + def __getattr__(self, key): + return self[key] + + def __setattr__(self, key, value): + if key in self.__dict__: + self.__dict__[key] = value + else: + self[key] = value + + def __deepcopy__(self, content): + return copy.deepcopy(dict(self)) + + +def create_attr_dict(yaml_config): + from ast import literal_eval + for key, value in yaml_config.items(): + if type(value) is dict: + yaml_config[key] = value = AttrDict(value) + if isinstance(value, str): + try: + value = literal_eval(value) + except BaseException: + pass + if isinstance(value, AttrDict): + create_attr_dict(yaml_config[key]) + else: + yaml_config[key] = value + + +def parse_config(cfg_file): + """Load a config file into AttrDict""" + with open(cfg_file, 'r') as fopen: + yaml_config = AttrDict(yaml.load(fopen, Loader=yaml.SafeLoader)) + create_attr_dict(yaml_config) + return yaml_config + + +def print_dict(d, delimiter=0): + """ + Recursively visualize a dict and + indenting acrrording by the relationship of keys. + """ + placeholder = "-" * 60 + for k, v in sorted(d.items()): + if isinstance(v, dict): + logger.info("{}{} : ".format(delimiter * " ", k)) + print_dict(v, delimiter + 4) + elif isinstance(v, list) and len(v) >= 1 and isinstance(v[0], dict): + logger.info("{}{} : ".format(delimiter * " ", k)) + for value in v: + print_dict(value, delimiter + 4) + else: + logger.info("{}{} : {}".format(delimiter * " ", k, v)) + if k.isupper(): + logger.info(placeholder) + + +def print_config(config): + """ + visualize configs + Arguments: + config: configs + """ + logger.advertise() + print_dict(config) + + +def check_config(config): + """ + Check config + """ + check.check_version() + use_gpu = config.get('use_gpu', True) + if use_gpu: + check.check_gpu() + architecture = config.get('ARCHITECTURE') + #check.check_architecture(architecture) + use_mix = config.get('use_mix', False) + check.check_mix(architecture, use_mix) + classes_num = config.get('classes_num') + check.check_classes_num(classes_num) + mode = config.get('mode', 'train') + if mode.lower() == 'train': + check.check_function_params(config, 'LEARNING_RATE') + check.check_function_params(config, 'OPTIMIZER') + + +def override(dl, ks, v): + """ + Recursively replace dict of list + Args: + dl(dict or list): dict or list to be replaced + ks(list): list of keys + v(str): value to be replaced + """ + + def str2num(v): + try: + return eval(v) + except Exception: + return v + + assert isinstance(dl, (list, dict)), ("{} should be a list or a dict") + assert len(ks) > 0, ('lenght of keys should larger than 0') + if isinstance(dl, list): + k = str2num(ks[0]) + if len(ks) == 1: + assert k < len(dl), ('index({}) out of range({})'.format(k, dl)) + dl[k] = str2num(v) + else: + override(dl[k], ks[1:], v) + else: + if len(ks) == 1: + # assert ks[0] in dl, ('{} is not exist in {}'.format(ks[0], dl)) + if not ks[0] in dl: + print('A new filed ({}) detected!'.format(ks[0], dl)) + dl[ks[0]] = str2num(v) + else: + override(dl[ks[0]], ks[1:], v) + + +def override_config(config, options=None): + """ + Recursively override the config + Args: + config(dict): dict to be replaced + options(list): list of pairs(key0.key1.idx.key2=value) + such as: [ + 'topk=2', + 'VALID.transforms.1.ResizeImage.resize_short=300' + ] + Returns: + config(dict): replaced config + """ + if options is not None: + for opt in options: + assert 
isinstance(opt, str), ( + "option({}) should be a str".format(opt)) + assert "=" in opt, ( + "option({}) should contain a =" + "to distinguish between key and value".format(opt)) + pair = opt.split('=') + assert len(pair) == 2, ("there can be only a = in the option") + key, value = pair + keys = key.split('.') + override(config, keys, value) + return config + + +def get_config(fname, overrides=None, show=False): + """ + Read config from file + """ + assert os.path.exists(fname), ( + 'config file({}) is not exist'.format(fname)) + config = parse_config(fname) + override_config(config, overrides) + if show: + print_config(config) + # check_config(config) + return config + + +def parse_args(): + parser = argparse.ArgumentParser("generic-image-rec train script") + parser.add_argument( + '-c', + '--config', + type=str, + default='configs/config.yaml', + help='config file path') + parser.add_argument( + '-o', + '--override', + action='append', + default=[], + help='config options to be overridden') + parser.add_argument( + '-p', + '--profiler_options', + type=str, + default=None, + help='The option of profiler, which should be in format \"key1=value1;key2=value2;key3=value3\".' + ) + args = parser.parse_args() + return args diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/download.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/download.py new file mode 100644 index 000000000..9c4575048 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/download.py @@ -0,0 +1,319 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys +import os.path as osp +import shutil +import requests +import hashlib +import tarfile +import zipfile +import time +from collections import OrderedDict +from tqdm import tqdm + +from ppcls.utils import logger + +__all__ = ['get_weights_path_from_url'] + +WEIGHTS_HOME = osp.expanduser("~/.paddleclas/weights") + +DOWNLOAD_RETRY_LIMIT = 3 + + +def is_url(path): + """ + Whether path is URL. + Args: + path (string): URL string or not. + """ + return path.startswith('http://') or path.startswith('https://') + + +def get_weights_path_from_url(url, md5sum=None): + """Get weights path from WEIGHT_HOME, if not exists, + download it from url. + + Args: + url (str): download url + md5sum (str): md5 sum of download package + + Returns: + str: a local path to save downloaded weights. + + Examples: + .. 
code-block:: python + + from paddle.utils.download import get_weights_path_from_url + + resnet18_pretrained_weight_url = 'https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams' + local_weight_path = get_weights_path_from_url(resnet18_pretrained_weight_url) + + """ + path = get_path_from_url(url, WEIGHTS_HOME, md5sum) + return path + + +def _map_path(url, root_dir): + # parse path after download under root_dir + fname = osp.split(url)[-1] + fpath = fname + return osp.join(root_dir, fpath) + + +def _get_unique_endpoints(trainer_endpoints): + # Sorting is to avoid different environmental variables for each card + trainer_endpoints.sort() + ips = set() + unique_endpoints = set() + for endpoint in trainer_endpoints: + ip = endpoint.split(":")[0] + if ip in ips: + continue + ips.add(ip) + unique_endpoints.add(endpoint) + logger.info("unique_endpoints {}".format(unique_endpoints)) + return unique_endpoints + + +def get_path_from_url(url, + root_dir, + md5sum=None, + check_exist=True, + decompress=True): + """ Download from given url to root_dir. + if file or directory specified by url is exists under + root_dir, return the path directly, otherwise download + from url and decompress it, return the path. + + Args: + url (str): download url + root_dir (str): root dir for downloading, it should be + WEIGHTS_HOME or DATASET_HOME + md5sum (str): md5 sum of download package + + Returns: + str: a local path to save downloaded models & weights & datasets. + """ + + from paddle.fluid.dygraph.parallel import ParallelEnv + + assert is_url(url), "downloading from {} not a url".format(url) + # parse path after download to decompress under root_dir + fullpath = _map_path(url, root_dir) + # Mainly used to solve the problem of downloading data from different + # machines in the case of multiple machines. Different ips will download + # data, and the same ip will only download data once. + unique_endpoints = _get_unique_endpoints(ParallelEnv() + .trainer_endpoints[:]) + if osp.exists(fullpath) and check_exist and _md5check(fullpath, md5sum): + logger.info("Found {}".format(fullpath)) + else: + if ParallelEnv().current_endpoint in unique_endpoints: + fullpath = _download(url, root_dir, md5sum) + else: + while not os.path.exists(fullpath): + time.sleep(1) + + if ParallelEnv().current_endpoint in unique_endpoints: + if decompress and (tarfile.is_tarfile(fullpath) or + zipfile.is_zipfile(fullpath)): + fullpath = _decompress(fullpath) + + return fullpath + + +def _download(url, path, md5sum=None): + """ + Download from url, save to path. + + url (str): download url + path (str): download to given path + """ + if not osp.exists(path): + os.makedirs(path) + + fname = osp.split(url)[-1] + fullname = osp.join(path, fname) + retry_cnt = 0 + + while not (osp.exists(fullname) and _md5check(fullname, md5sum)): + if retry_cnt < DOWNLOAD_RETRY_LIMIT: + retry_cnt += 1 + else: + raise RuntimeError("Download from {} failed. " + "Retry limit reached".format(url)) + + logger.info("Downloading {} from {}".format(fname, url)) + + try: + req = requests.get(url, stream=True) + except Exception as e: # requests.exceptions.ConnectionError + logger.info( + "Downloading {} from {} failed {} times with exception {}". 
+ format(fname, url, retry_cnt + 1, str(e))) + time.sleep(1) + continue + + if req.status_code != 200: + raise RuntimeError("Downloading from {} failed with code " + "{}!".format(url, req.status_code)) + + # For protecting download interupted, download to + # tmp_fullname firstly, move tmp_fullname to fullname + # after download finished + tmp_fullname = fullname + "_tmp" + total_size = req.headers.get('content-length') + with open(tmp_fullname, 'wb') as f: + if total_size: + with tqdm(total=(int(total_size) + 1023) // 1024) as pbar: + for chunk in req.iter_content(chunk_size=1024): + f.write(chunk) + pbar.update(1) + else: + for chunk in req.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + shutil.move(tmp_fullname, fullname) + + return fullname + + +def _md5check(fullname, md5sum=None): + if md5sum is None: + return True + + logger.info("File {} md5 checking...".format(fullname)) + md5 = hashlib.md5() + with open(fullname, 'rb') as f: + for chunk in iter(lambda: f.read(4096), b""): + md5.update(chunk) + calc_md5sum = md5.hexdigest() + + if calc_md5sum != md5sum: + logger.info("File {} md5 check failed, {}(calc) != " + "{}(base)".format(fullname, calc_md5sum, md5sum)) + return False + return True + + +def _decompress(fname): + """ + Decompress for zip and tar file + """ + logger.info("Decompressing {}...".format(fname)) + + # For protecting decompressing interupted, + # decompress to fpath_tmp directory firstly, if decompress + # successed, move decompress files to fpath and delete + # fpath_tmp and remove download compress file. + + if tarfile.is_tarfile(fname): + uncompressed_path = _uncompress_file_tar(fname) + elif zipfile.is_zipfile(fname): + uncompressed_path = _uncompress_file_zip(fname) + else: + raise TypeError("Unsupport compress file type {}".format(fname)) + + return uncompressed_path + + +def _uncompress_file_zip(filepath): + files = zipfile.ZipFile(filepath, 'r') + file_list = files.namelist() + + file_dir = os.path.dirname(filepath) + + if _is_a_single_file(file_list): + rootpath = file_list[0] + uncompressed_path = os.path.join(file_dir, rootpath) + + for item in file_list: + files.extract(item, file_dir) + + elif _is_a_single_dir(file_list): + rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + + for item in file_list: + files.extract(item, file_dir) + + else: + rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + if not os.path.exists(uncompressed_path): + os.makedirs(uncompressed_path) + for item in file_list: + files.extract(item, os.path.join(file_dir, rootpath)) + + files.close() + + return uncompressed_path + + +def _uncompress_file_tar(filepath, mode="r:*"): + files = tarfile.open(filepath, mode) + file_list = files.getnames() + + file_dir = os.path.dirname(filepath) + + if _is_a_single_file(file_list): + rootpath = file_list[0] + uncompressed_path = os.path.join(file_dir, rootpath) + for item in file_list: + files.extract(item, file_dir) + elif _is_a_single_dir(file_list): + rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + for item in file_list: + files.extract(item, file_dir) + else: + rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + if not os.path.exists(uncompressed_path): + os.makedirs(uncompressed_path) + + for item in file_list: + files.extract(item, os.path.join(file_dir, 
rootpath))
+
+    files.close()
+
+    return uncompressed_path
+
+
+def _is_a_single_file(file_list):
+    # str.find() returns -1 when no path separator is present, i.e. a bare file.
+    if len(file_list) == 1 and file_list[0].find(os.sep) < 0:
+        return True
+    return False
+
+
+def _is_a_single_dir(file_list):
+    new_file_list = []
+    for file_path in file_list:
+        if '/' in file_path:
+            file_path = file_path.replace('/', os.sep)
+        elif '\\' in file_path:
+            file_path = file_path.replace('\\', os.sep)
+        new_file_list.append(file_path)
+
+    file_name = new_file_list[0].split(os.sep)[0]
+    for i in range(1, len(new_file_list)):
+        if file_name != new_file_list[i].split(os.sep)[0]:
+            return False
+    return True
diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/ema.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/ema.py
new file mode 100644
index 000000000..b54cdb1b2
--- /dev/null
+++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/ema.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import numpy as np
+
+
+class ExponentialMovingAverage():
+    """
+    Exponential Moving Average
+    Code was heavily based on https://github.com/Wanger-SJTU/SegToolbox.Pytorch/blob/master/lib/utils/ema.py
+    """
+
+    def __init__(self, model, decay, thres_steps=True):
+        self._model = model
+        self._decay = decay
+        self._thres_steps = thres_steps
+        self._shadow = {}
+        self._backup = {}
+
+    def register(self):
+        self._update_step = 0
+        for name, param in self._model.named_parameters():
+            if param.stop_gradient is False:
+                self._shadow[name] = param.numpy().copy()
+
+    def update(self):
+        decay = min(self._decay, (1 + self._update_step) / (
+            10 + self._update_step)) if self._thres_steps else self._decay
+        for name, param in self._model.named_parameters():
+            if param.stop_gradient is False:
+                assert name in self._shadow
+                new_val = np.array(param.numpy().copy())
+                old_val = np.array(self._shadow[name])
+                new_average = decay * old_val + (1 - decay) * new_val
+                self._shadow[name] = new_average
+        self._update_step += 1
+        return decay
+
+    def apply(self):
+        for name, param in self._model.named_parameters():
+            if param.stop_gradient is False:
+                assert name in self._shadow
+                self._backup[name] = np.array(param.numpy().copy())
+                param.set_value(np.array(self._shadow[name]))
+
+    def restore(self):
+        for name, param in self._model.named_parameters():
+            if param.stop_gradient is False:
+                assert name in self._backup
+                param.set_value(self._backup[name])
+        self._backup = {}
diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/feature_maps_visualization/fm_vis.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/feature_maps_visualization/fm_vis.py
new file mode 100644
index 000000000..a5368b10e
--- /dev/null
+++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/feature_maps_visualization/fm_vis.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
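Before the feature-map visualization script, a short usage sketch for the ExponentialMovingAverage helper defined above; the model and the loop body are hypothetical stand-ins, only the register/update/apply/restore cycle comes from the class itself.

    import paddle
    from ppcls.utils.ema import ExponentialMovingAverage   # module path assumed from the tree above

    model = paddle.vision.models.resnet18()   # hypothetical stand-in model
    ema = ExponentialMovingAverage(model, decay=0.999)
    ema.register()                            # snapshot the initial trainable weights

    for step in range(100):                   # hypothetical training loop
        # ... forward / backward / optimizer.step() ...
        ema.update()                          # fold the new weights into the shadow copy

    ema.apply()                               # swap the EMA weights in for evaluation
    # ... run validation ...
    ema.restore()                             # put the raw training weights back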
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy as np +import cv2 +import utils +import argparse +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(__dir__) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../../..'))) + +import paddle +from paddle.distributed import ParallelEnv + +from resnet import ResNet50 +from ppcls.utils.save_load import load_dygraph_pretrain + + +def parse_args(): + def str2bool(v): + return v.lower() in ("true", "t", "1") + + parser = argparse.ArgumentParser() + parser.add_argument("-i", "--image_file", required=True, type=str) + parser.add_argument("-c", "--channel_num", type=int) + parser.add_argument("-p", "--pretrained_model", type=str) + parser.add_argument("--show", type=str2bool, default=False) + parser.add_argument("--interpolation", type=int, default=1) + parser.add_argument("--save_path", type=str, default=None) + parser.add_argument("--use_gpu", type=str2bool, default=True) + + return parser.parse_args() + + +def create_operators(interpolation=1): + size = 224 + img_mean = [0.485, 0.456, 0.406] + img_std = [0.229, 0.224, 0.225] + img_scale = 1.0 / 255.0 + + resize_op = utils.ResizeImage( + resize_short=256, interpolation=interpolation) + crop_op = utils.CropImage(size=(size, size)) + normalize_op = utils.NormalizeImage( + scale=img_scale, mean=img_mean, std=img_std) + totensor_op = utils.ToTensor() + + return [resize_op, crop_op, normalize_op, totensor_op] + + +def preprocess(data, ops): + for op in ops: + data = op(data) + return data + + +def main(): + args = parse_args() + operators = create_operators(args.interpolation) + # assign the place + place = 'gpu:{}'.format(ParallelEnv().dev_id) if args.use_gpu else 'cpu' + place = paddle.set_device(place) + + net = ResNet50() + load_dygraph_pretrain(net, args.pretrained_model) + + img = cv2.imread(args.image_file, cv2.IMREAD_COLOR) + data = preprocess(img, operators) + data = np.expand_dims(data, axis=0) + data = paddle.to_tensor(data) + net.eval() + _, fm = net(data) + assert args.channel_num >= 0 and args.channel_num <= fm.shape[ + 1], "the channel is out of the range, should be in {} but got {}".format( + [0, fm.shape[1]], args.channel_num) + + fm = (np.squeeze(fm[0][args.channel_num].numpy()) * 255).astype(np.uint8) + fm = cv2.resize(fm, (img.shape[1], img.shape[0])) + if args.save_path is not None: + print("the feature map is saved in path: {}".format(args.save_path)) + cv2.imwrite(args.save_path, fm) + + +if __name__ == "__main__": + main() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/feature_maps_visualization/resnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/feature_maps_visualization/resnet.py new file mode 100644 index 000000000..b75881414 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/feature_maps_visualization/resnet.py @@ -0,0 +1,535 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. 
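The fm_vis.py script above is a small CLI; following its parse_args(), a typical invocation (all paths and the channel index are hypothetical) would look like: python fm_vis.py -i ./demo.jpg -c 5 -p ./ResNet50_pretrained --save_path ./fm_5.png --use_gpu True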
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math + +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "ResNet18": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams", + "ResNet18_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_vd_pretrained.pdparams", + "ResNet34": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_pretrained.pdparams", + "ResNet34_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_vd_pretrained.pdparams", + "ResNet50": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams", + "ResNet50_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams", + "ResNet101": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_pretrained.pdparams", + "ResNet101_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_pretrained.pdparams", + "ResNet152": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_pretrained.pdparams", + "ResNet152_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_vd_pretrained.pdparams", + "ResNet200_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet200_vd_pretrained.pdparams", +} + +__all__ = MODEL_URLS.keys() +''' +ResNet config: dict. + key: depth of ResNet. + values: config's dict of specific model. + keys: + block_type: Two different blocks in ResNet, BasicBlock and BottleneckBlock are optional. + block_depth: The number of blocks in different stages in ResNet. + num_channels: The number of channels to enter the next stage. 
+''' +NET_CONFIG = { + "18": { + "block_type": "BasicBlock", + "block_depth": [2, 2, 2, 2], + "num_channels": [64, 64, 128, 256] + }, + "34": { + "block_type": "BasicBlock", + "block_depth": [3, 4, 6, 3], + "num_channels": [64, 64, 128, 256] + }, + "50": { + "block_type": "BottleneckBlock", + "block_depth": [3, 4, 6, 3], + "num_channels": [64, 256, 512, 1024] + }, + "101": { + "block_type": "BottleneckBlock", + "block_depth": [3, 4, 23, 3], + "num_channels": [64, 256, 512, 1024] + }, + "152": { + "block_type": "BottleneckBlock", + "block_depth": [3, 8, 36, 3], + "num_channels": [64, 256, 512, 1024] + }, + "200": { + "block_type": "BottleneckBlock", + "block_depth": [3, 12, 48, 3], + "num_channels": [64, 256, 512, 1024] + }, +} + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + self.is_vd_mode = is_vd_mode + self.act = act + self.avg_pool = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=False, + data_format=data_format) + self.bn = BatchNorm( + num_filters, + param_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=ParamAttr(learning_rate=lr_mult), + data_layout=data_format) + self.relu = nn.ReLU() + + def forward(self, x): + if self.is_vd_mode: + x = self.avg_pool(x) + x = self.conv(x) + x = self.bn(x) + if self.act: + x = self.relu(x) + return x + + +class BottleneckBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + lr_mult=lr_mult, + data_format=data_format) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride if if_first else 1, + is_vd_mode=False if if_first else True, + lr_mult=lr_mult, + data_format=data_format) + self.relu = nn.ReLU() + self.shortcut = shortcut + + def forward(self, x): + identity = x + x = self.conv0(x) + x = self.conv1(x) + x = self.conv2(x) + + if self.shortcut: + short = identity + else: + short = self.short(identity) + x = paddle.add(x=x, y=short) + x = self.relu(x) + return x + + +class BasicBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + + self.stride = stride + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + act=None, + lr_mult=lr_mult, + data_format=data_format) + if not shortcut: + self.short = ConvBNLayer( + 
num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=stride if if_first else 1, + is_vd_mode=False if if_first else True, + lr_mult=lr_mult, + data_format=data_format) + self.shortcut = shortcut + self.relu = nn.ReLU() + + def forward(self, x): + identity = x + x = self.conv0(x) + x = self.conv1(x) + if self.shortcut: + short = identity + else: + short = self.short(identity) + x = paddle.add(x=x, y=short) + x = self.relu(x) + return x + + +class ResNet(TheseusLayer): + """ + ResNet + Args: + config: dict. config of ResNet. + version: str="vb". Different version of ResNet, version vd can perform better. + class_num: int=1000. The number of classes. + lr_mult_list: list. Control the learning rate of different stages. + Returns: + model: nn.Layer. Specific ResNet model depends on args. + """ + + def __init__(self, + config, + version="vb", + class_num=1000, + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0], + data_format="NCHW", + input_image_channel=3, + return_patterns=None): + super().__init__() + + self.cfg = config + self.lr_mult_list = lr_mult_list + self.is_vd_mode = version == "vd" + self.class_num = class_num + self.num_filters = [64, 128, 256, 512] + self.block_depth = self.cfg["block_depth"] + self.block_type = self.cfg["block_type"] + self.num_channels = self.cfg["num_channels"] + self.channels_mult = 1 if self.num_channels[-1] == 256 else 4 + + assert isinstance(self.lr_mult_list, ( + list, tuple + )), "lr_mult_list should be in (list, tuple) but got {}".format( + type(self.lr_mult_list)) + assert len(self.lr_mult_list + ) == 5, "lr_mult_list length should be 5 but got {}".format( + len(self.lr_mult_list)) + + self.stem_cfg = { + #num_channels, num_filters, filter_size, stride + "vb": [[input_image_channel, 64, 7, 2]], + "vd": + [[input_image_channel, 32, 3, 2], [32, 32, 3, 1], [32, 64, 3, 1]] + } + + self.stem = nn.Sequential(* [ + ConvBNLayer( + num_channels=in_c, + num_filters=out_c, + filter_size=k, + stride=s, + act="relu", + lr_mult=self.lr_mult_list[0], + data_format=data_format) + for in_c, out_c, k, s in self.stem_cfg[version] + ]) + + self.max_pool = MaxPool2D( + kernel_size=3, stride=2, padding=1, data_format=data_format) + block_list = [] + for block_idx in range(len(self.block_depth)): + shortcut = False + for i in range(self.block_depth[block_idx]): + block_list.append(globals()[self.block_type]( + num_channels=self.num_channels[block_idx] if i == 0 else + self.num_filters[block_idx] * self.channels_mult, + num_filters=self.num_filters[block_idx], + stride=2 if i == 0 and block_idx != 0 else 1, + shortcut=shortcut, + if_first=block_idx == i == 0 if version == "vd" else True, + lr_mult=self.lr_mult_list[block_idx + 1], + data_format=data_format)) + shortcut = True + self.blocks = nn.Sequential(*block_list) + + self.avg_pool = AdaptiveAvgPool2D(1, data_format=data_format) + self.flatten = nn.Flatten() + self.avg_pool_channels = self.num_channels[-1] * 2 + stdv = 1.0 / math.sqrt(self.avg_pool_channels * 1.0) + self.fc = Linear( + self.avg_pool_channels, + self.class_num, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + + self.data_format = data_format + if return_patterns is not None: + self.update_res(return_patterns) + self.register_forward_post_hook(self._return_dict_hook) + + def forward(self, x): + with paddle.static.amp.fp16_guard(): + if self.data_format == "NHWC": + x = paddle.transpose(x, [0, 2, 3, 1]) + x.stop_gradient = True + x = self.stem(x) + fm = x + x = self.max_pool(x) + x = self.blocks(x) + x = self.avg_pool(x) + x = 
self.flatten(x) + x = self.fc(x) + return x, fm + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNet18(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet18 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet18` model depends on args. + """ + model = ResNet(config=NET_CONFIG["18"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18"], use_ssld) + return model + + +def ResNet18_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet18_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet18_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["18"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18_vd"], use_ssld) + return model + + +def ResNet34(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet34 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet34` model depends on args. + """ + model = ResNet(config=NET_CONFIG["34"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet34"], use_ssld) + return model + + +def ResNet34_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet34_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet34_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["34"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet34_vd"], use_ssld) + return model + + +def ResNet50(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet50 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet50` model depends on args. + """ + model = ResNet(config=NET_CONFIG["50"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50"], use_ssld) + return model + + +def ResNet50_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet50_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. 
+ Returns: + model: nn.Layer. Specific `ResNet50_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["50"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50_vd"], use_ssld) + return model + + +def ResNet101(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet101 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet101` model depends on args. + """ + model = ResNet(config=NET_CONFIG["101"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet101"], use_ssld) + return model + + +def ResNet101_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet101_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet101_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["101"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet101_vd"], use_ssld) + return model + + +def ResNet152(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet152 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet152` model depends on args. + """ + model = ResNet(config=NET_CONFIG["152"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet152"], use_ssld) + return model + + +def ResNet152_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet152_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet152_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["152"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet152_vd"], use_ssld) + return model + + +def ResNet200_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet200_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet200_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["200"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet200_vd"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/feature_maps_visualization/utils.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/feature_maps_visualization/utils.py new file mode 100644 index 000000000..7c7014932 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/feature_maps_visualization/utils.py @@ -0,0 +1,85 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
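As a minimal sketch of the visualization ResNet defined above (the variant whose forward() returns both the logits and the stem feature map), assuming it is run from the feature_maps_visualization directory so that the imports resolve as in fm_vis.py:

    import paddle
    from resnet import ResNet50

    net = ResNet50()                 # class_num defaults to 1000
    net.eval()
    x = paddle.rand([1, 3, 224, 224])
    logits, fm = net(x)              # fm is the output of the 7x7 stride-2 stem
    print(logits.shape, fm.shape)    # roughly [1, 1000] and [1, 64, 112, 112]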
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import cv2 +import numpy as np + + +class DecodeImage(object): + def __init__(self, to_rgb=True): + self.to_rgb = to_rgb + + def __call__(self, img): + data = np.frombuffer(img, dtype='uint8') + img = cv2.imdecode(data, 1) + if self.to_rgb: + assert img.shape[2] == 3, 'invalid shape of image[%s]' % ( + img.shape) + img = img[:, :, ::-1] + + return img + + +class ResizeImage(object): + def __init__(self, resize_short=None, interpolation=1): + self.resize_short = resize_short + self.interpolation = interpolation + + def __call__(self, img): + img_h, img_w = img.shape[:2] + percent = float(self.resize_short) / min(img_w, img_h) + w = int(round(img_w * percent)) + h = int(round(img_h * percent)) + return cv2.resize(img, (w, h), interpolation=self.interpolation) + + +class CropImage(object): + def __init__(self, size): + if type(size) is int: + self.size = (size, size) + else: + self.size = size + + def __call__(self, img): + w, h = self.size + img_h, img_w = img.shape[:2] + w_start = (img_w - w) // 2 + h_start = (img_h - h) // 2 + + w_end = w_start + w + h_end = h_start + h + return img[h_start:h_end, w_start:w_end, :] + + +class NormalizeImage(object): + def __init__(self, scale=None, mean=None, std=None): + self.scale = np.float32(scale if scale is not None else 1.0 / 255.0) + mean = mean if mean is not None else [0.485, 0.456, 0.406] + std = std if std is not None else [0.229, 0.224, 0.225] + + shape = (1, 1, 3) + self.mean = np.array(mean).reshape(shape).astype('float32') + self.std = np.array(std).reshape(shape).astype('float32') + + def __call__(self, img): + return (img.astype('float32') * self.scale - self.mean) / self.std + + +class ToTensor(object): + def __init__(self): + pass + + def __call__(self, img): + img = img.transpose((2, 0, 1)) + return img diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/gallery2fc.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/gallery2fc.py new file mode 100644 index 000000000..67b08529e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/gallery2fc.py @@ -0,0 +1,119 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
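The preprocessing ops above mirror the standard ImageNet evaluation pipeline; a minimal sketch of chaining them (the image path is hypothetical, and the import assumes the same directory layout as fm_vis.py):

    import cv2
    from utils import ResizeImage, CropImage, NormalizeImage, ToTensor

    ops = [
        ResizeImage(resize_short=256, interpolation=1),
        CropImage(size=(224, 224)),
        NormalizeImage(scale=1.0 / 255.0,
                       mean=[0.485, 0.456, 0.406],
                       std=[0.229, 0.224, 0.225]),
        ToTensor(),                          # HWC float32 -> CHW
    ]

    img = cv2.imread("demo.jpg", cv2.IMREAD_COLOR)   # hypothetical image
    for op in ops:
        img = op(img)
    print(img.shape)                         # (3, 224, 224)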
+ +import os +import paddle +import cv2 + +from ppcls.arch import build_model +from ppcls.utils.config import parse_config, parse_args +from ppcls.utils.save_load import load_dygraph_pretrain +from ppcls.utils.logger import init_logger +from ppcls.data import create_operators +from ppcls.arch.slim import quantize_model + + +class GalleryLayer(paddle.nn.Layer): + def __init__(self, configs): + super().__init__() + self.configs = configs + embedding_size = self.configs["Arch"]["Head"]["embedding_size"] + self.batch_size = self.configs["IndexProcess"]["batch_size"] + self.image_shape = self.configs["Global"]["image_shape"].copy() + self.image_shape.insert(0, self.batch_size) + + image_root = self.configs["IndexProcess"]["image_root"] + data_file = self.configs["IndexProcess"]["data_file"] + delimiter = self.configs["IndexProcess"]["delimiter"] + self.gallery_images = [] + gallery_docs = [] + gallery_labels = [] + + with open(data_file, 'r', encoding='utf-8') as f: + lines = f.readlines() + for ori_line in lines: + line = ori_line.strip().split(delimiter) + text_num = len(line) + assert text_num >= 2, f"line({ori_line}) must be splitted into at least 2 parts, but got {text_num}" + image_file = os.path.join(image_root, line[0]) + + self.gallery_images.append(image_file) + gallery_docs.append(ori_line.strip()) + gallery_labels.append(line[1].strip()) + self.gallery_layer = paddle.nn.Linear(embedding_size, len(self.gallery_images), bias_attr=False) + self.gallery_layer.skip_quant = True + output_label_str = "" + for i, label_i in enumerate(gallery_labels): + output_label_str += "{} {}\n".format(i, label_i) + output_path = configs["Global"]["save_inference_dir"] + "_label.txt" + with open(output_path, "w") as f: + f.write(output_label_str) + + def forward(self, x, label=None): + x = paddle.nn.functional.normalize(x) + x = self.gallery_layer(x) + return x + + def build_gallery_layer(self, feature_extractor): + transform_configs = self.configs["IndexProcess"]["transform_ops"] + preprocess_ops = create_operators(transform_configs) + embedding_size = self.configs["Arch"]["Head"]["embedding_size"] + batch_index = 0 + input_tensor = paddle.zeros(self.image_shape) + gallery_feature = paddle.zeros((len(self.gallery_images), embedding_size)) + for i, image_path in enumerate(self.gallery_images): + image = cv2.imread(image_path)[:, :, ::-1] + for op in preprocess_ops: + image = op(image) + input_tensor[batch_index] = image + batch_index += 1 + if batch_index == self.batch_size or i == len(self.gallery_images) - 1: + batch_feature = feature_extractor(input_tensor)["features"] + for j in range(batch_index): + feature = batch_feature[j] + norm_feature = paddle.nn.functional.normalize(feature, axis=0) + gallery_feature[i - batch_index + j + 1] = norm_feature + self.gallery_layer.set_state_dict({"_layer.weight": gallery_feature.T}) + + +def export_fuse_model(configs): + slim_config = configs["Slim"].copy() + configs["Slim"] = None + fuse_model = build_model(configs) + fuse_model.head = GalleryLayer(configs) + configs["Slim"] = slim_config + quantize_model(configs, fuse_model) + load_dygraph_pretrain(fuse_model, configs["Global"]["pretrained_model"]) + fuse_model.eval() + fuse_model.head.build_gallery_layer(fuse_model) + save_path = configs["Global"]["save_inference_dir"] + fuse_model.quanter.save_quantized_model( + fuse_model, + save_path, + input_spec=[ + paddle.static.InputSpec( + shape=[None] + configs["Global"]["image_shape"], + dtype='float32') + ]) + + +def main(): + args = parse_args() + configs = 
parse_config(args.config) + init_logger(name='gallery2fc') + export_fuse_model(configs) + + +if __name__ == '__main__': + main() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/imagenet1k_label_list.txt b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/imagenet1k_label_list.txt new file mode 100644 index 000000000..376e18021 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/imagenet1k_label_list.txt @@ -0,0 +1,1000 @@ +0 tench, Tinca tinca +1 goldfish, Carassius auratus +2 great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias +3 tiger shark, Galeocerdo cuvieri +4 hammerhead, hammerhead shark +5 electric ray, crampfish, numbfish, torpedo +6 stingray +7 cock +8 hen +9 ostrich, Struthio camelus +10 brambling, Fringilla montifringilla +11 goldfinch, Carduelis carduelis +12 house finch, linnet, Carpodacus mexicanus +13 junco, snowbird +14 indigo bunting, indigo finch, indigo bird, Passerina cyanea +15 robin, American robin, Turdus migratorius +16 bulbul +17 jay +18 magpie +19 chickadee +20 water ouzel, dipper +21 kite +22 bald eagle, American eagle, Haliaeetus leucocephalus +23 vulture +24 great grey owl, great gray owl, Strix nebulosa +25 European fire salamander, Salamandra salamandra +26 common newt, Triturus vulgaris +27 eft +28 spotted salamander, Ambystoma maculatum +29 axolotl, mud puppy, Ambystoma mexicanum +30 bullfrog, Rana catesbeiana +31 tree frog, tree-frog +32 tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui +33 loggerhead, loggerhead turtle, Caretta caretta +34 leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea +35 mud turtle +36 terrapin +37 box turtle, box tortoise +38 banded gecko +39 common iguana, iguana, Iguana iguana +40 American chameleon, anole, Anolis carolinensis +41 whiptail, whiptail lizard +42 agama +43 frilled lizard, Chlamydosaurus kingi +44 alligator lizard +45 Gila monster, Heloderma suspectum +46 green lizard, Lacerta viridis +47 African chameleon, Chamaeleo chamaeleon +48 Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis +49 African crocodile, Nile crocodile, Crocodylus niloticus +50 American alligator, Alligator mississipiensis +51 triceratops +52 thunder snake, worm snake, Carphophis amoenus +53 ringneck snake, ring-necked snake, ring snake +54 hognose snake, puff adder, sand viper +55 green snake, grass snake +56 king snake, kingsnake +57 garter snake, grass snake +58 water snake +59 vine snake +60 night snake, Hypsiglena torquata +61 boa constrictor, Constrictor constrictor +62 rock python, rock snake, Python sebae +63 Indian cobra, Naja naja +64 green mamba +65 sea snake +66 horned viper, cerastes, sand viper, horned asp, Cerastes cornutus +67 diamondback, diamondback rattlesnake, Crotalus adamanteus +68 sidewinder, horned rattlesnake, Crotalus cerastes +69 trilobite +70 harvestman, daddy longlegs, Phalangium opilio +71 scorpion +72 black and gold garden spider, Argiope aurantia +73 barn spider, Araneus cavaticus +74 garden spider, Aranea diademata +75 black widow, Latrodectus mactans +76 tarantula +77 wolf spider, hunting spider +78 tick +79 centipede +80 black grouse +81 ptarmigan +82 ruffed grouse, partridge, Bonasa umbellus +83 prairie chicken, prairie grouse, prairie fowl +84 peacock +85 quail +86 partridge +87 African grey, African gray, Psittacus erithacus +88 macaw +89 sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita +90 lorikeet +91 coucal +92 bee eater +93 hornbill +94 hummingbird +95 
jacamar +96 toucan +97 drake +98 red-breasted merganser, Mergus serrator +99 goose +100 black swan, Cygnus atratus +101 tusker +102 echidna, spiny anteater, anteater +103 platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus +104 wallaby, brush kangaroo +105 koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus +106 wombat +107 jellyfish +108 sea anemone, anemone +109 brain coral +110 flatworm, platyhelminth +111 nematode, nematode worm, roundworm +112 conch +113 snail +114 slug +115 sea slug, nudibranch +116 chiton, coat-of-mail shell, sea cradle, polyplacophore +117 chambered nautilus, pearly nautilus, nautilus +118 Dungeness crab, Cancer magister +119 rock crab, Cancer irroratus +120 fiddler crab +121 king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica +122 American lobster, Northern lobster, Maine lobster, Homarus americanus +123 spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish +124 crayfish, crawfish, crawdad, crawdaddy +125 hermit crab +126 isopod +127 white stork, Ciconia ciconia +128 black stork, Ciconia nigra +129 spoonbill +130 flamingo +131 little blue heron, Egretta caerulea +132 American egret, great white heron, Egretta albus +133 bittern +134 crane +135 limpkin, Aramus pictus +136 European gallinule, Porphyrio porphyrio +137 American coot, marsh hen, mud hen, water hen, Fulica americana +138 bustard +139 ruddy turnstone, Arenaria interpres +140 red-backed sandpiper, dunlin, Erolia alpina +141 redshank, Tringa totanus +142 dowitcher +143 oystercatcher, oyster catcher +144 pelican +145 king penguin, Aptenodytes patagonica +146 albatross, mollymawk +147 grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus +148 killer whale, killer, orca, grampus, sea wolf, Orcinus orca +149 dugong, Dugong dugon +150 sea lion +151 Chihuahua +152 Japanese spaniel +153 Maltese dog, Maltese terrier, Maltese +154 Pekinese, Pekingese, Peke +155 Shih-Tzu +156 Blenheim spaniel +157 papillon +158 toy terrier +159 Rhodesian ridgeback +160 Afghan hound, Afghan +161 basset, basset hound +162 beagle +163 bloodhound, sleuthhound +164 bluetick +165 black-and-tan coonhound +166 Walker hound, Walker foxhound +167 English foxhound +168 redbone +169 borzoi, Russian wolfhound +170 Irish wolfhound +171 Italian greyhound +172 whippet +173 Ibizan hound, Ibizan Podenco +174 Norwegian elkhound, elkhound +175 otterhound, otter hound +176 Saluki, gazelle hound +177 Scottish deerhound, deerhound +178 Weimaraner +179 Staffordshire bullterrier, Staffordshire bull terrier +180 American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier +181 Bedlington terrier +182 Border terrier +183 Kerry blue terrier +184 Irish terrier +185 Norfolk terrier +186 Norwich terrier +187 Yorkshire terrier +188 wire-haired fox terrier +189 Lakeland terrier +190 Sealyham terrier, Sealyham +191 Airedale, Airedale terrier +192 cairn, cairn terrier +193 Australian terrier +194 Dandie Dinmont, Dandie Dinmont terrier +195 Boston bull, Boston terrier +196 miniature schnauzer +197 giant schnauzer +198 standard schnauzer +199 Scotch terrier, Scottish terrier, Scottie +200 Tibetan terrier, chrysanthemum dog +201 silky terrier, Sydney silky +202 soft-coated wheaten terrier +203 West Highland white terrier +204 Lhasa, Lhasa apso +205 flat-coated retriever +206 curly-coated retriever +207 golden retriever +208 Labrador retriever +209 Chesapeake Bay retriever +210 German short-haired 
pointer +211 vizsla, Hungarian pointer +212 English setter +213 Irish setter, red setter +214 Gordon setter +215 Brittany spaniel +216 clumber, clumber spaniel +217 English springer, English springer spaniel +218 Welsh springer spaniel +219 cocker spaniel, English cocker spaniel, cocker +220 Sussex spaniel +221 Irish water spaniel +222 kuvasz +223 schipperke +224 groenendael +225 malinois +226 briard +227 kelpie +228 komondor +229 Old English sheepdog, bobtail +230 Shetland sheepdog, Shetland sheep dog, Shetland +231 collie +232 Border collie +233 Bouvier des Flandres, Bouviers des Flandres +234 Rottweiler +235 German shepherd, German shepherd dog, German police dog, alsatian +236 Doberman, Doberman pinscher +237 miniature pinscher +238 Greater Swiss Mountain dog +239 Bernese mountain dog +240 Appenzeller +241 EntleBucher +242 boxer +243 bull mastiff +244 Tibetan mastiff +245 French bulldog +246 Great Dane +247 Saint Bernard, St Bernard +248 Eskimo dog, husky +249 malamute, malemute, Alaskan malamute +250 Siberian husky +251 dalmatian, coach dog, carriage dog +252 affenpinscher, monkey pinscher, monkey dog +253 basenji +254 pug, pug-dog +255 Leonberg +256 Newfoundland, Newfoundland dog +257 Great Pyrenees +258 Samoyed, Samoyede +259 Pomeranian +260 chow, chow chow +261 keeshond +262 Brabancon griffon +263 Pembroke, Pembroke Welsh corgi +264 Cardigan, Cardigan Welsh corgi +265 toy poodle +266 miniature poodle +267 standard poodle +268 Mexican hairless +269 timber wolf, grey wolf, gray wolf, Canis lupus +270 white wolf, Arctic wolf, Canis lupus tundrarum +271 red wolf, maned wolf, Canis rufus, Canis niger +272 coyote, prairie wolf, brush wolf, Canis latrans +273 dingo, warrigal, warragal, Canis dingo +274 dhole, Cuon alpinus +275 African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus +276 hyena, hyaena +277 red fox, Vulpes vulpes +278 kit fox, Vulpes macrotis +279 Arctic fox, white fox, Alopex lagopus +280 grey fox, gray fox, Urocyon cinereoargenteus +281 tabby, tabby cat +282 tiger cat +283 Persian cat +284 Siamese cat, Siamese +285 Egyptian cat +286 cougar, puma, catamount, mountain lion, painter, panther, Felis concolor +287 lynx, catamount +288 leopard, Panthera pardus +289 snow leopard, ounce, Panthera uncia +290 jaguar, panther, Panthera onca, Felis onca +291 lion, king of beasts, Panthera leo +292 tiger, Panthera tigris +293 cheetah, chetah, Acinonyx jubatus +294 brown bear, bruin, Ursus arctos +295 American black bear, black bear, Ursus americanus, Euarctos americanus +296 ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus +297 sloth bear, Melursus ursinus, Ursus ursinus +298 mongoose +299 meerkat, mierkat +300 tiger beetle +301 ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle +302 ground beetle, carabid beetle +303 long-horned beetle, longicorn, longicorn beetle +304 leaf beetle, chrysomelid +305 dung beetle +306 rhinoceros beetle +307 weevil +308 fly +309 bee +310 ant, emmet, pismire +311 grasshopper, hopper +312 cricket +313 walking stick, walkingstick, stick insect +314 cockroach, roach +315 mantis, mantid +316 cicada, cicala +317 leafhopper +318 lacewing, lacewing fly +319 dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk +320 damselfly +321 admiral +322 ringlet, ringlet butterfly +323 monarch, monarch butterfly, milkweed butterfly, Danaus plexippus +324 cabbage butterfly +325 sulphur butterfly, sulfur butterfly +326 lycaenid, lycaenid butterfly +327 starfish, sea star +328 sea 
urchin +329 sea cucumber, holothurian +330 wood rabbit, cottontail, cottontail rabbit +331 hare +332 Angora, Angora rabbit +333 hamster +334 porcupine, hedgehog +335 fox squirrel, eastern fox squirrel, Sciurus niger +336 marmot +337 beaver +338 guinea pig, Cavia cobaya +339 sorrel +340 zebra +341 hog, pig, grunter, squealer, Sus scrofa +342 wild boar, boar, Sus scrofa +343 warthog +344 hippopotamus, hippo, river horse, Hippopotamus amphibius +345 ox +346 water buffalo, water ox, Asiatic buffalo, Bubalus bubalis +347 bison +348 ram, tup +349 bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis +350 ibex, Capra ibex +351 hartebeest +352 impala, Aepyceros melampus +353 gazelle +354 Arabian camel, dromedary, Camelus dromedarius +355 llama +356 weasel +357 mink +358 polecat, fitch, foulmart, foumart, Mustela putorius +359 black-footed ferret, ferret, Mustela nigripes +360 otter +361 skunk, polecat, wood pussy +362 badger +363 armadillo +364 three-toed sloth, ai, Bradypus tridactylus +365 orangutan, orang, orangutang, Pongo pygmaeus +366 gorilla, Gorilla gorilla +367 chimpanzee, chimp, Pan troglodytes +368 gibbon, Hylobates lar +369 siamang, Hylobates syndactylus, Symphalangus syndactylus +370 guenon, guenon monkey +371 patas, hussar monkey, Erythrocebus patas +372 baboon +373 macaque +374 langur +375 colobus, colobus monkey +376 proboscis monkey, Nasalis larvatus +377 marmoset +378 capuchin, ringtail, Cebus capucinus +379 howler monkey, howler +380 titi, titi monkey +381 spider monkey, Ateles geoffroyi +382 squirrel monkey, Saimiri sciureus +383 Madagascar cat, ring-tailed lemur, Lemur catta +384 indri, indris, Indri indri, Indri brevicaudatus +385 Indian elephant, Elephas maximus +386 African elephant, Loxodonta africana +387 lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens +388 giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca +389 barracouta, snoek +390 eel +391 coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch +392 rock beauty, Holocanthus tricolor +393 anemone fish +394 sturgeon +395 gar, garfish, garpike, billfish, Lepisosteus osseus +396 lionfish +397 puffer, pufferfish, blowfish, globefish +398 abacus +399 abaya +400 academic gown, academic robe, judge's robe +401 accordion, piano accordion, squeeze box +402 acoustic guitar +403 aircraft carrier, carrier, flattop, attack aircraft carrier +404 airliner +405 airship, dirigible +406 altar +407 ambulance +408 amphibian, amphibious vehicle +409 analog clock +410 apiary, bee house +411 apron +412 ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin +413 assault rifle, assault gun +414 backpack, back pack, knapsack, packsack, rucksack, haversack +415 bakery, bakeshop, bakehouse +416 balance beam, beam +417 balloon +418 ballpoint, ballpoint pen, ballpen, Biro +419 Band Aid +420 banjo +421 bannister, banister, balustrade, balusters, handrail +422 barbell +423 barber chair +424 barbershop +425 barn +426 barometer +427 barrel, cask +428 barrow, garden cart, lawn cart, wheelbarrow +429 baseball +430 basketball +431 bassinet +432 bassoon +433 bathing cap, swimming cap +434 bath towel +435 bathtub, bathing tub, bath, tub +436 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon +437 beacon, lighthouse, beacon light, pharos +438 beaker +439 bearskin, busby, shako +440 beer bottle +441 beer glass +442 bell cote, bell cot +443 bib +444 bicycle-built-for-two, tandem bicycle, 
tandem +445 bikini, two-piece +446 binder, ring-binder +447 binoculars, field glasses, opera glasses +448 birdhouse +449 boathouse +450 bobsled, bobsleigh, bob +451 bolo tie, bolo, bola tie, bola +452 bonnet, poke bonnet +453 bookcase +454 bookshop, bookstore, bookstall +455 bottlecap +456 bow +457 bow tie, bow-tie, bowtie +458 brass, memorial tablet, plaque +459 brassiere, bra, bandeau +460 breakwater, groin, groyne, mole, bulwark, seawall, jetty +461 breastplate, aegis, egis +462 broom +463 bucket, pail +464 buckle +465 bulletproof vest +466 bullet train, bullet +467 butcher shop, meat market +468 cab, hack, taxi, taxicab +469 caldron, cauldron +470 candle, taper, wax light +471 cannon +472 canoe +473 can opener, tin opener +474 cardigan +475 car mirror +476 carousel, carrousel, merry-go-round, roundabout, whirligig +477 carpenter's kit, tool kit +478 carton +479 car wheel +480 cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM +481 cassette +482 cassette player +483 castle +484 catamaran +485 CD player +486 cello, violoncello +487 cellular telephone, cellular phone, cellphone, cell, mobile phone +488 chain +489 chainlink fence +490 chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour +491 chain saw, chainsaw +492 chest +493 chiffonier, commode +494 chime, bell, gong +495 china cabinet, china closet +496 Christmas stocking +497 church, church building +498 cinema, movie theater, movie theatre, movie house, picture palace +499 cleaver, meat cleaver, chopper +500 cliff dwelling +501 cloak +502 clog, geta, patten, sabot +503 cocktail shaker +504 coffee mug +505 coffeepot +506 coil, spiral, volute, whorl, helix +507 combination lock +508 computer keyboard, keypad +509 confectionery, confectionary, candy store +510 container ship, containership, container vessel +511 convertible +512 corkscrew, bottle screw +513 cornet, horn, trumpet, trump +514 cowboy boot +515 cowboy hat, ten-gallon hat +516 cradle +517 crane +518 crash helmet +519 crate +520 crib, cot +521 Crock Pot +522 croquet ball +523 crutch +524 cuirass +525 dam, dike, dyke +526 desk +527 desktop computer +528 dial telephone, dial phone +529 diaper, nappy, napkin +530 digital clock +531 digital watch +532 dining table, board +533 dishrag, dishcloth +534 dishwasher, dish washer, dishwashing machine +535 disk brake, disc brake +536 dock, dockage, docking facility +537 dogsled, dog sled, dog sleigh +538 dome +539 doormat, welcome mat +540 drilling platform, offshore rig +541 drum, membranophone, tympan +542 drumstick +543 dumbbell +544 Dutch oven +545 electric fan, blower +546 electric guitar +547 electric locomotive +548 entertainment center +549 envelope +550 espresso maker +551 face powder +552 feather boa, boa +553 file, file cabinet, filing cabinet +554 fireboat +555 fire engine, fire truck +556 fire screen, fireguard +557 flagpole, flagstaff +558 flute, transverse flute +559 folding chair +560 football helmet +561 forklift +562 fountain +563 fountain pen +564 four-poster +565 freight car +566 French horn, horn +567 frying pan, frypan, skillet +568 fur coat +569 garbage truck, dustcart +570 gasmask, respirator, gas helmet +571 gas pump, gasoline pump, petrol pump, island dispenser +572 goblet +573 go-kart +574 golf ball +575 golfcart, golf cart +576 gondola +577 gong, tam-tam +578 gown +579 grand piano, grand +580 greenhouse, nursery, glasshouse +581 grille, radiator grille +582 grocery store, grocery, food market, market +583 
guillotine +584 hair slide +585 hair spray +586 half track +587 hammer +588 hamper +589 hand blower, blow dryer, blow drier, hair dryer, hair drier +590 hand-held computer, hand-held microcomputer +591 handkerchief, hankie, hanky, hankey +592 hard disc, hard disk, fixed disk +593 harmonica, mouth organ, harp, mouth harp +594 harp +595 harvester, reaper +596 hatchet +597 holster +598 home theater, home theatre +599 honeycomb +600 hook, claw +601 hoopskirt, crinoline +602 horizontal bar, high bar +603 horse cart, horse-cart +604 hourglass +605 iPod +606 iron, smoothing iron +607 jack-o'-lantern +608 jean, blue jean, denim +609 jeep, landrover +610 jersey, T-shirt, tee shirt +611 jigsaw puzzle +612 jinrikisha, ricksha, rickshaw +613 joystick +614 kimono +615 knee pad +616 knot +617 lab coat, laboratory coat +618 ladle +619 lampshade, lamp shade +620 laptop, laptop computer +621 lawn mower, mower +622 lens cap, lens cover +623 letter opener, paper knife, paperknife +624 library +625 lifeboat +626 lighter, light, igniter, ignitor +627 limousine, limo +628 liner, ocean liner +629 lipstick, lip rouge +630 Loafer +631 lotion +632 loudspeaker, speaker, speaker unit, loudspeaker system, speaker system +633 loupe, jeweler's loupe +634 lumbermill, sawmill +635 magnetic compass +636 mailbag, postbag +637 mailbox, letter box +638 maillot +639 maillot, tank suit +640 manhole cover +641 maraca +642 marimba, xylophone +643 mask +644 matchstick +645 maypole +646 maze, labyrinth +647 measuring cup +648 medicine chest, medicine cabinet +649 megalith, megalithic structure +650 microphone, mike +651 microwave, microwave oven +652 military uniform +653 milk can +654 minibus +655 miniskirt, mini +656 minivan +657 missile +658 mitten +659 mixing bowl +660 mobile home, manufactured home +661 Model T +662 modem +663 monastery +664 monitor +665 moped +666 mortar +667 mortarboard +668 mosque +669 mosquito net +670 motor scooter, scooter +671 mountain bike, all-terrain bike, off-roader +672 mountain tent +673 mouse, computer mouse +674 mousetrap +675 moving van +676 muzzle +677 nail +678 neck brace +679 necklace +680 nipple +681 notebook, notebook computer +682 obelisk +683 oboe, hautboy, hautbois +684 ocarina, sweet potato +685 odometer, hodometer, mileometer, milometer +686 oil filter +687 organ, pipe organ +688 oscilloscope, scope, cathode-ray oscilloscope, CRO +689 overskirt +690 oxcart +691 oxygen mask +692 packet +693 paddle, boat paddle +694 paddlewheel, paddle wheel +695 padlock +696 paintbrush +697 pajama, pyjama, pj's, jammies +698 palace +699 panpipe, pandean pipe, syrinx +700 paper towel +701 parachute, chute +702 parallel bars, bars +703 park bench +704 parking meter +705 passenger car, coach, carriage +706 patio, terrace +707 pay-phone, pay-station +708 pedestal, plinth, footstall +709 pencil box, pencil case +710 pencil sharpener +711 perfume, essence +712 Petri dish +713 photocopier +714 pick, plectrum, plectron +715 pickelhaube +716 picket fence, paling +717 pickup, pickup truck +718 pier +719 piggy bank, penny bank +720 pill bottle +721 pillow +722 ping-pong ball +723 pinwheel +724 pirate, pirate ship +725 pitcher, ewer +726 plane, carpenter's plane, woodworking plane +727 planetarium +728 plastic bag +729 plate rack +730 plow, plough +731 plunger, plumber's helper +732 Polaroid camera, Polaroid Land camera +733 pole +734 police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria +735 poncho +736 pool table, billiard table, snooker table +737 pop bottle, soda bottle +738 pot, flowerpot 
+739 potter's wheel +740 power drill +741 prayer rug, prayer mat +742 printer +743 prison, prison house +744 projectile, missile +745 projector +746 puck, hockey puck +747 punching bag, punch bag, punching ball, punchball +748 purse +749 quill, quill pen +750 quilt, comforter, comfort, puff +751 racer, race car, racing car +752 racket, racquet +753 radiator +754 radio, wireless +755 radio telescope, radio reflector +756 rain barrel +757 recreational vehicle, RV, R.V. +758 reel +759 reflex camera +760 refrigerator, icebox +761 remote control, remote +762 restaurant, eating house, eating place, eatery +763 revolver, six-gun, six-shooter +764 rifle +765 rocking chair, rocker +766 rotisserie +767 rubber eraser, rubber, pencil eraser +768 rugby ball +769 rule, ruler +770 running shoe +771 safe +772 safety pin +773 saltshaker, salt shaker +774 sandal +775 sarong +776 sax, saxophone +777 scabbard +778 scale, weighing machine +779 school bus +780 schooner +781 scoreboard +782 screen, CRT screen +783 screw +784 screwdriver +785 seat belt, seatbelt +786 sewing machine +787 shield, buckler +788 shoe shop, shoe-shop, shoe store +789 shoji +790 shopping basket +791 shopping cart +792 shovel +793 shower cap +794 shower curtain +795 ski +796 ski mask +797 sleeping bag +798 slide rule, slipstick +799 sliding door +800 slot, one-armed bandit +801 snorkel +802 snowmobile +803 snowplow, snowplough +804 soap dispenser +805 soccer ball +806 sock +807 solar dish, solar collector, solar furnace +808 sombrero +809 soup bowl +810 space bar +811 space heater +812 space shuttle +813 spatula +814 speedboat +815 spider web, spider's web +816 spindle +817 sports car, sport car +818 spotlight, spot +819 stage +820 steam locomotive +821 steel arch bridge +822 steel drum +823 stethoscope +824 stole +825 stone wall +826 stopwatch, stop watch +827 stove +828 strainer +829 streetcar, tram, tramcar, trolley, trolley car +830 stretcher +831 studio couch, day bed +832 stupa, tope +833 submarine, pigboat, sub, U-boat +834 suit, suit of clothes +835 sundial +836 sunglass +837 sunglasses, dark glasses, shades +838 sunscreen, sunblock, sun blocker +839 suspension bridge +840 swab, swob, mop +841 sweatshirt +842 swimming trunks, bathing trunks +843 swing +844 switch, electric switch, electrical switch +845 syringe +846 table lamp +847 tank, army tank, armored combat vehicle, armoured combat vehicle +848 tape player +849 teapot +850 teddy, teddy bear +851 television, television system +852 tennis ball +853 thatch, thatched roof +854 theater curtain, theatre curtain +855 thimble +856 thresher, thrasher, threshing machine +857 throne +858 tile roof +859 toaster +860 tobacco shop, tobacconist shop, tobacconist +861 toilet seat +862 torch +863 totem pole +864 tow truck, tow car, wrecker +865 toyshop +866 tractor +867 trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi +868 tray +869 trench coat +870 tricycle, trike, velocipede +871 trimaran +872 tripod +873 triumphal arch +874 trolleybus, trolley coach, trackless trolley +875 trombone +876 tub, vat +877 turnstile +878 typewriter keyboard +879 umbrella +880 unicycle, monocycle +881 upright, upright piano +882 vacuum, vacuum cleaner +883 vase +884 vault +885 velvet +886 vending machine +887 vestment +888 viaduct +889 violin, fiddle +890 volleyball +891 waffle iron +892 wall clock +893 wallet, billfold, notecase, pocketbook +894 wardrobe, closet, press +895 warplane, military plane +896 washbasin, handbasin, washbowl, lavabo, wash-hand basin +897 washer, automatic 
washer, washing machine +898 water bottle +899 water jug +900 water tower +901 whiskey jug +902 whistle +903 wig +904 window screen +905 window shade +906 Windsor tie +907 wine bottle +908 wing +909 wok +910 wooden spoon +911 wool, woolen, woollen +912 worm fence, snake fence, snake-rail fence, Virginia fence +913 wreck +914 yawl +915 yurt +916 web site, website, internet site, site +917 comic book +918 crossword puzzle, crossword +919 street sign +920 traffic light, traffic signal, stoplight +921 book jacket, dust cover, dust jacket, dust wrapper +922 menu +923 plate +924 guacamole +925 consomme +926 hot pot, hotpot +927 trifle +928 ice cream, icecream +929 ice lolly, lolly, lollipop, popsicle +930 French loaf +931 bagel, beigel +932 pretzel +933 cheeseburger +934 hotdog, hot dog, red hot +935 mashed potato +936 head cabbage +937 broccoli +938 cauliflower +939 zucchini, courgette +940 spaghetti squash +941 acorn squash +942 butternut squash +943 cucumber, cuke +944 artichoke, globe artichoke +945 bell pepper +946 cardoon +947 mushroom +948 Granny Smith +949 strawberry +950 orange +951 lemon +952 fig +953 pineapple, ananas +954 banana +955 jackfruit, jak, jack +956 custard apple +957 pomegranate +958 hay +959 carbonara +960 chocolate sauce, chocolate syrup +961 dough +962 meat loaf, meatloaf +963 pizza, pizza pie +964 potpie +965 burrito +966 red wine +967 espresso +968 cup +969 eggnog +970 alp +971 bubble +972 cliff, drop, drop-off +973 coral reef +974 geyser +975 lakeside, lakeshore +976 promontory, headland, head, foreland +977 sandbar, sand bar +978 seashore, coast, seacoast, sea-coast +979 valley, vale +980 volcano +981 ballplayer, baseball player +982 groom, bridegroom +983 scuba diver +984 rapeseed +985 daisy +986 yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum +987 corn +988 acorn +989 hip, rose hip, rosehip +990 buckeye, horse chestnut, conker +991 coral fungus +992 agaric +993 gyromitra +994 stinkhorn, carrion fungus +995 earthstar +996 hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa +997 bolete +998 ear, spike, capitulum +999 toilet tissue, toilet paper, bathroom tissue diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/logger.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/logger.py new file mode 100644 index 000000000..bc8de3640 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/logger.py @@ -0,0 +1,138 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +import logging +import datetime +import paddle.distributed as dist + +_logger = None + + +def init_logger(name='ppcls', log_file=None, log_level=logging.INFO): + """Initialize and get a logger by name. + If the logger has not been initialized, this method will initialize the + logger by adding one or two handlers, otherwise the initialized logger will + be directly returned. 
During initialization, a StreamHandler will always be + added. If `log_file` is specified a FileHandler will also be added. + Args: + name (str): Logger name. + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the logger. + log_level (int): The logger level. Note that only the process of + rank 0 is affected, and other processes will set the level to + "Error" thus be silent most of the time. + Returns: + logging.Logger: The expected logger. + """ + global _logger + assert _logger is None, "logger should not be initialized twice or more." + _logger = logging.getLogger(name) + + formatter = logging.Formatter( + '[%(asctime)s] %(name)s %(levelname)s: %(message)s', + datefmt="%Y/%m/%d %H:%M:%S") + + stream_handler = logging.StreamHandler(stream=sys.stdout) + stream_handler.setFormatter(formatter) + _logger.addHandler(stream_handler) + if log_file is not None and dist.get_rank() == 0: + log_file_folder = os.path.split(log_file)[0] + os.makedirs(log_file_folder, exist_ok=True) + file_handler = logging.FileHandler(log_file, 'a') + file_handler.setFormatter(formatter) + _logger.addHandler(file_handler) + if dist.get_rank() == 0: + _logger.setLevel(log_level) + else: + _logger.setLevel(logging.ERROR) + _logger.propagate = False + + +def log_at_trainer0(log): + """ + logs will print multi-times when calling Fleet API. + Only display single log and ignore the others. + """ + + def wrapper(fmt, *args): + if dist.get_rank() == 0: + log(fmt, *args) + + return wrapper + + +@log_at_trainer0 +def info(fmt, *args): + _logger.info(fmt, *args) + + +@log_at_trainer0 +def debug(fmt, *args): + _logger.debug(fmt, *args) + + +@log_at_trainer0 +def warning(fmt, *args): + _logger.warning(fmt, *args) + + +@log_at_trainer0 +def error(fmt, *args): + _logger.error(fmt, *args) + + +def scaler(name, value, step, writer): + """ + This function will draw a scalar curve generated by the visualdl. + Usage: Install visualdl: pip3 install visualdl==2.0.0b4 + and then: + visualdl --logdir ./scalar --host 0.0.0.0 --port 8830 + to preview loss corve in real time. + """ + if writer is None: + return + writer.add_scalar(tag=name, step=step, value=value) + + +def advertise(): + """ + Show the advertising message like the following: + + =========================================================== + == PaddleClas is powered by PaddlePaddle ! == + =========================================================== + == == + == For more info please go to the following website. == + == == + == https://github.com/PaddlePaddle/PaddleClas == + =========================================================== + + """ + copyright = "PaddleClas is powered by PaddlePaddle !" + ad = "For more info please go to the following website." + website = "https://github.com/PaddlePaddle/PaddleClas" + AD_LEN = 6 + len(max([copyright, ad, website], key=len)) + + info("\n{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}\n".format( + "=" * (AD_LEN + 4), + "=={}==".format(copyright.center(AD_LEN)), + "=" * (AD_LEN + 4), + "=={}==".format(' ' * AD_LEN), + "=={}==".format(ad.center(AD_LEN)), + "=={}==".format(' ' * AD_LEN), + "=={}==".format(website.center(AD_LEN)), + "=" * (AD_LEN + 4), )) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/metrics.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/metrics.py new file mode 100644 index 000000000..b0db68a75 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/metrics.py @@ -0,0 +1,107 @@ +# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from sklearn.metrics import hamming_loss +from sklearn.metrics import accuracy_score as accuracy_metric +from sklearn.metrics import multilabel_confusion_matrix +from sklearn.metrics import precision_recall_fscore_support +from sklearn.metrics import average_precision_score +from sklearn.preprocessing import binarize + +import numpy as np + +__all__ = ["multi_hot_encode", "hamming_distance", "accuracy_score", "precision_recall_fscore", "mean_average_precision"] + + +def multi_hot_encode(logits, threshold=0.5): + """ + Encode logits to multi-hot by elementwise for multilabel + """ + + return binarize(logits, threshold=threshold) + + +def hamming_distance(output, target): + """ + Soft metric based label for multilabel classification + Returns: + The smaller the return value is, the better model is. + """ + + return hamming_loss(target, output) + + +def accuracy_score(output, target, base="sample"): + """ + Hard metric for multilabel classification + Args: + output: + target: + base: ["sample", "label"], default="sample" + if "sample", return metric score based sample, + if "label", return metric score based label. + Returns: + accuracy: + """ + + assert base in ["sample", "label"], 'must be one of ["sample", "label"]' + + if base == "sample": + accuracy = accuracy_metric(target, output) + elif base == "label": + mcm = multilabel_confusion_matrix(target, output) + tns = mcm[:, 0, 0] + fns = mcm[:, 1, 0] + tps = mcm[:, 1, 1] + fps = mcm[:, 0, 1] + + accuracy = (sum(tps) + sum(tns)) / (sum(tps) + sum(tns) + sum(fns) + sum(fps)) + + return accuracy + + +def precision_recall_fscore(output, target): + """ + Metric based label for multilabel classification + Returns: + precisions: + recalls: + fscores: + """ + + precisions, recalls, fscores, _ = precision_recall_fscore_support(target, output) + + return precisions, recalls, fscores + + +def mean_average_precision(logits, target): + """ + Calculate average precision + Args: + logits: probability from network before sigmoid or softmax + target: ground truth, 0 or 1 + """ + if not (isinstance(logits, np.ndarray) and isinstance(target, np.ndarray)): + raise TypeError("logits and target should be np.ndarray.") + + aps = [] + for i in range(target.shape[1]): + ap = average_precision_score(target[:, i], logits[:, i]) + aps.append(ap) + + return np.mean(aps) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/misc.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/misc.py new file mode 100644 index 000000000..08ab7b6f7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/misc.py @@ -0,0 +1,63 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
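The multilabel metrics defined in metrics.py above compose naturally: binarize the raw logits first, then score the resulting multi-hot matrix. A small sketch with synthetic scores (the numbers are made up purely for illustration; it assumes scikit-learn is installed and that the functions are in scope, e.g. imported from that module):

import numpy as np

logits = np.array([[0.9, 0.2, 0.7],
                   [0.1, 0.8, 0.4]])
target = np.array([[1, 0, 1],
                   [0, 1, 1]])

preds = multi_hot_encode(logits, threshold=0.5)       # -> [[1, 0, 1], [0, 1, 0]]
print(hamming_distance(preds, target))                # fraction of mispredicted labels
print(accuracy_score(preds, target, base="label"))    # label-level accuracy
print(mean_average_precision(logits, target))         # mAP is computed on the raw scores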
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = ['AverageMeter'] + + +class AverageMeter(object): + """ + Computes and stores the average and current value + Code was based on https://github.com/pytorch/examples/blob/master/imagenet/main.py + """ + + def __init__(self, name='', fmt='f', postfix="", need_avg=True): + self.name = name + self.fmt = fmt + self.postfix = postfix + self.need_avg = need_avg + self.reset() + + def reset(self): + """ reset """ + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + """ update """ + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + @property + def total(self): + return '{self.name}_sum: {self.sum:{self.fmt}}{self.postfix}'.format( + self=self) + + @property + def total_minute(self): + return '{self.name} {s:{self.fmt}}{self.postfix} min'.format( + s=self.sum / 60, self=self) + + @property + def mean(self): + return '{self.name}: {self.avg:{self.fmt}}{self.postfix}'.format( + self=self) if self.need_avg else '' + + @property + def value(self): + return '{self.name}: {self.val:{self.fmt}}{self.postfix}'.format( + self=self) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/model_zoo.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/model_zoo.py new file mode 100644 index 000000000..fc527f6a1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/model_zoo.py @@ -0,0 +1,213 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import requests +import shutil +import tarfile +import tqdm +import zipfile + +from ppcls.arch import similar_architectures +from ppcls.utils import logger + +__all__ = ['get'] + +DOWNLOAD_RETRY_LIMIT = 3 + + +class UrlError(Exception): + """ UrlError + """ + + def __init__(self, url='', code=''): + message = "Downloading from {} failed with code {}!".format(url, code) + super(UrlError, self).__init__(message) + + +class ModelNameError(Exception): + """ ModelNameError + """ + + def __init__(self, message=''): + super(ModelNameError, self).__init__(message) + + +class RetryError(Exception): + """ RetryError + """ + + def __init__(self, url='', times=''): + message = "Download from {} failed. Retry({}) limit reached".format( + url, times) + super(RetryError, self).__init__(message) + + +def _get_url(architecture, postfix="pdparams"): + prefix = "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/" + fname = architecture + "_pretrained." 
+ postfix + return prefix + fname + + +def _move_and_merge_tree(src, dst): + """ + Move src directory to dst, if dst is already exists, + merge src to dst + """ + if not os.path.exists(dst): + shutil.move(src, dst) + elif os.path.isfile(src): + shutil.move(src, dst) + else: + for fp in os.listdir(src): + src_fp = os.path.join(src, fp) + dst_fp = os.path.join(dst, fp) + if os.path.isdir(src_fp): + if os.path.isdir(dst_fp): + _move_and_merge_tree(src_fp, dst_fp) + else: + shutil.move(src_fp, dst_fp) + elif os.path.isfile(src_fp) and \ + not os.path.isfile(dst_fp): + shutil.move(src_fp, dst_fp) + + +def _download(url, path): + """ + Download from url, save to path. + url (str): download url + path (str): download to given path + """ + if not os.path.exists(path): + os.makedirs(path) + + fname = os.path.split(url)[-1] + fullname = os.path.join(path, fname) + retry_cnt = 0 + + while not os.path.exists(fullname): + if retry_cnt < DOWNLOAD_RETRY_LIMIT: + retry_cnt += 1 + else: + raise RetryError(url, DOWNLOAD_RETRY_LIMIT) + + logger.info("Downloading {} from {}".format(fname, url)) + + req = requests.get(url, stream=True) + if req.status_code != 200: + raise UrlError(url, req.status_code) + + # For protecting download interupted, download to + # tmp_fullname firstly, move tmp_fullname to fullname + # after download finished + tmp_fullname = fullname + "_tmp" + total_size = req.headers.get('content-length') + with open(tmp_fullname, 'wb') as f: + if total_size: + for chunk in tqdm.tqdm( + req.iter_content(chunk_size=1024), + total=(int(total_size) + 1023) // 1024, + unit='KB'): + f.write(chunk) + else: + for chunk in req.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + shutil.move(tmp_fullname, fullname) + + return fullname + + +def _decompress(fname): + """ + Decompress for zip and tar file + """ + logger.info("Decompressing {}...".format(fname)) + + # For protecting decompressing interupted, + # decompress to fpath_tmp directory firstly, if decompress + # successed, move decompress files to fpath and delete + # fpath_tmp and remove download compress file. + fpath = os.path.split(fname)[0] + fpath_tmp = os.path.join(fpath, 'tmp') + if os.path.isdir(fpath_tmp): + shutil.rmtree(fpath_tmp) + os.makedirs(fpath_tmp) + + if fname.find('tar') >= 0: + with tarfile.open(fname) as tf: + tf.extractall(path=fpath_tmp) + elif fname.find('zip') >= 0: + with zipfile.ZipFile(fname) as zf: + zf.extractall(path=fpath_tmp) + else: + raise TypeError("Unsupport compress file type {}".format(fname)) + + fs = os.listdir(fpath_tmp) + assert len( + fs + ) == 1, "There should just be 1 pretrained path in an archive file but got {}.".format( + len(fs)) + + f = fs[0] + src_dir = os.path.join(fpath_tmp, f) + dst_dir = os.path.join(fpath, f) + _move_and_merge_tree(src_dir, dst_dir) + + shutil.rmtree(fpath_tmp) + os.remove(fname) + + return f + + +def _get_pretrained(): + with open('./ppcls/utils/pretrained.list') as flist: + pretrained = [line.strip() for line in flist] + return pretrained + + +def _check_pretrained_name(architecture): + assert isinstance(architecture, str), \ + ("the type of architecture({}) should be str". format(architecture)) + pretrained = _get_pretrained() + similar_names = similar_architectures(architecture, pretrained) + model_list = ', '.join(similar_names) + err = "{} is not exist! 
Maybe you want: [{}]" \ + "".format(architecture, model_list) + if architecture not in similar_names: + raise ModelNameError(err) + + +def list_models(): + pretrained = _get_pretrained() + msg = "All avialable pretrained models are as follows: {}".format( + pretrained) + logger.info(msg) + return + + +def get(architecture, path, decompress=False, postfix="pdparams"): + """ + Get the pretrained model. + """ + _check_pretrained_name(architecture) + url = _get_url(architecture, postfix=postfix) + fname = _download(url, path) + if postfix == "tar" and decompress: + _decompress(fname) + logger.info("download {} finished ".format(fname)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/pretrained.list b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/pretrained.list new file mode 100644 index 000000000..36d70f5a2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/pretrained.list @@ -0,0 +1,121 @@ +ResNet18 +ResNet34 +ResNet50 +ResNet101 +ResNet152 +ResNet50_vc +ResNet18_vd +ResNet34_vd +ResNet50_vd +ResNet50_vd_v2 +ResNet101_vd +ResNet152_vd +ResNet200_vd +ResNet50_vd_ssld +ResNet50_vd_ssld_v2 +Fix_ResNet50_vd_ssld_v2 +ResNet101_vd_ssld +MobileNetV3_large_x0_35 +MobileNetV3_large_x0_5 +MobileNetV3_large_x0_75 +MobileNetV3_large_x1_0 +MobileNetV3_large_x1_25 +MobileNetV3_small_x0_35 +MobileNetV3_small_x0_5 +MobileNetV3_small_x0_75 +MobileNetV3_small_x1_0 +MobileNetV3_small_x1_25 +MobileNetV3_large_x1_0_ssld +MobileNetV3_large_x1_0_ssld_int8 +MobileNetV3_small_x1_0_ssld +MobileNetV2_x0_25 +MobileNetV2_x0_5 +MobileNetV2_x0_75 +MobileNetV2 +MobileNetV2_x1_5 +MobileNetV2_x2_0 +MobileNetV2_ssld +MobileNetV1_x0_25 +MobileNetV1_x0_5 +MobileNetV1_x0_75 +MobileNetV1 +MobileNetV1_ssld +ShuffleNetV2_x0_25 +ShuffleNetV2_x0_33 +ShuffleNetV2_x0_5 +ShuffleNetV2 +ShuffleNetV2_x1_5 +ShuffleNetV2_x2_0 +ShuffleNetV2_swish +ResNeXt50_32x4d +ResNeXt50_64x4d +ResNeXt101_32x4d +ResNeXt101_64x4d +ResNeXt152_32x4d +ResNeXt152_64x4d +ResNeXt50_vd_32x4d +ResNeXt50_vd_64x4d +ResNeXt101_vd_32x4d +ResNeXt101_vd_64x4d +ResNeXt152_vd_32x4d +ResNeXt152_vd_64x4d +SE_ResNet18_vd +SE_ResNet34_vd +SE_ResNet50_vd +SE_ResNeXt50_32x4d +SE_ResNeXt101_32x4d +SE_ResNeXt50_vd_32x4d +SENet154_vd +Res2Net50_26w_4s +Res2Net50_vd_26w_4s +Res2Net50_14w_8s +Res2Net101_vd_26w_4s +Res2Net200_vd_26w_4s +GoogLeNet +InceptionV4 +Xception41 +Xception41_deeplab +Xception65 +Xception65_deeplab +Xception71 +HRNet_W18_C +HRNet_W30_C +HRNet_W32_C +HRNet_W40_C +HRNet_W44_C +HRNet_W48_C +HRNet_W64_C +DPN68 +DPN92 +DPN98 +DPN107 +DPN131 +DenseNet121 +DenseNet161 +DenseNet169 +DenseNet201 +DenseNet264 +EfficientNetB0_small +EfficientNetB0 +EfficientNetB1 +EfficientNetB2 +EfficientNetB3 +EfficientNetB4 +EfficientNetB5 +EfficientNetB6 +EfficientNetB7 +ResNeXt101_32x8d_wsl +ResNeXt101_32x16d_wsl +ResNeXt101_32x32d_wsl +ResNeXt101_32x48d_wsl +Fix_ResNeXt101_32x48d_wsl +AlexNet +SqueezeNet1_0 +SqueezeNet1_1 +VGG11 +VGG13 +VGG16 +VGG19 +DarkNet53_ImageNet1k +ResNet50_ACNet_deploy +CSPResNet50_leaky diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/profiler.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/profiler.py new file mode 100644 index 000000000..7cf945a26 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/profiler.py @@ -0,0 +1,111 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
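The helpers in model_zoo.py above resolve an architecture name against ppcls/utils/pretrained.list (read relative to the working directory), download the matching .pdparams or tar archive, and retry up to DOWNLOAD_RETRY_LIMIT times. A hedged usage sketch; the import path and target directory are assumptions and the calls need network access:

from ppcls.utils import model_zoo

model_zoo.list_models()                           # log every name in pretrained.list
model_zoo.get("ResNet50_vd", "./pretrained")      # -> ./pretrained/ResNet50_vd_pretrained.pdparams
model_zoo.get("ResNet50_vd", "./pretrained",
              postfix="tar", decompress=True)     # fetch the tar archive and unpack it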
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import paddle + +# A global variable to record the number of calling times for profiler +# functions. It is used to specify the tracing range of training steps. +_profiler_step_id = 0 + +# A global variable to avoid parsing from string every time. +_profiler_options = None + + +class ProfilerOptions(object): + ''' + Use a string to initialize a ProfilerOptions. + The string should be in the format: "key1=value1;key2=value;key3=value3". + For example: + "profile_path=model.profile" + "batch_range=[50, 60]; profile_path=model.profile" + "batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile" + + ProfilerOptions supports following key-value pair: + batch_range - a integer list, e.g. [100, 110]. + state - a string, the optional values are 'CPU', 'GPU' or 'All'. + sorted_key - a string, the optional values are 'calls', 'total', + 'max', 'min' or 'ave. + tracer_option - a string, the optional values are 'Default', 'OpDetail', + 'AllOpDetail'. + profile_path - a string, the path to save the serialized profile data, + which can be used to generate a timeline. + exit_on_finished - a boolean. + ''' + + def __init__(self, options_str): + assert isinstance(options_str, str) + + self._options = { + 'batch_range': [10, 20], + 'state': 'All', + 'sorted_key': 'total', + 'tracer_option': 'Default', + 'profile_path': '/tmp/profile', + 'exit_on_finished': True + } + self._parse_from_string(options_str) + + def _parse_from_string(self, options_str): + for kv in options_str.replace(' ', '').split(';'): + key, value = kv.split('=') + if key == 'batch_range': + value_list = value.replace('[', '').replace(']', '').split(',') + value_list = list(map(int, value_list)) + if len(value_list) >= 2 and value_list[0] >= 0 and value_list[ + 1] > value_list[0]: + self._options[key] = value_list + elif key == 'exit_on_finished': + self._options[key] = value.lower() in ("yes", "true", "t", "1") + elif key in [ + 'state', 'sorted_key', 'tracer_option', 'profile_path' + ]: + self._options[key] = value + + def __getitem__(self, name): + if self._options.get(name, None) is None: + raise ValueError( + "ProfilerOptions does not have an option named %s." % name) + return self._options[name] + + +def add_profiler_step(options_str=None): + ''' + Enable the operator-level timing using PaddlePaddle's profiler. + The profiler uses a independent variable to count the profiler steps. + One call of this function is treated as a profiler step. + + Args: + profiler_options - a string to initialize the ProfilerOptions. + Default is None, and the profiler is disabled. 
+ ''' + if options_str is None: + return + + global _profiler_step_id + global _profiler_options + + if _profiler_options is None: + _profiler_options = ProfilerOptions(options_str) + + if _profiler_step_id == _profiler_options['batch_range'][0]: + paddle.utils.profiler.start_profiler( + _profiler_options['state'], _profiler_options['tracer_option']) + elif _profiler_step_id == _profiler_options['batch_range'][1]: + paddle.utils.profiler.stop_profiler(_profiler_options['sorted_key'], + _profiler_options['profile_path']) + if _profiler_options['exit_on_finished']: + sys.exit(0) + + _profiler_step_id += 1 diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/save_load.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/save_load.py new file mode 100644 index 000000000..625a28483 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.5/utils/save_load.py @@ -0,0 +1,136 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import errno +import os +import re +import shutil +import tempfile + +import paddle +from ppcls.utils import logger +from .download import get_weights_path_from_url + +__all__ = ['init_model', 'save_model', 'load_dygraph_pretrain'] + + +def _mkdir_if_not_exist(path): + """ + mkdir if not exists, ignore the exception when multiprocess mkdir together + """ + if not os.path.exists(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno == errno.EEXIST and os.path.isdir(path): + logger.warning( + 'be happy if some process has already created {}'.format( + path)) + else: + raise OSError('Failed to mkdir {}'.format(path)) + + +def load_dygraph_pretrain(model, path=None): + if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')): + raise ValueError("Model pretrain path {} does not " + "exists.".format(path)) + param_state_dict = paddle.load(path + ".pdparams") + model.set_dict(param_state_dict) + return + + +def load_dygraph_pretrain_from_url(model, pretrained_url, use_ssld=False): + if use_ssld: + pretrained_url = pretrained_url.replace("_pretrained", + "_ssld_pretrained") + local_weight_path = get_weights_path_from_url(pretrained_url).replace( + ".pdparams", "") + load_dygraph_pretrain(model, path=local_weight_path) + return + + +def load_distillation_model(model, pretrained_model): + logger.info("In distillation mode, teacher model will be " + "loaded firstly before student model.") + + if not isinstance(pretrained_model, list): + pretrained_model = [pretrained_model] + + teacher = model.teacher if hasattr(model, + "teacher") else model._layers.teacher + student = model.student if hasattr(model, + "student") else model._layers.student + load_dygraph_pretrain(teacher, path=pretrained_model[0]) + logger.info("Finish initing teacher model from {}".format( + pretrained_model)) + # load student model + if len(pretrained_model) >= 2: + 
load_dygraph_pretrain(student, path=pretrained_model[1]) + logger.info("Finish initing student model from {}".format( + pretrained_model)) + + +def init_model(config, net, optimizer=None): + """ + load model from checkpoint or pretrained_model + """ + checkpoints = config.get('checkpoints') + if checkpoints and optimizer is not None: + assert os.path.exists(checkpoints + ".pdparams"), \ + "Given dir {}.pdparams not exist.".format(checkpoints) + assert os.path.exists(checkpoints + ".pdopt"), \ + "Given dir {}.pdopt not exist.".format(checkpoints) + para_dict = paddle.load(checkpoints + ".pdparams") + opti_dict = paddle.load(checkpoints + ".pdopt") + metric_dict = paddle.load(checkpoints + ".pdstates") + net.set_dict(para_dict) + optimizer.set_state_dict(opti_dict) + logger.info("Finish load checkpoints from {}".format(checkpoints)) + return metric_dict + + pretrained_model = config.get('pretrained_model') + use_distillation = config.get('use_distillation', False) + if pretrained_model: + if use_distillation: + load_distillation_model(net, pretrained_model) + else: # common load + load_dygraph_pretrain(net, path=pretrained_model) + logger.info( + logger.coloring("Finish load pretrained model from {}".format( + pretrained_model), "HEADER")) + + +def save_model(net, + optimizer, + metric_info, + model_path, + model_name="", + prefix='ppcls'): + """ + save model to the target path + """ + if paddle.distributed.get_rank() != 0: + return + model_path = os.path.join(model_path, model_name) + _mkdir_if_not_exist(model_path) + model_path = os.path.join(model_path, prefix) + + paddle.save(net.state_dict(), model_path + ".pdparams") + paddle.save(optimizer.state_dict(), model_path + ".pdopt") + paddle.save(metric_info, model_path + ".pdstates") + logger.info("Already save model in {}".format(model_path)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/__init__.py new file mode 100644 index 000000000..d6cdb6f8f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import optimizer + +from .arch import * +from .optimizer import * +from .data import * +from .utils import * diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/__init__.py new file mode 100644 index 000000000..798df62eb --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/__init__.py @@ -0,0 +1,177 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
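save_model() and init_model() above form a simple checkpoint round trip: rank 0 writes <model_path>/<model_name>/ppcls.{pdparams,pdopt,pdstates}, and a later run whose config points checkpoints at that prefix restores weights, optimizer state, and the saved metric dict. A minimal sketch; the toy Linear layer, paths, and metric values are assumptions, and the module-level logger must be initialized first:

import paddle
from ppcls.utils.logger import init_logger
from ppcls.utils.save_load import init_model, save_model

init_logger()                                          # save/init_model log through ppcls.utils.logger

net = paddle.nn.Linear(8, 2)                           # stand-in for a real backbone
opt = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())

save_model(net, opt, {"top1_acc": 0.76, "epoch": 10},
           model_path="./output", model_name="demo")   # writes ./output/demo/ppcls.*

config = {"checkpoints": "./output/demo/ppcls"}
metrics = init_model(config, net, opt)                 # restores weights + optimizer state
print(metrics)                                         # the metric dict saved above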
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import copy +import importlib +import paddle.nn as nn +from paddle.jit import to_static +from paddle.static import InputSpec + +from . import backbone, gears +from .backbone import * +from .gears import build_gear, add_ml_decoder_head +from .utils import * +from .backbone.base.theseus_layer import TheseusLayer +from ..utils import logger +from ..utils.save_load import load_dygraph_pretrain +from .slim import prune_model, quantize_model +from .distill.afd_attention import LinearTransformStudent, LinearTransformTeacher + +__all__ = ["build_model", "RecModel", "DistillationModel", "AttentionModel"] + + +def build_model(config, mode="train"): + arch_config = copy.deepcopy(config["Arch"]) + model_type = arch_config.pop("name") + use_sync_bn = arch_config.pop("use_sync_bn", False) + use_ml_decoder = arch_config.pop("use_ml_decoder", False) + mod = importlib.import_module(__name__) + arch = getattr(mod, model_type)(**arch_config) + if use_sync_bn: + if config["Global"]["device"] == "gpu": + arch = nn.SyncBatchNorm.convert_sync_batchnorm(arch) + else: + msg = "SyncBatchNorm can only be used on GPU device. The releated setting has been ignored." + logger.warning(msg) + + if use_ml_decoder: + add_ml_decoder_head(arch, config.get("MLDecoder", {})) + + if isinstance(arch, TheseusLayer): + prune_model(config, arch) + quantize_model(config, arch, mode) + + return arch + + +def apply_to_static(config, model, is_rec): + support_to_static = config['Global'].get('to_static', False) + + if support_to_static: + specs = None + if 'image_shape' in config['Global']: + specs = [InputSpec([None] + config['Global']['image_shape'])] + specs[0].stop_gradient = True + if is_rec: + specs.append(InputSpec([None, 1], 'int64', stop_gradient=True)) + model = to_static(model, input_spec=specs) + logger.info("Successfully to apply @to_static with specs: {}".format( + specs)) + return model + + +class RecModel(TheseusLayer): + def __init__(self, **config): + super().__init__() + backbone_config = config["Backbone"] + backbone_name = backbone_config.pop("name") + self.backbone = eval(backbone_name)(**backbone_config) + self.head_feature_from = config.get('head_feature_from', 'neck') + + if "BackboneStopLayer" in config: + backbone_stop_layer = config["BackboneStopLayer"]["name"] + self.backbone.stop_after(backbone_stop_layer) + + if "Neck" in config: + self.neck = build_gear(config["Neck"]) + else: + self.neck = None + + if "Head" in config: + self.head = build_gear(config["Head"]) + else: + self.head = None + + def forward(self, x, label=None): + + out = dict() + x = self.backbone(x) + out["backbone"] = x + if self.neck is not None: + feat = self.neck(x) + out["neck"] = feat + out["features"] = out['neck'] if self.neck else x + if self.head is not None: + if self.head_feature_from == 'backbone': + y = self.head(out['backbone'], label) + elif self.head_feature_from == 'neck': + y = self.head(out['features'], label) + out["logits"] = y + return out + + +class DistillationModel(nn.Layer): + def __init__(self, + models=None, + pretrained_list=None, + freeze_params_list=None, + **kargs): + super().__init__() + assert 
isinstance(models, list) + self.model_list = [] + self.model_name_list = [] + if pretrained_list is not None: + assert len(pretrained_list) == len(models) + + if freeze_params_list is None: + freeze_params_list = [False] * len(models) + assert len(freeze_params_list) == len(models) + for idx, model_config in enumerate(models): + assert len(model_config) == 1 + key = list(model_config.keys())[0] + model_config = model_config[key] + model_name = model_config.pop("name") + model = eval(model_name)(**model_config) + + if freeze_params_list[idx]: + for param in model.parameters(): + param.trainable = False + self.model_list.append(self.add_sublayer(key, model)) + self.model_name_list.append(key) + + if pretrained_list is not None: + for idx, pretrained in enumerate(pretrained_list): + if pretrained is not None: + load_dygraph_pretrain( + self.model_name_list[idx], path=pretrained) + + def forward(self, x, label=None): + result_dict = dict() + for idx, model_name in enumerate(self.model_name_list): + if label is None: + result_dict[model_name] = self.model_list[idx](x) + else: + result_dict[model_name] = self.model_list[idx](x, label) + return result_dict + + +class AttentionModel(DistillationModel): + def __init__(self, + models=None, + pretrained_list=None, + freeze_params_list=None, + **kargs): + super().__init__(models, pretrained_list, freeze_params_list, **kargs) + + def forward(self, x, label=None): + result_dict = dict() + out = x + for idx, model_name in enumerate(self.model_name_list): + if label is None: + out = self.model_list[idx](out) + result_dict.update(out) + else: + out = self.model_list[idx](out, label) + result_dict.update(out) + return result_dict \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/__init__.py new file mode 100644 index 000000000..f79dccdfe --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/__init__.py @@ -0,0 +1,118 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
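For orientation, the `build_model` factory above resolves the architecture class from the `Arch.name` string in the config and passes the remaining keys straight to that class's constructor. The sketch below is illustrative only and is not part of this patch: the config values are assumptions, and it presumes the vendored `ppcls_2.6` package is importable under a name such as `ppcls`, which the patch itself does not guarantee.

import paddle
from ppcls.arch import build_model  # assumed import path for the vendored package

config = {
    # "name" is popped and looked up on the arch module via getattr();
    # every other key is forwarded to the class constructor unchanged.
    "Arch": {"name": "ResNet50", "class_num": 1000},
    "Global": {"device": "gpu"},
}
model = build_model(config, mode="train")
logits = model(paddle.rand([1, 3, 224, 224]))  # plain backbones return the logits tensor directly
print(logits.shape)  # [1, 1000]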
+ +import sys +import inspect + +from .legendary_models.mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75, MobileNetV1 +from .legendary_models.mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25 +from .legendary_models.mobilenet_v4 import MobileNetV4_conv_small, MobileNetV4_conv_medium, MobileNetV4_conv_large, MobileNetV4_hybrid_medium, MobileNetV4_hybrid_large +from .model_zoo.fasternet import FasterNet_T0, FasterNet_T1, FasterNet_T2, FasterNet_S, FasterNet_M, FasterNet_L +from .model_zoo.starnet import StarNet_S1, StarNet_S2, StarNet_S3, StarNet_S4 +from .legendary_models.resnet import ResNet18, ResNet18_vd, ResNet34, ResNet34_vd, ResNet50, ResNet50_vd, ResNet101, ResNet101_vd, ResNet152, ResNet152_vd, ResNet200_vd +from .legendary_models.vgg import VGG11, VGG13, VGG16, VGG19 +from .legendary_models.inception_v3 import InceptionV3 +from .legendary_models.hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W60_C, HRNet_W64_C, SE_HRNet_W64_C +from .legendary_models.pp_lcnet import PPLCNetBaseNet, PPLCNet_x0_25, PPLCNet_x0_35, PPLCNet_x0_5, PPLCNet_x0_75, PPLCNet_x1_0, PPLCNet_x1_5, PPLCNet_x2_0, PPLCNet_x2_5 +from .legendary_models.pp_lcnet_v2 import PPLCNetV2_small, PPLCNetV2_base, PPLCNetV2_large +from .legendary_models.esnet import ESNet_x0_25, ESNet_x0_5, ESNet_x0_75, ESNet_x1_0 +from .legendary_models.pp_hgnet import PPHGNet_tiny, PPHGNet_small, PPHGNet_base +from .legendary_models.pp_hgnet_v2 import PPHGNetV2_B0, PPHGNetV2_B1, PPHGNetV2_B2, PPHGNetV2_B3, PPHGNetV2_B4, PPHGNetV2_B5, PPHGNetV2_B6 + +from .model_zoo.resnet_vc import ResNet50_vc +from .model_zoo.resnext import ResNeXt50_32x4d, ResNeXt50_64x4d, ResNeXt101_32x4d, ResNeXt101_64x4d, ResNeXt152_32x4d, ResNeXt152_64x4d +from .model_zoo.resnext_vd import ResNeXt50_vd_32x4d, ResNeXt50_vd_64x4d, ResNeXt101_vd_32x4d, ResNeXt101_vd_64x4d, ResNeXt152_vd_32x4d, ResNeXt152_vd_64x4d +from .model_zoo.res2net import Res2Net50_26w_4s, Res2Net50_14w_8s +from .model_zoo.res2net_vd import Res2Net50_vd_26w_4s, Res2Net101_vd_26w_4s, Res2Net200_vd_26w_4s +from .model_zoo.se_resnet_vd import SE_ResNet18_vd, SE_ResNet34_vd, SE_ResNet50_vd +from .model_zoo.se_resnext_vd import SE_ResNeXt50_vd_32x4d, SE_ResNeXt50_vd_32x4d, SENet154_vd +from .model_zoo.se_resnext import SE_ResNeXt50_32x4d, SE_ResNeXt101_32x4d, SE_ResNeXt152_64x4d +from .model_zoo.dpn import DPN68, DPN92, DPN98, DPN107, DPN131 +from .model_zoo.dsnet import DSNet_tiny, DSNet_small, DSNet_base +from .model_zoo.densenet import DenseNet121, DenseNet161, DenseNet169, DenseNet201, DenseNet264 +from .model_zoo.efficientnet import EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6, EfficientNetB7, EfficientNetB0_small +from .model_zoo.efficientnet_v2 import EfficientNetV2_S +from .model_zoo.resnest import ResNeSt50_fast_1s1x64d, ResNeSt50, ResNeSt101, ResNeSt200, ResNeSt269 +from .model_zoo.googlenet import GoogLeNet +from .model_zoo.mobilenet_v2 import MobileNetV2_x0_25, MobileNetV2_x0_5, MobileNetV2_x0_75, MobileNetV2, MobileNetV2_x1_5, MobileNetV2_x2_0 +from .model_zoo.mobilefacenet import MobileFaceNet +from .model_zoo.shufflenet_v2 import ShuffleNetV2_x0_25, ShuffleNetV2_x0_33, ShuffleNetV2_x0_5, ShuffleNetV2_x1_0, ShuffleNetV2_x1_5, 
ShuffleNetV2_x2_0, ShuffleNetV2_swish +from .model_zoo.ghostnet import GhostNet_x0_5, GhostNet_x1_0, GhostNet_x1_3 +from .model_zoo.alexnet import AlexNet +from .model_zoo.inception_v4 import InceptionV4 +from .model_zoo.xception import Xception41, Xception65, Xception71 +from .model_zoo.xception_deeplab import Xception41_deeplab, Xception65_deeplab +from .model_zoo.resnext101_wsl import ResNeXt101_32x8d_wsl, ResNeXt101_32x16d_wsl, ResNeXt101_32x32d_wsl, ResNeXt101_32x48d_wsl +from .model_zoo.squeezenet import SqueezeNet1_0, SqueezeNet1_1 +from .model_zoo.darknet import DarkNet53 +from .model_zoo.regnet import RegNetX_200MF, RegNetX_400MF, RegNetX_600MF, RegNetX_800MF, RegNetX_1600MF, RegNetX_3200MF, RegNetX_4GF, RegNetX_6400MF, RegNetX_8GF, RegNetX_12GF, RegNetX_16GF, RegNetX_32GF +from .model_zoo.vision_transformer import ViT_small_patch16_224, ViT_base_patch16_224, ViT_base_patch16_384, ViT_base_patch32_384, ViT_large_patch16_224, ViT_large_patch16_384, ViT_large_patch32_384 +from .model_zoo.distilled_vision_transformer import DeiT_tiny_patch16_224, DeiT_small_patch16_224, DeiT_base_patch16_224, DeiT_tiny_distilled_patch16_224, DeiT_small_distilled_patch16_224, DeiT_base_distilled_patch16_224, DeiT_base_patch16_384, DeiT_base_distilled_patch16_384 +from .legendary_models.swin_transformer import SwinTransformer_tiny_patch4_window7_224, SwinTransformer_small_patch4_window7_224, SwinTransformer_base_patch4_window7_224, SwinTransformer_base_patch4_window12_384, SwinTransformer_large_patch4_window7_224, SwinTransformer_large_patch4_window12_384 +from .model_zoo.swin_transformer_v2 import SwinTransformerV2_tiny_patch4_window8_256, SwinTransformerV2_small_patch4_window8_256, SwinTransformerV2_base_patch4_window8_256, SwinTransformerV2_tiny_patch4_window16_256, SwinTransformerV2_small_patch4_window16_256, SwinTransformerV2_base_patch4_window16_256, SwinTransformerV2_base_patch4_window24_384, SwinTransformerV2_large_patch4_window16_256, SwinTransformerV2_large_patch4_window24_384 +from .model_zoo.cswin_transformer import CSWinTransformer_tiny_224, CSWinTransformer_small_224, CSWinTransformer_base_224, CSWinTransformer_large_224, CSWinTransformer_base_384, CSWinTransformer_large_384 +from .model_zoo.mixnet import MixNet_S, MixNet_M, MixNet_L +from .model_zoo.rexnet import ReXNet_1_0, ReXNet_1_3, ReXNet_1_5, ReXNet_2_0, ReXNet_3_0 +from .model_zoo.twins import pcpvt_small, pcpvt_base, pcpvt_large, alt_gvt_small, alt_gvt_base, alt_gvt_large +from .model_zoo.levit import LeViT_128S, LeViT_128, LeViT_192, LeViT_256, LeViT_384 +from .model_zoo.dla import DLA34, DLA46_c, DLA46x_c, DLA60, DLA60x, DLA60x_c, DLA102, DLA102x, DLA102x2, DLA169 +from .model_zoo.rednet import RedNet26, RedNet38, RedNet50, RedNet101, RedNet152 +from .model_zoo.tnt import TNT_small, TNT_base +from .model_zoo.hardnet import HarDNet68, HarDNet85, HarDNet39_ds, HarDNet68_ds +from .model_zoo.cspnet import CSPDarkNet53 +from .model_zoo.pvt_v2 import PVT_V2_B0, PVT_V2_B1, PVT_V2_B2_Linear, PVT_V2_B2, PVT_V2_B3, PVT_V2_B4, PVT_V2_B5 +from .model_zoo.mobilevit import MobileViT_XXS, MobileViT_XS, MobileViT_S +from .model_zoo.repvgg import RepVGG_A0, RepVGG_A1, RepVGG_A2, RepVGG_B0, RepVGG_B1, RepVGG_B2, RepVGG_B1g2, RepVGG_B1g4, RepVGG_B2g4, RepVGG_B3, RepVGG_B3g4, RepVGG_D2se +from .model_zoo.van import VAN_B0, VAN_B1, VAN_B2, VAN_B3 +from .model_zoo.peleenet import PeleeNet +from .model_zoo.foundation_vit import CLIP_vit_base_patch32_224, CLIP_vit_base_patch16_224, CLIP_vit_large_patch14_336, CLIP_vit_large_patch14_224, 
BEiTv2_vit_base_patch16_224, BEiTv2_vit_large_patch16_224, CAE_vit_base_patch16_224, EVA_vit_giant_patch14, MOCOV3_vit_small, MOCOV3_vit_base, MAE_vit_huge_patch14, MAE_vit_large_patch16, MAE_vit_base_patch16 +from .model_zoo.convnext import ConvNeXt_tiny, ConvNeXt_small, ConvNeXt_base_224, ConvNeXt_base_384, ConvNeXt_large_224, ConvNeXt_large_384 +from .model_zoo.nextvit import NextViT_small_224, NextViT_base_224, NextViT_large_224, NextViT_small_384, NextViT_base_384, NextViT_large_384 +from .model_zoo.cae import cae_base_patch16_224, cae_large_patch16_224 +from .model_zoo.cvt import CvT_13_224, CvT_13_384, CvT_21_224, CvT_21_384, CvT_W24_384 +from .model_zoo.micronet import MicroNet_M0, MicroNet_M1, MicroNet_M2, MicroNet_M3 +from .model_zoo.mobilenext import MobileNeXt_x0_35, MobileNeXt_x0_5, MobileNeXt_x0_75, MobileNeXt_x1_0, MobileNeXt_x1_4 +from .model_zoo.mobilevit_v2 import MobileViTV2_x0_5, MobileViTV2_x0_75, MobileViTV2_x1_0, MobileViTV2_x1_25, MobileViTV2_x1_5, MobileViTV2_x1_75, MobileViTV2_x2_0 +from .model_zoo.tinynet import TinyNet_A, TinyNet_B, TinyNet_C, TinyNet_D, TinyNet_E +from .model_zoo.mobilevit_v3 import MobileViTV3_XXS, MobileViTV3_XS, MobileViTV3_S, MobileViTV3_XXS_L2, MobileViTV3_XS_L2, MobileViTV3_S_L2, MobileViTV3_x0_5, MobileViTV3_x0_75, MobileViTV3_x1_0 +from .model_zoo.svtrnet import SVTR_tiny, SVTR_base, SVTR_large + +from .variant_models.resnet_variant import ResNet50_last_stage_stride1 +from .variant_models.resnet_variant import ResNet50_adaptive_max_pool2d +from .variant_models.resnet_variant import ResNet50_metabin +from .variant_models.vgg_variant import VGG19Sigmoid +from .variant_models.pp_lcnet_variant import PPLCNet_x2_5_Tanh +from .variant_models.pp_lcnetv2_variant import PPLCNetV2_base_ShiTu +from .variant_models.efficientnet_variant import EfficientNetB3_watermark +from .variant_models.foundation_vit_variant import CLIP_large_patch14_224_aesthetic +from .variant_models.swin_transformer_variant import SwinTransformer_tiny_patch4_window7_224_SOLIDER, SwinTransformer_small_patch4_window7_224_SOLIDER, SwinTransformer_base_patch4_window7_224_SOLIDER +from .model_zoo.adaface_ir_net import AdaFace_IR_18, AdaFace_IR_34, AdaFace_IR_50, AdaFace_IR_101, AdaFace_IR_152, AdaFace_IR_SE_50, AdaFace_IR_SE_101, AdaFace_IR_SE_152, AdaFace_IR_SE_200 +from .model_zoo.wideresnet import WideResNet +from .model_zoo.uniformer import UniFormer_small, UniFormer_small_plus, UniFormer_small_plus_dim64, UniFormer_base, UniFormer_base_ls + + +# help whl get all the models' api (class type) and components' api (func type) +def get_apis(): + current_func = sys._getframe().f_code.co_name + current_module = sys.modules[__name__] + api = [] + for _, obj in inspect.getmembers(current_module, + inspect.isclass) + inspect.getmembers( + current_module, inspect.isfunction): + api.append(obj.__name__) + api.remove(current_func) + return api + + +__all__ = get_apis() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/dbb/dbb_block.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/dbb/dbb_block.py new file mode 100644 index 000000000..f38c5c257 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/dbb/dbb_block.py @@ -0,0 +1,365 @@ +# copyright (c) 2023 PaddlePaddle Authors. 
All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/2103.13425, https://github.com/DingXiaoH/DiverseBranchBlock + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +import numpy as np +from .dbb_transforms import * + + +def conv_bn(in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros'): + conv_layer = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias_attr=False, + padding_mode=padding_mode) + bn_layer = nn.BatchNorm2D(num_features=out_channels) + se = nn.Sequential() + se.add_sublayer('conv', conv_layer) + se.add_sublayer('bn', bn_layer) + return se + + +class IdentityBasedConv1x1(nn.Conv2D): + def __init__(self, channels, groups=1): + super(IdentityBasedConv1x1, self).__init__( + in_channels=channels, + out_channels=channels, + kernel_size=1, + stride=1, + padding=0, + groups=groups, + bias_attr=False) + + assert channels % groups == 0 + input_dim = channels // groups + id_value = np.zeros((channels, input_dim, 1, 1)) + for i in range(channels): + id_value[i, i % input_dim, 0, 0] = 1 + self.id_tensor = paddle.to_tensor(id_value) + self.weight.set_value(paddle.zeros_like(self.weight)) + + def forward(self, input): + kernel = self.weight + self.id_tensor + result = F.conv2d( + input, + kernel, + None, + stride=1, + padding=0, + dilation=self._dilation, + groups=self._groups) + return result + + def get_actual_kernel(self): + return self.weight + self.id_tensor + + +class BNAndPad(nn.Layer): + def __init__(self, + pad_pixels, + num_features, + epsilon=1e-5, + momentum=0.1, + last_conv_bias=None, + bn=nn.BatchNorm2D): + super().__init__() + self.bn = bn(num_features, momentum=momentum, epsilon=epsilon) + self.pad_pixels = pad_pixels + self.last_conv_bias = last_conv_bias + + def forward(self, input): + output = self.bn(input) + if self.pad_pixels > 0: + bias = -self.bn._mean + if self.last_conv_bias is not None: + bias += self.last_conv_bias + pad_values = self.bn.bias + self.bn.weight * ( + bias / paddle.sqrt(self.bn._variance + self.bn._epsilon)) + ''' pad ''' + # TODO: n,h,w,c format is not supported yet + n, c, h, w = output.shape + values = pad_values.reshape([1, -1, 1, 1]) + w_values = values.expand([n, -1, self.pad_pixels, w]) + x = paddle.concat([w_values, output, w_values], axis=2) + h = h + self.pad_pixels * 2 + h_values = values.expand([n, -1, h, self.pad_pixels]) + x = paddle.concat([h_values, x, h_values], axis=3) + output = x + return output + + @property + def weight(self): + return self.bn.weight + + @property + def bias(self): + return self.bn.bias + + @property + def _mean(self): + return self.bn._mean + + @property + def _variance(self): + return self.bn._variance + + @property + def _epsilon(self): + return self.bn._epsilon + + +class DiverseBranchBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, 
+ filter_size, + stride=1, + groups=1, + act=None, + is_repped=False, + single_init=False, + **kwargs): + super().__init__() + + padding = (filter_size - 1) // 2 + dilation = 1 + + in_channels = num_channels + out_channels = num_filters + kernel_size = filter_size + internal_channels_1x1_3x3 = None + nonlinear = act + + self.is_repped = is_repped + + if nonlinear is None: + self.nonlinear = nn.Identity() + else: + self.nonlinear = nn.ReLU() + + self.kernel_size = kernel_size + self.out_channels = out_channels + self.groups = groups + assert padding == kernel_size // 2 + + if is_repped: + self.dbb_reparam = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias_attr=True) + else: + self.dbb_origin = conv_bn( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups) + + self.dbb_avg = nn.Sequential() + if groups < out_channels: + self.dbb_avg.add_sublayer( + 'conv', + nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + groups=groups, + bias_attr=False)) + self.dbb_avg.add_sublayer( + 'bn', + BNAndPad( + pad_pixels=padding, num_features=out_channels)) + self.dbb_avg.add_sublayer( + 'avg', + nn.AvgPool2D( + kernel_size=kernel_size, stride=stride, padding=0)) + self.dbb_1x1 = conv_bn( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=stride, + padding=0, + groups=groups) + else: + self.dbb_avg.add_sublayer( + 'avg', + nn.AvgPool2D( + kernel_size=kernel_size, + stride=stride, + padding=padding)) + + self.dbb_avg.add_sublayer('avgbn', nn.BatchNorm2D(out_channels)) + + if internal_channels_1x1_3x3 is None: + internal_channels_1x1_3x3 = in_channels if groups < out_channels else 2 * in_channels # For mobilenet, it is better to have 2X internal channels + + self.dbb_1x1_kxk = nn.Sequential() + if internal_channels_1x1_3x3 == in_channels: + self.dbb_1x1_kxk.add_sublayer( + 'idconv1', + IdentityBasedConv1x1( + channels=in_channels, groups=groups)) + else: + self.dbb_1x1_kxk.add_sublayer( + 'conv1', + nn.Conv2D( + in_channels=in_channels, + out_channels=internal_channels_1x1_3x3, + kernel_size=1, + stride=1, + padding=0, + groups=groups, + bias_attr=False)) + self.dbb_1x1_kxk.add_sublayer( + 'bn1', + BNAndPad( + pad_pixels=padding, + num_features=internal_channels_1x1_3x3)) + self.dbb_1x1_kxk.add_sublayer( + 'conv2', + nn.Conv2D( + in_channels=internal_channels_1x1_3x3, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=0, + groups=groups, + bias_attr=False)) + self.dbb_1x1_kxk.add_sublayer('bn2', nn.BatchNorm2D(out_channels)) + + # The experiments reported in the paper used the default initialization of bn.weight (all as 1). But changing the initialization may be useful in some cases. + if single_init: + # Initialize the bn.weight of dbb_origin as 1 and others as 0. This is not the default setting. 
+ self.single_init() + + def forward(self, inputs): + if self.is_repped: + return self.nonlinear(self.dbb_reparam(inputs)) + + out = self.dbb_origin(inputs) + if hasattr(self, 'dbb_1x1'): + out += self.dbb_1x1(inputs) + out += self.dbb_avg(inputs) + out += self.dbb_1x1_kxk(inputs) + return self.nonlinear(out) + + def init_gamma(self, gamma_value): + if hasattr(self, "dbb_origin"): + paddle.nn.init.constant_(self.dbb_origin.bn.weight, gamma_value) + if hasattr(self, "dbb_1x1"): + paddle.nn.init.constant_(self.dbb_1x1.bn.weight, gamma_value) + if hasattr(self, "dbb_avg"): + paddle.nn.init.constant_(self.dbb_avg.avgbn.weight, gamma_value) + if hasattr(self, "dbb_1x1_kxk"): + paddle.nn.init.constant_(self.dbb_1x1_kxk.bn2.weight, gamma_value) + + def single_init(self): + self.init_gamma(0.0) + if hasattr(self, "dbb_origin"): + paddle.nn.init.constant_(self.dbb_origin.bn.weight, 1.0) + + def get_equivalent_kernel_bias(self): + k_origin, b_origin = transI_fusebn(self.dbb_origin.conv.weight, + self.dbb_origin.bn) + + if hasattr(self, 'dbb_1x1'): + k_1x1, b_1x1 = transI_fusebn(self.dbb_1x1.conv.weight, + self.dbb_1x1.bn) + k_1x1 = transVI_multiscale(k_1x1, self.kernel_size) + else: + k_1x1, b_1x1 = 0, 0 + + if hasattr(self.dbb_1x1_kxk, 'idconv1'): + k_1x1_kxk_first = self.dbb_1x1_kxk.idconv1.get_actual_kernel() + else: + k_1x1_kxk_first = self.dbb_1x1_kxk.conv1.weight + k_1x1_kxk_first, b_1x1_kxk_first = transI_fusebn(k_1x1_kxk_first, + self.dbb_1x1_kxk.bn1) + k_1x1_kxk_second, b_1x1_kxk_second = transI_fusebn( + self.dbb_1x1_kxk.conv2.weight, self.dbb_1x1_kxk.bn2) + k_1x1_kxk_merged, b_1x1_kxk_merged = transIII_1x1_kxk( + k_1x1_kxk_first, + b_1x1_kxk_first, + k_1x1_kxk_second, + b_1x1_kxk_second, + groups=self.groups) + + k_avg = transV_avg(self.out_channels, self.kernel_size, self.groups) + k_1x1_avg_second, b_1x1_avg_second = transI_fusebn(k_avg, + self.dbb_avg.avgbn) + if hasattr(self.dbb_avg, 'conv'): + k_1x1_avg_first, b_1x1_avg_first = transI_fusebn( + self.dbb_avg.conv.weight, self.dbb_avg.bn) + k_1x1_avg_merged, b_1x1_avg_merged = transIII_1x1_kxk( + k_1x1_avg_first, + b_1x1_avg_first, + k_1x1_avg_second, + b_1x1_avg_second, + groups=self.groups) + else: + k_1x1_avg_merged, b_1x1_avg_merged = k_1x1_avg_second, b_1x1_avg_second + + return transII_addbranch( + (k_origin, k_1x1, k_1x1_kxk_merged, k_1x1_avg_merged), + (b_origin, b_1x1, b_1x1_kxk_merged, b_1x1_avg_merged)) + + def re_parameterize(self): + if self.is_repped: + return + + kernel, bias = self.get_equivalent_kernel_bias() + self.dbb_reparam = nn.Conv2D( + in_channels=self.dbb_origin.conv._in_channels, + out_channels=self.dbb_origin.conv._out_channels, + kernel_size=self.dbb_origin.conv._kernel_size, + stride=self.dbb_origin.conv._stride, + padding=self.dbb_origin.conv._padding, + dilation=self.dbb_origin.conv._dilation, + groups=self.dbb_origin.conv._groups, + bias_attr=True) + + self.dbb_reparam.weight.set_value(kernel) + self.dbb_reparam.bias.set_value(bias) + + self.__delattr__('dbb_origin') + self.__delattr__('dbb_avg') + if hasattr(self, 'dbb_1x1'): + self.__delattr__('dbb_1x1') + self.__delattr__('dbb_1x1_kxk') + self.is_repped = True diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/dbb/dbb_transforms.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/dbb/dbb_transforms.py new file mode 100644 index 000000000..70f55fb09 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/dbb/dbb_transforms.py @@ -0,0 +1,73 @@ +# copyright (c) 2023 
PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# reference: https://arxiv.org/abs/2103.13425, https://github.com/DingXiaoH/DiverseBranchBlock
+
+import numpy as np
+import paddle
+import paddle.nn.functional as F
+
+
+def transI_fusebn(kernel, bn):
+    gamma = bn.weight
+    std = (bn._variance + bn._epsilon).sqrt()
+    return kernel * (
+        (gamma / std).reshape([-1, 1, 1, 1])), bn.bias - bn._mean * gamma / std
+
+
+def transII_addbranch(kernels, biases):
+    return sum(kernels), sum(biases)
+
+
+def transIII_1x1_kxk(k1, b1, k2, b2, groups):
+    if groups == 1:
+        k = F.conv2d(k2, k1.transpose([1, 0, 2, 3]))
+        b_hat = (k2 * b1.reshape([1, -1, 1, 1])).sum((1, 2, 3))
+    else:
+        k_slices = []
+        b_slices = []
+        k1_T = k1.transpose([1, 0, 2, 3])
+        k1_group_width = k1.shape[0] // groups
+        k2_group_width = k2.shape[0] // groups
+        for g in range(groups):
+            k1_T_slice = k1_T[:, g * k1_group_width:(g + 1) *
+                              k1_group_width, :, :]
+            k2_slice = k2[g * k2_group_width:(g + 1) * k2_group_width, :, :, :]
+            k_slices.append(F.conv2d(k2_slice, k1_T_slice))
+            b_slices.append((k2_slice * b1[g * k1_group_width:(
+                g + 1) * k1_group_width].reshape([1, -1, 1, 1])).sum((1, 2, 3
+                                                                      )))
+        k, b_hat = transIV_depthconcat(k_slices, b_slices)
+    return k, b_hat + b2
+
+
+def transIV_depthconcat(kernels, biases):
+    return paddle.concat(kernels, axis=0), paddle.concat(biases)
+
+
+def transV_avg(channels, kernel_size, groups):
+    input_dim = channels // groups
+    k = paddle.zeros((channels, input_dim, kernel_size, kernel_size))
+    k[np.arange(channels), np.tile(np.arange(input_dim),
+                                   groups), :, :] = 1.0 / kernel_size**2
+    return k
+
+
+# This has not been tested with non-square kernels (kernel.shape[2] != kernel.shape[3]) nor even-size kernels
+def transVI_multiscale(kernel, target_kernel_size):
+    H_pixels_to_pad = (target_kernel_size - kernel.shape[2]) // 2
+    W_pixels_to_pad = (target_kernel_size - kernel.shape[3]) // 2
+    return F.pad(
+        kernel,
+        [H_pixels_to_pad, H_pixels_to_pad, W_pixels_to_pad, W_pixels_to_pad])
diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/theseus_layer.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/theseus_layer.py
new file mode 100644
index 000000000..30c5a9f8b
--- /dev/null
+++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/base/theseus_layer.py
@@ -0,0 +1,398 @@
+# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
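The `transI_fusebn` helper in `dbb_transforms.py` above folds a BatchNorm into the preceding convolution via k' = k * gamma / sigma and b' = beta - mu * gamma / sigma. The following standalone sketch, which is not part of this patch and uses arbitrary shapes and values, checks that identity numerically:

# Illustrative sanity check that conv+BN folding reproduces BN(conv(x)) in eval mode.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

conv = nn.Conv2D(3, 8, 3, padding=1, bias_attr=False)
bn = nn.BatchNorm2D(8)
# give BN non-trivial running statistics so the comparison is not vacuous
bn._mean.set_value(paddle.rand([8]))
bn._variance.set_value(paddle.rand([8]) + 0.5)
bn.eval()  # re-parameterization assumes inference-time statistics

x = paddle.rand([2, 3, 16, 16])
reference = bn(conv(x))

gamma = bn.weight
std = (bn._variance + bn._epsilon).sqrt()
k_fused = conv.weight * (gamma / std).reshape([-1, 1, 1, 1])  # kernel part of transI_fusebn
b_fused = bn.bias - bn._mean * gamma / std                    # bias part of transI_fusebn
fused = F.conv2d(x, k_fused, bias=b_fused, padding=1)

print(float((reference - fused).abs().max()))  # expected on the order of 1e-6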
+ +from typing import Tuple, List, Dict, Union, Callable, Any + +from paddle import nn +from ....utils import logger + + +class Identity(nn.Layer): + def __init__(self): + super(Identity, self).__init__() + + def forward(self, inputs): + return inputs + + +class TheseusLayer(nn.Layer): + def __init__(self, *args, **kwargs): + super().__init__() + self.res_dict = {} + self.res_name = self.full_name() + self.pruner = None + self.quanter = None + + self.init_net(*args, **kwargs) + + def _return_dict_hook(self, layer, input, output): + res_dict = {"logits": output} + # 'list' is needed to avoid error raised by popping self.res_dict + for res_key in list(self.res_dict): + # clear the res_dict because the forward process may change according to input + res_dict[res_key] = self.res_dict.pop(res_key) + return res_dict + + def init_net(self, + stages_pattern=None, + return_patterns=None, + return_stages=None, + freeze_befor=None, + stop_after=None, + *args, + **kwargs): + # init the output of net + if return_patterns or return_stages: + if return_patterns and return_stages: + msg = f"The 'return_patterns' would be ignored when 'return_stages' is set." + logger.warning(msg) + return_stages = None + + if return_stages is True: + return_patterns = stages_pattern + + # return_stages is int or bool + if type(return_stages) is int: + return_stages = [return_stages] + if isinstance(return_stages, list): + if max(return_stages) > len(stages_pattern) or min( + return_stages) < 0: + msg = f"The 'return_stages' set error. Illegal value(s) have been ignored. The stages' pattern list is {stages_pattern}." + logger.warning(msg) + return_stages = [ + val for val in return_stages + if val >= 0 and val < len(stages_pattern) + ] + return_patterns = [stages_pattern[i] for i in return_stages] + + if return_patterns: + # call update_res function after the __init__ of the object has completed execution, that is, the contructing of layer or model has been completed. + def update_res_hook(layer, input): + self.update_res(return_patterns) + + self.register_forward_pre_hook(update_res_hook) + + # freeze subnet + if freeze_befor is not None: + self.freeze_befor(freeze_befor) + + # set subnet to Identity + if stop_after is not None: + self.stop_after(stop_after) + + def init_res(self, + stages_pattern, + return_patterns=None, + return_stages=None): + msg = "\"init_res\" will be deprecated, please use \"init_net\" instead." + logger.warning(DeprecationWarning(msg)) + + if return_patterns and return_stages: + msg = f"The 'return_patterns' would be ignored when 'return_stages' is set." + logger.warning(msg) + return_stages = None + + if return_stages is True: + return_patterns = stages_pattern + # return_stages is int or bool + if type(return_stages) is int: + return_stages = [return_stages] + if isinstance(return_stages, list): + if max(return_stages) > len(stages_pattern) or min( + return_stages) < 0: + msg = f"The 'return_stages' set error. Illegal value(s) have been ignored. The stages' pattern list is {stages_pattern}." + logger.warning(msg) + return_stages = [ + val for val in return_stages + if val >= 0 and val < len(stages_pattern) + ] + return_patterns = [stages_pattern[i] for i in return_stages] + + if return_patterns: + self.update_res(return_patterns) + + def replace_sub(self, *args, **kwargs) -> None: + msg = "The function 'replace_sub()' is deprecated, please use 'upgrade_sublayer()' instead." 
+ logger.error(DeprecationWarning(msg)) + raise DeprecationWarning(msg) + + def upgrade_sublayer(self, + layer_name_pattern: Union[str, List[str]], + handle_func: Callable[[nn.Layer, str], nn.Layer] + ) -> Dict[str, nn.Layer]: + """use 'handle_func' to modify the sub-layer(s) specified by 'layer_name_pattern'. + + Args: + layer_name_pattern (Union[str, List[str]]): The name of layer to be modified by 'handle_func'. + handle_func (Callable[[nn.Layer, str], nn.Layer]): The function to modify target layer specified by 'layer_name_pattern'. The formal params are the layer(nn.Layer) and pattern(str) that is (a member of) layer_name_pattern (when layer_name_pattern is List type). And the return is the layer processed. + + Returns: + Dict[str, nn.Layer]: The key is the pattern and corresponding value is the result returned by 'handle_func()'. + + Examples: + + from paddle import nn + import paddleclas + + def rep_func(layer: nn.Layer, pattern: str): + new_layer = nn.Conv2D( + in_channels=layer._in_channels, + out_channels=layer._out_channels, + kernel_size=5, + padding=2 + ) + return new_layer + + net = paddleclas.MobileNetV1() + res = net.upgrade_sublayer(layer_name_pattern=["blocks[11].depthwise_conv.conv", "blocks[12].depthwise_conv.conv"], handle_func=rep_func) + print(res) + # {'blocks[11].depthwise_conv.conv': the corresponding new_layer, 'blocks[12].depthwise_conv.conv': the corresponding new_layer} + """ + + if not isinstance(layer_name_pattern, list): + layer_name_pattern = [layer_name_pattern] + + hit_layer_pattern_list = [] + for pattern in layer_name_pattern: + # parse pattern to find target layer and its parent + layer_list = parse_pattern_str(pattern=pattern, parent_layer=self) + if not layer_list: + continue + + sub_layer_parent = layer_list[-2]["layer"] if len( + layer_list) > 1 else self + sub_layer = layer_list[-1]["layer"] + sub_layer_name = layer_list[-1]["name"] + sub_layer_index_list = layer_list[-1]["index_list"] + + new_sub_layer = handle_func(sub_layer, pattern) + + if sub_layer_index_list: + if len(sub_layer_index_list) > 1: + sub_layer_parent = getattr( + sub_layer_parent, + sub_layer_name)[sub_layer_index_list[0]] + for sub_layer_index in sub_layer_index_list[1:-1]: + sub_layer_parent = sub_layer_parent[sub_layer_index] + sub_layer_parent[sub_layer_index_list[-1]] = new_sub_layer + else: + getattr(sub_layer_parent, sub_layer_name)[ + sub_layer_index_list[0]] = new_sub_layer + else: + setattr(sub_layer_parent, sub_layer_name, new_sub_layer) + + hit_layer_pattern_list.append(pattern) + return hit_layer_pattern_list + + def stop_after(self, stop_layer_name: str) -> bool: + """stop forward and backward after 'stop_layer_name'. + + Args: + stop_layer_name (str): The name of layer that stop forward and backward after this layer. + + Returns: + bool: 'True' if successful, 'False' otherwise. + """ + + layer_list = parse_pattern_str(stop_layer_name, self) + if not layer_list: + return False + + parent_layer = self + for layer_dict in layer_list: + name, index_list = layer_dict["name"], layer_dict["index_list"] + if not set_identity(parent_layer, name, index_list): + msg = f"Failed to set the layers that after stop_layer_name('{stop_layer_name}') to IdentityLayer. The error layer's name is '{name}'." + logger.warning(msg) + return False + parent_layer = layer_dict["layer"] + + return True + + def freeze_befor(self, layer_name: str) -> bool: + """freeze the layer named layer_name and its previous layer. + + Args: + layer_name (str): The name of layer that would be freezed. 
+ + Returns: + bool: 'True' if successful, 'False' otherwise. + """ + + def stop_grad(layer, pattern): + class StopGradLayer(nn.Layer): + def __init__(self): + super().__init__() + self.layer = layer + + def forward(self, x): + x = self.layer(x) + x.stop_gradient = True + return x + + new_layer = StopGradLayer() + return new_layer + + res = self.upgrade_sublayer(layer_name, stop_grad) + if len(res) == 0: + msg = "Failed to stop the gradient befor the layer named '{layer_name}'" + logger.warning(msg) + return False + return True + + def update_res( + self, + return_patterns: Union[str, List[str]]) -> Dict[str, nn.Layer]: + """update the result(s) to be returned. + + Args: + return_patterns (Union[str, List[str]]): The name of layer to return output. + + Returns: + Dict[str, nn.Layer]: The pattern(str) and corresponding layer(nn.Layer) that have been set successfully. + """ + + # clear res_dict that could have been set + self.res_dict = {} + + class Handler(object): + def __init__(self, res_dict): + # res_dict is a reference + self.res_dict = res_dict + + def __call__(self, layer, pattern): + layer.res_dict = self.res_dict + layer.res_name = pattern + if hasattr(layer, "hook_remove_helper"): + layer.hook_remove_helper.remove() + layer.hook_remove_helper = layer.register_forward_post_hook( + save_sub_res_hook) + return layer + + handle_func = Handler(self.res_dict) + + hit_layer_pattern_list = self.upgrade_sublayer( + return_patterns, handle_func=handle_func) + + if hasattr(self, "hook_remove_helper"): + self.hook_remove_helper.remove() + self.hook_remove_helper = self.register_forward_post_hook( + self._return_dict_hook) + + return hit_layer_pattern_list + + +def save_sub_res_hook(layer, input, output): + layer.res_dict[layer.res_name] = output + + +def set_identity(parent_layer: nn.Layer, + layer_name: str, + layer_index_list: str=None) -> bool: + """set the layer specified by layer_name and layer_index_list to Indentity. + + Args: + parent_layer (nn.Layer): The parent layer of target layer specified by layer_name and layer_index_list. + layer_name (str): The name of target layer to be set to Indentity. + layer_index_list (str, optional): The index of target layer to be set to Indentity in parent_layer. Defaults to None. + + Returns: + bool: True if successfully, False otherwise. + """ + + stop_after = False + for sub_layer_name in parent_layer._sub_layers: + if stop_after: + parent_layer._sub_layers[sub_layer_name] = Identity() + continue + if sub_layer_name == layer_name: + stop_after = True + + if layer_index_list and stop_after: + layer_container = parent_layer._sub_layers[layer_name] + for num, layer_index in enumerate(layer_index_list): + stop_after = False + for i in range(num): + layer_container = layer_container[layer_index_list[i]] + for sub_layer_index in layer_container._sub_layers: + if stop_after: + parent_layer._sub_layers[layer_name][ + sub_layer_index] = Identity() + continue + if layer_index == sub_layer_index: + stop_after = True + + return stop_after + + +def parse_pattern_str(pattern: str, parent_layer: nn.Layer) -> Union[ + None, List[Dict[str, Union[nn.Layer, str, None]]]]: + """parse the string type pattern. + + Args: + pattern (str): The pattern to discribe layer. + parent_layer (nn.Layer): The root layer relative to the pattern. + + Returns: + Union[None, List[Dict[str, Union[nn.Layer, str, None]]]]: None if failed. 
If successfully, the members are layers parsed in order: + [ + {"layer": first layer, "name": first layer's name parsed, "index": first layer's index parsed if exist}, + {"layer": second layer, "name": second layer's name parsed, "index": second layer's index parsed if exist}, + ... + ] + """ + + pattern_list = pattern.split(".") + if not pattern_list: + msg = f"The pattern('{pattern}') is illegal. Please check and retry." + logger.warning(msg) + return None + + layer_list = [] + while len(pattern_list) > 0: + if '[' in pattern_list[0]: + target_layer_name = pattern_list[0].split('[')[0] + target_layer_index_list = list( + index.split(']')[0] + for index in pattern_list[0].split('[')[1:]) + else: + target_layer_name = pattern_list[0] + target_layer_index_list = None + + target_layer = getattr(parent_layer, target_layer_name, None) + + if target_layer is None: + msg = f"Not found layer named('{target_layer_name}') specifed in pattern('{pattern}')." + logger.warning(msg) + return None + + if target_layer_index_list: + for target_layer_index in target_layer_index_list: + if int(target_layer_index) < 0 or int( + target_layer_index) >= len(target_layer): + msg = f"Not found layer by index('{target_layer_index}') specifed in pattern('{pattern}'). The index should < {len(target_layer)} and > 0." + logger.warning(msg) + return None + target_layer = target_layer[target_layer_index] + + layer_list.append({ + "layer": target_layer, + "name": target_layer_name, + "index_list": target_layer_index_list + }) + + pattern_list = pattern_list[1:] + parent_layer = target_layer + + return layer_list diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/__init__.py new file mode 100644 index 000000000..4a4d48e71 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/__init__.py @@ -0,0 +1,8 @@ +from .resnet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152, ResNet18_vd, ResNet34_vd, ResNet50_vd, ResNet101_vd, ResNet152_vd +from .hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W64_C +from .mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75, MobileNetV1 +from .mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25 +from .mobilenet_v4 import MobileNetV4_conv_small, MobileNetV4_conv_medium, MobileNetV4_conv_large, MobileNetV4_hybrid_medium, MobileNetV4_hybrid_large +from .inception_v3 import InceptionV3 +from .vgg import VGG11, VGG13, VGG16, VGG19 +from .pp_lcnet import PPLCNetBaseNet, PPLCNet_x0_25, PPLCNet_x0_35, PPLCNet_x0_5, PPLCNet_x0_75, PPLCNet_x1_0, PPLCNet_x1_5, PPLCNet_x2_0, PPLCNet_x2_5 diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/custom_devices_layers.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/custom_devices_layers.py new file mode 100644 index 000000000..547646b96 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/custom_devices_layers.py @@ -0,0 +1,37 @@ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +__all__ = ["AdaptiveAvgPool2D"] + + +class 
AdaptiveAvgPool2D(nn.AdaptiveAvgPool2D): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + if paddle.device.get_device().startswith("npu"): + self.device = "npu" + else: + self.device = None + + if isinstance(self._output_size, int) and self._output_size == 1: + self._gap = True + elif isinstance(self._output_size, tuple) and self._output_size[ + 0] == 1 and self._output_size[1] == 1: + self._gap = True + else: + self._gap = False + + def forward(self, x): + if self.device == "npu" and self._gap: + # Global Average Pooling + N, C, _, _ = x.shape + x_mean = paddle.mean(x, axis=[2, 3]) + x_mean = paddle.reshape(x_mean, [N, C, 1, 1]) + return x_mean + else: + return F.adaptive_avg_pool2d( + x, + output_size=self._output_size, + data_format=self._data_format, + name=self._name, ) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/esnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/esnet.py new file mode 100644 index 000000000..8d2872eae --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/esnet.py @@ -0,0 +1,369 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
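The `AdaptiveAvgPool2D` wrapper above special-cases global average pooling on NPU by taking a mean over the spatial axes, which is numerically equivalent to `adaptive_avg_pool2d` with `output_size=1`. A quick illustrative check, not part of this patch and using arbitrary shapes:

# Equivalence check: mean over H/W == adaptive_avg_pool2d(output_size=1).
import paddle
import paddle.nn.functional as F

x = paddle.rand([4, 32, 7, 7])
gap = F.adaptive_avg_pool2d(x, output_size=1)
mean = paddle.reshape(paddle.mean(x, axis=[2, 3]), [4, 32, 1, 1])
print(float((gap - mean).abs().max()))  # expected to be ~0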
+ +from __future__ import absolute_import, division, print_function +import math +import paddle +from paddle import ParamAttr, reshape, transpose, concat, split +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D +from paddle.nn.initializer import KaimingNormal +from paddle.regularizer import L2Decay + +from ..base.theseus_layer import TheseusLayer +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "ESNet_x0_25": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_25_pretrained.pdparams", + "ESNet_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_5_pretrained.pdparams", + "ESNet_x0_75": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_75_pretrained.pdparams", + "ESNet_x1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x1_0_pretrained.pdparams", +} + +MODEL_STAGES_PATTERN = {"ESNet": ["blocks[2]", "blocks[9]", "blocks[12]"]} + +__all__ = list(MODEL_URLS.keys()) + + +def channel_shuffle(x, groups): + batch_size, num_channels, height, width = x.shape[0:4] + channels_per_group = num_channels // groups + x = reshape( + x=x, shape=[batch_size, groups, channels_per_group, height, width]) + x = transpose(x=x, perm=[0, 2, 1, 3, 4]) + x = reshape(x=x, shape=[batch_size, num_channels, height, width]) + return x + + +def make_divisible(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + if_act=True): + super().__init__() + self.conv = Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(initializer=KaimingNormal()), + bias_attr=False) + + self.bn = BatchNorm( + out_channels, + param_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + self.if_act = if_act + self.hardswish = nn.Hardswish() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.if_act: + x = self.hardswish(x) + return x + + +class SEModule(TheseusLayer): + def __init__(self, channel, reduction=4): + super().__init__() + self.avg_pool = AdaptiveAvgPool2D(1) + self.conv1 = Conv2D( + in_channels=channel, + out_channels=channel // reduction, + kernel_size=1, + stride=1, + padding=0) + self.relu = nn.ReLU() + self.conv2 = Conv2D( + in_channels=channel // reduction, + out_channels=channel, + kernel_size=1, + stride=1, + padding=0) + self.hardsigmoid = nn.Hardsigmoid() + + def forward(self, x): + identity = x + x = self.avg_pool(x) + x = self.conv1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.hardsigmoid(x) + x = paddle.multiply(x=identity, y=x) + return x + + +class ESBlock1(TheseusLayer): + def __init__(self, in_channels, out_channels): + super().__init__() + self.pw_1_1 = ConvBNLayer( + in_channels=in_channels // 2, + out_channels=out_channels // 2, + kernel_size=1, + stride=1) + self.dw_1 = ConvBNLayer( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=3, + stride=1, + groups=out_channels // 2, + if_act=False) + self.se = SEModule(out_channels) + + self.pw_1_2 = ConvBNLayer( + 
in_channels=out_channels, + out_channels=out_channels // 2, + kernel_size=1, + stride=1) + + def forward(self, x): + x1, x2 = split( + x, num_or_sections=[x.shape[1] // 2, x.shape[1] // 2], axis=1) + x2 = self.pw_1_1(x2) + x3 = self.dw_1(x2) + x3 = concat([x2, x3], axis=1) + x3 = self.se(x3) + x3 = self.pw_1_2(x3) + x = concat([x1, x3], axis=1) + return channel_shuffle(x, 2) + + +class ESBlock2(TheseusLayer): + def __init__(self, in_channels, out_channels): + super().__init__() + + # branch1 + self.dw_1 = ConvBNLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=2, + groups=in_channels, + if_act=False) + self.pw_1 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels // 2, + kernel_size=1, + stride=1) + # branch2 + self.pw_2_1 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels // 2, + kernel_size=1) + self.dw_2 = ConvBNLayer( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=3, + stride=2, + groups=out_channels // 2, + if_act=False) + self.se = SEModule(out_channels // 2) + self.pw_2_2 = ConvBNLayer( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=1) + self.concat_dw = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + groups=out_channels) + self.concat_pw = ConvBNLayer( + in_channels=out_channels, out_channels=out_channels, kernel_size=1) + + def forward(self, x): + x1 = self.dw_1(x) + x1 = self.pw_1(x1) + x2 = self.pw_2_1(x) + x2 = self.dw_2(x2) + x2 = self.se(x2) + x2 = self.pw_2_2(x2) + x = concat([x1, x2], axis=1) + x = self.concat_dw(x) + x = self.concat_pw(x) + return x + + +class ESNet(TheseusLayer): + def __init__(self, + stages_pattern, + class_num=1000, + scale=1.0, + dropout_prob=0.2, + class_expand=1280, + return_patterns=None, + return_stages=None): + super().__init__() + self.scale = scale + self.class_num = class_num + self.class_expand = class_expand + stage_repeats = [3, 7, 3] + stage_out_channels = [ + -1, 24, make_divisible(116 * scale), make_divisible(232 * scale), + make_divisible(464 * scale), 1024 + ] + + self.conv1 = ConvBNLayer( + in_channels=3, + out_channels=stage_out_channels[1], + kernel_size=3, + stride=2) + self.max_pool = MaxPool2D(kernel_size=3, stride=2, padding=1) + + block_list = [] + for stage_id, num_repeat in enumerate(stage_repeats): + for i in range(num_repeat): + if i == 0: + block = ESBlock2( + in_channels=stage_out_channels[stage_id + 1], + out_channels=stage_out_channels[stage_id + 2]) + else: + block = ESBlock1( + in_channels=stage_out_channels[stage_id + 2], + out_channels=stage_out_channels[stage_id + 2]) + block_list.append(block) + self.blocks = nn.Sequential(*block_list) + + self.conv2 = ConvBNLayer( + in_channels=stage_out_channels[-2], + out_channels=stage_out_channels[-1], + kernel_size=1) + + self.avg_pool = AdaptiveAvgPool2D(1) + + self.last_conv = Conv2D( + in_channels=stage_out_channels[-1], + out_channels=self.class_expand, + kernel_size=1, + stride=1, + padding=0, + bias_attr=False) + self.hardswish = nn.Hardswish() + self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer") + self.flatten = nn.Flatten(start_axis=1, stop_axis=-1) + self.fc = Linear(self.class_expand, self.class_num) + + super().init_res( + stages_pattern, + return_patterns=return_patterns, + return_stages=return_stages) + + def forward(self, x): + x = self.conv1(x) + x = self.max_pool(x) + x = self.blocks(x) + x = self.conv2(x) + x = self.avg_pool(x) + x = self.last_conv(x) + x = 
self.hardswish(x) + x = self.dropout(x) + x = self.flatten(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ESNet_x0_25(pretrained=False, use_ssld=False, **kwargs): + """ + ESNet_x0_25 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ESNet_x0_25` model depends on args. + """ + model = ESNet( + scale=0.25, stages_pattern=MODEL_STAGES_PATTERN["ESNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ESNet_x0_25"], use_ssld) + return model + + +def ESNet_x0_5(pretrained=False, use_ssld=False, **kwargs): + """ + ESNet_x0_5 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ESNet_x0_5` model depends on args. + """ + model = ESNet( + scale=0.5, stages_pattern=MODEL_STAGES_PATTERN["ESNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ESNet_x0_5"], use_ssld) + return model + + +def ESNet_x0_75(pretrained=False, use_ssld=False, **kwargs): + """ + ESNet_x0_75 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ESNet_x0_75` model depends on args. + """ + model = ESNet( + scale=0.75, stages_pattern=MODEL_STAGES_PATTERN["ESNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ESNet_x0_75"], use_ssld) + return model + + +def ESNet_x1_0(pretrained=False, use_ssld=False, **kwargs): + """ + ESNet_x1_0 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ESNet_x1_0` model depends on args. + """ + model = ESNet( + scale=1.0, stages_pattern=MODEL_STAGES_PATTERN["ESNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ESNet_x1_0"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/hrnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/hrnet.py new file mode 100644 index 000000000..fd6d32557 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/hrnet.py @@ -0,0 +1,797 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1908.07919 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import paddle +from paddle import nn +from paddle import ParamAttr +from paddle.nn.functional import upsample +from paddle.nn.initializer import Uniform + +from ..base.theseus_layer import TheseusLayer, Identity +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "HRNet_W18_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W18_C_pretrained.pdparams", + "HRNet_W30_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W30_C_pretrained.pdparams", + "HRNet_W32_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W32_C_pretrained.pdparams", + "HRNet_W40_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W40_C_pretrained.pdparams", + "HRNet_W44_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W44_C_pretrained.pdparams", + "HRNet_W48_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W48_C_pretrained.pdparams", + "HRNet_W64_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W64_C_pretrained.pdparams" +} + +MODEL_STAGES_PATTERN = {"HRNet": ["st4"]} + +__all__ = list(MODEL_URLS.keys()) + + +def _create_act(act): + if act == "hardswish": + return nn.Hardswish() + elif act == "relu": + return nn.ReLU() + elif act is None: + return Identity() + else: + raise RuntimeError( + "The activation function is not supported: {}".format(act)) + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act="relu"): + super().__init__() + + self.conv = nn.Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + bias_attr=False) + self.bn = nn.BatchNorm(num_filters, act=None) + self.act = _create_act(act) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.act(x) + return x + + +class BottleneckBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + has_se, + stride=1, + downsample=False): + super().__init__() + + self.has_se = has_se + self.downsample = downsample + + self.conv1 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act="relu") + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu") + self.conv3 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None) + + if self.downsample: + self.conv_down = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + act=None) + + if self.has_se: + self.se = SELayer( + num_channels=num_filters * 4, + num_filters=num_filters * 4, + reduction_ratio=16) + self.relu = nn.ReLU() + + def forward(self, x, 
res_dict=None): + residual = x + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + if self.downsample: + residual = self.conv_down(residual) + if self.has_se: + x = self.se(x) + x = paddle.add(x=residual, y=x) + x = self.relu(x) + return x + + +class BasicBlock(nn.Layer): + def __init__(self, num_channels, num_filters, has_se=False): + super().__init__() + + self.has_se = has_se + + self.conv1 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=1, + act="relu") + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=1, + act=None) + + if self.has_se: + self.se = SELayer( + num_channels=num_filters, + num_filters=num_filters, + reduction_ratio=16) + self.relu = nn.ReLU() + + def forward(self, x): + residual = x + x = self.conv1(x) + x = self.conv2(x) + + if self.has_se: + x = self.se(x) + + x = paddle.add(x=residual, y=x) + x = self.relu(x) + return x + + +class SELayer(TheseusLayer): + def __init__(self, num_channels, num_filters, reduction_ratio): + super().__init__() + + self.avg_pool = nn.AdaptiveAvgPool2D(1) + + self._num_channels = num_channels + + med_ch = int(num_channels / reduction_ratio) + stdv = 1.0 / math.sqrt(num_channels * 1.0) + self.fc_squeeze = nn.Linear( + num_channels, + med_ch, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + self.relu = nn.ReLU() + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.fc_excitation = nn.Linear( + med_ch, + num_filters, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + self.sigmoid = nn.Sigmoid() + + def forward(self, x, res_dict=None): + residual = x + x = self.avg_pool(x) + x = paddle.squeeze(x, axis=[2, 3]) + x = self.fc_squeeze(x) + x = self.relu(x) + x = self.fc_excitation(x) + x = self.sigmoid(x) + x = paddle.unsqueeze(x, axis=[2, 3]) + x = residual * x + return x + + +class Stage(TheseusLayer): + def __init__(self, num_modules, num_filters, has_se=False): + super().__init__() + + self._num_modules = num_modules + + self.stage_func_list = nn.LayerList() + for i in range(num_modules): + self.stage_func_list.append( + HighResolutionModule( + num_filters=num_filters, has_se=has_se)) + + def forward(self, x, res_dict=None): + x = x + for idx in range(self._num_modules): + x = self.stage_func_list[idx](x) + return x + + +class HighResolutionModule(TheseusLayer): + def __init__(self, num_filters, has_se=False): + super().__init__() + + self.basic_block_list = nn.LayerList() + + for i in range(len(num_filters)): + self.basic_block_list.append( + nn.Sequential(* [ + BasicBlock( + num_channels=num_filters[i], + num_filters=num_filters[i], + has_se=has_se) for j in range(4) + ])) + + self.fuse_func = FuseLayers( + in_channels=num_filters, out_channels=num_filters) + + def forward(self, x, res_dict=None): + out = [] + for idx, xi in enumerate(x): + basic_block_list = self.basic_block_list[idx] + for basic_block_func in basic_block_list: + xi = basic_block_func(xi) + out.append(xi) + out = self.fuse_func(out) + return out + + +class FuseLayers(TheseusLayer): + def __init__(self, in_channels, out_channels): + super().__init__() + + self._actual_ch = len(in_channels) + self._in_channels = in_channels + + self.residual_func_list = nn.LayerList() + self.relu = nn.ReLU() + for i in range(len(in_channels)): + for j in range(len(in_channels)): + if j > i: + self.residual_func_list.append( + ConvBNLayer( + num_channels=in_channels[j], + num_filters=out_channels[i], + filter_size=1, + stride=1, + act=None)) + elif j < i: + 
pre_num_filters = in_channels[j] + for k in range(i - j): + if k == i - j - 1: + self.residual_func_list.append( + ConvBNLayer( + num_channels=pre_num_filters, + num_filters=out_channels[i], + filter_size=3, + stride=2, + act=None)) + pre_num_filters = out_channels[i] + else: + self.residual_func_list.append( + ConvBNLayer( + num_channels=pre_num_filters, + num_filters=out_channels[j], + filter_size=3, + stride=2, + act="relu")) + pre_num_filters = out_channels[j] + + def forward(self, x, res_dict=None): + out = [] + residual_func_idx = 0 + for i in range(len(self._in_channels)): + residual = x[i] + for j in range(len(self._in_channels)): + if j > i: + xj = self.residual_func_list[residual_func_idx](x[j]) + residual_func_idx += 1 + + xj = upsample(xj, scale_factor=2**(j - i), mode="nearest") + residual = paddle.add(x=residual, y=xj) + elif j < i: + xj = x[j] + for k in range(i - j): + xj = self.residual_func_list[residual_func_idx](xj) + residual_func_idx += 1 + + residual = paddle.add(x=residual, y=xj) + + residual = self.relu(residual) + out.append(residual) + + return out + + +class LastClsOut(TheseusLayer): + def __init__(self, + num_channel_list, + has_se, + num_filters_list=[32, 64, 128, 256]): + super().__init__() + + self.func_list = nn.LayerList() + for idx in range(len(num_channel_list)): + self.func_list.append( + BottleneckBlock( + num_channels=num_channel_list[idx], + num_filters=num_filters_list[idx], + has_se=has_se, + downsample=True)) + + def forward(self, x, res_dict=None): + out = [] + for idx, xi in enumerate(x): + xi = self.func_list[idx](xi) + out.append(xi) + return out + + +class HRNet(TheseusLayer): + """ + HRNet + Args: + width: int=18. Base channel number of HRNet. + has_se: bool=False. If 'True', add se module to HRNet. + class_num: int=1000. Output num of last fc layer. + Returns: + model: nn.Layer. Specific HRNet model depends on args. 
+ """ + + def __init__(self, + stages_pattern, + width=18, + has_se=False, + class_num=1000, + return_patterns=None, + return_stages=None): + super().__init__() + + self.width = width + self.has_se = has_se + self._class_num = class_num + + channels_2 = [self.width, self.width * 2] + channels_3 = [self.width, self.width * 2, self.width * 4] + channels_4 = [ + self.width, self.width * 2, self.width * 4, self.width * 8 + ] + + self.conv_layer1_1 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=3, + stride=2, + act="relu") + + self.conv_layer1_2 = ConvBNLayer( + num_channels=64, + num_filters=64, + filter_size=3, + stride=2, + act="relu") + + self.layer1 = nn.Sequential(* [ + BottleneckBlock( + num_channels=64 if i == 0 else 256, + num_filters=64, + has_se=has_se, + stride=1, + downsample=True if i == 0 else False) for i in range(4) + ]) + + self.conv_tr1_1 = ConvBNLayer( + num_channels=256, num_filters=width, filter_size=3) + self.conv_tr1_2 = ConvBNLayer( + num_channels=256, num_filters=width * 2, filter_size=3, stride=2) + + self.st2 = Stage( + num_modules=1, num_filters=channels_2, has_se=self.has_se) + + self.conv_tr2 = ConvBNLayer( + num_channels=width * 2, + num_filters=width * 4, + filter_size=3, + stride=2) + self.st3 = Stage( + num_modules=4, num_filters=channels_3, has_se=self.has_se) + + self.conv_tr3 = ConvBNLayer( + num_channels=width * 4, + num_filters=width * 8, + filter_size=3, + stride=2) + + self.st4 = Stage( + num_modules=3, num_filters=channels_4, has_se=self.has_se) + + # classification + num_filters_list = [32, 64, 128, 256] + self.last_cls = LastClsOut( + num_channel_list=channels_4, + has_se=self.has_se, + num_filters_list=num_filters_list) + + last_num_filters = [256, 512, 1024] + self.cls_head_conv_list = nn.LayerList() + for idx in range(3): + self.cls_head_conv_list.append( + ConvBNLayer( + num_channels=num_filters_list[idx] * 4, + num_filters=last_num_filters[idx], + filter_size=3, + stride=2)) + + self.conv_last = ConvBNLayer( + num_channels=1024, num_filters=2048, filter_size=1, stride=1) + + self.avg_pool = nn.AdaptiveAvgPool2D(1) + + stdv = 1.0 / math.sqrt(2048 * 1.0) + self.flatten = nn.Flatten(start_axis=1, stop_axis=-1) + + self.fc = nn.Linear( + 2048, + class_num, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + + super().init_res( + stages_pattern, + return_patterns=return_patterns, + return_stages=return_stages) + + def forward(self, x): + x = self.conv_layer1_1(x) + x = self.conv_layer1_2(x) + + x = self.layer1(x) + + tr1_1 = self.conv_tr1_1(x) + tr1_2 = self.conv_tr1_2(x) + x = self.st2([tr1_1, tr1_2]) + + tr2 = self.conv_tr2(x[-1]) + x.append(tr2) + x = self.st3(x) + + tr3 = self.conv_tr3(x[-1]) + x.append(tr3) + x = self.st4(x) + + x = self.last_cls(x) + + y = x[0] + for idx in range(3): + y = paddle.add(x[idx + 1], self.cls_head_conv_list[idx](y)) + + y = self.conv_last(y) + y = self.avg_pool(y) + y = self.flatten(y) + y = self.fc(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def HRNet_W18_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W18_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. 
+ If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W18_C` model depends on args. + """ + model = HRNet( + width=18, stages_pattern=MODEL_STAGES_PATTERN["HRNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W18_C"], use_ssld) + return model + + +def HRNet_W30_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W30_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W30_C` model depends on args. + """ + model = HRNet( + width=30, stages_pattern=MODEL_STAGES_PATTERN["HRNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W30_C"], use_ssld) + return model + + +def HRNet_W32_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W32_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W32_C` model depends on args. + """ + model = HRNet( + width=32, stages_pattern=MODEL_STAGES_PATTERN["HRNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W32_C"], use_ssld) + return model + + +def HRNet_W40_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W40_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W40_C` model depends on args. + """ + model = HRNet( + width=40, stages_pattern=MODEL_STAGES_PATTERN["HRNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W40_C"], use_ssld) + return model + + +def HRNet_W44_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W44_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W44_C` model depends on args. + """ + model = HRNet( + width=44, stages_pattern=MODEL_STAGES_PATTERN["HRNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W44_C"], use_ssld) + return model + + +def HRNet_W48_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W48_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W48_C` model depends on args. + """ + model = HRNet( + width=48, stages_pattern=MODEL_STAGES_PATTERN["HRNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W48_C"], use_ssld) + return model + + +def HRNet_W60_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W60_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. 
+ use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W60_C` model depends on args. + """ + model = HRNet( + width=60, stages_pattern=MODEL_STAGES_PATTERN["HRNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W60_C"], use_ssld) + return model + + +def HRNet_W64_C(pretrained=False, use_ssld=False, **kwargs): + """ + HRNet_W64_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `HRNet_W64_C` model depends on args. + """ + model = HRNet( + width=64, stages_pattern=MODEL_STAGES_PATTERN["HRNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HRNet_W64_C"], use_ssld) + return model + + +def SE_HRNet_W18_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W18_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W18_C` model depends on args. + """ + model = HRNet( + width=18, + stages_pattern=MODEL_STAGES_PATTERN["HRNet"], + has_se=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W18_C"], use_ssld) + return model + + +def SE_HRNet_W30_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W30_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W30_C` model depends on args. + """ + model = HRNet( + width=30, + stages_pattern=MODEL_STAGES_PATTERN["HRNet"], + has_se=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W30_C"], use_ssld) + return model + + +def SE_HRNet_W32_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W32_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W32_C` model depends on args. + """ + model = HRNet( + width=32, + stages_pattern=MODEL_STAGES_PATTERN["HRNet"], + has_se=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W32_C"], use_ssld) + return model + + +def SE_HRNet_W40_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W40_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W40_C` model depends on args. + """ + model = HRNet( + width=40, + stages_pattern=MODEL_STAGES_PATTERN["HRNet"], + has_se=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W40_C"], use_ssld) + return model + + +def SE_HRNet_W44_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W44_C + Args: + pretrained: bool=False or str. 
If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W44_C` model depends on args. + """ + model = HRNet( + width=44, + stages_pattern=MODEL_STAGES_PATTERN["HRNet"], + has_se=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W44_C"], use_ssld) + return model + + +def SE_HRNet_W48_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W48_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W48_C` model depends on args. + """ + model = HRNet( + width=48, + stages_pattern=MODEL_STAGES_PATTERN["HRNet"], + has_se=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W48_C"], use_ssld) + return model + + +def SE_HRNet_W60_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W60_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W60_C` model depends on args. + """ + model = HRNet( + width=60, + stages_pattern=MODEL_STAGES_PATTERN["HRNet"], + has_se=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W60_C"], use_ssld) + return model + + +def SE_HRNet_W64_C(pretrained=False, use_ssld=False, **kwargs): + """ + SE_HRNet_W64_C + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `SE_HRNet_W64_C` model depends on args. + """ + model = HRNet( + width=64, + stages_pattern=MODEL_STAGES_PATTERN["HRNet"], + has_se=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W64_C"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/inception_v3.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/inception_v3.py new file mode 100644 index 000000000..b01887020 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/inception_v3.py @@ -0,0 +1,559 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
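+
+# Minimal usage sketch (illustrative; it assumes the InceptionV3 factory defined at the
+# end of this file and the conventional 299x299 InceptionV3 input resolution):
+#
+#     import paddle
+#     model = InceptionV3(pretrained=False, class_num=1000)
+#     logits = model(paddle.rand([1, 3, 299, 299]))   # -> shape [1, 1000]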
+ +# reference: https://arxiv.org/abs/1512.00567v3 + +from __future__ import absolute_import, division, print_function +import math +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +from ..base.theseus_layer import TheseusLayer +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "InceptionV3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/InceptionV3_pretrained.pdparams" +} + +MODEL_STAGES_PATTERN = { + "InceptionV3": [ + "inception_block_list[2]", "inception_block_list[3]", + "inception_block_list[7]", "inception_block_list[8]", + "inception_block_list[10]" + ] +} + +__all__ = MODEL_URLS.keys() +''' +InceptionV3 config: dict. + key: inception blocks of InceptionV3. + values: conv num in different blocks. +''' +NET_CONFIG = { + "inception_a": [[192, 256, 288], [32, 64, 64]], + "inception_b": [288], + "inception_c": [[768, 768, 768, 768], [128, 160, 160, 192]], + "inception_d": [768], + "inception_e": [1280, 2048] +} + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + padding=0, + groups=1, + act="relu"): + super().__init__() + self.act = act + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=groups, + bias_attr=False) + self.bn = BatchNorm(num_filters) + self.relu = nn.ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.act: + x = self.relu(x) + return x + + +class InceptionStem(TheseusLayer): + def __init__(self): + super().__init__() + self.conv_1a_3x3 = ConvBNLayer( + num_channels=3, + num_filters=32, + filter_size=3, + stride=2, + act="relu") + self.conv_2a_3x3 = ConvBNLayer( + num_channels=32, + num_filters=32, + filter_size=3, + stride=1, + act="relu") + self.conv_2b_3x3 = ConvBNLayer( + num_channels=32, + num_filters=64, + filter_size=3, + padding=1, + act="relu") + + self.max_pool = MaxPool2D(kernel_size=3, stride=2, padding=0) + self.conv_3b_1x1 = ConvBNLayer( + num_channels=64, num_filters=80, filter_size=1, act="relu") + self.conv_4a_3x3 = ConvBNLayer( + num_channels=80, num_filters=192, filter_size=3, act="relu") + + def forward(self, x): + x = self.conv_1a_3x3(x) + x = self.conv_2a_3x3(x) + x = self.conv_2b_3x3(x) + x = self.max_pool(x) + x = self.conv_3b_1x1(x) + x = self.conv_4a_3x3(x) + x = self.max_pool(x) + return x + + +class InceptionA(TheseusLayer): + def __init__(self, num_channels, pool_features): + super().__init__() + self.branch1x1 = ConvBNLayer( + num_channels=num_channels, + num_filters=64, + filter_size=1, + act="relu") + self.branch5x5_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=48, + filter_size=1, + act="relu") + self.branch5x5_2 = ConvBNLayer( + num_channels=48, + num_filters=64, + filter_size=5, + padding=2, + act="relu") + + self.branch3x3dbl_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=64, + filter_size=1, + act="relu") + self.branch3x3dbl_2 = ConvBNLayer( + num_channels=64, + num_filters=96, + filter_size=3, + padding=1, + act="relu") + self.branch3x3dbl_3 = ConvBNLayer( + num_channels=96, + num_filters=96, + filter_size=3, + padding=1, + act="relu") + self.branch_pool = AvgPool2D( + kernel_size=3, stride=1, padding=1, exclusive=False) + self.branch_pool_conv = ConvBNLayer( + 
num_channels=num_channels, + num_filters=pool_features, + filter_size=1, + act="relu") + + def forward(self, x): + branch1x1 = self.branch1x1(x) + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = self.branch_pool(x) + branch_pool = self.branch_pool_conv(branch_pool) + x = paddle.concat( + [branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=1) + return x + + +class InceptionB(TheseusLayer): + def __init__(self, num_channels): + super().__init__() + self.branch3x3 = ConvBNLayer( + num_channels=num_channels, + num_filters=384, + filter_size=3, + stride=2, + act="relu") + self.branch3x3dbl_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=64, + filter_size=1, + act="relu") + self.branch3x3dbl_2 = ConvBNLayer( + num_channels=64, + num_filters=96, + filter_size=3, + padding=1, + act="relu") + self.branch3x3dbl_3 = ConvBNLayer( + num_channels=96, + num_filters=96, + filter_size=3, + stride=2, + act="relu") + self.branch_pool = MaxPool2D(kernel_size=3, stride=2) + + def forward(self, x): + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = self.branch_pool(x) + + x = paddle.concat([branch3x3, branch3x3dbl, branch_pool], axis=1) + + return x + + +class InceptionC(TheseusLayer): + def __init__(self, num_channels, channels_7x7): + super().__init__() + self.branch1x1 = ConvBNLayer( + num_channels=num_channels, + num_filters=192, + filter_size=1, + act="relu") + + self.branch7x7_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=channels_7x7, + filter_size=1, + stride=1, + act="relu") + self.branch7x7_2 = ConvBNLayer( + num_channels=channels_7x7, + num_filters=channels_7x7, + filter_size=(1, 7), + stride=1, + padding=(0, 3), + act="relu") + self.branch7x7_3 = ConvBNLayer( + num_channels=channels_7x7, + num_filters=192, + filter_size=(7, 1), + stride=1, + padding=(3, 0), + act="relu") + + self.branch7x7dbl_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=channels_7x7, + filter_size=1, + act="relu") + self.branch7x7dbl_2 = ConvBNLayer( + num_channels=channels_7x7, + num_filters=channels_7x7, + filter_size=(7, 1), + padding=(3, 0), + act="relu") + self.branch7x7dbl_3 = ConvBNLayer( + num_channels=channels_7x7, + num_filters=channels_7x7, + filter_size=(1, 7), + padding=(0, 3), + act="relu") + self.branch7x7dbl_4 = ConvBNLayer( + num_channels=channels_7x7, + num_filters=channels_7x7, + filter_size=(7, 1), + padding=(3, 0), + act="relu") + self.branch7x7dbl_5 = ConvBNLayer( + num_channels=channels_7x7, + num_filters=192, + filter_size=(1, 7), + padding=(0, 3), + act="relu") + + self.branch_pool = AvgPool2D( + kernel_size=3, stride=1, padding=1, exclusive=False) + self.branch_pool_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=192, + filter_size=1, + act="relu") + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = self.branch_pool(x) + branch_pool = 
self.branch_pool_conv(branch_pool) + + x = paddle.concat( + [branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=1) + + return x + + +class InceptionD(TheseusLayer): + def __init__(self, num_channels): + super().__init__() + self.branch3x3_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=192, + filter_size=1, + act="relu") + self.branch3x3_2 = ConvBNLayer( + num_channels=192, + num_filters=320, + filter_size=3, + stride=2, + act="relu") + self.branch7x7x3_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=192, + filter_size=1, + act="relu") + self.branch7x7x3_2 = ConvBNLayer( + num_channels=192, + num_filters=192, + filter_size=(1, 7), + padding=(0, 3), + act="relu") + self.branch7x7x3_3 = ConvBNLayer( + num_channels=192, + num_filters=192, + filter_size=(7, 1), + padding=(3, 0), + act="relu") + self.branch7x7x3_4 = ConvBNLayer( + num_channels=192, + num_filters=192, + filter_size=3, + stride=2, + act="relu") + self.branch_pool = MaxPool2D(kernel_size=3, stride=2) + + def forward(self, x): + branch3x3 = self.branch3x3_1(x) + branch3x3 = self.branch3x3_2(branch3x3) + + branch7x7x3 = self.branch7x7x3_1(x) + branch7x7x3 = self.branch7x7x3_2(branch7x7x3) + branch7x7x3 = self.branch7x7x3_3(branch7x7x3) + branch7x7x3 = self.branch7x7x3_4(branch7x7x3) + + branch_pool = self.branch_pool(x) + + x = paddle.concat([branch3x3, branch7x7x3, branch_pool], axis=1) + return x + + +class InceptionE(TheseusLayer): + def __init__(self, num_channels): + super().__init__() + self.branch1x1 = ConvBNLayer( + num_channels=num_channels, + num_filters=320, + filter_size=1, + act="relu") + self.branch3x3_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=384, + filter_size=1, + act="relu") + self.branch3x3_2a = ConvBNLayer( + num_channels=384, + num_filters=384, + filter_size=(1, 3), + padding=(0, 1), + act="relu") + self.branch3x3_2b = ConvBNLayer( + num_channels=384, + num_filters=384, + filter_size=(3, 1), + padding=(1, 0), + act="relu") + + self.branch3x3dbl_1 = ConvBNLayer( + num_channels=num_channels, + num_filters=448, + filter_size=1, + act="relu") + self.branch3x3dbl_2 = ConvBNLayer( + num_channels=448, + num_filters=384, + filter_size=3, + padding=1, + act="relu") + self.branch3x3dbl_3a = ConvBNLayer( + num_channels=384, + num_filters=384, + filter_size=(1, 3), + padding=(0, 1), + act="relu") + self.branch3x3dbl_3b = ConvBNLayer( + num_channels=384, + num_filters=384, + filter_size=(3, 1), + padding=(1, 0), + act="relu") + self.branch_pool = AvgPool2D( + kernel_size=3, stride=1, padding=1, exclusive=False) + self.branch_pool_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=192, + filter_size=1, + act="relu") + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = paddle.concat(branch3x3, axis=1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = paddle.concat(branch3x3dbl, axis=1) + + branch_pool = self.branch_pool(x) + branch_pool = self.branch_pool_conv(branch_pool) + + x = paddle.concat( + [branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=1) + return x + + +class Inception_V3(TheseusLayer): + """ + Inception_V3 + Args: + config: dict. config of Inception_V3. + class_num: int=1000. The number of classes. + pretrained: (True or False) or path of pretrained_model. 
Whether to load the pretrained model. + Returns: + model: nn.Layer. Specific Inception_V3 model depends on args. + """ + + def __init__(self, + config, + stages_pattern, + class_num=1000, + return_patterns=None, + return_stages=None): + super().__init__() + + self.inception_a_list = config["inception_a"] + self.inception_c_list = config["inception_c"] + self.inception_b_list = config["inception_b"] + self.inception_d_list = config["inception_d"] + self.inception_e_list = config["inception_e"] + + self.inception_stem = InceptionStem() + + self.inception_block_list = nn.LayerList() + for i in range(len(self.inception_a_list[0])): + inception_a = InceptionA(self.inception_a_list[0][i], + self.inception_a_list[1][i]) + self.inception_block_list.append(inception_a) + + for i in range(len(self.inception_b_list)): + inception_b = InceptionB(self.inception_b_list[i]) + self.inception_block_list.append(inception_b) + + for i in range(len(self.inception_c_list[0])): + inception_c = InceptionC(self.inception_c_list[0][i], + self.inception_c_list[1][i]) + self.inception_block_list.append(inception_c) + + for i in range(len(self.inception_d_list)): + inception_d = InceptionD(self.inception_d_list[i]) + self.inception_block_list.append(inception_d) + + for i in range(len(self.inception_e_list)): + inception_e = InceptionE(self.inception_e_list[i]) + self.inception_block_list.append(inception_e) + + self.avg_pool = AdaptiveAvgPool2D(1) + self.dropout = Dropout(p=0.2, mode="downscale_in_infer") + stdv = 1.0 / math.sqrt(2048 * 1.0) + self.fc = Linear( + 2048, + class_num, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr()) + + super().init_res( + stages_pattern, + return_patterns=return_patterns, + return_stages=return_stages) + + def forward(self, x): + x = self.inception_stem(x) + for inception_block in self.inception_block_list: + x = inception_block(x) + x = self.avg_pool(x) + x = paddle.reshape(x, shape=[-1, 2048]) + x = self.dropout(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def InceptionV3(pretrained=False, use_ssld=False, **kwargs): + """ + InceptionV3 + Args: + pretrained: bool=false or str. if `true` load pretrained parameters, `false` otherwise. + if str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `InceptionV3` model + """ + model = Inception_V3( + NET_CONFIG, + stages_pattern=MODEL_STAGES_PATTERN["InceptionV3"], + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["InceptionV3"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/mobilenet_v1.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/mobilenet_v1.py new file mode 100644 index 000000000..4e6706382 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/mobilenet_v1.py @@ -0,0 +1,259 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1704.04861 + +from __future__ import absolute_import, division, print_function + +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear, ReLU, Flatten +from paddle.nn import AdaptiveAvgPool2D +from paddle.nn.initializer import KaimingNormal + +from ..base.theseus_layer import TheseusLayer +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "MobileNetV1_x0_25": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_25_pretrained.pdparams", + "MobileNetV1_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_5_pretrained.pdparams", + "MobileNetV1_x0_75": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_75_pretrained.pdparams", + "MobileNetV1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_pretrained.pdparams" +} + +MODEL_STAGES_PATTERN = { + "MobileNetV1": ["blocks[0]", "blocks[2]", "blocks[4]", "blocks[10]"] +} + +__all__ = MODEL_URLS.keys() + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + filter_size, + num_filters, + stride, + padding, + num_groups=1): + super().__init__() + + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + weight_attr=ParamAttr(initializer=KaimingNormal()), + bias_attr=False) + self.bn = BatchNorm(num_filters) + self.relu = ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class DepthwiseSeparable(TheseusLayer): + def __init__(self, num_channels, num_filters1, num_filters2, num_groups, + stride, scale): + super().__init__() + + self.depthwise_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=int(num_filters1 * scale), + filter_size=3, + stride=stride, + padding=1, + num_groups=int(num_groups * scale)) + + self.pointwise_conv = ConvBNLayer( + num_channels=int(num_filters1 * scale), + filter_size=1, + num_filters=int(num_filters2 * scale), + stride=1, + padding=0) + + def forward(self, x): + x = self.depthwise_conv(x) + x = self.pointwise_conv(x) + return x + + +class MobileNet(TheseusLayer): + """ + MobileNet + Args: + scale: float=1.0. The coefficient that controls the size of network parameters. + class_num: int=1000. The number of classes. + Returns: + model: nn.Layer. Specific MobileNet model depends on args. 
+ """ + + def __init__(self, + stages_pattern, + scale=1.0, + class_num=1000, + return_patterns=None, + return_stages=None): + super().__init__() + self.scale = scale + + self.conv = ConvBNLayer( + num_channels=3, + filter_size=3, + num_filters=int(32 * scale), + stride=2, + padding=1) + + #num_channels, num_filters1, num_filters2, num_groups, stride + self.cfg = [[int(32 * scale), 32, 64, 32, 1], + [int(64 * scale), 64, 128, 64, 2], + [int(128 * scale), 128, 128, 128, 1], + [int(128 * scale), 128, 256, 128, 2], + [int(256 * scale), 256, 256, 256, 1], + [int(256 * scale), 256, 512, 256, 2], + [int(512 * scale), 512, 512, 512, 1], + [int(512 * scale), 512, 512, 512, 1], + [int(512 * scale), 512, 512, 512, 1], + [int(512 * scale), 512, 512, 512, 1], + [int(512 * scale), 512, 512, 512, 1], + [int(512 * scale), 512, 1024, 512, 2], + [int(1024 * scale), 1024, 1024, 1024, 1]] + + self.blocks = nn.Sequential(* [ + DepthwiseSeparable( + num_channels=params[0], + num_filters1=params[1], + num_filters2=params[2], + num_groups=params[3], + stride=params[4], + scale=scale) for params in self.cfg + ]) + + self.avg_pool = AdaptiveAvgPool2D(1) + self.flatten = Flatten(start_axis=1, stop_axis=-1) + + self.fc = Linear( + int(1024 * scale), + class_num, + weight_attr=ParamAttr(initializer=KaimingNormal())) + + super().init_res( + stages_pattern, + return_patterns=return_patterns, + return_stages=return_stages) + + def forward(self, x): + x = self.conv(x) + x = self.blocks(x) + x = self.avg_pool(x) + x = self.flatten(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def MobileNetV1_x0_25(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV1_x0_25 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV1_x0_25` model depends on args. + """ + model = MobileNet( + scale=0.25, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV1"], + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1_x0_25"], + use_ssld) + return model + + +def MobileNetV1_x0_5(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV1_x0_5 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV1_x0_5` model depends on args. + """ + model = MobileNet( + scale=0.5, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV1"], + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1_x0_5"], + use_ssld) + return model + + +def MobileNetV1_x0_75(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV1_x0_75 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. 
Specific `MobileNetV1_x0_75` model depends on args. + """ + model = MobileNet( + scale=0.75, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV1"], + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1_x0_75"], + use_ssld) + return model + + +def MobileNetV1(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV1 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV1` model depends on args. + """ + model = MobileNet( + scale=1.0, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV1"], + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/mobilenet_v3.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/mobilenet_v3.py new file mode 100644 index 000000000..a50c2884d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/mobilenet_v3.py @@ -0,0 +1,591 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
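+
+# Minimal usage sketch (illustrative; it assumes the MobileNetV3_large_x1_0 factory
+# defined near the end of this file and a standard 224x224 ImageNet input):
+#
+#     import paddle
+#     model = MobileNetV3_large_x1_0(pretrained=False, class_num=1000)
+#     logits = model(paddle.rand([1, 3, 224, 224]))   # -> shape [1, 1000]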
+
+# reference: https://arxiv.org/abs/1905.02244
+
+from __future__ import absolute_import, division, print_function
+
+import paddle
+import paddle.nn as nn
+from paddle import ParamAttr
+from paddle.nn import BatchNorm, Conv2D, Dropout, Linear
+from paddle.regularizer import L2Decay
+
+from .custom_devices_layers import AdaptiveAvgPool2D
+from ..base.theseus_layer import TheseusLayer
+from ....utils.save_load import load_dygraph_pretrain
+
+MODEL_URLS = {
+    "MobileNetV3_small_x0_35":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_35_pretrained.pdparams",
+    "MobileNetV3_small_x0_5":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_5_pretrained.pdparams",
+    "MobileNetV3_small_x0_75":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_75_pretrained.pdparams",
+    "MobileNetV3_small_x1_0":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_0_pretrained.pdparams",
+    "MobileNetV3_small_x1_25":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_25_pretrained.pdparams",
+    "MobileNetV3_large_x0_35":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_35_pretrained.pdparams",
+    "MobileNetV3_large_x0_5":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_5_pretrained.pdparams",
+    "MobileNetV3_large_x0_75":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_75_pretrained.pdparams",
+    "MobileNetV3_large_x1_0":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams",
+    "MobileNetV3_large_x1_25":
+    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_25_pretrained.pdparams",
+}
+
+MODEL_STAGES_PATTERN = {
+    "MobileNetV3_small":
+    ["blocks[0]", "blocks[2]", "blocks[7]", "blocks[10]"],
+    "MobileNetV3_large":
+    ["blocks[0]", "blocks[2]", "blocks[5]", "blocks[11]", "blocks[14]"]
+}
+
+__all__ = MODEL_URLS.keys()
+
+# The "large" and "small" configs are for MobileNetV3_large and MobileNetV3_small respectively.
+# Each config is a list; each element (itself a list) describes one depthwise block and is composed of k, exp, c, se, act, s.
+# k: kernel_size +# exp: middle channel number in depthwise block +# c: output channel number in depthwise block +# se: whether to use SE block +# act: which activation to use +# s: stride in depthwise block +NET_CONFIG = { + "large": [ + # k, exp, c, se, act, s + [3, 16, 16, False, "relu", 1], + [3, 64, 24, False, "relu", 2], + [3, 72, 24, False, "relu", 1], + [5, 72, 40, True, "relu", 2], + [5, 120, 40, True, "relu", 1], + [5, 120, 40, True, "relu", 1], + [3, 240, 80, False, "hardswish", 2], + [3, 200, 80, False, "hardswish", 1], + [3, 184, 80, False, "hardswish", 1], + [3, 184, 80, False, "hardswish", 1], + [3, 480, 112, True, "hardswish", 1], + [3, 672, 112, True, "hardswish", 1], + [5, 672, 160, True, "hardswish", 2], + [5, 960, 160, True, "hardswish", 1], + [5, 960, 160, True, "hardswish", 1], + ], + "small": [ + # k, exp, c, se, act, s + [3, 16, 16, True, "relu", 2], + [3, 72, 24, False, "relu", 2], + [3, 88, 24, False, "relu", 1], + [5, 96, 40, True, "hardswish", 2], + [5, 240, 40, True, "hardswish", 1], + [5, 240, 40, True, "hardswish", 1], + [5, 120, 48, True, "hardswish", 1], + [5, 144, 48, True, "hardswish", 1], + [5, 288, 96, True, "hardswish", 2], + [5, 576, 96, True, "hardswish", 1], + [5, 576, 96, True, "hardswish", 1], + ] +} +# first conv output channel number in MobileNetV3 +STEM_CONV_NUMBER = 16 +# last second conv output channel for "small" +LAST_SECOND_CONV_SMALL = 576 +# last second conv output channel for "large" +LAST_SECOND_CONV_LARGE = 960 +# last conv output channel number for "large" and "small" +LAST_CONV = 1280 + + +def _make_divisible(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +def _create_act(act): + if act == "hardswish": + return nn.Hardswish() + elif act == "relu": + return nn.ReLU() + elif act is None: + return None + else: + raise RuntimeError( + "The activation function is not supported: {}".format(act)) + + +class MobileNetV3(TheseusLayer): + """ + MobileNetV3 + Args: + config: list. MobileNetV3 depthwise blocks config. + scale: float=1.0. The coefficient that controls the size of network parameters. + class_num: int=1000. The number of classes. + inplanes: int=16. The output channel number of first convolution layer. + class_squeeze: int=960. The output channel number of penultimate convolution layer. + class_expand: int=1280. The output channel number of last convolution layer. + dropout_prob: float=0.2. Probability of setting units to zero. + Returns: + model: nn.Layer. Specific MobileNetV3 model depends on args. 
+ """ + + def __init__(self, + config, + stages_pattern, + scale=1.0, + class_num=1000, + inplanes=STEM_CONV_NUMBER, + class_squeeze=LAST_SECOND_CONV_LARGE, + class_expand=LAST_CONV, + dropout_prob=0.2, + return_patterns=None, + return_stages=None, + **kwargs): + super().__init__() + + self.cfg = config + self.scale = scale + self.inplanes = inplanes + self.class_squeeze = class_squeeze + self.class_expand = class_expand + self.class_num = class_num + + self.conv = ConvBNLayer( + in_c=3, + out_c=_make_divisible(self.inplanes * self.scale), + filter_size=3, + stride=2, + padding=1, + num_groups=1, + if_act=True, + act="hardswish") + + self.blocks = nn.Sequential(*[ + ResidualUnit( + in_c=_make_divisible(self.inplanes * self.scale if i == 0 else + self.cfg[i - 1][2] * self.scale), + mid_c=_make_divisible(self.scale * exp), + out_c=_make_divisible(self.scale * c), + filter_size=k, + stride=s, + use_se=se, + act=act) for i, (k, exp, c, se, act, s) in enumerate(self.cfg) + ]) + + self.last_second_conv = ConvBNLayer( + in_c=_make_divisible(self.cfg[-1][2] * self.scale), + out_c=_make_divisible(self.scale * self.class_squeeze), + filter_size=1, + stride=1, + padding=0, + num_groups=1, + if_act=True, + act="hardswish") + + self.avg_pool = AdaptiveAvgPool2D(1) + + self.last_conv = Conv2D( + in_channels=_make_divisible(self.scale * self.class_squeeze), + out_channels=self.class_expand, + kernel_size=1, + stride=1, + padding=0, + bias_attr=False) + + self.hardswish = nn.Hardswish() + if dropout_prob is not None: + self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer") + else: + self.dropout = None + self.flatten = nn.Flatten(start_axis=1, stop_axis=-1) + + self.fc = Linear(self.class_expand, class_num) + + super().init_res( + stages_pattern, + return_patterns=return_patterns, + return_stages=return_stages) + + def forward(self, x): + x = self.conv(x) + x = self.blocks(x) + x = self.last_second_conv(x) + x = self.avg_pool(x) + x = self.last_conv(x) + x = self.hardswish(x) + if self.dropout is not None: + x = self.dropout(x) + x = self.flatten(x) + x = self.fc(x) + + return x + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + in_c, + out_c, + filter_size, + stride, + padding, + num_groups=1, + if_act=True, + act=None): + super().__init__() + + self.conv = Conv2D( + in_channels=in_c, + out_channels=out_c, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + bias_attr=False) + self.bn = BatchNorm( + num_channels=out_c, + act=None, + param_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + self.if_act = if_act + self.act = _create_act(act) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.if_act: + x = self.act(x) + return x + + +class ResidualUnit(TheseusLayer): + def __init__(self, + in_c, + mid_c, + out_c, + filter_size, + stride, + use_se, + act=None): + super().__init__() + self.if_shortcut = stride == 1 and in_c == out_c + self.if_se = use_se + + self.expand_conv = ConvBNLayer( + in_c=in_c, + out_c=mid_c, + filter_size=1, + stride=1, + padding=0, + if_act=True, + act=act) + self.bottleneck_conv = ConvBNLayer( + in_c=mid_c, + out_c=mid_c, + filter_size=filter_size, + stride=stride, + padding=int((filter_size - 1) // 2), + num_groups=mid_c, + if_act=True, + act=act) + if self.if_se: + self.mid_se = SEModule(mid_c) + self.linear_conv = ConvBNLayer( + in_c=mid_c, + out_c=out_c, + filter_size=1, + stride=1, + padding=0, + if_act=False, + act=None) + + def forward(self, x): + identity = x 
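+        # keep the block input for the residual shortcut; it is added back after
+        # linear_conv only when stride == 1 and in_c == out_c (self.if_shortcut)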
+ x = self.expand_conv(x) + x = self.bottleneck_conv(x) + if self.if_se: + x = self.mid_se(x) + x = self.linear_conv(x) + if self.if_shortcut: + x = paddle.add(identity, x) + return x + + +# nn.Hardsigmoid can't transfer "slope" and "offset" in nn.functional.hardsigmoid +class Hardsigmoid(TheseusLayer): + def __init__(self, slope=0.2, offset=0.5): + super().__init__() + self.slope = slope + self.offset = offset + + def forward(self, x): + return nn.functional.hardsigmoid( + x, slope=self.slope, offset=self.offset) + + +class SEModule(TheseusLayer): + def __init__(self, channel, reduction=4): + super().__init__() + self.avg_pool = AdaptiveAvgPool2D(1) + self.conv1 = Conv2D( + in_channels=channel, + out_channels=channel // reduction, + kernel_size=1, + stride=1, + padding=0) + self.relu = nn.ReLU() + self.conv2 = Conv2D( + in_channels=channel // reduction, + out_channels=channel, + kernel_size=1, + stride=1, + padding=0) + self.hardsigmoid = Hardsigmoid(slope=0.2, offset=0.5) + + def forward(self, x): + identity = x + x = self.avg_pool(x) + x = self.conv1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.hardsigmoid(x) + return paddle.multiply(x=identity, y=x) + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def MobileNetV3_small_x0_35(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_small_x0_35 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x0_35` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["small"], + scale=0.35, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV3_small"], + class_squeeze=LAST_SECOND_CONV_SMALL, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x0_35"], + use_ssld) + return model + + +def MobileNetV3_small_x0_5(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_small_x0_5 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x0_5` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["small"], + scale=0.5, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV3_small"], + class_squeeze=LAST_SECOND_CONV_SMALL, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x0_5"], + use_ssld) + return model + + +def MobileNetV3_small_x0_75(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_small_x0_75 + Args: + pretrained: bool=false or str. if `true` load pretrained parameters, `false` otherwise. + if str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x0_75` model depends on args. 
+ """ + model = MobileNetV3( + config=NET_CONFIG["small"], + scale=0.75, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV3_small"], + class_squeeze=LAST_SECOND_CONV_SMALL, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x0_75"], + use_ssld) + return model + + +def MobileNetV3_small_x1_0(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_small_x1_0 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x1_0` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["small"], + scale=1.0, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV3_small"], + class_squeeze=LAST_SECOND_CONV_SMALL, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x1_0"], + use_ssld) + return model + + +def MobileNetV3_small_x1_25(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_small_x1_25 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_small_x1_25` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["small"], + scale=1.25, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV3_small"], + class_squeeze=LAST_SECOND_CONV_SMALL, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x1_25"], + use_ssld) + return model + + +def MobileNetV3_large_x0_35(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_large_x0_35 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x0_35` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["large"], + scale=0.35, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV3_small"], + class_squeeze=LAST_SECOND_CONV_LARGE, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x0_35"], + use_ssld) + return model + + +def MobileNetV3_large_x0_5(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_large_x0_5 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x0_5` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["large"], + scale=0.5, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV3_large"], + class_squeeze=LAST_SECOND_CONV_LARGE, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x0_5"], + use_ssld) + return model + + +def MobileNetV3_large_x0_75(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_large_x0_75 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. 
+ Returns: + model: nn.Layer. Specific `MobileNetV3_large_x0_75` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["large"], + scale=0.75, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV3_large"], + class_squeeze=LAST_SECOND_CONV_LARGE, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x0_75"], + use_ssld) + return model + + +def MobileNetV3_large_x1_0(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_large_x1_0 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x1_0` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["large"], + scale=1.0, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV3_large"], + class_squeeze=LAST_SECOND_CONV_LARGE, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x1_0"], + use_ssld) + return model + + +def MobileNetV3_large_x1_25(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV3_large_x1_25 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV3_large_x1_25` model depends on args. + """ + model = MobileNetV3( + config=NET_CONFIG["large"], + scale=1.25, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV3_large"], + class_squeeze=LAST_SECOND_CONV_LARGE, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x1_25"], + use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/mobilenet_v4.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/mobilenet_v4.py new file mode 100644 index 000000000..cad766c86 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/mobilenet_v4.py @@ -0,0 +1,836 @@ +# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
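+
+# NOTE (assumed mapping, not stated in this hunk): the block type codes in NET_CONFIG
+# below appear to correspond to the layers defined in this file ("cn" to ConvBnAct,
+# "er" to EdgeResidual, "uir" to UniversalInvertedResidual), while "mqa" denotes a
+# mobile multi-query-attention block defined later in the file, outside this hunk.
+#
+# Minimal usage sketch (illustrative; factory functions such as MobileNetV4_conv_small
+# are expected later in this file, outside this hunk):
+#
+#     import paddle
+#     model = MobileNetV4_conv_small(pretrained=False)
+#     logits = model(paddle.rand([1, 3, 224, 224]))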
+ +# reference: https://arxiv.org/abs/2404.10518 + +from __future__ import absolute_import, division, print_function + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle import ParamAttr +from paddle.nn import BatchNorm, Conv2D, Dropout, Linear, Identity, Flatten +from paddle.regularizer import L2Decay + +from .custom_devices_layers import AdaptiveAvgPool2D +from ..base.theseus_layer import TheseusLayer +from ..model_zoo.vision_transformer import DropPath +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "MobileNetV4_conv_large": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV4_conv_large_pretrained.pdparams", + "MobileNetV4_conv_medium": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV4_conv_medium_pretrained.pdparams", + "MobileNetV4_conv_small": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV4_conv_small_pretrained.pdparams", + "MobileNetV4_hybrid_large": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV4_hybrid_large_pretrained.pdparams", + "MobileNetV4_hybrid_medium": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV4_hybrid_medium_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) +STEM_CONV_NUMBER = 32 +LAST_CONV = 1280 + +NET_CONFIG = { + "conv_small": [ + # stage 0, 112x112 + # type, out, kernal_size, act, stride + ["cn", 32, 3, "relu", 2], + ["cn", 32, 1, "relu", 1], + # stage 1, 56x56 + ["cn", 96, 3, "relu", 2], + ["cn", 64, 1, "relu", 1], + # stage 2, 28x28 + # type, out, mid_c, first_kernal_size, mid_kernal_size, act, stride + ["uir", 96, 192, 5, 5, "relu", 2], + ["uir", 96, 192, 0, 3, "relu", 1], + ["uir", 96, 192, 0, 3, "relu", 1], + ["uir", 96, 192, 0, 3, "relu", 1], + ["uir", 96, 192, 0, 3, "relu", 1], + ["uir", 96, 384, 3, 0, "relu", 1], + # stage 3, 14x14 + ["uir", 128, 576, 3, 3, "relu", 2], + ["uir", 128, 512, 5, 5, "relu", 1], + ["uir", 128, 512, 0, 5, "relu", 1], + ["uir", 128, 384, 0, 5, "relu", 1], + ["uir", 128, 512, 0, 3, "relu", 1], + ["uir", 128, 512, 0, 3, "relu", 1], + # stage 4, 7x7 + ["cn", 960, 1, "relu", 1], + ], + "conv_medium": [ + # stage 0, 112x112 + ["er", 48, 128, 3, "relu", 2], + # stage 1, 56x56 + ["uir", 80, 192, 3, 5, "relu", 2], + ["uir", 80, 160, 3, 3, "relu", 1], + # stage 2, 28x28 + ["uir", 160, 480, 3, 5, "relu", 2], + ["uir", 160, 640, 3, 3, "relu", 1], + ["uir", 160, 640, 3, 3, "relu", 1], + ["uir", 160, 640, 3, 5, "relu", 1], + ["uir", 160, 640, 3, 3, "relu", 1], + ["uir", 160, 640, 3, 0, "relu", 1], + ["uir", 160, 320, 0, 0, "relu", 1], + ["uir", 160, 640, 3, 0, "relu", 1], + # stage 3, 14x14 + ["uir", 256, 960, 5, 5, "relu", 2], + ["uir", 256, 1024, 5, 5, "relu", 1], + ["uir", 256, 1024, 3, 5, "relu", 1], + ["uir", 256, 1024, 3, 5, "relu", 1], + ["uir", 256, 1024, 0, 0, "relu", 1], + ["uir", 256, 1024, 3, 0, "relu", 1], + ["uir", 256, 512, 3, 5, "relu", 1], + ["uir", 256, 1024, 5, 5, "relu", 1], + ["uir", 256, 1024, 0, 0, "relu", 1], + ["uir", 256, 1024, 0, 0, "relu", 1], + ["uir", 256, 512, 5, 0, "relu", 1], + # stage 4, 7x7 + ["cn", 960, 1, "relu", 1], + ], + "conv_large": [ + # stem_size = 24 + ["er", 48, 96, 3, "relu", 2], + # stage 1, 56x56 + ["uir", 96, 192, 3, 5, "relu", 2], + ["uir", 96, 384, 3, 3, "relu", 1], + # stage 2, 28x28 in + ["uir", 192, 384, 3, 5, "relu", 2], + ["uir", 192, 768, 3, 3, "relu", 1], + ["uir", 192, 768, 3, 3, "relu", 1], + ["uir", 192, 
768, 3, 3, "relu", 1], + ["uir", 192, 768, 3, 5, "relu", 1], + ["uir", 192, 768, 5, 3, "relu", 1], + ["uir", 192, 768, 5, 3, "relu", 1], + ["uir", 192, 768, 5, 3, "relu", 1], + ["uir", 192, 768, 5, 3, "relu", 1], + ["uir", 192, 768, 5, 3, "relu", 1], + ["uir", 192, 768, 3, 0, "relu", 1], + # stage 3, 14x14 in + ["uir", 512, 768, 5, 5, "relu", 2], + ["uir", 512, 2048, 5, 5, "relu", 1], + ["uir", 512, 2048, 5, 5, "relu", 1], + ["uir", 512, 2048, 5, 5, "relu", 1], + ["uir", 512, 2048, 5, 0, "relu", 1], + ["uir", 512, 2048, 5, 3, "relu", 1], + ["uir", 512, 2048, 5, 0, "relu", 1], + ["uir", 512, 2048, 5, 0, "relu", 1], + ["uir", 512, 2048, 5, 3, "relu", 1], + ["uir", 512, 2048, 5, 5, "relu", 1], + ["uir", 512, 2048, 5, 0, "relu", 1], + ["uir", 512, 2048, 5, 0, "relu", 1], + ["uir", 512, 2048, 5, 0, "relu", 1], + # stage 4, 7x7 + ["cn", 960, 1, "relu", 1], + ], + "hybrid_medium": [ + # stem_size = 32 + ["er", 48, 128, 3, "relu", 2], + # stage 1, 56x56 + ["uir", 80, 192, 3, 5, "relu", 2], + ["uir", 80, 160, 3, 3, "relu", 1], + # stage 2, 28x28 + ["uir", 160, 480, 3, 5, "relu", 2], + ["uir", 160, 320, 0, 0, "relu", 1], + ["uir", 160, 640, 3, 3, "relu", 1], + ["uir", 160, 640, 3, 5, "relu", 1], + # type, out, kv_dim, kernal_size, kv_stride, act, stride + ["mqa", 160, 64, 3, 4, 2, "relu", 1], + ["uir", 160, 640, 3, 3, "relu", 1], + ["mqa", 160, 64, 3, 4, 2, "relu", 1], + ["uir", 160, 640, 3, 0, "relu", 1], + ["mqa", 160, 64, 3, 4, 2, "relu", 1], + ["uir", 160, 640, 3, 3, "relu", 1], + ["mqa", 160, 64, 3, 4, 2, "relu", 1], + ["uir", 160, 640, 3, 0, "relu", 1], + # stage 3, 14x14 + ["uir", 256, 960, 5, 5, "relu", 2], + ["uir", 256, 1024, 5, 5, "relu", 1], + ["uir", 256, 1024, 3, 5, "relu", 1], + ["uir", 256, 1024, 3, 5, "relu", 1], + ["uir", 256, 512, 0, 0, "relu", 1], + ["uir", 256, 512, 3, 5, "relu", 1], + ["uir", 256, 512, 0, 0, "relu", 1], + ["uir", 256, 1024, 0, 0, "relu", 1], + ["mqa", 256, 64, 3, 4, 1, "relu", 1], + ["uir", 256, 1024, 3, 0, "relu", 1], + ["mqa", 256, 64, 3, 4, 1, "relu", 1], + ["uir", 256, 1024, 5, 5, "relu", 1], + ["mqa", 256, 64, 3, 4, 1, "relu", 1], + ["uir", 256, 1024, 5, 0, "relu", 1], + ["mqa", 256, 64, 3, 4, 1, "relu", 1], + ["uir", 256, 1024, 5, 0, "relu", 1], + # stage 4, 7x7 + ["cn", 960, 1, "relu", 1], + ], + "hybrid_large": [ + # stem_size = 24 + ["er", 48, 96, 3, "gelu", 2], + # stage 1, 56x56 + ["uir", 96, 192, 3, 5, "gelu", 2], + ["uir", 96, 384, 3, 3, "gelu", 1], + # stage 2, 28x28 in + ["uir", 192, 384, 3, 5, "gelu", 2], + ["uir", 192, 768, 3, 3, "gelu", 1], + ["uir", 192, 768, 3, 3, "gelu", 1], + ["uir", 192, 768, 3, 3, "gelu", 1], + ["uir", 192, 768, 3, 5, "gelu", 1], + ["uir", 192, 768, 5, 3, "gelu", 1], + ["uir", 192, 768, 5, 3, "gelu", 1], + ["mqa", 192, 48, 3, 8, 2, "gelu", 1], + ["uir", 192, 768, 5, 3, "gelu", 1], + ["mqa", 192, 48, 3, 8, 2, "gelu", 1], + ["uir", 192, 768, 5, 3, "gelu", 1], + ["mqa", 192, 48, 3, 8, 2, "gelu", 1], + ["uir", 192, 768, 5, 3, "gelu", 1], + ["mqa", 192, 48, 3, 8, 2, "gelu", 1], + ["uir", 192, 768, 3, 0, "gelu", 1], + # stage 3, 14x14 + ["uir", 512, 768, 5, 5, "gelu", 2], + ["uir", 512, 2048, 5, 5, "gelu", 1], + ["uir", 512, 2048, 5, 5, "gelu", 1], + ["uir", 512, 2048, 5, 5, "gelu", 1], + ["uir", 512, 2048, 5, 0, "gelu", 1], + ["uir", 512, 2048, 5, 3, "gelu", 1], + ["uir", 512, 2048, 5, 0, "gelu", 1], + ["uir", 512, 2048, 5, 0, "gelu", 1], + ["uir", 512, 2048, 5, 3, "gelu", 1], + ["uir", 512, 2048, 5, 5, "gelu", 1], + ["mqa", 512, 64, 3, 8, 1, "gelu", 1], + ["uir", 512, 2048, 5, 0, "gelu", 1], + ["mqa", 512, 64, 3, 8, 1, 
"gelu", 1], + ["uir", 512, 2048, 5, 0, "gelu", 1], + ["mqa", 512, 64, 3, 8, 1, "gelu", 1], + ["uir", 512, 2048, 5, 0, "gelu", 1], + ["mqa", 512, 64, 3, 8, 1, "gelu", 1], + ["uir", 512, 2048, 5, 0, "gelu", 1], + # stage 4, 7x7 + ["cn", 960, 1, "gelu", 1], + ] +} + +MODEL_STAGES_PATTERN = { + "MobileNetV4_conv_small": + ["blocks[1]", "blocks[3]", "blocks[9]", "blocks[15]", "blocks[16]"], + "MobileNetV4_conv_medium": + ["blocks[0]", "blocks[2]", "blocks[10]", "blocks[21]", "blocks[22]"], + "MobileNetV4_conv_large": + ["blocks[0]", "blocks[2]", "blocks[13]", "blocks[26]", "blocks[27]"], + "MobileNetV4_hybrid_medium": + ["blocks[0]", "blocks[2]", "blocks[14]", "blocks[30]", "blocks[31]"], + "MobileNetV4_hybrid_large": + ["blocks[0]", "blocks[2]", "blocks[17]", "blocks[35]", "blocks[36]"], +} + + +def _make_divisible(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +def _create_act(act): + if act == "hardswish": + return nn.Hardswish() + elif act == "relu": + return nn.ReLU() + elif act == "gelu": + return nn.GELU(approximate=False) + elif act is None: + return None + else: + raise RuntimeError( + "The activation function is not supported: {}".format(act)) + + +class ConvBnAct(TheseusLayer): + def __init__(self, + in_c, + out_c, + filter_size, + stride, + padding, + num_groups=1, + drop_path_rate=0.0, + if_act=True, + act=None): + super().__init__() + + self.drop_path_rate = drop_path_rate + self.conv = Conv2D( + in_channels=in_c, + out_channels=out_c, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + bias_attr=False) + self.bn = BatchNorm( + num_channels=out_c, + act=None, + param_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + self.if_act = if_act + if self.if_act: + self.act = _create_act(act) + if self.drop_path_rate > 0: + self.drop_path = DropPath(drop_path_rate) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.if_act: + x = self.act(x) + if self.drop_path_rate > 0: + x = self.drop_path(x) + return x + + +class EdgeResidual(TheseusLayer): + def __init__(self, + in_c, + mid_c, + out_c, + filter_size, + stride, + drop_path_rate=0.0, + if_act=False, + act=None): + super(EdgeResidual, self).__init__() + + self.if_shortcut = stride == 1 and in_c == out_c + self.conv_exp = ConvBnAct( + in_c=in_c, + out_c=mid_c, + filter_size=filter_size, + stride=stride, + padding=int((filter_size - 1) // 2), + if_act=True, + act=act) + self.conv_pwl = ConvBnAct( + in_c=mid_c, + out_c=out_c, + filter_size=1, + stride=1, + padding=0, + if_act=False, + act=act) + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate else Identity() + + def forward(self, x): + identity = x + x = self.conv_exp(x) + x = self.conv_pwl(x) + if self.if_shortcut: + x = paddle.add(identity, self.drop_path(x)) + return x + + +class UniversalInvertedResidual(TheseusLayer): + def __init__(self, + in_c, + mid_c, + out_c, + filter_size, + stem_kernel_size=None, + stride=1, + drop_path_rate=0.0, + layer_scale_init_value=0.0, + if_act=False, + act=None): + super().__init__() + + self.if_shortcut = stride == 1 and in_c == out_c + self.layer_scale_init_value = layer_scale_init_value + if stem_kernel_size: + self.dw_start = ConvBnAct( + in_c=in_c, + out_c=in_c, + filter_size=stem_kernel_size, + stride=1, + padding=int((stem_kernel_size - 1) // 2), + num_groups=in_c, + if_act=False, + 
act=None) + else: + self.dw_start = Identity() + + self.pw_exp = ConvBnAct( + in_c=in_c, + out_c=mid_c, + filter_size=1, + stride=1, + padding=0, + num_groups=1, + if_act=True, + act=act) + + if filter_size: + self.dw_mid = ConvBnAct( + in_c=mid_c, + out_c=mid_c, + filter_size=filter_size, + stride=stride, + padding=int((filter_size - 1) // 2), + num_groups=mid_c, + if_act=True, + act=act) + else: + self.dw_mid = Identity() + self.pw_proj = ConvBnAct( + in_c=mid_c, + out_c=out_c, + filter_size=1, + stride=1, + padding=0, + if_act=False, + act=None) + if layer_scale_init_value > 0.0: + self.layer_scale = LayerScale2D(out_c, layer_scale_init_value) + + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate else Identity() + + def forward(self, x): + identity = x + x = self.dw_start(x) + x = self.pw_exp(x) + x = self.dw_mid(x) + x = self.pw_proj(x) + if self.layer_scale_init_value > 0.0: + x = self.layer_scale(x) + if self.if_shortcut: + x = paddle.add(identity, self.drop_path(x)) + return x + + +class LayerScale2D(nn.Layer): + def __init__(self, dim, init_values=1e-05, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = paddle.create_parameter( + shape=[dim], + dtype='float32', + default_initializer=nn.initializer.Constant(init_values)) + + def forward(self, x): + gamma = self.gamma.reshape([1, -1, 1, 1]) + return (x.multiply_(y=paddle.to_tensor(gamma)) + if self.inplace else x * gamma) + + +class MobileAttention(nn.Layer): + def __init__(self, + in_c, + out_c, + filter_size=3, + stride=1, + num_head=4, + query_dim=256, + kv_dim=64, + kv_stride=1, + drop_path_rate=0.0, + attn_drop_rate=0.0, + dropout_prob=0.0, + layer_scale_init_value=0.0, + if_act=True, + act=None, + use_fused_attn=False): + super(MobileAttention, self).__init__() + + self.if_shortcut = stride == 1 and in_c == out_c + self.kv_stride = kv_stride + self.kv_dim = kv_dim + self.num_head = num_head + self.query_dim = query_dim + self.attn_drop_rate = attn_drop_rate + self.use_fused_attn = use_fused_attn + self.norm = BatchNorm( + num_channels=in_c, + act=None, + param_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + self.query_proj = Conv2D( + in_channels=in_c, + out_channels=query_dim, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias_attr=False) + if kv_stride > 1: + self.key_down_proj = ConvBnAct( + in_c=in_c, + out_c=in_c, + filter_size=filter_size, + stride=kv_stride, + padding=int((filter_size - 1) // 2), + num_groups=in_c, + if_act=False) + self.value_down_proj = ConvBnAct( + in_c=in_c, + out_c=in_c, + filter_size=filter_size, + stride=kv_stride, + padding=int((filter_size - 1) // 2), + num_groups=in_c, + if_act=False) + self.key_proj = Conv2D( + in_channels=in_c, + out_channels=kv_dim, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias_attr=False) + self.value_proj = Conv2D( + in_channels=in_c, + out_channels=kv_dim, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias_attr=False) + self.proj = Conv2D( + in_channels=query_dim, + out_channels=out_c, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias_attr=False) + if not self.use_fused_attn: + self.scale = query_dim**-0.5 + self.softmax = nn.Softmax(-1) + self.attn_drop = Dropout(self.attn_drop_rate) + self.drop = Dropout(dropout_prob) + self.layer_scale_init_value = layer_scale_init_value + if layer_scale_init_value > 0.0: + self.layer_scale = LayerScale2D(out_c, layer_scale_init_value) + self.drop_path = (DropPath(drop_path_rate) + if drop_path_rate else 
Identity()) + + def forward(self, x, attn_mask=None): + identity = x + x = self.norm(x) + B, C, H, W = tuple(x.shape) + q = self.query_proj(x).reshape( + [B, self.num_head, self.query_dim // self.num_head, H * W]) + q = q.transpose([0, 3, 1, 2]) + if self.kv_stride > 1: + k = self.key_proj(self.key_down_proj(x)) + v = self.value_proj(self.value_down_proj(x)) + else: + k = self.key_proj(x) + v = self.value_proj(x) + k = k.reshape( + [B, self.kv_dim, 1, H // self.kv_stride * W // self.kv_stride]) + k = k.transpose([0, 3, 2, 1]) + v = v.reshape( + [B, self.kv_dim, 1, H // self.kv_stride * W // self.kv_stride]) + v = v.transpose([0, 3, 2, 1]) + if self.use_fused_attn: + attn = F.scaled_dot_product_attention( + query=q, + key=k, + value=v, + attn_mask=attn_mask, + dropout_p=self.attn_drop_rate if self.training else 0.0) + else: + q = q.transpose([0, 2, 1, 3]) * self.scale + v = v.transpose([0, 2, 1, 3]) + attn = q @ k.transpose([0, 2, 3, 1]) + attn = self.softmax(attn) + attn = self.attn_drop(attn) + attn = attn @ v + attn = attn.transpose([0, 2, 1, 3]) + + attn = attn.reshape([B, H, W, self.query_dim]) + x = self.proj(attn.transpose([0, 3, 1, 2])) + x = self.drop(x) + if self.layer_scale_init_value > 0.0: + x = self.layer_scale(x) + if self.if_shortcut: + x = paddle.add(identity, self.drop_path(x)) + return x + + +class MobileNetV4(TheseusLayer): + """ + MobileNetV4 + Args: + config: list. MobileNetV4 depthwise blocks config. + stages_pattern: list. The pattern of each stage blocks. + scale: float=1.0. The coefficient that controls the size of network parameters. + class_num: int=1000. The number of classes. + inplanes: int=32. The output channel number of first convolution layer. + act: str="relu". The activation function. + class_expand: int=960. The output channel number of last convolution layer. + drop_path_rate: float=0.0. Probability of dropping path. + drop_rate: float=0.0. Probability of setting units to zero. + Returns: + model: nn.Layer. Specific MobileNetV4 model depends on args. 
+ """ + def __init__(self, + config, + stages_pattern, + scale=1.0, + class_num=1000, + inplanes=STEM_CONV_NUMBER, + class_expand=LAST_CONV, + act="relu", + drop_path_rate=0.0, + drop_rate=0.0, + layer_scale_init_value=0.0, + return_patterns=None, + return_stages=None, + use_fused_attn=False, + **kwargs): + super(MobileNetV4, self).__init__() + self.cfg = config + self.scale = scale + self.drop_path_rate = drop_path_rate + self.inplanes = inplanes + self.class_expand = class_expand + self.class_num = class_num + self.conv_stem = ConvBnAct( + in_c=3, + out_c=_make_divisible(self.inplanes * self.scale), + filter_size=3, + stride=2, + padding=1, + if_act=True, + act=act) + + blocks = [] + block_count = len(self.cfg) + for i in range(block_count): + type = self.cfg[i][0] + if type == "cn": + _, exp, k, act, s = self.cfg[i] + block = ConvBnAct( + in_c=_make_divisible(self.inplanes * self.scale if i == 0 + else self.cfg[i - 1][1] * self.scale), + out_c=_make_divisible(exp * self.scale), + filter_size=k, + stride=s, + padding=int((k - 1) // 2), + num_groups=1, + drop_path_rate=self.drop_path_rate * i / block_count, + if_act=True, + act=act) + elif type == "uir": + _, c, exp, k_start, k, act, s = self.cfg[i] + block = UniversalInvertedResidual( + in_c=_make_divisible(self.inplanes * self.scale if i == 0 + else self.cfg[i - 1][1] * self.scale), + mid_c=_make_divisible(self.scale * exp), + out_c=_make_divisible(self.scale * c), + filter_size=k, + stem_kernel_size=k_start, + stride=s, + drop_path_rate=self.drop_path_rate * i / block_count, + layer_scale_init_value=layer_scale_init_value, + if_act=True, + act=act) + elif type == "er": + _, c, exp, k, act, s = self.cfg[i] + block = EdgeResidual( + in_c=_make_divisible(self.inplanes * self.scale if i == 0 + else self.cfg[i - 1][1] * self.scale), + mid_c=_make_divisible(self.scale * exp), + out_c=_make_divisible(self.scale * c), + filter_size=k, + stride=s, + drop_path_rate=self.drop_path_rate * i / block_count, + if_act=True, + act=act) + elif type == "mqa": + # type, out,kv_dim, kernal_size, kv_stride, act, stride + _, c, dim, k, head, kv_stride, act, s = self.cfg[i] + block = MobileAttention( + in_c=_make_divisible(self.inplanes * self.scale if i == 0 + else self.cfg[i - 1][1] * self.scale), + out_c=_make_divisible(self.scale * c), + filter_size=k, + stride=s, + num_head=head, + query_dim=_make_divisible(self.scale * head * dim), + kv_dim=_make_divisible(self.scale * dim), + kv_stride=kv_stride, + drop_path_rate=self.drop_path_rate * i / block_count, + layer_scale_init_value=layer_scale_init_value, + if_act=True, + act=act, + use_fused_attn=use_fused_attn) + blocks.append(block) + self.blocks = nn.Sequential(*blocks) + self.global_pool = AdaptiveAvgPool2D(1) + self.conv_head = ConvBnAct( + in_c=_make_divisible(self.scale * self.cfg[-1][1]), + out_c=self.class_expand, + filter_size=1, + stride=1, + padding=0, + if_act=True, + act=act) + self.flatten = Flatten(start_axis=1, stop_axis=-1) + self.dropout = Dropout(drop_rate) + self.classifier = Linear(self.class_expand, + class_num) if class_num > 0 else Identity() + super().init_res( + stages_pattern, + return_patterns=return_patterns, + return_stages=return_stages) + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.blocks(x) + return x + + def forward_head(self, x, pre_logits=False): + x = self.global_pool(x) + x = self.conv_head(x) + x = self.flatten(x) + if pre_logits: + return x + return self.classifier(x) + + def forward(self, x): + x = self.forward_features(x) + x = 
self.forward_head(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError("pretrained type is not available. ") + + +def MobileNetV4_conv_small(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV4_conv_small + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV4_conv_small` model depends on args. + """ + model = MobileNetV4( + config=NET_CONFIG["conv_small"], + scale=1.0, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV4_conv_small"], + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV4_conv_small"], + use_ssld) + return model + + +def MobileNetV4_conv_medium(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV4_conv_medium + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV4_conv_medium` model depends on args. + """ + model = MobileNetV4( + config=NET_CONFIG["conv_medium"], + scale=1.0, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV4_conv_medium"], + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV4_conv_medium"], + use_ssld) + return model + + +def MobileNetV4_conv_large(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV4_conv_large + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV4_conv_large` model depends on args. + """ + model = MobileNetV4( + config=NET_CONFIG["conv_large"], + scale=1.0, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV4_conv_large"], + inplanes=24, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV4_conv_large"], + use_ssld) + return model + + +def MobileNetV4_hybrid_medium(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV4_hybrid_medium + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `MobileNetV4_hybrid_medium` model depends on args. + """ + model = MobileNetV4( + config=NET_CONFIG["hybrid_medium"], + scale=1.0, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV4_hybrid_medium"], + layer_scale_init_value=1e-05, + **kwargs) + _load_pretrained(pretrained, model, + MODEL_URLS["MobileNetV4_hybrid_medium"], use_ssld) + return model + + +def MobileNetV4_hybrid_large(pretrained=False, use_ssld=False, **kwargs): + """ + MobileNetV4_hybrid_large + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. 
+ Returns: + model: nn.Layer. Specific `MobileNetV4_hybrid_large` model depends on args. + """ + model = MobileNetV4( + config=NET_CONFIG["hybrid_large"], + scale=1.0, + stages_pattern=MODEL_STAGES_PATTERN["MobileNetV4_hybrid_large"], + inplanes=24, + act="gelu", + layer_scale_init_value=1e-05, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MobileNetV4_hybrid_large"], + use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_hgnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_hgnet.py new file mode 100644 index 000000000..04a936f85 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_hgnet.py @@ -0,0 +1,376 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.initializer import KaimingNormal, Constant +from paddle.nn import Conv2D, BatchNorm2D, ReLU, MaxPool2D +from .custom_devices_layers import AdaptiveAvgPool2D +from paddle.regularizer import L2Decay +from paddle import ParamAttr + +from ..base.theseus_layer import TheseusLayer +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "PPHGNet_tiny": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams", + "PPHGNet_small": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams", + "PPHGNet_base": "" +} + +__all__ = list(MODEL_URLS.keys()) + +kaiming_normal_ = KaimingNormal() +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) 
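The module-level `kaiming_normal_`, `zeros_` and `ones_` objects above are callable initializer instances that overwrite a parameter in place; `PPHGNet._init_weights` further down applies them per layer type (Conv2D weights, BatchNorm2D scale/shift, Linear bias). A small sketch of that same pattern, reusing the instances defined just above and hypothetical `_demo_*` layers that are not part of this file:

import paddle.nn as nn
from paddle.nn import Conv2D, BatchNorm2D   # same imports as at the top of this file

_demo_conv = Conv2D(in_channels=3, out_channels=8, kernel_size=3)
_demo_bn = BatchNorm2D(8)
_demo_fc = nn.Linear(8, 4)

kaiming_normal_(_demo_conv.weight)   # conv weight: Kaiming-normal re-init, in place
ones_(_demo_bn.weight)               # BN scale -> 1.0
zeros_(_demo_bn.bias)                # BN shift -> 0.0
zeros_(_demo_fc.bias)                # linear bias -> 0.0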
+ + +class ConvBNAct(TheseusLayer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + groups=1, + use_act=True): + super().__init__() + self.use_act = use_act + self.conv = Conv2D( + in_channels, + out_channels, + kernel_size, + stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias_attr=False) + self.bn = BatchNorm2D( + out_channels, + weight_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + if self.use_act: + self.act = ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.use_act: + x = self.act(x) + return x + + +class ESEModule(TheseusLayer): + def __init__(self, channels): + super().__init__() + self.avg_pool = AdaptiveAvgPool2D(1) + self.conv = Conv2D( + in_channels=channels, + out_channels=channels, + kernel_size=1, + stride=1, + padding=0) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + identity = x + x = self.avg_pool(x) + x = self.conv(x) + x = self.sigmoid(x) + return paddle.multiply(x=identity, y=x) + + +class HG_Block(TheseusLayer): + def __init__( + self, + in_channels, + mid_channels, + out_channels, + layer_num, + identity=False, ): + super().__init__() + self.identity = identity + + self.layers = nn.LayerList() + self.layers.append( + ConvBNAct( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=3, + stride=1)) + for _ in range(layer_num - 1): + self.layers.append( + ConvBNAct( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=3, + stride=1)) + + # feature aggregation + total_channels = in_channels + layer_num * mid_channels + self.aggregation_conv = ConvBNAct( + in_channels=total_channels, + out_channels=out_channels, + kernel_size=1, + stride=1) + self.att = ESEModule(out_channels) + + def forward(self, x): + identity = x + output = [] + output.append(x) + for layer in self.layers: + x = layer(x) + output.append(x) + x = paddle.concat(output, axis=1) + x = self.aggregation_conv(x) + x = self.att(x) + if self.identity: + x += identity + return x + + +class HG_Stage(TheseusLayer): + def __init__(self, + in_channels, + mid_channels, + out_channels, + block_num, + layer_num, + downsample=True): + super().__init__() + self.downsample = downsample + if downsample: + self.downsample = ConvBNAct( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=2, + groups=in_channels, + use_act=False) + + blocks_list = [] + blocks_list.append( + HG_Block( + in_channels, + mid_channels, + out_channels, + layer_num, + identity=False)) + for _ in range(block_num - 1): + blocks_list.append( + HG_Block( + out_channels, + mid_channels, + out_channels, + layer_num, + identity=True)) + self.blocks = nn.Sequential(*blocks_list) + + def forward(self, x): + if self.downsample: + x = self.downsample(x) + x = self.blocks(x) + return x + + +class PPHGNet(TheseusLayer): + """ + PPHGNet + Args: + stem_channels: list. Stem channel list of PPHGNet. + stage_config: dict. The configuration of each stage of PPHGNet. such as the number of channels, stride, etc. + layer_num: int. Number of layers of HG_Block. + use_last_conv: boolean. Whether to use a 1x1 convolutional layer before the classification layer. + class_expand: int=2048. Number of channels for the last 1x1 convolutional layer. + dropout_prob: float. Parameters of dropout, 0.0 means dropout is not used. + class_num: int=1000. The number of classes. + Returns: + model: nn.Layer. Specific PPHGNet model depends on args. 
+ """ + + def __init__(self, + stem_channels, + stage_config, + layer_num, + use_last_conv=True, + class_expand=2048, + dropout_prob=0.0, + class_num=1000, + **kwargs): + super().__init__() + self.use_last_conv = use_last_conv + self.class_expand = class_expand + + # stem + stem_channels.insert(0, 3) + self.stem = nn.Sequential(* [ + ConvBNAct( + in_channels=stem_channels[i], + out_channels=stem_channels[i + 1], + kernel_size=3, + stride=2 if i == 0 else 1) for i in range( + len(stem_channels) - 1) + ]) + self.pool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1) + + # stages + self.stages = nn.LayerList() + for k in stage_config: + in_channels, mid_channels, out_channels, block_num, downsample = stage_config[ + k] + self.stages.append( + HG_Stage(in_channels, mid_channels, out_channels, block_num, + layer_num, downsample)) + + self.avg_pool = AdaptiveAvgPool2D(1) + if self.use_last_conv: + self.last_conv = Conv2D( + in_channels=out_channels, + out_channels=self.class_expand, + kernel_size=1, + stride=1, + padding=0, + bias_attr=False) + self.act = nn.ReLU() + self.dropout = nn.Dropout( + p=dropout_prob, mode="downscale_in_infer") + + self.flatten = nn.Flatten(start_axis=1, stop_axis=-1) + self.fc = nn.Linear(self.class_expand + if self.use_last_conv else out_channels, class_num) + + self._init_weights() + + def _init_weights(self): + for m in self.sublayers(): + if isinstance(m, nn.Conv2D): + kaiming_normal_(m.weight) + elif isinstance(m, (nn.BatchNorm2D)): + ones_(m.weight) + zeros_(m.bias) + elif isinstance(m, nn.Linear): + zeros_(m.bias) + + def forward(self, x): + x = self.stem(x) + x = self.pool(x) + + for stage in self.stages: + x = stage(x) + + x = self.avg_pool(x) + if self.use_last_conv: + x = self.last_conv(x) + x = self.act(x) + x = self.dropout(x) + x = self.flatten(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def PPHGNet_tiny(pretrained=False, use_ssld=False, **kwargs): + """ + PPHGNet_tiny + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPHGNet_tiny` model depends on args. + """ + stage_config = { + # in_channels, mid_channels, out_channels, blocks, downsample + "stage1": [96, 96, 224, 1, False], + "stage2": [224, 128, 448, 1, True], + "stage3": [448, 160, 512, 2, True], + "stage4": [512, 192, 768, 1, True], + } + + model = PPHGNet( + stem_channels=[48, 48, 96], + stage_config=stage_config, + layer_num=5, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNet_tiny"], use_ssld) + return model + + +def PPHGNet_small(pretrained=False, use_ssld=False, **kwargs): + """ + PPHGNet_small + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPHGNet_small` model depends on args. 
+ """ + stage_config = { + # in_channels, mid_channels, out_channels, blocks, downsample + "stage1": [128, 128, 256, 1, False], + "stage2": [256, 160, 512, 1, True], + "stage3": [512, 192, 768, 2, True], + "stage4": [768, 224, 1024, 1, True], + } + + model = PPHGNet( + stem_channels=[64, 64, 128], + stage_config=stage_config, + layer_num=6, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNet_small"], use_ssld) + return model + + +def PPHGNet_base(pretrained=False, use_ssld=True, **kwargs): + """ + PPHGNet_base + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPHGNet_base` model depends on args. + """ + stage_config = { + # in_channels, mid_channels, out_channels, blocks, downsample + "stage1": [160, 192, 320, 1, False], + "stage2": [320, 224, 640, 2, True], + "stage3": [640, 256, 960, 3, True], + "stage4": [960, 288, 1280, 2, True], + } + + model = PPHGNet( + stem_channels=[96, 96, 160], + stage_config=stage_config, + layer_num=7, + dropout_prob=0.2, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNet_base"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_hgnet_v2.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_hgnet_v2.py new file mode 100644 index 000000000..3f554c82c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_hgnet_v2.py @@ -0,0 +1,706 @@ +# copyright (c) 2023 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
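Each `stage_config` row in the PPHGNet builders above is `[in_channels, mid_channels, out_channels, block_num, downsample]`; `PPHGNet.__init__` forwards these to `HG_Stage` together with the builder's `layer_num`. A hedged end-to-end sketch (the `ppcls` import path is again an assumption about how the vendored package is exposed):

import paddle
from ppcls.arch.backbone.legendary_models.pp_hgnet import PPHGNet_tiny

# In PPHGNet_tiny, "stage3": [448, 160, 512, 2, True] means 448 input channels,
# 160 mid channels, 512 output channels, 2 HG_Blocks, preceded by a stride-2
# depthwise downsample.
model = PPHGNet_tiny(pretrained=False, class_num=1000)
model.eval()
logits = model(paddle.randn([2, 3, 224, 224]))   # -> shape [2, 1000]
print(logits.shape)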
+ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.initializer import KaimingNormal, Constant +from paddle.nn import Conv2D, BatchNorm2D, ReLU, AdaptiveAvgPool2D, MaxPool2D +from paddle.regularizer import L2Decay +from paddle import ParamAttr + +from ..base.theseus_layer import TheseusLayer +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "PPHGNetV2_B0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNetV2_B0_ssld_pretrained.pdparams", + "PPHGNetV2_B1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNetV2_B1_ssld_pretrained.pdparams", + "PPHGNetV2_B2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNetV2_B2_ssld_pretrained.pdparams", + "PPHGNetV2_B3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNetV2_B3_ssld_pretrained.pdparams", + "PPHGNetV2_B4": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNetV2_B4_ssld_pretrained.pdparams", + "PPHGNetV2_B5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNetV2_B5_ssld_pretrained.pdparams", + "PPHGNetV2_B6": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNetV2_B6_ssld_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + +kaiming_normal_ = KaimingNormal() +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) + + +class LearnableAffineBlock(TheseusLayer): + """ + Create a learnable affine block module. This module can significantly improve accuracy on smaller models. + + Args: + scale_value (float): The initial value of the scale parameter, default is 1.0. + bias_value (float): The initial value of the bias parameter, default is 0.0. + lr_mult (float): The learning rate multiplier, default is 1.0. + lab_lr (float): The learning rate, default is 0.01. + """ + + def __init__(self, + scale_value=1.0, + bias_value=0.0, + lr_mult=1.0, + lab_lr=0.01): + super().__init__() + self.scale = self.create_parameter( + shape=[1, ], + default_initializer=Constant(value=scale_value), + attr=ParamAttr(learning_rate=lr_mult * lab_lr)) + self.add_parameter("scale", self.scale) + self.bias = self.create_parameter( + shape=[1, ], + default_initializer=Constant(value=bias_value), + attr=ParamAttr(learning_rate=lr_mult * lab_lr)) + self.add_parameter("bias", self.bias) + + def forward(self, x): + return self.scale * x + self.bias + + +class ConvBNAct(TheseusLayer): + """ + ConvBNAct is a combination of convolution and batchnorm layers. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (int): Size of the convolution kernel. Defaults to 3. + stride (int): Stride of the convolution. Defaults to 1. + padding (int/str): Padding or padding type for the convolution. Defaults to 1. + groups (int): Number of groups for the convolution. Defaults to 1. + use_act: (bool): Whether to use activation function. Defaults to True. + use_lab (bool): Whether to use the LAB operation. Defaults to False. + lr_mult (float): Learning rate multiplier for the layer. Defaults to 1.0. 
+ """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + groups=1, + use_act=True, + use_lab=False, + lr_mult=1.0): + super().__init__() + self.use_act = use_act + self.use_lab = use_lab + self.conv = Conv2D( + in_channels, + out_channels, + kernel_size, + stride, + padding=padding + if isinstance(padding, str) else (kernel_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=False) + self.bn = BatchNorm2D( + out_channels, + weight_attr=ParamAttr( + regularizer=L2Decay(0.0), learning_rate=lr_mult), + bias_attr=ParamAttr( + regularizer=L2Decay(0.0), learning_rate=lr_mult)) + if self.use_act: + self.act = ReLU() + if self.use_lab: + self.lab = LearnableAffineBlock(lr_mult=lr_mult) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.use_act: + x = self.act(x) + if self.use_lab: + x = self.lab(x) + return x + + +class LightConvBNAct(TheseusLayer): + """ + LightConvBNAct is a combination of pw and dw layers. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (int): Size of the depth-wise convolution kernel. + use_lab (bool): Whether to use the LAB operation. Defaults to False. + lr_mult (float): Learning rate multiplier for the layer. Defaults to 1.0. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + use_lab=False, + lr_mult=1.0, + **kwargs): + super().__init__() + self.conv1 = ConvBNAct( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + use_act=False, + use_lab=use_lab, + lr_mult=lr_mult) + self.conv2 = ConvBNAct( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=kernel_size, + groups=out_channels, + use_act=True, + use_lab=use_lab, + lr_mult=lr_mult) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x + + +class StemBlock(TheseusLayer): + """ + StemBlock for PP-HGNetV2. + + Args: + in_channels (int): Number of input channels. + mid_channels (int): Number of middle channels. + out_channels (int): Number of output channels. + use_lab (bool): Whether to use the LAB operation. Defaults to False. + lr_mult (float): Learning rate multiplier for the layer. Defaults to 1.0. 
+ """ + + def __init__(self, + in_channels, + mid_channels, + out_channels, + use_lab=False, + lr_mult=1.0): + super().__init__() + self.stem1 = ConvBNAct( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=3, + stride=2, + use_lab=use_lab, + lr_mult=lr_mult) + self.stem2a = ConvBNAct( + in_channels=mid_channels, + out_channels=mid_channels // 2, + kernel_size=2, + stride=1, + padding="SAME", + use_lab=use_lab, + lr_mult=lr_mult) + self.stem2b = ConvBNAct( + in_channels=mid_channels // 2, + out_channels=mid_channels, + kernel_size=2, + stride=1, + padding="SAME", + use_lab=use_lab, + lr_mult=lr_mult) + self.stem3 = ConvBNAct( + in_channels=mid_channels * 2, + out_channels=mid_channels, + kernel_size=3, + stride=2, + use_lab=use_lab, + lr_mult=lr_mult) + self.stem4 = ConvBNAct( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + use_lab=use_lab, + lr_mult=lr_mult) + self.pool = nn.MaxPool2D( + kernel_size=2, stride=1, ceil_mode=True, padding="SAME") + + def forward(self, x): + x = self.stem1(x) + x2 = self.stem2a(x) + x2 = self.stem2b(x2) + x1 = self.pool(x) + x = paddle.concat([x1, x2], 1) + x = self.stem3(x) + x = self.stem4(x) + + return x + + +class HGV2_Block(TheseusLayer): + """ + HGV2_Block, the basic unit that constitutes the HGV2_Stage. + + Args: + in_channels (int): Number of input channels. + mid_channels (int): Number of middle channels. + out_channels (int): Number of output channels. + kernel_size (int): Size of the convolution kernel. Defaults to 3. + layer_num (int): Number of layers in the HGV2 block. Defaults to 6. + stride (int): Stride of the convolution. Defaults to 1. + padding (int/str): Padding or padding type for the convolution. Defaults to 1. + groups (int): Number of groups for the convolution. Defaults to 1. + use_act (bool): Whether to use activation function. Defaults to True. + use_lab (bool): Whether to use the LAB operation. Defaults to False. + lr_mult (float): Learning rate multiplier for the layer. Defaults to 1.0. + """ + + def __init__(self, + in_channels, + mid_channels, + out_channels, + kernel_size=3, + layer_num=6, + identity=False, + light_block=True, + use_lab=False, + lr_mult=1.0): + super().__init__() + self.identity = identity + + self.layers = nn.LayerList() + block_type = "LightConvBNAct" if light_block else "ConvBNAct" + for i in range(layer_num): + self.layers.append( + eval(block_type)(in_channels=in_channels + if i == 0 else mid_channels, + out_channels=mid_channels, + stride=1, + kernel_size=kernel_size, + use_lab=use_lab, + lr_mult=lr_mult)) + # feature aggregation + total_channels = in_channels + layer_num * mid_channels + self.aggregation_squeeze_conv = ConvBNAct( + in_channels=total_channels, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + use_lab=use_lab, + lr_mult=lr_mult) + self.aggregation_excitation_conv = ConvBNAct( + in_channels=out_channels // 2, + out_channels=out_channels, + kernel_size=1, + stride=1, + use_lab=use_lab, + lr_mult=lr_mult) + + def forward(self, x): + identity = x + output = [] + output.append(x) + for layer in self.layers: + x = layer(x) + output.append(x) + x = paddle.concat(output, axis=1) + x = self.aggregation_squeeze_conv(x) + x = self.aggregation_excitation_conv(x) + if self.identity: + x += identity + return x + + +class HGV2_Stage(TheseusLayer): + """ + HGV2_Stage, the basic unit that constitutes the PPHGNetV2. + + Args: + in_channels (int): Number of input channels. + mid_channels (int): Number of middle channels. 
+ out_channels (int): Number of output channels. + block_num (int): Number of blocks in the HGV2 stage. + layer_num (int): Number of layers in the HGV2 block. Defaults to 6. + is_downsample (bool): Whether to use downsampling operation. Defaults to False. + light_block (bool): Whether to use light block. Defaults to True. + kernel_size (int): Size of the convolution kernel. Defaults to 3. + use_lab (bool, optional): Whether to use the LAB operation. Defaults to False. + lr_mult (float, optional): Learning rate multiplier for the layer. Defaults to 1.0. + """ + + def __init__(self, + in_channels, + mid_channels, + out_channels, + block_num, + layer_num=6, + is_downsample=True, + light_block=True, + kernel_size=3, + use_lab=False, + lr_mult=1.0): + + super().__init__() + self.is_downsample = is_downsample + if self.is_downsample: + self.downsample = ConvBNAct( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=2, + groups=in_channels, + use_act=False, + use_lab=use_lab, + lr_mult=lr_mult) + + blocks_list = [] + for i in range(block_num): + blocks_list.append( + HGV2_Block( + in_channels=in_channels if i == 0 else out_channels, + mid_channels=mid_channels, + out_channels=out_channels, + kernel_size=kernel_size, + layer_num=layer_num, + identity=False if i == 0 else True, + light_block=light_block, + use_lab=use_lab, + lr_mult=lr_mult)) + self.blocks = nn.Sequential(*blocks_list) + + def forward(self, x): + if self.is_downsample: + x = self.downsample(x) + x = self.blocks(x) + return x + + +class PPHGNetV2(TheseusLayer): + """ + PPHGNetV2 + + Args: + stage_config (dict): Config for PPHGNetV2 stages. such as the number of channels, stride, etc. + stem_channels: (list): Number of channels of the stem of the PPHGNetV2. + use_lab (bool): Whether to use the LAB operation. Defaults to False. + use_last_conv (bool): Whether to use the last conv layer as the output channel. Defaults to True. + class_expand (int): Number of channels for the last 1x1 convolutional layer. + drop_prob (float): Dropout probability for the last 1x1 convolutional layer. Defaults to 0.0. + class_num (int): The number of classes for the classification layer. Defaults to 1000. + lr_mult_list (list): Learning rate multiplier for the stages. Defaults to [1.0, 1.0, 1.0, 1.0, 1.0]. + Returns: + model: nn.Layer. Specific PPHGNetV2 model depends on args. 
+ """ + + def __init__(self, + stage_config, + stem_channels=[3, 32, 64], + use_lab=False, + use_last_conv=True, + class_expand=2048, + dropout_prob=0.0, + class_num=1000, + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0], + **kwargs): + super().__init__() + self.use_lab = use_lab + self.use_last_conv = use_last_conv + self.class_expand = class_expand + self.class_num = class_num + + # stem + self.stem = StemBlock( + in_channels=stem_channels[0], + mid_channels=stem_channels[1], + out_channels=stem_channels[2], + use_lab=use_lab, + lr_mult=lr_mult_list[0]) + + # stages + self.stages = nn.LayerList() + for i, k in enumerate(stage_config): + in_channels, mid_channels, out_channels, block_num, is_downsample, light_block, kernel_size, layer_num = stage_config[ + k] + self.stages.append( + HGV2_Stage( + in_channels, + mid_channels, + out_channels, + block_num, + layer_num, + is_downsample, + light_block, + kernel_size, + use_lab, + lr_mult=lr_mult_list[i + 1])) + + self.avg_pool = AdaptiveAvgPool2D(1) + + if self.use_last_conv: + self.last_conv = Conv2D( + in_channels=out_channels, + out_channels=self.class_expand, + kernel_size=1, + stride=1, + padding=0, + bias_attr=False) + self.act = ReLU() + if self.use_lab: + self.lab = LearnableAffineBlock() + self.dropout = nn.Dropout( + p=dropout_prob, mode="downscale_in_infer") + + self.flatten = nn.Flatten(start_axis=1, stop_axis=-1) + self.fc = nn.Linear(self.class_expand if self.use_last_conv else + out_channels, self.class_num) + + self._init_weights() + + def _init_weights(self): + for m in self.sublayers(): + if isinstance(m, nn.Conv2D): + kaiming_normal_(m.weight) + elif isinstance(m, (nn.BatchNorm2D)): + ones_(m.weight) + zeros_(m.bias) + elif isinstance(m, nn.Linear): + zeros_(m.bias) + + def forward(self, x): + x = self.stem(x) + + for stage in self.stages: + x = stage(x) + x = self.avg_pool(x) + + if self.use_last_conv: + x = self.last_conv(x) + x = self.act(x) + if self.use_lab: + x = self.lab(x) + x = self.dropout(x) + x = self.flatten(x) + x = self.fc(x) + + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain( + model, + model_url, + use_ssld=use_ssld, + use_ssld_stage1_pretrained=True) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def PPHGNetV2_B0(pretrained=False, use_ssld=False, **kwargs): + """ + PPHGNetV2_B0 + Args: + pretrained (bool/str): If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld (bool) Whether using ssld pretrained model when pretrained is True. + Returns: + model: nn.Layer. Specific `PPHGNetV2_B0` model depends on args. 
+ """ + stage_config = { + # in_channels, mid_channels, out_channels, num_blocks, is_downsample, light_block, kernel_size, layer_num + "stage1": [16, 16, 64, 1, False, False, 3, 3], + "stage2": [64, 32, 256, 1, True, False, 3, 3], + "stage3": [256, 64, 512, 2, True, True, 5, 3], + "stage4": [512, 128, 1024, 1, True, True, 5, 3], + } + + model = PPHGNetV2( + stem_channels=[3, 16, 16], + stage_config=stage_config, + use_lab=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNetV2_B0"], use_ssld) + return model + + +def PPHGNetV2_B1(pretrained=False, use_ssld=False, **kwargs): + """ + PPHGNetV2_B1 + Args: + pretrained (bool/str): If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld (bool) Whether using ssld pretrained model when pretrained is True. + Returns: + model: nn.Layer. Specific `PPHGNetV2_B1` model depends on args. + """ + stage_config = { + # in_channels, mid_channels, out_channels, num_blocks, is_downsample, light_block, kernel_size, layer_num + "stage1": [32, 32, 64, 1, False, False, 3, 3], + "stage2": [64, 48, 256, 1, True, False, 3, 3], + "stage3": [256, 96, 512, 2, True, True, 5, 3], + "stage4": [512, 192, 1024, 1, True, True, 5, 3], + } + + model = PPHGNetV2( + stem_channels=[3, 24, 32], + stage_config=stage_config, + use_lab=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNetV2_B1"], use_ssld) + return model + + +def PPHGNetV2_B2(pretrained=False, use_ssld=False, **kwargs): + """ + PPHGNetV2_B2 + Args: + pretrained (bool/str): If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld (bool) Whether using ssld pretrained model when pretrained is True. + Returns: + model: nn.Layer. Specific `PPHGNetV2_B2` model depends on args. + """ + stage_config = { + # in_channels, mid_channels, out_channels, num_blocks, is_downsample, light_block, kernel_size, layer_num + "stage1": [32, 32, 96, 1, False, False, 3, 4], + "stage2": [96, 64, 384, 1, True, False, 3, 4], + "stage3": [384, 128, 768, 3, True, True, 5, 4], + "stage4": [768, 256, 1536, 1, True, True, 5, 4], + } + + model = PPHGNetV2( + stem_channels=[3, 24, 32], + stage_config=stage_config, + use_lab=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNetV2_B2"], use_ssld) + return model + + +def PPHGNetV2_B3(pretrained=False, use_ssld=False, **kwargs): + """ + PPHGNetV2_B3 + Args: + pretrained (bool/str): If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld (bool) Whether using ssld pretrained model when pretrained is True. + Returns: + model: nn.Layer. Specific `PPHGNetV2_B3` model depends on args. + """ + stage_config = { + # in_channels, mid_channels, out_channels, num_blocks, is_downsample, light_block, kernel_size, layer_num + "stage1": [32, 32, 128, 1, False, False, 3, 5], + "stage2": [128, 64, 512, 1, True, False, 3, 5], + "stage3": [512, 128, 1024, 3, True, True, 5, 5], + "stage4": [1024, 256, 2048, 1, True, True, 5, 5], + } + + model = PPHGNetV2( + stem_channels=[3, 24, 32], + stage_config=stage_config, + use_lab=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNetV2_B3"], use_ssld) + return model + + +def PPHGNetV2_B4(pretrained=False, use_ssld=False, **kwargs): + """ + PPHGNetV2_B4 + Args: + pretrained (bool/str): If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. 
+ use_ssld (bool) Whether using ssld pretrained model when pretrained is True. + Returns: + model: nn.Layer. Specific `PPHGNetV2_B4` model depends on args. + """ + stage_config = { + # in_channels, mid_channels, out_channels, num_blocks, is_downsample, light_block, kernel_size, layer_num + "stage1": [48, 48, 128, 1, False, False, 3, 6], + "stage2": [128, 96, 512, 1, True, False, 3, 6], + "stage3": [512, 192, 1024, 3, True, True, 5, 6], + "stage4": [1024, 384, 2048, 1, True, True, 5, 6], + } + + model = PPHGNetV2( + stem_channels=[3, 32, 48], + stage_config=stage_config, + use_lab=False, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNetV2_B4"], use_ssld) + return model + + +def PPHGNetV2_B5(pretrained=False, use_ssld=False, **kwargs): + """ + PPHGNetV2_B5 + Args: + pretrained (bool/str): If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld (bool) Whether using ssld pretrained model when pretrained is True. + Returns: + model: nn.Layer. Specific `PPHGNetV2_B5` model depends on args. + """ + stage_config = { + # in_channels, mid_channels, out_channels, num_blocks, is_downsample, light_block, kernel_size, layer_num + "stage1": [64, 64, 128, 1, False, False, 3, 6], + "stage2": [128, 128, 512, 2, True, False, 3, 6], + "stage3": [512, 256, 1024, 5, True, True, 5, 6], + "stage4": [1024, 512, 2048, 2, True, True, 5, 6], + } + + model = PPHGNetV2( + stem_channels=[3, 32, 64], + stage_config=stage_config, + use_lab=False, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNetV2_B5"], use_ssld) + return model + + +def PPHGNetV2_B6(pretrained=False, use_ssld=False, **kwargs): + """ + PPHGNetV2_B6 + Args: + pretrained (bool/str): If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld (bool) Whether using ssld pretrained model when pretrained is True. + Returns: + model: nn.Layer. Specific `PPHGNetV2_B6` model depends on args. + """ + stage_config = { + # in_channels, mid_channels, out_channels, num_blocks, is_downsample, light_block, kernel_size, layer_num + "stage1": [96, 96, 192, 2, False, False, 3, 6], + "stage2": [192, 192, 512, 3, True, False, 3, 6], + "stage3": [512, 384, 1024, 6, True, True, 5, 6], + "stage4": [1024, 768, 2048, 3, True, True, 5, 6], + } + + model = PPHGNetV2( + stem_channels=[3, 48, 96], + stage_config=stage_config, + use_lab=False, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNetV2_B6"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_lcnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_lcnet.py new file mode 100644 index 000000000..cc672b0f4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_lcnet.py @@ -0,0 +1,520 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
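For the PPHGNetV2 variants above, each `stage_config` row is the 8-tuple `[in_channels, mid_channels, out_channels, block_num, is_downsample, light_block, kernel_size, layer_num]`, and `lr_mult_list` supplies one learning-rate multiplier for the stem plus one per stage (five values in total). A hedged usage sketch, with illustrative multiplier values that are not taken from any config in this patch and the same `ppcls` import-path assumption as the earlier sketches:

import paddle
from ppcls.arch.backbone.legendary_models.pp_hgnet_v2 import PPHGNetV2_B4

model = PPHGNetV2_B4(
    pretrained=False,
    class_num=1000,
    lr_mult_list=[0.1, 0.2, 0.4, 0.7, 1.0])   # stem, then stage1..stage4
model.eval()
logits = model(paddle.randn([1, 3, 224, 224]))   # -> shape [1, 1000]
print(logits.shape)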
+ +from __future__ import absolute_import, division, print_function + +import paddle +import paddle.nn as nn +from paddle import ParamAttr +from paddle.nn import BatchNorm2D, Conv2D, Dropout, Linear +from paddle.regularizer import L2Decay +from paddle.nn.initializer import KaimingNormal + +from .custom_devices_layers import AdaptiveAvgPool2D +from ..base.theseus_layer import TheseusLayer +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "PPLCNet_x0_25": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams", + "PPLCNet_x0_35": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams", + "PPLCNet_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams", + "PPLCNet_x0_75": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams", + "PPLCNet_x1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams", + "PPLCNet_x1_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams", + "PPLCNet_x2_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams", + "PPLCNet_x2_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams" +} + +MODEL_STAGES_PATTERN = { + "PPLCNet": ["blocks2", "blocks3", "blocks4", "blocks5", "blocks6"] +} + +__all__ = list(MODEL_URLS.keys()) + +# Each element(list) represents a depthwise block, which is composed of k, in_c, out_c, s, use_se. +# k: kernel_size +# in_c: input channel number in depthwise block +# out_c: output channel number in depthwise block +# s: stride in depthwise block +# use_se: whether to use SE block + +NET_CONFIG = { + # [k, in_c, out_c, s, use_se] + "blocks2": [[3, 16, 32, 1, False]], + "blocks3": [[3, 32, 64, 2, False], [3, 64, 64, 1, False]], + "blocks4": [[3, 64, 128, 2, False], [3, 128, 128, 1, False]], + "blocks5": [[3, 128, 256, 2, False], [5, 256, 256, 1, False], + [5, 256, 256, 1, False], [5, 256, 256, 1, False], + [5, 256, 256, 1, False], [5, 256, 256, 1, False]], + "blocks6": [[5, 256, 512, 2, True], [5, 512, 512, 1, True]] +} + + +def make_divisible(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +def _create_act(act): + if act == "hardswish": + return nn.Hardswish() + elif act == "relu": + return nn.ReLU() + elif act == "relu6": + return nn.ReLU6() + else: + raise RuntimeError( + "The activation function is not supported: {}".format(act)) + + +def _create_model_urls(model_scale): + model_scale_str = "PPLCNet_x" + str(model_scale).replace('.', '_') + if model_scale_str in MODEL_URLS: + return MODEL_URLS[model_scale_str] + else: + return None + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + filter_size, + num_filters, + stride, + num_groups=1, + lr_mult=1.0, + act="hardswish"): + super().__init__() + + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=num_groups, + weight_attr=ParamAttr( + initializer=KaimingNormal(), learning_rate=lr_mult), + bias_attr=False) + + self.bn = 
BatchNorm2D( + num_filters, + weight_attr=ParamAttr( + regularizer=L2Decay(0.0), learning_rate=lr_mult), + bias_attr=ParamAttr( + regularizer=L2Decay(0.0), learning_rate=lr_mult)) + self.act = _create_act(act) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.act(x) + return x + + +class DepthwiseSeparable(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + dw_size=3, + use_se=False, + lr_mult=1.0, + act="hardswish"): + super().__init__() + self.use_se = use_se + self.dw_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=num_channels, + filter_size=dw_size, + stride=stride, + num_groups=num_channels, + lr_mult=lr_mult, + act=act) + if use_se: + self.se = SEModule(num_channels, lr_mult=lr_mult) + self.pw_conv = ConvBNLayer( + num_channels=num_channels, + filter_size=1, + num_filters=num_filters, + stride=1, + lr_mult=lr_mult, + act=act) + + def forward(self, x): + x = self.dw_conv(x) + if self.use_se: + x = self.se(x) + x = self.pw_conv(x) + return x + + +class SEModule(TheseusLayer): + def __init__(self, channel, reduction=4, lr_mult=1.0): + super().__init__() + self.avg_pool = AdaptiveAvgPool2D(1) + self.conv1 = Conv2D( + in_channels=channel, + out_channels=channel // reduction, + kernel_size=1, + stride=1, + padding=0, + weight_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=ParamAttr(learning_rate=lr_mult)) + self.relu = nn.ReLU() + self.conv2 = Conv2D( + in_channels=channel // reduction, + out_channels=channel, + kernel_size=1, + stride=1, + padding=0, + weight_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=ParamAttr(learning_rate=lr_mult)) + self.hardsigmoid = nn.Hardsigmoid() + + def forward(self, x): + identity = x + x = self.avg_pool(x) + x = self.conv1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.hardsigmoid(x) + x = paddle.multiply(x=identity, y=x) + return x + + +class PPLCNet(TheseusLayer): + def __init__(self, + scale=1.0, + class_num=1000, + dropout_prob=0.2, + class_expand=1280, + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + stride_list=[2, 2, 2, 2, 2], + use_last_conv=True, + act="hardswish", + **kwargs): + super().__init__(**kwargs) + self.scale = scale + self.class_expand = class_expand + self.lr_mult_list = lr_mult_list + self.use_last_conv = use_last_conv + self.stride_list = stride_list + self.net_config = NET_CONFIG + if isinstance(self.lr_mult_list, str): + self.lr_mult_list = eval(self.lr_mult_list) + + assert isinstance(self.lr_mult_list, ( + list, tuple + )), "lr_mult_list should be in (list, tuple) but got {}".format( + type(self.lr_mult_list)) + assert len(self.lr_mult_list + ) == 6, "lr_mult_list length should be 6 but got {}".format( + len(self.lr_mult_list)) + + assert isinstance(self.stride_list, ( + list, tuple + )), "stride_list should be in (list, tuple) but got {}".format( + type(self.stride_list)) + assert len(self.stride_list + ) == 5, "stride_list length should be 5 but got {}".format( + len(self.stride_list)) + + for i, stride in enumerate(stride_list[1:]): + self.net_config["blocks{}".format(i + 3)][0][3] = stride + self.conv1 = ConvBNLayer( + num_channels=3, + filter_size=3, + num_filters=make_divisible(16 * scale), + stride=stride_list[0], + lr_mult=self.lr_mult_list[0], + act=act) + + self.blocks2 = nn.Sequential(*[ + DepthwiseSeparable( + num_channels=make_divisible(in_c * scale), + num_filters=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + lr_mult=self.lr_mult_list[1], + act=act) + for i, (k, in_c, out_c, s, se + ) in 
enumerate(self.net_config["blocks2"]) + ]) + + self.blocks3 = nn.Sequential(*[ + DepthwiseSeparable( + num_channels=make_divisible(in_c * scale), + num_filters=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + lr_mult=self.lr_mult_list[2], + act=act) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks3"]) + ]) + + self.blocks4 = nn.Sequential(*[ + DepthwiseSeparable( + num_channels=make_divisible(in_c * scale), + num_filters=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + lr_mult=self.lr_mult_list[3], + act=act) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks4"]) + ]) + + self.blocks5 = nn.Sequential(*[ + DepthwiseSeparable( + num_channels=make_divisible(in_c * scale), + num_filters=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + lr_mult=self.lr_mult_list[4], + act=act) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks5"]) + ]) + + self.blocks6 = nn.Sequential(*[ + DepthwiseSeparable( + num_channels=make_divisible(in_c * scale), + num_filters=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + lr_mult=self.lr_mult_list[5], + act=act) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks6"]) + ]) + + self.avg_pool = AdaptiveAvgPool2D(1) + if self.use_last_conv: + self.last_conv = Conv2D( + in_channels=make_divisible(self.net_config["blocks6"][-1][2] * + scale), + out_channels=self.class_expand, + kernel_size=1, + stride=1, + padding=0, + bias_attr=False) + self.act = _create_act(act) + self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer") + else: + self.last_conv = None + self.flatten = nn.Flatten(start_axis=1, stop_axis=-1) + self.fc = Linear( + self.class_expand if self.use_last_conv else + make_divisible(self.net_config["blocks6"][-1][2] * scale), + class_num) + + def forward(self, x): + x = self.conv1(x) + + x = self.blocks2(x) + x = self.blocks3(x) + x = self.blocks4(x) + x = self.blocks5(x) + x = self.blocks6(x) + + x = self.avg_pool(x) + if self.last_conv is not None: + x = self.last_conv(x) + x = self.act(x) + x = self.dropout(x) + x = self.flatten(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def PPLCNetBaseNet(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNetBaseNet + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. + """ + if "scale" in kwargs: + scale = kwargs["scale"] + kwargs.pop("scale") + else: + scale = 1.0 + + model = PPLCNet(scale=scale, stages_pattern=MODEL_STAGES_PATTERN["PPLCNet"], **kwargs) + model_url = _create_model_urls(scale) + _load_pretrained(pretrained, model, model_url, use_ssld) + return model + + +def PPLCNet_x0_25(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNet_x0_25 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. 
Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPLCNet_x0_25` model depends on args. + """ + model = PPLCNet( + scale=0.25, stages_pattern=MODEL_STAGES_PATTERN["PPLCNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x0_25"], use_ssld) + return model + + +def PPLCNet_x0_35(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNet_x0_35 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPLCNet_x0_35` model depends on args. + """ + model = PPLCNet( + scale=0.35, stages_pattern=MODEL_STAGES_PATTERN["PPLCNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x0_35"], use_ssld) + return model + + +def PPLCNet_x0_5(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNet_x0_5 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPLCNet_x0_5` model depends on args. + """ + model = PPLCNet( + scale=0.5, stages_pattern=MODEL_STAGES_PATTERN["PPLCNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x0_5"], use_ssld) + return model + + +def PPLCNet_x0_75(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNet_x0_75 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPLCNet_x0_75` model depends on args. + """ + model = PPLCNet( + scale=0.75, stages_pattern=MODEL_STAGES_PATTERN["PPLCNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x0_75"], use_ssld) + return model + + +def PPLCNet_x1_0(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNet_x1_0 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPLCNet_x1_0` model depends on args. + """ + model = PPLCNet( + scale=1.0, stages_pattern=MODEL_STAGES_PATTERN["PPLCNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x1_0"], use_ssld) + return model + + +def PPLCNet_x1_5(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNet_x1_5 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPLCNet_x1_5` model depends on args. + """ + model = PPLCNet( + scale=1.5, stages_pattern=MODEL_STAGES_PATTERN["PPLCNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x1_5"], use_ssld) + return model + + +def PPLCNet_x2_0(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNet_x2_0 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. 
Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPLCNet_x2_0` model depends on args. + """ + model = PPLCNet( + scale=2.0, stages_pattern=MODEL_STAGES_PATTERN["PPLCNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x2_0"], use_ssld) + return model + + +def PPLCNet_x2_5(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNet_x2_5 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPLCNet_x2_5` model depends on args. + """ + model = PPLCNet( + scale=2.5, stages_pattern=MODEL_STAGES_PATTERN["PPLCNet"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNet_x2_5"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_lcnet_v2.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_lcnet_v2.py new file mode 100644 index 000000000..85be0a63f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/pp_lcnet_v2.py @@ -0,0 +1,417 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
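+
+# PP-LCNetV2 backbone. The RepDepthwiseSeparable block defined below trains
+# several parallel depthwise branches (kernel sizes dw_size, dw_size - 2, ...)
+# and folds them into a single depthwise convolution for inference via
+# re_parameterize(); an optional 1x1 "last conv" expands the pooled features
+# to class_expand before the classifier head.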
+ +from __future__ import absolute_import, division, print_function + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle import ParamAttr +from paddle.nn import AdaptiveAvgPool2D, BatchNorm2D, Conv2D, Dropout, Linear +from paddle.regularizer import L2Decay +from paddle.nn.initializer import KaimingNormal + +from ..base.theseus_layer import TheseusLayer +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "PPLCNetV2_small": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_small_pretrained.pdparams", + "PPLCNetV2_base": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams", + "PPLCNetV2_large": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_large_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + +NET_CONFIG = { + # in_channels, kernel_size, split_pw, use_rep, use_se, use_shortcut + "stage1": [64, 3, False, False, False, False], + "stage2": [128, 3, False, False, False, False], + "stage3": [256, 5, True, True, True, False], + "stage4": [512, 5, False, True, False, True], +} + + +def make_divisible(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + +def _create_act(act): + if act == "hardswish": + return nn.Hardswish() + elif act == "relu": + return nn.ReLU() + elif act == "relu6": + return nn.ReLU6() + elif act == "prelu": + return nn.PReLU() + elif act == "leaky_relu": + return nn.LeakyReLU(negative_slope=0.1) + else: + raise RuntimeError( + "The activation function is not supported: {}".format(act)) + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + groups=1, + act="relu"): + super().__init__() + self.act = act + self.conv = Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(initializer=KaimingNormal()), + bias_attr=False) + + self.bn = BatchNorm2D( + out_channels, + weight_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + if self.act is not None: + self.act = _create_act(act) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.act: + x = self.act(x) + return x + + +class SEModule(TheseusLayer): + def __init__(self, channel, reduction=4, act='relu'): + super().__init__() + self.avg_pool = AdaptiveAvgPool2D(1) + self.conv1 = Conv2D( + in_channels=channel, + out_channels=channel // reduction, + kernel_size=1, + stride=1, + padding=0) + self.act = _create_act(act) + self.conv2 = Conv2D( + in_channels=channel // reduction, + out_channels=channel, + kernel_size=1, + stride=1, + padding=0) + self.hardsigmoid = nn.Sigmoid() + + def forward(self, x): + identity = x + x = self.avg_pool(x) + x = self.conv1(x) + x = self.act(x) + x = self.conv2(x) + x = self.hardsigmoid(x) + x = paddle.multiply(x=identity, y=x) + return x + + +class RepDepthwiseSeparable(TheseusLayer): + def __init__(self, + in_channels, + out_channels, + stride, + dw_size=3, + split_pw=False, + use_rep=False, + use_se=False, + use_shortcut=False, + act="relu"): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.is_repped = False + + self.dw_size = dw_size + self.split_pw = 
split_pw + self.use_rep = use_rep + self.use_se = use_se + self.use_shortcut = True if use_shortcut and stride == 1 and in_channels == out_channels else False + + if self.use_rep: + self.dw_conv_list = nn.LayerList() + for kernel_size in range(self.dw_size, 0, -2): + if kernel_size == 1 and stride != 1: + continue + dw_conv = ConvBNLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=kernel_size, + stride=stride, + groups=in_channels, + act=None) + self.dw_conv_list.append(dw_conv) + self.dw_conv = nn.Conv2D( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=dw_size, + stride=stride, + padding=(dw_size - 1) // 2, + groups=in_channels) + else: + self.dw_conv = ConvBNLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=dw_size, + stride=stride, + groups=in_channels) + + self.act = _create_act(act) + + if use_se: + self.se = SEModule(in_channels, act=act) + + if self.split_pw: + pw_ratio = 0.5 + self.pw_conv_1 = ConvBNLayer( + in_channels=in_channels, + kernel_size=1, + out_channels=int(out_channels * pw_ratio), + stride=1, + act=act) + self.pw_conv_2 = ConvBNLayer( + in_channels=int(out_channels * pw_ratio), + kernel_size=1, + out_channels=out_channels, + stride=1, + act=act) + else: + self.pw_conv = ConvBNLayer( + in_channels=in_channels, + kernel_size=1, + out_channels=out_channels, + stride=1, + act=act) + + def forward(self, x): + if self.use_rep: + input_x = x + if self.is_repped: + x = self.act(self.dw_conv(x)) + else: + y = self.dw_conv_list[0](x) + for dw_conv in self.dw_conv_list[1:]: + y += dw_conv(x) + x = self.act(y) + else: + x = self.dw_conv(x) + + if self.use_se: + x = self.se(x) + if self.split_pw: + x = self.pw_conv_1(x) + x = self.pw_conv_2(x) + else: + x = self.pw_conv(x) + if self.use_shortcut: + x = x + input_x + return x + + def re_parameterize(self): + if self.use_rep: + self.is_repped = True + kernel, bias = self._get_equivalent_kernel_bias() + self.dw_conv.weight.set_value(kernel) + self.dw_conv.bias.set_value(bias) + + def _get_equivalent_kernel_bias(self): + kernel_sum = 0 + bias_sum = 0 + for dw_conv in self.dw_conv_list: + kernel, bias = self._fuse_bn_tensor(dw_conv) + kernel = self._pad_tensor(kernel, to_size=self.dw_size) + kernel_sum += kernel + bias_sum += bias + return kernel_sum, bias_sum + + def _fuse_bn_tensor(self, branch): + kernel = branch.conv.weight + running_mean = branch.bn._mean + running_var = branch.bn._variance + gamma = branch.bn.weight + beta = branch.bn.bias + eps = branch.bn._epsilon + std = (running_var + eps).sqrt() + t = (gamma / std).reshape((-1, 1, 1, 1)) + return kernel * t, beta - running_mean * gamma / std + + def _pad_tensor(self, tensor, to_size): + from_size = tensor.shape[-1] + if from_size == to_size: + return tensor + pad = (to_size - from_size) // 2 + return F.pad(tensor, [pad, pad, pad, pad]) + + +class PPLCNetV2(TheseusLayer): + def __init__(self, + scale, + depths, + class_num=1000, + dropout_prob=0, + use_last_conv=True, + class_expand=1280, + act="relu", + **kwargs): + super().__init__(**kwargs) + self.scale = scale + self.use_last_conv = use_last_conv + self.class_expand = class_expand + + self.stem = nn.Sequential(* [ + ConvBNLayer( + in_channels=3, + kernel_size=3, + out_channels=make_divisible(32 * scale), + stride=2, + act=act), RepDepthwiseSeparable( + in_channels=make_divisible(32 * scale), + out_channels=make_divisible(64 * scale), + stride=1, + dw_size=3, + act=act) + ]) + + # stages + self.stages = nn.LayerList() + for depth_idx, k in 
enumerate(NET_CONFIG): + in_channels, kernel_size, split_pw, use_rep, use_se, use_shortcut = NET_CONFIG[ + k] + self.stages.append( + nn.Sequential(* [ + RepDepthwiseSeparable( + in_channels=make_divisible((in_channels if i == 0 else + in_channels * 2) * scale), + out_channels=make_divisible(in_channels * 2 * scale), + stride=2 if i == 0 else 1, + dw_size=kernel_size, + split_pw=split_pw, + use_rep=use_rep, + use_se=use_se, + use_shortcut=use_shortcut, + act=act) + for i in range(depths[depth_idx]) + ])) + + self.avg_pool = AdaptiveAvgPool2D(1) + + if self.use_last_conv: + self.last_conv = Conv2D( + in_channels=make_divisible(NET_CONFIG["stage4"][0] * 2 * + scale), + out_channels=self.class_expand, + kernel_size=1, + stride=1, + padding=0, + bias_attr=False) + self.act = _create_act(act) + self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer") + + self.flatten = nn.Flatten(start_axis=1, stop_axis=-1) + in_features = self.class_expand if self.use_last_conv else make_divisible( + NET_CONFIG["stage4"][0] * 2 * scale) + self.fc = Linear(in_features, class_num) + + def forward(self, x): + x = self.stem(x) + for stage in self.stages: + x = stage(x) + x = self.avg_pool(x) + if self.use_last_conv: + x = self.last_conv(x) + x = self.act(x) + x = self.dropout(x) + x = self.flatten(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def PPLCNetV2_small(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNetV2_small + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPLCNetV2_base` model depends on args. + """ + model = PPLCNetV2( + scale=0.75, depths=[2, 2, 4, 2], dropout_prob=0.2, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNetV2_small"], + use_ssld) + return model + + +def PPLCNetV2_base(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNetV2_base + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPLCNetV2_base` model depends on args. + """ + model = PPLCNetV2( + scale=1.0, depths=[2, 2, 6, 2], dropout_prob=0.2, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNetV2_base"], use_ssld) + return model + + +def PPLCNetV2_large(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNetV2_large + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPLCNetV2_base` model depends on args. 
+ """ + model = PPLCNetV2( + scale=1.25, depths=[2, 2, 8, 2], dropout_prob=0.2, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNetV2_large"], + use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/resnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/resnet.py new file mode 100644 index 000000000..e38206ee4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/resnet.py @@ -0,0 +1,653 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/pdf/1512.03385 + +from __future__ import absolute_import, division, print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear, BatchNorm2D +from paddle.nn import MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +from paddle.regularizer import L2Decay +import math + +from .custom_devices_layers import AdaptiveAvgPool2D +from ....utils import logger +from ..base.theseus_layer import TheseusLayer +from ..base.dbb.dbb_block import DiverseBranchBlock +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "ResNet18": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams", + "ResNet18_dbb": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_dbb_pretrained.pdparams", + "ResNet18_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_vd_pretrained.pdparams", + "ResNet34": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_pretrained.pdparams", + "ResNet34_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_vd_pretrained.pdparams", + "ResNet50": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams", + "ResNet50_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams", + "ResNet101": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_pretrained.pdparams", + "ResNet101_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_pretrained.pdparams", + "ResNet152": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_pretrained.pdparams", + "ResNet152_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_vd_pretrained.pdparams", + "ResNet200_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet200_vd_pretrained.pdparams", +} + +MODEL_STAGES_PATTERN = { + "ResNet18": ["blocks[1]", "blocks[3]", "blocks[5]", "blocks[7]"], + "ResNet34": ["blocks[2]", "blocks[6]", "blocks[12]", "blocks[15]"], + 
"ResNet50": ["blocks[2]", "blocks[6]", "blocks[12]", "blocks[15]"], + "ResNet101": ["blocks[2]", "blocks[6]", "blocks[29]", "blocks[32]"], + "ResNet152": ["blocks[2]", "blocks[10]", "blocks[46]", "blocks[49]"], + "ResNet200": ["blocks[2]", "blocks[14]", "blocks[62]", "blocks[65]"] +} + +__all__ = MODEL_URLS.keys() +''' +ResNet config: dict. + key: depth of ResNet. + values: config's dict of specific model. + keys: + block_type: Two different blocks in ResNet, BasicBlock and BottleneckBlock are optional. + block_depth: The number of blocks in different stages in ResNet. + num_channels: The number of channels to enter the next stage. +''' +NET_CONFIG = { + "18": { + "block_type": "BasicBlock", + "block_depth": [2, 2, 2, 2], + "num_channels": [64, 64, 128, 256] + }, + "34": { + "block_type": "BasicBlock", + "block_depth": [3, 4, 6, 3], + "num_channels": [64, 64, 128, 256] + }, + "50": { + "block_type": "BottleneckBlock", + "block_depth": [3, 4, 6, 3], + "num_channels": [64, 256, 512, 1024] + }, + "101": { + "block_type": "BottleneckBlock", + "block_depth": [3, 4, 23, 3], + "num_channels": [64, 256, 512, 1024] + }, + "152": { + "block_type": "BottleneckBlock", + "block_depth": [3, 8, 36, 3], + "num_channels": [64, 256, 512, 1024] + }, + "200": { + "block_type": "BottleneckBlock", + "block_depth": [3, 12, 48, 3], + "num_channels": [64, 256, 512, 1024] + }, +} + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + self.is_vd_mode = is_vd_mode + self.act = act + self.avg_pool = AvgPool2D( + kernel_size=2, + stride=stride, + padding="SAME", + ceil_mode=True, + data_format=data_format) + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=1 if is_vd_mode else stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=False, + data_format=data_format) + + self.bn = BatchNorm( + num_filters, + param_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=ParamAttr(learning_rate=lr_mult), + data_layout=data_format) + self.relu = nn.ReLU() + + def forward(self, x): + if self.is_vd_mode: + x = self.avg_pool(x) + x = self.conv(x) + x = self.bn(x) + if self.act: + x = self.relu(x) + return x + + +class BottleneckBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + layer=ConvBNLayer, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + self.conv0 = layer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv1 = layer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv2 = layer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + lr_mult=lr_mult, + data_format=data_format) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride, + is_vd_mode=False if if_first else True, + lr_mult=lr_mult, + data_format=data_format) + + self.relu = nn.ReLU() + self.shortcut = shortcut + + def forward(self, x): + identity = x + x = self.conv0(x) + x = self.conv1(x) + x = self.conv2(x) + + if self.shortcut: + short = identity + else: + short = 
self.short(identity) + x = paddle.add(x=x, y=short) + x = self.relu(x) + return x + + +class BasicBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + layer=ConvBNLayer, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + + self.stride = stride + self.conv0 = layer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv1 = layer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + act=None, + lr_mult=lr_mult, + data_format=data_format) + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=stride, + is_vd_mode=False if if_first else True, + lr_mult=lr_mult, + data_format=data_format) + self.shortcut = shortcut + self.relu = nn.ReLU() + + def forward(self, x): + identity = x + x = self.conv0(x) + x = self.conv1(x) + if self.shortcut: + short = identity + else: + short = self.short(identity) + x = paddle.add(x=x, y=short) + x = self.relu(x) + return x + + +class ResNet(TheseusLayer): + """ + ResNet + Args: + config: dict. config of ResNet. + version: str="vb". Different version of ResNet, version vd can perform better. + class_num: int=1000. The number of classes. + lr_mult_list: list. Control the learning rate of different stages. + Returns: + model: nn.Layer. Specific ResNet model depends on args. + """ + + def __init__(self, + config, + stages_pattern, + version="vb", + stem_act="relu", + class_num=1000, + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0], + stride_list=[2, 2, 2, 2, 2], + max_pool=True, + data_format="NCHW", + input_image_channel=3, + return_patterns=None, + return_stages=None, + layer_type="ConvBNLayer", + use_first_short_conv=True, + **kargs): + super().__init__() + + self.cfg = config + self.lr_mult_list = lr_mult_list + self.stride_list = stride_list + self.is_vd_mode = version == "vd" + self.class_num = class_num + self.num_filters = [64, 128, 256, 512] + self.block_depth = self.cfg["block_depth"] + self.block_type = self.cfg["block_type"] + self.num_channels = self.cfg["num_channels"] + self.channels_mult = 1 if self.num_channels[-1] == 256 else 4 + + if layer_type == "ConvBNLayer": + layer = ConvBNLayer + elif layer_type == "DiverseBranchBlock": + layer = DiverseBranchBlock + else: + raise Exception() + + assert isinstance(self.lr_mult_list, ( + list, tuple + )), "lr_mult_list should be in (list, tuple) but got {}".format( + type(self.lr_mult_list)) + if len(self.lr_mult_list) != 5: + msg = "lr_mult_list length should be 5 but got {}, default lr_mult_list used".format( + len(self.lr_mult_list)) + logger.warning(msg) + self.lr_mult_list = [1.0, 1.0, 1.0, 1.0, 1.0] + + assert isinstance(self.stride_list, ( + list, tuple + )), "stride_list should be in (list, tuple) but got {}".format( + type(self.stride_list)) + assert len(self.stride_list + ) == 5, "stride_list length should be 5 but got {}".format( + len(self.stride_list)) + + self.stem_cfg = { + #num_channels, num_filters, filter_size, stride + "vb": [[input_image_channel, 64, 7, self.stride_list[0]]], + "vd": [[input_image_channel, 32, 3, self.stride_list[0]], + [32, 32, 3, 1], [32, 64, 3, 1]] + } + + self.stem = nn.Sequential(*[ + ConvBNLayer( + num_channels=in_c, + num_filters=out_c, + filter_size=k, + stride=s, + act=stem_act, + lr_mult=self.lr_mult_list[0], + data_format=data_format) + for in_c, out_c, k, s in self.stem_cfg[version] + ]) + + 
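+        # Stage-1 downsampling: when max_pool is enabled, a 3x3 max pooling with
+        # stride stride_list[1] follows the stem; otherwise the first residual
+        # block of the first stage applies that stride itself (see the `stride`
+        # argument in the block loop below).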
self.max_pool = max_pool + if max_pool: + self.max_pool = MaxPool2D( + kernel_size=3, + stride=stride_list[1], + padding=1, + data_format=data_format) + block_list = [] + for block_idx in range(len(self.block_depth)): + # paddleclas' special improvement version + shortcut = False + # official resnet_vb version + if not use_first_short_conv and block_idx == 0: + shortcut = True + for i in range(self.block_depth[block_idx]): + block_list.append(globals()[self.block_type]( + num_channels=self.num_channels[block_idx] if i == 0 else + self.num_filters[block_idx] * self.channels_mult, + num_filters=self.num_filters[block_idx], + stride=self.stride_list[block_idx + 1] + if i == 0 and (block_idx != 0 or not max_pool) else 1, + shortcut=shortcut, + if_first=block_idx == i == 0 if version == "vd" else True, + layer=layer, + lr_mult=self.lr_mult_list[block_idx + 1], + data_format=data_format)) + shortcut = True + self.blocks = nn.Sequential(*block_list) + + self.avg_pool = AdaptiveAvgPool2D(1, data_format=data_format) + self.flatten = nn.Flatten() + self.avg_pool_channels = self.num_channels[-1] * 2 + stdv = 1.0 / math.sqrt(self.avg_pool_channels * 1.0) + self.fc = Linear( + self.avg_pool_channels, + self.class_num, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + + self.data_format = data_format + + super().init_res( + stages_pattern, + return_patterns=return_patterns, + return_stages=return_stages) + + def forward(self, x): + with paddle.static.amp.fp16_guard(): + return self._forward(x) + + def _forward(self, x): + if self.data_format == "NHWC": + x = paddle.transpose(x, [0, 2, 3, 1]) + x.stop_gradient = True + x = self.stem(x) + if self.max_pool: + x = self.max_pool(x) + x = self.blocks(x) + x = self.avg_pool(x) + x = self.flatten(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNet18(pretrained=False, + use_ssld=False, + layer_type="ConvBNLayer", + **kwargs): + """ + ResNet18 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet18` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["18"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet18"], + version="vb", + layer_type=layer_type, + **kwargs) + if layer_type == "DiverseBranchBlock": + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18_dbb"], + use_ssld) + else: + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18"], use_ssld) + return model + + +def ResNet18_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet18_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet18_vd` model depends on args. 
+ """ + model = ResNet( + config=NET_CONFIG["18"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet18"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18_vd"], use_ssld) + return model + + +def ResNet34(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet34 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet34` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["34"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet34"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet34"], use_ssld) + return model + + +def ResNet34_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet34_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet34_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["34"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet34"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet34_vd"], use_ssld) + return model + + +def ResNet50(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet50 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet50` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["50"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet50"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50"], use_ssld) + return model + + +def ResNet50_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet50_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet50_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["50"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet50"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50_vd"], use_ssld) + return model + + +def ResNet101(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet101 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet101` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["101"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet101"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet101"], use_ssld) + return model + + +def ResNet101_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet101_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. 
Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet101_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["101"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet101"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet101_vd"], use_ssld) + return model + + +def ResNet152(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet152 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet152` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["152"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet152"], + version="vb", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet152"], use_ssld) + return model + + +def ResNet152_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet152_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet152_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["152"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet152"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet152_vd"], use_ssld) + return model + + +def ResNet200_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet200_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet200_vd` model depends on args. + """ + model = ResNet( + config=NET_CONFIG["200"], + stages_pattern=MODEL_STAGES_PATTERN["ResNet200"], + version="vd", + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet200_vd"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/swin_transformer.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/swin_transformer.py new file mode 100644 index 000000000..9a464e9dd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/swin_transformer.py @@ -0,0 +1,1002 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
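+
+# Swin Transformer backbone with an optional fused scaled-dot-product attention
+# path for window attention (see check_support_fused_op and the use_fused_attn /
+# use_fused_linear keyword arguments of SwinTransformer).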
+ +# Code was based on https://github.com/microsoft/Swin-Transformer +# reference: https://arxiv.org/abs/2103.14030 + +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.initializer import TruncatedNormal, Constant + +from ..model_zoo.vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity +from ..base.theseus_layer import TheseusLayer +from ....utils.save_load import load_dygraph_pretrain +from ....utils import logger + + +MODEL_URLS = { + "SwinTransformer_tiny_patch4_window7_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SwinTransformer_tiny_patch4_window7_224_pretrained.pdparams", + "SwinTransformer_small_patch4_window7_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SwinTransformer_small_patch4_window7_224_pretrained.pdparams", + "SwinTransformer_base_patch4_window7_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SwinTransformer_base_patch4_window7_224_pretrained.pdparams", + "SwinTransformer_base_patch4_window12_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SwinTransformer_base_patch4_window12_384_pretrained.pdparams", + "SwinTransformer_large_patch4_window7_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SwinTransformer_large_patch4_window7_224_pretrained.pdparams", + "SwinTransformer_large_patch4_window12_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SwinTransformer_large_patch4_window12_384_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +Linear = nn.Linear + + +def masked_fill(x, mask, value): + y = paddle.full(x.shape, value, x.dtype) + return paddle.where(mask, y, x) + + +def check_support_fused_op(use_fused_linear): + if use_fused_linear: + if paddle.device.cuda.get_device_capability()[0] >= 8: + return True + else: + logger.warning("The current device don't support Fused OP! 
Using the general Linear instead.") + return False + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def pading_for_not_divisible(pixel_values, + height, + width, + patch_size, + format="BCHW", + function="split"): + if isinstance(patch_size, int): + patch_size = (patch_size, patch_size) + if height % patch_size[0] == 0 and width % patch_size[1] == 0: + return pixel_values, (0, 0, 0, 0, 0, 0, 0, 0) + if function == "split": + pading_width = patch_size[1] - width % patch_size[1] + pading_height = patch_size[0] - height % patch_size[0] + elif function == "merge": + pading_width = width % 2 + pading_height = height % 2 + if format == "BCHW": + pad_index = (0, 0, 0, 0, 0, pading_height, 0, pading_width) + elif format == "BHWC": + pad_index = (0, 0, 0, pading_height, 0, pading_width, 0, 0) + else: + assert ("vaild format") + + return F.pad(pixel_values, pad_index), pad_index + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.reshape( + [B, H // window_size, window_size, W // window_size, window_size, C]) + windows = x.transpose([0, 1, 3, 2, 4, 5]).reshape( + [-1, window_size, window_size, C]) + return windows + + +def window_reverse(windows, window_size, H, W, C): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + x = windows.reshape( + [-1, H // window_size, W // window_size, window_size, window_size, C]) + x = x.transpose([0, 1, 3, 2, 4, 5]).reshape([-1, H, W, C]) + return x + + +class WindowAttention(nn.Layer): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 + """ + + def __init__(self, + dim, + window_size, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop=0., + proj_drop=0., + use_fused_attn=False): + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + # define a parameter table of relative position bias + # 2*Wh-1 * 2*Ww-1, nH + self.relative_position_bias_table = self.create_parameter( + shape=((2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads), + default_initializer=zeros_) + self.add_parameter("relative_position_bias_table", + self.relative_position_bias_table) + + # get pair-wise relative position index for each token inside the window + coords_h = paddle.arange(self.window_size[0]) + coords_w = paddle.arange(self.window_size[1]) + coords = paddle.stack(paddle.meshgrid( + [coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = paddle.flatten(coords, 1) # 2, Wh*Ww + + coords_flatten_1 = coords_flatten.unsqueeze(axis=2) + coords_flatten_2 = coords_flatten.unsqueeze(axis=1) + relative_coords = coords_flatten_1 - coords_flatten_2 + + relative_coords = relative_coords.transpose( + [1, 2, 0]) # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[ + 0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + + self.register_buffer("relative_position_index", + relative_position_index) + + self.qkv = Linear(dim, dim * 3, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table) + self.softmax = nn.Softmax(axis=-1) + + self.use_fused_attn = use_fused_attn + + def eval(self, ): + # this is used to re-param swin for model export + relative_position_bias_table = self.relative_position_bias_table + window_size = self.window_size + index = self.relative_position_index.reshape([-1]) + + relative_position_bias = paddle.index_select( + relative_position_bias_table, index) + relative_position_bias = relative_position_bias.reshape([ + window_size[0] * window_size[1], window_size[0] * window_size[1], + -1 + ]) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.transpose( + [2, 0, 1]) # nH, Wh*Ww, Wh*Ww + relative_position_bias = relative_position_bias.unsqueeze(0) + self.register_buffer("relative_position_bias", relative_position_bias) + + def get_relative_position_bias(self): + if self.training or not hasattr(self, "relative_position_bias"): + index = self.relative_position_index.reshape([-1]) + + relative_position_bias = paddle.index_select( + self.relative_position_bias_table, index) + relative_position_bias = relative_position_bias.reshape([ + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], -1 + ]) # Wh*Ww,Wh*Ww,nH + + relative_position_bias = relative_position_bias.transpose( + [2, 0, 1]) # nH, Wh*Ww, Wh*Ww + return relative_position_bias.unsqueeze(0) + else: + return self.relative_position_bias + + def forward(self, x, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape( + [B_, N, 3, self.num_heads, C // self.num_heads]) + + if self.use_fused_attn: + qkv = qkv.transpose((2, 0, 1, 3, 4)) + q, k, 
v = qkv[0], qkv[1], qkv[2] + attn_mask = self.get_relative_position_bias() + if mask is not None: + nW = mask.shape[0] + mask = mask.reshape((1, nW, 1, N, N)).expand((B_ // nW, -1, self.num_heads, -1, -1)) + attn_mask = attn_mask + mask.reshape((-1, self.num_heads, N, N)) + attn_mask = attn_mask.expand((B_, -1, -1, -1)) + attn = paddle.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0., attn_mask=attn_mask) + else: + qkv = qkv.transpose((2, 0, 3, 1, 4)) + q, k, v = qkv[0], qkv[1], qkv[2] + q = q * self.scale + attn = paddle.mm(q, k.transpose([0, 1, 3, 2])) + attn = attn + self.get_relative_position_bias() + + if mask is not None: + nW = mask.shape[0] + attn = attn.reshape([B_ // nW, nW, self.num_heads, N, N + ]) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.reshape([B_, self.num_heads, N, N]) + attn = self.softmax(attn) + attn = self.attn_drop(attn) + attn = paddle.mm(attn, v) + attn = attn.transpose([0, 2, 1, 3]) + x = attn.reshape([B_, N, C]) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self): + return "dim={}, window_size={}, num_heads={}".format( + self.dim, self.window_size, self.num_heads) + + def flops(self, N): + # calculate flops for 1 window with token length of N + flops = 0 + # qkv = self.qkv(x) + flops += N * self.dim * 3 * self.dim + # attn = (q @ k.transpose(-2, -1)) + flops += self.num_heads * N * (self.dim // self.num_heads) * N + # x = (attn @ v) + flops += self.num_heads * N * N * (self.dim // self.num_heads) + # x = self.proj(x) + flops += N * self.dim * self.dim + return flops + + +class SwinTransformerBlock(nn.Layer): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resulotion. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Layer, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, + dim, + input_resolution, + num_heads, + window_size=7, + shift_size=0, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + use_fused_attn=False): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, + window_size=to_2tuple(self.window_size), + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + use_fused_attn=use_fused_attn) + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + H, W = self.input_resolution + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def get_attn_mask(self, height, width, dtype): + if self.shift_size > 0: + # calculate attention mask for shifted window multihead self attention + img_mask = paddle.zeros((1, height, width, 1), dtype=dtype) + height_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), ) + width_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), ) + count = 0 + for height_slice in height_slices: + for width_slice in width_slices: + img_mask[:, height_slice, width_slice, :] = count + count += 1 + + mask_windows = window_partition(img_mask, self.window_size) + mask_windows = mask_windows.reshape( + (-1, self.window_size * self.window_size)) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = masked_fill(attn_mask, attn_mask != 0, float(-100.0)) + attn_mask = masked_fill(attn_mask, attn_mask == 0, float(0.0)) + else: + attn_mask = None + return attn_mask + + def forward(self, x, input_dimensions): + H, W = input_dimensions + B, L, C = x.shape + + shortcut = x + x = self.norm1(x) + x = x.reshape([B, H, W, C]) + + x, pad_values = pading_for_not_divisible(x, H, W, self.window_size, + "BHWC") + _, height_pad, width_pad, _ = x.shape + + padding_state = pad_values[3] > 0 or pad_values[ + 5] > 0 # change variable name + # cyclic shift + if self.shift_size > 0: + shifted_x = paddle.roll( + x, shifts=(-self.shift_size, -self.shift_size), axis=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition( + shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.reshape( + [-1, self.window_size * self.window_size, + C]) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + #check did it need to calculate again + attn_mask = self.get_attn_mask(height_pad, width_pad, x.dtype) + + attn_windows = self.attn( + x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.reshape( + [-1, self.window_size, self.window_size, C]) + shifted_x = window_reverse(attn_windows, self.window_size, height_pad, + width_pad, C) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = paddle.roll( + shifted_x, + shifts=(self.shift_size, self.shift_size), + axis=(1, 2)) + else: + x = shifted_x + + if padding_state: + x = x[:, :H, :W, :] + x = x.reshape([B, H * W, C]) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + def extra_repr(self): + return "dim={}, input_resolution={}, num_heads={}, window_size={}, shift_size={}, mlp_ratio={}".format( + self.dim, self.input_resolution, self.num_heads, self.window_size, + self.shift_size, self.mlp_ratio) + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # W-MSA/SW-MSA + nW = H * W / self.window_size / self.window_size + flops += nW * self.attn.flops(self.window_size * self.window_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + + +class PatchMerging(nn.Layer): + r""" Patch Merging Layer. 
+ + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias_attr=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x, input_dimensions): + """ + x: B, H*W, C + """ + H, W = input_dimensions + B, L, C = x.shape + x = x.reshape([B, H, W, C]) + x, _ = pading_for_not_divisible(x, H, W, 2, "BHWC", function="merge") + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = paddle.reshape(x, x.shape) + x = paddle.concat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + + # x = x.reshape([B, H // 2, 2, W // 2, 2, C]) + # x = x.transpose((0, 1, 3, 4, 2, 5)) + + x = x.reshape([B, -1, 4 * C]) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + def extra_repr(self): + return "input_resolution={}, dim={}".format(self.input_resolution, + self.dim) + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.dim + flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim + return flops + + +class BasicLayer(nn.Layer): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Layer | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
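+        use_fused_attn (bool, optional): Whether the attention blocks use the fused
+            scaled-dot-product attention path. Default: False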
+ """ + + def __init__(self, + dim, + input_resolution, + depth, + num_heads, + window_size, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + norm_layer=nn.LayerNorm, + downsample=None, + use_checkpoint=False, + use_fused_attn=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.LayerList([ + SwinTransformerBlock( + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] + if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + use_fused_attn=use_fused_attn) for i in range(depth) + ]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x, input_dimensions): + H, W = input_dimensions + for blk in self.blocks: + x = blk(x, input_dimensions) + if self.downsample is not None: + H, W = (H + 1) // 2, (W + 1) // 2 + x = self.downsample(x, input_dimensions) + return x, (H, W) + + def extra_repr(self): + return "dim={}, input_resolution={}, depth={}".format( + self.dim, self.input_resolution, self.depth) + + def flops(self): + flops = 0 + for blk in self.blocks: + flops += blk.flops() + if self.downsample is not None: + flops += self.downsample.flops() + return flops + + +class PatchEmbed(nn.Layer): + """ Image to Patch Embedding + + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Layer, optional): Normalization layer. Default: None + """ + + def __init__(self, + img_size=224, + patch_size=4, + in_chans=3, + embed_dim=96, + norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [ + img_size[0] // patch_size[0], img_size[1] // patch_size[1] + ] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv2D( + in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + x, _ = pading_for_not_divisible(x, H, W, self.patch_size, "BCHW") + x = self.proj(x) + _, _, height, width = x.shape + output_dimensions = (height, width) + x = x.flatten(2).transpose([0, 2, 1]) # B Ph*Pw C + if self.norm is not None: + x = self.norm(x) + return x, output_dimensions + + def flops(self): + Ho, Wo = self.patches_resolution + flops = Ho * Wo * self.embed_dim * self.in_chans * ( + self.patch_size[0] * self.patch_size[1]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops + + +class SwinTransformer(TheseusLayer): + """ Swin Transformer + A PaddlePaddle impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + img_size (int | tuple(int)): Input image size. 
Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Layer): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False + """ + + def __init__(self, + img_size=224, + patch_size=4, + in_chans=3, + class_num=1000, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + norm_layer=nn.LayerNorm, + ape=False, + patch_norm=True, + use_checkpoint=False, + **kwargs): + super(SwinTransformer, self).__init__() + + self.num_classes = num_classes = class_num + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2**(self.num_layers - 1)) + self.mlp_ratio = mlp_ratio + use_fused_attn = check_support_fused_op(kwargs.get('use_fused_attn', False)) + use_fused_linear = kwargs.get('use_fused_linear', False) + global Linear + Linear = paddle.incubate.nn.FusedLinear if use_fused_linear else nn.Linear + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = self.create_parameter( + shape=(1, num_patches, embed_dim), default_initializer=zeros_) + self.add_parameter("absolute_pos_embed", self.absolute_pos_embed) + trunc_normal_(self.absolute_pos_embed) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = np.linspace(0, drop_path_rate, + sum(depths)).tolist() # stochastic depth decay rule + + # build layers + self.layers = nn.LayerList() + for i_layer in range(self.num_layers): + layer = BasicLayer( + dim=int(embed_dim * 2**i_layer), + input_resolution=(patches_resolution[0] // (2**i_layer), + patches_resolution[1] // (2**i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging + if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint, + 
use_fused_attn=use_fused_attn) + self.layers.append(layer) + + self.norm = norm_layer(self.num_features) + self.avgpool = nn.AdaptiveAvgPool1D(1) + + self.head = Linear( + self.num_features, + num_classes) if self.num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, (nn.Linear, paddle.incubate.nn.FusedLinear)): + trunc_normal_(m.weight) + if m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + x, output_dimensions = self.patch_embed(x) + if self.ape: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + + for layer in self.layers: + x, output_dimensions = layer(x, output_dimensions) + + x = self.norm(x) # B L C + x = self.avgpool(x.transpose([0, 2, 1])) # B C 1 + x = paddle.flatten(x, 1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + def flops(self): + flops = 0 + flops += self.patch_embed.flops() + for _, layer in enumerate(self.layers): + flops += layer.flops() + flops += self.num_features * self.patches_resolution[ + 0] * self.patches_resolution[1] // (2**self.num_layers) + flops += self.num_features * self.num_classes + return flops + + +def _load_pretrained(pretrained, + model, + model_url, + use_ssld=False, + use_imagenet22k_pretrained=False, + use_imagenet22kto1k_pretrained=False, + **kwargs): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain( + model, + model_url, + use_ssld=use_ssld, + use_imagenet22k_pretrained=use_imagenet22k_pretrained, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def SwinTransformer_tiny_patch4_window7_224( + pretrained=False, + use_ssld=False, + use_imagenet22k_pretrained=False, + use_imagenet22kto1k_pretrained=False, + **kwargs): + model = SwinTransformer( + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + drop_path_rate=0.2, # if imagenet22k or imagenet22kto1k, set drop_path_rate=0.1 + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformer_tiny_patch4_window7_224"], + use_ssld=use_ssld, + use_imagenet22k_pretrained=use_imagenet22k_pretrained, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + return model + + +def SwinTransformer_small_patch4_window7_224( + pretrained=False, + use_ssld=False, + use_imagenet22k_pretrained=False, + use_imagenet22kto1k_pretrained=False, + **kwargs): + model = SwinTransformer( + embed_dim=96, + depths=[2, 2, 18, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + drop_path_rate=0.3, # if imagenet22k or imagenet22kto1k, set drop_path_rate=0.2 + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformer_small_patch4_window7_224"], + use_ssld=use_ssld, + use_imagenet22k_pretrained=use_imagenet22k_pretrained, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + return model + + +def SwinTransformer_base_patch4_window7_224( + pretrained=False, + use_ssld=False, + use_imagenet22k_pretrained=False, + use_imagenet22kto1k_pretrained=False, + **kwargs): + model = SwinTransformer( + embed_dim=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=7, + drop_path_rate=0.5, # if imagenet22k or imagenet22kto1k, set drop_path_rate=0.2 + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformer_base_patch4_window7_224"], + use_ssld=use_ssld, + use_imagenet22k_pretrained=use_imagenet22k_pretrained, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + return model + + +def SwinTransformer_base_patch4_window12_384( + pretrained=False, + use_ssld=False, + use_imagenet22k_pretrained=False, + use_imagenet22kto1k_pretrained=False, + **kwargs): + model = SwinTransformer( + img_size=384, + embed_dim=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12, + drop_path_rate=0.5, # if imagenet22k or imagenet22kto1k, set drop_path_rate=0.2 + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformer_base_patch4_window12_384"], + use_ssld=use_ssld, + use_imagenet22k_pretrained=use_imagenet22k_pretrained, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + return model + + +def SwinTransformer_large_patch4_window7_224( + pretrained=False, + use_ssld=False, + use_imagenet22k_pretrained=False, + use_imagenet22kto1k_pretrained=True, + **kwargs): + model = SwinTransformer( + embed_dim=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=7, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformer_large_patch4_window7_224"], + use_ssld=use_ssld, + use_imagenet22k_pretrained=use_imagenet22k_pretrained, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + return model + + +def SwinTransformer_large_patch4_window12_384( + pretrained=False, + use_ssld=False, + use_imagenet22k_pretrained=False, + use_imagenet22kto1k_pretrained=True, + **kwargs): + model = SwinTransformer( + img_size=384, + embed_dim=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + **kwargs) + _load_pretrained( + pretrained, + model, + 
MODEL_URLS["SwinTransformer_large_patch4_window12_384"], + use_ssld=use_ssld, + use_imagenet22k_pretrained=use_imagenet22k_pretrained, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/vgg.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/vgg.py new file mode 100644 index 000000000..8a0f0156e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/legendary_models/vgg.py @@ -0,0 +1,261 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1409.1556 + +from __future__ import absolute_import, division, print_function + +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import MaxPool2D + +from ..base.theseus_layer import TheseusLayer +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "VGG11": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG11_pretrained.pdparams", + "VGG13": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG13_pretrained.pdparams", + "VGG16": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG16_pretrained.pdparams", + "VGG19": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG19_pretrained.pdparams", +} + +MODEL_STAGES_PATTERN = { + "VGG": [ + "conv_block_1", "conv_block_2", "conv_block_3", "conv_block_4", + "conv_block_5" + ] +} + +__all__ = MODEL_URLS.keys() + +# VGG config +# key: VGG network depth +# value: conv num in different blocks +NET_CONFIG = { + 11: [1, 1, 2, 2, 2], + 13: [2, 2, 2, 2, 2], + 16: [2, 2, 3, 3, 3], + 19: [2, 2, 4, 4, 4] +} + + +class ConvBlock(TheseusLayer): + def __init__(self, input_channels, output_channels, groups): + super().__init__() + + self.groups = groups + self.conv1 = Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=3, + stride=1, + padding=1, + bias_attr=False) + if groups == 2 or groups == 3 or groups == 4: + self.conv2 = Conv2D( + in_channels=output_channels, + out_channels=output_channels, + kernel_size=3, + stride=1, + padding=1, + bias_attr=False) + if groups == 3 or groups == 4: + self.conv3 = Conv2D( + in_channels=output_channels, + out_channels=output_channels, + kernel_size=3, + stride=1, + padding=1, + bias_attr=False) + if groups == 4: + self.conv4 = Conv2D( + in_channels=output_channels, + out_channels=output_channels, + kernel_size=3, + stride=1, + padding=1, + bias_attr=False) + + self.max_pool = MaxPool2D(kernel_size=2, stride=2, padding=0) + self.relu = nn.ReLU() + + def forward(self, inputs): + x = self.conv1(inputs) + x = self.relu(x) + if self.groups == 2 or self.groups == 3 or self.groups == 4: + x = self.conv2(x) + x = self.relu(x) + if self.groups == 3 or self.groups == 4: + x = self.conv3(x) + x = self.relu(x) 
+ if self.groups == 4: + x = self.conv4(x) + x = self.relu(x) + x = self.max_pool(x) + return x + + +class VGGNet(TheseusLayer): + """ + VGGNet + Args: + config: list. VGGNet config. + stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False` + class_num: int=1000. The number of classes. + Returns: + model: nn.Layer. Specific VGG model depends on args. + """ + + def __init__(self, + config, + stages_pattern, + stop_grad_layers=0, + class_num=1000, + return_patterns=None, + return_stages=None): + super().__init__() + + self.stop_grad_layers = stop_grad_layers + + self.conv_block_1 = ConvBlock(3, 64, config[0]) + self.conv_block_2 = ConvBlock(64, 128, config[1]) + self.conv_block_3 = ConvBlock(128, 256, config[2]) + self.conv_block_4 = ConvBlock(256, 512, config[3]) + self.conv_block_5 = ConvBlock(512, 512, config[4]) + + self.relu = nn.ReLU() + self.flatten = nn.Flatten(start_axis=1, stop_axis=-1) + + for idx, block in enumerate([ + self.conv_block_1, self.conv_block_2, self.conv_block_3, + self.conv_block_4, self.conv_block_5 + ]): + if self.stop_grad_layers >= idx + 1: + for param in block.parameters(): + param.trainable = False + + self.drop = Dropout(p=0.5, mode="downscale_in_infer") + self.fc1 = Linear(7 * 7 * 512, 4096) + self.fc2 = Linear(4096, 4096) + self.fc3 = Linear(4096, class_num) + + super().init_res( + stages_pattern, + return_patterns=return_patterns, + return_stages=return_stages) + + def forward(self, inputs): + x = self.conv_block_1(inputs) + x = self.conv_block_2(x) + x = self.conv_block_3(x) + x = self.conv_block_4(x) + x = self.conv_block_5(x) + x = self.flatten(x) + x = self.fc1(x) + x = self.relu(x) + x = self.drop(x) + x = self.fc2(x) + x = self.relu(x) + x = self.drop(x) + x = self.fc3(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def VGG11(pretrained=False, use_ssld=False, **kwargs): + """ + VGG11 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `VGG11` model depends on args. + """ + model = VGGNet( + config=NET_CONFIG[11], + stages_pattern=MODEL_STAGES_PATTERN["VGG"], + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["VGG11"], use_ssld) + return model + + +def VGG13(pretrained=False, use_ssld=False, **kwargs): + """ + VGG13 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `VGG13` model depends on args. + """ + model = VGGNet( + config=NET_CONFIG[13], + stages_pattern=MODEL_STAGES_PATTERN["VGG"], + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["VGG13"], use_ssld) + return model + + +def VGG16(pretrained=False, use_ssld=False, **kwargs): + """ + VGG16 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. 
+ If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `VGG16` model depends on args. + """ + model = VGGNet( + config=NET_CONFIG[16], + stages_pattern=MODEL_STAGES_PATTERN["VGG"], + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["VGG16"], use_ssld) + return model + + +def VGG19(pretrained=False, use_ssld=False, **kwargs): + """ + VGG19 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `VGG19` model depends on args. + """ + model = VGGNet( + config=NET_CONFIG[19], + stages_pattern=MODEL_STAGES_PATTERN["VGG"], + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["VGG19"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/adaface_ir_net.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/adaface_ir_net.py new file mode 100644 index 000000000..47de152b6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/adaface_ir_net.py @@ -0,0 +1,529 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
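A minimal usage sketch for the VGG factories defined above (illustrative only; the import path is an assumption based on this patch's directory layout and is not part of the patch):

    # Illustrative sketch: build VGG16 from NET_CONFIG[16] and run a dummy batch.
    import paddle
    from ppcls.arch.backbone.legendary_models.vgg import VGG16  # hypothetical import path

    model = VGG16(pretrained=False, class_num=1000)
    x = paddle.randn([1, 3, 224, 224])  # the head expects 224x224 inputs (7 * 7 * 512 flattened features)
    logits = model(x)                   # shape: [1, 1000]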
+# this code is based on AdaFace(https://github.com/mk-minchul/AdaFace) +from collections import namedtuple +import paddle +import paddle.nn as nn +from paddle.nn import Dropout +from paddle.nn import MaxPool2D +from paddle.nn import Sequential +from paddle.nn import Conv2D, Linear +from paddle.nn import BatchNorm1D, BatchNorm2D +from paddle.nn import ReLU, Sigmoid +from paddle.nn import Layer +from paddle.nn import PReLU + +# from ppcls.arch.backbone.legendary_models.resnet import _load_pretrained + + +class Flatten(Layer): + """ Flat tensor + """ + + def forward(self, input): + return paddle.reshape(input, [input.shape[0], -1]) + + +class LinearBlock(Layer): + """ Convolution block without no-linear activation layer + """ + + def __init__(self, + in_c, + out_c, + kernel=(1, 1), + stride=(1, 1), + padding=(0, 0), + groups=1): + super(LinearBlock, self).__init__() + self.conv = Conv2D( + in_c, + out_c, + kernel, + stride, + padding, + groups=groups, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=None) + weight_attr = paddle.ParamAttr( + regularizer=None, initializer=nn.initializer.Constant(value=1.0)) + bias_attr = paddle.ParamAttr( + regularizer=None, initializer=nn.initializer.Constant(value=0.0)) + self.bn = BatchNorm2D( + out_c, weight_attr=weight_attr, bias_attr=bias_attr) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +class GNAP(Layer): + """ Global Norm-Aware Pooling block + """ + + def __init__(self, in_c): + super(GNAP, self).__init__() + self.bn1 = BatchNorm2D(in_c, weight_attr=False, bias_attr=False) + self.pool = nn.AdaptiveAvgPool2D((1, 1)) + self.bn2 = BatchNorm1D(in_c, weight_attr=False, bias_attr=False) + + def forward(self, x): + x = self.bn1(x) + x_norm = paddle.norm(x, 2, 1, True) + x_norm_mean = paddle.mean(x_norm) + weight = x_norm_mean / x_norm + x = x * weight + x = self.pool(x) + x = x.view(x.shape[0], -1) + feature = self.bn2(x) + return feature + + +class GDC(Layer): + """ Global Depthwise Convolution block + """ + + def __init__(self, in_c, embedding_size): + super(GDC, self).__init__() + self.conv_6_dw = LinearBlock( + in_c, + in_c, + groups=in_c, + kernel=(7, 7), + stride=(1, 1), + padding=(0, 0)) + self.conv_6_flatten = Flatten() + self.linear = Linear( + in_c, + embedding_size, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False) + self.bn = BatchNorm1D( + embedding_size, weight_attr=False, bias_attr=False) + + def forward(self, x): + x = self.conv_6_dw(x) + x = self.conv_6_flatten(x) + x = self.linear(x) + x = self.bn(x) + return x + + +class SELayer(Layer): + """ SE block + """ + + def __init__(self, channels, reduction): + super(SELayer, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2D(1) + weight_attr = paddle.ParamAttr( + initializer=paddle.nn.initializer.XavierUniform()) + self.fc1 = Conv2D( + channels, + channels // reduction, + kernel_size=1, + padding=0, + weight_attr=weight_attr, + bias_attr=False) + + self.relu = ReLU() + self.fc2 = Conv2D( + channels // reduction, + channels, + kernel_size=1, + padding=0, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False) + + self.sigmoid = Sigmoid() + + def forward(self, x): + module_input = x + x = self.avg_pool(x) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + + return module_input * x + + +class BasicBlockIR(Layer): + """ BasicBlock for IRNet + """ + + def __init__(self, in_channel, depth, stride): + super(BasicBlockIR, self).__init__() + + weight_attr = paddle.ParamAttr( + regularizer=None, 
initializer=nn.initializer.Constant(value=1.0)) + bias_attr = paddle.ParamAttr( + regularizer=None, initializer=nn.initializer.Constant(value=0.0)) + if in_channel == depth: + self.shortcut_layer = MaxPool2D(1, stride) + else: + self.shortcut_layer = Sequential( + Conv2D( + in_channel, + depth, (1, 1), + stride, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + depth, weight_attr=weight_attr, bias_attr=bias_attr)) + self.res_layer = Sequential( + BatchNorm2D( + in_channel, weight_attr=weight_attr, bias_attr=bias_attr), + Conv2D( + in_channel, + depth, (3, 3), (1, 1), + 1, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + depth, weight_attr=weight_attr, bias_attr=bias_attr), + PReLU(depth), + Conv2D( + depth, + depth, (3, 3), + stride, + 1, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + depth, weight_attr=weight_attr, bias_attr=bias_attr)) + + def forward(self, x): + shortcut = self.shortcut_layer(x) + res = self.res_layer(x) + + return res + shortcut + + +class BottleneckIR(Layer): + """ BasicBlock with bottleneck for IRNet + """ + + def __init__(self, in_channel, depth, stride): + super(BottleneckIR, self).__init__() + reduction_channel = depth // 4 + weight_attr = paddle.ParamAttr( + regularizer=None, initializer=nn.initializer.Constant(value=1.0)) + bias_attr = paddle.ParamAttr( + regularizer=None, initializer=nn.initializer.Constant(value=0.0)) + if in_channel == depth: + self.shortcut_layer = MaxPool2D(1, stride) + else: + self.shortcut_layer = Sequential( + Conv2D( + in_channel, + depth, (1, 1), + stride, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + depth, weight_attr=weight_attr, bias_attr=bias_attr)) + self.res_layer = Sequential( + BatchNorm2D( + in_channel, weight_attr=weight_attr, bias_attr=bias_attr), + Conv2D( + in_channel, + reduction_channel, (1, 1), (1, 1), + 0, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + reduction_channel, + weight_attr=weight_attr, + bias_attr=bias_attr), + PReLU(reduction_channel), + Conv2D( + reduction_channel, + reduction_channel, (3, 3), (1, 1), + 1, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + reduction_channel, + weight_attr=weight_attr, + bias_attr=bias_attr), + PReLU(reduction_channel), + Conv2D( + reduction_channel, + depth, (1, 1), + stride, + 0, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + depth, weight_attr=weight_attr, bias_attr=bias_attr)) + + def forward(self, x): + shortcut = self.shortcut_layer(x) + res = self.res_layer(x) + + return res + shortcut + + +class BasicBlockIRSE(BasicBlockIR): + def __init__(self, in_channel, depth, stride): + super(BasicBlockIRSE, self).__init__(in_channel, depth, stride) + self.res_layer.add_sublayer("se_block", SELayer(depth, 16)) + + +class BottleneckIRSE(BottleneckIR): + def __init__(self, in_channel, depth, stride): + super(BottleneckIRSE, self).__init__(in_channel, depth, stride) + self.res_layer.add_sublayer("se_block", SELayer(depth, 16)) + + +class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])): + '''A named tuple describing a ResNet block.''' + + +def get_block(in_channel, depth, num_units, stride=2): + + return [Bottleneck(in_channel, depth, stride)] +\ + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)] + + +def get_blocks(num_layers): + if num_layers == 18: + blocks = [ + get_block( + in_channel=64, depth=64, 
num_units=2), get_block( + in_channel=64, depth=128, num_units=2), get_block( + in_channel=128, depth=256, num_units=2), get_block( + in_channel=256, depth=512, num_units=2) + ] + elif num_layers == 34: + blocks = [ + get_block( + in_channel=64, depth=64, num_units=3), get_block( + in_channel=64, depth=128, num_units=4), get_block( + in_channel=128, depth=256, num_units=6), get_block( + in_channel=256, depth=512, num_units=3) + ] + elif num_layers == 50: + blocks = [ + get_block( + in_channel=64, depth=64, num_units=3), get_block( + in_channel=64, depth=128, num_units=4), get_block( + in_channel=128, depth=256, num_units=14), get_block( + in_channel=256, depth=512, num_units=3) + ] + elif num_layers == 100: + blocks = [ + get_block( + in_channel=64, depth=64, num_units=3), get_block( + in_channel=64, depth=128, num_units=13), get_block( + in_channel=128, depth=256, num_units=30), get_block( + in_channel=256, depth=512, num_units=3) + ] + elif num_layers == 152: + blocks = [ + get_block( + in_channel=64, depth=256, num_units=3), get_block( + in_channel=256, depth=512, num_units=8), get_block( + in_channel=512, depth=1024, num_units=36), get_block( + in_channel=1024, depth=2048, num_units=3) + ] + elif num_layers == 200: + blocks = [ + get_block( + in_channel=64, depth=256, num_units=3), get_block( + in_channel=256, depth=512, num_units=24), get_block( + in_channel=512, depth=1024, num_units=36), get_block( + in_channel=1024, depth=2048, num_units=3) + ] + + return blocks + + +class Backbone(Layer): + def __init__(self, input_size, num_layers, mode='ir'): + """ Args: + input_size: input_size of backbone + num_layers: num_layers of backbone + mode: support ir or irse + """ + super(Backbone, self).__init__() + assert input_size[0] in [112, 224], \ + "input_size should be [112, 112] or [224, 224]" + assert num_layers in [18, 34, 50, 100, 152, 200], \ + "num_layers should be 18, 34, 50, 100 or 152" + assert mode in ['ir', 'ir_se'], \ + "mode should be ir or ir_se" + weight_attr = paddle.ParamAttr( + regularizer=None, initializer=nn.initializer.Constant(value=1.0)) + bias_attr = paddle.ParamAttr( + regularizer=None, initializer=nn.initializer.Constant(value=0.0)) + self.input_layer = Sequential( + Conv2D( + 3, + 64, (3, 3), + 1, + 1, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + 64, weight_attr=weight_attr, bias_attr=bias_attr), + PReLU(64)) + blocks = get_blocks(num_layers) + if num_layers <= 100: + if mode == 'ir': + unit_module = BasicBlockIR + elif mode == 'ir_se': + unit_module = BasicBlockIRSE + output_channel = 512 + else: + if mode == 'ir': + unit_module = BottleneckIR + elif mode == 'ir_se': + unit_module = BottleneckIRSE + output_channel = 2048 + + if input_size[0] == 112: + self.output_layer = Sequential( + BatchNorm2D( + output_channel, + weight_attr=weight_attr, + bias_attr=bias_attr), + Dropout(0.4), + Flatten(), + Linear( + output_channel * 7 * 7, + 512, + weight_attr=nn.initializer.KaimingNormal()), + BatchNorm1D( + 512, weight_attr=False, bias_attr=False)) + else: + self.output_layer = Sequential( + BatchNorm2D( + output_channel, + weight_attr=weight_attr, + bias_attr=bias_attr), + Dropout(0.4), + Flatten(), + Linear( + output_channel * 14 * 14, + 512, + weight_attr=nn.initializer.KaimingNormal()), + BatchNorm1D( + 512, weight_attr=False, bias_attr=False)) + + modules = [] + for block in blocks: + for bottleneck in block: + modules.append( + unit_module(bottleneck.in_channel, bottleneck.depth, + bottleneck.stride)) + self.body = 
Sequential(*modules) + + # initialize_weights(self.modules()) + + def forward(self, x): + + # current code only supports one extra image + # it comes with a extra dimension for number of extra image. We will just squeeze it out for now + x = self.input_layer(x) + + for idx, module in enumerate(self.body): + x = module(x) + + x = self.output_layer(x) + # norm = paddle.norm(x, 2, 1, True) + # output = paddle.divide(x, norm) + # return output, norm + return x + + +def AdaFace_IR_18(input_size=(112, 112)): + """ Constructs a ir-18 model. + """ + model = Backbone(input_size, 18, 'ir') + return model + + +def AdaFace_IR_34(input_size=(112, 112)): + """ Constructs a ir-34 model. + """ + model = Backbone(input_size, 34, 'ir') + + return model + + +def AdaFace_IR_50(input_size=(112, 112)): + """ Constructs a ir-50 model. + """ + model = Backbone(input_size, 50, 'ir') + + return model + + +def AdaFace_IR_101(input_size=(112, 112)): + """ Constructs a ir-101 model. + """ + model = Backbone(input_size, 100, 'ir') + + return model + + +def AdaFace_IR_152(input_size=(112, 112)): + """ Constructs a ir-152 model. + """ + model = Backbone(input_size, 152, 'ir') + + return model + + +def AdaFace_IR_200(input_size=(112, 112)): + """ Constructs a ir-200 model. + """ + model = Backbone(input_size, 200, 'ir') + + return model + + +def AdaFace_IR_SE_50(input_size=(112, 112)): + """ Constructs a ir_se-50 model. + """ + model = Backbone(input_size, 50, 'ir_se') + + return model + + +def AdaFace_IR_SE_101(input_size=(112, 112)): + """ Constructs a ir_se-101 model. + """ + model = Backbone(input_size, 100, 'ir_se') + + return model + + +def AdaFace_IR_SE_152(input_size=(112, 112)): + """ Constructs a ir_se-152 model. + """ + model = Backbone(input_size, 152, 'ir_se') + + return model + + +def AdaFace_IR_SE_200(input_size=(112, 112)): + """ Constructs a ir_se-200 model. + """ + model = Backbone(input_size, 200, 'ir_se') + + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/alexnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/alexnet.py new file mode 100644 index 000000000..7020d5db7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/alexnet.py @@ -0,0 +1,170 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
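A minimal usage sketch for the AdaFace IR backbones defined above (illustrative only; the import path is an assumption based on this patch's directory layout):

    # Illustrative sketch: IR-50 maps a 112x112 face crop to a 512-d embedding.
    import paddle
    from ppcls.arch.backbone.model_zoo.adaface_ir_net import AdaFace_IR_50  # hypothetical import path

    backbone = AdaFace_IR_50(input_size=(112, 112))  # equivalent to Backbone((112, 112), 50, 'ir')
    faces = paddle.randn([4, 3, 112, 112])
    embeddings = backbone(faces)                     # shape: [4, 512]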
+ +# reference: https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf + +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout, ReLU +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "AlexNet": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/AlexNet_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvPoolLayer(nn.Layer): + def __init__(self, + input_channels, + output_channels, + filter_size, + stride, + padding, + stdv, + groups=1, + act=None, + name=None): + super(ConvPoolLayer, self).__init__() + + self.relu = ReLU() if act == "relu" else None + + self._conv = Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=groups, + weight_attr=ParamAttr( + name=name + "_weights", initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr( + name=name + "_offset", initializer=Uniform(-stdv, stdv))) + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0) + + def forward(self, inputs): + x = self._conv(inputs) + if self.relu is not None: + x = self.relu(x) + x = self._pool(x) + return x + + +class AlexNetDY(nn.Layer): + def __init__(self, class_num=1000): + super(AlexNetDY, self).__init__() + + stdv = 1.0 / math.sqrt(3 * 11 * 11) + self._conv1 = ConvPoolLayer( + 3, 64, 11, 4, 2, stdv, act="relu", name="conv1") + stdv = 1.0 / math.sqrt(64 * 5 * 5) + self._conv2 = ConvPoolLayer( + 64, 192, 5, 1, 2, stdv, act="relu", name="conv2") + stdv = 1.0 / math.sqrt(192 * 3 * 3) + self._conv3 = Conv2D( + 192, + 384, + 3, + stride=1, + padding=1, + weight_attr=ParamAttr( + name="conv3_weights", initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr( + name="conv3_offset", initializer=Uniform(-stdv, stdv))) + stdv = 1.0 / math.sqrt(384 * 3 * 3) + self._conv4 = Conv2D( + 384, + 256, + 3, + stride=1, + padding=1, + weight_attr=ParamAttr( + name="conv4_weights", initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr( + name="conv4_offset", initializer=Uniform(-stdv, stdv))) + stdv = 1.0 / math.sqrt(256 * 3 * 3) + self._conv5 = ConvPoolLayer( + 256, 256, 3, 1, 1, stdv, act="relu", name="conv5") + stdv = 1.0 / math.sqrt(256 * 6 * 6) + + self._drop1 = Dropout(p=0.5, mode="downscale_in_infer") + self._fc6 = Linear( + in_features=256 * 6 * 6, + out_features=4096, + weight_attr=ParamAttr( + name="fc6_weights", initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr( + name="fc6_offset", initializer=Uniform(-stdv, stdv))) + + self._drop2 = Dropout(p=0.5, mode="downscale_in_infer") + self._fc7 = Linear( + in_features=4096, + out_features=4096, + weight_attr=ParamAttr( + name="fc7_weights", initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr( + name="fc7_offset", initializer=Uniform(-stdv, stdv))) + self._fc8 = Linear( + in_features=4096, + out_features=class_num, + weight_attr=ParamAttr( + name="fc8_weights", initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr( + name="fc8_offset", initializer=Uniform(-stdv, stdv))) + + def forward(self, inputs): + x = self._conv1(inputs) + x = self._conv2(x) + x = self._conv3(x) + x = F.relu(x) + x = self._conv4(x) + x = F.relu(x) + x = self._conv5(x) + x = paddle.flatten(x, start_axis=1, stop_axis=-1) + x = self._drop1(x) + x = self._fc6(x) + x = F.relu(x) + x = 
self._drop2(x) + x = self._fc7(x) + x = F.relu(x) + x = self._fc8(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def AlexNet(pretrained=False, use_ssld=False, **kwargs): + model = AlexNetDY(**kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["AlexNet"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cae.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cae.py new file mode 100644 index 000000000..ac0f044bb --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cae.py @@ -0,0 +1,860 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was heavily based on https://github.com/PaddlePaddle/VIMER/blob/main/CAE/models/modeling_finetune.py +# reference: https://arxiv.org/abs/2202.03026 + +import collections +from itertools import repeat +import math +import numpy as np +from functools import partial + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from ....utils.download import get_weights_path_from_url + +MODEL_URLS = { + "cae_base_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/cae_base_patch16_224_pretrained.pdparams", + "cae_large_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/cae_large_patch16_224_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + + return parse + + +def trunc_normal_(tensor, mean=0., std=1.): + nn.initializer.TruncatedNormal(mean=mean, std=std)(tensor) + + +def drop_path(x, drop_prob: float=0., training: bool=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. + + """ + if drop_prob == 0. 
or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0], ) + (1, ) * ( + x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype) + random_tensor.floor_() # binarize + output = x / keep_prob * random_tensor + return output + + +class DropPath(nn.Layer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return 'p={}'.format(self.drop_prob) + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features, bias_attr=True) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features, bias_attr=True) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + # x = self.drop(x) + # commit this for the orignal BERT implement + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Layer): + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + window_size=None, + attn_head_dim=None): + super().__init__() + + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.zeros_ = nn.initializer.Constant(value=0.) 
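+        # Note: the fused qkv projection below is created without bias; separate q/v bias
+        # parameters are concatenated with a frozen all-zero k bias inside forward(),
+        # so the key projection never gets a learnable bias.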
+ + self.qkv = nn.Linear(dim, all_head_dim * 3, bias_attr=False) + if qkv_bias: + self.q_bias = self.create_parameter( + [all_head_dim], default_initializer=self.zeros_) + self.v_bias = self.create_parameter( + [all_head_dim], default_initializer=self.zeros_) + else: + self.q_bias = None + self.v_bias = None + + if window_size: + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * ( + 2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = self.create_parameter( + [self.num_relative_distance, num_heads], + default_initializer=self.zeros_) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = paddle.arange(window_size[0]) + coords_w = paddle.arange(window_size[1]) + coords = paddle.stack(paddle.meshgrid( + [coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = paddle.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, + None] - coords_flatten[:, + None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.transpose( + [1, 2, 0]) # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[ + 0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + paddle.zeros((window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum( + -1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", + relative_position_index) + else: + self.window_size = None + self.relative_position_bias_table = None + self.relative_position_index = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim, bias_attr=True) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, rel_pos_bias=None): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + k_bias = paddle.zeros_like(self.v_bias) + k_bias.stop_gradient = True + qkv_bias = paddle.concat((self.q_bias, k_bias, self.v_bias)) + # qkv = self.qkv(x).reshape([B, N, 3, self.num_heads, C // self.num_heads]).transpose([2, 0, 3, 1, 4]) + qkv = F.linear(x=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape([B, N, 3, self.num_heads, -1]).transpose( + [2, 0, 3, 1, 4]) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @k.transpose([0, 1, 3, 2])) + + if self.relative_position_bias_table is not None: + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.reshape([-1])].reshape([ + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1]) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.transpose( + [2, 0, 1]) # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if rel_pos_bias is not None: + attn = attn + rel_pos_bias + + attn = F.softmax(attn, axis=-1) + attn = self.attn_drop(attn) + + x = (attn @v).transpose([0, 2, 1, 3]).reshape([B, N, -1]) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Layer): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + init_values=None, 
+ act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + window_size=None, + attn_head_dim=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + window_size=window_size, + attn_head_dim=attn_head_dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + if init_values > 0: + self.gamma_1 = self.create_parameter( + [dim], + default_initializer=nn.initializer.Constant(value=init_values)) + self.gamma_2 = self.create_parameter( + [dim], + default_initializer=nn.initializer.Constant(value=init_values)) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x, rel_pos_bias=None): + if self.gamma_1 is None: + x = x + self.drop_path( + self.attn( + self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn( + self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Layer): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + to_2tuple = _ntuple(2) + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // + patch_size[0]) + self.patch_shape = (img_size[0] // patch_size[0], + img_size[1] // patch_size[1]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + self.in_chans = in_chans + self.out_chans = embed_dim + self.proj = nn.Conv2D( + in_chans, + embed_dim, + kernel_size=patch_size, + stride=patch_size, + bias_attr=True) + + def forward(self, x, **kwargs): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).transpose([0, 2, 1]) + return x + + def _init_weights(self): + fan_out = self.out_chans + fan_in = self.patch_size[0] * self.patch_size[1] * self.in_chans + weight_attr = paddle.ParamAttr( + initializer=nn.initializer.XavierUniform(fan_in, fan_out)) # MAE + bias_attr = paddle.ParamAttr(initializer=nn.initializer.Constant(0.0)) + return weight_attr, bias_attr + + +class RelativePositionBias(nn.Layer): + def __init__(self, window_size, num_heads): + super().__init__() + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * ( + 2 * window_size[1] - 1) + 3 + self.zeros_ = nn.initializer.Constant(value=0.) 
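+        # The bias table stores (2*Wh-1)*(2*Ww-1) learnable biases covering every relative offset
+        # between patch tokens, plus 3 extra rows for cls->token, token->cls and cls->cls.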
+ self.relative_position_bias_table = self.create_parameter( + [self.num_relative_distance, num_heads], + default_initializer=self.zeros_) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = paddle.arange(window_size[0]) + coords_w = paddle.arange(window_size[1]) + coords = paddle.stack(paddle.meshgrid( + [coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = paddle.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, + None] - coords_flatten[:, + None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.transpose( + [1, 2, 0]) # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + paddle.zeros((window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum( + -1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", + relative_position_index) + + def forward(self): + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.reshape([-1])].reshape([ + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1]) # Wh*Ww,Wh*Ww,nH + return relative_position_bias.transpose([2, 0, 1]) # nH, Wh*Ww, Wh*Ww + + +def get_sinusoid_encoding_table(n_position, d_hid, token=False): + ''' Sinusoid position encoding table ''' + + def get_position_angle_vec(position): + return [ + position / np.power(10000, 2 * (hid_j // 2) / d_hid) + for hid_j in range(d_hid) + ] + + sinusoid_table = np.array( + [get_position_angle_vec(pos_i) for pos_i in range(n_position)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + if token: + sinusoid_table = np.concatenate( + [sinusoid_table, np.zeros([1, d_hid])], dim=0) + + return paddle.to_tensor(sinusoid_table).unsqueeze(0) + + +class VisionTransformer(nn.Layer): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + class_num=1000, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + init_values=None, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + use_shared_rel_pos_bias=False, + use_mean_pooling=True, + init_scale=0.001, + lin_probe=False, + sin_pos_emb=True, + args=None): + super().__init__() + self.class_num = class_num + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.use_mean_pooling = use_mean_pooling + + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.zeros_ = nn.initializer.Constant(value=0.) + self.ones_ = nn.initializer.Constant(value=1.) 
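+        # A learnable class token is prepended to the patch sequence; positional information comes
+        # either from a learnable absolute embedding (use_abs_pos_emb) or from a fixed 2D sin-cos
+        # embedding built below (sin_pos_emb, kept frozen via stop_gradient=True).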
+ + self.cls_token = self.create_parameter( + [1, 1, embed_dim], default_initializer=self.zeros_) + + self.use_abs_pos_emb = use_abs_pos_emb + if use_abs_pos_emb: + self.pos_embed = self.create_parameter( + [1, num_patches + 1, embed_dim], + default_initializer=self.zeros_) + elif sin_pos_emb: + # sine-cosine positional embeddings is on the way + self.pos_embed = self.create_parameter( + [1, num_patches + 1, embed_dim], + default_initializer=self.zeros_) + self.pos_embed.set_value( + self.build_2d_sincos_position_embedding(embed_dim)) + self.pos_embed.stop_gradient = True # fixed sin-cos embedding + else: + self.pos_embed = None + + self.pos_drop = nn.Dropout(p=drop_rate) + + if use_shared_rel_pos_bias: + self.rel_pos_bias = RelativePositionBias( + window_size=self.patch_embed.patch_shape, num_heads=num_heads) + else: + self.rel_pos_bias = None + + dpr = [x.item() for x in paddle.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + self.use_rel_pos_bias = use_rel_pos_bias + self.blocks = nn.LayerList([ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values, + window_size=self.patch_embed.patch_shape + if use_rel_pos_bias else None) for i in range(depth) + ]) + self.norm = nn.Identity() if use_mean_pooling else norm_layer( + embed_dim) + + self.lin_probe = lin_probe + # NOTE: batch norm + if lin_probe: + # TODO + from models.lincls_bn import LP_BatchNorm + self.fc_norm = LP_BatchNorm(embed_dim, affine=False) + else: + if use_mean_pooling: + self.fc_norm = norm_layer(embed_dim) + else: + self.fc_norm = None + self.head = nn.Linear(embed_dim, + class_num) if class_num > 0 else nn.Identity() + + if self.pos_embed is not None and use_abs_pos_emb: + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + # trunc_normal_(self.mask_token, std=.02) + trunc_normal_(self.head.weight, std=.02) + self.apply(self._init_weights) + self.fix_init_weight() + + self.head.weight.set_value(self.head.weight * init_scale) + self.head.bias.set_value(self.head.bias * init_scale) + + def build_2d_sincos_position_embedding(self, + embed_dim=768, + temperature=10000.): + h, w = self.patch_embed.patch_shape + grid_w = paddle.arange(w, dtype=paddle.float32) + grid_h = paddle.arange(h, dtype=paddle.float32) + grid_w, grid_h = paddle.meshgrid(grid_w, grid_h) + assert embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding' + pos_dim = embed_dim // 4 + omega = paddle.arange(pos_dim, dtype=paddle.float32) / pos_dim + omega = 1. 
/ (temperature**omega) + out_w = paddle.einsum('m,d->md', grid_w.flatten(), omega) + out_h = paddle.einsum('m,d->md', grid_h.flatten(), omega) + pos_emb = paddle.concat( + [ + paddle.sin(out_w), paddle.cos(out_w), paddle.sin(out_h), + paddle.cos(out_h) + ], + axis=1)[None, :, :] + + # if not self.use_mean_pooling: + pe_token = paddle.zeros([1, 1, embed_dim], dtype=paddle.float32) + pos_emb = paddle.concat([pe_token, pos_emb], axis=1) + return pos_emb + + def fix_init_weight(self): + def rescale(param, layer_id): + param.set_value(param / math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight, layer_id + 1) + rescale(layer.mlp.fc2.weight, layer_id + 1) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + self.zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + self.zeros_(m.bias) + self.ones_(m.weight) + + def get_num_layers(self): + return len(self.blocks) + + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, class_num, global_pool=''): + self.class_num = class_num + self.head = nn.Linear(self.embed_dim, + class_num) if class_num > 0 else nn.Identity() + + def forward_features(self, x, is_train=True): + x = self.patch_embed(x) + batch_size, seq_len, _ = x.shape + + cls_tokens = self.cls_token.expand([ + batch_size, -1, -1 + ]).astype(x.dtype) # stole cls_tokens impl from Phil Wang, thanks + x = paddle.concat((cls_tokens, x), axis=1) + if self.pos_embed is not None: + if self.use_abs_pos_emb: + x = x + self.pos_embed.expand( + [batch_size, -1, -1]).astype(x.dtype).clone().detach() + else: + x = x + self.pos_embed.expand( + [batch_size, -1, -1]).astype(x.dtype).clone().detach() + + x = self.pos_drop(x) + + rel_pos_bias = self.rel_pos_bias( + ) if self.rel_pos_bias is not None else None + for blk in self.blocks: + x = blk(x, rel_pos_bias=rel_pos_bias) + + x = self.norm(x) + if self.fc_norm is not None: + t = x[:, 1:, :] + if self.lin_probe: + if self.use_mean_pooling: + return self.fc_norm(t.mean(1), is_train=is_train) + else: + return self.fc_norm(x[:, 0], is_train=is_train) + else: + return self.fc_norm(t.mean(1)) + + else: + return x[:, 0] + + def forward(self, x, is_train=True): + x = self.forward_features(x, is_train) + x = self.head(x) + return x + + +def _enable_linear_eval(model): + zeros_ = nn.initializer.Constant(value=0.) 
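+    # Linear-probe setup: freeze every parameter except the classification head and its fc_norm,
+    # then re-initialize the head (normal-distributed weight, zero bias).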
+ normal_ = nn.initializer.Normal(mean=0.0, std=0.01) + linear_keyword = 'head' + head_norm = 'fc_norm' + requires_grad = [] + for name, param in model.named_parameters(): + if name not in [ + '%s.weight' % linear_keyword, '%s.bias' % linear_keyword + ] and head_norm not in name: + param.stop_gradient = True + else: + requires_grad.append(name) + # init the fc layer + normal_(getattr(model, linear_keyword).weight) + zeros_(getattr(model, linear_keyword).bias) + + return + + +def _load_pretrained(pretrained, + pretrained_url, + model, + model_keys, + model_ema_configs, + use_abs_pos_emb, + use_rel_pos_bias, + use_ssld=False): + if pretrained is False: + return + elif pretrained is True: + local_weight_path = get_weights_path_from_url(pretrained_url).replace( + ".pdparams", "") + checkpoint = paddle.load(local_weight_path + ".pdparams") + elif isinstance(pretrained, str): + checkpoint = paddle.load(pretrained + ".pdparams") + + checkpoint_model = None + for model_key in model_keys.split('|'): + if model_key in checkpoint: + checkpoint_model = checkpoint[model_key] + break + + if checkpoint_model is None: + checkpoint_model = checkpoint + state_dict = model.state_dict() + all_keys = list(checkpoint_model.keys()) + # NOTE: remove all decoder keys + all_keys = [key for key in all_keys if key.startswith('encoder.')] + for key in all_keys: + new_key = key.replace('encoder.', '') + checkpoint_model[new_key] = checkpoint_model[key] + checkpoint_model.pop(key) + + for key in list(checkpoint_model.keys()): + if key.startswith('regressor_and_decoder.'): + checkpoint_model.pop(key) + if key.startswith('teacher_network.'): + checkpoint_model.pop(key) + + # NOTE: replace norm with fc_norm + for key in list(checkpoint_model.keys()): + if key.startswith('norm.'): + new_key = key.replace('norm.', 'fc_norm.') + checkpoint_model[new_key] = checkpoint_model[key] + checkpoint_model.pop(key) + + for k in ['head.weight', 'head.bias']: + if k in checkpoint_model and checkpoint_model[k].shape != state_dict[ + k].shape: + del checkpoint_model[k] + + if model.use_rel_pos_bias and "rel_pos_bias.relative_position_bias_table" in checkpoint_model: + num_layers = model.get_num_layers() + rel_pos_bias = checkpoint_model[ + "rel_pos_bias.relative_position_bias_table"] + for i in range(num_layers): + checkpoint_model["blocks.%d.attn.relative_position_bias_table" % + i] = rel_pos_bias.clone() + + checkpoint_model.pop("rel_pos_bias.relative_position_bias_table") + + all_keys = list(checkpoint_model.keys()) + + for key in all_keys: + if "relative_position_index" in key: + checkpoint_model.pop(key) + + if "relative_position_bias_table" in key and use_rel_pos_bias: + rel_pos_bias = checkpoint_model[key] + src_num_pos, num_attn_heads = rel_pos_bias.shape + dst_num_pos, _ = model.state_dict()[key].shape + dst_patch_shape = model.patch_embed.patch_shape + if dst_patch_shape[0] != dst_patch_shape[1]: + raise NotImplementedError() + num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * ( + dst_patch_shape[1] * 2 - 1) + src_size = int((src_num_pos - num_extra_tokens)**0.5) + dst_size = int((dst_num_pos - num_extra_tokens)**0.5) + if src_size != dst_size: + extra_tokens = rel_pos_bias[-num_extra_tokens:, :] + rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] + + def geometric_progression(a, r, n): + return a * (1.0 - r**n) / (1.0 - r) + + left, right = 1.01, 1.5 + while right - left > 1e-6: + q = (left + right) / 2.0 + gp = geometric_progression(1, q, src_size // 2) + if gp > dst_size // 2: + right = q + else: + left = q + + 
dis = [] + cur = 1 + for i in range(src_size // 2): + dis.append(cur) + cur += q**(i + 1) + + r_ids = [-_ for _ in reversed(dis)] + + x = r_ids + [0] + dis + y = r_ids + [0] + dis + + t = dst_size // 2.0 + dx = np.arange(-t, t + 0.1, 1.0) + dy = np.arange(-t, t + 0.1, 1.0) + + all_rel_pos_bias = [] + + for i in range(num_attn_heads): + z = rel_pos_bias[:, i].view(src_size, + src_size).float().numpy() + f = interpolate.interp2d(x, y, z, kind='cubic') + all_rel_pos_bias.append( + paddle.Tensor(f(dx, dy)).astype('float32').reshape( + [-1, 1])) + + rel_pos_bias = paddle.concat(all_rel_pos_bias, axis=-1) + + new_rel_pos_bias = paddle.concat( + (rel_pos_bias, extra_tokens), axis=0) + checkpoint_model[key] = new_rel_pos_bias + + # interpolate position embedding + if 'pos_embed' in checkpoint_model and use_abs_pos_emb: + pos_embed_checkpoint = checkpoint_model['pos_embed'] + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = model.patch_embed.num_patches + num_extra_tokens = model.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens)** + 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches**0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, + embedding_size).permute(0, 3, 1, 2) + pos_tokens = paddle.nn.functional.interpolate( + pos_tokens, + size=(new_size, new_size), + mode='bicubic', + align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = paddle.concat((extra_tokens, pos_tokens), axis=1) + checkpoint_model['pos_embed'] = new_pos_embed + msg = model.set_state_dict(checkpoint_model) + + model_without_ddp = model + n_parameters = sum(p.numel() for p in model.parameters() + if not p.stop_gradient).item() + + return + + +def cae_base_patch16_224(pretrained=True, use_ssld=False, **kwargs): + config = kwargs.copy() + enable_linear_eval = config.pop('enable_linear_eval') + model_keys = config.pop('model_key') + model_ema_configs = config.pop('model_ema') + use_abs_pos_emb = config.get('use_abs_pos_emb', False) + use_rel_pos_bias = config.get('use_rel_pos_bias', True) + if pretrained in config: + pretrained = config.pop('pretrained') + + model = VisionTransformer( + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + **config) + + if enable_linear_eval: + _enable_linear_eval(model) + + _load_pretrained( + pretrained, + MODEL_URLS["cae_base_patch16_224"], + model, + model_keys, + model_ema_configs, + use_abs_pos_emb, + use_rel_pos_bias, + use_ssld=False) + + return model + + +def cae_large_patch16_224(pretrained=True, use_ssld=False, **kwargs): + config = kwargs.copy() + enable_linear_eval = config.pop('enable_linear_eval') + model_keys = config.pop('model_key') + model_ema_configs = config.pop('model_ema') + use_abs_pos_emb = config.get('use_abs_pos_emb', False) + use_rel_pos_bias = config.get('use_rel_pos_bias', True) + if pretrained in config: + pretrained = config.pop('pretrained') + + model = VisionTransformer( + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + 
**config) + + if enable_linear_eval: + _enable_linear_eval(model) + + _load_pretrained( + pretrained, + MODEL_URLS["cae_large_patch16_224"], + model, + model_keys, + model_ema_configs, + use_abs_pos_emb, + use_rel_pos_bias, + use_ssld=False) + + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/convnext.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/convnext.py new file mode 100644 index 000000000..3773fac56 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/convnext.py @@ -0,0 +1,282 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Code was heavily based on https://github.com/facebookresearch/ConvNeXt + +import paddle +import paddle.nn as nn +from paddle.nn.initializer import TruncatedNormal, Constant + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "ConvNeXt_tiny": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ConvNeXt_tiny_pretrained.pdparams", + "ConvNeXt_small": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ConvNeXt_small_pretrained.pdparams", + "ConvNeXt_base_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ConvNeXt_base_224_pretrained.pdparams", + "ConvNeXt_base_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ConvNeXt_base_384_pretrained.pdparams", + "ConvNeXt_large_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ConvNeXt_large_224_pretrained.pdparams", + "ConvNeXt_large_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ConvNeXt_large_384_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + +trunc_normal_ = TruncatedNormal(std=.02) +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) + + +def drop_path(x, drop_prob=0., training=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + """ + if drop_prob == 0. or not training: + return x + keep_prob = paddle.to_tensor(1 - drop_prob, dtype=x.dtype) + shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) + random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype) + random_tensor = paddle.floor(random_tensor) # binarize + output = x.divide(keep_prob) * random_tensor + return output + + +class DropPath(nn.Layer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class ChannelsFirstLayerNorm(nn.Layer): + r""" LayerNorm that supports two data formats: channels_last (default) or channels_first. + The ordering of the dimensions in the inputs. 
channels_last corresponds to inputs with + shape (batch_size, height, width, channels) while channels_first corresponds to inputs + with shape (batch_size, channels, height, width). + """ + + def __init__(self, normalized_shape, epsilon=1e-5): + super().__init__() + self.weight = self.create_parameter( + shape=[normalized_shape], default_initializer=ones_) + self.bias = self.create_parameter( + shape=[normalized_shape], default_initializer=zeros_) + self.epsilon = epsilon + self.normalized_shape = [normalized_shape] + + def forward(self, x): + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / paddle.sqrt(s + self.epsilon) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + +class Block(nn.Layer): + r""" ConvNeXt Block. There are two equivalent implementations: + (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) + (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back + We use (2) as we find it slightly faster in PyTorch + + Args: + dim (int): Number of input channels. + drop_path (float): Stochastic depth rate. Default: 0.0 + layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. + """ + + def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6): + super().__init__() + self.dwconv = nn.Conv2D( + dim, dim, 7, padding=3, groups=dim) # depthwise conv + self.norm = nn.LayerNorm(dim, epsilon=1e-6) + # pointwise/1x1 convs, implemented with linear layers + self.pwconv1 = nn.Linear(dim, 4 * dim) + self.act = nn.GELU() + self.pwconv2 = nn.Linear(4 * dim, dim) + if layer_scale_init_value > 0: + self.gamma = self.create_parameter( + shape=[dim], + default_initializer=Constant(value=layer_scale_init_value)) + else: + self.gamma = None + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + input = x + x = self.dwconv(x) + x = x.transpose([0, 2, 3, 1]) # (N, C, H, W) -> (N, H, W, C) + x = self.norm(x) + x = self.pwconv1(x) + x = self.act(x) + x = self.pwconv2(x) + if self.gamma is not None: + x = self.gamma * x + x = x.transpose([0, 3, 1, 2]) # (N, H, W, C) -> (N, C, H, W) + + x = input + self.drop_path(x) + return x + + +class ConvNeXt(nn.Layer): + r""" ConvNeXt + A PaddlePaddle impl of : `A ConvNet for the 2020s` - + https://arxiv.org/pdf/2201.03545.pdf + + Args: + in_chans (int): Number of input image channels. Default: 3 + class_num (int): Number of classes for classification head. Default: 1000 + depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3] + dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768] + drop_path_rate (float): Stochastic depth rate. Default: 0. + layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. + head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1. 
+ """ + + def __init__(self, + in_chans=3, + class_num=1000, + depths=[3, 3, 9, 3], + dims=[96, 192, 384, 768], + drop_path_rate=0., + layer_scale_init_value=1e-6, + head_init_scale=1.): + super().__init__() + + # stem and 3 intermediate downsampling conv layers + self.downsample_layers = nn.LayerList() + stem = nn.Sequential( + nn.Conv2D( + in_chans, dims[0], 4, stride=4), + ChannelsFirstLayerNorm( + dims[0], epsilon=1e-6)) + self.downsample_layers.append(stem) + for i in range(3): + downsample_layer = nn.Sequential( + ChannelsFirstLayerNorm( + dims[i], epsilon=1e-6), + nn.Conv2D( + dims[i], dims[i + 1], 2, stride=2), ) + self.downsample_layers.append(downsample_layer) + + # 4 feature resolution stages, each consisting of multiple residual blocks + self.stages = nn.LayerList() + dp_rates = [ + x.item() for x in paddle.linspace(0, drop_path_rate, sum(depths)) + ] + cur = 0 + for i in range(4): + stage = nn.Sequential(* [ + Block( + dim=dims[i], + drop_path=dp_rates[cur + j], + layer_scale_init_value=layer_scale_init_value) + for j in range(depths[i]) + ]) + self.stages.append(stage) + cur += depths[i] + + self.norm = nn.LayerNorm(dims[-1], epsilon=1e-6) # final norm layer + self.head = nn.Linear(dims[-1], class_num) + + self.apply(self._init_weights) + self.head.weight.set_value(self.head.weight * head_init_scale) + self.head.bias.set_value(self.head.bias * head_init_scale) + + def _init_weights(self, m): + if isinstance(m, (nn.Conv2D, nn.Linear)): + trunc_normal_(m.weight) + if m.bias is not None: + zeros_(m.bias) + + def forward_features(self, x): + for i in range(4): + x = self.downsample_layers[i](x) + x = self.stages[i](x) + # global average pooling, (N, C, H, W) -> (N, C) + return self.norm(x.mean([-2, -1])) + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def ConvNeXt_tiny(pretrained=False, use_ssld=False, **kwargs): + model = ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ConvNeXt_tiny"], use_ssld=use_ssld) + return model + + +def ConvNeXt_small(pretrained=False, use_ssld=False, **kwargs): + model = ConvNeXt(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ConvNeXt_small"], use_ssld=use_ssld) + return model + + +def ConvNeXt_base_224(pretrained=False, use_ssld=False, **kwargs): + model = ConvNeXt( + depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ConvNeXt_base_224"], use_ssld=use_ssld) + return model + + +def ConvNeXt_base_384(pretrained=False, use_ssld=False, **kwargs): + model = ConvNeXt( + depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ConvNeXt_base_384"], use_ssld=use_ssld) + return model + + +def ConvNeXt_large_224(pretrained=False, use_ssld=False, **kwargs): + model = ConvNeXt( + depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ConvNeXt_large_224"], use_ssld=use_ssld) + return model + + +def ConvNeXt_large_384(pretrained=False, use_ssld=False, **kwargs): + model = ConvNeXt( + depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ConvNeXt_large_384"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cspnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cspnet.py new file mode 100644 index 000000000..c62206328 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cspnet.py @@ -0,0 +1,377 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Code was heavily based on https://github.com/rwightman/pytorch-image-models +# reference: https://arxiv.org/abs/1911.11929 + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle import ParamAttr + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "CSPDarkNet53": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSPDarkNet53_pretrained.pdparams" +} + +MODEL_CFGS = { + "CSPDarkNet53": dict( + stem=dict( + out_chs=32, kernel_size=3, stride=1, pool=''), + stage=dict( + out_chs=(64, 128, 256, 512, 1024), + depth=(1, 2, 8, 8, 4), + stride=(2, ) * 5, + exp_ratio=(2., ) + (1., ) * 4, + bottle_ratio=(0.5, ) + (1.0, ) * 4, + block_ratio=(1., ) + (0.5, ) * 4, + down_growth=True, )) +} + +__all__ = ['CSPDarkNet53' + ] # model_registry will add each entrypoint fn to this + + +class ConvBnAct(nn.Layer): + def __init__(self, + input_channels, + output_channels, + kernel_size=1, + stride=1, + padding=None, + dilation=1, + groups=1, + act_layer=nn.LeakyReLU, + norm_layer=nn.BatchNorm2D): + super().__init__() + if padding is None: + padding = (kernel_size - 1) // 2 + self.conv = nn.Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + weight_attr=ParamAttr(), + bias_attr=False) + + self.bn = norm_layer(num_features=output_channels) + self.act = act_layer() + + def forward(self, inputs): + x = self.conv(inputs) + x = self.bn(x) + if self.act is not None: + x = self.act(x) + return x + + +def create_stem(in_chans=3, + out_chs=32, + kernel_size=3, + stride=2, + pool='', + act_layer=None, + norm_layer=None): + stem = nn.Sequential() + if not isinstance(out_chs, (tuple, list)): + out_chs = [out_chs] + assert len(out_chs) + in_c = in_chans + for i, out_c in enumerate(out_chs): + conv_name = f'conv{i + 1}' + stem.add_sublayer( + conv_name, + ConvBnAct( + in_c, + out_c, + kernel_size, + stride=stride if i == 0 else 1, + act_layer=act_layer, + norm_layer=norm_layer)) + in_c = out_c + last_conv = conv_name + if pool: + stem.add_sublayer( + 'pool', nn.MaxPool2D( + kernel_size=3, stride=2, padding=1)) + return stem, dict( + num_chs=in_c, reduction=stride, module='.'.join(['stem', last_conv])) + + +class DarkBlock(nn.Layer): + def __init__(self, + in_chs, + out_chs, + dilation=1, + bottle_ratio=0.5, + groups=1, + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2D, + attn_layer=None, + drop_block=None): + super(DarkBlock, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) + self.conv1 = ConvBnAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.conv2 = ConvBnAct( + mid_chs, + out_chs, + kernel_size=3, + dilation=dilation, + groups=groups, + **ckwargs) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + x = x + shortcut + return x + + +class CrossStage(nn.Layer): + def __init__(self, + in_chs, + out_chs, + stride, + dilation, + depth, + block_ratio=1., + bottle_ratio=1., + exp_ratio=1., + groups=1, + first_dilation=None, + down_growth=False, + cross_linear=False, + block_dpr=None, + block_fn=DarkBlock, + **block_kwargs): + super(CrossStage, self).__init__() + first_dilation = first_dilation or dilation + down_chs = out_chs if down_growth else in_chs + exp_chs = int(round(out_chs * exp_ratio)) + block_out_chs = int(round(out_chs * block_ratio)) + conv_kwargs = dict( + act_layer=block_kwargs.get('act_layer'), + 
norm_layer=block_kwargs.get('norm_layer')) + + if stride != 1 or first_dilation != dilation: + self.conv_down = ConvBnAct( + in_chs, + down_chs, + kernel_size=3, + stride=stride, + dilation=first_dilation, + groups=groups, + **conv_kwargs) + prev_chs = down_chs + else: + self.conv_down = None + prev_chs = in_chs + + self.conv_exp = ConvBnAct( + prev_chs, exp_chs, kernel_size=1, **conv_kwargs) + prev_chs = exp_chs // 2 # output of conv_exp is always split in two + + self.blocks = nn.Sequential() + for i in range(depth): + self.blocks.add_sublayer( + str(i), + block_fn(prev_chs, block_out_chs, dilation, bottle_ratio, + groups, **block_kwargs)) + prev_chs = block_out_chs + + # transition convs + self.conv_transition_b = ConvBnAct( + prev_chs, exp_chs // 2, kernel_size=1, **conv_kwargs) + self.conv_transition = ConvBnAct( + exp_chs, out_chs, kernel_size=1, **conv_kwargs) + + def forward(self, x): + if self.conv_down is not None: + x = self.conv_down(x) + x = self.conv_exp(x) + split = x.shape[1] // 2 + xs, xb = x[:, :split], x[:, split:] + xb = self.blocks(xb) + xb = self.conv_transition_b(xb) + out = self.conv_transition(paddle.concat([xs, xb], axis=1)) + return out + + +class DarkStage(nn.Layer): + def __init__(self, + in_chs, + out_chs, + stride, + dilation, + depth, + block_ratio=1., + bottle_ratio=1., + groups=1, + first_dilation=None, + block_fn=DarkBlock, + block_dpr=None, + **block_kwargs): + super().__init__() + first_dilation = first_dilation or dilation + + self.conv_down = ConvBnAct( + in_chs, + out_chs, + kernel_size=3, + stride=stride, + dilation=first_dilation, + groups=groups, + act_layer=block_kwargs.get('act_layer'), + norm_layer=block_kwargs.get('norm_layer')) + + prev_chs = out_chs + block_out_chs = int(round(out_chs * block_ratio)) + self.blocks = nn.Sequential() + for i in range(depth): + self.blocks.add_sublayer( + str(i), + block_fn(prev_chs, block_out_chs, dilation, bottle_ratio, + groups, **block_kwargs)) + prev_chs = block_out_chs + + def forward(self, x): + x = self.conv_down(x) + x = self.blocks(x) + return x + + +def _cfg_to_stage_args(cfg, curr_stride=2, output_stride=32): + # get per stage args for stage and containing blocks, calculate strides to meet target output_stride + num_stages = len(cfg['depth']) + if 'groups' not in cfg: + cfg['groups'] = (1, ) * num_stages + if 'down_growth' in cfg and not isinstance(cfg['down_growth'], + (list, tuple)): + cfg['down_growth'] = (cfg['down_growth'], ) * num_stages + stage_strides = [] + stage_dilations = [] + stage_first_dilations = [] + dilation = 1 + for cfg_stride in cfg['stride']: + stage_first_dilations.append(dilation) + if curr_stride >= output_stride: + dilation *= cfg_stride + stride = 1 + else: + stride = cfg_stride + curr_stride *= stride + stage_strides.append(stride) + stage_dilations.append(dilation) + cfg['stride'] = stage_strides + cfg['dilation'] = stage_dilations + cfg['first_dilation'] = stage_first_dilations + stage_args = [ + dict(zip(cfg.keys(), values)) for values in zip(*cfg.values()) + ] + return stage_args + + +class CSPNet(nn.Layer): + def __init__(self, + cfg, + in_chans=3, + class_num=1000, + output_stride=32, + global_pool='avg', + drop_rate=0., + act_layer=nn.LeakyReLU, + norm_layer=nn.BatchNorm2D, + zero_init_last_bn=True, + stage_fn=CrossStage, + block_fn=DarkBlock): + super().__init__() + self.class_num = class_num + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + layer_args = dict(act_layer=act_layer, norm_layer=norm_layer) + + # Construct the stem + self.stem, 
stem_feat_info = create_stem(in_chans, **cfg['stem'], + **layer_args) + self.feature_info = [stem_feat_info] + prev_chs = stem_feat_info['num_chs'] + curr_stride = stem_feat_info[ + 'reduction'] # reduction does not include pool + if cfg['stem']['pool']: + curr_stride *= 2 + + # Construct the stages + per_stage_args = _cfg_to_stage_args( + cfg['stage'], curr_stride=curr_stride, output_stride=output_stride) + self.stages = nn.LayerList() + for i, sa in enumerate(per_stage_args): + self.stages.add_sublayer( + str(i), + stage_fn( + prev_chs, **sa, **layer_args, block_fn=block_fn)) + prev_chs = sa['out_chs'] + curr_stride *= sa['stride'] + self.feature_info += [ + dict( + num_chs=prev_chs, + reduction=curr_stride, + module=f'stages.{i}') + ] + + # Construct the head + self.num_features = prev_chs + + self.pool = nn.AdaptiveAvgPool2D(1) + self.flatten = nn.Flatten(1) + self.fc = nn.Linear( + prev_chs, + class_num, + weight_attr=ParamAttr(), + bias_attr=ParamAttr()) + + def forward(self, x): + x = self.stem(x) + for stage in self.stages: + x = stage(x) + x = self.pool(x) + x = self.flatten(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def CSPDarkNet53(pretrained=False, use_ssld=False, **kwargs): + model = CSPNet(MODEL_CFGS["CSPDarkNet53"], block_fn=DarkBlock, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["CSPDarkNet53"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cswin_transformer.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cswin_transformer.py new file mode 100644 index 000000000..9d6d26cbe --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cswin_transformer.py @@ -0,0 +1,651 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Code was based on https://github.com/BR-IDL/PaddleViT/blob/develop/image_classification/CSwin/cswin.py +# reference: https://arxiv.org/abs/2107.00652 + +import copy +import numpy as np +import paddle +import paddle.nn as nn +from .vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "CSWinTransformer_tiny_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_tiny_224_pretrained.pdparams", + "CSWinTransformer_small_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_small_224_pretrained.pdparams", + "CSWinTransformer_base_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_base_224_pretrained.pdparams", + "CSWinTransformer_large_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_large_224_pretrained.pdparams", + "CSWinTransformer_base_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_base_384_pretrained.pdparams", + "CSWinTransformer_large_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_large_384_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class PatchEmbedding(nn.Layer): + """CSwin Patch Embedding + This patch embedding has a 7x7 conv + layernorm, the output tensor + is reshaped to [Batch, H*W, embed_dim]. Note that the patch is applied + by a conv with overlap (using patch_stride). + Args: + patch_stride: int, patch stride size, default: 4 + in_channels: int, number of channels of input image, default: 3 + embed_dim: int, output feature dimension, default: 96 + """ + + def __init__(self, patch_stride=4, in_channels=3, embed_dim=96): + super().__init__() + self.patch_embed = nn.Conv2D( + in_channels=in_channels, + out_channels=embed_dim, + kernel_size=7, + stride=patch_stride, + padding=2) + + self.norm = nn.LayerNorm(embed_dim) + + def forward(self, x): + x = self.patch_embed( + x) # [batch, embed_dim, h, w], h = w = image_size / 4 + x = x.flatten(start_axis=2, stop_axis=-1) # [batch, embed_dim, h*w] + x = x.transpose([0, 2, 1]) # [batch, h*w, embed_dim] + x = self.norm(x) + return x + + +class Mlp(nn.Layer): + """ MLP module + Impl using nn.Linear and activation is GELU, dropout is applied. 
+ Ops: fc -> act -> dropout -> fc -> dropout + Attributes: + fc1: nn.Linear + fc2: nn.Linear + act: GELU + dropout1: dropout after fc1 + dropout2: dropout after fc2 + """ + + def __init__(self, in_features, hidden_features, dropout): + super().__init__() + self.fc1 = nn.Linear(in_features, hidden_features) + self.fc2 = nn.Linear(hidden_features, in_features) + self.act = nn.GELU() + self.dropout = nn.Dropout(dropout) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.dropout(x) + x = self.fc2(x) + x = self.dropout(x) + return x + + +def img2windows(img, h_split, w_split): + """Convert input tensor into split stripes + Args: + img: tensor, image tensor with shape [B, C, H, W] + h_split: int, splits width in height direction + w_split: int, splits width in width direction + Returns: + out: tensor, splitted image + """ + B, C, H, W = img.shape + out = img.reshape([B, C, H // h_split, h_split, W // w_split, w_split]) + out = out.transpose( + [0, 2, 4, 3, 5, 1]) # [B, H//h_split, W//w_split, h_split, w_split, C] + out = out.reshape([-1, h_split * w_split, + C]) # [B, H//h_split, W//w_split, h_split*w_split, C] + return out + + +def windows2img(img_splits, h_split, w_split, img_h, img_w): + """Convert splitted stripes back + Args: + img_splits: tensor, image tensor with shape [B, C, H, W] + h_split: int, splits width in height direction + w_split: int, splits width in width direction + img_h: int, original tensor height + img_w: int, original tensor width + Returns: + img: tensor, original tensor + """ + B = paddle.to_tensor(img_splits.shape[0] // + (img_h // h_split * img_w // w_split), "int32") + img = img_splits.reshape([ + B, img_h // h_split, img_w // w_split, h_split, w_split, + img_splits.shape[-1] + ]) + img = img.transpose( + [0, 1, 3, 2, 4, + 5]) #[B,img_h//h_split, h_split, img_w//w_split, w_split,C] + img = img.reshape( + [B, img_h, img_w, img_splits.shape[-1]]) # [B, img_h, img_w, C] + return img + + +class LePEAttention(nn.Layer): + """Cross Shaped Window self-attention with Locally enhanced positional encoding""" + + def __init__(self, + dim, + resolution, + h_split=7, + w_split=7, + num_heads=8, + attention_dropout=0., + dropout=0., + qk_scale=None): + super().__init__() + self.dim = dim + self.resolution = resolution + self.num_heads = num_heads + self.dim_head = dim // num_heads + self.scale = qk_scale or self.dim_head**-0.5 + self.h_split = h_split + self.w_split = w_split + + self.get_v = nn.Conv2D( + in_channels=dim, + out_channels=dim, + kernel_size=3, + stride=1, + padding=1, + groups=dim) + + self.softmax = nn.Softmax(axis=-1) + self.attn_dropout = nn.Dropout(attention_dropout) + + def im2cswin(self, x): + B, HW, C = x.shape + H = W = int(np.sqrt(HW)) + x = x.transpose([0, 2, 1]) # [B, C, H*W] + x = x.reshape([B, C, H, W]) # [B, C, H, W] + x = img2windows(x, self.h_split, self.w_split) + x = x.reshape( + [-1, self.h_split * self.w_split, self.num_heads, self.dim_head]) + x = x.transpose([0, 2, 1, 3]) + return x + + def get_lepe(self, x, func): + """Locally Enhanced Positional Encoding (LePE) + This module applies a depthwise conv on V and returns the lepe + Args: + x: tensor, the input tensor V + func: nn.Layer, a depth wise conv of kernel 3 stride 1 and padding 1 + """ + B, HW, C = x.shape + H = W = int(np.sqrt(HW)) + h_split = self.h_split + w_split = self.w_split + + x = x.transpose([0, 2, 1]) # [B, C, H*W] + x = x.reshape([B, C, H, W]) # [B, C, H, W] + x = x.reshape([B, C, H // h_split, h_split, W // w_split, w_split]) + x = x.transpose( + [0, 2, 
4, 1, 3,
+             5])  # [B, H//h_split, W//w_split, C, h_split, w_split]
+        x = x.reshape(
+            [-1, C, h_split,
+             w_split])  # [B*(H//h_split)*(W//w_split), C, h_split, w_split]
+
+        lepe = func(x)  # depthwise conv does not change shape
+        #lepe = lepe.reshape([-1, self.num_heads, C // self.num_heads, h_split * w_split])
+        lepe = lepe.reshape(
+            [-1, self.num_heads, self.dim_head, h_split * w_split])
+        lepe = lepe.transpose(
+            [0, 1, 3, 2])  # [B, num_heads, h_split*w_split, dim_head]
+
+        x = x.reshape([-1, self.num_heads, self.dim_head, h_split * w_split])
+        x = x.transpose(
+            [0, 1, 3, 2])  # [B, num_heads, h_split*w_split, dim_head]
+        return x, lepe
+
+    def forward(self, q, k, v):
+        B, HW, C = q.shape
+        H = W = self.resolution
+        q = self.im2cswin(q)
+        k = self.im2cswin(k)
+        v, lepe = self.get_lepe(v, self.get_v)
+
+        q = q * self.scale
+        attn = paddle.matmul(q, k, transpose_y=True)
+        attn = self.softmax(attn)
+        attn = self.attn_dropout(attn)
+
+        z = paddle.matmul(attn, v)
+        z = z + lepe
+        z = z.transpose([0, 2, 1, 3])
+        z = z.reshape([-1, self.h_split * self.w_split, C])
+
+        z = windows2img(z, self.h_split, self.w_split, H, W)
+        z = z.reshape([B, z.shape[1] * z.shape[2], C])
+        return z
+
+
+class CSwinBlock(nn.Layer):
+    """CSwin Block
+    CSwin block contains a LePE attention module, a linear projection,
+    an MLP layer, and related norm layers. In the first 3 stages, the
+    LePE attention modules use 2 branches, where horizontal and
+    vertical split stripes are used for self-attention and a concat
+    op is applied to combine the outputs. The last stage uses a single
+    branch in LePE attention.
+    Args:
+        dim: int, input feature dimension
+        input_resolution: int, input feature spatial size.
+        num_heads: int, num of attention heads in current stage
+        split_size: int, the split size in current stage
+        mlp_ratio: float, mlp ratio, mlp_hidden_dim = mlp_ratio * mlp_in_dim, default: 4.
+        qkv_bias: bool, if set True, qkv projection will have bias, default: True
+        qk_scale: float, if set, replace the orig qk_scale (dim_head ** -0.5), default: None
+        dropout: float, dropout rate for linear projection, default: 0
+        attention_dropout: float, dropout rate for attention, default: 0
+        droppath: float, drop path rate, default: 0
+        split_heads: bool, if True, head splitting is applied (True for stages 1, 2 and 3), default: True
+    """
+
+    def __init__(self,
+                 dim,
+                 input_resolution,
+                 num_heads,
+                 split_size=7,
+                 mlp_ratio=4.,
+                 qkv_bias=False,
+                 qk_scale=None,
+                 attention_dropout=0.,
+                 dropout=0.,
+                 droppath=0.,
+                 split_heads=True):
+        super().__init__()
+        self.dim = dim
+        # NOTE: here assume image_h == image_w
+        self.input_resolution = (input_resolution, input_resolution)
+        self.num_heads = num_heads
+        self.dim_head = dim // num_heads
+        self.mlp_ratio = mlp_ratio
+        self.split_size = split_size
+        self.norm1 = nn.LayerNorm(dim)
+        self.qkv = nn.Linear(
+            dim, dim * 3, bias_attr=None if qkv_bias else False)
+        self.attns = nn.LayerList()
+        self.split_heads = split_heads
+
+        num_branches = 2 if split_heads else 1
+        if split_heads:  # first 3 stages
+            splits = [self.input_resolution[0],
+                      self.split_size]  # horizontal splits
+        else:  # last stage
+            splits = [self.input_resolution[0], self.input_resolution[0]]
+        for _ in range(num_branches):
+            attn = LePEAttention(
+                dim=dim // num_branches,
+                resolution=input_resolution,
+                h_split=splits[0],
+                w_split=splits[1],
+                num_heads=num_heads // num_branches,
+                qk_scale=qk_scale,
+                attention_dropout=attention_dropout,
+                dropout=dropout)
+            self.attns.append(copy.deepcopy(attn))
+            # switch splits from horizontal to vertical
+            # NOTE: may need to change for different H and W
+            splits[0], splits[1] = splits[1], splits[0]
+
+        self.proj = nn.Linear(dim, dim)
+        self.drop_path = DropPath(droppath) if droppath > 0. else Identity()
+
+        self.norm2 = nn.LayerNorm(dim)
+        self.mlp = Mlp(in_features=dim,
+                       hidden_features=int(dim * mlp_ratio),
+                       dropout=dropout)
+
+    def chunk_qkv(self, x, chunks=1, axis=-1):
+        x = x.chunk(chunks, axis=axis)
+        return x
+
+    def forward(self, x):
+        H, W = self.input_resolution
+        B, HW, C = x.shape
+        # cswin attention
+        h = x
+        x = self.norm1(x)
+        qkv = self.qkv(x).chunk(3, axis=-1)  # qkv is a tuple of [q, k, v]
+
+        if self.split_heads:
+            q, k, v = map(self.chunk_qkv, qkv,
+                          (2, 2, 2))  # map requires list/tuple inputs
+        else:
+            q, k, v = map(lambda x: [x], qkv)
+
+        if self.split_heads:  # first 3 stages
+            h_attn = self.attns[0](q[0], k[0], v[0])
+            w_attn = self.attns[1](q[1], k[1], v[1])
+            attn = paddle.concat([h_attn, w_attn], axis=2)
+        else:  # last stage
+            attn = self.attns[0](q[0], k[0], v[0])
+        attn = self.proj(attn)
+        attn = self.drop_path(attn)
+        x = h + attn
+        # mlp + residual
+        h = x
+        x = self.norm2(x)
+        x = self.mlp(x)
+        x = self.drop_path(x)
+        x = h + x
+        return x
+
+
+class MergeBlock(nn.Layer):
+    def __init__(self, dim_in, dim_out):
+        super().__init__()
+        self.conv = nn.Conv2D(
+            in_channels=dim_in,
+            out_channels=dim_out,
+            kernel_size=3,
+            stride=2,
+            padding=1)
+        self.norm = nn.LayerNorm(dim_out)
+
+    def forward(self, x):
+        B, HW, C = x.shape
+        H = W = int(np.sqrt(HW))
+        x = x.transpose([0, 2, 1])  # [B, C, HW]
+        x = x.reshape([B, C, H, W])  # [B, C, H, W]
+        x = self.conv(x)
+        new_shape = [x.shape[0], x.shape[1],
+                     x.shape[2] * x.shape[3]]  # [B, C', H*W]
+        x = x.reshape(new_shape)  # [B, C', H*W]
+        x = x.transpose([0, 2, 1])  # [B, H*W, C']
+        x = self.norm(x)
+        return x
+
+
+class CSwinStage(nn.Layer):
+    """ CSwin Stage, each stage contains multiple blocks
+    CSwin has 4 stages, the first 3 stages use head split. The last
+    stage does not have head split. There is a merge block between
+    every two stages.
+    Args:
+        dim: int, input feature dimension
+        depth: int, number of blocks in current stage
+        num_heads: int, num of attention heads in current stage
+        split_size: int, the split size in current stage
+        mlp_ratio: float, mlp ratio, mlp_hidden_dim = mlp_ratio * mlp_in_dim, default: 4.
+ qkv_bias: bool, if set True, qkv projection will have bias, default: True + qk_scale: float, if set, replace the orig qk_scale (dim_head ** -0.5), default: None + dropout: float, dropout rate for linear projection, default: 0 + attention_dropout: float, dropout rate for attention, default: 0 + droppath: float, drop path rate, default: 0 + last_stage: bool, if current stage is the last stage, default: False + """ + + def __init__(self, + dim, + input_resolution, + depth, + num_heads, + split_size, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + dropout=0., + attention_dropout=0., + droppath=0., + last_stage=False): + super().__init__() + self.blocks = nn.LayerList() + for i in range(depth): + block = CSwinBlock( + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + split_size=split_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attention_dropout=attention_dropout, + dropout=dropout, + droppath=droppath[i] + if isinstance(droppath, list) else droppath, + split_heads=not last_stage) + self.blocks.append(copy.deepcopy(block)) + # last stage does not need merge layer + self.merge = MergeBlock( + dim_in=dim, dim_out=dim * 2) if not last_stage else Identity() + + def forward(self, x): + for block in self.blocks: + x = block(x) + x = self.merge(x) + return x + + +class CSwinTransformer(nn.Layer): + """CSwin Transformer class + Args: + image_size: int, input image size, default: 224 + patch_stride: int, stride for patch embedding, default: 4 + in_channels: int, num of channels of input image, default: 3 + num_classes: int, num of classes, default: 1000 + embed_dim: int, embedding dim (patch embed out dim), default: 96 + depths: list/tuple(int), number of blocks in each stage, default: [2, 4, 32, 2] + splits: list/tuple(int), the split number in each stage, default: [1, 2, 7, 7] + num_heads: list/tuple(int), num of attention heads in each stage, default: [4, 8, 16, 32] + mlp_ratio: float, mlp ratio, mlp_hidden_dim = mlp_ratio * mlp_in_dim, default: 4. 
+ qkv_bias: bool, if set True, qkv projection will have bias, default: True + qk_scale: float, if set, replace the orig qk_scale (dim_head ** -0.5), default: None + dropout: float, dropout rate for linear projection, default: 0 + attention_dropout: float, dropout rate for attention, default: 0 + droppath: float, drop path rate, default: 0 + """ + + def __init__(self, + image_size=224, + patch_stride=4, + in_channels=3, + class_num=1000, + embed_dim=96, + depths=[2, 4, 32, 2], + splits=[1, 2, 7, 7], + num_heads=[4, 8, 16, 32], + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + dropout=0., + attention_dropout=0., + droppath=0.): + super().__init__() + # token embedding + self.patch_embedding = PatchEmbedding( + patch_stride=patch_stride, + in_channels=in_channels, + embed_dim=embed_dim) + # drop path decay by stage + depth_decay = [ + x.item() for x in paddle.linspace(0, droppath, sum(depths)) + ] + dim = embed_dim + resolution = image_size // 4 + self.stages = nn.LayerList() + num_stages = len(depths) + # construct CSwin stages: each stage has multiple blocks + for stage_idx in range(num_stages): + stage = CSwinStage( + dim=dim, + input_resolution=resolution, + depth=depths[stage_idx], + num_heads=num_heads[stage_idx], + split_size=splits[stage_idx], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + dropout=dropout, + attention_dropout=attention_dropout, + droppath=depth_decay[sum(depths[:stage_idx]):sum( + depths[:stage_idx + 1])], + last_stage=stage_idx == num_stages - 1) + self.stages.append(stage) + if stage_idx != num_stages - 1: + dim = dim * 2 + resolution = resolution // 2 + # last norm and classification head layers + self.norm = nn.LayerNorm(dim) + self.head = nn.Linear(dim, class_num) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + x = self.patch_embedding(x) + for stage in self.stages: + x = stage(x) + x = self.norm(x) + return paddle.mean(x, axis=1) + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def CSWinTransformer_tiny_224(pretrained=False, use_ssld=False, **kwargs): + model = CSwinTransformer( + image_size=224, + embed_dim=64, + depths=[1, 2, 21, 1], + splits=[1, 2, 7, 7], + num_heads=[2, 4, 8, 16], + droppath=0.2, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["CSWinTransformer_tiny_224"], + use_ssld=use_ssld) + return model + + +def CSWinTransformer_small_224(pretrained=False, use_ssld=False, **kwargs): + model = CSwinTransformer( + image_size=224, + embed_dim=64, + depths=[2, 4, 32, 2], + splits=[1, 2, 7, 7], + num_heads=[2, 4, 8, 16], + droppath=0.4, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["CSWinTransformer_small_224"], + use_ssld=use_ssld) + return model + + +def CSWinTransformer_base_224(pretrained=False, use_ssld=False, **kwargs): + model = CSwinTransformer( + image_size=224, + embed_dim=96, + depths=[2, 4, 32, 2], + splits=[1, 2, 7, 7], + num_heads=[4, 8, 16, 32], + droppath=0.5, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["CSWinTransformer_base_224"], + use_ssld=use_ssld) + return model + + +def CSWinTransformer_base_384(pretrained=False, use_ssld=False, **kwargs): + model = CSwinTransformer( + image_size=384, + embed_dim=96, + depths=[2, 4, 32, 2], + splits=[1, 2, 12, 12], + num_heads=[4, 8, 16, 32], + droppath=0.5, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["CSWinTransformer_base_384"], + use_ssld=use_ssld) + return model + + +def CSWinTransformer_large_224(pretrained=False, use_ssld=False, **kwargs): + model = CSwinTransformer( + image_size=224, + embed_dim=144, + depths=[2, 4, 32, 2], + splits=[1, 2, 7, 7], + num_heads=[6, 12, 24, 24], + droppath=0.5, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["CSWinTransformer_large_224"], + use_ssld=use_ssld) + return model + + +def CSWinTransformer_large_384(pretrained=False, use_ssld=False, **kwargs): + model = CSwinTransformer( + image_size=384, + embed_dim=144, + depths=[2, 4, 32, 2], + splits=[1, 2, 12, 12], + num_heads=[6, 12, 24, 24], + droppath=0.5, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["CSWinTransformer_large_384"], + use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cvt.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cvt.py new file mode 100644 index 000000000..0092fb3d7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/cvt.py @@ -0,0 +1,723 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Code was heavily based on https://github.com/microsoft/CvT +# reference: https://arxiv.org/abs/2103.15808 + +import paddle +import paddle.nn as nn +from paddle.nn.initializer import XavierUniform, TruncatedNormal, Constant + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "CvT_13_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CvT_13_224_pretrained.pdparams", + "CvT_13_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CvT_13_384_pretrained.pdparams", + "CvT_21_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CvT_21_224_pretrained.pdparams", + "CvT_21_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CvT_21_384_pretrained.pdparams", + "CvT_W24_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CvT_W24_384_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + +xavier_uniform_ = XavierUniform() +trunc_normal_ = TruncatedNormal(std=.02) +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) + + +def drop_path(x, drop_prob=0., training=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + """ + if drop_prob == 0. or not training: + return x + keep_prob = paddle.to_tensor(1 - drop_prob) + shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) + random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype) + random_tensor = paddle.floor(random_tensor) # binarize + output = x.divide(keep_prob) * random_tensor + return output + + +class DropPath(nn.Layer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
+ """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self): + return f'drop_prob={self.drop_prob:.3f}' + + +def rearrange(x, pattern, **axes_lengths): + if 'b (h w) c -> b c h w' == pattern: + b, _, c = x.shape + h, w = axes_lengths.pop('h', -1), axes_lengths.pop('w', -1) + return x.transpose([0, 2, 1]).reshape([b, c, h, w]) + if 'b c h w -> b (h w) c' == pattern: + b, c, h, w = x.shape + return x.reshape([b, c, h * w]).transpose([0, 2, 1]) + if 'b t (h d) -> b h t d' == pattern: + b, t, h_d = x.shape + h = axes_lengths['h'] + return x.reshape([b, t, h, h_d // h]).transpose([0, 2, 1, 3]) + if 'b h t d -> b t (h d)' == pattern: + b, h, t, d = x.shape + return x.transpose([0, 2, 1, 3]).reshape([b, t, h * d]) + + raise NotImplementedError( + f"Rearrangement '{pattern}' has not been implemented.") + + +class Rearrange(nn.Layer): + def __init__(self, pattern, **axes_lengths): + super().__init__() + self.pattern = pattern + self.axes_lengths = axes_lengths + + def forward(self, x): + return rearrange(x, self.pattern, **self.axes_lengths) + + def extra_repr(self): + return self.pattern + + +class QuickGELU(nn.Layer): + def forward(self, x): + return x * nn.functional.sigmoid(1.702 * x) + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Layer): + def __init__(self, + dim_in, + dim_out, + num_heads, + qkv_bias=False, + attn_drop=0., + proj_drop=0., + method='dw_bn', + kernel_size=3, + stride_kv=1, + stride_q=1, + padding_kv=1, + padding_q=1, + with_cls_token=True, + **kwargs): + super().__init__() + self.stride_kv = stride_kv + self.stride_q = stride_q + self.dim = dim_out + self.num_heads = num_heads + # head_dim = self.qkv_dim // num_heads + self.scale = dim_out**-0.5 + self.with_cls_token = with_cls_token + + self.conv_proj_q = self._build_projection( + dim_in, dim_out, kernel_size, padding_q, stride_q, 'linear' + if method == 'avg' else method) + self.conv_proj_k = self._build_projection( + dim_in, dim_out, kernel_size, padding_kv, stride_kv, method) + self.conv_proj_v = self._build_projection( + dim_in, dim_out, kernel_size, padding_kv, stride_kv, method) + + self.proj_q = nn.Linear(dim_in, dim_out, bias_attr=qkv_bias) + self.proj_k = nn.Linear(dim_in, dim_out, bias_attr=qkv_bias) + self.proj_v = nn.Linear(dim_in, dim_out, bias_attr=qkv_bias) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim_out, dim_out) + self.proj_drop = nn.Dropout(proj_drop) + + def _build_projection(self, dim_in, dim_out, kernel_size, padding, stride, + method): + if method == 'dw_bn': + proj = nn.Sequential( + ('conv', nn.Conv2D( + dim_in, + dim_in, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias_attr=False, + groups=dim_in)), ('bn', nn.BatchNorm2D(dim_in)), + ('rearrage', Rearrange('b c h w -> b (h w) c'))) + elif method == 'avg': + proj = nn.Sequential( + ('avg', nn.AvgPool2D( + kernel_size=kernel_size, + 
stride=stride, + padding=padding, + ceil_mode=True)), + ('rearrage', Rearrange('b c h w -> b (h w) c'))) + elif method == 'linear': + proj = None + else: + raise ValueError('Unknown method ({})'.format(method)) + + return proj + + def forward_conv(self, x, h, w): + if self.with_cls_token: + cls_token, x = paddle.split(x, [1, h * w], 1) + + x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w) + + if self.conv_proj_q is not None: + q = self.conv_proj_q(x) + else: + q = rearrange(x, 'b c h w -> b (h w) c') + + if self.conv_proj_k is not None: + k = self.conv_proj_k(x) + else: + k = rearrange(x, 'b c h w -> b (h w) c') + + if self.conv_proj_v is not None: + v = self.conv_proj_v(x) + else: + v = rearrange(x, 'b c h w -> b (h w) c') + + if self.with_cls_token: + q = paddle.concat((cls_token, q), axis=1) + k = paddle.concat((cls_token, k), axis=1) + v = paddle.concat((cls_token, v), axis=1) + + return q, k, v + + def forward(self, x, h, w): + if (self.conv_proj_q is not None or self.conv_proj_k is not None or + self.conv_proj_v is not None): + q, k, v = self.forward_conv(x, h, w) + + q = rearrange(self.proj_q(q), 'b t (h d) -> b h t d', h=self.num_heads) + k = rearrange(self.proj_k(k), 'b t (h d) -> b h t d', h=self.num_heads) + v = rearrange(self.proj_v(v), 'b t (h d) -> b h t d', h=self.num_heads) + + attn_score = (q @k.transpose([0, 1, 3, 2])) * self.scale + attn = nn.functional.softmax(attn_score, axis=-1) + attn = self.attn_drop(attn) + + x = attn @v + x = rearrange(x, 'b h t d -> b t (h d)') + + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class Block(nn.Layer): + def __init__(self, + dim_in, + dim_out, + num_heads, + mlp_ratio=4., + qkv_bias=False, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + **kwargs): + super().__init__() + + self.with_cls_token = kwargs['with_cls_token'] + + self.norm1 = norm_layer(dim_in) + self.attn = Attention(dim_in, dim_out, num_heads, qkv_bias, attn_drop, + drop, **kwargs) + + self.drop_path = DropPath(drop_path) \ + if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim_out) + + dim_mlp_hidden = int(dim_out * mlp_ratio) + self.mlp = Mlp(in_features=dim_out, + hidden_features=dim_mlp_hidden, + act_layer=act_layer, + drop=drop) + + def forward(self, x, h, w): + x = x + self.drop_path(self.attn(self.norm1(x), h, w)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConvEmbed(nn.Layer): + def __init__(self, + patch_size=7, + in_chans=3, + embed_dim=64, + stride=4, + padding=2, + norm_layer=None): + super().__init__() + self.patch_size = patch_size + + self.proj = nn.Conv2D( + in_chans, + embed_dim, + kernel_size=patch_size, + stride=stride, + padding=padding) + self.norm = norm_layer(embed_dim) if norm_layer else None + + def forward(self, x): + x = self.proj(x) + + B, C, H, W = x.shape + x = rearrange(x, 'b c h w -> b (h w) c') + if self.norm: + x = self.norm(x) + x = rearrange(x, 'b (h w) c -> b c h w', h=H, w=W) + + return x + + +class VisionTransformer(nn.Layer): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, + patch_size=16, + patch_stride=16, + patch_padding=0, + in_chans=3, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + init='trunc_norm', + **kwargs): + super().__init__() + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.rearrage = None + + self.patch_embed = ConvEmbed( + # img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + stride=patch_stride, + padding=patch_padding, + embed_dim=embed_dim, + norm_layer=norm_layer) + + with_cls_token = kwargs['with_cls_token'] + if with_cls_token: + self.cls_token = self.create_parameter( + shape=[1, 1, embed_dim], default_initializer=trunc_normal_) + else: + self.cls_token = None + + self.pos_drop = nn.Dropout(p=drop_rate) + dpr = [x.item() for x in paddle.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + + blocks = [] + for j in range(depth): + blocks.append( + Block( + dim_in=embed_dim, + dim_out=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[j], + act_layer=act_layer, + norm_layer=norm_layer, + **kwargs)) + self.blocks = nn.LayerList(blocks) + + if init == 'xavier': + self.apply(self._init_weights_xavier) + else: + self.apply(self._init_weights_trunc_normal) + + def _init_weights_trunc_normal(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if m.bias is not None: + zeros_(m.bias) + elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2D)): + zeros_(m.bias) + ones_(m.weight) + + def _init_weights_xavier(self, m): + if isinstance(m, nn.Linear): + xavier_uniform_(m.weight) + if m.bias is not None: + zeros_(m.bias) + elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2D)): + zeros_(m.bias) + ones_(m.weight) + + def forward(self, x): + x = self.patch_embed(x) + B, C, H, W = x.shape + + x = rearrange(x, 'b c h w -> b (h w) c') + + cls_tokens = None + if self.cls_token is not None: + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand([B, -1, -1]) + x = paddle.concat((cls_tokens, x), axis=1) + + x = self.pos_drop(x) + + for i, blk in enumerate(self.blocks): + x = blk(x, H, W) + + if self.cls_token is not None: + cls_tokens, x = paddle.split(x, [1, H * W], 1) + x = rearrange(x, 'b (h w) c -> b c h w', h=H, w=W) + + return x, cls_tokens + 
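+
+# NOTE: The helper below is only an illustrative sketch (it is not part of the
+# original CvT code); the function name and the example shapes are hypothetical.
+# It exercises the hand-rolled `rearrange` patterns defined above so their shape
+# semantics are concrete: flatten a feature map to tokens, split tokens into
+# attention heads, then merge the heads and restore the spatial layout.
+def _rearrange_shape_sketch():
+    # [batch, channels, height, width] feature map, e.g. the input of one stage
+    x = paddle.randn([2, 64, 14, 14])
+    # flatten the spatial dims into a token axis: [2, 196, 64]
+    tokens = rearrange(x, 'b c h w -> b (h w) c')
+    assert tokens.shape == [2, 196, 64]
+    # split the channel axis into 4 attention heads: [2, 4, 196, 16]
+    heads = rearrange(tokens, 'b t (h d) -> b h t d', h=4)
+    assert heads.shape == [2, 4, 196, 16]
+    # merge the heads back and restore the spatial layout: [2, 64, 14, 14]
+    merged = rearrange(heads, 'b h t d -> b t (h d)')
+    restored = rearrange(merged, 'b (h w) c -> b c h w', h=14, w=14)
+    assert restored.shape == [2, 64, 14, 14]
+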
+ +class ConvolutionalVisionTransformer(nn.Layer): + def __init__(self, + in_chans=3, + class_num=1000, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + init='trunc_norm', + spec=None): + super().__init__() + self.class_num = class_num + + self.num_stages = spec['NUM_STAGES'] + for i in range(self.num_stages): + kwargs = { + 'patch_size': spec['PATCH_SIZE'][i], + 'patch_stride': spec['PATCH_STRIDE'][i], + 'patch_padding': spec['PATCH_PADDING'][i], + 'embed_dim': spec['DIM_EMBED'][i], + 'depth': spec['DEPTH'][i], + 'num_heads': spec['NUM_HEADS'][i], + 'mlp_ratio': spec['MLP_RATIO'][i], + 'qkv_bias': spec['QKV_BIAS'][i], + 'drop_rate': spec['DROP_RATE'][i], + 'attn_drop_rate': spec['ATTN_DROP_RATE'][i], + 'drop_path_rate': spec['DROP_PATH_RATE'][i], + 'with_cls_token': spec['CLS_TOKEN'][i], + 'method': spec['QKV_PROJ_METHOD'][i], + 'kernel_size': spec['KERNEL_QKV'][i], + 'padding_q': spec['PADDING_Q'][i], + 'padding_kv': spec['PADDING_KV'][i], + 'stride_kv': spec['STRIDE_KV'][i], + 'stride_q': spec['STRIDE_Q'][i], + } + + stage = VisionTransformer( + in_chans=in_chans, + init=init, + act_layer=act_layer, + norm_layer=norm_layer, + **kwargs) + setattr(self, f'stage{i}', stage) + + in_chans = spec['DIM_EMBED'][i] + + dim_embed = spec['DIM_EMBED'][-1] + self.norm = norm_layer(dim_embed) + self.cls_token = spec['CLS_TOKEN'][-1] + + # Classifier head + self.head = nn.Linear(dim_embed, + class_num) if class_num > 0 else nn.Identity() + trunc_normal_(self.head.weight) + + bound = 1 / dim_embed**.5 + nn.initializer.Uniform(-bound, bound)(self.head.bias) + + def no_weight_decay(self): + layers = set() + for i in range(self.num_stages): + layers.add(f'stage{i}.pos_embed') + layers.add(f'stage{i}.cls_token') + return layers + + def forward_features(self, x): + for i in range(self.num_stages): + x, cls_tokens = getattr(self, f'stage{i}')(x) + + if self.cls_token: + x = self.norm(cls_tokens) + x = paddle.squeeze(x, axis=1) + else: + x = rearrange(x, 'b c h w -> b (h w) c') + x = self.norm(x) + x = paddle.mean(x, axis=1) + + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _load_pretrained(pretrained, + model, + model_url, + use_ssld=False, + use_imagenet22kto1k_pretrained=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain( + model, + model_url, + use_ssld=use_ssld, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def CvT_13_224(pretrained=False, use_ssld=False, **kwargs): + msvit_spec = dict( + INIT='trunc_norm', + NUM_STAGES=3, + PATCH_SIZE=[7, 3, 3], + PATCH_STRIDE=[4, 2, 2], + PATCH_PADDING=[2, 1, 1], + DIM_EMBED=[64, 192, 384], + NUM_HEADS=[1, 3, 6], + DEPTH=[1, 2, 10], + MLP_RATIO=[4.0, 4.0, 4.0], + ATTN_DROP_RATE=[0.0, 0.0, 0.0], + DROP_RATE=[0.0, 0.0, 0.0], + DROP_PATH_RATE=[0.0, 0.0, 0.1], + QKV_BIAS=[True, True, True], + CLS_TOKEN=[False, False, True], + POS_EMBED=[False, False, False], + QKV_PROJ_METHOD=['dw_bn', 'dw_bn', 'dw_bn'], + KERNEL_QKV=[3, 3, 3], + PADDING_KV=[1, 1, 1], + STRIDE_KV=[2, 2, 2], + PADDING_Q=[1, 1, 1], + STRIDE_Q=[1, 1, 1]) + model = ConvolutionalVisionTransformer( + in_chans=3, + act_layer=QuickGELU, + init=msvit_spec.get('INIT', 'trunc_norm'), + spec=msvit_spec, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["CvT_13_224"], use_ssld=use_ssld) + return model + + +def CvT_13_384(pretrained=False, + use_ssld=False, + use_imagenet22kto1k_pretrained=False, + **kwargs): + msvit_spec = dict( + INIT='trunc_norm', + NUM_STAGES=3, + PATCH_SIZE=[7, 3, 3], + PATCH_STRIDE=[4, 2, 2], + PATCH_PADDING=[2, 1, 1], + DIM_EMBED=[64, 192, 384], + NUM_HEADS=[1, 3, 6], + DEPTH=[1, 2, 10], + MLP_RATIO=[4.0, 4.0, 4.0], + ATTN_DROP_RATE=[0.0, 0.0, 0.0], + DROP_RATE=[0.0, 0.0, 0.0], + DROP_PATH_RATE=[0.0, 0.0, 0.1], + QKV_BIAS=[True, True, True], + CLS_TOKEN=[False, False, True], + POS_EMBED=[False, False, False], + QKV_PROJ_METHOD=['dw_bn', 'dw_bn', 'dw_bn'], + KERNEL_QKV=[3, 3, 3], + PADDING_KV=[1, 1, 1], + STRIDE_KV=[2, 2, 2], + PADDING_Q=[1, 1, 1], + STRIDE_Q=[1, 1, 1]) + model = ConvolutionalVisionTransformer( + in_chans=3, + act_layer=QuickGELU, + init=msvit_spec.get('INIT', 'trunc_norm'), + spec=msvit_spec, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["CvT_13_384"], + use_ssld=use_ssld, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + return model + + +def CvT_21_224(pretrained=False, use_ssld=False, **kwargs): + msvit_spec = dict( + INIT='trunc_norm', + NUM_STAGES=3, + PATCH_SIZE=[7, 3, 3], + PATCH_STRIDE=[4, 2, 2], + PATCH_PADDING=[2, 1, 1], + DIM_EMBED=[64, 192, 384], + NUM_HEADS=[1, 3, 6], + DEPTH=[1, 4, 16], + MLP_RATIO=[4.0, 4.0, 4.0], + ATTN_DROP_RATE=[0.0, 0.0, 0.0], + DROP_RATE=[0.0, 0.0, 0.0], + DROP_PATH_RATE=[0.0, 0.0, 0.1], + QKV_BIAS=[True, True, True], + CLS_TOKEN=[False, False, True], + POS_EMBED=[False, False, False], + QKV_PROJ_METHOD=['dw_bn', 'dw_bn', 'dw_bn'], + KERNEL_QKV=[3, 3, 3], + PADDING_KV=[1, 1, 1], + STRIDE_KV=[2, 2, 2], + PADDING_Q=[1, 1, 1], + STRIDE_Q=[1, 1, 1]) + model = ConvolutionalVisionTransformer( + in_chans=3, + act_layer=QuickGELU, + init=msvit_spec.get('INIT', 'trunc_norm'), + spec=msvit_spec, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["CvT_21_224"], use_ssld=use_ssld) + return model + + +def CvT_21_384(pretrained=False, + use_ssld=False, + use_imagenet22kto1k_pretrained=False, + **kwargs): + msvit_spec = dict( + INIT='trunc_norm', + NUM_STAGES=3, + PATCH_SIZE=[7, 3, 3], + PATCH_STRIDE=[4, 2, 2], + PATCH_PADDING=[2, 1, 1], + DIM_EMBED=[64, 192, 384], + NUM_HEADS=[1, 3, 6], + DEPTH=[1, 4, 16], + MLP_RATIO=[4.0, 4.0, 4.0], + ATTN_DROP_RATE=[0.0, 0.0, 0.0], + DROP_RATE=[0.0, 0.0, 0.0], + DROP_PATH_RATE=[0.0, 0.0, 0.1], + QKV_BIAS=[True, True, True], + CLS_TOKEN=[False, False, True], + POS_EMBED=[False, False, False], + QKV_PROJ_METHOD=['dw_bn', 'dw_bn', 'dw_bn'], + KERNEL_QKV=[3, 3, 3], + PADDING_KV=[1, 1, 1], + STRIDE_KV=[2, 2, 2], + PADDING_Q=[1, 1, 1], + 
STRIDE_Q=[1, 1, 1]) + model = ConvolutionalVisionTransformer( + in_chans=3, + act_layer=QuickGELU, + init=msvit_spec.get('INIT', 'trunc_norm'), + spec=msvit_spec, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["CvT_21_384"], + use_ssld=use_ssld, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + return model + + +def CvT_W24_384(pretrained=False, use_ssld=False, **kwargs): + msvit_spec = dict( + INIT='trunc_norm', + NUM_STAGES=3, + PATCH_SIZE=[7, 3, 3], + PATCH_STRIDE=[4, 2, 2], + PATCH_PADDING=[2, 1, 1], + DIM_EMBED=[192, 768, 1024], + NUM_HEADS=[3, 12, 16], + DEPTH=[2, 2, 20], + MLP_RATIO=[4.0, 4.0, 4.0], + ATTN_DROP_RATE=[0.0, 0.0, 0.0], + DROP_RATE=[0.0, 0.0, 0.0], + DROP_PATH_RATE=[0.0, 0.0, 0.3], + QKV_BIAS=[True, True, True], + CLS_TOKEN=[False, False, True], + POS_EMBED=[False, False, False], + QKV_PROJ_METHOD=['dw_bn', 'dw_bn', 'dw_bn'], + KERNEL_QKV=[3, 3, 3], + PADDING_KV=[1, 1, 1], + STRIDE_KV=[2, 2, 2], + PADDING_Q=[1, 1, 1], + STRIDE_Q=[1, 1, 1]) + model = ConvolutionalVisionTransformer( + in_chans=3, + act_layer=QuickGELU, + init=msvit_spec.get('INIT', 'trunc_norm'), + spec=msvit_spec, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["CvT_W24_384"], + use_ssld=use_ssld, + use_imagenet22kto1k_pretrained=True) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/darknet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/darknet.py new file mode 100644 index 000000000..2474c1587 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/darknet.py @@ -0,0 +1,199 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
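# --- Editor's note: illustrative sketch, not part of the synced files. ---
# It refers to the ConvolutionalVisionTransformer constructor above: the spec
# lists are indexed per stage, and each stage's DIM_EMBED becomes the next
# stage's in_chans. For the CvT_13_224 spec this resolves to:
dim_embed = [64, 192, 384]
in_chans = 3
for i in range(3):
    print(f"stage{i}: in_chans={in_chans} -> embed_dim={dim_embed[i]}")
    in_chans = dim_embed[i]
# The classifier head is then Linear(dim_embed[-1] == 384, class_num).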
+ +# reference: https://arxiv.org/abs/1804.02767 + +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "DarkNet53": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DarkNet53_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + input_channels, + output_channels, + filter_size, + stride, + padding, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=filter_size, + stride=stride, + padding=padding, + weight_attr=ParamAttr(name=name + ".conv.weights"), + bias_attr=False) + + bn_name = name + ".bn" + self._bn = BatchNorm( + num_channels=output_channels, + act="relu", + param_attr=ParamAttr(name=bn_name + ".scale"), + bias_attr=ParamAttr(name=bn_name + ".offset"), + moving_mean_name=bn_name + ".mean", + moving_variance_name=bn_name + ".var") + + def forward(self, inputs): + x = self._conv(inputs) + x = self._bn(x) + return x + + +class BasicBlock(nn.Layer): + def __init__(self, input_channels, output_channels, name=None): + super(BasicBlock, self).__init__() + + self._conv1 = ConvBNLayer( + input_channels, output_channels, 1, 1, 0, name=name + ".0") + self._conv2 = ConvBNLayer( + output_channels, output_channels * 2, 3, 1, 1, name=name + ".1") + + def forward(self, inputs): + x = self._conv1(inputs) + x = self._conv2(x) + return paddle.add(x=inputs, y=x) + + +class DarkNet(nn.Layer): + def __init__(self, class_num=1000): + super(DarkNet, self).__init__() + + self.stages = [1, 2, 8, 8, 4] + self._conv1 = ConvBNLayer(3, 32, 3, 1, 1, name="yolo_input") + self._conv2 = ConvBNLayer( + 32, 64, 3, 2, 1, name="yolo_input.downsample") + + self._basic_block_01 = BasicBlock(64, 32, name="stage.0.0") + self._downsample_0 = ConvBNLayer( + 64, 128, 3, 2, 1, name="stage.0.downsample") + + self._basic_block_11 = BasicBlock(128, 64, name="stage.1.0") + self._basic_block_12 = BasicBlock(128, 64, name="stage.1.1") + self._downsample_1 = ConvBNLayer( + 128, 256, 3, 2, 1, name="stage.1.downsample") + + self._basic_block_21 = BasicBlock(256, 128, name="stage.2.0") + self._basic_block_22 = BasicBlock(256, 128, name="stage.2.1") + self._basic_block_23 = BasicBlock(256, 128, name="stage.2.2") + self._basic_block_24 = BasicBlock(256, 128, name="stage.2.3") + self._basic_block_25 = BasicBlock(256, 128, name="stage.2.4") + self._basic_block_26 = BasicBlock(256, 128, name="stage.2.5") + self._basic_block_27 = BasicBlock(256, 128, name="stage.2.6") + self._basic_block_28 = BasicBlock(256, 128, name="stage.2.7") + self._downsample_2 = ConvBNLayer( + 256, 512, 3, 2, 1, name="stage.2.downsample") + + self._basic_block_31 = BasicBlock(512, 256, name="stage.3.0") + self._basic_block_32 = BasicBlock(512, 256, name="stage.3.1") + self._basic_block_33 = BasicBlock(512, 256, name="stage.3.2") + self._basic_block_34 = BasicBlock(512, 256, name="stage.3.3") + self._basic_block_35 = BasicBlock(512, 256, name="stage.3.4") + self._basic_block_36 = BasicBlock(512, 256, name="stage.3.5") + self._basic_block_37 = BasicBlock(512, 256, name="stage.3.6") + self._basic_block_38 = BasicBlock(512, 256, name="stage.3.7") + self._downsample_3 = ConvBNLayer( + 512, 1024, 3, 
2, 1, name="stage.3.downsample") + + self._basic_block_41 = BasicBlock(1024, 512, name="stage.4.0") + self._basic_block_42 = BasicBlock(1024, 512, name="stage.4.1") + self._basic_block_43 = BasicBlock(1024, 512, name="stage.4.2") + self._basic_block_44 = BasicBlock(1024, 512, name="stage.4.3") + + self._pool = AdaptiveAvgPool2D(1) + + stdv = 1.0 / math.sqrt(1024.0) + self._out = Linear( + 1024, + class_num, + weight_attr=ParamAttr( + name="fc_weights", initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, inputs): + x = self._conv1(inputs) + x = self._conv2(x) + + x = self._basic_block_01(x) + x = self._downsample_0(x) + + x = self._basic_block_11(x) + x = self._basic_block_12(x) + x = self._downsample_1(x) + + x = self._basic_block_21(x) + x = self._basic_block_22(x) + x = self._basic_block_23(x) + x = self._basic_block_24(x) + x = self._basic_block_25(x) + x = self._basic_block_26(x) + x = self._basic_block_27(x) + x = self._basic_block_28(x) + x = self._downsample_2(x) + + x = self._basic_block_31(x) + x = self._basic_block_32(x) + x = self._basic_block_33(x) + x = self._basic_block_34(x) + x = self._basic_block_35(x) + x = self._basic_block_36(x) + x = self._basic_block_37(x) + x = self._basic_block_38(x) + x = self._downsample_3(x) + + x = self._basic_block_41(x) + x = self._basic_block_42(x) + x = self._basic_block_43(x) + x = self._basic_block_44(x) + + x = self._pool(x) + x = paddle.squeeze(x, axis=[2, 3]) + x = self._out(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def DarkNet53(pretrained=False, use_ssld=False, **kwargs): + model = DarkNet(**kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DarkNet53"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/densenet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/densenet.py new file mode 100644 index 000000000..314579a88 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/densenet.py @@ -0,0 +1,346 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# reference: https://arxiv.org/abs/1608.06993 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "DenseNet121": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet121_pretrained.pdparams", + "DenseNet161": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet161_pretrained.pdparams", + "DenseNet169": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet169_pretrained.pdparams", + "DenseNet201": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet201_pretrained.pdparams", + "DenseNet264": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet264_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class BNACConvLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + pad=0, + groups=1, + act="relu", + name=None): + super(BNACConvLayer, self).__init__() + + self._batch_norm = BatchNorm( + num_channels, + act=act, + param_attr=ParamAttr(name=name + '_bn_scale'), + bias_attr=ParamAttr(name + '_bn_offset'), + moving_mean_name=name + '_bn_mean', + moving_variance_name=name + '_bn_variance') + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=pad, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + + def forward(self, input): + y = self._batch_norm(input) + y = self._conv(y) + return y + + +class DenseLayer(nn.Layer): + def __init__(self, num_channels, growth_rate, bn_size, dropout, name=None): + super(DenseLayer, self).__init__() + self.dropout = dropout + + self.bn_ac_func1 = BNACConvLayer( + num_channels=num_channels, + num_filters=bn_size * growth_rate, + filter_size=1, + pad=0, + stride=1, + name=name + "_x1") + + self.bn_ac_func2 = BNACConvLayer( + num_channels=bn_size * growth_rate, + num_filters=growth_rate, + filter_size=3, + pad=1, + stride=1, + name=name + "_x2") + + if dropout: + self.dropout_func = Dropout(p=dropout, mode="downscale_in_infer") + + def forward(self, input): + conv = self.bn_ac_func1(input) + conv = self.bn_ac_func2(conv) + if self.dropout: + conv = self.dropout_func(conv) + conv = paddle.concat([input, conv], axis=1) + return conv + + +class DenseBlock(nn.Layer): + def __init__(self, + num_channels, + num_layers, + bn_size, + growth_rate, + dropout, + name=None): + super(DenseBlock, self).__init__() + self.dropout = dropout + + self.dense_layer_func = [] + + pre_channel = num_channels + for layer in range(num_layers): + self.dense_layer_func.append( + self.add_sublayer( + "{}_{}".format(name, layer + 1), + DenseLayer( + num_channels=pre_channel, + growth_rate=growth_rate, + bn_size=bn_size, + dropout=dropout, + name=name + '_' + str(layer + 1)))) + pre_channel = pre_channel + growth_rate + + def forward(self, input): + conv = input + for func in self.dense_layer_func: + conv = func(conv) + return conv + + +class TransitionLayer(nn.Layer): + def __init__(self, num_channels, num_output_features, name=None): + super(TransitionLayer, self).__init__() + + self.conv_ac_func = BNACConvLayer( + num_channels=num_channels, 
+ num_filters=num_output_features, + filter_size=1, + pad=0, + stride=1, + name=name) + + self.pool2d_avg = AvgPool2D(kernel_size=2, stride=2, padding=0) + + def forward(self, input): + y = self.conv_ac_func(input) + y = self.pool2d_avg(y) + return y + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + pad=0, + groups=1, + act="relu", + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=pad, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=name + '_bn_scale'), + bias_attr=ParamAttr(name + '_bn_offset'), + moving_mean_name=name + '_bn_mean', + moving_variance_name=name + '_bn_variance') + + def forward(self, input): + y = self._conv(input) + y = self._batch_norm(y) + return y + + +class DenseNet(nn.Layer): + def __init__(self, layers=60, bn_size=4, dropout=0, class_num=1000): + super(DenseNet, self).__init__() + + supported_layers = [121, 161, 169, 201, 264] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + densenet_spec = { + 121: (64, 32, [6, 12, 24, 16]), + 161: (96, 48, [6, 12, 36, 24]), + 169: (64, 32, [6, 12, 32, 32]), + 201: (64, 32, [6, 12, 48, 32]), + 264: (64, 32, [6, 12, 64, 48]) + } + num_init_features, growth_rate, block_config = densenet_spec[layers] + + self.conv1_func = ConvBNLayer( + num_channels=3, + num_filters=num_init_features, + filter_size=7, + stride=2, + pad=3, + act='relu', + name="conv1") + + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_config = block_config + + self.dense_block_func_list = [] + self.transition_func_list = [] + pre_num_channels = num_init_features + num_features = num_init_features + for i, num_layers in enumerate(block_config): + self.dense_block_func_list.append( + self.add_sublayer( + "db_conv_{}".format(i + 2), + DenseBlock( + num_channels=pre_num_channels, + num_layers=num_layers, + bn_size=bn_size, + growth_rate=growth_rate, + dropout=dropout, + name='conv' + str(i + 2)))) + + num_features = num_features + num_layers * growth_rate + pre_num_channels = num_features + + if i != len(block_config) - 1: + self.transition_func_list.append( + self.add_sublayer( + "tr_conv{}_blk".format(i + 2), + TransitionLayer( + num_channels=pre_num_channels, + num_output_features=num_features // 2, + name='conv' + str(i + 2) + "_blk"))) + pre_num_channels = num_features // 2 + num_features = num_features // 2 + + self.batch_norm = BatchNorm( + num_features, + act="relu", + param_attr=ParamAttr(name='conv5_blk_bn_scale'), + bias_attr=ParamAttr(name='conv5_blk_bn_offset'), + moving_mean_name='conv5_blk_bn_mean', + moving_variance_name='conv5_blk_bn_variance') + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + stdv = 1.0 / math.sqrt(num_features * 1.0) + + self.out = Linear( + num_features, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_weights"), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, input): + conv = self.conv1_func(input) + conv = self.pool2d_max(conv) + + for i, num_layers in enumerate(self.block_config): + conv = self.dense_block_func_list[i](conv) + if i != len(self.block_config) - 1: + conv = self.transition_func_list[i](conv) + + conv = self.batch_norm(conv) + y = 
self.pool2d_avg(conv) + y = paddle.flatten(y, start_axis=1, stop_axis=-1) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def DenseNet121(pretrained=False, use_ssld=False, **kwargs): + model = DenseNet(layers=121, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DenseNet121"], use_ssld=use_ssld) + return model + + +def DenseNet161(pretrained=False, use_ssld=False, **kwargs): + model = DenseNet(layers=161, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DenseNet161"], use_ssld=use_ssld) + return model + + +def DenseNet169(pretrained=False, use_ssld=False, **kwargs): + model = DenseNet(layers=169, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DenseNet169"], use_ssld=use_ssld) + return model + + +def DenseNet201(pretrained=False, use_ssld=False, **kwargs): + model = DenseNet(layers=201, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DenseNet201"], use_ssld=use_ssld) + return model + + +def DenseNet264(pretrained=False, use_ssld=False, **kwargs): + model = DenseNet(layers=264, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DenseNet264"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/distilled_vision_transformer.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/distilled_vision_transformer.py new file mode 100644 index 000000000..1805b1d03 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/distilled_vision_transformer.py @@ -0,0 +1,273 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
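# --- Editor's note: worked arithmetic, not part of the synced files. ---
# Channel bookkeeping for the DenseNet definition above (densenet.py), using the
# DenseNet121 spec (64, 32, [6, 12, 24, 16]): each dense block concatenates
# num_layers * growth_rate channels, and each transition layer halves the width.
num_features = 64
for i, num_layers in enumerate([6, 12, 24, 16]):
    num_features += num_layers * 32      # dense block output width
    if i != 3:
        num_features //= 2               # transition layer between blocks
print(num_features)  # 1024 -> width of the final BatchNorm and the Linear head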
+ +# Code was heavily based on https://github.com/facebookresearch/deit +# reference: https://arxiv.org/abs/2012.12877 + +import paddle +import paddle.nn as nn +from .vision_transformer import VisionTransformer, Identity, trunc_normal_, zeros_ + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "DeiT_tiny_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_tiny_patch16_224_pretrained.pdparams", + "DeiT_small_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_small_patch16_224_pretrained.pdparams", + "DeiT_base_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_patch16_224_pretrained.pdparams", + "DeiT_tiny_distilled_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_tiny_distilled_patch16_224_pretrained.pdparams", + "DeiT_small_distilled_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_small_distilled_patch16_224_pretrained.pdparams", + "DeiT_base_distilled_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_distilled_patch16_224_pretrained.pdparams", + "DeiT_base_patch16_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_patch16_384_pretrained.pdparams", + "DeiT_base_distilled_patch16_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_distilled_patch16_384_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class DistilledVisionTransformer(VisionTransformer): + def __init__(self, + img_size=224, + patch_size=16, + class_num=1000, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=False, + norm_layer='nn.LayerNorm', + epsilon=1e-5, + **kwargs): + super().__init__( + img_size=img_size, + patch_size=patch_size, + class_num=class_num, + embed_dim=embed_dim, + depth=depth, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + norm_layer=norm_layer, + epsilon=epsilon, + **kwargs) + self.pos_embed = self.create_parameter( + shape=(1, self.patch_embed.num_patches + 2, self.embed_dim), + default_initializer=zeros_) + self.add_parameter("pos_embed", self.pos_embed) + + self.dist_token = self.create_parameter( + shape=(1, 1, self.embed_dim), default_initializer=zeros_) + self.add_parameter("cls_token", self.cls_token) + + self.head_dist = nn.Linear( + self.embed_dim, + self.class_num) if self.class_num > 0 else Identity() + + trunc_normal_(self.dist_token) + trunc_normal_(self.pos_embed) + self.head_dist.apply(self._init_weights) + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand((B, -1, -1)).astype(x.dtype) + dist_token = self.dist_token.expand((B, -1, -1)).astype(x.dtype) + x = paddle.concat((cls_tokens, dist_token, x), axis=1) + + x = x + self.pos_embed + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + x = self.norm(x) + return x[:, 0], x[:, 1] + + def forward(self, x): + x, x_dist = self.forward_features(x) + x = self.head(x) + x_dist = self.head_dist(x_dist) + return (x + x_dist) / 2 + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def DeiT_tiny_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=192, + depth=12, + num_heads=3, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_tiny_patch16_224"], + use_ssld=use_ssld) + return model + + +def DeiT_small_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_small_patch16_224"], + use_ssld=use_ssld) + return model + + +def DeiT_base_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_base_patch16_224"], + use_ssld=use_ssld) + return model + + +def DeiT_tiny_distilled_patch16_224(pretrained=False, use_ssld=False, + **kwargs): + model = DistilledVisionTransformer( + patch_size=16, + embed_dim=192, + depth=12, + num_heads=3, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_tiny_distilled_patch16_224"], + use_ssld=use_ssld) + return model + + +def DeiT_small_distilled_patch16_224(pretrained=False, + use_ssld=False, + **kwargs): + model = DistilledVisionTransformer( + patch_size=16, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_small_distilled_patch16_224"], + use_ssld=use_ssld) + return model + + +def DeiT_base_distilled_patch16_224(pretrained=False, use_ssld=False, + **kwargs): + model = DistilledVisionTransformer( + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_base_distilled_patch16_224"], + use_ssld=use_ssld) + return model + + +def DeiT_base_patch16_384(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + img_size=384, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_base_patch16_384"], + use_ssld=use_ssld) + return model + + +def DeiT_base_distilled_patch16_384(pretrained=False, use_ssld=False, + **kwargs): + model = DistilledVisionTransformer( + img_size=384, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["DeiT_base_distilled_patch16_384"], + use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/dla.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/dla.py new file mode 100644 index 000000000..03a73bcfb --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/dla.py @@ -0,0 +1,529 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was based on https://github.com/ucbdrive/dla +# reference: https://arxiv.org/abs/1707.06484 + +import math + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from paddle.nn.initializer import Normal, Constant + +from ..base.theseus_layer import Identity +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "DLA34": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA34_pretrained.pdparams", + "DLA46_c": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA46_c_pretrained.pdparams", + "DLA46x_c": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA46x_c_pretrained.pdparams", + "DLA60": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60_pretrained.pdparams", + "DLA60x": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_pretrained.pdparams", + "DLA60x_c": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_c_pretrained.pdparams", + "DLA102": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102_pretrained.pdparams", + "DLA102x": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102x_pretrained.pdparams", + "DLA102x2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102x2_pretrained.pdparams", + "DLA169": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA169_pretrained.pdparams" +} + +__all__ = MODEL_URLS.keys() + +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) 
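# --- Editor's note: illustrative sketch, not part of the synced files. ---
# zeros_ / ones_ above are Constant initializer instances; calling one on a
# parameter overwrites it in place. That is how the DLA weight init further
# down resets BatchNorm2D scales to 1 and offsets to 0.
import paddle.nn as nn
from paddle.nn.initializer import Constant

zeros_, ones_ = Constant(value=0.), Constant(value=1.)
bn = nn.BatchNorm2D(8)
ones_(bn.weight)   # scale (gamma) -> 1.0
zeros_(bn.bias)    # offset (beta) -> 0.0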
+ + +class DlaBasic(nn.Layer): + def __init__(self, inplanes, planes, stride=1, dilation=1, **cargs): + super(DlaBasic, self).__init__() + self.conv1 = nn.Conv2D( + inplanes, + planes, + kernel_size=3, + stride=stride, + padding=dilation, + bias_attr=False, + dilation=dilation) + self.bn1 = nn.BatchNorm2D(planes) + self.relu = nn.ReLU() + self.conv2 = nn.Conv2D( + planes, + planes, + kernel_size=3, + stride=1, + padding=dilation, + bias_attr=False, + dilation=dilation) + self.bn2 = nn.BatchNorm2D(planes) + self.stride = stride + + def forward(self, x, residual=None): + if residual is None: + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + out += residual + out = self.relu(out) + + return out + + +class DlaBottleneck(nn.Layer): + expansion = 2 + + def __init__(self, + inplanes, + outplanes, + stride=1, + dilation=1, + cardinality=1, + base_width=64): + super(DlaBottleneck, self).__init__() + self.stride = stride + mid_planes = int( + math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + + self.conv1 = nn.Conv2D( + inplanes, mid_planes, kernel_size=1, bias_attr=False) + self.bn1 = nn.BatchNorm2D(mid_planes) + self.conv2 = nn.Conv2D( + mid_planes, + mid_planes, + kernel_size=3, + stride=stride, + padding=dilation, + bias_attr=False, + dilation=dilation, + groups=cardinality) + self.bn2 = nn.BatchNorm2D(mid_planes) + self.conv3 = nn.Conv2D( + mid_planes, outplanes, kernel_size=1, bias_attr=False) + self.bn3 = nn.BatchNorm2D(outplanes) + self.relu = nn.ReLU() + + def forward(self, x, residual=None): + if residual is None: + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + out += residual + out = self.relu(out) + + return out + + +class DlaRoot(nn.Layer): + def __init__(self, in_channels, out_channels, kernel_size, residual): + super(DlaRoot, self).__init__() + self.conv = nn.Conv2D( + in_channels, + out_channels, + 1, + stride=1, + bias_attr=False, + padding=(kernel_size - 1) // 2) + self.bn = nn.BatchNorm2D(out_channels) + self.relu = nn.ReLU() + self.residual = residual + + def forward(self, *x): + children = x + x = self.conv(paddle.concat(x, 1)) + x = self.bn(x) + if self.residual: + x += children[0] + x = self.relu(x) + + return x + + +class DlaTree(nn.Layer): + def __init__(self, + levels, + block, + in_channels, + out_channels, + stride=1, + dilation=1, + cardinality=1, + base_width=64, + level_root=False, + root_dim=0, + root_kernel_size=1, + root_residual=False): + super(DlaTree, self).__init__() + if root_dim == 0: + root_dim = 2 * out_channels + if level_root: + root_dim += in_channels + + self.downsample = nn.MaxPool2D( + stride, stride=stride) if stride > 1 else Identity() + self.project = Identity() + cargs = dict( + dilation=dilation, cardinality=cardinality, base_width=base_width) + + if levels == 1: + self.tree1 = block(in_channels, out_channels, stride, **cargs) + self.tree2 = block(out_channels, out_channels, 1, **cargs) + if in_channels != out_channels: + self.project = nn.Sequential( + nn.Conv2D( + in_channels, + out_channels, + kernel_size=1, + stride=1, + bias_attr=False), + nn.BatchNorm2D(out_channels)) + else: + cargs.update( + dict( + root_kernel_size=root_kernel_size, + root_residual=root_residual)) + self.tree1 = DlaTree( + levels - 1, + block, + in_channels, + out_channels, + 
stride, + root_dim=0, + **cargs) + self.tree2 = DlaTree( + levels - 1, + block, + out_channels, + out_channels, + root_dim=root_dim + out_channels, + **cargs) + + if levels == 1: + self.root = DlaRoot(root_dim, out_channels, root_kernel_size, + root_residual) + + self.level_root = level_root + self.root_dim = root_dim + self.levels = levels + + def forward(self, x, residual=None, children=None): + children = [] if children is None else children + bottom = self.downsample(x) + residual = self.project(bottom) + + if self.level_root: + children.append(bottom) + x1 = self.tree1(x, residual) + + if self.levels == 1: + x2 = self.tree2(x1) + x = self.root(x2, x1, *children) + else: + children.append(x1) + x = self.tree2(x1, children=children) + return x + + +class DLA(nn.Layer): + def __init__(self, + levels, + channels, + in_chans=3, + cardinality=1, + base_width=64, + block=DlaBottleneck, + residual_root=False, + drop_rate=0.0, + class_num=1000, + with_pool=True): + super(DLA, self).__init__() + self.channels = channels + self.class_num = class_num + self.with_pool = with_pool + self.cardinality = cardinality + self.base_width = base_width + self.drop_rate = drop_rate + + self.base_layer = nn.Sequential( + nn.Conv2D( + in_chans, + channels[0], + kernel_size=7, + stride=1, + padding=3, + bias_attr=False), + nn.BatchNorm2D(channels[0]), + nn.ReLU()) + + self.level0 = self._make_conv_level(channels[0], channels[0], + levels[0]) + self.level1 = self._make_conv_level( + channels[0], channels[1], levels[1], stride=2) + + cargs = dict( + cardinality=cardinality, + base_width=base_width, + root_residual=residual_root) + + self.level2 = DlaTree( + levels[2], + block, + channels[1], + channels[2], + 2, + level_root=False, + **cargs) + self.level3 = DlaTree( + levels[3], + block, + channels[2], + channels[3], + 2, + level_root=True, + **cargs) + self.level4 = DlaTree( + levels[4], + block, + channels[3], + channels[4], + 2, + level_root=True, + **cargs) + self.level5 = DlaTree( + levels[5], + block, + channels[4], + channels[5], + 2, + level_root=True, + **cargs) + + self.feature_info = [ + # rare to have a meaningful stride 1 level + dict( + num_chs=channels[0], reduction=1, module='level0'), + dict( + num_chs=channels[1], reduction=2, module='level1'), + dict( + num_chs=channels[2], reduction=4, module='level2'), + dict( + num_chs=channels[3], reduction=8, module='level3'), + dict( + num_chs=channels[4], reduction=16, module='level4'), + dict( + num_chs=channels[5], reduction=32, module='level5'), + ] + + self.num_features = channels[-1] + + if with_pool: + self.global_pool = nn.AdaptiveAvgPool2D(1) + + if class_num > 0: + self.fc = nn.Conv2D(self.num_features, class_num, 1) + + for m in self.sublayers(): + if isinstance(m, nn.Conv2D): + n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels + normal_ = Normal(mean=0.0, std=math.sqrt(2. 
/ n)) + normal_(m.weight) + elif isinstance(m, nn.BatchNorm2D): + ones_(m.weight) + zeros_(m.bias) + + def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): + modules = [] + for i in range(convs): + modules.extend([ + nn.Conv2D( + inplanes, + planes, + kernel_size=3, + stride=stride if i == 0 else 1, + padding=dilation, + bias_attr=False, + dilation=dilation), nn.BatchNorm2D(planes), nn.ReLU() + ]) + inplanes = planes + return nn.Sequential(*modules) + + def forward_features(self, x): + x = self.base_layer(x) + + x = self.level0(x) + x = self.level1(x) + x = self.level2(x) + x = self.level3(x) + x = self.level4(x) + x = self.level5(x) + + return x + + def forward(self, x): + x = self.forward_features(x) + + if self.with_pool: + x = self.global_pool(x) + + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + + if self.class_num > 0: + x = self.fc(x) + x = x.flatten(1) + + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def DLA34(pretrained=False, **kwargs): + model = DLA(levels=(1, 1, 1, 2, 2, 1), + channels=(16, 32, 64, 128, 256, 512), + block=DlaBasic, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DLA34"]) + return model + + +def DLA46_c(pretrained=False, **kwargs): + model = DLA(levels=(1, 1, 1, 2, 2, 1), + channels=(16, 32, 64, 64, 128, 256), + block=DlaBottleneck, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DLA46_c"]) + return model + + +def DLA46x_c(pretrained=False, **kwargs): + model = DLA(levels=(1, 1, 1, 2, 2, 1), + channels=(16, 32, 64, 64, 128, 256), + block=DlaBottleneck, + cardinality=32, + base_width=4, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DLA46x_c"]) + return model + + +def DLA60(pretrained=False, **kwargs): + model = DLA(levels=(1, 1, 1, 2, 3, 1), + channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottleneck, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DLA60"]) + return model + + +def DLA60x(pretrained=False, **kwargs): + model = DLA(levels=(1, 1, 1, 2, 3, 1), + channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottleneck, + cardinality=32, + base_width=4, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DLA60x"]) + return model + + +def DLA60x_c(pretrained=False, **kwargs): + model = DLA(levels=(1, 1, 1, 2, 3, 1), + channels=(16, 32, 64, 64, 128, 256), + block=DlaBottleneck, + cardinality=32, + base_width=4, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DLA60x_c"]) + return model + + +def DLA102(pretrained=False, **kwargs): + model = DLA(levels=(1, 1, 1, 3, 4, 1), + channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottleneck, + residual_root=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DLA102"]) + return model + + +def DLA102x(pretrained=False, **kwargs): + model = DLA(levels=(1, 1, 1, 3, 4, 1), + channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottleneck, + cardinality=32, + base_width=4, + residual_root=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DLA102x"]) + return model + + +def DLA102x2(pretrained=False, **kwargs): + model = DLA(levels=(1, 1, 1, 3, 4, 1), + channels=(16, 32, 128, 256, 512, 1024), + 
block=DlaBottleneck, + cardinality=64, + base_width=4, + residual_root=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DLA102x2"]) + return model + + +def DLA169(pretrained=False, **kwargs): + model = DLA(levels=(1, 1, 2, 3, 5, 1), + channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottleneck, + residual_root=True, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DLA169"]) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/dpn.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/dpn.py new file mode 100644 index 000000000..ef322dea9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/dpn.py @@ -0,0 +1,453 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1707.01629 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import sys +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "DPN68": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN68_pretrained.pdparams", + "DPN92": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN92_pretrained.pdparams", + "DPN98": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN98_pretrained.pdparams", + "DPN107": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN107_pretrained.pdparams", + "DPN131": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN131_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + pad=0, + groups=1, + act="relu", + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=pad, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=name + '_bn_scale'), + bias_attr=ParamAttr(name + '_bn_offset'), + moving_mean_name=name + '_bn_mean', + moving_variance_name=name + '_bn_variance') + + def forward(self, input): + y = self._conv(input) + y = self._batch_norm(y) + return y + + +class BNACConvLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + pad=0, + groups=1, + act="relu", + name=None): + super(BNACConvLayer, self).__init__() + self.num_channels = num_channels + + self._batch_norm = BatchNorm( + num_channels, + act=act, + param_attr=ParamAttr(name=name + '_bn_scale'), + 
bias_attr=ParamAttr(name + '_bn_offset'), + moving_mean_name=name + '_bn_mean', + moving_variance_name=name + '_bn_variance') + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=pad, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + + def forward(self, input): + y = self._batch_norm(input) + y = self._conv(y) + return y + + +class DualPathFactory(nn.Layer): + def __init__(self, + num_channels, + num_1x1_a, + num_3x3_b, + num_1x1_c, + inc, + G, + _type='normal', + name=None): + super(DualPathFactory, self).__init__() + + self.num_1x1_c = num_1x1_c + self.inc = inc + self.name = name + + kw = 3 + kh = 3 + pw = (kw - 1) // 2 + ph = (kh - 1) // 2 + + # type + if _type == 'proj': + key_stride = 1 + self.has_proj = True + elif _type == 'down': + key_stride = 2 + self.has_proj = True + elif _type == 'normal': + key_stride = 1 + self.has_proj = False + else: + print("not implemented now!!!") + sys.exit(1) + + data_in_ch = sum(num_channels) if isinstance(num_channels, + list) else num_channels + + if self.has_proj: + self.c1x1_w_func = BNACConvLayer( + num_channels=data_in_ch, + num_filters=num_1x1_c + 2 * inc, + filter_size=(1, 1), + pad=(0, 0), + stride=(key_stride, key_stride), + name=name + "_match") + + self.c1x1_a_func = BNACConvLayer( + num_channels=data_in_ch, + num_filters=num_1x1_a, + filter_size=(1, 1), + pad=(0, 0), + name=name + "_conv1") + + self.c3x3_b_func = BNACConvLayer( + num_channels=num_1x1_a, + num_filters=num_3x3_b, + filter_size=(kw, kh), + pad=(pw, ph), + stride=(key_stride, key_stride), + groups=G, + name=name + "_conv2") + + self.c1x1_c_func = BNACConvLayer( + num_channels=num_3x3_b, + num_filters=num_1x1_c + inc, + filter_size=(1, 1), + pad=(0, 0), + name=name + "_conv3") + + def forward(self, input): + # PROJ + if isinstance(input, list): + data_in = paddle.concat([input[0], input[1]], axis=1) + else: + data_in = input + + if self.has_proj: + c1x1_w = self.c1x1_w_func(data_in) + data_o1, data_o2 = paddle.split( + c1x1_w, num_or_sections=[self.num_1x1_c, 2 * self.inc], axis=1) + else: + data_o1 = input[0] + data_o2 = input[1] + + c1x1_a = self.c1x1_a_func(data_in) + c3x3_b = self.c3x3_b_func(c1x1_a) + c1x1_c = self.c1x1_c_func(c3x3_b) + + c1x1_c1, c1x1_c2 = paddle.split( + c1x1_c, num_or_sections=[self.num_1x1_c, self.inc], axis=1) + + # OUTPUTS + summ = paddle.add(x=data_o1, y=c1x1_c1) + dense = paddle.concat([data_o2, c1x1_c2], axis=1) + # tensor, channels + return [summ, dense] + + +class DPN(nn.Layer): + def __init__(self, layers=68, class_num=1000): + super(DPN, self).__init__() + + self._class_num = class_num + + args = self.get_net_args(layers) + bws = args['bw'] + inc_sec = args['inc_sec'] + rs = args['r'] + k_r = args['k_r'] + k_sec = args['k_sec'] + G = args['G'] + init_num_filter = args['init_num_filter'] + init_filter_size = args['init_filter_size'] + init_padding = args['init_padding'] + + self.k_sec = k_sec + + self.conv1_x_1_func = ConvBNLayer( + num_channels=3, + num_filters=init_num_filter, + filter_size=init_filter_size, + stride=2, + pad=init_padding, + act='relu', + name="conv1") + + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + num_channel_dpn = init_num_filter + + self.dpn_func_list = [] + #conv2 - conv5 + match_list, num = [], 0 + for gc in range(4): + bw = bws[gc] + inc = inc_sec[gc] + R = (k_r * bw) // rs[gc] + if gc == 0: + _type1 = 'proj' + _type2 = 'normal' + match = 1 + else: + _type1 = 'down' + _type2 
= 'normal' + match = match + k_sec[gc - 1] + match_list.append(match) + self.dpn_func_list.append( + self.add_sublayer( + "dpn{}".format(match), + DualPathFactory( + num_channels=num_channel_dpn, + num_1x1_a=R, + num_3x3_b=R, + num_1x1_c=bw, + inc=inc, + G=G, + _type=_type1, + name="dpn" + str(match)))) + num_channel_dpn = [bw, 3 * inc] + + for i_ly in range(2, k_sec[gc] + 1): + num += 1 + if num in match_list: + num += 1 + self.dpn_func_list.append( + self.add_sublayer( + "dpn{}".format(num), + DualPathFactory( + num_channels=num_channel_dpn, + num_1x1_a=R, + num_3x3_b=R, + num_1x1_c=bw, + inc=inc, + G=G, + _type=_type2, + name="dpn" + str(num)))) + + num_channel_dpn = [ + num_channel_dpn[0], num_channel_dpn[1] + inc + ] + + out_channel = sum(num_channel_dpn) + + self.conv5_x_x_bn = BatchNorm( + num_channels=sum(num_channel_dpn), + act="relu", + param_attr=ParamAttr(name='final_concat_bn_scale'), + bias_attr=ParamAttr('final_concat_bn_offset'), + moving_mean_name='final_concat_bn_mean', + moving_variance_name='final_concat_bn_variance') + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + stdv = 0.01 + + self.out = Linear( + out_channel, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_weights"), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, input): + conv1_x_1 = self.conv1_x_1_func(input) + convX_x_x = self.pool2d_max(conv1_x_1) + + dpn_idx = 0 + for gc in range(4): + convX_x_x = self.dpn_func_list[dpn_idx](convX_x_x) + dpn_idx += 1 + for i_ly in range(2, self.k_sec[gc] + 1): + convX_x_x = self.dpn_func_list[dpn_idx](convX_x_x) + dpn_idx += 1 + + conv5_x_x = paddle.concat(convX_x_x, axis=1) + conv5_x_x = self.conv5_x_x_bn(conv5_x_x) + + y = self.pool2d_avg(conv5_x_x) + y = paddle.flatten(y, start_axis=1, stop_axis=-1) + y = self.out(y) + return y + + def get_net_args(self, layers): + if layers == 68: + k_r = 128 + G = 32 + k_sec = [3, 4, 12, 3] + inc_sec = [16, 32, 32, 64] + bw = [64, 128, 256, 512] + r = [64, 64, 64, 64] + init_num_filter = 10 + init_filter_size = 3 + init_padding = 1 + elif layers == 92: + k_r = 96 + G = 32 + k_sec = [3, 4, 20, 3] + inc_sec = [16, 32, 24, 128] + bw = [256, 512, 1024, 2048] + r = [256, 256, 256, 256] + init_num_filter = 64 + init_filter_size = 7 + init_padding = 3 + elif layers == 98: + k_r = 160 + G = 40 + k_sec = [3, 6, 20, 3] + inc_sec = [16, 32, 32, 128] + bw = [256, 512, 1024, 2048] + r = [256, 256, 256, 256] + init_num_filter = 96 + init_filter_size = 7 + init_padding = 3 + elif layers == 107: + k_r = 200 + G = 50 + k_sec = [4, 8, 20, 3] + inc_sec = [20, 64, 64, 128] + bw = [256, 512, 1024, 2048] + r = [256, 256, 256, 256] + init_num_filter = 128 + init_filter_size = 7 + init_padding = 3 + elif layers == 131: + k_r = 160 + G = 40 + k_sec = [4, 8, 28, 3] + inc_sec = [16, 32, 32, 128] + bw = [256, 512, 1024, 2048] + r = [256, 256, 256, 256] + init_num_filter = 128 + init_filter_size = 7 + init_padding = 3 + else: + raise NotImplementedError + net_arg = { + 'k_r': k_r, + 'G': G, + 'k_sec': k_sec, + 'inc_sec': inc_sec, + 'bw': bw, + 'r': r + } + net_arg['init_num_filter'] = init_num_filter + net_arg['init_filter_size'] = init_filter_size + net_arg['init_padding'] = init_padding + + return net_arg + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained 
type is not available. Please use `string` or `boolean` type." + ) + + +def DPN68(pretrained=False, use_ssld=False, **kwargs): + model = DPN(layers=68, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DPN68"]) + return model + + +def DPN92(pretrained=False, use_ssld=False, **kwargs): + model = DPN(layers=92, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DPN92"]) + return model + + +def DPN98(pretrained=False, use_ssld=False, **kwargs): + model = DPN(layers=98, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DPN98"]) + return model + + +def DPN107(pretrained=False, use_ssld=False, **kwargs): + model = DPN(layers=107, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DPN107"]) + return model + + +def DPN131(pretrained=False, use_ssld=False, **kwargs): + model = DPN(layers=131, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["DPN131"]) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/dsnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/dsnet.py new file mode 100644 index 000000000..20b28c2b0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/dsnet.py @@ -0,0 +1,701 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
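# --- Editor's note: illustrative sketch, not part of the synced files. ---
# It mirrors the per-block bookkeeping of DualPathFactory.forward in dpn.py
# above: the 1x1 output is split into a residual part (num_1x1_c channels) that
# is added, and a dense part (inc channels) that is concatenated, so the
# residual path keeps a fixed width while the dense path grows. Sizes are
# illustrative.
import paddle

num_1x1_c, inc = 256, 32
data_o1 = paddle.randn([1, num_1x1_c, 14, 14])        # running residual path
data_o2 = paddle.randn([1, 3 * inc, 14, 14])          # running dense path
c1x1_c = paddle.randn([1, num_1x1_c + inc, 14, 14])   # this block's 1x1 output
c1, c2 = paddle.split(c1x1_c, num_or_sections=[num_1x1_c, inc], axis=1)
summ = paddle.add(x=data_o1, y=c1)                    # residual path stays 256 wide
dense = paddle.concat([data_o2, c2], axis=1)          # dense path grows 96 -> 128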
+ +# reference: https://arxiv.org/abs/2105.14734v4 + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from .vision_transformer import to_2tuple, zeros_, ones_, VisionTransformer, Identity, zeros_ +from functools import partial +from paddle.nn.initializer import TruncatedNormal, Constant, Normal + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "DSNet_tiny": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DSNet_tiny_pretrained.pdparams", + "DSNet_small": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DSNet_small_pretrained.pdparams", + "DSNet_base": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DSNet_base_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2D(in_features, hidden_features, 1) + self.act = act_layer() + self.fc2 = nn.Conv2D(hidden_features, out_features, 1) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class DWConv(nn.Layer): + def __init__(self, dim=768): + super(DWConv, self).__init__() + self.dwconv = nn.Conv2D(dim, dim, 3, 1, 1, bias=True, groups=dim) + + def forward(self, x): + x = self.dwconv(x) + return x + + +class DWConvMlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2D(in_features, hidden_features, 1) + self.dwconv = DWConv(hidden_features) + self.act = act_layer() + self.fc2 = nn.Conv2D(hidden_features, out_features, 1) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.dwconv(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def drop_path(x, drop_prob=0., training=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + """ + if drop_prob == 0. or not training: + return x + keep_prob = paddle.to_tensor(1 - drop_prob, dtype=x.dtype) + shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) + random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype) + random_tensor = paddle.floor(random_tensor) # binarize + output = x.divide(keep_prob) * random_tensor + return output + + +class DropPath(nn.Layer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
+ """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class Attention(nn.Layer): + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.attn_drop = nn.Dropout(attn_drop) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + C = int(C // 3) + qkv = x.reshape( + (B, N, 3, self.num_heads, C // self.num_heads)).transpose( + (2, 0, 3, 1, 4)) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn = (q.matmul(k.transpose((0, 1, 3, 2)))) * self.scale + attn = F.softmax(attn, axis=-1) + attn = self.attn_drop(attn) + + x = (attn.matmul(v)).transpose((0, 2, 1, 3)).reshape((B, N, C)) + x = self.proj_drop(x) + return x + + +class Cross_Attention(nn.Layer): + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.attn_drop = nn.Dropout(attn_drop) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, tokens_q, memory_k, memory_v, shape=None): + assert shape is not None + attn = (tokens_q.matmul(memory_k.transpose((0, 1, 3, 2)))) * self.scale + attn = F.softmax(attn, axis=-1) + attn = self.attn_drop(attn) + + x = (attn.matmul(memory_v)).transpose((0, 2, 1, 3)).reshape( + (shape[0], shape[1], shape[2])) + x = self.proj_drop(x) + return x + + +class MixBlock(nn.Layer): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + downsample=2, + conv_ffn=False): + super().__init__() + self.pos_embed = nn.Conv2D(dim, dim, 3, padding=1, groups=dim) + self.dim = dim + self.norm1 = nn.BatchNorm2D(dim) + self.conv1 = nn.Conv2D(dim, dim, 1) + self.conv2 = nn.Conv2D(dim, dim, 1) + self.dim_conv = int(dim * 0.5) + self.dim_sa = dim - self.dim_conv + self.norm_conv1 = nn.BatchNorm2D(self.dim_conv) + self.norm_sa1 = nn.LayerNorm(self.dim_sa) + self.conv = nn.Conv2D( + self.dim_conv, self.dim_conv, 3, padding=1, groups=self.dim_conv) + self.channel_up = nn.Linear(self.dim_sa, 3 * self.dim_sa) + self.cross_channel_up_conv = nn.Conv2D(self.dim_conv, + 3 * self.dim_conv, 1) + self.cross_channel_up_sa = nn.Linear(self.dim_sa, 3 * self.dim_sa) + self.fuse_channel_conv = nn.Linear(self.dim_conv, self.dim_conv) + self.fuse_channel_sa = nn.Linear(self.dim_sa, self.dim_sa) + self.num_heads = num_heads + self.attn = Attention( + self.dim_sa, + num_heads=self.num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=0.1, + proj_drop=drop) + self.cross_attn = Cross_Attention( + self.dim_sa, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=0.1, + proj_drop=drop) + self.norm_conv2 = nn.BatchNorm2D(self.dim_conv) + self.norm_sa2 = nn.LayerNorm(self.dim_sa) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else Identity() + self.norm2 = nn.BatchNorm2D(dim) + self.downsample = downsample + mlp_hidden_dim = int(dim * mlp_ratio) + if conv_ffn: + self.mlp = DWConvMlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + else: + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + def forward(self, x): + x = x + self.pos_embed(x) + _, _, H, W = x.shape + residual = x + x = self.norm1(x) + x = self.conv1(x) + + qkv = x[:, :self.dim_sa, :] + conv = x[:, self.dim_sa:, :, :] + residual_conv = conv + conv = residual_conv + self.conv(self.norm_conv1(conv)) + + sa = F.interpolate( + qkv, + size=(H // self.downsample, W // self.downsample), + mode='bilinear') + B, _, H_down, W_down = sa.shape + sa = sa.flatten(2).transpose([0, 2, 1]) + residual_sa = sa + sa = self.norm_sa1(sa) + sa = self.channel_up(sa) + sa = residual_sa + self.attn(sa) + + # cross attention + residual_conv_co = conv + residual_sa_co = sa + conv_qkv = self.cross_channel_up_conv(self.norm_conv2(conv)) + conv_qkv = conv_qkv.flatten(2).transpose([0, 2, 1]) + + sa_qkv = self.cross_channel_up_sa(self.norm_sa2(sa)) + + B_conv, N_conv, C_conv = conv_qkv.shape + C_conv = int(C_conv // 3) + conv_qkv = conv_qkv.reshape((B_conv, N_conv, 3, self.num_heads, + C_conv // self.num_heads)).transpose( + (2, 0, 3, 1, 4)) + conv_q, conv_k, conv_v = conv_qkv[0], conv_qkv[1], conv_qkv[2] + + B_sa, N_sa, C_sa = sa_qkv.shape + C_sa = int(C_sa // 3) + sa_qkv = sa_qkv.reshape( + (B_sa, N_sa, 3, self.num_heads, C_sa // self.num_heads)).transpose( + (2, 0, 3, 1, 4)) + sa_q, sa_k, sa_v = sa_qkv[0], sa_qkv[1], sa_qkv[2] + + # sa -> conv + conv = self.cross_attn( + conv_q, sa_k, sa_v, shape=(B_conv, N_conv, C_conv)) + conv = self.fuse_channel_conv(conv) + conv = conv.reshape((B, H, W, C_conv)).transpose((0, 3, 1, 2)) + conv = residual_conv_co + conv + + # conv -> sa + sa = self.cross_attn(sa_q, conv_k, conv_v, shape=(B_sa, N_sa, C_sa)) + sa = residual_sa_co + self.fuse_channel_sa(sa) + sa = sa.reshape((B, H_down, W_down, C_sa)).transpose((0, 3, 1, 2)) + sa = F.interpolate(sa, size=(H, W), mode='bilinear') + x = paddle.concat([conv, sa], axis=1) + x = residual + self.drop_path(self.conv2(x)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Block(nn.Layer): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Layer): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // + patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = nn.Conv2D( + in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x) + return x + + +class OverlapPatchEmbed(nn.Layer): + """ Image to Overlapping Patch Embedding + """ + + def __init__(self, + img_size=224, + patch_size=7, + stride=4, + in_chans=3, + embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + self.img_size = img_size + self.patch_size = patch_size + self.H, self.W = img_size[0] // patch_size[0], img_size[ + 1] // patch_size[1] + self.num_patches = self.H * self.W + self.proj = nn.Conv2D( + in_chans, + embed_dim, + kernel_size=patch_size, + stride=stride, + padding=(patch_size[0] // 2, patch_size[1] // 2)) + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
+ x = self.proj(x) + return x + + +class MixVisionTransformer(nn.Layer): + """ Mixed Vision Transformer for DSNet + A PaddlePaddle impl of : `Dual-stream Network for Visual Recognition` - https://arxiv.org/abs/2105.14734v4 + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + class_num=1000, + embed_dim=[64, 128, 320, 512], + depth=[2, 2, 4, 1], + num_heads=[1, 2, 5, 8], + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + representation_size=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + norm_layer=None, + overlap_embed=False, + conv_ffn=False): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + class_num (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + qk_scale (float): override default qk scale of head_dim ** -0.5 if set + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer: (nn.Layer): normalization layer + overlap_embed (bool): enable overlapped patch embedding if True + conv_ffn (bool): enable depthwise convolution for mlp if True + """ + super().__init__() + self.class_num = class_num + self.num_features = self.embed_dim = embed_dim + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + downsamples = [8, 4, 2, 2] + if overlap_embed: + self.patch_embed1 = OverlapPatchEmbed( + img_size=img_size, + patch_size=7, + stride=4, + in_chans=in_chans, + embed_dim=embed_dim[0]) + self.patch_embed2 = OverlapPatchEmbed( + img_size=img_size // 4, + patch_size=3, + stride=2, + in_chans=embed_dim[0], + embed_dim=embed_dim[1]) + self.patch_embed3 = OverlapPatchEmbed( + img_size=img_size // 8, + patch_size=3, + stride=2, + in_chans=embed_dim[1], + embed_dim=embed_dim[2]) + self.patch_embed4 = OverlapPatchEmbed( + img_size=img_size // 16, + patch_size=3, + stride=2, + in_chans=embed_dim[2], + embed_dim=embed_dim[3]) + else: + self.patch_embed1 = PatchEmbed( + img_size=img_size, + patch_size=4, + in_chans=in_chans, + embed_dim=embed_dim[0]) + self.patch_embed2 = PatchEmbed( + img_size=img_size // 4, + patch_size=2, + in_chans=embed_dim[0], + embed_dim=embed_dim[1]) + self.patch_embed3 = PatchEmbed( + img_size=img_size // 8, + patch_size=2, + in_chans=embed_dim[1], + embed_dim=embed_dim[2]) + self.patch_embed4 = PatchEmbed( + img_size=img_size // 16, + patch_size=2, + in_chans=embed_dim[2], + embed_dim=embed_dim[3]) + + self.pos_drop = nn.Dropout(p=drop_rate) + self.mixture = False + dpr = [ + x.item() for x in paddle.linspace(0, drop_path_rate, sum(depth)) + ] + self.blocks1 = nn.LayerList([ + MixBlock( + dim=embed_dim[0], + num_heads=num_heads[0], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + downsample=downsamples[0], + conv_ffn=conv_ffn) for i in range(depth[0]) + ]) + + self.blocks2 = nn.LayerList([ + MixBlock( + dim=embed_dim[1], + num_heads=num_heads[1], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + 
downsample=downsamples[1], + conv_ffn=conv_ffn) for i in range(depth[1]) + ]) + + self.blocks3 = nn.LayerList([ + MixBlock( + dim=embed_dim[2], + num_heads=num_heads[2], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + downsample=downsamples[2], + conv_ffn=conv_ffn) for i in range(depth[2]) + ]) + + if self.mixture: + self.blocks4 = nn.LayerList([ + Block( + dim=embed_dim[3], + num_heads=16, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + downsample=downsamples[3], + conv_ffn=conv_ffn) for i in range(depth[3]) + ]) + self.norm = norm_layer(embed_dim[-1]) + else: + self.blocks4 = nn.LayerList([ + MixBlock( + dim=embed_dim[3], + num_heads=num_heads[3], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + downsample=downsamples[3], + conv_ffn=conv_ffn) for i in range(depth[3]) + ]) + self.norm = nn.BatchNorm2D(embed_dim[-1]) + + # Representation layer + if representation_size: + self.num_features = representation_size + self.pre_logits = nn.Sequential( + OrderedDict([('fc', nn.Linear(embed_dim, representation_size)), + ('act', nn.Tanh())])) + else: + self.pre_logits = Identity() + + # Classifier head + self.head = nn.Linear(embed_dim[-1], + class_num) if class_num > 0 else Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + TruncatedNormal(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def get_classifier(self): + return self.head + + def reset_classifier(self, class_num, global_pool=''): + self.class_num = class_num + self.head = nn.Linear(self.embed_dim, + class_num) if class_num > 0 else Identity() + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed1(x) + x = self.pos_drop(x) + for blk in self.blocks1: + x = blk(x) + x = self.patch_embed2(x) + for blk in self.blocks2: + x = blk(x) + x = self.patch_embed3(x) + for blk in self.blocks3: + x = blk(x) + x = self.patch_embed4(x) + if self.mixture: + x = x.flatten(2).transpose([0, 2, 1]) + for blk in self.blocks4: + x = blk(x) + x = self.norm(x) + x = self.pre_logits(x) + return x + + def forward(self, x): + x = self.forward_features(x) + if self.mixture: + x = x.mean(1) + else: + x = x.flatten(2).mean(-1) + x = self.head(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def DSNet_tiny(pretrained=False, use_ssld=False, **kwargs): + model = MixVisionTransformer( + patch_size=16, + depth=[2, 2, 4, 1], + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, eps=1e-6), + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DSNet_tiny"], use_ssld=use_ssld) + return model + + +def DSNet_small(pretrained=False, use_ssld=False, **kwargs): + model = MixVisionTransformer( + patch_size=16, + depth=[3, 4, 8, 3], + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, eps=1e-6), + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DSNet_small"], use_ssld=use_ssld) + return model + + +def DSNet_base(pretrained=False, use_ssld=False, **kwargs): + model = MixVisionTransformer( + patch_size=16, + depth=[3, 4, 28, 3], + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, eps=1e-6), + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["DSNet_base"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/efficientnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/efficientnet.py new file mode 100644 index 000000000..9217efd4f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/efficientnet.py @@ -0,0 +1,1028 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
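The three DSNet variants above share one MixVisionTransformer backbone and differ only in stage depths: [2, 2, 4, 1] for DSNet_tiny, [3, 4, 8, 3] for DSNet_small, and [3, 4, 28, 3] for DSNet_base. A minimal forward-pass sketch, assuming the module is importable as shown (the import path is illustrative):

    import paddle

    # Illustrative import path; adjust to wherever ppcls_2.6 is vendored.
    from ppcls.arch.backbone.model_zoo.dsnet import DSNet_tiny

    model = DSNet_tiny(pretrained=False)   # random init, no download
    model.eval()
    x = paddle.randn([1, 3, 224, 224])     # PatchEmbed asserts the default 224x224 input size
    with paddle.no_grad():
        logits = model(x)                  # [1, 1000] with the default class_num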
+ +# Code was based on https://github.com/lukemelas/EfficientNet-PyTorch +# reference: https://arxiv.org/abs/1905.11946 + +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +import math +import collections +import re +import copy + +from ..base.theseus_layer import TheseusLayer +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "EfficientNetB0_small": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB0_small_pretrained.pdparams", + "EfficientNetB0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB0_pretrained.pdparams", + "EfficientNetB1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB1_pretrained.pdparams", + "EfficientNetB2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB2_pretrained.pdparams", + "EfficientNetB3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB3_pretrained.pdparams", + "EfficientNetB4": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB4_pretrained.pdparams", + "EfficientNetB5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB5_pretrained.pdparams", + "EfficientNetB6": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB6_pretrained.pdparams", + "EfficientNetB7": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB7_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + +GlobalParams = collections.namedtuple('GlobalParams', [ + 'batch_norm_momentum', + 'batch_norm_epsilon', + 'dropout_rate', + 'num_classes', + 'width_coefficient', + 'depth_coefficient', + 'depth_divisor', + 'depth_trunc', + 'min_depth', + 'drop_connect_rate', +]) + +BlockArgs = collections.namedtuple('BlockArgs', [ + 'kernel_size', 'num_repeat', 'input_filters', 'output_filters', + 'expand_ratio', 'id_skip', 'stride', 'se_ratio' +]) + +GlobalParams.__new__.__defaults__ = (None, ) * len(GlobalParams._fields) +BlockArgs.__new__.__defaults__ = (None, ) * len(BlockArgs._fields) + + +def efficientnet_params(model_name): + """ Map EfficientNet model name to parameter coefficients. """ + params_dict = { + # Coefficients: width,depth,resolution,dropout + 'efficientnet-b0-small': (1.0, 1.0, 224, 0.2), + 'efficientnet-b0': (1.0, 1.0, 224, 0.2), + 'efficientnet-b1': (1.0, 1.1, 240, 0.2), + 'efficientnet-b2': (1.1, 1.2, 260, 0.3), + 'efficientnet-b3': (1.2, 1.4, 300, 0.3), + 'efficientnet-b4': (1.4, 1.8, 380, 0.4), + 'efficientnet-b5': (1.6, 2.2, 456, 0.4), + 'efficientnet-b6': (1.8, 2.6, 528, 0.5), + 'efficientnet-b7': (2.0, 3.1, 600, 0.5), + } + return params_dict[model_name] + + +def efficientnet(width_coefficient=None, + depth_coefficient=None, + dropout_rate=0.2, + drop_connect_rate=0.2): + """ Get block arguments according to parameter and coefficients. 
""" + blocks_args = [ + 'r1_k3_s11_e1_i32_o16_se0.25', + 'r2_k3_s22_e6_i16_o24_se0.25', + 'r2_k5_s22_e6_i24_o40_se0.25', + 'r3_k3_s22_e6_i40_o80_se0.25', + 'r3_k5_s11_e6_i80_o112_se0.25', + 'r4_k5_s22_e6_i112_o192_se0.25', + 'r1_k3_s11_e6_i192_o320_se0.25', + ] + blocks_args = BlockDecoder.decode(blocks_args) + + global_params = GlobalParams( + batch_norm_momentum=0.99, + batch_norm_epsilon=1e-3, + dropout_rate=dropout_rate, + drop_connect_rate=drop_connect_rate, + num_classes=1000, + width_coefficient=width_coefficient, + depth_coefficient=depth_coefficient, + depth_divisor=8, + depth_trunc='ceil', + min_depth=None) + + return blocks_args, global_params + + +def get_model_params(model_name, override_params): + """ Get the block args and global params for a given model """ + if model_name.startswith('efficientnet'): + w, d, _, p = efficientnet_params(model_name) + blocks_args, global_params = efficientnet( + width_coefficient=w, depth_coefficient=d, dropout_rate=p) + else: + raise NotImplementedError('model name is not pre-defined: %s' % + model_name) + if override_params: + global_params = global_params._replace(**override_params) + return blocks_args, global_params + + +def round_filters(filters, global_params): + """ Calculate and round number of filters based on depth multiplier. """ + multiplier = global_params.width_coefficient + if not multiplier: + return filters + divisor = global_params.depth_divisor + min_depth = global_params.min_depth + filters *= multiplier + min_depth = min_depth or divisor + new_filters = max(min_depth, + int(filters + divisor / 2) // divisor * divisor) + if new_filters < 0.9 * filters: # prevent rounding by more than 10% + new_filters += divisor + return int(new_filters) + + +def round_repeats(repeats, global_params): + """ Round number of filters based on depth multiplier. """ + multiplier = global_params.depth_coefficient + if not multiplier: + return repeats + if global_params.depth_trunc == 'round': + return max(1, round(multiplier * repeats)) + else: + return int(math.ceil(multiplier * repeats)) + + +class BlockDecoder(object): + """ + Block Decoder, straight from the official TensorFlow repository. + """ + + @staticmethod + def _decode_block_string(block_string): + """ Gets a block through a string notation of arguments. 
""" + assert isinstance(block_string, str) + + ops = block_string.split('_') + options = {} + for op in ops: + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # Check stride + cond_1 = ('s' in options and len(options['s']) == 1) + cond_2 = ((len(options['s']) == 2) and + (options['s'][0] == options['s'][1])) + assert (cond_1 or cond_2) + + return BlockArgs( + kernel_size=int(options['k']), + num_repeat=int(options['r']), + input_filters=int(options['i']), + output_filters=int(options['o']), + expand_ratio=int(options['e']), + id_skip=('noskip' not in block_string), + se_ratio=float(options['se']) if 'se' in options else None, + stride=[int(options['s'][0])]) + + @staticmethod + def _encode_block_string(block): + """Encodes a block to a string.""" + args = [ + 'r%d' % block.num_repeat, 'k%d' % block.kernel_size, 's%d%d' % + (block.strides[0], block.strides[1]), 'e%s' % block.expand_ratio, + 'i%d' % block.input_filters, 'o%d' % block.output_filters + ] + if 0 < block.se_ratio <= 1: + args.append('se%s' % block.se_ratio) + if block.id_skip is False: + args.append('noskip') + return '_'.join(args) + + @staticmethod + def decode(string_list): + """ + Decode a list of string notations to specify blocks in the network. + + string_list: list of strings, each string is a notation of block + return + list of BlockArgs namedtuples of block args + """ + assert isinstance(string_list, list) + blocks_args = [] + for block_string in string_list: + blocks_args.append(BlockDecoder._decode_block_string(block_string)) + return blocks_args + + @staticmethod + def encode(blocks_args): + """ + Encodes a list of BlockArgs to a list of strings. + + :param blocks_args: a list of BlockArgs namedtuples of block args + :return: a list of strings, each string is a notation of block + """ + block_strings = [] + for block in blocks_args: + block_strings.append(BlockDecoder._encode_block_string(block)) + return block_strings + + +def initial_type(name, use_bias=False): + param_attr = ParamAttr(name=name + "_weights") + if use_bias: + bias_attr = ParamAttr(name=name + "_offset") + else: + bias_attr = False + return param_attr, bias_attr + + +def init_batch_norm_layer(name="batch_norm"): + param_attr = ParamAttr(name=name + "_scale") + bias_attr = ParamAttr(name=name + "_offset") + return param_attr, bias_attr + + +def init_fc_layer(name="fc"): + param_attr = ParamAttr(name=name + "_weights") + bias_attr = ParamAttr(name=name + "_offset") + return param_attr, bias_attr + + +def cal_padding(img_size, stride, filter_size, dilation=1): + """Calculate padding size.""" + if img_size % stride == 0: + out_size = max(filter_size - stride, 0) + else: + out_size = max(filter_size - (img_size % stride), 0) + return out_size // 2, out_size - out_size // 2 + + +inp_shape = { + "b0_small": [224, 112, 112, 56, 28, 14, 14, 7], + "b0": [224, 112, 112, 56, 28, 14, 14, 7], + "b1": [240, 120, 120, 60, 30, 15, 15, 8], + "b2": [260, 130, 130, 65, 33, 17, 17, 9], + "b3": [300, 150, 150, 75, 38, 19, 19, 10], + "b4": [380, 190, 190, 95, 48, 24, 24, 12], + "b5": [456, 228, 228, 114, 57, 29, 29, 15], + "b6": [528, 264, 264, 132, 66, 33, 33, 17], + "b7": [600, 300, 300, 150, 75, 38, 38, 19] +} + + +def _drop_connect(inputs, prob, is_test): + if is_test: + output = inputs + else: + keep_prob = 1.0 - prob + inputs_shape = inputs.shape + random_tensor = keep_prob + paddle.rand( + shape=[inputs_shape[0], 1, 1, 1]) + binary_tensor = paddle.floor(random_tensor) + output = paddle.multiply(inputs, 
binary_tensor) / keep_prob + return output + + +class Conv2ds(TheseusLayer): + def __init__(self, + input_channels, + output_channels, + filter_size, + stride=1, + padding=0, + groups=None, + name="conv2d", + act=None, + use_bias=False, + padding_type=None, + model_name=None, + cur_stage=None): + super(Conv2ds, self).__init__() + assert act in [None, "swish", "sigmoid"] + self.act = act + + param_attr, bias_attr = initial_type(name=name, use_bias=use_bias) + + def get_padding(filter_size, stride=1, dilation=1): + padding = ((stride - 1) + dilation * (filter_size - 1)) // 2 + return padding + + self.need_crop = False + if padding_type == "SAME": + inps = 1 if model_name == None and cur_stage == None else inp_shape[ + model_name][cur_stage] + top_padding, bottom_padding = cal_padding(inps, stride, + filter_size) + left_padding, right_padding = cal_padding(inps, stride, + filter_size) + height_padding = bottom_padding + width_padding = right_padding + if top_padding != bottom_padding or left_padding != right_padding: + height_padding = top_padding + stride + width_padding = left_padding + stride + self.need_crop = True + padding = [height_padding, width_padding] + elif padding_type == "VALID": + height_padding = 0 + width_padding = 0 + padding = [height_padding, width_padding] + elif padding_type == "DYNAMIC": + padding = get_padding(filter_size, stride) + else: + padding = padding_type + + groups = 1 if groups is None else groups + self._conv = Conv2D( + input_channels, + output_channels, + filter_size, + groups=groups, + stride=stride, + # act=act, + padding=padding, + weight_attr=param_attr, + bias_attr=bias_attr) + + def forward(self, inputs): + x = self._conv(inputs) + if self.act == "swish": + x = F.swish(x) + elif self.act == "sigmoid": + x = F.sigmoid(x) + + if self.need_crop: + x = x[:, :, 1:, 1:] + return x + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + input_channels, + filter_size, + output_channels, + stride=1, + num_groups=1, + global_params=None, + padding_type="SAME", + conv_act=None, + bn_act="swish", + use_bn=True, + use_bias=False, + name=None, + conv_name=None, + bn_name=None, + model_name=None, + cur_stage=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2ds( + input_channels=input_channels, + output_channels=output_channels, + filter_size=filter_size, + stride=stride, + groups=num_groups, + act=conv_act, + padding_type=padding_type, + name=conv_name, + use_bias=use_bias, + model_name=model_name, + cur_stage=cur_stage) + self.use_bn = use_bn + if use_bn is True: + bn_name = name + bn_name + param_attr, bias_attr = init_batch_norm_layer(bn_name) + momentum = global_params.batch_norm_momentum + epsilon = global_params.batch_norm_epsilon + + self._bn = BatchNorm( + num_channels=output_channels, + act=bn_act, + momentum=momentum, + epsilon=epsilon, + moving_mean_name=bn_name + "_mean", + moving_variance_name=bn_name + "_variance", + param_attr=param_attr, + bias_attr=bias_attr) + + def forward(self, inputs): + if self.use_bn: + x = self._conv(inputs) + x = self._bn(x) + return x + else: + return self._conv(inputs) + + +class ExpandConvNorm(TheseusLayer): + def __init__(self, + input_channels, + block_args, + global_params, + padding_type, + name=None, + model_name=None, + cur_stage=None): + super(ExpandConvNorm, self).__init__() + + self.oup = block_args.input_filters * block_args.expand_ratio + self.expand_ratio = block_args.expand_ratio + + if self.expand_ratio != 1: + self._conv = ConvBNLayer( + input_channels, + 1, + self.oup, + 
global_params=global_params, + bn_act=None, + padding_type=padding_type, + name=name, + conv_name=name + "_expand_conv", + bn_name="_bn0", + model_name=model_name, + cur_stage=cur_stage) + + def forward(self, inputs): + if self.expand_ratio != 1: + return self._conv(inputs) + else: + return inputs + + +class DepthwiseConvNorm(TheseusLayer): + def __init__(self, + input_channels, + block_args, + global_params, + padding_type, + name=None, + model_name=None, + cur_stage=None): + super(DepthwiseConvNorm, self).__init__() + + self.k = block_args.kernel_size + self.s = block_args.stride + if isinstance(self.s, list) or isinstance(self.s, tuple): + self.s = self.s[0] + oup = block_args.input_filters * block_args.expand_ratio + + self._conv = ConvBNLayer( + input_channels, + self.k, + oup, + self.s, + num_groups=input_channels, + global_params=global_params, + bn_act=None, + padding_type=padding_type, + name=name, + conv_name=name + "_depthwise_conv", + bn_name="_bn1", + model_name=model_name, + cur_stage=cur_stage) + + def forward(self, inputs): + return self._conv(inputs) + + +class ProjectConvNorm(TheseusLayer): + def __init__(self, + input_channels, + block_args, + global_params, + padding_type, + name=None, + model_name=None, + cur_stage=None): + super(ProjectConvNorm, self).__init__() + + self.final_oup = block_args.output_filters + + self._conv = ConvBNLayer( + input_channels, + 1, + self.final_oup, + global_params=global_params, + bn_act=None, + padding_type=padding_type, + name=name, + conv_name=name + "_project_conv", + bn_name="_bn2", + model_name=model_name, + cur_stage=cur_stage) + + def forward(self, inputs): + return self._conv(inputs) + + +class SEBlock(TheseusLayer): + def __init__(self, + input_channels, + num_squeezed_channels, + oup, + padding_type, + name=None, + model_name=None, + cur_stage=None): + super(SEBlock, self).__init__() + + self._pool = AdaptiveAvgPool2D(1) + self._conv1 = Conv2ds( + input_channels, + num_squeezed_channels, + 1, + use_bias=True, + padding_type=padding_type, + act="swish", + name=name + "_se_reduce") + + self._conv2 = Conv2ds( + num_squeezed_channels, + oup, + 1, + act="sigmoid", + use_bias=True, + padding_type=padding_type, + name=name + "_se_expand") + + def forward(self, inputs): + x = self._pool(inputs) + x = self._conv1(x) + x = self._conv2(x) + out = paddle.multiply(inputs, x) + return out + + +class MbConvBlock(TheseusLayer): + def __init__(self, + input_channels, + block_args, + global_params, + padding_type, + use_se, + name=None, + drop_connect_rate=None, + model_name=None, + cur_stage=None): + super(MbConvBlock, self).__init__() + + oup = block_args.input_filters * block_args.expand_ratio + self.block_args = block_args + self.has_se = use_se and (block_args.se_ratio is not None) and ( + 0 < block_args.se_ratio <= 1) + self.id_skip = block_args.id_skip + self.expand_ratio = block_args.expand_ratio + self.drop_connect_rate = drop_connect_rate + + if self.expand_ratio != 1: + self._ecn = ExpandConvNorm( + input_channels, + block_args, + global_params, + padding_type=padding_type, + name=name, + model_name=model_name, + cur_stage=cur_stage) + + self._dcn = DepthwiseConvNorm( + input_channels * block_args.expand_ratio, + block_args, + global_params, + padding_type=padding_type, + name=name, + model_name=model_name, + cur_stage=cur_stage) + + if self.has_se: + num_squeezed_channels = max( + 1, int(block_args.input_filters * block_args.se_ratio)) + self._se = SEBlock( + input_channels * block_args.expand_ratio, + num_squeezed_channels, + oup, + 
padding_type=padding_type, + name=name, + model_name=model_name, + cur_stage=cur_stage) + + self._pcn = ProjectConvNorm( + input_channels * block_args.expand_ratio, + block_args, + global_params, + padding_type=padding_type, + name=name, + model_name=model_name, + cur_stage=cur_stage) + + self.final_oup = self._pcn.final_oup + + def forward(self, inputs): + x = inputs + if self.expand_ratio != 1: + x = self._ecn(x) + x = F.swish(x) + + x = self._dcn(x) + x = F.swish(x) + if self.has_se: + x = self._se(x) + x = self._pcn(x) + + if self.id_skip and \ + self.block_args.stride == 1 and \ + self.block_args.input_filters == self.block_args.output_filters: + if self.drop_connect_rate: + x = _drop_connect(x, self.drop_connect_rate, not self.training) + x = paddle.add(x, inputs) + return x + + +class ConvStemNorm(TheseusLayer): + def __init__(self, + input_channels, + padding_type, + _global_params, + name=None, + model_name=None, + fix_stem=False, + cur_stage=None): + super(ConvStemNorm, self).__init__() + + output_channels = 32 if fix_stem else round_filters(32, _global_params) + self._conv = ConvBNLayer( + input_channels, + filter_size=3, + output_channels=output_channels, + stride=2, + global_params=_global_params, + bn_act=None, + padding_type=padding_type, + name="", + conv_name="_conv_stem", + bn_name="_bn0", + model_name=model_name, + cur_stage=cur_stage) + + def forward(self, inputs): + return self._conv(inputs) + + +class ExtractFeatures(TheseusLayer): + def __init__(self, + input_channels, + _block_args, + _global_params, + padding_type, + use_se, + model_name=None, + fix_stem=False): + super(ExtractFeatures, self).__init__() + + self._global_params = _global_params + + self._conv_stem = ConvStemNorm( + input_channels, + padding_type=padding_type, + _global_params=_global_params, + model_name=model_name, + fix_stem=fix_stem, + cur_stage=0) + + self.block_args_copy = copy.deepcopy(_block_args) + idx = 0 + block_size = 0 + for block_arg in self.block_args_copy: + block_arg = block_arg._replace( + input_filters=round_filters(block_arg.input_filters, + _global_params), + output_filters=round_filters(block_arg.output_filters, + _global_params), + num_repeat=round_repeats(block_arg.num_repeat, _global_params)) + block_size += 1 + for _ in range(block_arg.num_repeat - 1): + block_size += 1 + + self.final_oup = None + self.conv_seq = [] + cur_stage = 1 + for block_idx, block_args in enumerate(_block_args): + if not (fix_stem and block_idx == 0): + block_args = block_args._replace(input_filters=round_filters( + block_args.input_filters, _global_params)) + block_args = block_args._replace( + output_filters=round_filters(block_args.output_filters, + _global_params), + num_repeat=round_repeats(block_args.num_repeat, + _global_params)) + + drop_connect_rate = self._global_params.drop_connect_rate + if drop_connect_rate: + drop_connect_rate *= float(idx) / block_size + + _mc_block = self.add_sublayer( + "_blocks." + str(idx) + ".", + MbConvBlock( + block_args.input_filters, + block_args=block_args, + global_params=_global_params, + padding_type=padding_type, + use_se=use_se, + name="_blocks." 
+ str(idx) + ".", + drop_connect_rate=drop_connect_rate, + model_name=model_name, + cur_stage=cur_stage)) + self.conv_seq.append(_mc_block) + self.final_oup = _mc_block.final_oup + idx += 1 + if block_args.num_repeat > 1: + block_args = block_args._replace( + input_filters=block_args.output_filters, stride=1) + for _ in range(block_args.num_repeat - 1): + drop_connect_rate = self._global_params.drop_connect_rate + if drop_connect_rate: + drop_connect_rate *= float(idx) / block_size + _mc_block = self.add_sublayer( + "block." + str(idx) + ".", + MbConvBlock( + block_args.input_filters, + block_args, + global_params=_global_params, + padding_type=padding_type, + use_se=use_se, + name="_blocks." + str(idx) + ".", + drop_connect_rate=drop_connect_rate, + model_name=model_name, + cur_stage=cur_stage)) + self.conv_seq.append(_mc_block) + self.final_oup = _mc_block.final_oup + idx += 1 + cur_stage += 1 + + def forward(self, inputs): + x = self._conv_stem(inputs) + x = F.swish(x) + for _mc_block in self.conv_seq: + x = _mc_block(x) + return x + + +class EfficientNet(TheseusLayer): + def __init__(self, + block_args, + global_params, + name="b0", + padding_type="SAME", + use_se=True, + fix_stem=False, + num_features=None, + class_num=1000): + super(EfficientNet, self).__init__() + + self.name = name + self.fix_stem = fix_stem + self._block_args = block_args + self._global_params = global_params + self.padding_type = padding_type + self.use_se = use_se + + self._ef = ExtractFeatures( + 3, + self._block_args, + self._global_params, + self.padding_type, + self.use_se, + model_name=self.name, + fix_stem=self.fix_stem) + + output_channels = num_features or round_filters(1280, + self._global_params) + self._conv = ConvBNLayer( + self._ef.final_oup, + 1, + output_channels, + global_params=self._global_params, + bn_act="swish", + padding_type=self.padding_type, + name="", + conv_name="_conv_head", + bn_name="_bn1", + model_name=self.name, + cur_stage=7) + self._pool = AdaptiveAvgPool2D(1) + + if self._global_params.dropout_rate: + self._drop = Dropout( + p=self._global_params.dropout_rate, mode="upscale_in_train") + + param_attr, bias_attr = init_fc_layer("_fc") + self._fc = Linear( + output_channels, + class_num, + weight_attr=param_attr, + bias_attr=bias_attr) + + def forward(self, inputs): + x = self._ef(inputs) + x = self._conv(x) + x = self._pool(x) + if self._global_params.dropout_rate: + x = self._drop(x) + x = paddle.squeeze(x, axis=[2, 3]) + x = self._fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def EfficientNetB0_small(padding_type='DYNAMIC', + override_params=None, + use_se=False, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("efficientnet-b0-small", + override_params) + model = EfficientNet( + block_args, + global_params, + name='b0', + padding_type=padding_type, + use_se=use_se, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetB0_small"]) + return model + + +def EfficientNetB0(padding_type='SAME', + override_params=None, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("efficientnet-b0", + override_params) + model = EfficientNet( + block_args, + global_params, + name='b0', + padding_type=padding_type, + use_se=use_se, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetB0"]) + return model + + +def EfficientNetB1(padding_type='SAME', + override_params=None, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("efficientnet-b1", + override_params) + model = EfficientNet( + block_args, + global_params, + name='b1', + padding_type=padding_type, + use_se=use_se, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetB1"]) + return model + + +def EfficientNetB2(padding_type='SAME', + override_params=None, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("efficientnet-b2", + override_params) + model = EfficientNet( + block_args, + global_params, + name='b2', + padding_type=padding_type, + use_se=use_se, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetB2"]) + return model + + +def EfficientNetB3(padding_type='SAME', + override_params=None, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("efficientnet-b3", + override_params) + model = EfficientNet( + block_args, + global_params, + name='b3', + padding_type=padding_type, + use_se=use_se, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetB3"]) + return model + + +def EfficientNetB4(padding_type='SAME', + override_params=None, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("efficientnet-b4", + override_params) + model = EfficientNet( + block_args, + global_params, + name='b4', + padding_type=padding_type, + use_se=use_se, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetB4"]) + return model + + +def EfficientNetB5(padding_type='SAME', + override_params=None, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("efficientnet-b5", + override_params) + model = EfficientNet( + block_args, + global_params, + name='b5', + padding_type=padding_type, + use_se=use_se, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetB5"]) + return model + + +def EfficientNetB6(padding_type='SAME', + override_params=None, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("efficientnet-b6", + override_params) + model = EfficientNet( + block_args, + global_params, + name='b6', + padding_type=padding_type, + use_se=use_se, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetB6"]) + return model + + +def EfficientNetB7(padding_type='SAME', + override_params=None, + use_se=True, + 
pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("efficientnet-b7", + override_params) + model = EfficientNet( + block_args, + global_params, + name='b7', + padding_type=padding_type, + use_se=use_se, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetB7"]) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/efficientnet_v2.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/efficientnet_v2.py new file mode 100644 index 000000000..f620d895d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/efficientnet_v2.py @@ -0,0 +1,994 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was based on https://github.com/lukemelas/EfficientNet-PyTorch +# reference: https://arxiv.org/abs/1905.11946 + +import math +import re + +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle import ParamAttr +from paddle.nn.initializer import Constant, Normal, Uniform +from paddle.regularizer import L2Decay + +from ....utils.config import AttrDict + +from ....utils.save_load import (load_dygraph_pretrain, + load_dygraph_pretrain) + +MODEL_URLS = { + "EfficientNetV2_S": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetV2_S_pretrained.pdparams", + "EfficientNetV2_M": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetV2_M_pretrained.pdparams", + "EfficientNetV2_L": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetV2_L_pretrained.pdparams", + "EfficientNetV2_XL": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetV2_XL_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + +inp_shape = { + "efficientnetv2-s": [384, 192, 192, 96, 48, 24, 24, 12], + "efficientnetv2-m": [384, 192, 192, 96, 48, 24, 24, 12], + "efficientnetv2-l": [384, 192, 192, 96, 48, 24, 24, 12], + "efficientnetv2-xl": [384, 192, 192, 96, 48, 24, 24, 12], +} + + +def cal_padding(img_size, stride, kernel_size): + """Calculate padding size.""" + if img_size % stride == 0: + out_size = max(kernel_size - stride, 0) + else: + out_size = max(kernel_size - (img_size % stride), 0) + return out_size // 2, out_size - out_size // 2 + + +class Conv2ds(nn.Layer): + """Customed Conv2D with tensorflow's padding style + + Args: + input_channels (int): input channels + output_channels (int): output channels + kernel_size (int): filter size + stride (int, optional): stride. Defaults to 1. + padding (int, optional): padding. Defaults to 0. + groups (int, optional): groups. Defaults to None. + act (str, optional): act. Defaults to None. + use_bias (bool, optional): use_bias. Defaults to None. + padding_type (str, optional): padding_type. Defaults to None. + model_name (str, optional): model name. Defaults to None. + cur_stage (int, optional): current stage. 
Defaults to None. + + Returns: + nn.Layer: Customed Conv2D instance + """ + + def __init__(self, + input_channels: int, + output_channels: int, + kernel_size: int, + stride=1, + padding=0, + groups=None, + act=None, + use_bias=None, + padding_type=None, + model_name=None, + cur_stage=None): + super(Conv2ds, self).__init__() + assert act in [None, "swish", "sigmoid"] + self._act = act + + def get_padding(kernel_size, stride=1, dilation=1): + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + inps = inp_shape[model_name][cur_stage] + self.need_crop = False + if padding_type == "SAME": + top_padding, bottom_padding = cal_padding(inps, stride, + kernel_size) + left_padding, right_padding = cal_padding(inps, stride, + kernel_size) + height_padding = bottom_padding + width_padding = right_padding + if top_padding != bottom_padding or left_padding != right_padding: + height_padding = top_padding + stride + width_padding = left_padding + stride + self.need_crop = True + padding = [height_padding, width_padding] + elif padding_type == "VALID": + height_padding = 0 + width_padding = 0 + padding = [height_padding, width_padding] + elif padding_type == "DYNAMIC": + padding = get_padding(kernel_size, stride) + else: + padding = padding_type + + groups = 1 if groups is None else groups + self._conv = nn.Conv2D( + input_channels, + output_channels, + kernel_size, + groups=groups, + stride=stride, + padding=padding, + weight_attr=None, + bias_attr=use_bias + if not use_bias else ParamAttr(regularizer=L2Decay(0.0))) + + def forward(self, inputs): + x = self._conv(inputs) + if self._act == "swish": + x = F.swish(x) + elif self._act == "sigmoid": + x = F.sigmoid(x) + + if self.need_crop: + x = x[:, :, 1:, 1:] + return x + + +class BlockDecoder(object): + """Block Decoder for readability.""" + + def _decode_block_string(self, block_string): + """Gets a block through a string notation of arguments.""" + assert isinstance(block_string, str) + ops = block_string.split('_') + options = AttrDict() + for op in ops: + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + t = AttrDict( + kernel_size=int(options['k']), + num_repeat=int(options['r']), + in_channels=int(options['i']), + out_channels=int(options['o']), + expand_ratio=int(options['e']), + se_ratio=float(options['se']) if 'se' in options else None, + strides=int(options['s']), + conv_type=int(options['c']) if 'c' in options else 0, ) + return t + + def _encode_block_string(self, block): + """Encodes a block to a string.""" + args = [ + 'r%d' % block.num_repeat, + 'k%d' % block.kernel_size, + 's%d' % block.strides, + 'e%s' % block.expand_ratio, + 'i%d' % block.in_channels, + 'o%d' % block.out_channels, + 'c%d' % block.conv_type, + 'f%d' % block.fused_conv, + ] + if block.se_ratio > 0 and block.se_ratio <= 1: + args.append('se%s' % block.se_ratio) + return '_'.join(args) + + def decode(self, string_list): + """Decodes a list of string notations to specify blocks inside the network. + + Args: + string_list: a list of strings, each string is a notation of block. + + Returns: + A list of namedtuples to represent blocks arguments. + """ + assert isinstance(string_list, list) + blocks_args = [] + for block_string in string_list: + blocks_args.append(self._decode_block_string(block_string)) + return blocks_args + + def encode(self, blocks_args): + """Encodes a list of Blocks to a list of strings. + + Args: + blocks_args: A list of namedtuples to represent blocks arguments. 
+ Returns: + a list of strings, each string is a notation of block. + """ + block_strings = [] + for block in blocks_args: + block_strings.append(self._encode_block_string(block)) + return block_strings + + +#################### EfficientNet V2 configs #################### +v2_base_block = [ # The baseline config for v2 models. + "r1_k3_s1_e1_i32_o16_c1", + "r2_k3_s2_e4_i16_o32_c1", + "r2_k3_s2_e4_i32_o48_c1", + "r3_k3_s2_e4_i48_o96_se0.25", + "r5_k3_s1_e6_i96_o112_se0.25", + "r8_k3_s2_e6_i112_o192_se0.25", +] + +v2_s_block = [ # about base * (width1.4, depth1.8) + "r2_k3_s1_e1_i24_o24_c1", + "r4_k3_s2_e4_i24_o48_c1", + "r4_k3_s2_e4_i48_o64_c1", + "r6_k3_s2_e4_i64_o128_se0.25", + "r9_k3_s1_e6_i128_o160_se0.25", + "r15_k3_s2_e6_i160_o256_se0.25", +] + +v2_m_block = [ # about base * (width1.6, depth2.2) + "r3_k3_s1_e1_i24_o24_c1", + "r5_k3_s2_e4_i24_o48_c1", + "r5_k3_s2_e4_i48_o80_c1", + "r7_k3_s2_e4_i80_o160_se0.25", + "r14_k3_s1_e6_i160_o176_se0.25", + "r18_k3_s2_e6_i176_o304_se0.25", + "r5_k3_s1_e6_i304_o512_se0.25", +] + +v2_l_block = [ # about base * (width2.0, depth3.1) + "r4_k3_s1_e1_i32_o32_c1", + "r7_k3_s2_e4_i32_o64_c1", + "r7_k3_s2_e4_i64_o96_c1", + "r10_k3_s2_e4_i96_o192_se0.25", + "r19_k3_s1_e6_i192_o224_se0.25", + "r25_k3_s2_e6_i224_o384_se0.25", + "r7_k3_s1_e6_i384_o640_se0.25", +] + +v2_xl_block = [ # only for 21k pretraining. + "r4_k3_s1_e1_i32_o32_c1", + "r8_k3_s2_e4_i32_o64_c1", + "r8_k3_s2_e4_i64_o96_c1", + "r16_k3_s2_e4_i96_o192_se0.25", + "r24_k3_s1_e6_i192_o256_se0.25", + "r32_k3_s2_e6_i256_o512_se0.25", + "r8_k3_s1_e6_i512_o640_se0.25", +] +efficientnetv2_params = { + # params: (block, width, depth, dropout) + "efficientnetv2-s": + (v2_s_block, 1.0, 1.0, np.linspace(0.1, 0.3, 4).tolist()), + "efficientnetv2-m": (v2_m_block, 1.0, 1.0, 0.3), + "efficientnetv2-l": (v2_l_block, 1.0, 1.0, 0.4), + "efficientnetv2-xl": (v2_xl_block, 1.0, 1.0, 0.4), +} + + +def efficientnetv2_config(model_name: str): + """EfficientNetV2 model config.""" + block, width, depth, dropout = efficientnetv2_params[model_name] + + cfg = AttrDict(model=AttrDict( + model_name=model_name, + blocks_args=BlockDecoder().decode(block), + width_coefficient=width, + depth_coefficient=depth, + dropout_rate=dropout, + feature_size=1280, + bn_momentum=0.9, + bn_epsilon=1e-3, + depth_divisor=8, + min_depth=8, + act_fn="silu", + survival_prob=0.8, + local_pooling=False, + conv_dropout=0, + num_classes=1000)) + return cfg + + +def get_model_config(model_name: str): + """Main entry for model name to config.""" + if model_name.startswith("efficientnetv2-"): + return efficientnetv2_config(model_name) + raise ValueError(f"Unknown model_name {model_name}") + + +################################################################################ + + +def round_filters(filters, + width_coefficient, + depth_divisor, + min_depth, + skip=False): + """Round number of filters based on depth multiplier.""" + multiplier = width_coefficient + divisor = depth_divisor + min_depth = min_depth + if skip or not multiplier: + return filters + + filters *= multiplier + min_depth = min_depth or divisor + new_filters = max(min_depth, + int(filters + divisor / 2) // divisor * divisor) + return int(new_filters) + + +def round_repeats(repeats, multiplier, skip=False): + """Round number of filters based on depth multiplier.""" + if skip or not multiplier: + return repeats + return int(math.ceil(multiplier * repeats)) + + +def activation_fn(act_fn: str): + """Customized non-linear activation type.""" + if not act_fn: + return nn.Silu() + elif 
act_fn in ("silu", "swish"): + return nn.Swish() + elif act_fn == "relu": + return nn.ReLU() + elif act_fn == "relu6": + return nn.ReLU6() + elif act_fn == "elu": + return nn.ELU() + elif act_fn == "leaky_relu": + return nn.LeakyReLU() + elif act_fn == "selu": + return nn.SELU() + elif act_fn == "mish": + return nn.Mish() + else: + raise ValueError("Unsupported act_fn {}".format(act_fn)) + + +def drop_path(x, training=False, survival_prob=1.0): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + """ + if not training: + return x + shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) + keep_prob = paddle.to_tensor(survival_prob, dtype=x.dtype) + random_tensor = keep_prob + paddle.rand(shape).astype(x.dtype) + random_tensor = paddle.floor(random_tensor) # binarize + output = x.divide(keep_prob) * random_tensor + return output + + +class SE(nn.Layer): + """Squeeze-and-excitation layer. + + Args: + local_pooling (bool): local_pooling + act_fn (str): act_fn + in_channels (int): in_channels + se_channels (int): se_channels + out_channels (int): out_channels + cur_stage (int): cur_stage + padding_type (str): padding_type + model_name (str): model_name + """ + + def __init__(self, + local_pooling: bool, + act_fn: str, + in_channels: int, + se_channels: int, + out_channels: int, + cur_stage: int, + padding_type: str, + model_name: str): + super(SE, self).__init__() + + self._local_pooling = local_pooling + self._act = activation_fn(act_fn) + + # Squeeze and Excitation layer. + self._se_reduce = Conv2ds( + in_channels, + se_channels, + 1, + stride=1, + padding_type=padding_type, + model_name=model_name, + cur_stage=cur_stage) + self._se_expand = Conv2ds( + se_channels, + out_channels, + 1, + stride=1, + padding_type=padding_type, + model_name=model_name, + cur_stage=cur_stage) + + def forward(self, x): + if self._local_pooling: + se_tensor = F.adaptive_avg_pool2d(x, output_size=1) + else: + se_tensor = paddle.mean(x, axis=[2, 3], keepdim=True) + se_tensor = self._se_expand(self._act(self._se_reduce(se_tensor))) + return F.sigmoid(se_tensor) * x + + +class MBConvBlock(nn.Layer): + """A class of MBConv: Mobile Inverted Residual Bottleneck. 
+ + Args: + se_ratio (int): se_ratio + in_channels (int): in_channels + expand_ratio (int): expand_ratio + kernel_size (int): kernel_size + strides (int): strides + out_channels (int): out_channels + bn_momentum (float): bn_momentum + bn_epsilon (float): bn_epsilon + local_pooling (bool): local_pooling + conv_dropout (float): conv_dropout + cur_stage (int): cur_stage + padding_type (str): padding_type + model_name (str): model_name + """ + + def __init__(self, + se_ratio: int, + in_channels: int, + expand_ratio: int, + kernel_size: int, + strides: int, + out_channels: int, + bn_momentum: float, + bn_epsilon: float, + local_pooling: bool, + conv_dropout: float, + cur_stage: int, + padding_type: str, + model_name: str): + super(MBConvBlock, self).__init__() + + self.se_ratio = se_ratio + self.in_channels = in_channels + self.expand_ratio = expand_ratio + self.kernel_size = kernel_size + self.strides = strides + self.out_channels = out_channels + + self.bn_momentum = bn_momentum + self.bn_epsilon = bn_epsilon + + self._local_pooling = local_pooling + self.act_fn = None + self.conv_dropout = conv_dropout + + self._act = activation_fn(None) + self._has_se = (self.se_ratio is not None and 0 < self.se_ratio <= 1) + """Builds block according to the arguments.""" + expand_channels = self.in_channels * self.expand_ratio + kernel_size = self.kernel_size + + # Expansion phase. Called if not using fused convolutions and expansion + # phase is necessary. + if self.expand_ratio != 1: + self._expand_conv = Conv2ds( + self.in_channels, + expand_channels, + 1, + stride=1, + use_bias=False, + padding_type=padding_type, + model_name=model_name, + cur_stage=cur_stage) + self._norm0 = nn.BatchNorm2D( + expand_channels, + self.bn_momentum, + self.bn_epsilon, + weight_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + + # Depth-wise convolution phase. Called if not using fused convolutions. + self._depthwise_conv = Conv2ds( + expand_channels, + expand_channels, + kernel_size, + padding=kernel_size // 2, + stride=self.strides, + groups=expand_channels, + use_bias=False, + padding_type=padding_type, + model_name=model_name, + cur_stage=cur_stage) + + self._norm1 = nn.BatchNorm2D( + expand_channels, + self.bn_momentum, + self.bn_epsilon, + weight_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + + if self._has_se: + num_reduced_filters = max(1, int(self.in_channels * self.se_ratio)) + self._se = SE(self._local_pooling, None, expand_channels, + num_reduced_filters, expand_channels, cur_stage, + padding_type, model_name) + else: + self._se = None + + # Output phase. + self._project_conv = Conv2ds( + expand_channels, + self.out_channels, + 1, + stride=1, + use_bias=False, + padding_type=padding_type, + model_name=model_name, + cur_stage=cur_stage) + self._norm2 = nn.BatchNorm2D( + self.out_channels, + self.bn_momentum, + self.bn_epsilon, + weight_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + self.drop_out = nn.Dropout(self.conv_dropout) + + def residual(self, inputs, x, survival_prob): + if (self.strides == 1 and self.in_channels == self.out_channels): + # Apply only if skip connection presents. + if survival_prob: + x = drop_path(x, self.training, survival_prob) + x = paddle.add(x, inputs) + + return x + + def forward(self, inputs, survival_prob=None): + """Implementation of call(). + + Args: + inputs: the inputs tensor. + survival_prob: float, between 0 to 1, drop connect rate. 
+ + Returns: + A output tensor. + """ + x = inputs + if self.expand_ratio != 1: + x = self._act(self._norm0(self._expand_conv(x))) + + x = self._act(self._norm1(self._depthwise_conv(x))) + + if self.conv_dropout and self.expand_ratio > 1: + x = self.drop_out(x) + + if self._se: + x = self._se(x) + + x = self._norm2(self._project_conv(x)) + x = self.residual(inputs, x, survival_prob) + + return x + + +class FusedMBConvBlock(MBConvBlock): + """Fusing the proj conv1x1 and depthwise_conv into a conv2d.""" + + def __init__(self, se_ratio, in_channels, expand_ratio, kernel_size, + strides, out_channels, bn_momentum, bn_epsilon, local_pooling, + conv_dropout, cur_stage, padding_type, model_name): + """Builds block according to the arguments.""" + super(MBConvBlock, self).__init__() + self.se_ratio = se_ratio + self.in_channels = in_channels + self.expand_ratio = expand_ratio + self.kernel_size = kernel_size + self.strides = strides + self.out_channels = out_channels + + self.bn_momentum = bn_momentum + self.bn_epsilon = bn_epsilon + + self._local_pooling = local_pooling + self.act_fn = None + self.conv_dropout = conv_dropout + + self._act = activation_fn(None) + self._has_se = (self.se_ratio is not None and 0 < self.se_ratio <= 1) + + expand_channels = self.in_channels * self.expand_ratio + kernel_size = self.kernel_size + if self.expand_ratio != 1: + # Expansion phase: + self._expand_conv = Conv2ds( + self.in_channels, + expand_channels, + kernel_size, + padding=kernel_size // 2, + stride=self.strides, + use_bias=False, + padding_type=padding_type, + model_name=model_name, + cur_stage=cur_stage) + self._norm0 = nn.BatchNorm2D( + expand_channels, + self.bn_momentum, + self.bn_epsilon, + weight_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + + if self._has_se: + num_reduced_filters = max(1, int(self.in_channels * self.se_ratio)) + self._se = SE(self._local_pooling, None, expand_channels, + num_reduced_filters, expand_channels, cur_stage, + padding_type, model_name) + else: + self._se = None + + # Output phase: + self._project_conv = Conv2ds( + expand_channels, + self.out_channels, + 1 if (self.expand_ratio != 1) else kernel_size, + padding=(1 if (self.expand_ratio != 1) else kernel_size) // 2, + stride=1 if (self.expand_ratio != 1) else self.strides, + use_bias=False, + padding_type=padding_type, + model_name=model_name, + cur_stage=cur_stage) + self._norm1 = nn.BatchNorm2D( + self.out_channels, + self.bn_momentum, + self.bn_epsilon, + weight_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + self.drop_out = nn.Dropout(conv_dropout) + + def forward(self, inputs, survival_prob=None): + """Implementation of call(). + + Args: + inputs: the inputs tensor. + training: boolean, whether the model is constructed for training. + survival_prob: float, between 0 to 1, drop connect rate. + + Returns: + A output tensor. + """ + x = inputs + if self.expand_ratio != 1: + x = self._act(self._norm0(self._expand_conv(x))) + + if self.conv_dropout and self.expand_ratio > 1: + x = self.drop_out(x) + + if self._se: + x = self._se(x) + + x = self._norm1(self._project_conv(x)) + if self.expand_ratio == 1: + x = self._act(x) # add act if no expansion. 
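+        # residual() (inherited from MBConvBlock) adds the skip connection,
+        # with optional drop-path, only when strides == 1 and
+        # in_channels == out_channels; otherwise x passes through unchanged.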
+ + x = self.residual(inputs, x, survival_prob) + return x + + +class Stem(nn.Layer): + """Stem layer at the begining of the network.""" + + def __init__(self, width_coefficient, depth_divisor, min_depth, skip, + bn_momentum, bn_epsilon, act_fn, stem_channels, cur_stage, + padding_type, model_name): + super(Stem, self).__init__() + self._conv_stem = Conv2ds( + 3, + round_filters(stem_channels, width_coefficient, depth_divisor, + min_depth, skip), + 3, + padding=1, + stride=2, + use_bias=False, + padding_type=padding_type, + model_name=model_name, + cur_stage=cur_stage) + self._norm = nn.BatchNorm2D( + round_filters(stem_channels, width_coefficient, depth_divisor, + min_depth, skip), + bn_momentum, + bn_epsilon, + weight_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + self._act = activation_fn(act_fn) + + def forward(self, inputs): + return self._act(self._norm(self._conv_stem(inputs))) + + +class Head(nn.Layer): + """Head layer for network outputs.""" + + def __init__(self, + in_channels, + feature_size, + bn_momentum, + bn_epsilon, + act_fn, + dropout_rate, + local_pooling, + width_coefficient, + depth_divisor, + min_depth, + skip=False): + super(Head, self).__init__() + self.in_channels = in_channels + self.feature_size = feature_size + self.bn_momentum = bn_momentum + self.bn_epsilon = bn_epsilon + self.dropout_rate = dropout_rate + self._local_pooling = local_pooling + self._conv_head = nn.Conv2D( + in_channels, + round_filters(self.feature_size or 1280, width_coefficient, + depth_divisor, min_depth, skip), + kernel_size=1, + stride=1, + bias_attr=False) + self._norm = nn.BatchNorm2D( + round_filters(self.feature_size or 1280, width_coefficient, + depth_divisor, min_depth, skip), + self.bn_momentum, + self.bn_epsilon, + weight_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + self._act = activation_fn(act_fn) + + self._avg_pooling = nn.AdaptiveAvgPool2D(output_size=1) + + if isinstance(self.dropout_rate, + (list, tuple)) or self.dropout_rate > 0: + self._dropout = nn.Dropout(self.dropout_rate[0] if isinstance( + self.dropout_rate, (list, tuple)) else self.dropout_rate) + else: + self._dropout = None + + def forward(self, x): + """Call the layer.""" + outputs = self._act(self._norm(self._conv_head(x))) + + if self._local_pooling: + outputs = F.adaptive_avg_pool2d(outputs, output_size=1) + if self._dropout: + outputs = self._dropout(outputs) + if self._fc: + outputs = paddle.squeeze(outputs, axis=[2, 3]) + outputs = self._fc(outputs) + else: + outputs = self._avg_pooling(outputs) + if self._dropout: + outputs = self._dropout(outputs) + return paddle.flatten(outputs, start_axis=1) + + +class EfficientNetV2(nn.Layer): + """A class implements tf.keras.Model. + + Reference: https://arxiv.org/abs/1807.11626 + """ + + def __init__(self, + model_name, + blocks_args=None, + mconfig=None, + include_top=True, + class_num=1000, + padding_type="SAME"): + """Initializes an `Model` instance. + + Args: + model_name: A string of model name. + model_config: A dict of model configurations or a string of hparams. + Raises: + ValueError: when blocks_args is not specified as a list. + """ + super(EfficientNetV2, self).__init__() + self.blocks_args = blocks_args + self.mconfig = mconfig + """Builds a model.""" + self._blocks = nn.LayerList() + + cur_stage = 0 + # Stem part. 
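+        # Stem output channels are scaled by the width multiplier via
+        # round_filters and match the in_channels of the first block args.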
+ self._stem = Stem( + self.mconfig.width_coefficient, + self.mconfig.depth_divisor, + self.mconfig.min_depth, + False, + self.mconfig.bn_momentum, + self.mconfig.bn_epsilon, + self.mconfig.act_fn, + stem_channels=self.blocks_args[0].in_channels, + cur_stage=cur_stage, + padding_type=padding_type, + model_name=model_name) + cur_stage += 1 + + # Builds blocks. + for block_args in self.blocks_args: + assert block_args.num_repeat > 0 + # Update block input and output filters based on depth multiplier. + in_channels = round_filters( + block_args.in_channels, self.mconfig.width_coefficient, + self.mconfig.depth_divisor, self.mconfig.min_depth, False) + out_channels = round_filters( + block_args.out_channels, self.mconfig.width_coefficient, + self.mconfig.depth_divisor, self.mconfig.min_depth, False) + + repeats = round_repeats(block_args.num_repeat, + self.mconfig.depth_coefficient) + block_args.update( + dict( + in_channels=in_channels, + out_channels=out_channels, + num_repeat=repeats)) + + # The first block needs to take care of stride and filter size increase. + conv_block = { + 0: MBConvBlock, + 1: FusedMBConvBlock + }[block_args.conv_type] + self._blocks.append( + conv_block(block_args.se_ratio, block_args.in_channels, + block_args.expand_ratio, block_args.kernel_size, + block_args.strides, block_args.out_channels, + self.mconfig.bn_momentum, self.mconfig.bn_epsilon, + self.mconfig.local_pooling, self.mconfig. + conv_dropout, cur_stage, padding_type, model_name)) + if block_args.num_repeat > 1: # rest of blocks with the same block_arg + block_args.in_channels = block_args.out_channels + block_args.strides = 1 + for _ in range(block_args.num_repeat - 1): + self._blocks.append( + conv_block( + block_args.se_ratio, block_args.in_channels, + block_args.expand_ratio, block_args.kernel_size, + block_args.strides, block_args.out_channels, + self.mconfig.bn_momentum, self.mconfig.bn_epsilon, + self.mconfig.local_pooling, self.mconfig.conv_dropout, + cur_stage, padding_type, model_name)) + cur_stage += 1 + + # Head part. + self._head = Head( + self.blocks_args[-1].out_channels, self.mconfig.feature_size, + self.mconfig.bn_momentum, self.mconfig.bn_epsilon, + self.mconfig.act_fn, self.mconfig.dropout_rate, + self.mconfig.local_pooling, self.mconfig.width_coefficient, + self.mconfig.depth_divisor, self.mconfig.min_depth, False) + + # top part for classification + if include_top and class_num: + self._fc = nn.Linear( + self.mconfig.feature_size, + class_num, + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + else: + self._fc = None + + # initialize weight + def _init_weights(m): + if isinstance(m, nn.Conv2D): + out_filters, in_channels, kernel_height, kernel_width = m.weight.shape + if in_channels == 1 and out_filters > in_channels: + out_filters = in_channels + fan_out = int(kernel_height * kernel_width * out_filters) + Normal(mean=0.0, std=np.sqrt(2.0 / fan_out))(m.weight) + elif isinstance(m, nn.Linear): + init_range = 1.0 / np.sqrt(m.weight.shape[1]) + Uniform(-init_range, init_range)(m.weight) + Constant(0.0)(m.bias) + + self.apply(_init_weights) + + def forward(self, inputs): + # Calls Stem layers + outputs = self._stem(inputs) + # print(f"stem: {outputs.mean().item():.10f}") + + # Calls blocks. 
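+        # Stochastic depth: the survival probability decays linearly with
+        # block index, i.e. block i keeps
+        #   survival_prob_i = 1 - (1 - mconfig.survival_prob) * i / num_blocks,
+        # so earlier blocks are dropped less often than later ones.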
+ for idx, block in enumerate(self._blocks): + survival_prob = self.mconfig.survival_prob + if survival_prob: + drop_rate = 1.0 - survival_prob + survival_prob = 1.0 - drop_rate * float(idx) / len( + self._blocks) + outputs = block(outputs, survival_prob=survival_prob) + + # Head to obtain the final feature. + outputs = self._head(outputs) + # Calls final dense layers and returns logits. + if self._fc: + outputs = self._fc(outputs) + + return outputs + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def EfficientNetV2_S(include_top=True, pretrained=False, **kwargs): + """Get a V2 model instance. + + Returns: + nn.Layer: A single model instantce + """ + model_name = "efficientnetv2-s" + model_config = efficientnetv2_config(model_name) + model = EfficientNetV2(model_name, model_config.model.blocks_args, + model_config.model, include_top, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetV2_S"]) + return model + + +def EfficientNetV2_M(include_top=True, pretrained=False, **kwargs): + """Get a V2 model instance. + + Returns: + nn.Layer: A single model instantce + """ + model_name = "efficientnetv2-m" + model_config = efficientnetv2_config(model_name) + model = EfficientNetV2(model_name, model_config.model.blocks_args, + model_config.model, include_top, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetV2_M"]) + return model + + +def EfficientNetV2_L(include_top=True, pretrained=False, **kwargs): + """Get a V2 model instance. + + Returns: + nn.Layer: A single model instantce + """ + model_name = "efficientnetv2-l" + model_config = efficientnetv2_config(model_name) + model = EfficientNetV2(model_name, model_config.model.blocks_args, + model_config.model, include_top, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetV2_L"]) + return model + + +def EfficientNetV2_XL(include_top=True, pretrained=False, **kwargs): + """Get a V2 model instance. + + Returns: + nn.Layer: A single model instantce + """ + model_name = "efficientnetv2-xl" + model_config = efficientnetv2_config(model_name) + model = EfficientNetV2(model_name, model_config.model.blocks_args, + model_config.model, include_top, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetV2_XL"]) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/fasternet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/fasternet.py new file mode 100644 index 000000000..4389d7c29 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/fasternet.py @@ -0,0 +1,399 @@ +# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/2303.03667 + +import os +import math +import copy +import warnings + +import paddle +import paddle.nn as nn + +from .vision_transformer import trunc_normal_, zeros_, ones_ +from ....utils.save_load import load_dygraph_pretrain +from ..model_zoo.vision_transformer import DropPath + +MODEL_URLS = { + "FasterNet_T0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/FasterNet_T0_pretrained.pdparams", + "FasterNet_T1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/FasterNet_T1_pretrained.pdparams", + "FasterNet_T2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/FasterNet_T2_pretrained.pdparams", + "FasterNet_S": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/FasterNet_S_pretrained.pdparams", + "FasterNet_M": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/FasterNet_M_pretrained.pdparams", + "FasterNet_L": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/FasterNet_L_pretrained.pdparams", +} + +__all__ = MODEL_URLS.keys() + +NET_CONFIG = { + "FasterNet_T0": + [3, 40, [1, 2, 8, 2], 2, 4, 4, 4, 2, 2, True, 1280, 0.0, 0, 'BN', 'GELU'], + "FasterNet_T1": [ + 3, 64, [1, 2, 8, 2], 2, 4, 4, 4, 2, 2, True, 1280, 0.02, 0, 'BN', + 'GELU' + ], + "FasterNet_T2": [ + 3, 96, [1, 2, 8, 2], 2, 4, 4, 4, 2, 2, True, 1280, 0.05, 0, 'BN', + 'RELU' + ], + "FasterNet_S": [ + 3, 128, [1, 2, 13, 2], 2, 4, 4, 4, 2, 2, True, 1280, 0.1, 0, 'BN', + 'RELU' + ], + "FasterNet_M": [ + 3, 144, [3, 4, 18, 3], 2, 4, 4, 4, 2, 2, True, 1280, 0.2, 0, 'BN', + 'RELU' + ], + "FasterNet_L": [ + 3, 192, [3, 4, 18, 3], 2, 4, 4, 4, 2, 2, True, 1280, 0.3, 0, 'BN', + 'RELU' + ], +} + + +class PartialConv(nn.Layer): + def __init__(self, dim: int, n_div: int, forward: str): + super().__init__() + self.dim_conv3 = dim // n_div + self.dim_untouched = dim - self.dim_conv3 + self.partial_conv3 = nn.Conv2D( + in_channels=self.dim_conv3, + out_channels=self.dim_conv3, + kernel_size=3, + stride=1, + padding=1, + bias_attr=False) + if forward == 'slicing': + self.forward = self.forward_slicing + elif forward == 'split_cat': + self.forward = self.forward_split_cat + else: + raise NotImplementedError( + f"Forward method '{forward}' is not implemented.") + + def forward_slicing(self, x): + x = x.clone() + x[:, :self.dim_conv3, :, :] = self.partial_conv3( + x[:, :self.dim_conv3, :, :]) + return x + + def forward_split_cat(self, x): + x1, x2 = paddle.split( + x=x, num_or_sections=[self.dim_conv3, self.dim_untouched], axis=1) + x1 = self.partial_conv3(x1) + x = paddle.concat(x=(x1, x2), axis=1) + return x + + +class MLPBlock(nn.Layer): + def __init__(self, dim, n_div, mlp_ratio, drop_path, + layer_scale_init_value, act_layer, norm_layer, pconv_fw_type): + super().__init__() + self.dim = dim + self.mlp_ratio = mlp_ratio + if drop_path > 0.: + self.drop_path = DropPath(drop_path) + else: + self.drop_path = nn.Identity() + self.n_div = n_div + mlp_hidden_dim = int(dim * mlp_ratio) + mlp_layer = [ + nn.Conv2D( + in_channels=dim, + out_channels=mlp_hidden_dim, + kernel_size=1, + bias_attr=False), norm_layer(mlp_hidden_dim), act_layer(), + nn.Conv2D( + in_channels=mlp_hidden_dim, + out_channels=dim, + kernel_size=1, + bias_attr=False) + ] + self.mlp = nn.Sequential(*mlp_layer) + self.spatial_mixing = PartialConv(dim, n_div, pconv_fw_type) + if layer_scale_init_value > 0: + self.layer_scale = ( + paddle.base.framework.EagerParamBase.from_tensor( 
+ tensor=layer_scale_init_value * paddle.ones(shape=dim), + trainable=True)) + self.forward = self.forward_layer_scale + else: + self.forward = self.forward + + def forward(self, x): + shortcut = x + x = self.spatial_mixing(x) + x = shortcut + self.drop_path(self.mlp(x)) + return x + + def forward_layer_scale(self, x): + shortcut = x + x = self.spatial_mixing(x) + x = shortcut + self.drop_path( + self.layer_scale.unsqueeze(axis=-1).unsqueeze(axis=-1) * + self.mlp(x)) + return x + + +class BasicStage(nn.Layer): + def __init__(self, dim, depth, n_div, mlp_ratio, drop_path, + layer_scale_init_value, norm_layer, act_layer, pconv_fw_type): + super().__init__() + blocks_list = [ + MLPBlock( + dim=dim, + n_div=n_div, + mlp_ratio=mlp_ratio, + drop_path=drop_path[i], + layer_scale_init_value=layer_scale_init_value, + norm_layer=norm_layer, + act_layer=act_layer, + pconv_fw_type=pconv_fw_type) for i in range(depth) + ] + self.blocks = nn.Sequential(*blocks_list) + + def forward(self, x): + x = self.blocks(x) + return x + + +class PatchEmbed(nn.Layer): + def __init__(self, patch_size, patch_stride, in_chans, embed_dim, + norm_layer): + super().__init__() + self.proj = nn.Conv2D( + in_channels=in_chans, + out_channels=embed_dim, + kernel_size=patch_size, + stride=patch_stride, + bias_attr=False) + if norm_layer is not None: + self.norm = norm_layer(embed_dim, momentum=0.1) + else: + self.norm = nn.Identity() + + def forward(self, x): + x = self.norm(self.proj(x)) + return x + + +class PatchMerging(nn.Layer): + def __init__(self, patch_size_t, patch_stride_t, dim, norm_layer): + super().__init__() + self.reduction = nn.Conv2D( + in_channels=dim, + out_channels=2 * dim, + kernel_size=patch_size_t, + stride=patch_stride_t, + bias_attr=False) + if norm_layer is not None: + self.norm = norm_layer(2 * dim) + else: + self.norm = nn.Identity() + + def forward(self, x): + x = self.norm(self.reduction(x)) + return x + + +class FasterNet(nn.Layer): + """ + FasterNet + Args: + in_chans: int=3. Number of input channels. Default value is 3. + embed_dim: int=96. The dimension of embedding. Default value is 96. + depths: tuple=(1, 2, 8, 2). The depth of each stage. Default value is (1, 2, 8, 2). + mlp_ratio: float=2.0. The ratio of hidden dimension to embedding dimension. Default value is 2.0. + n_div: int=4. The number of divisions in the spatial dimension. Default value is 4. + patch_size: int=4. The size of patch. Default value is 4. + patch_stride: int=4. The stride of patch. Default value is 4. + patch_size_t: int=2. The size of patch for merging. Default value is 2. + patch_stride_t: int=2. The stride of patch for merging. Default value is 2. + patch_norm: bool=True. Whether to use patch normalization. Default value is True. + feature_dim: int=1280. The dimension of feature. Default value is 1280. + drop_path_rate: float=0.1. The drop path rate. Default value is 0.1. + layer_scale_init_value: float=0.0. The initial value of layer scale. Default value is 0.0. + norm_layer: str='BN'. The type of normalization layer. Default value is 'BN'. + act_layer: str='RELU'. The type of activation layer. Default value is 'RELU'. + class_num: int=1000. The number of classes. Default value is 1000. + fork_feat: bool=False. Whether to return feature maps. Default value is False. + pretrained: str=None. The path of pretrained model. Default value is None. + pconv_fw_type: str='split_cat'. The type of partial convolution forward. Default value is 'split_cat'. + scale: float=1.0. 
The coefficient that controls the size of network parameters. + Returns: + model: nn.Layer. Specific FasterNet model depends on args. + """ + def __init__(self, + in_chans=3, + embed_dim=96, + depths=(1, 2, 8, 2), + mlp_ratio=2.0, + n_div=4, + patch_size=4, + patch_stride=4, + patch_size_t=2, + patch_stride_t=2, + patch_norm=True, + feature_dim=1280, + drop_path_rate=0.1, + layer_scale_init_value=0, + norm_layer='BN', + act_layer='RELU', + class_num=1000, + fork_feat=False, + pretrained=None, + pconv_fw_type='split_cat', + **kwargs): + super().__init__() + if norm_layer == 'BN': + norm_layer = nn.BatchNorm2D + else: + raise NotImplementedError + if act_layer == 'GELU': + act_layer = nn.GELU + elif act_layer == 'RELU': + act_layer = nn.ReLU + else: + raise NotImplementedError + if not fork_feat: + self.class_num = class_num + self.num_stages = len(depths) + self.embed_dim = embed_dim + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2**(self.num_stages - 1)) + self.mlp_ratio = mlp_ratio + self.depths = depths + self.patch_embed = PatchEmbed( + patch_size=patch_size, + patch_stride=patch_stride, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + + dpr = [ + x.item() + for x in paddle.linspace( + start=0, stop=drop_path_rate, num=sum(depths)) + ] + stages_list = [] + for i_stage in range(self.num_stages): + stage = BasicStage( + dim=int(embed_dim * 2**i_stage), + n_div=n_div, + depth=depths[i_stage], + mlp_ratio=self.mlp_ratio, + drop_path=dpr[sum(depths[:i_stage]):sum(depths[:i_stage + 1])], + layer_scale_init_value=layer_scale_init_value, + norm_layer=norm_layer, + act_layer=act_layer, + pconv_fw_type=pconv_fw_type) + stages_list.append(stage) + if i_stage < self.num_stages - 1: + stages_list.append( + PatchMerging( + patch_size_t=patch_size_t, + patch_stride_t=patch_stride_t, + dim=int(embed_dim * 2**i_stage), + norm_layer=norm_layer)) + self.stages = nn.Sequential(*stages_list) + self.avgpool_pre_head = nn.Sequential( + nn.AdaptiveAvgPool2D(output_size=1), + nn.Conv2D( + in_channels=self.num_features, + out_channels=feature_dim, + kernel_size=1, + bias_attr=False), + act_layer()) + self.head = (nn.Linear( + in_features=feature_dim, out_features=class_num) + if class_num > 0 else nn.Identity()) + self.apply(self.cls_init_weights) + + + def cls_init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, (nn.Conv1D, nn.Conv2D)): + trunc_normal_(m.weight) + if m.bias is not None: + zeros_(m.bias) + elif isinstance(m, (nn.LayerNorm, nn.GroupNorm)): + zeros_(m.bias) + ones_(m.weight) + + def forward(self, x): + x = self.patch_embed(x) + x = self.stages(x) + x = self.avgpool_pre_head(x) + x = paddle.flatten(x=x, start_axis=1) + x = self.head(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def FasterNet_T0(pretrained=False, use_ssld=False, **kwargs): + model = FasterNet(*NET_CONFIG["FasterNet_T0"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["FasterNet_T0"], use_ssld) + return model + + +def FasterNet_T1(pretrained=False, use_ssld=False, **kwargs): + model = FasterNet(*NET_CONFIG["FasterNet_T1"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["FasterNet_T1"], use_ssld) + return model + + +def FasterNet_T2(pretrained=False, use_ssld=False, **kwargs): + model = FasterNet(*NET_CONFIG["FasterNet_T2"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["FasterNet_T2"], use_ssld) + return model + + +def FasterNet_S(pretrained=False, use_ssld=False, **kwargs): + model = FasterNet(*NET_CONFIG["FasterNet_S"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["FasterNet_S"], use_ssld) + return model + + +def FasterNet_M(pretrained=False, use_ssld=False, **kwargs): + model = FasterNet(*NET_CONFIG["FasterNet_M"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["FasterNet_M"], use_ssld) + return model + + +def FasterNet_L(pretrained=False, use_ssld=False, **kwargs): + model = FasterNet(*NET_CONFIG["FasterNet_L"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["FasterNet_L"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/foundation_vit.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/foundation_vit.py new file mode 100644 index 000000000..35146960c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/foundation_vit.py @@ -0,0 +1,1261 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Code was based on https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +# reference: https://arxiv.org/abs/2010.11929 + +from collections.abc import Callable, Iterable + +import numpy as np +import paddle +import paddle.nn as nn +import sys +from paddle.nn.initializer import TruncatedNormal, Constant, Normal, Assign + +from ....utils import logger +from ....utils.save_load import load_dygraph_pretrain +from ..base.theseus_layer import TheseusLayer + +MODEL_URLS = { + "CLIP_vit_base_patch32_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/CLIP_vit_base_patch32_224.pdparams", + "CLIP_vit_base_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/CLIP_vit_base_patch16_224.pdparams", + "CLIP_vit_large_patch14_336": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/CLIP_vit_large_patch14_336.pdparams", + "CLIP_vit_large_patch14_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/CLIP_vit_large_patch14_224.pdparams", + "BEiTv2_vit_base_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/BEiTv2_vit_base_patch16_224.pdparams", + "BEiTv2_vit_large_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/BEiTv2_vit_large_patch16_224.pdparams", + "CAE_vit_base_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/CAE_vit_base_patch16_224.pdparams", + 'EVA_vit_giant_patch14': + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/EVA_vit_giant_patch14.pdparams", + "MOCOV3_vit_small": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/MOCOV3_vit_small.pdparams", + "MOCOV3_vit_base": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/MOCOV3_vit_base.pdparams", + "MAE_vit_huge_patch14": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/MAE_vit_huge_patch14.pdparams", + "MAE_vit_large_patch16": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/MAE_vit_large_patch16.pdparams", + "MAE_vit_base_patch16": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/foundation_models/MAE_vit_base_patch16.pdparams", +} + + +def check_support_fused_op(use_fused_linear): + if use_fused_linear: + if paddle.device.cuda.get_device_capability()[0] >= 8: + return True + else: + logger.warning("The current device don't support Fused OP! Using the general Linear instead.") + return False + + +def resize_pos_embed(pos_embed, + src_shape, + dst_shape, + mode='bicubic', + num_extra_tokens=1): + """Resize pos_embed weights. + + Args: + pos_embed (torch.Tensor): Position embedding weights with shape + [1, L, C]. + src_shape (tuple): The resolution of downsampled origin training + image, in format (H, W). + dst_shape (tuple): The resolution of downsampled new training + image, in format (H, W). + mode (str): Algorithm used for upsampling. Choose one from 'nearest', + 'linear', 'bilinear', 'bicubic' and 'trilinear'. + Defaults to 'bicubic'. + num_extra_tokens (int): The number of extra tokens, such as cls_token. + Defaults to 1. 
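+            For example, with num_extra_tokens=1, a [1, 197, 768] embedding
+            built for a 14x14 patch grid (196 patches + 1 cls token) becomes
+            [1, 577, 768] when resized to a 24x24 grid.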
+ + Returns: + torch.Tensor: The resized pos_embed of shape [1, L_new, C] + """ + if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1]: + return pos_embed + assert pos_embed.ndim == 3, 'shape of pos_embed must be [1, L, C]' + _, L, C = pos_embed.shape + src_h, src_w = src_shape + assert L == src_h * src_w + num_extra_tokens, \ + f"The length of `pos_embed` ({L}) doesn't match the expected " \ + f'shape ({src_h}*{src_w}+{num_extra_tokens}). Please check the' \ + '`img_size` argument.' + extra_tokens = pos_embed[:, :num_extra_tokens] + + src_weight = pos_embed[:, num_extra_tokens:] + src_weight = src_weight.reshape([-1, src_h, src_w, C]).transpose( + [0, 3, 1, 2]) + + # The cubic interpolate algorithm only accepts float32 + dst_weight = paddle.nn.functional.interpolate( + paddle.cast(src_weight, paddle.float32), + size=dst_shape, + align_corners=False, + mode=mode) + dst_weight = paddle.flatten(dst_weight, 2).transpose([0, 2, 1]) + dst_weight = paddle.cast(dst_weight, src_weight.dtype) + + return paddle.concat((extra_tokens, dst_weight), axis=1) + + +def pading_for_not_divisible(pixel_values, + height, + width, + patch_size, + format="BCHW", + function="split"): + if isinstance(patch_size, int): + patch_size = (patch_size, patch_size) + if height % patch_size[0] == 0 and width % patch_size[1] == 0: + return pixel_values, None + if function == "split": + pading_width = patch_size[1] - width % patch_size[1] + pading_height = patch_size[0] - height % patch_size[0] + elif function == "merge": + pading_width = width % 2 + pading_height = height % 2 + if format == "BCHW": + pad_index = (0, 0, 0, 0, 0, pading_height, 0, pading_width) + elif format == "BHWC": + pad_index = (0, 0, 0, pading_height, 0, pading_width, 0, 0) + else: + assert ("vaild format") + + return paddle.nn.functional.pad(pixel_values, pad_index), pad_index + + +__all__ = list(MODEL_URLS.keys()) + +_model_size = None +_model_diff = None + +_CLIP_diff = { + 'add_layer_norm_before_encoder': [ + 'vit_base_patch32_224', 'vit_base_patch16_224', + 'vit_large_patch14_336', 'vit_large_patch14_224' + ], + 'add_relative_position_bias_in_msa': [], + 'add_shared_rel_pos_bias': [], + 'add_mul_gamma_to_msa_mlp': [], + 'remove_cls_token': [], + 'remove_abs_pos_emb': [], + 'replace_mlp_GELU': [], + 'head': { + 'fc_norm': [], + 'return_all_tokens': [], + 'return_patch_tokens': [], + 'return_tokens_mean': ['vit_base_patch16_224'], + }, + 'remove_cls_token_in_forward': ['vit_base_patch16_224'], +} + + +_MOCOV3_diff = { + 'add_layer_norm_before_encoder': [], + 'add_relative_position_bias_in_msa': [], + 'add_shared_rel_pos_bias': [], + 'add_mul_gamma_to_msa_mlp': [], + 'remove_cls_token': [], + 'remove_abs_pos_emb': [], + 'replace_mlp_GELU': [], + 'head': { + 'fc_norm': [], + 'return_all_tokens': [], + 'return_patch_tokens': [], + 'return_tokens_mean': [], + }, + 'remove_cls_token_in_forward': [], +} + +_CoCa_diff = { + 'add_layer_norm_before_encoder': [], + 'add_relative_position_bias_in_msa': [], + 'add_shared_rel_pos_bias': [], + 'add_mul_gamma_to_msa_mlp': [], + 'remove_cls_token': [], + 'remove_abs_pos_emb': [], + 'replace_mlp_GELU': [], + 'head': { + 'fc_norm': [], + 'return_all_tokens': [], + 'return_patch_tokens': [], + 'return_tokens_mean': [], + }, + 'remove_cls_token_in_forward': [], +} + +_BEiTv2_diff = { + 'add_layer_norm_before_encoder': [], + 'add_relative_position_bias_in_msa': + ['vit_base_patch16_224', 'vit_large_patch16_224'], + 'add_shared_rel_pos_bias': [], + 'add_mul_gamma_to_msa_mlp': + ['vit_base_patch16_224', 
'vit_large_patch16_224'], + 'remove_cls_token': [], + 'remove_abs_pos_emb': ['vit_base_patch16_224', 'vit_large_patch16_224'], + 'replace_mlp_GELU': [], + 'head': { + 'fc_norm': [], + 'return_all_tokens': [], + 'return_patch_tokens': [], + 'return_tokens_mean': [], + }, + 'remove_cls_token_in_forward': [], +} + +_CAE_diff = { + 'add_layer_norm_before_encoder': [], + 'add_relative_position_bias_in_msa': ['vit_base_patch16_224'], + 'add_shared_rel_pos_bias': [], + 'add_mul_gamma_to_msa_mlp': ['vit_base_patch16_224'], + 'remove_cls_token': [], + 'remove_abs_pos_emb': [], + 'replace_mlp_GELU': [], + 'head': { + 'fc_norm': [], # 3 x 197 x 786 + 'return_all_tokens': [], # 3 x 197 x 1000 + 'return_patch_tokens': [], # 3 x 196 x 1000 + 'return_tokens_mean': [], + }, + 'remove_cls_token_in_forward': [], +} + +_EVA_diff = { + 'add_layer_norm_before_encoder': [], + 'add_relative_position_bias_in_msa': [], + 'add_shared_rel_pos_bias': [], + 'add_mul_gamma_to_msa_mlp': [], + 'remove_cls_token': [], + 'remove_abs_pos_emb': [], + 'replace_mlp_GELU': [], + 'head': { + 'fc_norm': ['vit_huge_patch14'], + 'return_all_tokens': [], + 'return_patch_tokens': [], + 'return_tokens_mean': [], + }, + 'remove_cls_token_in_forward': [], +} + +_MAE_diff = { + 'add_layer_norm_before_encoder': [], + 'add_relative_position_bias_in_msa': [], + 'add_shared_rel_pos_bias': [], + 'add_mul_gamma_to_msa_mlp': [], + 'remove_cls_token': [], + 'remove_abs_pos_emb': [], + 'replace_mlp_GELU': [], + 'head': { + 'fc_norm': ['vit_huge_patch14'], + 'return_all_tokens': [], + 'return_patch_tokens': [], + 'return_tokens_mean': [], + }, + 'remove_cls_token_in_forward': [], +} + +trunc_normal_ = TruncatedNormal(std=.02) +normal_ = Normal +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) + + +def to_2tuple(x): + return tuple([x] * 2) + + +def drop_path(x, drop_prob=0., training=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + """ + if drop_prob == 0. or not training: + return x + keep_prob = paddle.to_tensor(1 - drop_prob, dtype=x.dtype) + shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) + random_tensor = keep_prob + paddle.rand(shape).astype(x.dtype) + random_tensor = paddle.floor(random_tensor) # binarize + output = x.divide(keep_prob) * random_tensor + return output + + +class DropPath(TheseusLayer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
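+    Layer wrapper around the functional drop_path above; drop_prob is stored
+    on the layer and the path is dropped only when self.training is True.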
+ """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class Identity(nn.Layer): + def __init__(self): + super(Identity, self).__init__() + + def forward(self, input): + return input + + +class QuickGELU(TheseusLayer): + def forward(self, x): + return x * nn.functional.sigmoid(1.702 * x) + + +class Mlp(TheseusLayer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0., + Linear=nn.Linear): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = Linear(in_features, hidden_features) + self.act = act_layer() if _model_size not in _model_diff[ + 'replace_mlp_GELU'] else QuickGELU() + self.fc2 = Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(TheseusLayer): + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + model_name=None, + window_size=None, + use_fused_attn=False, + Linear=nn.Linear): + super().__init__() + self._model_name = model_name + + if _model_size in _model_diff['add_relative_position_bias_in_msa']: + assert isinstance( + window_size, Iterable + ), f'window_size must be iterable, should not be {type(window_size)}' + self.window_size = window_size + self._register_relative_position_index( + window_size=window_size, + num_heads=num_heads, ) + + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = Linear(dim, dim * 3, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.use_fused_attn = use_fused_attn + # TODO: support mask + if use_fused_attn: + if hasattr(self, 'relative_position_bias_table') or (_model_size in _model_diff['add_shared_rel_pos_bias'] and rel_pos_bias is not None): + logger.warning("The fused attn don't support `relative_position` yet, so fused attn will not be used.") + self.use_fused_attn = False + + def _register_relative_position_index( + self, + window_size, + num_heads, ): + self.num_relative_distance = (2 * window_size[0] - 1) * ( + 2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = self.create_parameter( + [self.num_relative_distance, num_heads], + default_initializer=zeros_) # 2*Wh-1 * 2*Ww-1, nH + coords_h = paddle.arange(window_size[0]) + coords_w = paddle.arange(window_size[1]) + coords = paddle.stack(paddle.meshgrid( + [coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = paddle.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, + None] - coords_flatten[:, + None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.transpose( + [1, 2, 0]) # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + paddle.zeros((window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum( + -1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + 
relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", + relative_position_index) + + def forward(self, x, rel_pos_bias=None): + # B= x.shape[0] + N, C = x.shape[1], x.shape[2] + qkv = self.qkv(x).reshape((-1, N, 3, self.num_heads, C // self.num_heads)) + + if not self.use_fused_attn: + qkv = qkv.transpose((2, 0, 3, 1, 4)) + q, k, v = qkv[0], qkv[1], qkv[2] + attn = (q.matmul(k.transpose((0, 1, 3, 2)))) * self.scale + if hasattr(self, 'relative_position_bias_table'): + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.reshape([-1])].reshape([ + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1]) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.transpose( + [2, 0, 1]) # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if _model_size in _model_diff[ + 'add_shared_rel_pos_bias'] and rel_pos_bias is not None: + attn = attn + rel_pos_bias + + attn = nn.functional.softmax(attn, axis=-1) + attn = self.attn_drop(attn).matmul(v) + attn = attn.transpose((0, 2, 1, 3)) + else: + qkv = qkv.transpose((2, 0, 1, 3, 4)) + q, k, v = qkv[0], qkv[1], qkv[2] + # TODO: support mask + attn = paddle.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.) + + x = attn.reshape((-1, N, C)) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(TheseusLayer): + def __init__(self, + dim, + num_heads, + model_name, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + init_values=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer='nn.LayerNorm', + epsilon=1e-5, + window_size=None, + use_fused_attn=False, + use_fused_linear=False): + super().__init__() + global _model_size + global _model_diff + self._model_name = model_name + if isinstance(norm_layer, str): + self.norm1 = eval(norm_layer)(dim, epsilon=epsilon) + elif isinstance(norm_layer, Callable): + self.norm1 = norm_layer(dim) + else: + raise TypeError( + "The norm_layer must be str or paddle.nn.layer.Layer class") + Linear = paddle.incubate.nn.FusedLinear if use_fused_linear else nn.Linear + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + model_name=self._model_name, + window_size=window_size, + use_fused_attn=use_fused_attn, + Linear=Linear) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else Identity() + + if _model_size in _model_diff['add_mul_gamma_to_msa_mlp']: + self.gamma_1 = self.create_parameter( + [dim], + default_initializer=nn.initializer.Constant(value=init_values)) + self.gamma_2 = self.create_parameter( + [dim], + default_initializer=nn.initializer.Constant(value=init_values)) + else: + self.gamma_1 = None + self.gamma_2 = None + + if isinstance(norm_layer, str): + self.norm2 = eval(norm_layer)(dim, epsilon=epsilon) + elif isinstance(norm_layer, Callable): + self.norm2 = norm_layer(dim) + else: + raise TypeError( + "The norm_layer must be str or paddle.nn.layer.Layer class") + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + Linear=Linear) + + def forward(self, x, rel_pos_bias=None): + if self.gamma_1 is not None: + x = x + self.drop_path(self.gamma_1 * self.attn( + self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + else: + atten_result = self.drop_path( + self.attn( + self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + atten_result + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class RelativePositionBias(TheseusLayer): + def __init__(self, window_size, num_heads): + super().__init__() + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * ( + 2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = self.create_parameter( + [self.num_relative_distance, num_heads], + default_initializer=zeros_) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = paddle.arange(window_size[0]) + coords_w = paddle.arange(window_size[1]) + coords = paddle.stack(paddle.meshgrid( + [coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = paddle.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, + None] - coords_flatten[:, + None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.transpose( + [1, 2, 0]) # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + paddle.zeros((window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum( + -1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", + relative_position_index) + + # trunc_normal_(self.relative_position_bias_table, std=.02) + + def forward(self): + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.reshape([-1])].reshape([ + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1]) # Wh*Ww,Wh*Ww,nH + return relative_position_bias.transpose([2, 0, 1]) # nH, Wh*Ww, Wh*Ww + + +class PatchEmbed(TheseusLayer): + """ Image to Patch Embedding + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + conv_bias=False): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * \ + (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = 
patch_size + self.num_patches = num_patches + if conv_bias: + self.proj = nn.Conv2D( + in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + else: + self.proj = nn.Conv2D( + in_chans, + embed_dim, + kernel_size=patch_size, + stride=patch_size, + bias_attr=False) + + def forward(self, x): + B, C, H, W = x.shape + x, _ = pading_for_not_divisible(x, H, W, patch_size=self.patch_size) + + x = self.proj(x) + _, _, H, W = x.shape + + x = x.flatten(2).transpose((0, 2, 1)) + return x, (H, W) + + +class Head(TheseusLayer): + def __init__(self, embed_dim, class_num, norm_layer, model_size, setting): + super().__init__() + self.model_size = model_size + self.setting = setting + + self.fc_norm = eval(norm_layer)( + embed_dim, + epsilon=1e-5) if model_size in setting['fc_norm'] else None + self.return_all_tokens = model_size in setting['return_all_tokens'] + self.return_patch_tokens = model_size in setting['return_patch_tokens'] + self.return_tokens_mean = model_size in setting['return_tokens_mean'] + + self.fc_head = nn.Linear(embed_dim, + class_num) if class_num > 0 else Identity() + + def forward(self, x): + if self.fc_norm is not None: + if self.return_all_tokens: + x = self.fc_norm(x) + else: + t = x[:, 1:] + if self.return_patch_tokens: + x = self.fc_norm(t) + else: + x = self.fc_norm(t.mean(1)) + elif isinstance(self.fc_head, Identity): + if self.return_all_tokens: + x = x + elif self.return_patch_tokens: + x = x[:, 1:] + elif self.return_tokens_mean: + x = x.mean(1) + else: + x = x[:, 0] + else: + x = x + return self.fc_head(x) + + +class VisionTransformer(TheseusLayer): + """ Vision Transformer with support for patch input + """ + + def __init__(self, + model_name, + img_size=224, + patch_size=16, + in_chans=3, + class_num=1000, + embed_dim=768, + output_dim=512, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=False, + qk_scale=None, + image_project=False, + conv_bias=False, + feature_frame=False, + hugging_face_framework=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer='nn.LayerNorm', + epsilon=1e-5, + head_init_scale=0.001, + **kwargs): + super().__init__() + global _model_diff + global _model_size + _model_split = model_name.split('_') + self.model_name = _model_split[0] + self.feature_frame = feature_frame + self.model_size = '_'.join(_model_split[1:]) + _model_size = self.model_size + _model_diff = eval(f'_{self.model_name}_diff') + + self.class_num = class_num + self.return_embed = kwargs.get('return_embed', False) + self.return_mean_embed = kwargs.get('return_mean_embed', False) and self.return_embed + self.num_features = self.embed_dim = embed_dim + use_fused_attn = check_support_fused_op(kwargs.get('use_fused_attn', False)) + use_fused_linear = check_support_fused_op(kwargs.get('use_fused_linear', False)) + _img_size = to_2tuple(img_size) + _patch_size = to_2tuple(patch_size) + self.window_size = (_img_size[0] // _patch_size[0], + _img_size[1] // _patch_size[1]) + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + conv_bias=conv_bias) + num_patches = self.patch_embed.num_patches + + if _model_size in _model_diff['add_shared_rel_pos_bias']: + self.rel_pos_bias = RelativePositionBias( + window_size=self.window_size, num_heads=num_heads) + + #self.ln_pre = nn.LayerNorm(embed_dim) if _model_size in _model_diff[ + # 'add_layer_norm_before_encoder'] else nn.Identity() + + if _model_size in _model_diff['remove_cls_token'] or self.feature_frame: + self.pos_embed = 
self.create_parameter( + shape=(1, num_patches, embed_dim), default_initializer=zeros_) + self.cls_token = None + else: + self.pos_embed = self.create_parameter( + shape=(1, num_patches + 1, embed_dim), + default_initializer=zeros_) + self.cls_token = self.create_parameter( + shape=(1, 1, embed_dim), default_initializer=zeros_) + self.add_parameter("cls_token", self.cls_token) + + self.pos_drop = nn.Dropout(p=drop_rate) + # for LaClip + if image_project: + image_projection = self.create_parameter( + shape=(img_size, embed_dim), + default_initializer=Assign( + paddle.empty((img_size, embed_dim)))) + self.add_parameter("image_projection", image_projection) + else: + self.image_projection = None + self.hugging_face_framework = hugging_face_framework + #for path size hugging face plan + if hugging_face_framework: + self.ln_pre = nn.LayerNorm(embed_dim) + self.add_parameter("pos_embed", self.pos_embed) + else: + self.ln_pre = nn.Identity() if _model_size not in _model_diff[ + 'add_layer_norm_before_encoder'] else nn.LayerNorm(embed_dim) + if _model_size in _model_diff['remove_abs_pos_emb']: + self.pos_embed = None + else: + self.add_parameter("pos_embed", self.pos_embed) + + #proj + proj = self.create_parameter( + shape=(embed_dim, ), + default_initializer=Assign((embed_dim**-0.5) * paddle.randn(( + (embed_dim, output_dim))))) + self.add_parameter("proj", proj) + + dpr = np.linspace(0, drop_path_rate, depth) + + self.blocks = nn.LayerList([ + Block( + dim=embed_dim, + num_heads=num_heads, + model_name=self.model_name, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + epsilon=epsilon, + window_size=self.window_size, + use_fused_attn=use_fused_attn, + use_fused_linear=use_fused_linear) for i in range(depth) + ]) + + self.norm = eval(norm_layer)(embed_dim, epsilon=epsilon) + + self.head = Identity() if self.return_embed else Head( + embed_dim, class_num, norm_layer, self.model_size, + _model_diff['head']) + + if self.pos_embed is not None: + trunc_normal_(self.pos_embed) + if not _model_size in _model_diff['remove_cls_token'] and self.feature_frame == False: + trunc_normal_(self.cls_token) + + self.apply(self._init_weights) + + if feature_frame: + self.feature = nn.Sequential( + nn.Linear(embed_dim * self.patch_embed.num_patches, embed_dim,bias_attr=False), + nn.BatchNorm1D(embed_dim, epsilon=2e-5), + nn.Linear(embed_dim, output_dim, bias_attr=False), + nn.BatchNorm1D(output_dim, epsilon=2e-5)) + self.pos_drop = Identity() + self.cls_token = None + self.image_projection = Identity() + self.proj = None + self.ln_pre = Identity() + + if head_init_scale != 1: + if not self.return_embed and class_num > 0: + self.head.fc_head.weight.set_value( + self.head.fc_head.weight * + paddle.to_tensor(head_init_scale)) + self.head.fc_head.bias.set_value( + self.head.fc_head.bias * paddle.to_tensor(head_init_scale)) + else: + logger.warning( + "Because the head or head.fc_head of ViT is Identity() class, the argument head_init_scale is invalid." 
+ ) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def get_num_layers(self): + return len(self.blocks) + + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def forward_features(self, x): + B = x.shape[0] + x, output_dimensions = self.patch_embed(x) + if not _model_size in _model_diff['remove_cls_token'] and (self.feature_frame==False): + cls_tokens = self.cls_token.expand((B, -1, -1)) + x = paddle.concat((cls_tokens, x), axis=1) + + if self.pos_embed is not None: + x = x + resize_pos_embed(self.pos_embed, self.window_size, + output_dimensions) + + x = self.ln_pre(x) + x = self.pos_drop(x) + rel_pos_bias = self.rel_pos_bias() if hasattr(self, + 'rel_pos_bias') else None + for blk in self.blocks: + x = blk(x, rel_pos_bias=rel_pos_bias) + + if _model_size in _model_diff['remove_cls_token_in_forward']: + x = x[:, 1:, :] + if self.hugging_face_framework or self.return_embed == False: + pooled, token = x[:, 0], x[:, 1:] + else: + pooled = x + x = self.norm(pooled) + return x + + def forward(self, x): + x = self.forward_features(x) + + if self.feature_frame: + B, L, C = x.shape + x = paddle.reshape(x,[B, -1]) + x = self.feature(x) + + x = self.head(x) + + if self.proj is not None and isinstance(self.head,Identity): + x = x @self.proj + if self.return_mean_embed: + x = x.mean(1) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def CLIP_vit_base_patch32_224(pretrained=False, use_ssld=False, **kwargs): + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + img_size=224, + patch_size=32, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-5, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def CLIP_vit_base_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + img_size=224, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-5, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def React_vit_base_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model_name = "React_vit_base_patch16_224" + model = VisionTransformer( + model_name=model_name.replace("React","CLIP"), + img_size=224, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + hugging_face_framework=True, + epsilon=1e-5, + **kwargs, ) + return model + + +def React_vit_base_patch32_224(pretrained=False, use_ssld=False, **kwargs): + model_name = "React_vit_base_patch32_224" + model = VisionTransformer( + model_name=model_name.replace("React","CLIP"), + img_size=224, + patch_size=32, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + hugging_face_framework=True, + epsilon=1e-5, + **kwargs, ) + return model + + +def LaCLIP_vit_base_patch32_224(pretrained=False, use_ssld=False, **kwargs): + model_name = "LaCLIP_vit_base_patch32_224" + model = VisionTransformer( + model_name=model_name.replace("LaCLIP","CLIP"), + img_size=224, + patch_size=32, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + hugging_face_framework=True, + epsilon=1e-5, + **kwargs, ) + + return model + + +def LaCLIP_vit_base_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model_name = "LaCLIP_vit_base_patch16_224" + model = VisionTransformer( + model_name=model_name.replace("LaCLIP","CLIP"), + img_size=224, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + hugging_face_framework=True, + epsilon=1e-5, + **kwargs, ) + + return model + + +def Unicom_vit_base_patch32_224(pretrained=False, use_ssld=False, **kwargs): + model_name = "Unicom_vit_base_patch32_224" + model = VisionTransformer( + model_name=model_name.replace("Unicom","CLIP"), + img_size=224, + patch_size=32, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=False, + conv_bias=True, + feature_frame=True, + hugging_face_framework=False, + image_project=False, + epsilon=1e-5, + **kwargs, ) + + return model + + +def Unicom_vit_base_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model_name = "Unicom_vit_base_patch16_224" + model = VisionTransformer( + model_name=model_name.replace("Unicom","CLIP"), + img_size=224, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=False, + hugging_face_framework=False, + image_project=False, + feature_frame=True, + conv_bias=True, + epsilon=1e-5, + **kwargs, ) + + return model + + +def CLIP_vit_base_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + img_size=224, + patch_size=16, + 
embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-5, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def CLIP_vit_large_patch14_336(pretrained=False, use_ssld=False, **kwargs): + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + img_size=336, + patch_size=14, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-5, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def CLIP_vit_large_patch14_224(pretrained=False, use_ssld=False, **kwargs): + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + img_size=224, + patch_size=14, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-5, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def BEiTv2_vit_base_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + img_size=224, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def BEiTv2_vit_large_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + img_size=224, + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def MOCOV3_vit_small(pretrained=False, use_ssld=False, **kwargs): + """ + vit small in mocov3 + """ + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + patch_size=16, + embed_dim=384, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def MOCOV3_vit_base(pretrained=False, use_ssld=False, **kwargs): + """ + vit base in mocov3 + """ + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def MAE_vit_base_patch16(pretrained=False, use_ssld=False, **kwargs): + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def MAE_vit_large_patch16(pretrained=False, use_ssld=False, **kwargs): + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def MAE_vit_huge_patch14(pretrained=False, use_ssld=False, **kwargs): + 
model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + patch_size=14, + embed_dim=1280, + depth=32, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def EVA_vit_giant_patch14(pretrained=False, use_ssld=False, **kwargs): + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + patch_size=14, + embed_dim=1408, + depth=40, + num_heads=16, + init_values=None, + mlp_ratio=4.3637, + qkv_bias=True, + class_num=0, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model + + +def CAE_vit_base_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model_name = sys._getframe().f_code.co_name + model = VisionTransformer( + model_name=model_name, + img_size=224, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs, ) + _load_pretrained( + pretrained, model, MODEL_URLS[model_name], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/ghostnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/ghostnet.py new file mode 100644 index 000000000..8a3960827 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/ghostnet.py @@ -0,0 +1,364 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
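(Editor's sketch, not part of the patch.) The CLIP/BEiT/MAE/CAE factory functions above all derive `model_name` from the enclosing function's own name via `sys._getframe().f_code.co_name` and use it as the key into `MODEL_URLS`. A minimal standalone illustration of that idiom; the `Demo_*` name and URL below are placeholders, not entries from the patch:

import sys

# Placeholder mapping mirroring the MODEL_URLS idiom above: the function name doubles
# as the dict key, so adding a new variant only needs a new entry plus a new factory.
MODEL_URLS = {"Demo_vit_base_patch16_224": "https://example.com/Demo_vit_base_patch16_224.pdparams"}

def Demo_vit_base_patch16_224(pretrained=False):
    model_name = sys._getframe().f_code.co_name   # "Demo_vit_base_patch16_224"
    url = MODEL_URLS[model_name]
    return model_name, (url if pretrained else None)

print(Demo_vit_base_patch16_224(pretrained=True))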
+ +# Code was based on https://github.com/huawei-noah/CV-Backbones/tree/master/ghostnet_pytorch +# reference: https://arxiv.org/abs/1911.11907 + +import math +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, AdaptiveAvgPool2D, Linear +from paddle.regularizer import L2Decay +from paddle.nn.initializer import Uniform, KaimingNormal + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "GhostNet_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x0_5_pretrained.pdparams", + "GhostNet_x1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_0_pretrained.pdparams", + "GhostNet_x1_3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_3_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + act="relu", + name=None): + super(ConvBNLayer, self).__init__() + self._conv = Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr( + initializer=KaimingNormal(), name=name + "_weights"), + bias_attr=False) + bn_name = name + "_bn" + + self._batch_norm = BatchNorm( + num_channels=out_channels, + act=act, + param_attr=ParamAttr( + name=bn_name + "_scale", regularizer=L2Decay(0.0)), + bias_attr=ParamAttr( + name=bn_name + "_offset", regularizer=L2Decay(0.0)), + moving_mean_name=bn_name + "_mean", + moving_variance_name=bn_name + "_variance") + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class SEBlock(nn.Layer): + def __init__(self, num_channels, reduction_ratio=4, name=None): + super(SEBlock, self).__init__() + self.pool2d_gap = AdaptiveAvgPool2D(1) + self._num_channels = num_channels + stdv = 1.0 / math.sqrt(num_channels * 1.0) + med_ch = num_channels // reduction_ratio + self.squeeze = Linear( + num_channels, + med_ch, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_1_weights"), + bias_attr=ParamAttr(name=name + "_1_offset")) + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.excitation = Linear( + med_ch, + num_channels, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_2_weights"), + bias_attr=ParamAttr(name=name + "_2_offset")) + + def forward(self, inputs): + pool = self.pool2d_gap(inputs) + pool = paddle.squeeze(pool, axis=[2, 3]) + squeeze = self.squeeze(pool) + squeeze = F.relu(squeeze) + excitation = self.excitation(squeeze) + excitation = paddle.clip(x=excitation, min=0, max=1) + excitation = paddle.unsqueeze(excitation, axis=[2, 3]) + out = paddle.multiply(inputs, excitation) + return out + + +class GhostModule(nn.Layer): + def __init__(self, + in_channels, + output_channels, + kernel_size=1, + ratio=2, + dw_size=3, + stride=1, + relu=True, + name=None): + super(GhostModule, self).__init__() + init_channels = int(math.ceil(output_channels / ratio)) + new_channels = int(init_channels * (ratio - 1)) + self.primary_conv = ConvBNLayer( + in_channels=in_channels, + out_channels=init_channels, + kernel_size=kernel_size, + stride=stride, + groups=1, + act="relu" if relu else None, + name=name + "_primary_conv") + self.cheap_operation = ConvBNLayer( + in_channels=init_channels, + out_channels=new_channels, + kernel_size=dw_size, + stride=1, + groups=init_channels, 
+ act="relu" if relu else None, + name=name + "_cheap_operation") + + def forward(self, inputs): + x = self.primary_conv(inputs) + y = self.cheap_operation(x) + out = paddle.concat([x, y], axis=1) + return out + + +class GhostBottleneck(nn.Layer): + def __init__(self, + in_channels, + hidden_dim, + output_channels, + kernel_size, + stride, + use_se, + name=None): + super(GhostBottleneck, self).__init__() + self._stride = stride + self._use_se = use_se + self._num_channels = in_channels + self._output_channels = output_channels + self.ghost_module_1 = GhostModule( + in_channels=in_channels, + output_channels=hidden_dim, + kernel_size=1, + stride=1, + relu=True, + name=name + "_ghost_module_1") + if stride == 2: + self.depthwise_conv = ConvBNLayer( + in_channels=hidden_dim, + out_channels=hidden_dim, + kernel_size=kernel_size, + stride=stride, + groups=hidden_dim, + act=None, + name=name + + "_depthwise_depthwise" # looks strange due to an old typo, will be fixed later. + ) + if use_se: + self.se_block = SEBlock(num_channels=hidden_dim, name=name + "_se") + self.ghost_module_2 = GhostModule( + in_channels=hidden_dim, + output_channels=output_channels, + kernel_size=1, + relu=False, + name=name + "_ghost_module_2") + if stride != 1 or in_channels != output_channels: + self.shortcut_depthwise = ConvBNLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=kernel_size, + stride=stride, + groups=in_channels, + act=None, + name=name + + "_shortcut_depthwise_depthwise" # looks strange due to an old typo, will be fixed later. + ) + self.shortcut_conv = ConvBNLayer( + in_channels=in_channels, + out_channels=output_channels, + kernel_size=1, + stride=1, + groups=1, + act=None, + name=name + "_shortcut_conv") + + def forward(self, inputs): + x = self.ghost_module_1(inputs) + if self._stride == 2: + x = self.depthwise_conv(x) + if self._use_se: + x = self.se_block(x) + x = self.ghost_module_2(x) + if self._stride == 1 and self._num_channels == self._output_channels: + shortcut = inputs + else: + shortcut = self.shortcut_depthwise(inputs) + shortcut = self.shortcut_conv(shortcut) + return paddle.add(x=x, y=shortcut) + + +class GhostNet(nn.Layer): + def __init__(self, scale, class_num=1000): + super(GhostNet, self).__init__() + self.cfgs = [ + # k, t, c, SE, s + [3, 16, 16, 0, 1], + [3, 48, 24, 0, 2], + [3, 72, 24, 0, 1], + [5, 72, 40, 1, 2], + [5, 120, 40, 1, 1], + [3, 240, 80, 0, 2], + [3, 200, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 480, 112, 1, 1], + [3, 672, 112, 1, 1], + [5, 672, 160, 1, 2], + [5, 960, 160, 0, 1], + [5, 960, 160, 1, 1], + [5, 960, 160, 0, 1], + [5, 960, 160, 1, 1] + ] + self.scale = scale + output_channels = int(self._make_divisible(16 * self.scale, 4)) + self.conv1 = ConvBNLayer( + in_channels=3, + out_channels=output_channels, + kernel_size=3, + stride=2, + groups=1, + act="relu", + name="conv1") + # build inverted residual blocks + idx = 0 + self.ghost_bottleneck_list = [] + for k, exp_size, c, use_se, s in self.cfgs: + in_channels = output_channels + output_channels = int(self._make_divisible(c * self.scale, 4)) + hidden_dim = int(self._make_divisible(exp_size * self.scale, 4)) + ghost_bottleneck = self.add_sublayer( + name="_ghostbottleneck_" + str(idx), + sublayer=GhostBottleneck( + in_channels=in_channels, + hidden_dim=hidden_dim, + output_channels=output_channels, + kernel_size=k, + stride=s, + use_se=use_se, + name="_ghostbottleneck_" + str(idx))) + self.ghost_bottleneck_list.append(ghost_bottleneck) + idx += 1 + # build last 
several layers + in_channels = output_channels + output_channels = int(self._make_divisible(exp_size * self.scale, 4)) + self.conv_last = ConvBNLayer( + in_channels=in_channels, + out_channels=output_channels, + kernel_size=1, + stride=1, + groups=1, + act="relu", + name="conv_last") + self.pool2d_gap = AdaptiveAvgPool2D(1) + in_channels = output_channels + self._fc0_output_channels = 1280 + self.fc_0 = ConvBNLayer( + in_channels=in_channels, + out_channels=self._fc0_output_channels, + kernel_size=1, + stride=1, + act="relu", + name="fc_0") + self.dropout = nn.Dropout(p=0.2) + stdv = 1.0 / math.sqrt(self._fc0_output_channels * 1.0) + self.fc_1 = Linear( + self._fc0_output_channels, + class_num, + weight_attr=ParamAttr( + name="fc_1_weights", initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr(name="fc_1_offset")) + + def forward(self, inputs): + x = self.conv1(inputs) + for ghost_bottleneck in self.ghost_bottleneck_list: + x = ghost_bottleneck(x) + x = self.conv_last(x) + x = self.pool2d_gap(x) + x = self.fc_0(x) + x = self.dropout(x) + x = paddle.reshape(x, shape=[-1, self._fc0_output_channels]) + x = self.fc_1(x) + return x + + def _make_divisible(self, v, divisor, min_value=None): + """ + This function is taken from the original tf repo. + It ensures that all layers have a channel number that is divisible by 8 + It can be seen here: + https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + """ + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def GhostNet_x0_5(pretrained=False, use_ssld=False, **kwargs): + model = GhostNet(scale=0.5, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["GhostNet_x0_5"], use_ssld=use_ssld) + return model + + +def GhostNet_x1_0(pretrained=False, use_ssld=False, **kwargs): + model = GhostNet(scale=1.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["GhostNet_x1_0"], use_ssld=use_ssld) + return model + + +def GhostNet_x1_3(pretrained=False, use_ssld=False, **kwargs): + model = GhostNet(scale=1.3, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["GhostNet_x1_3"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/googlenet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/googlenet.py new file mode 100644 index 000000000..fcd52c923 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/googlenet.py @@ -0,0 +1,365 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
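(Editor's sketch, not part of the patch.) GhostNet scales every stage width by `scale` and then snaps it to a multiple of 4 with `_make_divisible`; the 10% guard keeps the rounded width from falling too far below the requested one. A small standalone check of that rounding rule, mirroring the helper above:

def make_divisible(v, divisor=4, min_value=None):
    # Same rounding rule as GhostNet._make_divisible above.
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:  # never round down by more than 10%
        new_v += divisor
    return new_v

# e.g. GhostNet_x1_3 scales the 16-channel stem: 16 * 1.3 = 20.8 -> 20
print(make_divisible(16 * 1.3))   # 20
print(make_divisible(160 * 0.5))  # 80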
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1409.4842 + +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "GoogLeNet": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GoogLeNet_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +def xavier(channels, filter_size, name): + stdv = (3.0 / (filter_size**2 * channels))**0.5 + param_attr = ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_weights") + return param_attr + + +class ConvLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None, + data_format="NCHW"): + super(ConvLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False, + data_format=data_format) + + def forward(self, inputs): + y = self._conv(inputs) + return y + + +class Inception(nn.Layer): + def __init__(self, + input_channels, + output_channels, + filter1, + filter3R, + filter3, + filter5R, + filter5, + proj, + name=None, + data_format="NCHW"): + super(Inception, self).__init__() + self.data_format = data_format + + self._conv1 = ConvLayer( + input_channels, + filter1, + 1, + name="inception_" + name + "_1x1", + data_format=data_format) + self._conv3r = ConvLayer( + input_channels, + filter3R, + 1, + name="inception_" + name + "_3x3_reduce", + data_format=data_format) + self._conv3 = ConvLayer( + filter3R, + filter3, + 3, + name="inception_" + name + "_3x3", + data_format=data_format) + self._conv5r = ConvLayer( + input_channels, + filter5R, + 1, + name="inception_" + name + "_5x5_reduce", + data_format=data_format) + self._conv5 = ConvLayer( + filter5R, + filter5, + 5, + name="inception_" + name + "_5x5", + data_format=data_format) + self._pool = MaxPool2D( + kernel_size=3, stride=1, padding=1, data_format=data_format) + + self._convprj = ConvLayer( + input_channels, + proj, + 1, + name="inception_" + name + "_3x3_proj", + data_format=data_format) + + def forward(self, inputs): + conv1 = self._conv1(inputs) + + conv3r = self._conv3r(inputs) + conv3 = self._conv3(conv3r) + + conv5r = self._conv5r(inputs) + conv5 = self._conv5(conv5r) + + pool = self._pool(inputs) + convprj = self._convprj(pool) + + if self.data_format == "NHWC": + cat = paddle.concat([conv1, conv3, conv5, convprj], axis=3) + else: + cat = paddle.concat([conv1, conv3, conv5, convprj], axis=1) + cat = F.relu(cat) + return cat + + +class GoogLeNetDY(nn.Layer): + def __init__(self, class_num=1000, data_format="NCHW"): + super(GoogLeNetDY, self).__init__() + self.data_format = data_format + self._conv = ConvLayer( + 3, 64, 7, 2, name="conv1", data_format=data_format) + self._pool = MaxPool2D( + 
kernel_size=3, stride=2, data_format=data_format) + self._conv_1 = ConvLayer( + 64, 64, 1, name="conv2_1x1", data_format=data_format) + self._conv_2 = ConvLayer( + 64, 192, 3, name="conv2_3x3", data_format=data_format) + + self._ince3a = Inception( + 192, + 192, + 64, + 96, + 128, + 16, + 32, + 32, + name="ince3a", + data_format=data_format) + self._ince3b = Inception( + 256, + 256, + 128, + 128, + 192, + 32, + 96, + 64, + name="ince3b", + data_format=data_format) + + self._ince4a = Inception( + 480, + 480, + 192, + 96, + 208, + 16, + 48, + 64, + name="ince4a", + data_format=data_format) + self._ince4b = Inception( + 512, + 512, + 160, + 112, + 224, + 24, + 64, + 64, + name="ince4b", + data_format=data_format) + self._ince4c = Inception( + 512, + 512, + 128, + 128, + 256, + 24, + 64, + 64, + name="ince4c", + data_format=data_format) + self._ince4d = Inception( + 512, + 512, + 112, + 144, + 288, + 32, + 64, + 64, + name="ince4d", + data_format=data_format) + self._ince4e = Inception( + 528, + 528, + 256, + 160, + 320, + 32, + 128, + 128, + name="ince4e", + data_format=data_format) + + self._ince5a = Inception( + 832, + 832, + 256, + 160, + 320, + 32, + 128, + 128, + name="ince5a", + data_format=data_format) + self._ince5b = Inception( + 832, + 832, + 384, + 192, + 384, + 48, + 128, + 128, + name="ince5b", + data_format=data_format) + + self._pool_5 = AdaptiveAvgPool2D(1, data_format=data_format) + + self._drop = Dropout(p=0.4, mode="downscale_in_infer") + self.flatten = nn.Flatten() + self._fc_out = Linear( + 1024, + class_num, + weight_attr=xavier(1024, 1, "out"), + bias_attr=ParamAttr(name="out_offset")) + self._pool_o1 = AvgPool2D( + kernel_size=5, stride=3, data_format=data_format) + self._conv_o1 = ConvLayer( + 512, 128, 1, name="conv_o1", data_format=data_format) + self._fc_o1 = Linear( + 1152, + 1024, + weight_attr=xavier(2048, 1, "fc_o1"), + bias_attr=ParamAttr(name="fc_o1_offset")) + self._drop_o1 = Dropout(p=0.7, mode="downscale_in_infer") + self._out1 = Linear( + 1024, + class_num, + weight_attr=xavier(1024, 1, "out1"), + bias_attr=ParamAttr(name="out1_offset")) + self._pool_o2 = AvgPool2D( + kernel_size=5, stride=3, data_format=data_format) + self._conv_o2 = ConvLayer( + 528, 128, 1, name="conv_o2", data_format=data_format) + self._fc_o2 = Linear( + 1152, + 1024, + weight_attr=xavier(2048, 1, "fc_o2"), + bias_attr=ParamAttr(name="fc_o2_offset")) + self._drop_o2 = Dropout(p=0.7, mode="downscale_in_infer") + self._out2 = Linear( + 1024, + class_num, + weight_attr=xavier(1024, 1, "out2"), + bias_attr=ParamAttr(name="out2_offset")) + + def forward(self, inputs): + if self.data_format == "NHWC": + inputs = paddle.transpose(inputs, [0, 2, 3, 1]) + inputs.stop_gradient = True + x = self._conv(inputs) + x = self._pool(x) + x = self._conv_1(x) + x = self._conv_2(x) + x = self._pool(x) + + x = self._ince3a(x) + x = self._ince3b(x) + x = self._pool(x) + + ince4a = self._ince4a(x) + x = self._ince4b(ince4a) + x = self._ince4c(x) + ince4d = self._ince4d(x) + x = self._ince4e(ince4d) + x = self._pool(x) + + x = self._ince5a(x) + ince5b = self._ince5b(x) + + x = self._pool_5(ince5b) + x = self._drop(x) + x = self.flatten(x) + out = self._fc_out(x) + + x = self._pool_o1(ince4a) + x = self._conv_o1(x) + x = self.flatten(x) + x = self._fc_o1(x) + x = F.relu(x) + x = self._drop_o1(x) + out1 = self._out1(x) + + x = self._pool_o2(ince4d) + x = self._conv_o2(x) + x = self.flatten(x) + x = self._fc_o2(x) + x = self._drop_o2(x) + out2 = self._out2(x) + return [out, out1, out2] + + +def 
_load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def GoogLeNet(pretrained=False, use_ssld=False, **kwargs): + model = GoogLeNetDY(**kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["GoogLeNet"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/hardnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/hardnet.py new file mode 100644 index 000000000..fa3399e3e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/hardnet.py @@ -0,0 +1,294 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was based on https://github.com/PingoLH/Pytorch-HarDNet +# reference: https://arxiv.org/abs/1909.00948 + +import paddle +import paddle.nn as nn + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + 'HarDNet39_ds': + 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet39_ds_pretrained.pdparams', + 'HarDNet68_ds': + 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_ds_pretrained.pdparams', + 'HarDNet68': + 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_pretrained.pdparams', + 'HarDNet85': + 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet85_pretrained.pdparams' +} + +__all__ = MODEL_URLS.keys() + + +def ConvLayer(in_channels, + out_channels, + kernel_size=3, + stride=1, + bias_attr=False): + layer = nn.Sequential( + ('conv', nn.Conv2D( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=kernel_size // 2, + groups=1, + bias_attr=bias_attr)), ('norm', nn.BatchNorm2D(out_channels)), + ('relu', nn.ReLU6())) + return layer + + +def DWConvLayer(in_channels, + out_channels, + kernel_size=3, + stride=1, + bias_attr=False): + layer = nn.Sequential( + ('dwconv', nn.Conv2D( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=1, + groups=out_channels, + bias_attr=bias_attr)), ('norm', nn.BatchNorm2D(out_channels))) + return layer + + +def CombConvLayer(in_channels, out_channels, kernel_size=1, stride=1): + layer = nn.Sequential( + ('layer1', ConvLayer( + in_channels, out_channels, kernel_size=kernel_size)), + ('layer2', DWConvLayer( + out_channels, out_channels, stride=stride))) + return layer + + +class HarDBlock(nn.Layer): + def __init__(self, + in_channels, + growth_rate, + grmul, + n_layers, + keepBase=False, + residual_out=False, + dwconv=False): + super().__init__() + self.keepBase = keepBase + self.links = [] + layers_ = [] + self.out_channels = 0 # if upsample else in_channels + for i in range(n_layers): + 
outch, inch, link = self.get_link(i + 1, in_channels, growth_rate, + grmul) + self.links.append(link) + if dwconv: + layers_.append(CombConvLayer(inch, outch)) + else: + layers_.append(ConvLayer(inch, outch)) + + if (i % 2 == 0) or (i == n_layers - 1): + self.out_channels += outch + # print("Blk out =",self.out_channels) + self.layers = nn.LayerList(layers_) + + def get_link(self, layer, base_ch, growth_rate, grmul): + if layer == 0: + return base_ch, 0, [] + out_channels = growth_rate + + link = [] + for i in range(10): + dv = 2**i + if layer % dv == 0: + k = layer - dv + link.append(k) + if i > 0: + out_channels *= grmul + + out_channels = int(int(out_channels + 1) / 2) * 2 + in_channels = 0 + + for i in link: + ch, _, _ = self.get_link(i, base_ch, growth_rate, grmul) + in_channels += ch + + return out_channels, in_channels, link + + def forward(self, x): + layers_ = [x] + + for layer in range(len(self.layers)): + link = self.links[layer] + tin = [] + for i in link: + tin.append(layers_[i]) + if len(tin) > 1: + x = paddle.concat(tin, 1) + else: + x = tin[0] + out = self.layers[layer](x) + layers_.append(out) + + t = len(layers_) + out_ = [] + for i in range(t): + if (i == 0 and self.keepBase) or (i == t - 1) or (i % 2 == 1): + out_.append(layers_[i]) + out = paddle.concat(out_, 1) + + return out + + +class HarDNet(nn.Layer): + def __init__(self, + depth_wise=False, + arch=85, + class_num=1000, + with_pool=True): + super().__init__() + first_ch = [32, 64] + second_kernel = 3 + max_pool = True + grmul = 1.7 + drop_rate = 0.1 + + # HarDNet68 + ch_list = [128, 256, 320, 640, 1024] + gr = [14, 16, 20, 40, 160] + n_layers = [8, 16, 16, 16, 4] + downSamp = [1, 0, 1, 1, 0] + + if arch == 85: + # HarDNet85 + first_ch = [48, 96] + ch_list = [192, 256, 320, 480, 720, 1280] + gr = [24, 24, 28, 36, 48, 256] + n_layers = [8, 16, 16, 16, 16, 4] + downSamp = [1, 0, 1, 0, 1, 0] + drop_rate = 0.2 + + elif arch == 39: + # HarDNet39 + first_ch = [24, 48] + ch_list = [96, 320, 640, 1024] + grmul = 1.6 + gr = [16, 20, 64, 160] + n_layers = [4, 16, 8, 4] + downSamp = [1, 1, 1, 0] + + if depth_wise: + second_kernel = 1 + max_pool = False + drop_rate = 0.05 + + blks = len(n_layers) + self.base = nn.LayerList([]) + + # First Layer: Standard Conv3x3, Stride=2 + self.base.append( + ConvLayer( + in_channels=3, + out_channels=first_ch[0], + kernel_size=3, + stride=2, + bias_attr=False)) + + # Second Layer + self.base.append( + ConvLayer( + first_ch[0], first_ch[1], kernel_size=second_kernel)) + + # Maxpooling or DWConv3x3 downsampling + if max_pool: + self.base.append(nn.MaxPool2D(kernel_size=3, stride=2, padding=1)) + else: + self.base.append(DWConvLayer(first_ch[1], first_ch[1], stride=2)) + + # Build all HarDNet blocks + ch = first_ch[1] + for i in range(blks): + blk = HarDBlock(ch, gr[i], grmul, n_layers[i], dwconv=depth_wise) + ch = blk.out_channels + self.base.append(blk) + + if i == blks - 1 and arch == 85: + self.base.append(nn.Dropout(0.1)) + + self.base.append(ConvLayer(ch, ch_list[i], kernel_size=1)) + ch = ch_list[i] + if downSamp[i] == 1: + if max_pool: + self.base.append(nn.MaxPool2D(kernel_size=2, stride=2)) + else: + self.base.append(DWConvLayer(ch, ch, stride=2)) + + ch = ch_list[blks - 1] + + layers = [] + + if with_pool: + layers.append(nn.AdaptiveAvgPool2D((1, 1))) + + if class_num > 0: + layers.append(nn.Flatten()) + layers.append(nn.Dropout(drop_rate)) + layers.append(nn.Linear(ch, class_num)) + + self.base.append(nn.Sequential(*layers)) + + def forward(self, x): + for layer in self.base: + x = 
layer(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def HarDNet39_ds(pretrained=False, **kwargs): + model = HarDNet(arch=39, depth_wise=True, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HarDNet39_ds"]) + return model + + +def HarDNet68_ds(pretrained=False, **kwargs): + model = HarDNet(arch=68, depth_wise=True, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HarDNet68_ds"]) + return model + + +def HarDNet68(pretrained=False, **kwargs): + model = HarDNet(arch=68, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HarDNet68"]) + return model + + +def HarDNet85(pretrained=False, **kwargs): + model = HarDNet(arch=85, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["HarDNet85"]) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/inception_v4.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/inception_v4.py new file mode 100644 index 000000000..476330004 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/inception_v4.py @@ -0,0 +1,479 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
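(Editor's sketch, not part of the patch.) Every backbone file in this patch repeats the same `_load_pretrained` dispatch: `False` skips loading, `True` pulls the official URL from `MODEL_URLS`, a string is treated as a user-supplied weights path, and anything else raises. A standalone illustration of that dispatch; `fake_load` is a stand-in for ppcls' `load_dygraph_pretrain`:

def fake_load(model, path_or_url, use_ssld=False):
    # Stub for load_dygraph_pretrain; only reports what would be loaded.
    print(f"loading {path_or_url} into {model} (use_ssld={use_ssld})")

def load_pretrained(pretrained, model, model_url, use_ssld=False):
    if pretrained is False:
        pass                                   # random init, nothing to load
    elif pretrained is True:
        fake_load(model, model_url, use_ssld)  # official pretrained weights
    elif isinstance(pretrained, str):
        fake_load(model, pretrained)           # local .pdparams path
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type.")

load_pretrained(True, "HarDNet68",
                "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_pretrained.pdparams")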
+ +# reference: https://arxiv.org/abs/1602.07261 + +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "InceptionV4": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV4_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + padding=0, + groups=1, + act='relu', + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + bn_name = name + "_bn" + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + "_scale"), + bias_attr=ParamAttr(name=bn_name + "_offset"), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class InceptionStem(nn.Layer): + def __init__(self): + super(InceptionStem, self).__init__() + self._conv_1 = ConvBNLayer( + 3, 32, 3, stride=2, act="relu", name="conv1_3x3_s2") + self._conv_2 = ConvBNLayer(32, 32, 3, act="relu", name="conv2_3x3_s1") + self._conv_3 = ConvBNLayer( + 32, 64, 3, padding=1, act="relu", name="conv3_3x3_s1") + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0) + self._conv2 = ConvBNLayer( + 64, 96, 3, stride=2, act="relu", name="inception_stem1_3x3_s2") + self._conv1_1 = ConvBNLayer( + 160, 64, 1, act="relu", name="inception_stem2_3x3_reduce") + self._conv1_2 = ConvBNLayer( + 64, 96, 3, act="relu", name="inception_stem2_3x3") + self._conv2_1 = ConvBNLayer( + 160, 64, 1, act="relu", name="inception_stem2_1x7_reduce") + self._conv2_2 = ConvBNLayer( + 64, + 64, (7, 1), + padding=(3, 0), + act="relu", + name="inception_stem2_1x7") + self._conv2_3 = ConvBNLayer( + 64, + 64, (1, 7), + padding=(0, 3), + act="relu", + name="inception_stem2_7x1") + self._conv2_4 = ConvBNLayer( + 64, 96, 3, act="relu", name="inception_stem2_3x3_2") + self._conv3 = ConvBNLayer( + 192, 192, 3, stride=2, act="relu", name="inception_stem3_3x3_s2") + + def forward(self, inputs): + conv = self._conv_1(inputs) + conv = self._conv_2(conv) + conv = self._conv_3(conv) + + pool1 = self._pool(conv) + conv2 = self._conv2(conv) + concat = paddle.concat([pool1, conv2], axis=1) + + conv1 = self._conv1_1(concat) + conv1 = self._conv1_2(conv1) + + conv2 = self._conv2_1(concat) + conv2 = self._conv2_2(conv2) + conv2 = self._conv2_3(conv2) + conv2 = self._conv2_4(conv2) + + concat = paddle.concat([conv1, conv2], axis=1) + + conv1 = self._conv3(concat) + pool1 = self._pool(concat) + + concat = paddle.concat([conv1, pool1], axis=1) + return concat + + +class InceptionA(nn.Layer): + def __init__(self, name): + super(InceptionA, self).__init__() + self._pool = AvgPool2D(kernel_size=3, stride=1, padding=1) + self._conv1 = ConvBNLayer( + 384, 96, 1, act="relu", name="inception_a" + name + "_1x1") + self._conv2 = ConvBNLayer( + 384, 96, 1, act="relu", name="inception_a" + name + "_1x1_2") + self._conv3_1 = ConvBNLayer( + 384, 64, 1, act="relu", name="inception_a" + name + 
"_3x3_reduce") + self._conv3_2 = ConvBNLayer( + 64, + 96, + 3, + padding=1, + act="relu", + name="inception_a" + name + "_3x3") + self._conv4_1 = ConvBNLayer( + 384, + 64, + 1, + act="relu", + name="inception_a" + name + "_3x3_2_reduce") + self._conv4_2 = ConvBNLayer( + 64, + 96, + 3, + padding=1, + act="relu", + name="inception_a" + name + "_3x3_2") + self._conv4_3 = ConvBNLayer( + 96, + 96, + 3, + padding=1, + act="relu", + name="inception_a" + name + "_3x3_3") + + def forward(self, inputs): + pool1 = self._pool(inputs) + conv1 = self._conv1(pool1) + + conv2 = self._conv2(inputs) + + conv3 = self._conv3_1(inputs) + conv3 = self._conv3_2(conv3) + + conv4 = self._conv4_1(inputs) + conv4 = self._conv4_2(conv4) + conv4 = self._conv4_3(conv4) + + concat = paddle.concat([conv1, conv2, conv3, conv4], axis=1) + return concat + + +class ReductionA(nn.Layer): + def __init__(self): + super(ReductionA, self).__init__() + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0) + self._conv2 = ConvBNLayer( + 384, 384, 3, stride=2, act="relu", name="reduction_a_3x3") + self._conv3_1 = ConvBNLayer( + 384, 192, 1, act="relu", name="reduction_a_3x3_2_reduce") + self._conv3_2 = ConvBNLayer( + 192, 224, 3, padding=1, act="relu", name="reduction_a_3x3_2") + self._conv3_3 = ConvBNLayer( + 224, 256, 3, stride=2, act="relu", name="reduction_a_3x3_3") + + def forward(self, inputs): + pool1 = self._pool(inputs) + conv2 = self._conv2(inputs) + conv3 = self._conv3_1(inputs) + conv3 = self._conv3_2(conv3) + conv3 = self._conv3_3(conv3) + concat = paddle.concat([pool1, conv2, conv3], axis=1) + return concat + + +class InceptionB(nn.Layer): + def __init__(self, name=None): + super(InceptionB, self).__init__() + self._pool = AvgPool2D(kernel_size=3, stride=1, padding=1) + self._conv1 = ConvBNLayer( + 1024, 128, 1, act="relu", name="inception_b" + name + "_1x1") + self._conv2 = ConvBNLayer( + 1024, 384, 1, act="relu", name="inception_b" + name + "_1x1_2") + self._conv3_1 = ConvBNLayer( + 1024, + 192, + 1, + act="relu", + name="inception_b" + name + "_1x7_reduce") + self._conv3_2 = ConvBNLayer( + 192, + 224, (1, 7), + padding=(0, 3), + act="relu", + name="inception_b" + name + "_1x7") + self._conv3_3 = ConvBNLayer( + 224, + 256, (7, 1), + padding=(3, 0), + act="relu", + name="inception_b" + name + "_7x1") + self._conv4_1 = ConvBNLayer( + 1024, + 192, + 1, + act="relu", + name="inception_b" + name + "_7x1_2_reduce") + self._conv4_2 = ConvBNLayer( + 192, + 192, (1, 7), + padding=(0, 3), + act="relu", + name="inception_b" + name + "_1x7_2") + self._conv4_3 = ConvBNLayer( + 192, + 224, (7, 1), + padding=(3, 0), + act="relu", + name="inception_b" + name + "_7x1_2") + self._conv4_4 = ConvBNLayer( + 224, + 224, (1, 7), + padding=(0, 3), + act="relu", + name="inception_b" + name + "_1x7_3") + self._conv4_5 = ConvBNLayer( + 224, + 256, (7, 1), + padding=(3, 0), + act="relu", + name="inception_b" + name + "_7x1_3") + + def forward(self, inputs): + pool1 = self._pool(inputs) + conv1 = self._conv1(pool1) + + conv2 = self._conv2(inputs) + + conv3 = self._conv3_1(inputs) + conv3 = self._conv3_2(conv3) + conv3 = self._conv3_3(conv3) + + conv4 = self._conv4_1(inputs) + conv4 = self._conv4_2(conv4) + conv4 = self._conv4_3(conv4) + conv4 = self._conv4_4(conv4) + conv4 = self._conv4_5(conv4) + + concat = paddle.concat([conv1, conv2, conv3, conv4], axis=1) + return concat + + +class ReductionB(nn.Layer): + def __init__(self): + super(ReductionB, self).__init__() + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0) + self._conv2_1 
= ConvBNLayer( + 1024, 192, 1, act="relu", name="reduction_b_3x3_reduce") + self._conv2_2 = ConvBNLayer( + 192, 192, 3, stride=2, act="relu", name="reduction_b_3x3") + self._conv3_1 = ConvBNLayer( + 1024, 256, 1, act="relu", name="reduction_b_1x7_reduce") + self._conv3_2 = ConvBNLayer( + 256, + 256, (1, 7), + padding=(0, 3), + act="relu", + name="reduction_b_1x7") + self._conv3_3 = ConvBNLayer( + 256, + 320, (7, 1), + padding=(3, 0), + act="relu", + name="reduction_b_7x1") + self._conv3_4 = ConvBNLayer( + 320, 320, 3, stride=2, act="relu", name="reduction_b_3x3_2") + + def forward(self, inputs): + pool1 = self._pool(inputs) + + conv2 = self._conv2_1(inputs) + conv2 = self._conv2_2(conv2) + + conv3 = self._conv3_1(inputs) + conv3 = self._conv3_2(conv3) + conv3 = self._conv3_3(conv3) + conv3 = self._conv3_4(conv3) + + concat = paddle.concat([pool1, conv2, conv3], axis=1) + + return concat + + +class InceptionC(nn.Layer): + def __init__(self, name=None): + super(InceptionC, self).__init__() + self._pool = AvgPool2D(kernel_size=3, stride=1, padding=1) + self._conv1 = ConvBNLayer( + 1536, 256, 1, act="relu", name="inception_c" + name + "_1x1") + self._conv2 = ConvBNLayer( + 1536, 256, 1, act="relu", name="inception_c" + name + "_1x1_2") + self._conv3_0 = ConvBNLayer( + 1536, 384, 1, act="relu", name="inception_c" + name + "_1x1_3") + self._conv3_1 = ConvBNLayer( + 384, + 256, (1, 3), + padding=(0, 1), + act="relu", + name="inception_c" + name + "_1x3") + self._conv3_2 = ConvBNLayer( + 384, + 256, (3, 1), + padding=(1, 0), + act="relu", + name="inception_c" + name + "_3x1") + self._conv4_0 = ConvBNLayer( + 1536, 384, 1, act="relu", name="inception_c" + name + "_1x1_4") + self._conv4_00 = ConvBNLayer( + 384, + 448, (1, 3), + padding=(0, 1), + act="relu", + name="inception_c" + name + "_1x3_2") + self._conv4_000 = ConvBNLayer( + 448, + 512, (3, 1), + padding=(1, 0), + act="relu", + name="inception_c" + name + "_3x1_2") + self._conv4_1 = ConvBNLayer( + 512, + 256, (1, 3), + padding=(0, 1), + act="relu", + name="inception_c" + name + "_1x3_3") + self._conv4_2 = ConvBNLayer( + 512, + 256, (3, 1), + padding=(1, 0), + act="relu", + name="inception_c" + name + "_3x1_3") + + def forward(self, inputs): + pool1 = self._pool(inputs) + conv1 = self._conv1(pool1) + + conv2 = self._conv2(inputs) + + conv3 = self._conv3_0(inputs) + conv3_1 = self._conv3_1(conv3) + conv3_2 = self._conv3_2(conv3) + + conv4 = self._conv4_0(inputs) + conv4 = self._conv4_00(conv4) + conv4 = self._conv4_000(conv4) + conv4_1 = self._conv4_1(conv4) + conv4_2 = self._conv4_2(conv4) + + concat = paddle.concat( + [conv1, conv2, conv3_1, conv3_2, conv4_1, conv4_2], axis=1) + + return concat + + +class InceptionV4DY(nn.Layer): + def __init__(self, class_num=1000): + super(InceptionV4DY, self).__init__() + self._inception_stem = InceptionStem() + + self._inceptionA_1 = InceptionA(name="1") + self._inceptionA_2 = InceptionA(name="2") + self._inceptionA_3 = InceptionA(name="3") + self._inceptionA_4 = InceptionA(name="4") + self._reductionA = ReductionA() + + self._inceptionB_1 = InceptionB(name="1") + self._inceptionB_2 = InceptionB(name="2") + self._inceptionB_3 = InceptionB(name="3") + self._inceptionB_4 = InceptionB(name="4") + self._inceptionB_5 = InceptionB(name="5") + self._inceptionB_6 = InceptionB(name="6") + self._inceptionB_7 = InceptionB(name="7") + self._reductionB = ReductionB() + + self._inceptionC_1 = InceptionC(name="1") + self._inceptionC_2 = InceptionC(name="2") + self._inceptionC_3 = InceptionC(name="3") + + self.avg_pool = 
AdaptiveAvgPool2D(1) + self._drop = Dropout(p=0.2, mode="downscale_in_infer") + stdv = 1.0 / math.sqrt(1536 * 1.0) + self.out = Linear( + 1536, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="final_fc_weights"), + bias_attr=ParamAttr(name="final_fc_offset")) + + def forward(self, inputs): + x = self._inception_stem(inputs) + + x = self._inceptionA_1(x) + x = self._inceptionA_2(x) + x = self._inceptionA_3(x) + x = self._inceptionA_4(x) + x = self._reductionA(x) + + x = self._inceptionB_1(x) + x = self._inceptionB_2(x) + x = self._inceptionB_3(x) + x = self._inceptionB_4(x) + x = self._inceptionB_5(x) + x = self._inceptionB_6(x) + x = self._inceptionB_7(x) + x = self._reductionB(x) + + x = self._inceptionC_1(x) + x = self._inceptionC_2(x) + x = self._inceptionC_3(x) + + x = self.avg_pool(x) + x = paddle.squeeze(x, axis=[2, 3]) + x = self._drop(x) + x = self.out(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def InceptionV4(pretrained=False, use_ssld=False, **kwargs): + model = InceptionV4DY(**kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["InceptionV4"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/levit.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/levit.py new file mode 100644 index 000000000..47734cc9c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/levit.py @@ -0,0 +1,590 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Code was based on https://github.com/facebookresearch/LeViT +# reference: https://openaccess.thecvf.com/content/ICCV2021/html/Graham_LeViT_A_Vision_Transformer_in_ConvNets_Clothing_for_Faster_Inference_ICCV_2021_paper.html + +import itertools +import math +import warnings + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.initializer import TruncatedNormal, Constant +from paddle.regularizer import L2Decay + +from .vision_transformer import trunc_normal_, zeros_, ones_, Identity + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "LeViT_128S": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128S_pretrained.pdparams", + "LeViT_128": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128_pretrained.pdparams", + "LeViT_192": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_192_pretrained.pdparams", + "LeViT_256": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_256_pretrained.pdparams", + "LeViT_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_384_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +def cal_attention_biases(attention_biases, attention_bias_idxs): + gather_list = [] + attention_bias_t = paddle.transpose(attention_biases, (1, 0)) + nums = attention_bias_idxs.shape[0] + for idx in range(nums): + gather = paddle.gather(attention_bias_t, attention_bias_idxs[idx]) + gather_list.append(gather) + shape0, shape1 = attention_bias_idxs.shape + gather = paddle.concat(gather_list) + return paddle.transpose(gather, (1, 0)).reshape((0, shape0, shape1)) + + +class Conv2d_BN(nn.Sequential): + def __init__(self, + a, + b, + ks=1, + stride=1, + pad=0, + dilation=1, + groups=1, + bn_weight_init=1, + resolution=-10000): + super().__init__() + self.add_sublayer( + 'c', + nn.Conv2D( + a, b, ks, stride, pad, dilation, groups, bias_attr=False)) + bn = nn.BatchNorm2D(b) + ones_(bn.weight) + zeros_(bn.bias) + self.add_sublayer('bn', bn) + + +class Linear_BN(nn.Sequential): + def __init__(self, a, b, bn_weight_init=1): + super().__init__() + self.add_sublayer('c', nn.Linear(a, b, bias_attr=False)) + bn = nn.BatchNorm1D(b) + if bn_weight_init == 0: + zeros_(bn.weight) + else: + ones_(bn.weight) + zeros_(bn.bias) + self.add_sublayer('bn', bn) + + def forward(self, x): + l, bn = self._sub_layers.values() + x = l(x) + return paddle.reshape(bn(x.flatten(0, 1)), x.shape) + + +class BN_Linear(nn.Sequential): + def __init__(self, a, b, bias=True, std=0.02): + super().__init__() + self.add_sublayer('bn', nn.BatchNorm1D(a)) + l = nn.Linear(a, b, bias_attr=bias) + trunc_normal_(l.weight) + if bias: + zeros_(l.bias) + self.add_sublayer('l', l) + + +def b16(n, activation, resolution=224): + return nn.Sequential( + Conv2d_BN( + 3, n // 8, 3, 2, 1, resolution=resolution), + activation(), + Conv2d_BN( + n // 8, n // 4, 3, 2, 1, resolution=resolution // 2), + activation(), + Conv2d_BN( + n // 4, n // 2, 3, 2, 1, resolution=resolution // 4), + activation(), + Conv2d_BN( + n // 2, n, 3, 2, 1, resolution=resolution // 8)) + + +class Residual(nn.Layer): + def __init__(self, m, drop): + super().__init__() + self.m = m + self.drop = drop + + def forward(self, x): + if self.training and self.drop > 0: + y = paddle.rand( + shape=[x.shape[0], 1, 1]).__ge__(self.drop).astype("float32") + y = y.divide(paddle.full_like(y, 1 - self.drop)) + return paddle.add(x, y) + else: + return paddle.add(x, self.m(x)) + + +class Attention(nn.Layer): + def __init__(self, 
+ dim, + key_dim, + num_heads=8, + attn_ratio=4, + activation=None, + resolution=14): + super().__init__() + self.num_heads = num_heads + self.scale = key_dim**-0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + self.h = self.dh + nh_kd * 2 + self.qkv = Linear_BN(dim, self.h) + self.proj = nn.Sequential( + activation(), Linear_BN( + self.dh, dim, bn_weight_init=0)) + points = list(itertools.product(range(resolution), range(resolution))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = self.create_parameter( + shape=(num_heads, len(attention_offsets)), + default_initializer=zeros_, + attr=paddle.ParamAttr(regularizer=L2Decay(0.0))) + tensor_idxs = paddle.to_tensor(idxs, dtype='int64') + self.register_buffer('attention_bias_idxs', + paddle.reshape(tensor_idxs, [N, N])) + + @paddle.no_grad() + def train(self, mode=True): + if mode: + super().train() + else: + super().eval() + if mode and hasattr(self, 'ab'): + del self.ab + else: + self.ab = cal_attention_biases(self.attention_biases, + self.attention_bias_idxs) + + def forward(self, x): + self.training = True + B, N, C = x.shape + qkv = self.qkv(x) + qkv = paddle.reshape(qkv, + [B, N, self.num_heads, self.h // self.num_heads]) + q, k, v = paddle.split( + qkv, [self.key_dim, self.key_dim, self.d], axis=3) + q = paddle.transpose(q, perm=[0, 2, 1, 3]) + k = paddle.transpose(k, perm=[0, 2, 1, 3]) + v = paddle.transpose(v, perm=[0, 2, 1, 3]) + k_transpose = paddle.transpose(k, perm=[0, 1, 3, 2]) + + if self.training: + attention_biases = cal_attention_biases(self.attention_biases, + self.attention_bias_idxs) + else: + attention_biases = self.ab + attn = (paddle.matmul(q, k_transpose) * self.scale + attention_biases) + attn = F.softmax(attn) + x = paddle.transpose(paddle.matmul(attn, v), perm=[0, 2, 1, 3]) + x = paddle.reshape(x, [B, N, self.dh]) + x = self.proj(x) + return x + + +class Subsample(nn.Layer): + def __init__(self, stride, resolution): + super().__init__() + self.stride = stride + self.resolution = resolution + + def forward(self, x): + B, N, C = x.shape + x = paddle.reshape(x, [B, self.resolution, self.resolution, C]) + end1, end2 = x.shape[1], x.shape[2] + x = x[:, 0:end1:self.stride, 0:end2:self.stride] + x = paddle.reshape(x, [B, -1, C]) + return x + + +class AttentionSubsample(nn.Layer): + def __init__(self, + in_dim, + out_dim, + key_dim, + num_heads=8, + attn_ratio=2, + activation=None, + stride=2, + resolution=14, + resolution_=7): + super().__init__() + self.num_heads = num_heads + self.scale = key_dim**-0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * self.num_heads + self.attn_ratio = attn_ratio + self.resolution_ = resolution_ + self.resolution_2 = resolution_**2 + self.training = True + h = self.dh + nh_kd + self.kv = Linear_BN(in_dim, h) + + self.q = nn.Sequential( + Subsample(stride, resolution), Linear_BN(in_dim, nh_kd)) + self.proj = nn.Sequential(activation(), Linear_BN(self.dh, out_dim)) + + self.stride = stride + self.resolution = resolution + points = list(itertools.product(range(resolution), range(resolution))) + points_ = 
list( + itertools.product(range(resolution_), range(resolution_))) + + N = len(points) + N_ = len(points_) + attention_offsets = {} + idxs = [] + i = 0 + j = 0 + for p1 in points_: + i += 1 + for p2 in points: + j += 1 + size = 1 + offset = (abs(p1[0] * stride - p2[0] + (size - 1) / 2), + abs(p1[1] * stride - p2[1] + (size - 1) / 2)) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = self.create_parameter( + shape=(num_heads, len(attention_offsets)), + default_initializer=zeros_, + attr=paddle.ParamAttr(regularizer=L2Decay(0.0))) + + tensor_idxs_ = paddle.to_tensor(idxs, dtype='int64') + self.register_buffer('attention_bias_idxs', + paddle.reshape(tensor_idxs_, [N_, N])) + + @paddle.no_grad() + def train(self, mode=True): + if mode: + super().train() + else: + super().eval() + if mode and hasattr(self, 'ab'): + del self.ab + else: + self.ab = cal_attention_biases(self.attention_biases, + self.attention_bias_idxs) + + def forward(self, x): + self.training = True + B, N, C = x.shape + kv = self.kv(x) + kv = paddle.reshape(kv, [B, N, self.num_heads, -1]) + k, v = paddle.split(kv, [self.key_dim, self.d], axis=3) + k = paddle.transpose(k, perm=[0, 2, 1, 3]) # BHNC + v = paddle.transpose(v, perm=[0, 2, 1, 3]) + q = paddle.reshape( + self.q(x), [B, self.resolution_2, self.num_heads, self.key_dim]) + q = paddle.transpose(q, perm=[0, 2, 1, 3]) + + if self.training: + attention_biases = cal_attention_biases(self.attention_biases, + self.attention_bias_idxs) + else: + attention_biases = self.ab + + attn = (paddle.matmul( + q, paddle.transpose( + k, perm=[0, 1, 3, 2]))) * self.scale + attention_biases + attn = F.softmax(attn) + + x = paddle.reshape( + paddle.transpose( + paddle.matmul(attn, v), perm=[0, 2, 1, 3]), [B, -1, self.dh]) + x = self.proj(x) + return x + + +class LeViT(nn.Layer): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + class_num=1000, + embed_dim=[192], + key_dim=[64], + depth=[12], + num_heads=[3], + attn_ratio=[2], + mlp_ratio=[2], + hybrid_backbone=None, + down_ops=[], + attention_activation=nn.Hardswish, + mlp_activation=nn.Hardswish, + distillation=True, + drop_path=0): + super().__init__() + + self.class_num = class_num + self.num_features = embed_dim[-1] + self.embed_dim = embed_dim + self.distillation = distillation + + self.patch_embed = hybrid_backbone + + self.blocks = [] + down_ops.append(['']) + resolution = img_size // patch_size + for i, (ed, kd, dpth, nh, ar, mr, do) in enumerate( + zip(embed_dim, key_dim, depth, num_heads, attn_ratio, + mlp_ratio, down_ops)): + for _ in range(dpth): + self.blocks.append( + Residual( + Attention( + ed, + kd, + nh, + attn_ratio=ar, + activation=attention_activation, + resolution=resolution, ), + drop_path)) + if mr > 0: + h = int(ed * mr) + self.blocks.append( + Residual( + nn.Sequential( + Linear_BN(ed, h), + mlp_activation(), + Linear_BN( + h, ed, bn_weight_init=0), ), + drop_path)) + if do[0] == 'Subsample': + #('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + resolution_ = (resolution - 1) // do[5] + 1 + self.blocks.append( + AttentionSubsample( + *embed_dim[i:i + 2], + key_dim=do[1], + num_heads=do[2], + attn_ratio=do[3], + activation=attention_activation, + stride=do[5], + resolution=resolution, + resolution_=resolution_)) + resolution = resolution_ + if do[4] > 0: # mlp_ratio + h = int(embed_dim[i + 1] * 
do[4]) + self.blocks.append( + Residual( + nn.Sequential( + Linear_BN(embed_dim[i + 1], h), + mlp_activation(), + Linear_BN( + h, embed_dim[i + 1], bn_weight_init=0), ), + drop_path)) + self.blocks = nn.Sequential(*self.blocks) + + # Classifier head + self.head = BN_Linear(embed_dim[-1], + class_num) if class_num > 0 else Identity() + if distillation: + self.head_dist = BN_Linear( + embed_dim[-1], class_num) if class_num > 0 else Identity() + + def forward(self, x): + x = self.patch_embed(x) + x = x.flatten(2) + x = paddle.transpose(x, perm=[0, 2, 1]) + x = self.blocks(x) + x = x.mean(1) + + x = paddle.reshape(x, [-1, self.embed_dim[-1]]) + if self.distillation: + x = self.head(x), self.head_dist(x) + if not self.training: + x = (x[0] + x[1]) / 2 + else: + x = self.head(x) + return x + + +def model_factory(C, D, X, N, drop_path, class_num, distillation): + embed_dim = [int(x) for x in C.split('_')] + num_heads = [int(x) for x in N.split('_')] + depth = [int(x) for x in X.split('_')] + act = nn.Hardswish + model = LeViT( + patch_size=16, + embed_dim=embed_dim, + num_heads=num_heads, + key_dim=[D] * 3, + depth=depth, + attn_ratio=[2, 2, 2], + mlp_ratio=[2, 2, 2], + down_ops=[ + #('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + ['Subsample', D, embed_dim[0] // D, 4, 2, 2], + ['Subsample', D, embed_dim[1] // D, 4, 2, 2], + ], + attention_activation=act, + mlp_activation=act, + hybrid_backbone=b16(embed_dim[0], activation=act), + class_num=class_num, + drop_path=drop_path, + distillation=distillation) + + return model + + +specification = { + 'LeViT_128S': { + 'C': '128_256_384', + 'D': 16, + 'N': '4_6_8', + 'X': '2_3_4', + 'drop_path': 0 + }, + 'LeViT_128': { + 'C': '128_256_384', + 'D': 16, + 'N': '4_8_12', + 'X': '4_4_4', + 'drop_path': 0 + }, + 'LeViT_192': { + 'C': '192_288_384', + 'D': 32, + 'N': '3_5_6', + 'X': '4_4_4', + 'drop_path': 0 + }, + 'LeViT_256': { + 'C': '256_384_512', + 'D': 32, + 'N': '4_6_8', + 'X': '4_4_4', + 'drop_path': 0 + }, + 'LeViT_384': { + 'C': '384_512_768', + 'D': 32, + 'N': '6_9_12', + 'X': '4_4_4', + 'drop_path': 0.1 + }, +} + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def LeViT_128S(pretrained=False, + use_ssld=False, + class_num=1000, + distillation=False, + **kwargs): + model = model_factory( + **specification['LeViT_128S'], + class_num=class_num, + distillation=distillation) + _load_pretrained( + pretrained, model, MODEL_URLS["LeViT_128S"], use_ssld=use_ssld) + return model + + +def LeViT_128(pretrained=False, + use_ssld=False, + class_num=1000, + distillation=False, + **kwargs): + model = model_factory( + **specification['LeViT_128'], + class_num=class_num, + distillation=distillation) + _load_pretrained( + pretrained, model, MODEL_URLS["LeViT_128"], use_ssld=use_ssld) + return model + + +def LeViT_192(pretrained=False, + use_ssld=False, + class_num=1000, + distillation=False, + **kwargs): + model = model_factory( + **specification['LeViT_192'], + class_num=class_num, + distillation=distillation) + _load_pretrained( + pretrained, model, MODEL_URLS["LeViT_192"], use_ssld=use_ssld) + return model + + +def LeViT_256(pretrained=False, + use_ssld=False, + class_num=1000, + distillation=False, + **kwargs): + model = model_factory( + **specification['LeViT_256'], + class_num=class_num, + distillation=distillation) + _load_pretrained( + pretrained, model, MODEL_URLS["LeViT_256"], use_ssld=use_ssld) + return model + + +def LeViT_384(pretrained=False, + use_ssld=False, + class_num=1000, + distillation=False, + **kwargs): + model = model_factory( + **specification['LeViT_384'], + class_num=class_num, + distillation=distillation) + _load_pretrained( + pretrained, model, MODEL_URLS["LeViT_384"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/micronet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/micronet.py new file mode 100644 index 000000000..8069bb860 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/micronet.py @@ -0,0 +1,618 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
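Before the MicroNet code below, a minimal sketch of how a LeViT `specification` entry above decodes into `model_factory` arguments. All names come from the code above; this is plain Python for illustration only and needs no Paddle install.

spec = {'C': '128_256_384', 'D': 16, 'N': '4_6_8', 'X': '2_3_4', 'drop_path': 0}  # specification['LeViT_128S']

embed_dim = [int(x) for x in spec['C'].split('_')]   # [128, 256, 384] per-stage widths
num_heads = [int(x) for x in spec['N'].split('_')]   # [4, 6, 8]
depth     = [int(x) for x in spec['X'].split('_')]   # [2, 3, 4] blocks per stage
key_dim   = [spec['D']] * 3                          # [16, 16, 16]

# Two AttentionSubsample stages bridge the three widths, as in model_factory:
down_ops = [
    ['Subsample', spec['D'], embed_dim[0] // spec['D'], 4, 2, 2],   # 128 -> 256
    ['Subsample', spec['D'], embed_dim[1] // spec['D'], 4, 2, 2],   # 256 -> 384
]
print(embed_dim, num_heads, depth, key_dim, down_ops)

The same decoding applies to the other specification entries; only the strings and drop_path differ.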
+# +# Code was heavily based on https://github.com/liyunsheng13/micronet +# reference: https://arxiv.org/pdf/2108.05894 + +import math + +import paddle +import paddle.nn as nn + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "MicroNet_M0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MicroNet_M0_pretrained.pdparams", + "MicroNet_M1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MicroNet_M1_pretrained.pdparams", + "MicroNet_M2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MicroNet_M2_pretrained.pdparams", + "MicroNet_M3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MicroNet_M3_pretrained.pdparams", +} + +__all__ = MODEL_URLS.keys() + +NET_CONFIG = { + "msnx_dy6_exp4_4M_221": [ + #s, n, c, ks, c1, c2, g1, g2, c3, g3, g4,y1,y2,y3,r + [2, 1, 8, 3, 2, 2, 0, 4, 8, 2, 2, 2, 0, 1, + 1], # 6 ->12(0,0) ->24 ->8(4,2) ->8 + [2, 1, 12, 3, 2, 2, 0, 8, 12, 4, 4, 2, 2, 1, + 1], # 8 ->16(0,0) ->32 ->16(4,4) ->12 + [2, 1, 16, 5, 2, 2, 0, 12, 16, 4, 4, 2, 2, 1, + 1], # 16 ->32(0,0) ->64 ->16(8,2) ->16 + [1, 1, 32, 5, 1, 4, 4, 4, 32, 4, 4, 2, 2, 1, + 1], # 16 ->16(2,8) ->96 ->32(8,4) ->32 + [2, 1, 64, 5, 1, 4, 8, 8, 64, 8, 8, 2, 2, 1, + 1], # 32 ->32(2,16) ->192 ->64(12,4) ->64 + [1, 1, 96, 3, 1, 4, 8, 8, 96, 8, 8, 2, 2, 1, + 2], # 64 ->64(3,16) ->384 ->96(16,6) ->96 + [1, 1, 384, 3, 1, 4, 12, 12, 0, 0, 0, 2, 2, 1, + 2], # 96 ->96(4,24) ->384 + ], + "msnx_dy6_exp6_6M_221": [ + #s, n, c, ks, c1, c2, g1, g2, c3, g3, g4 + [2, 1, 8, 3, 2, 2, 0, 6, 8, 2, 2, 2, 0, 1, + 1], # 6 ->12(0,0) ->24 ->8(4,2) ->8 + [2, 1, 16, 3, 2, 2, 0, 8, 16, 4, 4, 2, 2, 1, + 1], # 8 ->16(0,0) ->32 ->16(4,4) ->16 + [2, 1, 16, 5, 2, 2, 0, 16, 16, 4, 4, 2, 2, 1, + 1], # 16 ->32(0,0) ->64 ->16(8,2) ->16 + [1, 1, 32, 5, 1, 6, 4, 4, 32, 4, 4, 2, 2, 1, + 1], # 16 ->16(2,8) ->96 ->32(8,4) ->32 + [2, 1, 64, 5, 1, 6, 8, 8, 64, 8, 8, 2, 2, 1, + 1], # 32 ->32(2,16) ->192 ->64(12,4) ->64 + [1, 1, 96, 3, 1, 6, 8, 8, 96, 8, 8, 2, 2, 1, + 2], # 64 ->64(3,16) ->384 ->96(16,6) ->96 + [1, 1, 576, 3, 1, 6, 12, 12, 0, 0, 0, 2, 2, 1, + 2], # 96 ->96(4,24) ->576 + ], + "msnx_dy9_exp6_12M_221": [ + #s, n, c, ks, c1, c2, g1, g2, c3, g3, g4 + [2, 1, 12, 3, 2, 2, 0, 8, 12, 4, 4, 2, 0, 1, + 1], # 8 ->16(0,0) ->32 ->12(4,3) ->12 + [2, 1, 16, 3, 2, 2, 0, 12, 16, 4, 4, 2, 2, 1, + 1], # 12 ->24(0,0) ->48 ->16(8,2) ->16 + [1, 1, 24, 3, 2, 2, 0, 16, 24, 4, 4, 2, 2, 1, + 1], # 16 ->16(0,0) ->64 ->24(8,3) ->24 + [2, 1, 32, 5, 1, 6, 6, 6, 32, 4, 4, 2, 2, 1, + 1], # 24 ->24(2,12) ->144 ->32(16,2) ->32 + [1, 1, 32, 5, 1, 6, 8, 8, 32, 4, 4, 2, 2, 1, + 2], # 32 ->32(2,16) ->192 ->32(16,2) ->32 + [1, 1, 64, 5, 1, 6, 8, 8, 64, 8, 8, 2, 2, 1, + 2], # 32 ->32(2,16) ->192 ->64(12,4) ->64 + [2, 1, 96, 5, 1, 6, 8, 8, 96, 8, 8, 2, 2, 1, + 2], # 64 ->64(4,12) ->384 ->96(16,5) ->96 + [1, 1, 128, 3, 1, 6, 12, 12, 128, 8, 8, 2, 2, 1, + 2], # 96 ->96(5,16) ->576 ->128(16,8) ->128 + [1, 1, 768, 3, 1, 6, 16, 16, 0, 0, 0, 2, 2, 1, + 2], # 128 ->128(4,32) ->768 + ], + "msnx_dy12_exp6_20M_020": [ + #s, n, c, ks, c1, c2, g1, g2, c3, g3, g4 + [2, 1, 16, 3, 2, 2, 0, 12, 16, 4, 4, 0, 2, 0, + 1], # 12 ->24(0,0) ->48 ->16(8,2) ->16 + [2, 1, 24, 3, 2, 2, 0, 16, 24, 4, 4, 0, 2, 0, + 1], # 16 ->32(0,0) ->64 ->24(8,3) ->24 + [1, 1, 24, 3, 2, 2, 0, 24, 24, 4, 4, 0, 2, 0, + 1], # 24 ->48(0,0) ->96 ->24(8,3) ->24 + [2, 1, 32, 5, 1, 6, 6, 6, 32, 4, 4, 0, 2, 0, + 1], # 24 ->24(2,12) ->144 ->32(16,2) ->32 + [1, 1, 32, 5, 1, 6, 8, 8, 32, 4, 4, 0, 2, 0, + 2], # 32 ->32(2,16) ->192 ->32(16,2) ->32 + [1, 1, 64, 5, 1, 6, 8, 
8, 48, 8, 8, 0, 2, 0, + 2], # 32 ->32(2,16) ->192 ->48(12,4) ->64 + [1, 1, 80, 5, 1, 6, 8, 8, 80, 8, 8, 0, 2, 0, + 2], # 48 ->48(3,16) ->288 ->80(16,5) ->80 + [1, 1, 80, 5, 1, 6, 10, 10, 80, 8, 8, 0, 2, 0, + 2], # 80 ->80(4,20) ->480 ->80(20,4) ->80 + [2, 1, 120, 5, 1, 6, 10, 10, 120, 10, 10, 0, 2, 0, + 2], # 80 ->80(4,20) ->480 ->128(16,8) ->120 + [1, 1, 120, 5, 1, 6, 12, 12, 120, 10, 10, 0, 2, 0, + 2], # 120 ->128(4,32) ->720 ->128(32,4) ->120 + [1, 1, 144, 3, 1, 6, 12, 12, 144, 12, 12, 0, 2, 0, + 2], # 120 ->128(4,32) ->720 ->160(32,5) ->144 + [1, 1, 864, 3, 1, 6, 12, 12, 0, 0, 0, 0, 2, 0, + 2], # 144 ->144(5,32) ->864 + ], +} + +ACTIVATION_CONFIG = { + "msnx_dy6_exp4_4M_221": { + "act_max": 2.0, + "reduction": 8, + "init_ab3": [1.0, 0.0], + "init_a": [1.0, 1.0], + "init_b": [0.0, 0.0], + }, + "msnx_dy6_exp6_6M_221": { + "act_max": 2.0, + "reduction": 8, + "init_ab3": [1.0, 0.0], + "init_a": [1.0, 1.0], + "init_b": [0.0, 0.0], + }, + "msnx_dy9_exp6_12M_221": { + "act_max": 2.0, + "reduction": 8, + "init_ab3": [1.0, 0.0], + "init_a": [1.0, 1.0], + "init_b": [0.0, 0.0], + }, + "msnx_dy12_exp6_20M_020": { + "act_max": 2.0, + "reduction": 8, + "init_ab3": [1.0, 0.0], + "init_a": [1.0, 0.5], + "init_b": [0.0, 0.5], + }, +} + + +def _make_divisible(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class MaxGroupPooling(nn.Layer): + def __init__(self, channel_per_group=2): + super().__init__() + self.channel_per_group = channel_per_group + + def forward(self, x): + if self.channel_per_group == 1: + return x + + # max op + b, c, h, w = x.shape + + # reshape + y = x.reshape([b, c // self.channel_per_group, -1, h, w]) + out, _ = paddle.max(y, axis=2) + return out + + +class SwishLinear(nn.Layer): + def __init__(self, inp, oup): + super().__init__() + self.linear = nn.Sequential( + nn.Linear(inp, oup), nn.BatchNorm1D(oup), nn.Hardswish()) + + def forward(self, x): + return self.linear(x) + + +class StemLayer(nn.Layer): + def __init__(self, inp, oup, stride, groups=(4, 4)): + super().__init__() + g1, g2 = groups + self.stem = nn.Sequential( + SpatialSepConvSF(inp, groups, 3, stride), + MaxGroupPooling(2) if g1 * g2 == 2 * oup else nn.ReLU6()) + + def forward(self, x): + out = self.stem(x) + return out + + +class GroupConv(nn.Layer): + def __init__(self, inp, oup, groups=2): + super().__init__() + self.inp = inp + self.oup = oup + self.groups = groups + self.conv = nn.Sequential( + nn.Conv2D( + inp, oup, 1, groups=self.groups[0], bias_attr=False), + nn.BatchNorm2D(oup)) + + def forward(self, x): + x = self.conv(x) + return x + + +class ChannelShuffle(nn.Layer): + def __init__(self, groups): + super().__init__() + self.groups = groups + + def forward(self, x): + b, c, h, w = x.shape + + channels_per_group = c // self.groups + + # reshape + x = x.reshape([b, self.groups, channels_per_group, h, w]) + x = x.transpose([0, 2, 1, 3, 4]) + out = x.reshape([b, c, h, w]) + + return out + + +class SpatialSepConvSF(nn.Layer): + def __init__(self, inp, oups, kernel_size, stride): + super().__init__() + + oup1, oup2 = oups + self.conv = nn.Sequential( + nn.Conv2D( + inp, + oup1, (kernel_size, 1), (stride, 1), (kernel_size // 2, 0), + groups=1, + bias_attr=False), + nn.BatchNorm2D(oup1), + nn.Conv2D( + oup1, + oup1 * oup2, (1, kernel_size), (1, stride), + (0, kernel_size // 2), + groups=oup1, + bias_attr=False), + nn.BatchNorm2D(oup1 * oup2), + 
ChannelShuffle(oup1)) + + def forward(self, x): + out = self.conv(x) + return out + + +class DepthSpatialSepConv(nn.Layer): + def __init__(self, inp, expand, kernel_size, stride): + super().__init__() + + exp1, exp2 = expand + hidden_dim = inp * exp1 + oup = inp * exp1 * exp2 + + self.conv = nn.Sequential( + nn.Conv2D( + inp, + inp * exp1, (kernel_size, 1), (stride, 1), + (kernel_size // 2, 0), + groups=inp, + bias_attr=False), + nn.BatchNorm2D(inp * exp1), + nn.Conv2D( + hidden_dim, + oup, (1, kernel_size), (1, stride), (0, kernel_size // 2), + groups=hidden_dim, + bias_attr=False), + nn.BatchNorm2D(oup)) + + def forward(self, x): + out = self.conv(x) + return out + + +class DYShiftMax(nn.Layer): + def __init__(self, + inp, + oup, + reduction=4, + act_max=1.0, + act_relu=True, + init_a=[0.0, 0.0], + init_b=[0.0, 0.0], + relu_before_pool=False, + g=None, + expansion=False): + super().__init__() + self.oup = oup + self.act_max = act_max * 2 + self.act_relu = act_relu + self.avg_pool = nn.Sequential(nn.ReLU() if relu_before_pool == True + else nn.Identity(), + nn.AdaptiveAvgPool2D(1)) + + self.exp = 4 if act_relu else 2 + self.init_a = init_a + self.init_b = init_b + + # determine squeeze + squeeze = _make_divisible(inp // reduction, 4) + if squeeze < 4: + squeeze = 4 + + self.fc = nn.Sequential( + nn.Linear(inp, squeeze), + nn.ReLU(), nn.Linear(squeeze, oup * self.exp), nn.Hardsigmoid()) + if g is None: + g = 1 + self.g = g[1] + if self.g != 1 and expansion: + self.g = inp // self.g + self.gc = inp // self.g + index = paddle.to_tensor(list(range(inp))).reshape([1, inp, 1, 1]) + index = index.reshape([1, self.g, self.gc, 1, 1]) + indexgs = paddle.split(index, [1, self.g - 1], axis=1) + indexgs = paddle.concat((indexgs[1], indexgs[0]), axis=1) + indexs = paddle.split(indexgs, [1, self.gc - 1], axis=2) + indexs = paddle.concat((indexs[1], indexs[0]), axis=2) + self.index = indexs.reshape([inp]).astype(paddle.int64) + self.expansion = expansion + + def forward(self, x): + x_in = x + x_out = x + + b, c, _, _ = x_in.shape + y = self.avg_pool(x_in).reshape([b, c]) + y = self.fc(y).reshape([b, self.oup * self.exp, 1, 1]) + y = (y - 0.5) * self.act_max + + n2, c2, h2, w2 = x_out.shape + x2 = paddle.index_select(x_out, self.index, axis=1) + + if self.exp == 4: + a1, b1, a2, b2 = paddle.split(y, 4, axis=1) + + a1 = a1 + self.init_a[0] + a2 = a2 + self.init_a[1] + + b1 = b1 + self.init_b[0] + b2 = b2 + self.init_b[1] + + z1 = x_out * a1 + x2 * b1 + z2 = x_out * a2 + x2 * b2 + + out = paddle.maximum(z1, z2) + + elif self.exp == 2: + a1, b1 = paddle.split(y, 2, axis=1) + a1 = a1 + self.init_a[0] + b1 = b1 + self.init_b[0] + out = x_out * a1 + x2 * b1 + + return out + + +class DYMicroBlock(nn.Layer): + def __init__(self, + inp, + oup, + kernel_size=3, + stride=1, + ch_exp=(2, 2), + ch_per_group=4, + groups_1x1=(1, 1), + dy=[0, 0, 0], + ratio=1.0, + activation_cfg=None): + super().__init__() + + self.identity = stride == 1 and inp == oup + + y1, y2, y3 = dy + act_max = activation_cfg["act_max"] + act_reduction = activation_cfg["reduction"] * ratio + init_a = activation_cfg["init_a"] + init_b = activation_cfg["init_b"] + init_ab3 = activation_cfg["init_ab3"] + + t1 = ch_exp + gs1 = ch_per_group + hidden_fft, g1, g2 = groups_1x1 + + hidden_dim1 = inp * t1[0] + hidden_dim2 = inp * t1[0] * t1[1] + + if gs1[0] == 0: + self.layers = nn.Sequential( + DepthSpatialSepConv(inp, t1, kernel_size, stride), + DYShiftMax( + hidden_dim2, + hidden_dim2, + act_max=act_max, + act_relu=True if y2 == 2 else False, + 
init_a=init_a, + reduction=act_reduction, + init_b=init_b, + g=gs1, + expansion=False) if y2 > 0 else nn.ReLU6(), + ChannelShuffle(gs1[1]), + ChannelShuffle(hidden_dim2 // 2) if y2 != 0 else nn.Identity(), + GroupConv(hidden_dim2, oup, (g1, g2)), + DYShiftMax( + oup, + oup, + act_max=act_max, + act_relu=False, + init_a=[init_ab3[0], 0.0], + reduction=act_reduction // 2, + init_b=[init_ab3[1], 0.0], + g=(g1, g2), + expansion=False) if y3 > 0 else nn.Identity(), + ChannelShuffle(g2), + ChannelShuffle(oup // 2) + if oup % 2 == 0 and y3 != 0 else nn.Identity()) + elif g2 == 0: + self.layers = nn.Sequential( + GroupConv(inp, hidden_dim2, gs1), + DYShiftMax( + hidden_dim2, + hidden_dim2, + act_max=act_max, + act_relu=False, + init_a=[init_ab3[0], 0.0], + reduction=act_reduction, + init_b=[init_ab3[1], 0.0], + g=gs1, + expansion=False) if y3 > 0 else nn.Identity()) + else: + self.layers = nn.Sequential( + GroupConv(inp, hidden_dim2, gs1), + DYShiftMax( + hidden_dim2, + hidden_dim2, + act_max=act_max, + act_relu=True if y1 == 2 else False, + init_a=init_a, + reduction=act_reduction, + init_b=init_b, + g=gs1, + expansion=False) if y1 > 0 else nn.ReLU6(), + ChannelShuffle(gs1[1]), + DepthSpatialSepConv(hidden_dim2, (1, 1), kernel_size, stride), + nn.Identity(), + DYShiftMax( + hidden_dim2, + hidden_dim2, + act_max=act_max, + act_relu=True if y2 == 2 else False, + init_a=init_a, + reduction=act_reduction, + init_b=init_b, + g=gs1, + expansion=True) if y2 > 0 else nn.ReLU6(), + ChannelShuffle(hidden_dim2 // 4) + if y1 != 0 and y2 != 0 else nn.Identity() + if y1 == 0 and y2 == 0 else ChannelShuffle(hidden_dim2 // 2), + GroupConv(hidden_dim2, oup, (g1, g2)), + DYShiftMax( + oup, + oup, + act_max=act_max, + act_relu=False, + init_a=[init_ab3[0], 0.0], + reduction=act_reduction // 2 + if oup < hidden_dim2 else act_reduction, + init_b=[init_ab3[1], 0.0], + g=(g1, g2), + expansion=False) if y3 > 0 else nn.Identity(), + ChannelShuffle(g2), + ChannelShuffle(oup // 2) if y3 != 0 else nn.Identity()) + + def forward(self, x): + out = self.layers(x) + if self.identity: + out = out + x + return out + + +class MicroNet(nn.Layer): + def __init__(self, + net_cfg, + activation_cfg, + input_size=224, + class_num=1000, + stem_ch=16, + stem_groups=[4, 8], + out_ch=1024, + dropout_rate=0.0): + super().__init__() + + # building first layer + assert input_size % 32 == 0 + input_channel = stem_ch + layers = [StemLayer(3, input_channel, stride=2, groups=stem_groups)] + + for s, n, c, ks, c1, c2, g1, g2, c3, g3, g4, y1, y2, y3, r in net_cfg: + for i in range(n): + layers.append( + DYMicroBlock( + input_channel, + c, + kernel_size=ks, + stride=s if i == 0 else 1, + ch_exp=(c1, c2), + ch_per_group=(g1, g2), + groups_1x1=(c3, g3, g4), + dy=[y1, y2, y3], + ratio=r, + activation_cfg=activation_cfg)) + input_channel = c + self.features = nn.Sequential(*layers) + + self.avgpool = nn.Sequential(nn.ReLU6(), + nn.AdaptiveAvgPool2D(1), nn.Hardswish()) + + # building last several layers + self.classifier = nn.Sequential( + SwishLinear(input_channel, out_ch), + nn.Dropout(dropout_rate), SwishLinear(out_ch, class_num)) + + self.apply(self._initialize_weights) + + def _initialize_weights(self, m): + if isinstance(m, nn.Conv2D): + n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels + nn.initializer.Normal(std=math.sqrt(2. 
/ n))(m.weight) + elif isinstance(m, nn.Linear): + nn.initializer.Normal(std=0.01)(m.weight) + + def forward(self, x): + x = self.features(x) + x = self.avgpool(x) + x = self.classifier(x.flatten(1)) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def MicroNet_M0(pretrained=False, use_ssld=False, **kwargs): + model = MicroNet( + NET_CONFIG["msnx_dy6_exp4_4M_221"], + ACTIVATION_CONFIG["msnx_dy6_exp4_4M_221"], + stem_ch=4, + stem_groups=[2, 2], + out_ch=640, + dropout_rate=0.05, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MicroNet_M0"], use_ssld) + return model + + +def MicroNet_M1(pretrained=False, use_ssld=False, **kwargs): + model = MicroNet( + NET_CONFIG["msnx_dy6_exp6_6M_221"], + ACTIVATION_CONFIG["msnx_dy6_exp6_6M_221"], + stem_ch=6, + stem_groups=[3, 2], + out_ch=960, + dropout_rate=0.05, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MicroNet_M1"], use_ssld) + return model + + +def MicroNet_M2(pretrained=False, use_ssld=False, **kwargs): + model = MicroNet( + NET_CONFIG["msnx_dy9_exp6_12M_221"], + ACTIVATION_CONFIG["msnx_dy9_exp6_12M_221"], + stem_ch=8, + stem_groups=[4, 2], + out_ch=1024, + dropout_rate=0.1, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MicroNet_M2"], use_ssld) + return model + + +def MicroNet_M3(pretrained=False, use_ssld=False, **kwargs): + model = MicroNet( + NET_CONFIG["msnx_dy12_exp6_20M_020"], + ACTIVATION_CONFIG["msnx_dy12_exp6_20M_020"], + stem_ch=12, + stem_groups=[4, 3], + out_ch=1024, + dropout_rate=0.1, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["MicroNet_M3"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mixnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mixnet.py new file mode 100644 index 000000000..201630cf9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mixnet.py @@ -0,0 +1,812 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
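Before the MixNet code below, a quick smoke-test sketch for the MicroNet factories defined above. This is hypothetical usage, not part of the patch: it assumes a working Paddle install and that the definitions above are in scope.

import paddle

model = MicroNet_M0(pretrained=False, class_num=1000)   # factory defined above
model.eval()

x = paddle.rand([1, 3, 224, 224])    # MicroNet asserts input_size % 32 == 0
with paddle.no_grad():
    logits = model(x)
print(logits.shape)                  # expected: [1, 1000]

MicroNet_M1/M2/M3 are used the same way; they differ only in the config key, stem width, head width, and dropout rate.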
+ +# reference: https://arxiv.org/abs/1907.09595 + +import os +from inspect import isfunction +from functools import reduce +import paddle +import paddle.nn as nn + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "MixNet_S": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_S_pretrained.pdparams", + "MixNet_M": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_M_pretrained.pdparams", + "MixNet_L": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_L_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class Identity(nn.Layer): + """ + Identity block. + """ + + def __init__(self): + super(Identity, self).__init__() + + def forward(self, x): + return x + + +def round_channels(channels, divisor=8): + """ + Round weighted channel number (make divisible operation). + + Parameters: + ---------- + channels : int or float + Original number of channels. + divisor : int, default 8 + Alignment value. + + Returns: + ------- + int + Weighted number of channels. + """ + rounded_channels = max( + int(channels + divisor / 2.0) // divisor * divisor, divisor) + if float(rounded_channels) < 0.9 * channels: + rounded_channels += divisor + return rounded_channels + + +def get_activation_layer(activation): + """ + Create activation layer from string/function. + + Parameters: + ---------- + activation : function, or str, or nn.Module + Activation function or name of activation function. + + Returns: + ------- + nn.Module + Activation layer. + """ + assert activation is not None + if isfunction(activation): + return activation() + elif isinstance(activation, str): + if activation == "relu": + return nn.ReLU() + elif activation == "relu6": + return nn.ReLU6() + elif activation == "swish": + return nn.Swish() + elif activation == "hswish": + return nn.Hardswish() + elif activation == "sigmoid": + return nn.Sigmoid() + elif activation == "hsigmoid": + return nn.Hardsigmoid() + elif activation == "identity": + return Identity() + else: + raise NotImplementedError() + else: + assert isinstance(activation, nn.Layer) + return activation + + +class ConvBlock(nn.Layer): + """ + Standard convolution block with Batch normalization and activation. + + Parameters: + ---------- + in_channels : int + Number of input channels. + out_channels : int + Number of output channels. + kernel_size : int or tuple/list of 2 int + Convolution window size. + stride : int or tuple/list of 2 int + Strides of the convolution. + padding : int, or tuple/list of 2 int, or tuple/list of 4 int + Padding value for convolution layer. + dilation : int or tuple/list of 2 int, default 1 + Dilation value for convolution layer. + groups : int, default 1 + Number of groups. + bias : bool, default False + Whether the layer uses a bias vector. + use_bn : bool, default True + Whether to use BatchNorm layer. + bn_eps : float, default 1e-5 + Small float added to variance in Batch norm. + activation : function or str or None, default nn.ReLU() + Activation function or name of activation function. 
+ """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation=1, + groups=1, + bias=False, + use_bn=True, + bn_eps=1e-5, + activation=nn.ReLU()): + super(ConvBlock, self).__init__() + self.activate = (activation is not None) + self.use_bn = use_bn + self.use_pad = (isinstance(padding, (list, tuple)) and + (len(padding) == 4)) + + if self.use_pad: + self.pad = padding + self.conv = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias_attr=bias, + weight_attr=None) + if self.use_bn: + self.bn = nn.BatchNorm2D(num_features=out_channels, epsilon=bn_eps) + if self.activate: + self.activ = get_activation_layer(activation) + + def forward(self, x): + x = self.conv(x) + if self.use_bn: + x = self.bn(x) + if self.activate: + x = self.activ(x) + return x + + +class SEBlock(nn.Layer): + def __init__(self, + channels, + reduction=16, + mid_channels=None, + round_mid=False, + use_conv=True, + mid_activation=nn.ReLU(), + out_activation=nn.Sigmoid()): + super(SEBlock, self).__init__() + self.use_conv = use_conv + if mid_channels is None: + mid_channels = channels // reduction if not round_mid else round_channels( + float(channels) / reduction) + + self.pool = nn.AdaptiveAvgPool2D(output_size=1) + if use_conv: + self.conv1 = nn.Conv2D( + in_channels=channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + groups=1, + bias_attr=True, + weight_attr=None) + + else: + self.fc1 = nn.Linear( + in_features=channels, out_features=mid_channels) + self.activ = get_activation_layer(mid_activation) + if use_conv: + self.conv2 = nn.Conv2D( + in_channels=mid_channels, + out_channels=channels, + kernel_size=1, + stride=1, + groups=1, + bias_attr=True, + weight_attr=None) + else: + self.fc2 = nn.Linear( + in_features=mid_channels, out_features=channels) + self.sigmoid = get_activation_layer(out_activation) + + def forward(self, x): + w = self.pool(x) + if not self.use_conv: + w = w.reshape(shape=[w.shape[0], -1]) + w = self.conv1(w) if self.use_conv else self.fc1(w) + w = self.activ(w) + w = self.conv2(w) if self.use_conv else self.fc2(w) + w = self.sigmoid(w) + if not self.use_conv: + w = w.unsqueeze(2).unsqueeze(3) + x = x * w + return x + + +class MixConv(nn.Layer): + """ + Mixed convolution layer from 'MixConv: Mixed Depthwise Convolutional Kernels,' + https://arxiv.org/abs/1907.09595. + + Parameters: + ---------- + in_channels : int + Number of input channels. + out_channels : int + Number of output channels. + kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int + Convolution window size. + stride : int or tuple/list of 2 int + Strides of the convolution. + padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int + Padding value for convolution layer. + dilation : int or tuple/list of 2 int, default 1 + Dilation value for convolution layer. + groups : int, default 1 + Number of groups. + bias : bool, default False + Whether the layer uses a bias vector. + axis : int, default 1 + The axis on which to concatenate the outputs. 
+ """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation=1, + groups=1, + bias=False, + axis=1): + super(MixConv, self).__init__() + kernel_size = kernel_size if isinstance(kernel_size, + list) else [kernel_size] + padding = padding if isinstance(padding, list) else [padding] + kernel_count = len(kernel_size) + self.splitted_in_channels = self.split_channels(in_channels, + kernel_count) + splitted_out_channels = self.split_channels(out_channels, kernel_count) + for i, kernel_size_i in enumerate(kernel_size): + in_channels_i = self.splitted_in_channels[i] + out_channels_i = splitted_out_channels[i] + padding_i = padding[i] + _ = self.add_sublayer( + name=str(i), + sublayer=nn.Conv2D( + in_channels=in_channels_i, + out_channels=out_channels_i, + kernel_size=kernel_size_i, + stride=stride, + padding=padding_i, + dilation=dilation, + groups=(out_channels_i + if out_channels == groups else groups), + bias_attr=bias, + weight_attr=None)) + self.axis = axis + + def forward(self, x): + xx = paddle.split(x, self.splitted_in_channels, axis=self.axis) + xx = paddle.split(x, self.splitted_in_channels, axis=self.axis) + out = [ + conv_i(x_i) for x_i, conv_i in zip(xx, self._sub_layers.values()) + ] + x = paddle.concat(tuple(out), axis=self.axis) + return x + + @staticmethod + def split_channels(channels, kernel_count): + splitted_channels = [channels // kernel_count] * kernel_count + splitted_channels[0] += channels - sum(splitted_channels) + return splitted_channels + + +class MixConvBlock(nn.Layer): + """ + Mixed convolution block with Batch normalization and activation. + + Parameters: + ---------- + in_channels : int + Number of input channels. + out_channels : int + Number of output channels. + kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int + Convolution window size. + stride : int or tuple/list of 2 int + Strides of the convolution. + padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int + Padding value for convolution layer. + dilation : int or tuple/list of 2 int, default 1 + Dilation value for convolution layer. + groups : int, default 1 + Number of groups. + bias : bool, default False + Whether the layer uses a bias vector. + use_bn : bool, default True + Whether to use BatchNorm layer. + bn_eps : float, default 1e-5 + Small float added to variance in Batch norm. + activation : function or str or None, default nn.ReLU() + Activation function or name of activation function. + activate : bool, default True + Whether activate the convolution block. 
+ """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation=1, + groups=1, + bias=False, + use_bn=True, + bn_eps=1e-5, + activation=nn.ReLU()): + super(MixConvBlock, self).__init__() + self.activate = (activation is not None) + self.use_bn = use_bn + + self.conv = MixConv( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias) + if self.use_bn: + self.bn = nn.BatchNorm2D(num_features=out_channels, epsilon=bn_eps) + if self.activate: + self.activ = get_activation_layer(activation) + + def forward(self, x): + x = self.conv(x) + if self.use_bn: + x = self.bn(x) + if self.activate: + x = self.activ(x) + return x + + +def mixconv1x1_block(in_channels, + out_channels, + kernel_count, + stride=1, + groups=1, + bias=False, + use_bn=True, + bn_eps=1e-5, + activation=nn.ReLU()): + """ + 1x1 version of the mixed convolution block. + + Parameters: + ---------- + in_channels : int + Number of input channels. + out_channels : int + Number of output channels. + kernel_count : int + Kernel count. + stride : int or tuple/list of 2 int, default 1 + Strides of the convolution. + groups : int, default 1 + Number of groups. + bias : bool, default False + Whether the layer uses a bias vector. + use_bn : bool, default True + Whether to use BatchNorm layer. + bn_eps : float, default 1e-5 + Small float added to variance in Batch norm. + activation : function or str, or None, default nn.ReLU() + Activation function or name of activation function. + """ + return MixConvBlock( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=([1] * kernel_count), + stride=stride, + padding=([0] * kernel_count), + groups=groups, + bias=bias, + use_bn=use_bn, + bn_eps=bn_eps, + activation=activation) + + +class MixUnit(nn.Layer): + """ + MixNet unit. + + Parameters: + ---------- + in_channels : int + Number of input channels. + out_channels : int + Number of output channels. exp_channels : int + Number of middle (expanded) channels. + stride : int or tuple/list of 2 int + Strides of the second convolution layer. + exp_kernel_count : int + Expansion convolution kernel count for each unit. + conv1_kernel_count : int + Conv1 kernel count for each unit. + conv2_kernel_count : int + Conv2 kernel count for each unit. + exp_factor : int + Expansion factor for each unit. + se_factor : int + SE reduction factor for each unit. + activation : str + Activation function or name of activation function. 
+ """ + + def __init__(self, in_channels, out_channels, stride, exp_kernel_count, + conv1_kernel_count, conv2_kernel_count, exp_factor, se_factor, + activation): + super(MixUnit, self).__init__() + assert exp_factor >= 1 + assert se_factor >= 0 + self.residual = (in_channels == out_channels) and (stride == 1) + self.use_se = se_factor > 0 + mid_channels = exp_factor * in_channels + self.use_exp_conv = exp_factor > 1 + + if self.use_exp_conv: + if exp_kernel_count == 1: + self.exp_conv = ConvBlock( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias=False, + use_bn=True, + bn_eps=1e-5, + activation=activation) + else: + self.exp_conv = mixconv1x1_block( + in_channels=in_channels, + out_channels=mid_channels, + kernel_count=exp_kernel_count, + activation=activation) + if conv1_kernel_count == 1: + self.conv1 = ConvBlock( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=3, + stride=stride, + padding=1, + dilation=1, + groups=mid_channels, + bias=False, + use_bn=True, + bn_eps=1e-5, + activation=activation) + else: + self.conv1 = MixConvBlock( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=[3 + 2 * i for i in range(conv1_kernel_count)], + stride=stride, + padding=[1 + i for i in range(conv1_kernel_count)], + groups=mid_channels, + activation=activation) + if self.use_se: + self.se = SEBlock( + channels=mid_channels, + reduction=(exp_factor * se_factor), + round_mid=False, + mid_activation=activation) + if conv2_kernel_count == 1: + self.conv2 = ConvBlock( + in_channels=mid_channels, + out_channels=out_channels, + activation=None, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias=False, + use_bn=True, + bn_eps=1e-5) + else: + self.conv2 = mixconv1x1_block( + in_channels=mid_channels, + out_channels=out_channels, + kernel_count=conv2_kernel_count, + activation=None) + + def forward(self, x): + if self.residual: + identity = x + if self.use_exp_conv: + x = self.exp_conv(x) + x = self.conv1(x) + if self.use_se: + x = self.se(x) + x = self.conv2(x) + if self.residual: + x = x + identity + return x + + +class MixInitBlock(nn.Layer): + """ + MixNet specific initial block. + + Parameters: + ---------- + in_channels : int + Number of input channels. + out_channels : int + Number of output channels. + """ + + def __init__(self, in_channels, out_channels): + super(MixInitBlock, self).__init__() + self.conv1 = ConvBlock( + in_channels=in_channels, + out_channels=out_channels, + stride=2, + kernel_size=3, + padding=1) + self.conv2 = MixUnit( + in_channels=out_channels, + out_channels=out_channels, + stride=1, + exp_kernel_count=1, + conv1_kernel_count=1, + conv2_kernel_count=1, + exp_factor=1, + se_factor=0, + activation="relu") + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x + + +class MixNet(nn.Layer): + """ + MixNet model from 'MixConv: Mixed Depthwise Convolutional Kernels,' + https://arxiv.org/abs/1907.09595. + + Parameters: + ---------- + channels : list of list of int + Number of output channels for each unit. + init_block_channels : int + Number of output channels for the initial unit. + final_block_channels : int + Number of output channels for the final block of the feature extractor. + exp_kernel_counts : list of list of int + Expansion convolution kernel count for each unit. + conv1_kernel_counts : list of list of int + Conv1 kernel count for each unit. + conv2_kernel_counts : list of list of int + Conv2 kernel count for each unit. 
+ exp_factors : list of list of int + Expansion factor for each unit. + se_factors : list of list of int + SE reduction factor for each unit. + in_channels : int, default 3 + Number of input channels. + in_size : tuple of two ints, default (224, 224) + Spatial size of the expected input image. + class_num : int, default 1000 + Number of classification classes. + """ + + def __init__(self, + channels, + init_block_channels, + final_block_channels, + exp_kernel_counts, + conv1_kernel_counts, + conv2_kernel_counts, + exp_factors, + se_factors, + in_channels=3, + in_size=(224, 224), + class_num=1000): + super(MixNet, self).__init__() + self.in_size = in_size + self.class_num = class_num + + self.features = nn.Sequential() + self.features.add_sublayer( + "init_block", + MixInitBlock( + in_channels=in_channels, out_channels=init_block_channels)) + in_channels = init_block_channels + for i, channels_per_stage in enumerate(channels): + stage = nn.Sequential() + for j, out_channels in enumerate(channels_per_stage): + stride = 2 if ((j == 0) and (i != 3)) or ( + (j == len(channels_per_stage) // 2) and (i == 3)) else 1 + exp_kernel_count = exp_kernel_counts[i][j] + conv1_kernel_count = conv1_kernel_counts[i][j] + conv2_kernel_count = conv2_kernel_counts[i][j] + exp_factor = exp_factors[i][j] + se_factor = se_factors[i][j] + activation = "relu" if i == 0 else "swish" + stage.add_sublayer( + "unit{}".format(j + 1), + MixUnit( + in_channels=in_channels, + out_channels=out_channels, + stride=stride, + exp_kernel_count=exp_kernel_count, + conv1_kernel_count=conv1_kernel_count, + conv2_kernel_count=conv2_kernel_count, + exp_factor=exp_factor, + se_factor=se_factor, + activation=activation)) + in_channels = out_channels + self.features.add_sublayer("stage{}".format(i + 1), stage) + self.features.add_sublayer( + "final_block", + ConvBlock( + in_channels=in_channels, + out_channels=final_block_channels, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias=False, + use_bn=True, + bn_eps=1e-5, + activation=nn.ReLU())) + in_channels = final_block_channels + self.features.add_sublayer( + "final_pool", nn.AvgPool2D( + kernel_size=7, stride=1)) + + self.output = nn.Linear( + in_features=in_channels, out_features=class_num) + + def forward(self, x): + x = self.features(x) + reshape_dim = reduce(lambda x, y: x * y, x.shape[1:]) + x = x.reshape(shape=[x.shape[0], reshape_dim]) + x = self.output(x) + return x + + +def get_mixnet(version, width_scale, model_name=None, **kwargs): + """ + Create MixNet model with specific parameters. + + Parameters: + ---------- + version : str + Version of MobileNetV3 ('s' or 'm'). + width_scale : float + Scale factor for width of layers. + model_name : str or None, default None + Model name. 
+ """ + + if version == "s": + init_block_channels = 16 + channels = [[24, 24], [40, 40, 40, 40], [80, 80, 80], + [120, 120, 120, 200, 200, 200]] + exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 1, 1], + [2, 2, 2, 1, 1, 1]] + conv1_kernel_counts = [[1, 1], [3, 2, 2, 2], [3, 2, 2], + [3, 4, 4, 5, 4, 4]] + conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [2, 2, 2], + [2, 2, 2, 1, 2, 2]] + exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6], [6, 3, 3, 6, 6, 6]] + se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4], [2, 2, 2, 2, 2, 2]] + elif version == "m": + init_block_channels = 24 + channels = [[32, 32], [40, 40, 40, 40], [80, 80, 80, 80], + [120, 120, 120, 120, 200, 200, 200, 200]] + exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], + [1, 2, 2, 2, 1, 1, 1, 1]] + conv1_kernel_counts = [[3, 1], [4, 2, 2, 2], [3, 4, 4, 4], + [1, 4, 4, 4, 4, 4, 4, 4]] + conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], + [1, 2, 2, 2, 1, 2, 2, 2]] + exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6, 6], + [6, 3, 3, 3, 6, 6, 6, 6]] + se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4, 4], + [2, 2, 2, 2, 2, 2, 2, 2]] + else: + raise ValueError("Unsupported MixNet version {}".format(version)) + + final_block_channels = 1536 + + if width_scale != 1.0: + channels = [[round_channels(cij * width_scale) for cij in ci] + for ci in channels] + init_block_channels = round_channels(init_block_channels * width_scale) + + net = MixNet( + channels=channels, + init_block_channels=init_block_channels, + final_block_channels=final_block_channels, + exp_kernel_counts=exp_kernel_counts, + conv1_kernel_counts=conv1_kernel_counts, + conv2_kernel_counts=conv2_kernel_counts, + exp_factors=exp_factors, + se_factors=se_factors, + **kwargs) + + return net + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def MixNet_S(pretrained=False, use_ssld=False, **kwargs): + """ + MixNet-S model from 'MixConv: Mixed Depthwise Convolutional Kernels,' + https://arxiv.org/abs/1907.09595. + """ + model = get_mixnet( + version="s", width_scale=1.0, model_name="MixNet_S", **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MixNet_S"], use_ssld=use_ssld) + return model + + +def MixNet_M(pretrained=False, use_ssld=False, **kwargs): + """ + MixNet-M model from 'MixConv: Mixed Depthwise Convolutional Kernels,' + https://arxiv.org/abs/1907.09595. + """ + model = get_mixnet( + version="m", width_scale=1.0, model_name="MixNet_M", **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MixNet_M"], use_ssld=use_ssld) + return model + + +def MixNet_L(pretrained=False, use_ssld=False, **kwargs): + """ + MixNet-S model from 'MixConv: Mixed Depthwise Convolutional Kernels,' + https://arxiv.org/abs/1907.09595. 
+ """ + model = get_mixnet( + version="m", width_scale=1.3, model_name="MixNet_L", **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MixNet_L"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilefacenet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilefacenet.py new file mode 100644 index 000000000..7fe78a074 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilefacenet.py @@ -0,0 +1,166 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +''' +Origin author: wujiyang +Modify from: https://github.com/wujiyang/Face_Pytorch/blob/master/backbone/mobilefacenet.py +''' + +import paddle +from paddle import nn +import math + +__all__ = ['MobileFaceNet'] + +MobileFaceNet_BottleNeck_Setting = [ + # t, c , n ,s + [2, 64, 5, 2], + [4, 128, 1, 2], + [2, 128, 6, 1], + [4, 128, 1, 2], + [2, 128, 2, 1] +] + + +class BottleNeck(nn.Layer): + def __init__(self, inp, oup, stride, expansion, data_format="NCHW"): + super().__init__() + self.connect = stride == 1 and inp == oup + + self.conv = nn.Sequential( + # 1*1 conv + nn.Conv2D( + inp, inp * expansion, 1, 1, 0, bias_attr=False, data_format=data_format), + nn.BatchNorm2D(inp * expansion, data_format=data_format), + nn.PReLU(inp * expansion, data_format=data_format), + + # 3*3 depth wise conv + nn.Conv2D( + inp * expansion, + inp * expansion, + 3, + stride, + 1, + groups=inp * expansion, + bias_attr=False, + data_format=data_format + ), + nn.BatchNorm2D(inp * expansion, data_format=data_format), + nn.PReLU(inp * expansion, data_format=data_format), + + # 1*1 conv + nn.Conv2D( + inp * expansion, oup, 1, 1, 0, bias_attr=False, data_format=data_format), + nn.BatchNorm2D(oup, data_format=data_format), ) + + def forward(self, x): + if self.connect: + return x + self.conv(x) + else: + return self.conv(x) + + +class ConvBlock(nn.Layer): + def __init__(self, inp, oup, k, s, p, dw=False, linear=False, data_format="NCHW"): + super().__init__() + self.linear = linear + if dw: + self.conv = nn.Conv2D( + inp, oup, k, s, p, groups=inp, bias_attr=False, data_format=data_format) + else: + self.conv = nn.Conv2D(inp, oup, k, s, p, bias_attr=False, data_format=data_format) + + self.bn = nn.BatchNorm2D(oup, data_format=data_format) + if not linear: + self.prelu = nn.PReLU(oup, data_format=data_format) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.linear: + return x + else: + return self.prelu(x) + + +class Backbone(nn.Layer): + def __init__(self, + feature_dim=128, + bottleneck_setting=MobileFaceNet_BottleNeck_Setting, + data_format="NCHW", + **args): + super().__init__() + self.data_format = data_format + + self.conv1 = ConvBlock(3, 64, 3, 2, 1, data_format=data_format) + self.dw_conv1 = ConvBlock(64, 64, 3, 1, 1, dw=True, data_format=data_format) + + self.cur_channel = 64 + block = BottleNeck + self.blocks = 
self._make_layer(block, bottleneck_setting) + + self.conv2 = ConvBlock(128, 512, 1, 1, 0, data_format=data_format) + self.linear7 = ConvBlock(512, 512, 7, 1, 0, dw=True, linear=True, data_format=data_format) + self.linear1 = ConvBlock(512, feature_dim, 1, 1, 0, linear=True, data_format=data_format) + + for m in self.sublayers(): + if isinstance(m, nn.Conv2D): + # ks * ks * out_ch + n = m.weight.shape[1] * m.weight.shape[2] * m.weight.shape[3] + m.weight = paddle.create_parameter( + shape=m.weight.shape, + dtype=m.weight.dtype, + default_initializer=nn.initializer.Normal( + mean=0.0, std=math.sqrt(2.0 / n))) + + elif isinstance(m, (nn.BatchNorm, nn.BatchNorm2D, nn.GroupNorm)): + m.weight = paddle.create_parameter( + shape=m.weight.shape, + dtype=m.weight.dtype, + default_initializer=nn.initializer.Constant(value=1.0)) + m.bias = paddle.create_parameter( + shape=m.bias.shape, + dtype=m.bias.dtype, + default_initializer=nn.initializer.Constant(value=0.0)) + + def _make_layer(self, block, setting): + layers = [] + for t, c, n, s in setting: + for i in range(n): + if i == 0: + layers.append(block(self.cur_channel, c, s, t, data_format=self.data_format)) + else: + layers.append(block(self.cur_channel, c, 1, t, data_format=self.data_format)) + self.cur_channel = c + + return nn.Sequential(*layers) + + def forward(self, x): + if self.data_format == "NHWC": + x = paddle.transpose(x, [0, 2, 3, 1]) + x.stop_gradient = True + x = self.conv1(x) + x = self.dw_conv1(x) + x = self.blocks(x) + x = self.conv2(x) + x = self.linear7(x) + x = self.linear1(x) + if self.data_format == "NHWC": + x = paddle.transpose(x, [0, 3, 1, 2]) + x = x.reshape([x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]]) + return x + + +def MobileFaceNet(num_features=128, **args): + model = Backbone(feature_dim=num_features, **args) + return model \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilenet_v2.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilenet_v2.py new file mode 100644 index 000000000..5d606988f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilenet_v2.py @@ -0,0 +1,316 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
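Before the MobileNetV2 code below, a minimal sketch (plain Python, no Paddle needed) of the bookkeeping that `Backbone._make_layer` above performs on `MobileFaceNet_BottleNeck_Setting`: each row [t, c, n, s] expands into n BottleNeck blocks, the first with stride s and the rest with stride 1.

setting = [
    # t (expansion), c (out channels), n (repeats), s (stride of first repeat)
    [2, 64, 5, 2],
    [4, 128, 1, 2],
    [2, 128, 6, 1],
    [4, 128, 1, 2],
    [2, 128, 2, 1],
]

cur_channel = 64          # output width of dw_conv1 above
blocks = []
for t, c, n, s in setting:
    for i in range(n):
        stride = s if i == 0 else 1
        blocks.append((cur_channel, c, stride, t))   # (inp, oup, stride, expansion)
        cur_channel = c

print(len(blocks))              # 15 BottleNeck blocks in total
print(blocks[0], blocks[-1])    # (64, 64, 2, 2) ... (128, 128, 1, 2)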
+ +# reference: https://arxiv.org/abs/1801.04381 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D + +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "MobileNetV2_x0_25": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_25_pretrained.pdparams", + "MobileNetV2_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_5_pretrained.pdparams", + "MobileNetV2_x0_75": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_75_pretrained.pdparams", + "MobileNetV2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_pretrained.pdparams", + "MobileNetV2_x1_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x1_5_pretrained.pdparams", + "MobileNetV2_x2_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x2_0_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + filter_size, + num_filters, + stride, + padding, + channels=None, + num_groups=1, + name=None, + use_cudnn=True, + data_format="NCHW"): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False, + data_format=data_format) + + self._batch_norm = BatchNorm( + num_filters, + param_attr=ParamAttr(name=name + "_bn_scale"), + bias_attr=ParamAttr(name=name + "_bn_offset"), + moving_mean_name=name + "_bn_mean", + moving_variance_name=name + "_bn_variance", + data_layout=data_format) + + def forward(self, inputs, if_act=True): + y = self._conv(inputs) + y = self._batch_norm(y) + if if_act: + y = F.relu6(y) + return y + + +class InvertedResidualUnit(nn.Layer): + def __init__(self, + num_channels, + num_in_filter, + num_filters, + stride, + filter_size, + padding, + expansion_factor, + name, + data_format="NCHW"): + super(InvertedResidualUnit, self).__init__() + num_expfilter = int(round(num_in_filter * expansion_factor)) + self._expand_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=num_expfilter, + filter_size=1, + stride=1, + padding=0, + num_groups=1, + name=name + "_expand", + data_format=data_format) + + self._bottleneck_conv = ConvBNLayer( + num_channels=num_expfilter, + num_filters=num_expfilter, + filter_size=filter_size, + stride=stride, + padding=padding, + num_groups=num_expfilter, + use_cudnn=False, + name=name + "_dwise", + data_format=data_format) + + self._linear_conv = ConvBNLayer( + num_channels=num_expfilter, + num_filters=num_filters, + filter_size=1, + stride=1, + padding=0, + num_groups=1, + name=name + "_linear", + data_format=data_format) + + def forward(self, inputs, ifshortcut): + y = self._expand_conv(inputs, if_act=True) + y = self._bottleneck_conv(y, if_act=True) + y = self._linear_conv(y, if_act=False) + if ifshortcut: + y = paddle.add(inputs, y) + return y + + +class InvresiBlocks(nn.Layer): + def __init__(self, in_c, t, c, n, s, name, data_format="NCHW"): + super(InvresiBlocks, self).__init__() + + self._first_block = 
InvertedResidualUnit( + num_channels=in_c, + num_in_filter=in_c, + num_filters=c, + stride=s, + filter_size=3, + padding=1, + expansion_factor=t, + name=name + "_1", + data_format=data_format) + + self._block_list = [] + for i in range(1, n): + block = self.add_sublayer( + name + "_" + str(i + 1), + sublayer=InvertedResidualUnit( + num_channels=c, + num_in_filter=c, + num_filters=c, + stride=1, + filter_size=3, + padding=1, + expansion_factor=t, + name=name + "_" + str(i + 1), + data_format=data_format)) + self._block_list.append(block) + + def forward(self, inputs): + y = self._first_block(inputs, ifshortcut=False) + for block in self._block_list: + y = block(y, ifshortcut=True) + return y + + +class MobileNet(nn.Layer): + def __init__(self, + class_num=1000, + scale=1.0, + prefix_name="", + data_format="NCHW"): + super(MobileNet, self).__init__() + self.scale = scale + self.class_num = class_num + self.data_format = data_format + + bottleneck_params_list = [ + (1, 16, 1, 1), + (6, 24, 2, 2), + (6, 32, 3, 2), + (6, 64, 4, 2), + (6, 96, 3, 1), + (6, 160, 3, 2), + (6, 320, 1, 1), + ] + + self.conv1 = ConvBNLayer( + num_channels=3, + num_filters=int(32 * scale), + filter_size=3, + stride=2, + padding=1, + name=prefix_name + "conv1_1", + data_format=data_format) + + self.block_list = [] + i = 1 + in_c = int(32 * scale) + for layer_setting in bottleneck_params_list: + t, c, n, s = layer_setting + i += 1 + block = self.add_sublayer( + prefix_name + "conv" + str(i), + sublayer=InvresiBlocks( + in_c=in_c, + t=t, + c=int(c * scale), + n=n, + s=s, + name=prefix_name + "conv" + str(i), + data_format=data_format)) + self.block_list.append(block) + in_c = int(c * scale) + + self.out_c = int(1280 * scale) if scale > 1.0 else 1280 + self.conv9 = ConvBNLayer( + num_channels=in_c, + num_filters=self.out_c, + filter_size=1, + stride=1, + padding=0, + name=prefix_name + "conv9", + data_format=data_format) + + self.pool2d_avg = AdaptiveAvgPool2D(1, data_format=data_format) + + self.out = Linear( + self.out_c, + class_num, + weight_attr=ParamAttr(name=prefix_name + "fc10_weights"), + bias_attr=ParamAttr(name=prefix_name + "fc10_offset")) + + def forward(self, inputs): + if self.data_format == "NHWC": + inputs = paddle.transpose(inputs, [0, 2, 3, 1]) + inputs.stop_gradient = True + y = self.conv1(inputs, if_act=True) + for block in self.block_list: + y = block(y) + y = self.conv9(y, if_act=True) + y = self.pool2d_avg(y) + y = paddle.flatten(y, start_axis=1, stop_axis=-1) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def MobileNetV2_x0_25(pretrained=False, use_ssld=False, **kwargs): + model = MobileNet(scale=0.25, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileNetV2_x0_25"], use_ssld=use_ssld) + return model + + +def MobileNetV2_x0_5(pretrained=False, use_ssld=False, **kwargs): + model = MobileNet(scale=0.5, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileNetV2_x0_5"], use_ssld=use_ssld) + return model + + +def MobileNetV2_x0_75(pretrained=False, use_ssld=False, **kwargs): + model = MobileNet(scale=0.75, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileNetV2_x0_75"], use_ssld=use_ssld) + return model + + +def MobileNetV2(pretrained=False, use_ssld=False, **kwargs): + model = MobileNet(scale=1.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileNetV2"], use_ssld=use_ssld) + return model + + +def MobileNetV2_x1_5(pretrained=False, use_ssld=False, **kwargs): + model = MobileNet(scale=1.5, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileNetV2_x1_5"], use_ssld=use_ssld) + return model + + +def MobileNetV2_x2_0(pretrained=False, use_ssld=False, **kwargs): + model = MobileNet(scale=2.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileNetV2_x2_0"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilenext.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilenext.py new file mode 100644 index 000000000..e432d2d7b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilenext.py @@ -0,0 +1,262 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was heavily based on https://github.com/zhoudaquan/rethinking_bottleneck_design +# reference: https://arxiv.org/abs/2007.02269 + +import math +import paddle.nn as nn + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "MobileNeXt_x0_35": "", # TODO + "MobileNeXt_x0_5": "", # TODO + "MobileNeXt_x0_75": "", # TODO + "MobileNeXt_x1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNeXt_x1_0_pretrained.pdparams", + "MobileNeXt_x1_4": "", # TODO +} + +__all__ = list(MODEL_URLS.keys()) + + +def _make_divisible(v, divisor, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +def conv_3x3_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2D( + inp, oup, 3, stride, 1, bias_attr=False), + nn.BatchNorm2D(oup), + nn.ReLU6()) + + +class SGBlock(nn.Layer): + def __init__(self, inp, oup, stride, expand_ratio, keep_3x3=False): + super(SGBlock, self).__init__() + assert stride in [1, 2] + + hidden_dim = inp // expand_ratio + if hidden_dim < oup / 6.: + hidden_dim = math.ceil(oup / 6.) 
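+        # The bottleneck width is clamped to at least ceil(oup / 6); the next line then rounds it to a multiple of 16 via _make_divisible.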
+ hidden_dim = _make_divisible(hidden_dim, 16) # + 16 + + self.identity = False + self.identity_div = 1 + self.expand_ratio = expand_ratio + + if expand_ratio == 2: + self.conv = nn.Sequential( + # dw + nn.Conv2D( + inp, inp, 3, 1, 1, groups=inp, bias_attr=False), + nn.BatchNorm2D(inp), + nn.ReLU6(), + # pw-linear + nn.Conv2D( + inp, hidden_dim, 1, 1, 0, bias_attr=False), + nn.BatchNorm2D(hidden_dim), + # pw-linear + nn.Conv2D( + hidden_dim, oup, 1, 1, 0, bias_attr=False), + nn.BatchNorm2D(oup), + nn.ReLU6(), + # dw + nn.Conv2D( + oup, oup, 3, stride, 1, groups=oup, bias_attr=False), + nn.BatchNorm2D(oup)) + elif inp != oup and stride == 1 and keep_3x3 == False: + self.conv = nn.Sequential( + # pw-linear + nn.Conv2D( + inp, hidden_dim, 1, 1, 0, bias_attr=False), + nn.BatchNorm2D(hidden_dim), + # pw-linear + nn.Conv2D( + hidden_dim, oup, 1, 1, 0, bias_attr=False), + nn.BatchNorm2D(oup), + nn.ReLU6()) + elif inp != oup and stride == 2 and keep_3x3 == False: + self.conv = nn.Sequential( + # pw-linear + nn.Conv2D( + inp, hidden_dim, 1, 1, 0, bias_attr=False), + nn.BatchNorm2D(hidden_dim), + # pw-linear + nn.Conv2D( + hidden_dim, oup, 1, 1, 0, bias_attr=False), + nn.BatchNorm2D(oup), + nn.ReLU6(), + # dw + nn.Conv2D( + oup, oup, 3, stride, 1, groups=oup, bias_attr=False), + nn.BatchNorm2D(oup)) + else: + if keep_3x3 == False: + self.identity = True + self.conv = nn.Sequential( + # dw + nn.Conv2D( + inp, inp, 3, 1, 1, groups=inp, bias_attr=False), + nn.BatchNorm2D(inp), + nn.ReLU6(), + # pw + nn.Conv2D( + inp, hidden_dim, 1, 1, 0, bias_attr=False), + nn.BatchNorm2D(hidden_dim), + #nn.ReLU6(), + # pw + nn.Conv2D( + hidden_dim, oup, 1, 1, 0, bias_attr=False), + nn.BatchNorm2D(oup), + nn.ReLU6(), + # dw + nn.Conv2D( + oup, oup, 3, 1, 1, groups=oup, bias_attr=False), + nn.BatchNorm2D(oup)) + + def forward(self, x): + out = self.conv(x) + + if self.identity: + if self.identity_div == 1: + out = out + x + else: + shape = x.shape + id_tensor = x[:, :shape[1] // self.identity_div, :, :] + out[:, :shape[1] // self.identity_div, :, :] = \ + out[:, :shape[1] // self.identity_div, :, :] + id_tensor + + return out + + +class MobileNeXt(nn.Layer): + def __init__(self, class_num=1000, width_mult=1.00): + super().__init__() + + # setting of inverted residual blocks + self.cfgs = [ + # t, c, n, s + [2, 96, 1, 2], + [6, 144, 1, 1], + [6, 192, 3, 2], + [6, 288, 3, 2], + [6, 384, 4, 1], + [6, 576, 4, 2], + [6, 960, 3, 1], + [6, 1280, 1, 1], + ] + + # building first layer + input_channel = _make_divisible(32 * width_mult, 4 + if width_mult == 0.1 else 8) + layers = [conv_3x3_bn(3, input_channel, 2)] + # building inverted residual blocks + block = SGBlock + for t, c, n, s in self.cfgs: + output_channel = _make_divisible(c * width_mult, 4 + if width_mult == 0.1 else 8) + if c == 1280 and width_mult < 1: + output_channel = 1280 + layers.append( + block(input_channel, output_channel, s, t, n == 1 and s == 1)) + input_channel = output_channel + for _ in range(n - 1): + layers.append(block(input_channel, output_channel, 1, t)) + input_channel = output_channel + self.features = nn.Sequential(*layers) + # building last several layers + input_channel = output_channel + output_channel = _make_divisible(input_channel, 4) + self.avgpool = nn.AdaptiveAvgPool2D((1, 1)) + self.classifier = nn.Sequential( + nn.Dropout(0.2), nn.Linear(output_channel, class_num)) + + self.apply(self._initialize_weights) + + def _initialize_weights(self, m): + if isinstance(m, nn.Conv2D): + n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels + 
nn.initializer.Normal(std=math.sqrt(2. / n))(m.weight) + if m.bias is not None: + nn.initializer.Constant(0)(m.bias) + elif isinstance(m, nn.BatchNorm2D): + nn.initializer.Constant(1)(m.weight) + nn.initializer.Constant(0)(m.bias) + elif isinstance(m, nn.Linear): + nn.initializer.Normal(std=0.01)(m.weight) + nn.initializer.Constant(0)(m.bias) + + def forward(self, x): + x = self.features(x) + x = self.avgpool(x) + x = x.flatten(1) + x = self.classifier(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def MobileNeXt_x0_35(pretrained=False, use_ssld=False, **kwargs): + model = MobileNeXt(width_mult=0.35, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileNeXt_x0_35"], use_ssld=use_ssld) + return model + + +def MobileNeXt_x0_5(pretrained=False, use_ssld=False, **kwargs): + model = MobileNeXt(width_mult=0.50, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileNeXt_x0_5"], use_ssld=use_ssld) + return model + + +def MobileNeXt_x0_75(pretrained=False, use_ssld=False, **kwargs): + model = MobileNeXt(width_mult=0.75, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileNeXt_x0_75"], use_ssld=use_ssld) + return model + + +def MobileNeXt_x1_0(pretrained=False, use_ssld=False, **kwargs): + model = MobileNeXt(width_mult=1.00, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileNeXt_x1_0"], use_ssld=use_ssld) + return model + + +def MobileNeXt_x1_4(pretrained=False, use_ssld=False, **kwargs): + model = MobileNeXt(width_mult=1.40, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileNeXt_x1_4"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilevit.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilevit.py new file mode 100644 index 000000000..58adaf18e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilevit.py @@ -0,0 +1,479 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
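
Reviewer note (not part of the patch): each backbone file in this change exposes per-variant factory functions plus a `_load_pretrained` helper that accepts `False`, `True` (load the weights referenced in MODEL_URLS) or a local `.pdparams` path. A minimal usage sketch follows, assuming the ppcls package from this patch is importable; the module path and input size are assumptions for illustration only.

    import paddle
    # assumed import path; adjust to wherever the patched ppcls package lives
    from ppcls.arch.backbone.model_zoo.mobilenext import MobileNeXt_x1_0

    model = MobileNeXt_x1_0(pretrained=False, class_num=1000)  # or pretrained="/path/to/weights.pdparams"
    model.eval()
    x = paddle.randn([1, 3, 224, 224])   # dummy NCHW input
    logits = model(x)                    # shape [1, 1000]
    print(logits.shape)
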
+ +# Code was based on https://github.com/BR-IDL/PaddleViT/blob/develop/image_classification/MobileViT/mobilevit.py +# and https://github.com/apple/ml-cvnets/blob/main/cvnets/models/classification/mobilevit.py +# reference: https://arxiv.org/abs/2110.02178 + +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.initializer import KaimingUniform, TruncatedNormal, Constant +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "MobileViT_XXS": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XXS_pretrained.pdparams", + "MobileViT_XS": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XS_pretrained.pdparams", + "MobileViT_S": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_S_pretrained.pdparams", +} + + +def _init_weights_linear(): + weight_attr = ParamAttr(initializer=TruncatedNormal(std=.02)) + bias_attr = ParamAttr(initializer=Constant(0.0)) + return weight_attr, bias_attr + + +def _init_weights_layernorm(): + weight_attr = ParamAttr(initializer=Constant(1.0)) + bias_attr = ParamAttr(initializer=Constant(0.0)) + return weight_attr, bias_attr + + +class ConvBnAct(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=0, + bias_attr=False, + groups=1): + super().__init__() + self.in_channels = in_channels + self.conv = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + weight_attr=ParamAttr(initializer=KaimingUniform()), + bias_attr=bias_attr) + self.norm = nn.BatchNorm2D(out_channels) + self.act = nn.Silu() + + def forward(self, inputs): + out = self.conv(inputs) + out = self.norm(out) + out = self.act(out) + return out + + +class Identity(nn.Layer): + """ Identity layer""" + + def __init__(self): + super().__init__() + + def forward(self, inputs): + return inputs + + +class Mlp(nn.Layer): + def __init__(self, embed_dim, mlp_ratio, dropout=0.1): + super().__init__() + w_attr_1, b_attr_1 = _init_weights_linear() + self.fc1 = nn.Linear( + embed_dim, + int(embed_dim * mlp_ratio), + weight_attr=w_attr_1, + bias_attr=b_attr_1) + + w_attr_2, b_attr_2 = _init_weights_linear() + self.fc2 = nn.Linear( + int(embed_dim * mlp_ratio), + embed_dim, + weight_attr=w_attr_2, + bias_attr=b_attr_2) + + self.act = nn.Silu() + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.dropout1(x) + x = self.fc2(x) + x = self.dropout2(x) + return x + + +class Attention(nn.Layer): + def __init__(self, + embed_dim, + num_heads, + qkv_bias=True, + dropout=0.1, + attention_dropout=0.): + super().__init__() + self.num_heads = num_heads + self.attn_head_dim = int(embed_dim / self.num_heads) + self.all_head_dim = self.attn_head_dim * self.num_heads + + w_attr_1, b_attr_1 = _init_weights_linear() + self.qkv = nn.Linear( + embed_dim, + self.all_head_dim * 3, + weight_attr=w_attr_1, + bias_attr=b_attr_1 if qkv_bias else False) + + self.scales = self.attn_head_dim**-0.5 + + w_attr_2, b_attr_2 = _init_weights_linear() + self.proj = nn.Linear( + embed_dim, embed_dim, weight_attr=w_attr_2, bias_attr=b_attr_2) + + self.attn_dropout = nn.Dropout(attention_dropout) + self.proj_dropout = nn.Dropout(dropout) + self.softmax = nn.Softmax(axis=-1) + + def transpose_multihead(self, x): + B, P, N, d = x.shape + x = x.reshape([B, P, N, 
self.num_heads, d // self.num_heads]) + x = x.transpose([0, 1, 3, 2, 4]) + return x + + def forward(self, x): + b_sz, n_patches, in_channels = x.shape + qkv = self.qkv(x) + qkv = qkv.reshape([ + b_sz, n_patches, 3, self.num_heads, + qkv.shape[-1] // self.num_heads // 3 + ]) + qkv = qkv.transpose([0, 3, 2, 1, 4]) + query, key, value = qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2] + query = query * self.scales + key = key.transpose([0, 1, 3, 2]) + # QK^T + attn = paddle.matmul(query, key) + attn = self.softmax(attn) + attn = self.attn_dropout(attn) + # weighted sum + out = paddle.matmul(attn, value) + out = out.transpose([0, 2, 1, 3]).reshape( + [b_sz, n_patches, out.shape[1] * out.shape[3]]) + out = self.proj(out) + out = self.proj_dropout(out) + return out + + +class EncoderLayer(nn.Layer): + def __init__(self, + embed_dim, + num_heads=4, + qkv_bias=True, + mlp_ratio=2.0, + dropout=0.1, + attention_dropout=0., + droppath=0.): + super().__init__() + w_attr_1, b_attr_1 = _init_weights_layernorm() + w_attr_2, b_attr_2 = _init_weights_layernorm() + + self.attn_norm = nn.LayerNorm( + embed_dim, weight_attr=w_attr_1, bias_attr=b_attr_1) + self.attn = Attention(embed_dim, num_heads, qkv_bias, dropout, + attention_dropout) + self.drop_path = DropPath(droppath) if droppath > 0. else Identity() + self.mlp_norm = nn.LayerNorm( + embed_dim, weight_attr=w_attr_2, bias_attr=b_attr_2) + self.mlp = Mlp(embed_dim, mlp_ratio, dropout) + + def forward(self, x): + h = x + x = self.attn_norm(x) + x = self.attn(x) + x = self.drop_path(x) + x = h + x + h = x + x = self.mlp_norm(x) + x = self.mlp(x) + x = self.drop_path(x) + x = x + h + return x + + +class Transformer(nn.Layer): + """Transformer block for MobileViTBlock""" + + def __init__(self, + embed_dim, + num_heads, + depth, + qkv_bias=True, + mlp_ratio=2.0, + dropout=0.1, + attention_dropout=0., + droppath=0.): + super().__init__() + depth_decay = [x.item() for x in paddle.linspace(0, droppath, depth)] + + layer_list = [] + for i in range(depth): + layer_list.append( + EncoderLayer(embed_dim, num_heads, qkv_bias, mlp_ratio, + dropout, attention_dropout, droppath)) + self.layers = nn.LayerList(layer_list) + + w_attr_1, b_attr_1 = _init_weights_layernorm() + self.norm = nn.LayerNorm( + embed_dim, weight_attr=w_attr_1, bias_attr=b_attr_1, epsilon=1e-6) + + def forward(self, x): + for layer in self.layers: + x = layer(x) + out = self.norm(x) + return out + + +class MobileV2Block(nn.Layer): + """Mobilenet v2 InvertedResidual block""" + + def __init__(self, inp, oup, stride=1, expansion=4): + super().__init__() + self.stride = stride + assert stride in [1, 2] + + hidden_dim = int(round(inp * expansion)) + self.use_res_connect = self.stride == 1 and inp == oup + + layers = [] + if expansion != 1: + layers.append(ConvBnAct(inp, hidden_dim, kernel_size=1)) + + layers.extend([ + # dw + ConvBnAct( + hidden_dim, + hidden_dim, + stride=stride, + groups=hidden_dim, + padding=1), + # pw-linear + nn.Conv2D( + hidden_dim, oup, 1, 1, 0, bias_attr=False), + nn.BatchNorm2D(oup), + ]) + + self.conv = nn.Sequential(*layers) + self.out_channels = oup + + def forward(self, x): + if self.use_res_connect: + return x + self.conv(x) + return self.conv(x) + + +class MobileViTBlock(nn.Layer): + """ MobileViTBlock for MobileViT""" + + def __init__(self, + dim, + hidden_dim, + depth, + num_heads=4, + qkv_bias=True, + mlp_ratio=2.0, + dropout=0.1, + attention_dropout=0., + droppath=0.0, + patch_size=(2, 2)): + super().__init__() + self.patch_h, self.patch_w = patch_size + + # local 
representations + self.conv1 = ConvBnAct(dim, dim, padding=1) + self.conv2 = nn.Conv2D( + dim, hidden_dim, kernel_size=1, stride=1, bias_attr=False) + # global representations + self.transformer = Transformer( + embed_dim=hidden_dim, + num_heads=num_heads, + depth=depth, + qkv_bias=qkv_bias, + mlp_ratio=mlp_ratio, + dropout=dropout, + attention_dropout=attention_dropout, + droppath=droppath) + + # fusion + self.conv3 = ConvBnAct(hidden_dim, dim, kernel_size=1) + self.conv4 = ConvBnAct(2 * dim, dim, padding=1) + + def forward(self, x): + h = x + x = self.conv1(x) + x = self.conv2(x) + + patch_h = self.patch_h + patch_w = self.patch_w + patch_area = int(patch_w * patch_h) + _, in_channels, orig_h, orig_w = x.shape + new_h = int(math.ceil(orig_h / self.patch_h) * self.patch_h) + new_w = int(math.ceil(orig_w / self.patch_w) * self.patch_w) + interpolate = False + + if new_w != orig_w or new_h != orig_h: + x = F.interpolate(x, size=[new_h, new_w], mode="bilinear") + interpolate = True + + num_patch_w, num_patch_h = new_w // patch_w, new_h // patch_h + num_patches = num_patch_h * num_patch_w + reshaped_x = x.reshape([-1, patch_h, num_patch_w, patch_w]) + transposed_x = reshaped_x.transpose([0, 2, 1, 3]) + reshaped_x = transposed_x.reshape( + [-1, in_channels, num_patches, patch_area]) + transposed_x = reshaped_x.transpose([0, 3, 2, 1]) + + x = transposed_x.reshape([-1, num_patches, in_channels]) + x = self.transformer(x) + x = x.reshape([-1, patch_h * patch_w, num_patches, in_channels]) + + _, pixels, num_patches, channels = x.shape + x = x.transpose([0, 3, 2, 1]) + x = x.reshape([-1, num_patch_w, patch_h, patch_w]) + x = x.transpose([0, 2, 1, 3]) + x = x.reshape( + [-1, channels, num_patch_h * patch_h, num_patch_w * patch_w]) + + if interpolate: + x = F.interpolate(x, size=[orig_h, orig_w]) + x = self.conv3(x) + x = paddle.concat((h, x), axis=1) + x = self.conv4(x) + return x + + +class MobileViT(nn.Layer): + """ MobileViT + A PaddlePaddle impl of : `MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer` - + https://arxiv.org/abs/2110.02178 + """ + + def __init__(self, + in_channels=3, + dims=[16, 32, 48, 48, 48, 64, 80, 96, 384], + hidden_dims=[96, 120, 144], + mv2_expansion=4, + class_num=1000): + super().__init__() + self.conv3x3 = ConvBnAct( + in_channels, dims[0], kernel_size=3, stride=2, padding=1) + self.mv2_block_1 = MobileV2Block( + dims[0], dims[1], expansion=mv2_expansion) + self.mv2_block_2 = MobileV2Block( + dims[1], dims[2], stride=2, expansion=mv2_expansion) + self.mv2_block_3 = MobileV2Block( + dims[2], dims[3], expansion=mv2_expansion) + self.mv2_block_4 = MobileV2Block( + dims[3], dims[4], expansion=mv2_expansion) + + self.mv2_block_5 = MobileV2Block( + dims[4], dims[5], stride=2, expansion=mv2_expansion) + self.mvit_block_1 = MobileViTBlock(dims[5], hidden_dims[0], depth=2) + + self.mv2_block_6 = MobileV2Block( + dims[5], dims[6], stride=2, expansion=mv2_expansion) + self.mvit_block_2 = MobileViTBlock(dims[6], hidden_dims[1], depth=4) + + self.mv2_block_7 = MobileV2Block( + dims[6], dims[7], stride=2, expansion=mv2_expansion) + self.mvit_block_3 = MobileViTBlock(dims[7], hidden_dims[2], depth=3) + self.conv1x1 = ConvBnAct(dims[7], dims[8], kernel_size=1) + + self.pool = nn.AdaptiveAvgPool2D(1) + self.dropout = nn.Dropout(0.1) + self.linear = nn.Linear(dims[8], class_num) + + def forward(self, x): + x = self.conv3x3(x) + x = self.mv2_block_1(x) + x = self.mv2_block_2(x) + x = self.mv2_block_3(x) + x = self.mv2_block_4(x) + + x = self.mv2_block_5(x) + 
x = self.mvit_block_1(x) + + x = self.mv2_block_6(x) + x = self.mvit_block_2(x) + + x = self.mv2_block_7(x) + x = self.mvit_block_3(x) + x = self.conv1x1(x) + + x = self.pool(x) + x = x.reshape(x.shape[:2]) + + x = self.dropout(x) + x = self.linear(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def MobileViT_XXS(pretrained=False, use_ssld=False, **kwargs): + model = MobileViT( + in_channels=3, + dims=[16, 16, 24, 24, 24, 48, 64, 80, 320], + hidden_dims=[64, 80, 96], + mv2_expansion=2, + **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViT_XXS"], use_ssld=use_ssld) + return model + + +def MobileViT_XS(pretrained=False, use_ssld=False, **kwargs): + model = MobileViT( + in_channels=3, + dims=[16, 32, 48, 48, 48, 64, 80, 96, 384], + hidden_dims=[96, 120, 144], + mv2_expansion=4, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViT_XS"], use_ssld=use_ssld) + return model + + +def MobileViT_S(pretrained=False, use_ssld=False, **kwargs): + model = MobileViT( + in_channels=3, + dims=[16, 32, 64, 64, 64, 96, 128, 160, 640], + hidden_dims=[144, 192, 240], + mv2_expansion=4, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViT_S"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilevit_v2.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilevit_v2.py new file mode 100644 index 000000000..9a57448e9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilevit_v2.py @@ -0,0 +1,593 @@ +# copyright (c) 2023 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
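
Reviewer note (not part of the patch): the chained reshape/transpose in MobileViTBlock.forward above is easiest to follow with concrete sizes. The toy sketch below, using assumed toy dimensions, reproduces the same rearrangement and shows that the transformer consumes tensors of shape [batch * patch_area, num_patches, channels].

    import paddle

    B, C, H, W = 2, 8, 8, 8                   # toy sizes (assumptions, illustration only)
    patch_h = patch_w = 2
    n_h, n_w = H // patch_h, W // patch_w     # 4 x 4 grid of patches
    N, P = n_h * n_w, patch_h * patch_w       # N = 16 patches, P = 4 pixels per patch

    x = paddle.randn([B, C, H, W])
    x = x.reshape([-1, patch_h, n_w, patch_w]).transpose([0, 2, 1, 3])
    x = x.reshape([-1, C, N, P]).transpose([0, 3, 2, 1])   # [B, P, N, C]
    tokens = x.reshape([-1, N, C])            # what self.transformer receives
    print(tokens.shape)                       # [8, 16, 8], i.e. [B * P, N, C]
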
+ +# Code was based on https://github.com/apple/ml-cvnets/blob/7be93d3debd45c240a058e3f34a9e88d33c07a7d/cvnets/models/classification/mobilevit_v2.py +# reference: https://arxiv.org/abs/2206.02680 + +from functools import partial +from typing import Dict, Optional, Tuple, Union + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "MobileViTV2_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV2_x0_5_pretrained.pdparams", + "MobileViTV2_x1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV2_x1_0_pretrained.pdparams", + "MobileViTV2_x1_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV2_x1_5_pretrained.pdparams", + "MobileViTV2_x2_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV2_x2_0_pretrained.pdparams", +} + +layer_norm_2d = partial(nn.GroupNorm, num_groups=1) + + +def make_divisible(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class InvertedResidual(nn.Layer): + """ + Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381 + """ + + def __init__(self, + in_channels, + out_channels, + stride, + expand_ratio, + dilation=1, + skip_connection=True): + super().__init__() + assert stride in [1, 2] + self.stride = stride + + hidden_dim = make_divisible(int(round(in_channels * expand_ratio)), 8) + self.use_res_connect = self.stride == 1 and in_channels == out_channels and skip_connection + + block = nn.Sequential() + if expand_ratio != 1: + block.add_sublayer( + name="exp_1x1", + sublayer=nn.Sequential( + ('conv', nn.Conv2D( + in_channels, hidden_dim, 1, bias_attr=False)), + ('norm', nn.BatchNorm2D(hidden_dim)), ('act', nn.Silu()))) + + block.add_sublayer( + name="conv_3x3", + sublayer=nn.Sequential( + ('conv', nn.Conv2D( + hidden_dim, + hidden_dim, + 3, + bias_attr=False, + stride=stride, + padding=dilation, + dilation=dilation, + groups=hidden_dim)), ('norm', nn.BatchNorm2D(hidden_dim)), + ('act', nn.Silu()))) + + block.add_sublayer( + name="red_1x1", + sublayer=nn.Sequential( + ('conv', nn.Conv2D( + hidden_dim, out_channels, 1, bias_attr=False)), + ('norm', nn.BatchNorm2D(out_channels)))) + + self.block = block + self.in_channels = in_channels + self.out_channels = out_channels + self.exp = expand_ratio + self.dilation = dilation + + def forward(self, x): + if self.use_res_connect: + return x + self.block(x) + else: + return self.block(x) + + +class LinearSelfAttention(nn.Layer): + def __init__(self, embed_dim, attn_dropout=0.0, bias=True): + super().__init__() + self.embed_dim = embed_dim + self.qkv_proj = nn.Conv2D( + embed_dim, 1 + (2 * embed_dim), 1, bias_attr=bias) + self.attn_dropout = nn.Dropout(p=attn_dropout) + self.out_proj = nn.Conv2D(embed_dim, embed_dim, 1, bias_attr=bias) + + def forward(self, x): + # [B, C, P, N] --> [B, h + 2d, P, N] + qkv = self.qkv_proj(x) + + # Project x into query, key and value + # Query --> [B, 1, P, N] + # value, key --> [B, d, P, N] + query, key, value = paddle.split( + qkv, [1, self.embed_dim, self.embed_dim], axis=1) + + # apply softmax along N dimension + context_scores = F.softmax(query, axis=-1) + # Uncomment below line to visualize context scores + # self.visualize_context_scores(context_scores=context_scores) + context_scores = self.attn_dropout(context_scores) + + # 
Compute context vector + # [B, d, P, N] x [B, 1, P, N] -> [B, d, P, N] + context_vector = key * context_scores + # [B, d, P, N] --> [B, d, P, 1] + context_vector = paddle.sum(context_vector, axis=-1, keepdim=True) + + # combine context vector with values + # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] + out = F.relu(value) * context_vector + out = self.out_proj(out) + return out + + +class LinearAttnFFN(nn.Layer): + def __init__(self, + embed_dim, + ffn_latent_dim, + attn_dropout=0.0, + dropout=0.1, + ffn_dropout=0.0, + norm_layer=layer_norm_2d) -> None: + super().__init__() + attn_unit = LinearSelfAttention( + embed_dim=embed_dim, attn_dropout=attn_dropout, bias=True) + + self.pre_norm_attn = nn.Sequential( + norm_layer(num_channels=embed_dim), + attn_unit, + nn.Dropout(p=dropout)) + + self.pre_norm_ffn = nn.Sequential( + norm_layer(num_channels=embed_dim), + nn.Conv2D(embed_dim, ffn_latent_dim, 1), + nn.Silu(), + nn.Dropout(p=ffn_dropout), + nn.Conv2D(ffn_latent_dim, embed_dim, 1), + nn.Dropout(p=dropout)) + + def forward(self, x): + # self-attention + x = x + self.pre_norm_attn(x) + # Feed forward network + x = x + self.pre_norm_ffn(x) + return x + + +class MobileViTV2Block(nn.Layer): + """ + This class defines the `MobileViTV2 block` + """ + + def __init__(self, + in_channels, + attn_unit_dim, + ffn_multiplier=2.0, + n_attn_blocks=2, + attn_dropout=0.0, + dropout=0.0, + ffn_dropout=0.0, + patch_h=8, + patch_w=8, + conv_ksize=3, + dilation=1, + attn_norm_layer=layer_norm_2d): + super().__init__() + cnn_out_dim = attn_unit_dim + padding = (conv_ksize - 1) // 2 * dilation + conv_3x3_in = nn.Sequential( + ('conv', nn.Conv2D( + in_channels, + in_channels, + conv_ksize, + bias_attr=False, + padding=padding, + dilation=dilation, + groups=in_channels)), ('norm', nn.BatchNorm2D(in_channels)), + ('act', nn.Silu())) + conv_1x1_in = nn.Sequential(('conv', nn.Conv2D( + in_channels, cnn_out_dim, 1, bias_attr=False))) + + self.local_rep = nn.Sequential(conv_3x3_in, conv_1x1_in) + + self.global_rep, attn_unit_dim = self._build_attn_layer( + d_model=attn_unit_dim, + ffn_mult=ffn_multiplier, + n_layers=n_attn_blocks, + attn_dropout=attn_dropout, + dropout=dropout, + ffn_dropout=ffn_dropout, + attn_norm_layer=attn_norm_layer) + + self.conv_proj = nn.Sequential( + ('conv', nn.Conv2D( + cnn_out_dim, in_channels, 1, bias_attr=False)), + ('norm', nn.BatchNorm2D(in_channels))) + + self.patch_h = patch_h + self.patch_w = patch_w + + def _build_attn_layer(self, d_model, ffn_mult, n_layers, attn_dropout, + dropout, ffn_dropout, attn_norm_layer): + # ensure that dims are multiple of 16 + ffn_dims = [ffn_mult * d_model // 16 * 16] * n_layers + + global_rep = [ + LinearAttnFFN( + embed_dim=d_model, + ffn_latent_dim=ffn_dims[block_idx], + attn_dropout=attn_dropout, + dropout=dropout, + ffn_dropout=ffn_dropout, + norm_layer=attn_norm_layer) for block_idx in range(n_layers) + ] + global_rep.append(attn_norm_layer(num_channels=d_model)) + + return nn.Sequential(*global_rep), d_model + + def unfolding(self, feature_map): + batch_size, in_channels, img_h, img_w = feature_map.shape + + # [B, C, H, W] --> [B, C, P, N] + patches = F.unfold( + feature_map, + kernel_sizes=[self.patch_h, self.patch_w], + strides=[self.patch_h, self.patch_w]) + n_patches = img_h * img_w // (self.patch_h * self.patch_w) + patches = patches.reshape( + [batch_size, in_channels, self.patch_h * self.patch_w, n_patches]) + + return patches, (img_h, img_w) + + def folding(self, patches, output_size): + batch_size, in_dim, patch_size, n_patches = 
patches.shape + + # [B, C, P, N] + patches = patches.reshape([batch_size, in_dim * patch_size, n_patches]) + + feature_map = F.fold( + patches, + output_size, + kernel_sizes=[self.patch_h, self.patch_w], + strides=[self.patch_h, self.patch_w]) + + return feature_map + + def forward(self, x): + fm = self.local_rep(x) + + # convert feature map to patches + patches, output_size = self.unfolding(fm) + + # learn global representations on all patches + patches = self.global_rep(patches) + + # [B x Patch x Patches x C] --> [B x C x Patches x Patch] + fm = self.folding(patches=patches, output_size=output_size) + fm = self.conv_proj(fm) + + return fm + + +class MobileViTV2(nn.Layer): + """ + MobileViTV2 + """ + + def __init__(self, mobilevit_config, class_num=1000, output_stride=None): + super().__init__() + self.round_nearest = 8 + self.dilation = 1 + + dilate_l4 = dilate_l5 = False + if output_stride == 8: + dilate_l4 = True + dilate_l5 = True + elif output_stride == 16: + dilate_l5 = True + + # store model configuration in a dictionary + in_channels = mobilevit_config["layer0"]["img_channels"] + out_channels = mobilevit_config["layer0"]["out_channels"] + self.conv_1 = nn.Sequential( + ('conv', nn.Conv2D( + in_channels, + out_channels, + 3, + bias_attr=False, + stride=2, + padding=1)), ('norm', nn.BatchNorm2D(out_channels)), + ('act', nn.Silu())) + + in_channels = out_channels + self.layer_1, out_channels = self._make_layer( + input_channel=in_channels, cfg=mobilevit_config["layer1"]) + + in_channels = out_channels + self.layer_2, out_channels = self._make_layer( + input_channel=in_channels, cfg=mobilevit_config["layer2"]) + + in_channels = out_channels + self.layer_3, out_channels = self._make_layer( + input_channel=in_channels, cfg=mobilevit_config["layer3"]) + + in_channels = out_channels + self.layer_4, out_channels = self._make_layer( + input_channel=in_channels, + cfg=mobilevit_config["layer4"], + dilate=dilate_l4) + + in_channels = out_channels + self.layer_5, out_channels = self._make_layer( + input_channel=in_channels, + cfg=mobilevit_config["layer5"], + dilate=dilate_l5) + + self.conv_1x1_exp = nn.Identity() + self.classifier = nn.Sequential() + self.classifier.add_sublayer( + name="global_pool", + sublayer=nn.Sequential(nn.AdaptiveAvgPool2D(1), nn.Flatten())) + self.classifier.add_sublayer( + name="fc", sublayer=nn.Linear(out_channels, class_num)) + + # weight initialization + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Conv2D): + fan_in = m.weight.shape[1] * m.weight.shape[2] * m.weight.shape[3] + bound = 1.0 / fan_in**0.5 + nn.initializer.Uniform(-bound, bound)(m.weight) + if m.bias is not None: + nn.initializer.Uniform(-bound, bound)(m.bias) + elif isinstance(m, (nn.BatchNorm2D, nn.GroupNorm)): + nn.initializer.Constant(1)(m.weight) + nn.initializer.Constant(0)(m.bias) + elif isinstance(m, nn.Linear): + nn.initializer.XavierUniform()(m.weight) + if m.bias is not None: + nn.initializer.Constant(0)(m.bias) + + def _make_layer(self, input_channel, cfg, dilate=False): + block_type = cfg.get("block_type", "mobilevit") + if block_type.lower() == "mobilevit": + return self._make_mit_layer( + input_channel=input_channel, cfg=cfg, dilate=dilate) + else: + return self._make_mobilenet_layer( + input_channel=input_channel, cfg=cfg) + + def _make_mit_layer(self, input_channel, cfg, dilate=False): + prev_dilation = self.dilation + block = [] + stride = cfg.get("stride", 1) + + if stride == 2: + if dilate: + self.dilation *= 2 + stride = 1 + + layer = 
InvertedResidual( + in_channels=input_channel, + out_channels=cfg.get("out_channels"), + stride=stride, + expand_ratio=cfg.get("mv_expand_ratio", 4), + dilation=prev_dilation) + + block.append(layer) + input_channel = cfg.get("out_channels") + + block.append( + MobileViTV2Block( + in_channels=input_channel, + attn_unit_dim=cfg["attn_unit_dim"], + ffn_multiplier=cfg.get("ffn_multiplier"), + n_attn_blocks=cfg.get("attn_blocks", 1), + ffn_dropout=0., + attn_dropout=0., + dilation=self.dilation, + patch_h=cfg.get("patch_h", 2), + patch_w=cfg.get("patch_w", 2))) + + return nn.Sequential(*block), input_channel + + def _make_mobilenet_layer(self, input_channel, cfg): + output_channels = cfg.get("out_channels") + num_blocks = cfg.get("num_blocks", 2) + expand_ratio = cfg.get("expand_ratio", 4) + block = [] + + for i in range(num_blocks): + stride = cfg.get("stride", 1) if i == 0 else 1 + + layer = InvertedResidual( + in_channels=input_channel, + out_channels=output_channels, + stride=stride, + expand_ratio=expand_ratio) + block.append(layer) + input_channel = output_channels + return nn.Sequential(*block), input_channel + + def extract_features(self, x): + x = self.conv_1(x) + x = self.layer_1(x) + x = self.layer_2(x) + x = self.layer_3(x) + + x = self.layer_4(x) + x = self.layer_5(x) + x = self.conv_1x1_exp(x) + return x + + def forward(self, x): + x = self.extract_features(x) + x = self.classifier(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def get_configuration(width_multiplier): + ffn_multiplier = 2 + mv2_exp_mult = 2 # max(1.0, min(2.0, 2.0 * width_multiplier)) + + layer_0_dim = max(16, min(64, 32 * width_multiplier)) + layer_0_dim = int(make_divisible(layer_0_dim, divisor=8, min_value=16)) + config = { + "layer0": { + "img_channels": 3, + "out_channels": layer_0_dim, + }, + "layer1": { + "out_channels": int(make_divisible(64 * width_multiplier, divisor=16)), + "expand_ratio": mv2_exp_mult, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2", + }, + "layer2": { + "out_channels": int(make_divisible(128 * width_multiplier, divisor=8)), + "expand_ratio": mv2_exp_mult, + "num_blocks": 2, + "stride": 2, + "block_type": "mv2", + }, + "layer3": { # 28x28 + "out_channels": int(make_divisible(256 * width_multiplier, divisor=8)), + "attn_unit_dim": int(make_divisible(128 * width_multiplier, divisor=8)), + "ffn_multiplier": ffn_multiplier, + "attn_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "block_type": "mobilevit", + }, + "layer4": { # 14x14 + "out_channels": int(make_divisible(384 * width_multiplier, divisor=8)), + "attn_unit_dim": int(make_divisible(192 * width_multiplier, divisor=8)), + "ffn_multiplier": ffn_multiplier, + "attn_blocks": 4, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "block_type": "mobilevit", + }, + "layer5": { # 7x7 + "out_channels": int(make_divisible(512 * width_multiplier, divisor=8)), + "attn_unit_dim": int(make_divisible(256 * width_multiplier, divisor=8)), + "ffn_multiplier": ffn_multiplier, + "attn_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "block_type": "mobilevit", + }, + "last_layer_exp_factor": 4, + } + + return config + + +def MobileViTV2_x2_0(pretrained=False, use_ssld=False, **kwargs): + width_multiplier = 2.0 + model = MobileViTV2(get_configuration(width_multiplier), **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV2_x2_0"], use_ssld=use_ssld) + return model + + +def MobileViTV2_x1_75(pretrained=False, use_ssld=False, **kwargs): + width_multiplier = 1.75 + model = MobileViTV2(get_configuration(width_multiplier), **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV2_x1_75"], use_ssld=use_ssld) + return model + + +def MobileViTV2_x1_5(pretrained=False, use_ssld=False, **kwargs): + width_multiplier = 1.5 + model = MobileViTV2(get_configuration(width_multiplier), **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV2_x1_5"], use_ssld=use_ssld) + return model + + +def MobileViTV2_x1_25(pretrained=False, use_ssld=False, **kwargs): + width_multiplier = 1.25 + model = MobileViTV2(get_configuration(width_multiplier), **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV2_x1_25"], use_ssld=use_ssld) + return model + + +def MobileViTV2_x1_0(pretrained=False, use_ssld=False, **kwargs): + width_multiplier = 1.0 + model = MobileViTV2(get_configuration(width_multiplier), **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV2_x1_0"], use_ssld=use_ssld) + return model + + +def MobileViTV2_x0_75(pretrained=False, use_ssld=False, **kwargs): + width_multiplier = 0.75 + model = MobileViTV2(get_configuration(width_multiplier), **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV2_x0_75"], use_ssld=use_ssld) + return model + + +def MobileViTV2_x0_5(pretrained=False, use_ssld=False, **kwargs): + width_multiplier = 0.5 + model = 
MobileViTV2(get_configuration(width_multiplier), **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV2_x0_5"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilevit_v3.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilevit_v3.py new file mode 100644 index 000000000..652ac5626 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/mobilevit_v3.py @@ -0,0 +1,1445 @@ +# copyright (c) 2023 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was based on https://github.com/micronDLA/MobileViTv3/blob/main/MobileViTv3-v1/cvnets/models/classification/mobilevit.py +# reference: https://arxiv.org/abs/2209.15159 + +import math +from functools import partial +from typing import Dict, Optional, Tuple, Union + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "MobileViTV3_XXS": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV3_XXS_pretrained.pdparams", + "MobileViTV3_XS": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV3_XS_pretrained.pdparams", + "MobileViTV3_S": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV3_S_pretrained.pdparams", + "MobileViTV3_XXS_L2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV3_XXS_L2_pretrained.pdparams", + "MobileViTV3_XS_L2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV3_XS_L2_pretrained.pdparams", + "MobileViTV3_S_L2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV3_S_L2_pretrained.pdparams", + "MobileViTV3_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV3_x0_5_pretrained.pdparams", + "MobileViTV3_x0_75": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV3_x0_75_pretrained.pdparams", + "MobileViTV3_x1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViTV3_x1_0_pretrained.pdparams", +} + +layer_norm_2d = partial(nn.GroupNorm, num_groups=1) + + +def make_divisible(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class InvertedResidual(nn.Layer): + """ + Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381 + """ + + def __init__(self, + in_channels: int, + out_channels: int, + stride: int, + expand_ratio: Union[int, float], + dilation: int=1) -> None: + assert stride in [1, 2] + super(InvertedResidual, self).__init__() + self.stride = stride + + hidden_dim = make_divisible(int(round(in_channels * expand_ratio)), 8) + self.use_res_connect = self.stride == 1 and in_channels == out_channels + + block = nn.Sequential() + if expand_ratio != 1: + block.add_sublayer( + 
name="exp_1x1", + sublayer=nn.Sequential( + ('conv', nn.Conv2D( + in_channels, hidden_dim, 1, bias_attr=False)), + ('norm', nn.BatchNorm2D(hidden_dim)), ('act', nn.Silu()))) + + block.add_sublayer( + name="conv_3x3", + sublayer=nn.Sequential( + ('conv', nn.Conv2D( + hidden_dim, + hidden_dim, + 3, + bias_attr=False, + stride=stride, + padding=dilation, + dilation=dilation, + groups=hidden_dim)), ('norm', nn.BatchNorm2D(hidden_dim)), + ('act', nn.Silu()))) + + block.add_sublayer( + name="red_1x1", + sublayer=nn.Sequential( + ('conv', nn.Conv2D( + hidden_dim, out_channels, 1, bias_attr=False)), + ('norm', nn.BatchNorm2D(out_channels)))) + + self.block = block + self.in_channels = in_channels + self.out_channels = out_channels + self.exp = expand_ratio + self.dilation = dilation + + def forward(self, x, *args, **kwargs): + if self.use_res_connect: + return x + self.block(x) + else: + return self.block(x) + + +class MultiHeadAttention(nn.Layer): + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv_proj = nn.Linear(dim, dim * 3, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.out_proj = nn.Linear(dim, dim, bias_attr=qkv_bias) + + def forward(self, x): + # B = x.shape[0] + N, C = x.shape[1:] + qkv = self.qkv_proj(x).reshape((-1, N, 3, self.num_heads, + C // self.num_heads)).transpose( + (2, 0, 3, 1, 4)) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn = (q.matmul(k.transpose((0, 1, 3, 2)))) * self.scale + attn = nn.functional.softmax(attn, axis=-1) + attn = self.attn_drop(attn) + + x = (attn.matmul(v)).transpose((0, 2, 1, 3)).reshape((-1, N, C)) + x = self.out_proj(x) + return x + + +class TransformerEncoder(nn.Layer): + """ + This class defines the Transformer encoder (pre-norm) as described in "Attention is all you need" paper + https://arxiv.org/abs/1706.03762 + """ + + def __init__(self, + embed_dim: int, + ffn_latent_dim: int, + num_heads: Optional[int]=8, + attn_dropout: Optional[float]=0.0, + dropout: Optional[float]=0.1, + ffn_dropout: Optional[float]=0.0, + transformer_norm_layer: nn.Layer=nn.LayerNorm): + super(TransformerEncoder, self).__init__() + + self.pre_norm_mha = nn.Sequential( + transformer_norm_layer(embed_dim), + MultiHeadAttention( + embed_dim, num_heads, attn_drop=attn_dropout, qkv_bias=True), + nn.Dropout(p=dropout)) + + self.pre_norm_ffn = nn.Sequential( + transformer_norm_layer(embed_dim), + nn.Linear(embed_dim, ffn_latent_dim), + nn.Silu(), + nn.Dropout(p=ffn_dropout), + nn.Linear(ffn_latent_dim, embed_dim), + nn.Dropout(p=dropout)) + self.embed_dim = embed_dim + self.ffn_dim = ffn_latent_dim + self.ffn_dropout = ffn_dropout + + def forward(self, x): + # Multi-head attention + x = x + self.pre_norm_mha(x) + + # Feed forward network + x = x + self.pre_norm_ffn(x) + return x + + +class MobileViTV3Block(nn.Layer): + """ + MobileViTV3 block + """ + + def __init__(self, + in_channels: int, + transformer_dim: int, + ffn_dim: int, + n_transformer_blocks: Optional[int]=2, + head_dim: Optional[int]=32, + attn_dropout: Optional[float]=0.1, + dropout: Optional[int]=0.1, + ffn_dropout: Optional[int]=0.1, + patch_h: Optional[int]=8, + patch_w: Optional[int]=8, + transformer_norm_layer: nn.Layer=nn.LayerNorm, + conv_ksize: Optional[int]=3, + dilation: Optional[int]=1, + var_ffn: Optional[bool]=False, + no_fusion: Optional[bool]=False): + + # For MobileViTV3: Normal 3x3 convolution --> Depthwise 
3x3 convolution + padding = (conv_ksize - 1) // 2 * dilation + conv_3x3_in = nn.Sequential( + ('conv', nn.Conv2D( + in_channels, + in_channels, + conv_ksize, + bias_attr=False, + padding=padding, + dilation=dilation, + groups=in_channels)), ('norm', nn.BatchNorm2D(in_channels)), + ('act', nn.Silu())) + conv_1x1_in = nn.Sequential(('conv', nn.Conv2D( + in_channels, transformer_dim, 1, bias_attr=False))) + + conv_1x1_out = nn.Sequential( + ('conv', nn.Conv2D( + transformer_dim, in_channels, 1, bias_attr=False)), + ('norm', nn.BatchNorm2D(in_channels)), ('act', nn.Silu())) + conv_3x3_out = None + + # For MobileViTV3: input+global --> local+global + if not no_fusion: + #input_ch = tr_dim + in_ch + conv_3x3_out = nn.Sequential( + ('conv', nn.Conv2D( + transformer_dim + in_channels, + in_channels, + 1, + bias_attr=False)), ('norm', nn.BatchNorm2D(in_channels)), + ('act', nn.Silu())) + + super().__init__() + self.local_rep = nn.Sequential() + self.local_rep.add_sublayer(name="conv_3x3", sublayer=conv_3x3_in) + self.local_rep.add_sublayer(name="conv_1x1", sublayer=conv_1x1_in) + + assert transformer_dim % head_dim == 0 + num_heads = transformer_dim // head_dim + + ffn_dims = [ffn_dim] * n_transformer_blocks + + global_rep = [ + TransformerEncoder( + embed_dim=transformer_dim, + ffn_latent_dim=ffn_dims[block_idx], + num_heads=num_heads, + attn_dropout=attn_dropout, + dropout=dropout, + ffn_dropout=ffn_dropout, + transformer_norm_layer=transformer_norm_layer) + for block_idx in range(n_transformer_blocks) + ] + global_rep.append(transformer_norm_layer(transformer_dim)) + self.global_rep = nn.Sequential(*global_rep) + + self.conv_proj = conv_1x1_out + + self.fusion = conv_3x3_out + + self.patch_h = patch_h + self.patch_w = patch_w + self.patch_area = self.patch_w * self.patch_h + + self.cnn_in_dim = in_channels + self.cnn_out_dim = transformer_dim + self.n_heads = num_heads + self.ffn_dim = ffn_dim + self.dropout = dropout + self.attn_dropout = attn_dropout + self.ffn_dropout = ffn_dropout + self.dilation = dilation + self.ffn_max_dim = ffn_dims[0] + self.ffn_min_dim = ffn_dims[-1] + self.var_ffn = var_ffn + self.n_blocks = n_transformer_blocks + self.conv_ksize = conv_ksize + + def unfolding(self, feature_map): + patch_w, patch_h = self.patch_w, self.patch_h + patch_area = int(patch_w * patch_h) + batch_size, in_channels, orig_h, orig_w = feature_map.shape + + new_h = int(math.ceil(orig_h / self.patch_h) * self.patch_h) + new_w = int(math.ceil(orig_w / self.patch_w) * self.patch_w) + + interpolate = False + if new_w != orig_w or new_h != orig_h: + # Note: Padding can be done, but then it needs to be handled in attention function. 
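            # (Reviewer note, illustrative example only: with patch_h = patch_w = 2 and
            # a 7x7 feature map, new_h = new_w = 8, so the map is bilinearly resized
            # here before the patch reshaping below; the `interpolate` flag is kept in
            # info_dict so folding() can restore the original 7x7 size afterwards.)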
+ feature_map = F.interpolate( + feature_map, + size=(new_h, new_w), + mode="bilinear", + align_corners=False) + interpolate = True + + # number of patches along width and height + num_patch_w = new_w // patch_w # n_w + num_patch_h = new_h // patch_h # n_h + num_patches = num_patch_h * num_patch_w # N + + # [B, C, H, W] --> [B * C * n_h, p_h, n_w, p_w] + reshaped_fm = feature_map.reshape([ + batch_size * in_channels * num_patch_h, patch_h, num_patch_w, + patch_w + ]) + # [B * C * n_h, p_h, n_w, p_w] --> [B * C * n_h, n_w, p_h, p_w] + transposed_fm = reshaped_fm.transpose([0, 2, 1, 3]) + # [B * C * n_h, n_w, p_h, p_w] --> [B, C, N, P] where P = p_h * p_w and N = n_h * n_w + reshaped_fm = transposed_fm.reshape( + [batch_size, in_channels, num_patches, patch_area]) + # [B, C, N, P] --> [B, P, N, C] + transposed_fm = reshaped_fm.transpose([0, 3, 2, 1]) + # [B, P, N, C] --> [BP, N, C] + patches = transposed_fm.reshape( + [batch_size * patch_area, num_patches, in_channels]) + + info_dict = { + "orig_size": (orig_h, orig_w), + "batch_size": batch_size, + "interpolate": interpolate, + "total_patches": num_patches, + "num_patches_w": num_patch_w, + "num_patches_h": num_patch_h + } + + return patches, info_dict + + def folding(self, patches, info_dict): + n_dim = patches.dim() + assert n_dim == 3, "Tensor should be of shape BPxNxC. Got: {}".format( + patches.shape) + # [BP, N, C] --> [B, P, N, C] + patches = patches.reshape([ + info_dict["batch_size"], self.patch_area, + info_dict["total_patches"], patches.shape[2] + ]) + + batch_size, pixels, num_patches, channels = patches.shape + num_patch_h = info_dict["num_patches_h"] + num_patch_w = info_dict["num_patches_w"] + + # [B, P, N, C] --> [B, C, N, P] + patches = patches.transpose([0, 3, 2, 1]) + + # [B, C, N, P] --> [B*C*n_h, n_w, p_h, p_w] + feature_map = patches.reshape([ + batch_size * channels * num_patch_h, num_patch_w, self.patch_h, + self.patch_w + ]) + # [B*C*n_h, n_w, p_h, p_w] --> [B*C*n_h, p_h, n_w, p_w] + feature_map = feature_map.transpose([0, 2, 1, 3]) + # [B*C*n_h, p_h, n_w, p_w] --> [B, C, H, W] + feature_map = feature_map.reshape([ + batch_size, channels, num_patch_h * self.patch_h, + num_patch_w * self.patch_w + ]) + if info_dict["interpolate"]: + feature_map = F.interpolate( + feature_map, + size=info_dict["orig_size"], + mode="bilinear", + align_corners=False) + return feature_map + + def forward(self, x): + res = x + + # For MobileViTV3: Normal 3x3 convolution --> Depthwise 3x3 convolution + fm_conv = self.local_rep(x) + + # convert feature map to patches + patches, info_dict = self.unfolding(fm_conv) + + # learn global representations + patches = self.global_rep(patches) + + # [B x Patch x Patches x C] --> [B x C x Patches x Patch] + fm = self.folding(patches=patches, info_dict=info_dict) + + fm = self.conv_proj(fm) + + if self.fusion is not None: + # For MobileViTV3: input+global --> local+global + fm = self.fusion(paddle.concat((fm_conv, fm), axis=1)) + + # For MobileViTV3: Skip connection + fm = fm + res + + return fm + + +class LinearSelfAttention(nn.Layer): + def __init__(self, embed_dim, attn_dropout=0.0, bias=True): + super().__init__() + self.embed_dim = embed_dim + self.qkv_proj = nn.Conv2D( + embed_dim, 1 + (2 * embed_dim), 1, bias_attr=bias) + self.attn_dropout = nn.Dropout(p=attn_dropout) + self.out_proj = nn.Conv2D(embed_dim, embed_dim, 1, bias_attr=bias) + + def forward(self, x): + # [B, C, P, N] --> [B, h + 2d, P, N] + qkv = self.qkv_proj(x) + + # Project x into query, key and value + # Query --> [B, 1, P, N] + # 
value, key --> [B, d, P, N] + query, key, value = paddle.split( + qkv, [1, self.embed_dim, self.embed_dim], axis=1) + + # apply softmax along N dimension + context_scores = F.softmax(query, axis=-1) + # Uncomment below line to visualize context scores + # self.visualize_context_scores(context_scores=context_scores) + context_scores = self.attn_dropout(context_scores) + + # Compute context vector + # [B, d, P, N] x [B, 1, P, N] -> [B, d, P, N] + context_vector = key * context_scores + # [B, d, P, N] --> [B, d, P, 1] + context_vector = paddle.sum(context_vector, axis=-1, keepdim=True) + + # combine context vector with values + # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] + out = F.relu(value) * context_vector + out = self.out_proj(out) + return out + + +class LinearAttnFFN(nn.Layer): + def __init__(self, + embed_dim: int, + ffn_latent_dim: int, + attn_dropout: Optional[float]=0.0, + dropout: Optional[float]=0.1, + ffn_dropout: Optional[float]=0.0, + norm_layer: Optional[str]=layer_norm_2d) -> None: + super().__init__() + attn_unit = LinearSelfAttention( + embed_dim=embed_dim, attn_dropout=attn_dropout, bias=True) + + self.pre_norm_attn = nn.Sequential( + norm_layer(num_channels=embed_dim), + attn_unit, + nn.Dropout(p=dropout)) + + self.pre_norm_ffn = nn.Sequential( + norm_layer(num_channels=embed_dim), + nn.Conv2D(embed_dim, ffn_latent_dim, 1), + nn.Silu(), + nn.Dropout(p=ffn_dropout), + nn.Conv2D(ffn_latent_dim, embed_dim, 1), + nn.Dropout(p=dropout)) + + def forward(self, x): + # self-attention + x = x + self.pre_norm_attn(x) + # Feed forward network + x = x + self.pre_norm_ffn(x) + return x + + +class MobileViTV3BlockV2(nn.Layer): + """ + This class defines the `MobileViTV3 block` + """ + + def __init__(self, + in_channels: int, + attn_unit_dim: int, + ffn_multiplier: float=2.0, + n_attn_blocks: Optional[int]=2, + attn_dropout: Optional[float]=0.0, + dropout: Optional[float]=0.0, + ffn_dropout: Optional[float]=0.0, + patch_h: Optional[int]=8, + patch_w: Optional[int]=8, + conv_ksize: Optional[int]=3, + dilation: Optional[int]=1, + attn_norm_layer: Optional[str]=layer_norm_2d): + cnn_out_dim = attn_unit_dim + + padding = (conv_ksize - 1) // 2 * dilation + conv_3x3_in = nn.Sequential( + ('conv', nn.Conv2D( + in_channels, + in_channels, + conv_ksize, + bias_attr=False, + padding=padding, + dilation=dilation, + groups=in_channels)), ('norm', nn.BatchNorm2D(in_channels)), + ('act', nn.Silu())) + conv_1x1_in = nn.Sequential(('conv', nn.Conv2D( + in_channels, cnn_out_dim, 1, bias_attr=False))) + + super().__init__() + self.local_rep = nn.Sequential(conv_3x3_in, conv_1x1_in) + + self.global_rep, attn_unit_dim = self._build_attn_layer( + d_model=attn_unit_dim, + ffn_mult=ffn_multiplier, + n_layers=n_attn_blocks, + attn_dropout=attn_dropout, + dropout=dropout, + ffn_dropout=ffn_dropout, + attn_norm_layer=attn_norm_layer) + + # MobileViTV3: input changed from just global to local+global + self.conv_proj = nn.Sequential( + ('conv', nn.Conv2D( + 2 * cnn_out_dim, in_channels, 1, bias_attr=False)), + ('norm', nn.BatchNorm2D(in_channels))) + + self.patch_h = patch_h + self.patch_w = patch_w + + def _build_attn_layer(self, + d_model: int, + ffn_mult: float, + n_layers: int, + attn_dropout: float, + dropout: float, + ffn_dropout: float, + attn_norm_layer: nn.Layer): + + # ensure that dims are multiple of 16 + ffn_dims = [ffn_mult * d_model // 16 * 16] * n_layers + + global_rep = [ + LinearAttnFFN( + embed_dim=d_model, + ffn_latent_dim=ffn_dims[block_idx], + attn_dropout=attn_dropout, + dropout=dropout, + 
ffn_dropout=ffn_dropout, + norm_layer=attn_norm_layer) for block_idx in range(n_layers) + ] + global_rep.append(attn_norm_layer(num_channels=d_model)) + + return nn.Sequential(*global_rep), d_model + + def unfolding(self, feature_map): + batch_size, in_channels, img_h, img_w = feature_map.shape + + # [B, C, H, W] --> [B, C, P, N] + patches = F.unfold( + feature_map, + kernel_sizes=[self.patch_h, self.patch_w], + strides=[self.patch_h, self.patch_w]) + n_patches = img_h * img_w // (self.patch_h * self.patch_w) + patches = patches.reshape( + [batch_size, in_channels, self.patch_h * self.patch_w, n_patches]) + + return patches, (img_h, img_w) + + def folding(self, patches, output_size: Tuple[int, int]): + batch_size, in_dim, patch_size, n_patches = patches.shape + + # [B, C, P, N] + patches = patches.reshape([batch_size, in_dim * patch_size, n_patches]) + + feature_map = F.fold( + patches, + output_size, + kernel_sizes=[self.patch_h, self.patch_w], + strides=[self.patch_h, self.patch_w]) + + return feature_map + + def forward(self, x): + fm_conv = self.local_rep(x) + + # convert feature map to patches + patches, output_size = self.unfolding(fm_conv) + + # learn global representations on all patches + patches = self.global_rep(patches) + + # [B x Patch x Patches x C] --> [B x C x Patches x Patch] + fm = self.folding(patches=patches, output_size=output_size) + + # MobileViTV3: local+global instead of only global + fm = self.conv_proj(paddle.concat((fm, fm_conv), axis=1)) + + # MobileViTV3: skip connection + fm = fm + x + + return fm + + +class MobileViTV3(nn.Layer): + """ + MobileViTV3: + """ + + def __init__(self, + mobilevit_config: Dict, + dropout=0.1, + class_num=1000, + classifier_dropout=0.1, + output_stride=None, + mobilevit_v2_based=False): + super().__init__() + self.round_nearest = 8 + self.dilation = 1 + self.dropout = dropout + self.mobilevit_v2_based = mobilevit_v2_based + + dilate_l4 = dilate_l5 = False + if output_stride == 8: + dilate_l4 = True + dilate_l5 = True + elif output_stride == 16: + dilate_l5 = True + + # store model configuration in a dictionary + in_channels = mobilevit_config["layer0"]["img_channels"] + out_channels = mobilevit_config["layer0"]["out_channels"] + self.conv_1 = nn.Sequential( + ('conv', nn.Conv2D( + in_channels, + out_channels, + 3, + bias_attr=False, + stride=2, + padding=1)), ('norm', nn.BatchNorm2D(out_channels)), + ('act', nn.Silu())) + + in_channels = out_channels + self.layer_1, out_channels = self._make_layer( + input_channel=in_channels, cfg=mobilevit_config["layer1"]) + + in_channels = out_channels + self.layer_2, out_channels = self._make_layer( + input_channel=in_channels, cfg=mobilevit_config["layer2"]) + + in_channels = out_channels + self.layer_3, out_channels = self._make_layer( + input_channel=in_channels, cfg=mobilevit_config["layer3"]) + + in_channels = out_channels + self.layer_4, out_channels = self._make_layer( + input_channel=in_channels, + cfg=mobilevit_config["layer4"], + dilate=dilate_l4) + + in_channels = out_channels + self.layer_5, out_channels = self._make_layer( + input_channel=in_channels, + cfg=mobilevit_config["layer5"], + dilate=dilate_l5) + + if self.mobilevit_v2_based: + self.conv_1x1_exp = nn.Identity() + else: + in_channels = out_channels + out_channels = min(mobilevit_config["last_layer_exp_factor"] * + in_channels, 960) + self.conv_1x1_exp = nn.Sequential( + ('conv', nn.Conv2D( + in_channels, out_channels, 1, bias_attr=False)), + ('norm', nn.BatchNorm2D(out_channels)), ('act', nn.Silu())) + + self.classifier = 
nn.Sequential() + self.classifier.add_sublayer( + name="global_pool", + sublayer=nn.Sequential(nn.AdaptiveAvgPool2D(1), nn.Flatten())) + if 0.0 < classifier_dropout < 1.0: + self.classifier.add_sublayer( + name="dropout", sublayer=nn.Dropout(p=classifier_dropout)) + self.classifier.add_sublayer( + name="fc", sublayer=nn.Linear(out_channels, class_num)) + + # weight initialization + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Conv2D): + fan_in = m.weight.shape[1] * m.weight.shape[2] * m.weight.shape[3] + fan_out = m.weight.shape[0] * m.weight.shape[2] * m.weight.shape[3] + if self.mobilevit_v2_based: + bound = 1.0 / fan_in**0.5 + nn.initializer.Uniform(-bound, bound)(m.weight) + if m.bias is not None: + nn.initializer.Uniform(-bound, bound)(m.bias) + else: + nn.initializer.KaimingNormal(fan_in=fan_out)(m.weight) + if m.bias is not None: + nn.initializer.Constant(0)(m.bias) + elif isinstance(m, nn.BatchNorm2D): + nn.initializer.Constant(1)(m.weight) + nn.initializer.Constant(0)(m.bias) + elif isinstance(m, nn.Linear): + if self.mobilevit_v2_based: + nn.initializer.XavierUniform()(m.weight) + else: + nn.initializer.TruncatedNormal(std=.02)(m.weight) + if m.bias is not None: + nn.initializer.Constant(0)(m.bias) + + def _make_layer(self, input_channel, cfg, dilate=False): + block_type = cfg.get("block_type", "mobilevit") + if block_type.lower() == "mobilevit": + return self._make_mit_layer( + input_channel=input_channel, cfg=cfg, dilate=dilate) + else: + return self._make_mobilenet_layer( + input_channel=input_channel, cfg=cfg) + + def _make_mit_layer(self, input_channel, cfg, dilate=False): + prev_dilation = self.dilation + block = [] + stride = cfg.get("stride", 1) + + if stride == 2: + if dilate: + self.dilation *= 2 + stride = 1 + + layer = InvertedResidual( + in_channels=input_channel, + out_channels=cfg.get("out_channels"), + stride=stride, + expand_ratio=cfg.get("mv_expand_ratio", 4), + dilation=prev_dilation) + + block.append(layer) + input_channel = cfg.get("out_channels") + + if self.mobilevit_v2_based: + block.append( + MobileViTV3BlockV2( + in_channels=input_channel, + attn_unit_dim=cfg["attn_unit_dim"], + ffn_multiplier=cfg.get("ffn_multiplier"), + n_attn_blocks=cfg.get("attn_blocks", 1), + ffn_dropout=0., + attn_dropout=0., + dilation=self.dilation, + patch_h=cfg.get("patch_h", 2), + patch_w=cfg.get("patch_w", 2))) + else: + head_dim = cfg.get("head_dim", 32) + transformer_dim = cfg["transformer_channels"] + ffn_dim = cfg.get("ffn_dim") + if head_dim is None: + num_heads = cfg.get("num_heads", 4) + if num_heads is None: + num_heads = 4 + head_dim = transformer_dim // num_heads + + assert transformer_dim % head_dim == 0, ( + "Transformer input dimension should be divisible by head dimension. 
" + "Got {} and {}.".format(transformer_dim, head_dim)) + + block.append( + MobileViTV3Block( + in_channels=input_channel, + transformer_dim=transformer_dim, + ffn_dim=ffn_dim, + n_transformer_blocks=cfg.get("transformer_blocks", 1), + patch_h=cfg.get("patch_h", 2), + patch_w=cfg.get("patch_w", 2), + dropout=self.dropout, + ffn_dropout=0., + attn_dropout=0., + head_dim=head_dim)) + + return nn.Sequential(*block), input_channel + + def _make_mobilenet_layer(self, input_channel, cfg): + output_channels = cfg.get("out_channels") + num_blocks = cfg.get("num_blocks", 2) + expand_ratio = cfg.get("expand_ratio", 4) + block = [] + + for i in range(num_blocks): + stride = cfg.get("stride", 1) if i == 0 else 1 + + layer = InvertedResidual( + in_channels=input_channel, + out_channels=output_channels, + stride=stride, + expand_ratio=expand_ratio) + block.append(layer) + input_channel = output_channels + return nn.Sequential(*block), input_channel + + def extract_features(self, x): + x = self.conv_1(x) + x = self.layer_1(x) + x = self.layer_2(x) + x = self.layer_3(x) + + x = self.layer_4(x) + x = self.layer_5(x) + x = self.conv_1x1_exp(x) + return x + + def forward(self, x): + x = self.extract_features(x) + x = self.classifier(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def MobileViTV3_S(pretrained=False, use_ssld=False, **kwargs): + mv2_exp_mult = 4 + mobilevit_config = { + "layer0": { + "img_channels": 3, + "out_channels": 16, + }, + "layer1": { + "out_channels": 32, + "expand_ratio": mv2_exp_mult, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2" + }, + "layer2": { + "out_channels": 64, + "expand_ratio": mv2_exp_mult, + "num_blocks": 3, + "stride": 2, + "block_type": "mv2" + }, + "layer3": { # 28x28 + "out_channels": 128, + "transformer_channels": 144, + "ffn_dim": 288, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer4": { # 14x14 + "out_channels": 256, + "transformer_channels": 192, + "ffn_dim": 384, + "transformer_blocks": 4, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer5": { # 7x7 + "out_channels": 320, + "transformer_channels": 240, + "ffn_dim": 480, + "transformer_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "last_layer_exp_factor": 4 + } + + model = MobileViTV3(mobilevit_config, **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV3_S"], use_ssld=use_ssld) + return model + + +def MobileViTV3_XS(pretrained=False, use_ssld=False, **kwargs): + mv2_exp_mult = 4 + mobilevit_config = { + "layer0": { + "img_channels": 3, + "out_channels": 16, + }, + "layer1": { + "out_channels": 32, + "expand_ratio": mv2_exp_mult, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2" + }, + "layer2": { + "out_channels": 48, + "expand_ratio": mv2_exp_mult, + "num_blocks": 3, + "stride": 2, + "block_type": "mv2" + }, + "layer3": { # 28x28 + "out_channels": 96, + 
"transformer_channels": 96, + "ffn_dim": 192, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer4": { # 14x14 + "out_channels": 160, + "transformer_channels": 120, + "ffn_dim": 240, + "transformer_blocks": 4, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer5": { # 7x7 + "out_channels": 160, + "transformer_channels": 144, + "ffn_dim": 288, + "transformer_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "last_layer_exp_factor": 4 + } + + model = MobileViTV3(mobilevit_config, **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV3_XS"], use_ssld=use_ssld) + return model + + +def MobileViTV3_XXS(pretrained=False, use_ssld=False, **kwargs): + mv2_exp_mult = 2 + mobilevit_config = { + "layer0": { + "img_channels": 3, + "out_channels": 16, + }, + "layer1": { + "out_channels": 16, + "expand_ratio": mv2_exp_mult, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2" + }, + "layer2": { + "out_channels": 24, + "expand_ratio": mv2_exp_mult, + "num_blocks": 3, + "stride": 2, + "block_type": "mv2" + }, + "layer3": { # 28x28 + "out_channels": 64, + "transformer_channels": 64, + "ffn_dim": 128, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer4": { # 14x14 + "out_channels": 80, + "transformer_channels": 80, + "ffn_dim": 160, + "transformer_blocks": 4, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer5": { # 7x7 + "out_channels": 128, + "transformer_channels": 96, + "ffn_dim": 192, + "transformer_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "last_layer_exp_factor": 4 + } + + model = MobileViTV3(mobilevit_config, **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV3_XXS"], use_ssld=use_ssld) + return model + + +def MobileViTV3_S_L2(pretrained=False, use_ssld=False, **kwargs): + mv2_exp_mult = 4 + mobilevit_config = { + "layer0": { + "img_channels": 3, + "out_channels": 16, + }, + "layer1": { + "out_channels": 32, + "expand_ratio": mv2_exp_mult, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2" + }, + "layer2": { + "out_channels": 64, + "expand_ratio": mv2_exp_mult, + "num_blocks": 3, + "stride": 2, + "block_type": "mv2" + }, + "layer3": { # 28x28 + "out_channels": 128, + "transformer_channels": 144, + "ffn_dim": 288, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer4": { # 14x14 + "out_channels": 256, + "transformer_channels": 192, + "ffn_dim": 384, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer5": { # 7x7 + "out_channels": 320, + "transformer_channels": 240, + "ffn_dim": 480, + "transformer_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + 
"mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "last_layer_exp_factor": 4 + } + + model = MobileViTV3(mobilevit_config, **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV3_S_L2"], use_ssld=use_ssld) + return model + + +def MobileViTV3_XS_L2(pretrained=False, use_ssld=False, **kwargs): + mv2_exp_mult = 4 + mobilevit_config = { + "layer0": { + "img_channels": 3, + "out_channels": 16, + }, + "layer1": { + "out_channels": 32, + "expand_ratio": mv2_exp_mult, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2" + }, + "layer2": { + "out_channels": 48, + "expand_ratio": mv2_exp_mult, + "num_blocks": 3, + "stride": 2, + "block_type": "mv2" + }, + "layer3": { # 28x28 + "out_channels": 96, + "transformer_channels": 96, + "ffn_dim": 192, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer4": { # 14x14 + "out_channels": 160, + "transformer_channels": 120, + "ffn_dim": 240, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer5": { # 7x7 + "out_channels": 160, + "transformer_channels": 144, + "ffn_dim": 288, + "transformer_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "last_layer_exp_factor": 4 + } + + model = MobileViTV3(mobilevit_config, **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV3_XS_L2"], use_ssld=use_ssld) + return model + + +def MobileViTV3_XXS_L2(pretrained=False, use_ssld=False, **kwargs): + mv2_exp_mult = 2 + mobilevit_config = { + "layer0": { + "img_channels": 3, + "out_channels": 16, + }, + "layer1": { + "out_channels": 16, + "expand_ratio": mv2_exp_mult, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2" + }, + "layer2": { + "out_channels": 24, + "expand_ratio": mv2_exp_mult, + "num_blocks": 3, + "stride": 2, + "block_type": "mv2" + }, + "layer3": { # 28x28 + "out_channels": 64, + "transformer_channels": 64, + "ffn_dim": 128, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer4": { # 14x14 + "out_channels": 80, + "transformer_channels": 80, + "ffn_dim": 160, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer5": { # 7x7 + "out_channels": 128, + "transformer_channels": 96, + "ffn_dim": 192, + "transformer_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "last_layer_exp_factor": 4 + } + + model = MobileViTV3(mobilevit_config, **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV3_XXS_L2"], use_ssld=use_ssld) + return model + + +def MobileViTV3_x1_0(pretrained=False, use_ssld=False, **kwargs): + mobilevit_config = { + "layer0": { + "img_channels": 3, + "out_channels": 32, + }, + "layer1": { + "out_channels": 64, + "expand_ratio": 2, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2", + }, + "layer2": { + "out_channels": 128, + "expand_ratio": 2, + "num_blocks": 2, + "stride": 
2, + "block_type": "mv2", + }, + "layer3": { # 28x28 + "out_channels": 256, + "attn_unit_dim": 128, + "ffn_multiplier": 2, + "attn_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": 2, + "block_type": "mobilevit", + }, + "layer4": { # 14x14 + "out_channels": 384, + "attn_unit_dim": 192, + "ffn_multiplier": 2, + "attn_blocks": 4, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": 2, + "block_type": "mobilevit", + }, + "layer5": { # 7x7 + "out_channels": 512, + "attn_unit_dim": 256, + "ffn_multiplier": 2, + "attn_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": 2, + "block_type": "mobilevit", + }, + "last_layer_exp_factor": 4, + } + + model = MobileViTV3(mobilevit_config, mobilevit_v2_based=True, **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV3_x1_0"], use_ssld=use_ssld) + return model + + +def MobileViTV3_x0_75(pretrained=False, use_ssld=False, **kwargs): + mobilevit_config = { + "layer0": { + "img_channels": 3, + "out_channels": 24, + }, + "layer1": { + "out_channels": 48, + "expand_ratio": 2, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2", + }, + "layer2": { + "out_channels": 96, + "expand_ratio": 2, + "num_blocks": 2, + "stride": 2, + "block_type": "mv2", + }, + "layer3": { # 28x28 + "out_channels": 192, + "attn_unit_dim": 96, + "ffn_multiplier": 2, + "attn_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": 2, + "block_type": "mobilevit", + }, + "layer4": { # 14x14 + "out_channels": 288, + "attn_unit_dim": 144, + "ffn_multiplier": 2, + "attn_blocks": 4, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": 2, + "block_type": "mobilevit", + }, + "layer5": { # 7x7 + "out_channels": 384, + "attn_unit_dim": 192, + "ffn_multiplier": 2, + "attn_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": 2, + "block_type": "mobilevit", + }, + "last_layer_exp_factor": 4, + } + + model = MobileViTV3(mobilevit_config, mobilevit_v2_based=True, **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV3_x0_75"], use_ssld=use_ssld) + return model + + +def MobileViTV3_x0_5(pretrained=False, use_ssld=False, **kwargs): + mobilevit_config = { + "layer0": { + "img_channels": 3, + "out_channels": 16, + }, + "layer1": { + "out_channels": 32, + "expand_ratio": 2, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2", + }, + "layer2": { + "out_channels": 64, + "expand_ratio": 2, + "num_blocks": 2, + "stride": 2, + "block_type": "mv2", + }, + "layer3": { # 28x28 + "out_channels": 128, + "attn_unit_dim": 64, + "ffn_multiplier": 2, + "attn_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": 2, + "block_type": "mobilevit", + }, + "layer4": { # 14x14 + "out_channels": 192, + "attn_unit_dim": 96, + "ffn_multiplier": 2, + "attn_blocks": 4, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": 2, + "block_type": "mobilevit", + }, + "layer5": { # 7x7 + "out_channels": 256, + "attn_unit_dim": 128, + "ffn_multiplier": 2, + "attn_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": 2, + "block_type": "mobilevit", + }, + "last_layer_exp_factor": 4, + } + + model = MobileViTV3(mobilevit_config, mobilevit_v2_based=True, **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTV3_x0_5"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/nextvit.py 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/nextvit.py new file mode 100644 index 000000000..383d6d1fe --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/nextvit.py @@ -0,0 +1,643 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was based on https://github.com/bytedance/Next-ViT/blob/main/classification/nextvit.py +# reference: https://arxiv.org/abs/2207.05501 + +from functools import partial + +import paddle +from paddle import nn +from paddle.nn.initializer import TruncatedNormal, Constant, Normal +from .vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "NextViT_small_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/NextViT_small_224_pretrained.pdparams", + "NextViT_base_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/NextViT_base_224_pretrained.pdparams", + "NextViT_large_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/NextViT_large_224_pretrained.pdparams", + "NextViT_small_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/NextViT_small_384_pretrained.pdparams", + "NextViT_base_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/NextViT_base_384_pretrained.pdparams", + "NextViT_large_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/NextViT_large_384_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + +NORM_EPS = 1e-5 + + +def rearrange(x, pattern, **axes_lengths): + if 'b (h w) c -> b c h w' == pattern: + b, n, c = x.shape + h = axes_lengths.pop('h', -1) + w = axes_lengths.pop('w', -1) + h = h if w == -1 else n // w + w = w if h == -1 else n // h + return x.transpose([0, 2, 1]).reshape([b, c, h, w]) + if 'b c h w -> b (h w) c' == pattern: + b, c, h, w = x.shape + return x.reshape([b, c, h * w]).transpose([0, 2, 1]) + if 'b t (h d) -> b h t d' == pattern: + b, t, h_d = x.shape + h = axes_lengths['h'] + return x.reshape([b, t, h, h_d // h]).transpose([0, 2, 1, 3]) + if 'b h t d -> b t (h d)' == pattern: + b, h, t, d = x.shape + return x.transpose([0, 2, 1, 3]).reshape([b, t, h * d]) + + raise NotImplementedError( + "Rearrangement '{}' has not been implemented.".format(pattern)) + + +def merge_pre_bn(layer, pre_bn_1, pre_bn_2=None): + """ Merge pre BN to reduce inference runtime. 
+ """ + weight = layer.weight + if isinstance(layer, nn.Linear): + weight = weight.transpose([1, 0]) + bias = layer.bias + if pre_bn_2 is None: + scale_invstd = (pre_bn_1._variance + pre_bn_1._epsilon).pow(-0.5) + extra_weight = scale_invstd * pre_bn_1.weight + extra_bias = pre_bn_1.bias - pre_bn_1.weight * pre_bn_1._mean * scale_invstd + else: + scale_invstd_1 = (pre_bn_1._variance + pre_bn_1._epsilon).pow(-0.5) + scale_invstd_2 = (pre_bn_2._variance + pre_bn_2._epsilon).pow(-0.5) + + extra_weight = scale_invstd_1 * pre_bn_1.weight * scale_invstd_2 * pre_bn_2.weight + extra_bias = scale_invstd_2 * pre_bn_2.weight * ( + pre_bn_1.bias - pre_bn_1.weight * pre_bn_1._mean * scale_invstd_1 - + pre_bn_2._mean) + pre_bn_2.bias + if isinstance(layer, nn.Linear): + extra_bias = weight @extra_bias + + weight = weight.multiply( + extra_weight.reshape([1, weight.shape[1]]).expand_as(weight)) + weight = weight.transpose([1, 0]) + elif isinstance(layer, nn.Conv2D): + assert weight.shape[2] == 1 and weight.shape[3] == 1 + + weight = weight.reshape([weight.shape[0], weight.shape[1]]) + extra_bias = weight @extra_bias + weight = weight.multiply( + extra_weight.reshape([1, weight.shape[1]]).expand_as(weight)) + weight = weight.reshape([weight.shape[0], weight.shape[1], 1, 1]) + bias = bias.add(extra_bias) + + layer.weight.set_value(weight) + layer.bias.set_value(bias) + + +def _make_divisible(v, divisor, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class ConvBNReLU(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + groups=1): + super(ConvBNReLU, self).__init__() + self.conv = nn.Conv2D( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=1, + groups=groups, + bias_attr=False) + self.norm = nn.BatchNorm2D(out_channels, epsilon=NORM_EPS) + self.act = nn.ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + x = self.act(x) + return x + + +class PatchEmbed(nn.Layer): + def __init__(self, in_channels, out_channels, stride=1): + super(PatchEmbed, self).__init__() + norm_layer = partial(nn.BatchNorm2D, epsilon=NORM_EPS) + if stride == 2: + self.avgpool = nn.AvgPool2D((2, 2), stride=2, ceil_mode=True) + self.conv = nn.Conv2D( + in_channels, + out_channels, + kernel_size=1, + stride=1, + bias_attr=False) + self.norm = norm_layer(out_channels) + elif in_channels != out_channels: + self.avgpool = nn.Identity() + self.conv = nn.Conv2D( + in_channels, + out_channels, + kernel_size=1, + stride=1, + bias_attr=False) + self.norm = norm_layer(out_channels) + else: + self.avgpool = nn.Identity() + self.conv = nn.Identity() + self.norm = nn.Identity() + + def forward(self, x): + return self.norm(self.conv(self.avgpool(x))) + + +class MHCA(nn.Layer): + """ + Multi-Head Convolutional Attention + """ + + def __init__(self, out_channels, head_dim): + super(MHCA, self).__init__() + norm_layer = partial(nn.BatchNorm2D, epsilon=NORM_EPS) + self.group_conv3x3 = nn.Conv2D( + out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + groups=out_channels // head_dim, + bias_attr=False) + self.norm = norm_layer(out_channels) + self.act = nn.ReLU() + self.projection = nn.Conv2D( + out_channels, out_channels, kernel_size=1, bias_attr=False) + + def forward(self, x): + out = self.group_conv3x3(x) + out = self.norm(out) + out = 
self.act(out) + out = self.projection(out) + return out + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + out_features=None, + mlp_ratio=None, + drop=0., + bias=True): + super().__init__() + out_features = out_features or in_features + hidden_dim = _make_divisible(in_features * mlp_ratio, 32) + self.conv1 = nn.Conv2D( + in_features, + hidden_dim, + kernel_size=1, + bias_attr=None if bias == True else False) + self.act = nn.ReLU() + self.conv2 = nn.Conv2D( + hidden_dim, + out_features, + kernel_size=1, + bias_attr=None if bias == True else False) + self.drop = nn.Dropout(drop) + + def merge_bn(self, pre_norm): + merge_pre_bn(self.conv1, pre_norm) + self.is_bn_merged = True + + def forward(self, x): + x = self.conv1(x) + x = self.act(x) + x = self.drop(x) + x = self.conv2(x) + x = self.drop(x) + return x + + +class NCB(nn.Layer): + """ + Next Convolution Block + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + path_dropout=0.0, + drop=0.0, + head_dim=32, + mlp_ratio=3): + super(NCB, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + norm_layer = partial(nn.BatchNorm2D, epsilon=NORM_EPS) + assert out_channels % head_dim == 0 + + self.patch_embed = PatchEmbed(in_channels, out_channels, stride) + self.mhca = MHCA(out_channels, head_dim) + self.attention_path_dropout = DropPath(path_dropout) + + self.norm = norm_layer(out_channels) + self.mlp = Mlp(out_channels, mlp_ratio=mlp_ratio, drop=drop, bias=True) + self.mlp_path_dropout = DropPath(path_dropout) + self.is_bn_merged = False + + def merge_bn(self): + if not self.is_bn_merged: + self.mlp.merge_bn(self.norm) + self.is_bn_merged = True + + def forward(self, x): + x = self.patch_embed(x) + x = x + self.attention_path_dropout(self.mhca(x)) + + if not self.is_bn_merged: + out = self.norm(x) + else: + out = x + x = x + self.mlp_path_dropout(self.mlp(out)) + return x + + +class E_MHSA(nn.Layer): + """ + Efficient Multi-Head Self Attention + """ + + def __init__(self, + dim, + out_dim=None, + head_dim=32, + qkv_bias=True, + qk_scale=None, + attn_drop=0, + proj_drop=0., + sr_ratio=1): + super().__init__() + self.dim = dim + self.out_dim = out_dim if out_dim is not None else dim + self.num_heads = self.dim // head_dim + self.scale = qk_scale or head_dim**-0.5 + self.q = nn.Linear(dim, self.dim, bias_attr=qkv_bias) + self.k = nn.Linear(dim, self.dim, bias_attr=qkv_bias) + self.v = nn.Linear(dim, self.dim, bias_attr=qkv_bias) + self.proj = nn.Linear(self.dim, self.out_dim) + self.attn_drop = nn.Dropout(attn_drop) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + self.N_ratio = sr_ratio**2 + if sr_ratio > 1: + self.sr = nn.AvgPool1D( + kernel_size=self.N_ratio, stride=self.N_ratio) + self.norm = nn.BatchNorm1D(dim, epsilon=NORM_EPS) + self.is_bn_merged = False + + def merge_bn(self, pre_bn): + merge_pre_bn(self.q, pre_bn) + if self.sr_ratio > 1: + merge_pre_bn(self.k, pre_bn, self.norm) + merge_pre_bn(self.v, pre_bn, self.norm) + else: + merge_pre_bn(self.k, pre_bn) + merge_pre_bn(self.v, pre_bn) + self.is_bn_merged = True + + def forward(self, x): + B, N, C = x.shape + q = self.q(x) + q = q.reshape( + [B, N, self.num_heads, int(C // self.num_heads)]).transpose( + [0, 2, 1, 3]) + if self.sr_ratio > 1: + x_ = x.transpose([0, 2, 1]) + x_ = self.sr(x_) + if not self.is_bn_merged: + x_ = self.norm(x_) + x_ = x_.transpose([0, 2, 1]) + + k = self.k(x_) + k = k.reshape( + [B, k.shape[1], self.num_heads, int(C // self.num_heads) + ]).transpose([0, 2, 3, 1]) + v = 
self.v(x_) + v = v.reshape( + [B, v.shape[1], self.num_heads, int(C // self.num_heads) + ]).transpose([0, 2, 1, 3]) + else: + k = self.k(x) + k = k.reshape( + [B, k.shape[1], self.num_heads, int(C // self.num_heads) + ]).transpose([0, 2, 3, 1]) + v = self.v(x) + v = v.reshape( + [B, v.shape[1], self.num_heads, int(C // self.num_heads) + ]).transpose([0, 2, 1, 3]) + attn = (q @k) * self.scale + attn = nn.functional.softmax(attn, axis=-1) + attn = self.attn_drop(attn) + + x = (attn @v).transpose([0, 2, 1, 3]).reshape([B, N, C]) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class NTB(nn.Layer): + """ + Next Transformer Block + """ + + def __init__( + self, + in_channels, + out_channels, + path_dropout, + stride=1, + sr_ratio=1, + mlp_ratio=2, + head_dim=32, + mix_block_ratio=0.75, + attn_drop=0.0, + drop=0.0, ): + super(NTB, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.mix_block_ratio = mix_block_ratio + norm_func = partial(nn.BatchNorm2D, epsilon=NORM_EPS) + + self.mhsa_out_channels = _make_divisible( + int(out_channels * mix_block_ratio), 32) + self.mhca_out_channels = out_channels - self.mhsa_out_channels + + self.patch_embed = PatchEmbed(in_channels, self.mhsa_out_channels, + stride) + self.norm1 = norm_func(self.mhsa_out_channels) + self.e_mhsa = E_MHSA( + self.mhsa_out_channels, + head_dim=head_dim, + sr_ratio=sr_ratio, + attn_drop=attn_drop, + proj_drop=drop) + self.mhsa_path_dropout = DropPath(path_dropout * mix_block_ratio) + + self.projection = PatchEmbed( + self.mhsa_out_channels, self.mhca_out_channels, stride=1) + self.mhca = MHCA(self.mhca_out_channels, head_dim=head_dim) + self.mhca_path_dropout = DropPath(path_dropout * (1 - mix_block_ratio)) + + self.norm2 = norm_func(out_channels) + self.mlp = Mlp(out_channels, mlp_ratio=mlp_ratio, drop=drop) + self.mlp_path_dropout = DropPath(path_dropout) + + self.is_bn_merged = False + + def merge_bn(self): + if not self.is_bn_merged: + self.e_mhsa.merge_bn(self.norm1) + self.mlp.merge_bn(self.norm2) + self.is_bn_merged = True + + def forward(self, x): + x = self.patch_embed(x) + + B, C, H, W = x.shape + if not self.is_bn_merged: + out = self.norm1(x) + else: + out = x + out = rearrange(out, "b c h w -> b (h w) c") # b n c + out = self.e_mhsa(out) + out = self.mhsa_path_dropout(out) + x = x + rearrange(out, "b (h w) c -> b c h w", h=H) + + out = self.projection(x) + out = out + self.mhca_path_dropout(self.mhca(out)) + x = paddle.concat([x, out], axis=1) + + if not self.is_bn_merged: + out = self.norm2(x) + else: + out = x + x = x + self.mlp_path_dropout(self.mlp(out)) + return x + + +class NextViT(nn.Layer): + def __init__(self, + stem_chs, + depths, + path_dropout, + attn_drop=0, + drop=0, + class_num=1000, + strides=[1, 2, 2, 2], + sr_ratios=[8, 4, 2, 1], + head_dim=32, + mix_block_ratio=0.75): + super(NextViT, self).__init__() + + self.stage_out_channels = [ + [96] * (depths[0]), [192] * (depths[1] - 1) + [256], + [384, 384, 384, 384, 512] * (depths[2] // 5), + [768] * (depths[3] - 1) + [1024] + ] + + # Next Hybrid Strategy + self.stage_block_types = [[NCB] * depths[0], + [NCB] * (depths[1] - 1) + [NTB], + [NCB, NCB, NCB, NCB, NTB] * (depths[2] // 5), + [NCB] * (depths[3] - 1) + [NTB]] + + self.stem = nn.Sequential( + ConvBNReLU( + 3, stem_chs[0], kernel_size=3, stride=2), + ConvBNReLU( + stem_chs[0], stem_chs[1], kernel_size=3, stride=1), + ConvBNReLU( + stem_chs[1], stem_chs[2], kernel_size=3, stride=1), + ConvBNReLU( + stem_chs[2], stem_chs[2], kernel_size=3, stride=2), ) + 
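+        # Build the four stages following the Next Hybrid Strategy encoded in
+        # self.stage_block_types: stage 0 is purely convolutional (NCB), stages
+        # 1 and 3 end with a single NTB transformer block, and stage 2 repeats
+        # a [4 x NCB, 1 x NTB] group. The per-block DropPath rate follows a
+        # linear stochastic-depth schedule over sum(depths) blocks, and the
+        # first block of a stage downsamples only when strides[stage_id] == 2.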
input_channel = stem_chs[-1] + features = [] + idx = 0 + dpr = [ + x.item() for x in paddle.linspace(0, path_dropout, sum(depths)) + ] # stochastic depth decay rule + for stage_id in range(len(depths)): + numrepeat = depths[stage_id] + output_channels = self.stage_out_channels[stage_id] + block_types = self.stage_block_types[stage_id] + for block_id in range(numrepeat): + if strides[stage_id] == 2 and block_id == 0: + stride = 2 + else: + stride = 1 + output_channel = output_channels[block_id] + block_type = block_types[block_id] + if block_type is NCB: + layer = NCB(input_channel, + output_channel, + stride=stride, + path_dropout=dpr[idx + block_id], + drop=drop, + head_dim=head_dim) + features.append(layer) + elif block_type is NTB: + layer = NTB(input_channel, + output_channel, + path_dropout=dpr[idx + block_id], + stride=stride, + sr_ratio=sr_ratios[stage_id], + head_dim=head_dim, + mix_block_ratio=mix_block_ratio, + attn_drop=attn_drop, + drop=drop) + features.append(layer) + input_channel = output_channel + idx += numrepeat + self.features = nn.Sequential(*features) + + self.norm = nn.BatchNorm2D(output_channel, epsilon=NORM_EPS) + + self.avgpool = nn.AdaptiveAvgPool2D((1, 1)) + self.proj_head = nn.Sequential(nn.Linear(output_channel, class_num), ) + + self.stage_out_idx = [ + sum(depths[:idx + 1]) - 1 for idx in range(len(depths)) + ] + self._initialize_weights() + + def merge_bn(self): + self.eval() + for idx, layer in self.named_sublayers(): + if isinstance(layer, NCB) or isinstance(layer, NTB): + layer.merge_bn() + + def _initialize_weights(self): + for n, m in self.named_sublayers(): + if isinstance(m, (nn.BatchNorm2D, nn.GroupNorm, nn.LayerNorm, + nn.BatchNorm1D)): + ones_(m.weight) + zeros_(m.bias) + elif isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if hasattr(m, 'bias') and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.Conv2D): + trunc_normal_(m.weight) + if hasattr(m, 'bias') and m.bias is not None: + zeros_(m.bias) + + def forward(self, x): + x = self.stem(x) + for layer in self.features: + x = layer(x) + x = self.norm(x) + x = self.avgpool(x) + x = paddle.flatten(x, 1) + x = self.proj_head(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def NextViT_small_224(pretrained=False, use_ssld=False, **kwargs): + model = NextViT( + stem_chs=[64, 32, 64], + depths=[3, 4, 10, 3], + path_dropout=0.1, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["NextViT_small_224"], use_ssld=use_ssld) + return model + + +def NextViT_base_224(pretrained=False, use_ssld=False, **kwargs): + model = NextViT( + stem_chs=[64, 32, 64], + depths=[3, 4, 20, 3], + path_dropout=0.2, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["NextViT_base_224"], use_ssld=use_ssld) + return model + + +def NextViT_large_224(pretrained=False, use_ssld=False, **kwargs): + model = NextViT( + stem_chs=[64, 32, 64], + depths=[3, 4, 30, 3], + path_dropout=0.2, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["NextViT_large_224"], use_ssld=use_ssld) + return model + + +def NextViT_small_384(pretrained=False, use_ssld=False, **kwargs): + model = NextViT( + stem_chs=[64, 32, 64], + depths=[3, 4, 10, 3], + path_dropout=0.1, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["NextViT_small_384"], use_ssld=use_ssld) + return model + + +def NextViT_base_384(pretrained=False, use_ssld=False, **kwargs): + model = NextViT( + stem_chs=[64, 32, 64], + depths=[3, 4, 20, 3], + path_dropout=0.2, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["NextViT_base_384"], use_ssld=use_ssld) + return model + + +def NextViT_large_384(pretrained=False, use_ssld=False, **kwargs): + model = NextViT( + stem_chs=[64, 32, 64], + depths=[3, 4, 30, 3], + path_dropout=0.2, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["NextViT_large_384"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/peleenet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/peleenet.py new file mode 100644 index 000000000..0584c9794 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/peleenet.py @@ -0,0 +1,264 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Code was heavily based on https://github.com/Robert-JunWang/PeleeNet +# reference: https://arxiv.org/pdf/1804.06882.pdf + +import math + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.initializer import Normal, Constant + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "PeleeNet": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PeleeNet_pretrained.pdparams" +} + +__all__ = MODEL_URLS.keys() + +normal_ = lambda x, mean=0, std=1: Normal(mean, std)(x) +constant_ = lambda x, value=0: Constant(value)(x) +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) 
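+# The helpers above (normal_, constant_, zeros_, ones_) mirror the in-place
+# initializer style of the reference PyTorch implementation: e.g.
+# normal_(w, std=0.01) applies paddle.nn.initializer.Normal(0.0, 0.01) to w,
+# and zeros_(b) sets b to zero. They are used by _initialize_weights below to
+# initialize the Conv2D, BatchNorm2D and Linear layers.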
+ + +class _DenseLayer(nn.Layer): + def __init__(self, num_input_features, growth_rate, bottleneck_width, + drop_rate): + super(_DenseLayer, self).__init__() + + growth_rate = int(growth_rate / 2) + inter_channel = int(growth_rate * bottleneck_width / 4) * 4 + + if inter_channel > num_input_features / 2: + inter_channel = int(num_input_features / 8) * 4 + print('adjust inter_channel to ', inter_channel) + + self.branch1a = BasicConv2D( + num_input_features, inter_channel, kernel_size=1) + self.branch1b = BasicConv2D( + inter_channel, growth_rate, kernel_size=3, padding=1) + + self.branch2a = BasicConv2D( + num_input_features, inter_channel, kernel_size=1) + self.branch2b = BasicConv2D( + inter_channel, growth_rate, kernel_size=3, padding=1) + self.branch2c = BasicConv2D( + growth_rate, growth_rate, kernel_size=3, padding=1) + + def forward(self, x): + branch1 = self.branch1a(x) + branch1 = self.branch1b(branch1) + + branch2 = self.branch2a(x) + branch2 = self.branch2b(branch2) + branch2 = self.branch2c(branch2) + + return paddle.concat([x, branch1, branch2], 1) + + +class _DenseBlock(nn.Sequential): + def __init__(self, num_layers, num_input_features, bn_size, growth_rate, + drop_rate): + super(_DenseBlock, self).__init__() + for i in range(num_layers): + layer = _DenseLayer(num_input_features + i * growth_rate, + growth_rate, bn_size, drop_rate) + setattr(self, 'denselayer%d' % (i + 1), layer) + + +class _StemBlock(nn.Layer): + def __init__(self, num_input_channels, num_init_features): + super(_StemBlock, self).__init__() + + num_stem_features = int(num_init_features / 2) + + self.stem1 = BasicConv2D( + num_input_channels, + num_init_features, + kernel_size=3, + stride=2, + padding=1) + self.stem2a = BasicConv2D( + num_init_features, + num_stem_features, + kernel_size=1, + stride=1, + padding=0) + self.stem2b = BasicConv2D( + num_stem_features, + num_init_features, + kernel_size=3, + stride=2, + padding=1) + self.stem3 = BasicConv2D( + 2 * num_init_features, + num_init_features, + kernel_size=1, + stride=1, + padding=0) + self.pool = nn.MaxPool2D(kernel_size=2, stride=2) + + def forward(self, x): + out = self.stem1(x) + + branch2 = self.stem2a(out) + branch2 = self.stem2b(branch2) + branch1 = self.pool(out) + + out = paddle.concat([branch1, branch2], 1) + out = self.stem3(out) + + return out + + +class BasicConv2D(nn.Layer): + def __init__(self, in_channels, out_channels, activation=True, **kwargs): + super(BasicConv2D, self).__init__() + self.conv = nn.Conv2D( + in_channels, out_channels, bias_attr=False, **kwargs) + self.norm = nn.BatchNorm2D(out_channels) + self.activation = activation + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + if self.activation: + return F.relu(x) + else: + return x + + +class PeleeNetDY(nn.Layer): + r"""PeleeNet model class, based on + `"Densely Connected Convolutional Networks" and + "Pelee: A Real-Time Object Detection System on Mobile Devices" ` + + Args: + growth_rate (int or list of 4 ints) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + num_init_features (int) - the number of filters to learn in the first convolution layer + bottleneck_width (int or list of 4 ints) - multiplicative factor for number of bottle neck layers + (i.e. 
bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate after each dense layer + class_num (int) - number of classification classes + """ + + def __init__(self, + growth_rate=32, + block_config=[3, 4, 8, 6], + num_init_features=32, + bottleneck_width=[1, 2, 4, 4], + drop_rate=0.05, + class_num=1000): + + super(PeleeNetDY, self).__init__() + + self.features = nn.Sequential(* [('stemblock', _StemBlock( + 3, num_init_features)), ]) + + if type(growth_rate) is list: + growth_rates = growth_rate + assert len(growth_rates) == 4, \ + 'The growth rate must be the list and the size must be 4' + else: + growth_rates = [growth_rate] * 4 + + if type(bottleneck_width) is list: + bottleneck_widths = bottleneck_width + assert len(bottleneck_widths) == 4, \ + 'The bottleneck width must be the list and the size must be 4' + else: + bottleneck_widths = [bottleneck_width] * 4 + + # Each denseblock + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = _DenseBlock( + num_layers=num_layers, + num_input_features=num_features, + bn_size=bottleneck_widths[i], + growth_rate=growth_rates[i], + drop_rate=drop_rate) + setattr(self.features, 'denseblock%d' % (i + 1), block) + num_features = num_features + num_layers * growth_rates[i] + + setattr( + self.features, + 'transition%d' % (i + 1), + BasicConv2D( + num_features, + num_features, + kernel_size=1, + stride=1, + padding=0)) + + if i != len(block_config) - 1: + setattr( + self.features, + 'transition%d_pool' % (i + 1), + nn.AvgPool2D( + kernel_size=2, stride=2)) + num_features = num_features + + # Linear layer + self.classifier = nn.Linear(num_features, class_num) + self.drop_rate = drop_rate + + self.apply(self._initialize_weights) + + def forward(self, x): + features = self.features(x) + out = F.avg_pool2d( + features, kernel_size=features.shape[2:4]).flatten(1) + if self.drop_rate > 0: + out = F.dropout(out, p=self.drop_rate, training=self.training) + out = self.classifier(out) + return out + + def _initialize_weights(self, m): + if isinstance(m, nn.Conv2D): + n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels + normal_(m.weight, std=math.sqrt(2. / n)) + if m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2D): + ones_(m.weight) + zeros_(m.bias) + elif isinstance(m, nn.Linear): + normal_(m.weight, std=0.01) + zeros_(m.bias) + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def PeleeNet(pretrained=False, use_ssld=False, **kwargs): + model = PeleeNetDY(**kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PeleeNet"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/pvt_v2.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/pvt_v2.py new file mode 100644 index 000000000..9e9f8d5fd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/pvt_v2.py @@ -0,0 +1,493 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was heavily based on https://github.com/whai362/PVT +# reference: https://arxiv.org/abs/2106.13797 + +from functools import partial +import math +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.initializer import TruncatedNormal, Constant + +from .vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity, drop_path + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "PVT_V2_B0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B0_pretrained.pdparams", + "PVT_V2_B1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B1_pretrained.pdparams", + "PVT_V2_B2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B2_pretrained.pdparams", + "PVT_V2_B2_Linear": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B2_Linear_pretrained.pdparams", + "PVT_V2_B3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B3_pretrained.pdparams", + "PVT_V2_B4": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B4_pretrained.pdparams", + "PVT_V2_B5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B5_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +@paddle.jit.not_to_static +def swapdim(x, dim1, dim2): + a = list(range(len(x.shape))) + a[dim1], a[dim2] = a[dim2], a[dim1] + return x.transpose(a) + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0., + linear=False): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.dwconv = DWConv(hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + self.linear = linear + if self.linear: + self.relu = nn.ReLU() + + def forward(self, x, H, W): + x = self.fc1(x) + if self.linear: + x = self.relu(x) + x = self.dwconv(x, H, W) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Layer): + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + sr_ratio=1, + linear=False): + super().__init__() + assert dim % num_heads == 0 + + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.q = nn.Linear(dim, dim, bias_attr=qkv_bias) + self.kv = nn.Linear(dim, dim * 2, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.linear = linear + self.sr_ratio = sr_ratio + if not linear: + if sr_ratio > 1: + self.sr = nn.Conv2D( + dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + else: + self.pool = nn.AdaptiveAvgPool2D(7) + self.sr = nn.Conv2D(dim, dim, kernel_size=1, stride=1) + self.norm = nn.LayerNorm(dim) + self.act = nn.GELU() + + def forward(self, x, H, W): + B, N, C = 
x.shape + q = self.q(x).reshape( + [B, N, self.num_heads, C // self.num_heads]).transpose( + [0, 2, 1, 3]) + + if not self.linear: + if self.sr_ratio > 1: + x_ = x.transpose([0, 2, 1]).reshape([B, C, H, W]) + x_ = self.sr(x_) + h_, w_ = x_.shape[-2:] + x_ = x_.reshape([B, C, h_ * w_]).transpose([0, 2, 1]) + x_ = self.norm(x_) + kv = self.kv(x_) + kv = kv.reshape([ + B, kv.shape[2] * kv.shape[1] // 2 // C, 2, self.num_heads, + C // self.num_heads + ]).transpose([2, 0, 3, 1, 4]) + else: + kv = self.kv(x) + kv = kv.reshape([ + B, kv.shape[2] * kv.shape[1] // 2 // C, 2, self.num_heads, + C // self.num_heads + ]).transpose([2, 0, 3, 1, 4]) + else: + x_ = x.transpose([0, 2, 1]).reshape([B, C, H, W]) + x_ = self.sr(self.pool(x_)) + x_ = x_.reshape([B, C, x_.shape[2] * x_.shape[3]]).transpose( + [0, 2, 1]) + x_ = self.norm(x_) + x_ = self.act(x_) + kv = self.kv(x_) + kv = kv.reshape([ + B, kv.shape[2] * kv.shape[1] // 2 // C, 2, self.num_heads, + C // self.num_heads + ]).transpose([2, 0, 3, 1, 4]) + k, v = kv[0], kv[1] + + attn = (q @swapdim(k, -2, -1)) * self.scale + attn = F.softmax(attn, axis=-1) + attn = self.attn_drop(attn) + + x = swapdim((attn @v), 1, 2).reshape([B, N, C]) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class Block(nn.Layer): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + sr_ratio=1, + linear=False): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + sr_ratio=sr_ratio, + linear=linear) + self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + linear=linear) + + def forward(self, x, H, W): + x = x + self.drop_path(self.attn(self.norm1(x), H, W)) + x = x + self.drop_path(self.mlp(self.norm2(x), H, W)) + + return x + + +class OverlapPatchEmbed(nn.Layer): + """ Image to Patch Embedding + """ + + def __init__(self, + img_size=224, + patch_size=7, + stride=4, + in_chans=3, + embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + self.img_size = img_size + self.patch_size = patch_size + self.H, self.W = img_size[0] // patch_size[0], img_size[ + 1] // patch_size[1] + self.num_patches = self.H * self.W + self.proj = nn.Conv2D( + in_chans, + embed_dim, + kernel_size=patch_size, + stride=stride, + padding=(patch_size[0] // 2, patch_size[1] // 2)) + self.norm = nn.LayerNorm(embed_dim) + + def forward(self, x): + x = self.proj(x) + _, _, H, W = x.shape + x = x.flatten(2) + x = swapdim(x, 1, 2) + x = self.norm(x) + + return x, H, W + + +class PyramidVisionTransformerV2(nn.Layer): + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + class_num=1000, + embed_dims=[64, 128, 256, 512], + num_heads=[1, 2, 4, 8], + mlp_ratios=[4, 4, 4, 4], + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + num_stages=4, + linear=False): + super().__init__() + self.class_num = class_num + self.depths = depths + self.num_stages = num_stages + + dpr = [x for x in paddle.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + cur 
= 0 + + for i in range(num_stages): + patch_embed = OverlapPatchEmbed( + img_size=img_size if i == 0 else img_size // (2**(i + 1)), + patch_size=7 if i == 0 else 3, + stride=4 if i == 0 else 2, + in_chans=in_chans if i == 0 else embed_dims[i - 1], + embed_dim=embed_dims[i]) + + block = nn.LayerList([ + Block( + dim=embed_dims[i], + num_heads=num_heads[i], + mlp_ratio=mlp_ratios[i], + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[cur + j].item(), + norm_layer=norm_layer, + sr_ratio=sr_ratios[i], + linear=linear) for j in range(depths[i]) + ]) + norm = norm_layer(embed_dims[i]) + cur += depths[i] + + setattr(self, f"patch_embed{i + 1}", patch_embed) + setattr(self, f"block{i + 1}", block) + setattr(self, f"norm{i + 1}", norm) + + # classification head + self.head = nn.Linear(embed_dims[3], + class_num) if class_num > 0 else Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + B = x.shape[0] + + for i in range(self.num_stages): + patch_embed = getattr(self, f"patch_embed{i + 1}") + block = getattr(self, f"block{i + 1}") + norm = getattr(self, f"norm{i + 1}") + x, H, W = patch_embed(x) + for blk in block: + x = blk(x, H, W) + x = norm(x) + if i != self.num_stages - 1: + x = x.reshape([B, H, W, x.shape[2]]).transpose([0, 3, 1, 2]) + + return x.mean(axis=1) + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + + return x + + +class DWConv(nn.Layer): + def __init__(self, dim=768): + super().__init__() + self.dwconv = nn.Conv2D(dim, dim, 3, 1, 1, bias_attr=True, groups=dim) + + def forward(self, x, H, W): + B, N, C = x.shape + x = swapdim(x, 1, 2) + x = x.reshape([B, C, H, W]) + x = self.dwconv(x) + x = x.flatten(2) + x = swapdim(x, 1, 2) + + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def PVT_V2_B0(pretrained=False, use_ssld=False, **kwargs): + model = PyramidVisionTransformerV2( + patch_size=4, + embed_dims=[32, 64, 160, 256], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[2, 2, 2, 2], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["PVT_V2_B0"], use_ssld=use_ssld) + return model + + +def PVT_V2_B1(pretrained=False, use_ssld=False, **kwargs): + model = PyramidVisionTransformerV2( + patch_size=4, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[2, 2, 2, 2], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["PVT_V2_B1"], use_ssld=use_ssld) + return model + + +def PVT_V2_B2(pretrained=False, use_ssld=False, **kwargs): + model = PyramidVisionTransformerV2( + patch_size=4, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["PVT_V2_B2"], use_ssld=use_ssld) + return model + + +def PVT_V2_B3(pretrained=False, use_ssld=False, **kwargs): + model = PyramidVisionTransformerV2( + patch_size=4, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 4, 18, 3], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["PVT_V2_B3"], use_ssld=use_ssld) + return model + + +def PVT_V2_B4(pretrained=False, use_ssld=False, **kwargs): + model = PyramidVisionTransformerV2( + patch_size=4, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 8, 27, 3], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["PVT_V2_B4"], use_ssld=use_ssld) + return model + + +def PVT_V2_B5(pretrained=False, use_ssld=False, **kwargs): + model = PyramidVisionTransformerV2( + patch_size=4, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + mlp_ratios=[4, 4, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 6, 40, 3], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["PVT_V2_B5"], use_ssld=use_ssld) + return model + + +def PVT_V2_B2_Linear(pretrained=False, use_ssld=False, **kwargs): + model = PyramidVisionTransformerV2( + patch_size=4, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + linear=True, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["PVT_V2_B2_Linear"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/rednet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/rednet.py new file mode 100644 index 000000000..70a8a2c67 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/rednet.py @@ -0,0 +1,204 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was based on https://github.com/d-li14/involution +# reference: https://arxiv.org/abs/2103.06255 + +import paddle +import paddle.nn as nn + +from paddle.vision.models import resnet + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "RedNet26": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet26_pretrained.pdparams", + "RedNet38": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet38_pretrained.pdparams", + "RedNet50": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet50_pretrained.pdparams", + "RedNet101": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet101_pretrained.pdparams", + "RedNet152": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet152_pretrained.pdparams" +} + +__all__ = MODEL_URLS.keys() + + +class Involution(nn.Layer): + def __init__(self, channels, kernel_size, stride): + super(Involution, self).__init__() + self.kernel_size = kernel_size + self.stride = stride + self.channels = channels + reduction_ratio = 4 + self.group_channels = 16 + self.groups = self.channels // self.group_channels + self.conv1 = nn.Sequential( + ('conv', nn.Conv2D( + in_channels=channels, + out_channels=channels // reduction_ratio, + kernel_size=1, + bias_attr=False)), + ('bn', nn.BatchNorm2D(channels // reduction_ratio)), + ('activate', nn.ReLU())) + self.conv2 = nn.Sequential(('conv', nn.Conv2D( + in_channels=channels // reduction_ratio, + out_channels=kernel_size**2 * self.groups, + kernel_size=1, + stride=1))) + if stride > 1: + self.avgpool = nn.AvgPool2D(stride, stride) + + def forward(self, x): + weight = self.conv2( + self.conv1(x if self.stride == 1 else self.avgpool(x))) + b, c, h, w = weight.shape + weight = weight.reshape( + (b, self.groups, self.kernel_size**2, h, w)).unsqueeze(2) + + out = nn.functional.unfold(x, self.kernel_size, self.stride, + (self.kernel_size - 1) // 2, 1) + out = out.reshape( + (b, self.groups, self.group_channels, self.kernel_size**2, h, w)) + out = (weight * out).sum(axis=3).reshape((b, self.channels, h, w)) + return out + + +class BottleneckBlock(resnet.BottleneckBlock): + def __init__(self, + inplanes, + planes, + stride=1, + downsample=None, + groups=1, + base_width=64, + dilation=1, + norm_layer=None): + super(BottleneckBlock, self).__init__(inplanes, planes, stride, + downsample, groups, base_width, + dilation, norm_layer) + width = int(planes * (base_width / 64.)) * groups + self.conv2 = Involution(width, 7, stride) + + +class RedNet(resnet.ResNet): + def __init__(self, block, depth, class_num=1000, with_pool=True): + super(RedNet, self).__init__( + block=block, depth=50, num_classes=class_num, with_pool=with_pool) + layer_cfg = { + 26: [1, 2, 4, 1], + 38: [2, 3, 5, 2], + 50: [3, 4, 6, 3], + 101: [3, 4, 23, 3], + 152: [3, 8, 36, 3] + } + layers = layer_cfg[depth] + + self.conv1 = None + self.bn1 = None + self.relu = None + self.inplanes = 64 + self.class_num = class_num + self.stem = 
nn.Sequential( + nn.Sequential( + ('conv', nn.Conv2D( + in_channels=3, + out_channels=self.inplanes // 2, + kernel_size=3, + stride=2, + padding=1, + bias_attr=False)), + ('bn', nn.BatchNorm2D(self.inplanes // 2)), + ('activate', nn.ReLU())), + Involution(self.inplanes // 2, 3, 1), + nn.BatchNorm2D(self.inplanes // 2), + nn.ReLU(), + nn.Sequential( + ('conv', nn.Conv2D( + in_channels=self.inplanes // 2, + out_channels=self.inplanes, + kernel_size=3, + stride=1, + padding=1, + bias_attr=False)), ('bn', nn.BatchNorm2D(self.inplanes)), + ('activate', nn.ReLU()))) + + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + + def forward(self, x): + x = self.stem(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + if self.with_pool: + x = self.avgpool(x) + + if self.class_num > 0: + x = paddle.flatten(x, 1) + x = self.fc(x) + + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def RedNet26(pretrained=False, **kwargs): + model = RedNet(BottleneckBlock, 26, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["RedNet26"]) + return model + + +def RedNet38(pretrained=False, **kwargs): + model = RedNet(BottleneckBlock, 38, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["RedNet38"]) + return model + + +def RedNet50(pretrained=False, **kwargs): + model = RedNet(BottleneckBlock, 50, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["RedNet50"]) + return model + + +def RedNet101(pretrained=False, **kwargs): + model = RedNet(BottleneckBlock, 101, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["RedNet101"]) + return model + + +def RedNet152(pretrained=False, **kwargs): + model = RedNet(BottleneckBlock, 152, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["RedNet152"]) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/regnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/regnet.py new file mode 100644 index 000000000..12f60012a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/regnet.py @@ -0,0 +1,531 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Code was based on https://github.com/facebookresearch/pycls +# reference: https://arxiv.org/abs/1905.13214 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "RegNetX_200MF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_200MF_pretrained.pdparams", + "RegNetX_400MF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_400MF_pretrained.pdparams", + "RegNetX_600MF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_600MF_pretrained.pdparams", + "RegNetX_800MF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_800MF_pretrained.pdparams", + "RegNetX_1600MF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_1600MF_pretrained.pdparams", + "RegNetX_3200MF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_3200MF_pretrained.pdparams", + "RegNetX_4GF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_4GF_pretrained.pdparams", + "RegNetX_6400MF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_6400MF_pretrained.pdparams", + "RegNetX_8GF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_8GF_pretrained.pdparams", + "RegNetX_12GF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_12GF_pretrained.pdparams", + "RegNetX_16GF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_16GF_pretrained.pdparams", + "RegNetX_32GF": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_32GF_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +def quantize_float(f, q): + """Converts a float to closest non-zero int divisible by q.""" + return int(round(f / q) * q) + + +def adjust_ws_gs_comp(ws, bms, gs): + """Adjusts the compatibility of widths and groups.""" + ws_bot = [int(w * b) for w, b in zip(ws, bms)] + gs = [min(g, w_bot) for g, w_bot in zip(gs, ws_bot)] + ws_bot = [quantize_float(w_bot, g) for w_bot, g in zip(ws_bot, gs)] + ws = [int(w_bot / b) for w_bot, b in zip(ws_bot, bms)] + return ws, gs + + +def get_stages_from_blocks(ws, rs): + """Gets ws/ds of network at each stage from per block values.""" + ts = [ + w != wp or r != rp + for w, wp, r, rp in zip(ws + [0], [0] + ws, rs + [0], [0] + rs) + ] + s_ws = [w for w, t in zip(ws, ts[:-1]) if t] + s_ds = np.diff([d for d, t in zip(range(len(ts)), ts) if t]).tolist() + return s_ws, s_ds + + +def generate_regnet(w_a, w_0, w_m, d, q=8): + """Generates per block ws from RegNet parameters.""" + assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0 + ws_cont = np.arange(d) * w_a + w_0 + ks = np.round(np.log(ws_cont / w_0) / np.log(w_m)) + ws = w_0 * np.power(w_m, ks) + ws = np.round(np.divide(ws, q)) * q + num_stages, max_stage = len(np.unique(ws)), ks.max() + 1 + ws, ws_cont = ws.astype(int).tolist(), ws_cont.tolist() + return ws, num_stages, max_stage, ws_cont + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + padding=0, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( 
+ in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=groups, + weight_attr=ParamAttr(name=name + ".conv2d.output.1.w_0"), + bias_attr=False) + bn_name = name + "_bn" + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + ".output.1.w_0"), + bias_attr=ParamAttr(bn_name + ".output.1.b_0"), + moving_mean_name=bn_name + "_mean", + moving_variance_name=bn_name + "_variance") + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + bm, + gw, + se_on, + se_r, + shortcut=True, + name=None): + super(BottleneckBlock, self).__init__() + + # Compute the bottleneck width + w_b = int(round(num_filters * bm)) + # Compute the number of groups + num_gs = w_b // gw + self.se_on = se_on + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=w_b, + filter_size=1, + padding=0, + act="relu", + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=w_b, + num_filters=w_b, + filter_size=3, + stride=stride, + padding=1, + groups=num_gs, + act="relu", + name=name + "_branch2b") + if se_on: + w_se = int(round(num_channels * se_r)) + self.se_block = SELayer( + num_channels=w_b, + num_filters=w_b, + reduction_ratio=w_se, + name=name + "_branch2se") + self.conv2 = ConvBNLayer( + num_channels=w_b, + num_filters=num_filters, + filter_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=stride, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + if self.se_on: + conv1 = self.se_block(conv1) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class SELayer(nn.Layer): + def __init__(self, num_channels, num_filters, reduction_ratio, name=None): + super(SELayer, self).__init__() + + self.pool2d_gap = AdaptiveAvgPool2D(1) + + self._num_channels = num_channels + + med_ch = int(num_channels / reduction_ratio) + stdv = 1.0 / math.sqrt(num_channels * 1.0) + self.squeeze = Linear( + num_channels, + med_ch, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_sqz_weights"), + bias_attr=ParamAttr(name=name + "_sqz_offset")) + + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.excitation = Linear( + med_ch, + num_filters, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_exc_weights"), + bias_attr=ParamAttr(name=name + "_exc_offset")) + + def forward(self, input): + pool = self.pool2d_gap(input) + pool = paddle.reshape(pool, shape=[-1, self._num_channels]) + squeeze = self.squeeze(pool) + squeeze = F.relu(squeeze) + excitation = self.excitation(squeeze) + excitation = F.sigmoid(excitation) + excitation = paddle.reshape( + excitation, shape=[-1, self._num_channels, 1, 1]) + out = input * excitation + return out + + +class RegNet(nn.Layer): + def __init__(self, + w_a, + w_0, + w_m, + d, + group_w, + bot_mul, + q=8, + se_on=False, + class_num=1000): + super(RegNet, self).__init__() + + # Generate RegNet ws per block + b_ws, num_s, max_s, ws_cont = generate_regnet(w_a, w_0, w_m, d, q) + # Convert to per stage format + ws, ds = get_stages_from_blocks(b_ws, b_ws) + # Generate group widths and bot 
muls + gws = [group_w for _ in range(num_s)] + bms = [bot_mul for _ in range(num_s)] + # Adjust the compatibility of ws and gws + ws, gws = adjust_ws_gs_comp(ws, bms, gws) + # Use the same stride for each stage + ss = [2 for _ in range(num_s)] + # Use SE for RegNetY + se_r = 0.25 + # Construct the model + # Group params by stage + stage_params = list(zip(ds, ws, ss, bms, gws)) + # Construct the stem + stem_type = "simple_stem_in" + stem_w = 32 + block_type = "res_bottleneck_block" + + self.conv = ConvBNLayer( + num_channels=3, + num_filters=stem_w, + filter_size=3, + stride=2, + padding=1, + act="relu", + name="stem_conv") + + self.block_list = [] + for block, (d, w_out, stride, bm, gw) in enumerate(stage_params): + shortcut = False + for i in range(d): + num_channels = stem_w if block == i == 0 else in_channels + # Stride apply to the first block of the stage + b_stride = stride if i == 0 else 1 + conv_name = "s" + str(block + 1) + "_b" + str(i + + 1) # chr(97 + i) + bottleneck_block = self.add_sublayer( + conv_name, + BottleneckBlock( + num_channels=num_channels, + num_filters=w_out, + stride=b_stride, + bm=bm, + gw=gw, + se_on=se_on, + se_r=se_r, + shortcut=shortcut, + name=conv_name)) + in_channels = w_out + self.block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = w_out + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_0.w_0"), + bias_attr=ParamAttr(name="fc_0.b_0")) + + def forward(self, inputs): + y = self.conv(inputs) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def RegNetX_200MF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=36.44, + w_0=24, + w_m=2.49, + d=13, + group_w=8, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_200MF"], use_ssld=use_ssld) + return model + + +def RegNetX_400MF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=24.48, + w_0=24, + w_m=2.54, + d=22, + group_w=16, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_400MF"], use_ssld=use_ssld) + return model + + +def RegNetX_600MF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=36.97, + w_0=48, + w_m=2.24, + d=16, + group_w=24, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_600MF"], use_ssld=use_ssld) + return model + + +def RegNetX_800MF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=35.73, + w_0=56, + w_m=2.28, + d=16, + group_w=16, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_800MF"], use_ssld=use_ssld) + return model + + +def RegNetX_1600MF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=34.01, + w_0=80, + w_m=2.25, + d=18, + group_w=24, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_1600MF"], use_ssld=use_ssld) + return model + + +def RegNetX_3200MF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=26.31, + w_0=88, + w_m=2.25, + d=25, + group_w=48, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_3200MF"], use_ssld=use_ssld) + return model + + +def RegNetX_4GF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=38.65, + w_0=96, + w_m=2.43, + d=23, + group_w=40, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_4GF"], use_ssld=use_ssld) + return model + + +def RegNetX_6400MF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=60.83, + w_0=184, + w_m=2.07, + d=17, + group_w=56, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_6400MF"], use_ssld=use_ssld) + return model + + +def RegNetX_8GF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=49.56, + w_0=80, + w_m=2.88, + d=23, + group_w=120, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_8GF"], use_ssld=use_ssld) + return model + + +def RegNetX_12GF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=73.36, + w_0=168, + w_m=2.37, + d=19, + group_w=112, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_12GF"], use_ssld=use_ssld) + return model + + +def RegNetX_16GF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=55.59, + w_0=216, + w_m=2.1, + d=22, + group_w=128, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_16GF"], use_ssld=use_ssld) + return model + + +def RegNetX_32GF(pretrained=False, use_ssld=False, **kwargs): + model = RegNet( + w_a=69.86, + w_0=320, + w_m=2.0, + d=23, + group_w=168, + bot_mul=1.0, + q=8, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RegNetX_32GF"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/repvgg.py 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/repvgg.py new file mode 100644 index 000000000..94309f578 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/repvgg.py @@ -0,0 +1,451 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was based on https://github.com/DingXiaoH/RepVGG +# reference: https://arxiv.org/abs/2101.03697 + +import paddle.nn as nn +import paddle +import paddle.nn.functional as F +import numpy as np + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "RepVGG_A0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_A0_pretrained.pdparams", + "RepVGG_A1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_A1_pretrained.pdparams", + "RepVGG_A2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_A2_pretrained.pdparams", + "RepVGG_B0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B0_pretrained.pdparams", + "RepVGG_B1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B1_pretrained.pdparams", + "RepVGG_B2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B2_pretrained.pdparams", + "RepVGG_B1g2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B1g2_pretrained.pdparams", + "RepVGG_B1g4": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B1g4_pretrained.pdparams", + "RepVGG_B2g4": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B2g4_pretrained.pdparams", + "RepVGG_B3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B3_pretrained.pdparams", + "RepVGG_B3g4": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B3g4_pretrained.pdparams", + "RepVGG_D2se": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_D2se_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + +optional_groupwise_layers = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26] +g2_map = {l: 2 for l in optional_groupwise_layers} +g4_map = {l: 4 for l in optional_groupwise_layers} + + +class ConvBN(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + groups=1): + super(ConvBN, self).__init__() + self.conv = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias_attr=False) + self.bn = nn.BatchNorm2D(num_features=out_channels) + + def forward(self, x): + y = self.conv(x) + y = self.bn(y) + return y + + +class SEBlock(nn.Layer): + def __init__(self, input_channels, internal_neurons): + super(SEBlock, self).__init__() + self.down = nn.Conv2D( + in_channels=input_channels, + out_channels=internal_neurons, + kernel_size=1, + stride=1, + bias_attr=True) + self.up = nn.Conv2D( + in_channels=internal_neurons, + out_channels=input_channels, + kernel_size=1, + stride=1, + bias_attr=True) + 
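+        # `down` and `up` are 1x1 convolutions forming the squeeze-excitation
+        # bottleneck: forward() global-average-pools the input, applies
+        # down -> ReLU -> up -> sigmoid, and rescales the input channel-wise
+        # with the resulting per-channel attention weights.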
self.input_channels = input_channels + + def forward(self, inputs): + x = F.avg_pool2d(inputs, kernel_size=inputs.shape[3]) + x = self.down(x) + x = F.relu(x) + x = self.up(x) + x = F.sigmoid(x) + x = x.reshape([-1, self.input_channels, 1, 1]) + return inputs * x + + +class RepVGGBlock(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros', + use_se=False): + super(RepVGGBlock, self).__init__() + self.is_repped = False + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.padding_mode = padding_mode + + assert kernel_size == 3 + assert padding == 1 + + padding_11 = padding - kernel_size // 2 + + self.nonlinearity = nn.ReLU() + + if use_se: + self.se = SEBlock( + out_channels, internal_neurons=out_channels // 16) + else: + self.se = nn.Identity() + self.rbr_identity = nn.BatchNorm2D( + num_features=in_channels + ) if out_channels == in_channels and stride == 1 else None + self.rbr_dense = ConvBN( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups) + self.rbr_1x1 = ConvBN( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=stride, + padding=padding_11, + groups=groups) + + def forward(self, inputs): + if self.is_repped: + return self.nonlinearity(self.rbr_reparam(inputs)) + + if self.rbr_identity is None: + id_out = 0 + else: + id_out = self.rbr_identity(inputs) + return self.nonlinearity( + self.se(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out)) + + def re_parameterize(self): + if not hasattr(self, 'rbr_reparam'): + self.rbr_reparam = nn.Conv2D( + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=self.kernel_size, + stride=self.stride, + padding=self.padding, + dilation=self.dilation, + groups=self.groups, + padding_mode=self.padding_mode) + kernel, bias = self.get_equivalent_kernel_bias() + self.rbr_reparam.weight.set_value(kernel) + self.rbr_reparam.bias.set_value(bias) + self.is_repped = True + + def get_equivalent_kernel_bias(self): + kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense) + kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1) + kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity) + return kernel3x3 + self._pad_1x1_to_3x3_tensor( + kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid + + def _pad_1x1_to_3x3_tensor(self, kernel1x1): + if kernel1x1 is None: + return 0 + else: + return nn.functional.pad(kernel1x1, [1, 1, 1, 1]) + + def _fuse_bn_tensor(self, branch): + if branch is None: + return 0, 0 + if isinstance(branch, ConvBN): + kernel = branch.conv.weight + running_mean = branch.bn._mean + running_var = branch.bn._variance + gamma = branch.bn.weight + beta = branch.bn.bias + eps = branch.bn._epsilon + else: + assert isinstance(branch, nn.BatchNorm2D) + if not hasattr(self, 'id_tensor'): + input_dim = self.in_channels // self.groups + kernel_value = np.zeros( + (self.in_channels, input_dim, 3, 3), dtype=np.float32) + for i in range(self.in_channels): + kernel_value[i, i % input_dim, 1, 1] = 1 + self.id_tensor = paddle.to_tensor(kernel_value) + kernel = self.id_tensor + running_mean = branch._mean + running_var = branch._variance + gamma = branch.weight + beta = branch.bias + eps = branch._epsilon + std = (running_var + eps).sqrt() + t = (gamma / std).reshape((-1, 
1, 1, 1)) + return kernel * t, beta - running_mean * gamma / std + + +class RepVGG(nn.Layer): + def __init__(self, + num_blocks, + width_multiplier=None, + override_groups_map=None, + class_num=1000, + use_se=False): + super(RepVGG, self).__init__() + assert len(width_multiplier) == 4 + self.override_groups_map = override_groups_map or dict() + assert 0 not in self.override_groups_map + self.in_planes = min(64, int(64 * width_multiplier[0])) + + self.stage0 = RepVGGBlock( + in_channels=3, + out_channels=self.in_planes, + kernel_size=3, + stride=2, + padding=1, + use_se=use_se) + self.cur_layer_idx = 1 + self.stage1 = self._make_stage( + int(64 * width_multiplier[0]), + num_blocks[0], + stride=2, + use_se=use_se) + self.stage2 = self._make_stage( + int(128 * width_multiplier[1]), + num_blocks[1], + stride=2, + use_se=use_se) + self.stage3 = self._make_stage( + int(256 * width_multiplier[2]), + num_blocks[2], + stride=2, + use_se=use_se) + self.stage4 = self._make_stage( + int(512 * width_multiplier[3]), + num_blocks[3], + stride=2, + use_se=use_se) + self.gap = nn.AdaptiveAvgPool2D(output_size=1) + self.linear = nn.Linear(int(512 * width_multiplier[3]), class_num) + + def _make_stage(self, planes, num_blocks, stride, use_se=False): + strides = [stride] + [1] * (num_blocks - 1) + blocks = [] + for stride in strides: + cur_groups = self.override_groups_map.get(self.cur_layer_idx, 1) + blocks.append( + RepVGGBlock( + in_channels=self.in_planes, + out_channels=planes, + kernel_size=3, + stride=stride, + padding=1, + groups=cur_groups, + use_se=use_se)) + self.in_planes = planes + self.cur_layer_idx += 1 + return nn.Sequential(*blocks) + + def forward(self, x): + out = self.stage0(x) + out = self.stage1(out) + out = self.stage2(out) + out = self.stage3(out) + out = self.stage4(out) + out = self.gap(out) + out = paddle.flatten(out, start_axis=1) + out = self.linear(out) + return out + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def RepVGG_A0(pretrained=False, use_ssld=False, **kwargs): + model = RepVGG( + num_blocks=[2, 4, 14, 1], + width_multiplier=[0.75, 0.75, 0.75, 2.5], + override_groups_map=None, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RepVGG_A0"], use_ssld=use_ssld) + return model + + +def RepVGG_A1(pretrained=False, use_ssld=False, **kwargs): + model = RepVGG( + num_blocks=[2, 4, 14, 1], + width_multiplier=[1, 1, 1, 2.5], + override_groups_map=None, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RepVGG_A1"], use_ssld=use_ssld) + return model + + +def RepVGG_A2(pretrained=False, use_ssld=False, **kwargs): + model = RepVGG( + num_blocks=[2, 4, 14, 1], + width_multiplier=[1.5, 1.5, 1.5, 2.75], + override_groups_map=None, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RepVGG_A2"], use_ssld=use_ssld) + return model + + +def RepVGG_B0(pretrained=False, use_ssld=False, **kwargs): + model = RepVGG( + num_blocks=[4, 6, 16, 1], + width_multiplier=[1, 1, 1, 2.5], + override_groups_map=None, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RepVGG_B0"], use_ssld=use_ssld) + return model + + +def RepVGG_B1(pretrained=False, use_ssld=False, **kwargs): + model = RepVGG( + num_blocks=[4, 6, 16, 1], + width_multiplier=[2, 2, 2, 4], + override_groups_map=None, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RepVGG_B1"], use_ssld=use_ssld) + return model + + +def RepVGG_B1g2(pretrained=False, use_ssld=False, **kwargs): + model = RepVGG( + num_blocks=[4, 6, 16, 1], + width_multiplier=[2, 2, 2, 4], + override_groups_map=g2_map, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RepVGG_B1g2"], use_ssld=use_ssld) + return model + + +def RepVGG_B1g4(pretrained=False, use_ssld=False, **kwargs): + model = RepVGG( + num_blocks=[4, 6, 16, 1], + width_multiplier=[2, 2, 2, 4], + override_groups_map=g4_map, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RepVGG_B1g4"], use_ssld=use_ssld) + return model + + +def RepVGG_B2(pretrained=False, use_ssld=False, **kwargs): + model = RepVGG( + num_blocks=[4, 6, 16, 1], + width_multiplier=[2.5, 2.5, 2.5, 5], + override_groups_map=None, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RepVGG_B2"], use_ssld=use_ssld) + return model + + +def RepVGG_B2g4(pretrained=False, use_ssld=False, **kwargs): + model = RepVGG( + num_blocks=[4, 6, 16, 1], + width_multiplier=[2.5, 2.5, 2.5, 5], + override_groups_map=g4_map, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RepVGG_B2g4"], use_ssld=use_ssld) + return model + + +def RepVGG_B3(pretrained=False, use_ssld=False, **kwargs): + model = RepVGG( + num_blocks=[4, 6, 16, 1], + width_multiplier=[3, 3, 3, 5], + override_groups_map=None, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RepVGG_B3"], use_ssld=use_ssld) + return model + + +def RepVGG_B3g4(pretrained=False, use_ssld=False, **kwargs): + model = RepVGG( + num_blocks=[4, 6, 16, 1], + width_multiplier=[3, 3, 3, 5], + override_groups_map=g4_map, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RepVGG_B3g4"], use_ssld=use_ssld) + return model + + +def RepVGG_D2se(pretrained=False, use_ssld=False, **kwargs): + model = RepVGG( + num_blocks=[8, 14, 24, 1], + width_multiplier=[2.5, 2.5, 2.5, 5], + override_groups_map=None, + use_se=True, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["RepVGG_D2se"], use_ssld=use_ssld) + return model diff --git 
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/res2net.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/res2net.py new file mode 100644 index 000000000..33261fca7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/res2net.py @@ -0,0 +1,266 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1904.01169 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "Res2Net50_26w_4s": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_26w_4s_pretrained.pdparams", + "Res2Net50_14w_8s": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_14w_8s_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None, ): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels1, + num_channels2, + num_filters, + stride, + scales, + shortcut=True, + if_first=False, + name=None): + super(BottleneckBlock, self).__init__() + self.stride = stride + self.scales = scales + self.conv0 = ConvBNLayer( + num_channels=num_channels1, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a") + self.conv1_list = [] + for s in range(scales - 1): + conv1 = self.add_sublayer( + name + '_branch2b_' + str(s + 1), + ConvBNLayer( + num_channels=num_filters // scales, + num_filters=num_filters // scales, + filter_size=3, + stride=stride, + act='relu', + name=name + '_branch2b_' + str(s + 1))) + self.conv1_list.append(conv1) + self.pool2d_avg = AvgPool2D(kernel_size=3, stride=stride, padding=1) + + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_channels2, + filter_size=1, + 
act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels1, + num_filters=num_channels2, + filter_size=1, + stride=stride, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + xs = paddle.split(y, self.scales, 1) + ys = [] + for s, conv1 in enumerate(self.conv1_list): + if s == 0 or self.stride == 2: + ys.append(conv1(xs[s])) + else: + ys.append(conv1(paddle.add(xs[s], ys[-1]))) + if self.stride == 1: + ys.append(xs[-1]) + else: + ys.append(self.pool2d_avg(xs[-1])) + conv1 = paddle.concat(ys, axis=1) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class Res2Net(nn.Layer): + def __init__(self, layers=50, scales=4, width=26, class_num=1000): + super(Res2Net, self).__init__() + + self.layers = layers + self.scales = scales + self.width = width + basic_width = self.width * self.scales + supported_layers = [50, 101, 152, 200] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + elif layers == 200: + depth = [3, 12, 48, 3] + num_channels = [64, 256, 512, 1024] + num_channels2 = [256, 512, 1024, 2048] + num_filters = [basic_width * t for t in [1, 2, 4, 8]] + + self.conv1 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + name="conv1") + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_list = [] + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels1=num_channels[block] + if i == 0 else num_channels2[block], + num_channels2=num_channels2[block], + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + scales=scales, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name)) + self.block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_weights"), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, inputs): + y = self.conv1(inputs) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def Res2Net50_26w_4s(pretrained=False, use_ssld=False, **kwargs): + model = Res2Net(layers=50, scales=4, width=26, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["Res2Net50_26w_4s"], use_ssld=use_ssld) + return model + + +def Res2Net50_14w_8s(pretrained=False, use_ssld=False, **kwargs): + model = Res2Net(layers=50, scales=8, width=14, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["Res2Net50_14w_8s"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/res2net_vd.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/res2net_vd.py new file mode 100644 index 000000000..206d06028 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/res2net_vd.py @@ -0,0 +1,308 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1904.01169 & https://arxiv.org/abs/1812.01187 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "Res2Net50_vd_26w_4s": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_vd_26w_4s_pretrained.pdparams", + "Res2Net101_vd_26w_4s": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net101_vd_26w_4s_pretrained.pdparams", + "Res2Net200_vd_26w_4s": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net200_vd_26w_4s_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + name=None, ): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def 
__init__(self, + num_channels1, + num_channels2, + num_filters, + stride, + scales, + shortcut=True, + if_first=False, + name=None): + super(BottleneckBlock, self).__init__() + self.stride = stride + self.scales = scales + self.conv0 = ConvBNLayer( + num_channels=num_channels1, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a") + self.conv1_list = [] + for s in range(scales - 1): + conv1 = self.add_sublayer( + name + '_branch2b_' + str(s + 1), + ConvBNLayer( + num_channels=num_filters // scales, + num_filters=num_filters // scales, + filter_size=3, + stride=stride, + act='relu', + name=name + '_branch2b_' + str(s + 1))) + self.conv1_list.append(conv1) + self.pool2d_avg = AvgPool2D(kernel_size=3, stride=stride, padding=1) + + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_channels2, + filter_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels1, + num_filters=num_channels2, + filter_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + xs = paddle.split(y, self.scales, 1) + ys = [] + for s, conv1 in enumerate(self.conv1_list): + if s == 0 or self.stride == 2: + ys.append(conv1(xs[s])) + else: + ys.append(conv1(xs[s] + ys[-1])) + if self.stride == 1: + ys.append(xs[-1]) + else: + ys.append(self.pool2d_avg(xs[-1])) + conv1 = paddle.concat(ys, axis=1) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class Res2Net_vd(nn.Layer): + def __init__(self, layers=50, scales=4, width=26, class_num=1000, + **kwargs): + super(Res2Net_vd, self).__init__() + + self.layers = layers + self.scales = scales + self.width = width + basic_width = self.width * self.scales + supported_layers = [50, 101, 152, 200] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + elif layers == 200: + depth = [3, 12, 48, 3] + num_channels = [64, 256, 512, 1024] + num_channels2 = [256, 512, 1024, 2048] + num_filters = [basic_width * t for t in [1, 2, 4, 8]] + + self.conv1_1 = ConvBNLayer( + num_channels=3, + num_filters=32, + filter_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + num_channels=32, + num_filters=32, + filter_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + num_channels=32, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name="conv1_3") + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_list = [] + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152, 200] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels1=num_channels[block] + if i == 0 else num_channels2[block], + num_channels2=num_channels2[block], + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + scales=scales, + shortcut=shortcut, + if_first=block == i == 
0, + name=conv_name)) + self.block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_weights"), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def Res2Net50_vd_26w_4s(pretrained=False, use_ssld=False, **kwargs): + model = Res2Net_vd(layers=50, scales=4, width=26, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["Res2Net50_vd_26w_4s"], + use_ssld=use_ssld) + return model + + +def Res2Net101_vd_26w_4s(pretrained=False, use_ssld=False, **kwargs): + model = Res2Net_vd(layers=101, scales=4, width=26, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["Res2Net101_vd_26w_4s"], + use_ssld=use_ssld) + return model + + +def Res2Net200_vd_26w_4s(pretrained=False, use_ssld=False, **kwargs): + model = Res2Net_vd(layers=200, scales=4, width=26, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["Res2Net200_vd_26w_4s"], + use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnest.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnest.py new file mode 100644 index 000000000..171aa1f94 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnest.py @@ -0,0 +1,780 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
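+# ResNeSt applies split attention inside each bottleneck: the 3x3 conv runs
+# with radix * cardinality groups, the radix splits are summed, squeezed
+# through a small 1x1-conv gate (conv2/conv3 in SplatConv below), and
+# recombined with a softmax over splits (rSoftmax) before the 1x1 projection.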
+ +# Code was based on https://github.com/zhanghang1989/ResNeSt +# reference: https://arxiv.org/abs/2004.08955 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +import math +import paddle.nn as nn +import paddle.nn.functional as F +from paddle import ParamAttr +from paddle.nn.initializer import KaimingNormal +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.regularizer import L2Decay + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "ResNeSt50_fast_1s1x64d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_fast_1s1x64d_pretrained.pdparams", + "ResNeSt50": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_pretrained.pdparams", + "ResNeSt101": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt101_pretrained.pdparams", + "ResNeSt200": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt200_pretrained.pdparams", + "ResNeSt269": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt269_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + dilation=1, + groups=1, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + bn_decay = 0.0 + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + dilation=dilation, + groups=groups, + weight_attr=ParamAttr(name=name + "_weight"), + bias_attr=False) + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr( + name=name + "_scale", regularizer=L2Decay(bn_decay)), + bias_attr=ParamAttr( + name + "_offset", regularizer=L2Decay(bn_decay)), + moving_mean_name=name + "_mean", + moving_variance_name=name + "_variance") + + def forward(self, x): + x = self._conv(x) + x = self._batch_norm(x) + return x + + +class rSoftmax(nn.Layer): + def __init__(self, radix, cardinality): + super(rSoftmax, self).__init__() + self.radix = radix + self.cardinality = cardinality + + def forward(self, x): + cardinality = self.cardinality + radix = self.radix + + batch, r, h, w = x.shape + if self.radix > 1: + x = paddle.reshape( + x=x, + shape=[ + batch, cardinality, radix, + int(r * h * w / cardinality / radix) + ]) + x = paddle.transpose(x=x, perm=[0, 2, 1, 3]) + x = nn.functional.softmax(x, axis=1) + x = paddle.reshape(x=x, shape=[batch, r * h * w, 1, 1]) + else: + x = nn.functional.sigmoid(x) + return x + + +class SplatConv(nn.Layer): + def __init__(self, + in_channels, + channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + radix=2, + reduction_factor=4, + rectify_avg=False, + name=None): + super(SplatConv, self).__init__() + + self.radix = radix + + self.conv1 = ConvBNLayer( + num_channels=in_channels, + num_filters=channels * radix, + filter_size=kernel_size, + stride=stride, + groups=groups * radix, + act="relu", + name=name + "_1_weights") + + self.avg_pool2d = AdaptiveAvgPool2D(1) + + inter_channels = int(max(in_channels * radix // reduction_factor, 32)) + + # to calc gap + self.conv2 = ConvBNLayer( + num_channels=channels, + num_filters=inter_channels, + filter_size=1, + stride=1, + groups=groups, + act="relu", + name=name + "_2_weights") + + # to calc atten + self.conv3 = Conv2D( 
+ in_channels=inter_channels, + out_channels=channels * radix, + kernel_size=1, + stride=1, + padding=0, + groups=groups, + weight_attr=ParamAttr( + name=name + "_weights", initializer=KaimingNormal())) + + self.rsoftmax = rSoftmax(radix=radix, cardinality=groups) + + def forward(self, x): + x = self.conv1(x) + + if self.radix > 1: + splited = paddle.split(x, num_or_sections=self.radix, axis=1) + gap = paddle.add_n(splited) + else: + gap = x + + gap = self.avg_pool2d(gap) + gap = self.conv2(gap) + + atten = self.conv3(gap) + atten = self.rsoftmax(atten) + + if self.radix > 1: + attens = paddle.split(atten, num_or_sections=self.radix, axis=1) + y = paddle.add_n([ + paddle.multiply(split, att) + for (att, split) in zip(attens, splited) + ]) + else: + y = paddle.multiply(x, atten) + + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + inplanes, + planes, + stride=1, + radix=1, + cardinality=1, + bottleneck_width=64, + avd=False, + avd_first=False, + dilation=1, + is_first=False, + rectify_avg=False, + last_gamma=False, + avg_down=False, + name=None): + super(BottleneckBlock, self).__init__() + self.inplanes = inplanes + self.planes = planes + self.stride = stride + self.radix = radix + self.cardinality = cardinality + self.avd = avd + self.avd_first = avd_first + self.dilation = dilation + self.is_first = is_first + self.rectify_avg = rectify_avg + self.last_gamma = last_gamma + self.avg_down = avg_down + + group_width = int(planes * (bottleneck_width / 64.)) * cardinality + + self.conv1 = ConvBNLayer( + num_channels=self.inplanes, + num_filters=group_width, + filter_size=1, + stride=1, + groups=1, + act="relu", + name=name + "_conv1") + + if avd and avd_first and (stride > 1 or is_first): + self.avg_pool2d_1 = AvgPool2D( + kernel_size=3, stride=stride, padding=1) + + if radix >= 1: + self.conv2 = SplatConv( + in_channels=group_width, + channels=group_width, + kernel_size=3, + stride=1, + padding=dilation, + dilation=dilation, + groups=cardinality, + bias=False, + radix=radix, + rectify_avg=rectify_avg, + name=name + "_splat") + else: + self.conv2 = ConvBNLayer( + num_channels=group_width, + num_filters=group_width, + filter_size=3, + stride=1, + dilation=dilation, + groups=cardinality, + act="relu", + name=name + "_conv2") + + if avd and avd_first == False and (stride > 1 or is_first): + self.avg_pool2d_2 = AvgPool2D( + kernel_size=3, stride=stride, padding=1) + + self.conv3 = ConvBNLayer( + num_channels=group_width, + num_filters=planes * 4, + filter_size=1, + stride=1, + groups=1, + act=None, + name=name + "_conv3") + + if stride != 1 or self.inplanes != self.planes * 4: + if avg_down: + if dilation == 1: + self.avg_pool2d_3 = AvgPool2D( + kernel_size=stride, stride=stride, padding=0) + else: + self.avg_pool2d_3 = AvgPool2D( + kernel_size=1, stride=1, padding=0, ceil_mode=True) + + self.conv4 = Conv2D( + in_channels=self.inplanes, + out_channels=planes * 4, + kernel_size=1, + stride=1, + padding=0, + groups=1, + weight_attr=ParamAttr( + name=name + "_weights", initializer=KaimingNormal()), + bias_attr=False) + else: + self.conv4 = Conv2D( + in_channels=self.inplanes, + out_channels=planes * 4, + kernel_size=1, + stride=stride, + padding=0, + groups=1, + weight_attr=ParamAttr( + name=name + "_shortcut_weights", + initializer=KaimingNormal()), + bias_attr=False) + + bn_decay = 0.0 + self._batch_norm = BatchNorm( + planes * 4, + act=None, + param_attr=ParamAttr( + name=name + "_shortcut_scale", + regularizer=L2Decay(bn_decay)), + bias_attr=ParamAttr( + name + 
"_shortcut_offset", regularizer=L2Decay(bn_decay)), + moving_mean_name=name + "_shortcut_mean", + moving_variance_name=name + "_shortcut_variance") + + def forward(self, x): + short = x + + x = self.conv1(x) + if self.avd and self.avd_first and (self.stride > 1 or self.is_first): + x = self.avg_pool2d_1(x) + + x = self.conv2(x) + + if self.avd and self.avd_first == False and (self.stride > 1 or + self.is_first): + x = self.avg_pool2d_2(x) + + x = self.conv3(x) + + if self.stride != 1 or self.inplanes != self.planes * 4: + if self.avg_down: + short = self.avg_pool2d_3(short) + + short = self.conv4(short) + + short = self._batch_norm(short) + + y = paddle.add(x=short, y=x) + y = F.relu(y) + return y + + +class ResNeStLayer(nn.Layer): + def __init__(self, + inplanes, + planes, + blocks, + radix, + cardinality, + bottleneck_width, + avg_down, + avd, + avd_first, + rectify_avg, + last_gamma, + stride=1, + dilation=1, + is_first=True, + name=None): + super(ResNeStLayer, self).__init__() + self.inplanes = inplanes + self.planes = planes + self.blocks = blocks + self.radix = radix + self.cardinality = cardinality + self.bottleneck_width = bottleneck_width + self.avg_down = avg_down + self.avd = avd + self.avd_first = avd_first + self.rectify_avg = rectify_avg + self.last_gamma = last_gamma + self.is_first = is_first + + if dilation == 1 or dilation == 2: + bottleneck_func = self.add_sublayer( + name + "_bottleneck_0", + BottleneckBlock( + inplanes=self.inplanes, + planes=planes, + stride=stride, + radix=radix, + cardinality=cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + dilation=1, + is_first=is_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + name=name + "_bottleneck_0")) + elif dilation == 4: + bottleneck_func = self.add_sublayer( + name + "_bottleneck_0", + BottleneckBlock( + inplanes=self.inplanes, + planes=planes, + stride=stride, + radix=radix, + cardinality=cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + dilation=2, + is_first=is_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + name=name + "_bottleneck_0")) + else: + raise RuntimeError("=>unknown dilation size") + + self.inplanes = planes * 4 + self.bottleneck_block_list = [bottleneck_func] + for i in range(1, blocks): + curr_name = name + "_bottleneck_" + str(i) + + bottleneck_func = self.add_sublayer( + curr_name, + BottleneckBlock( + inplanes=self.inplanes, + planes=planes, + radix=radix, + cardinality=cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + dilation=dilation, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + name=curr_name)) + self.bottleneck_block_list.append(bottleneck_func) + + def forward(self, x): + for bottleneck_block in self.bottleneck_block_list: + x = bottleneck_block(x) + return x + + +class ResNeSt(nn.Layer): + def __init__(self, + layers, + radix=1, + groups=1, + bottleneck_width=64, + dilated=False, + dilation=1, + deep_stem=False, + stem_width=64, + avg_down=False, + rectify_avg=False, + avd=False, + avd_first=False, + final_drop=0.0, + last_gamma=False, + class_num=1000): + super(ResNeSt, self).__init__() + + self.cardinality = groups + self.bottleneck_width = bottleneck_width + # ResNet-D params + self.inplanes = stem_width * 2 if deep_stem else 64 + self.avg_down = avg_down + self.last_gamma = last_gamma + # ResNeSt params + self.radix = radix + self.avd = avd + self.avd_first = avd_first + + 
self.deep_stem = deep_stem + self.stem_width = stem_width + self.layers = layers + self.final_drop = final_drop + self.dilated = dilated + self.dilation = dilation + + self.rectify_avg = rectify_avg + + if self.deep_stem: + self.stem = nn.Sequential( + ("conv1", ConvBNLayer( + num_channels=3, + num_filters=stem_width, + filter_size=3, + stride=2, + act="relu", + name="conv1")), ("conv2", ConvBNLayer( + num_channels=stem_width, + num_filters=stem_width, + filter_size=3, + stride=1, + act="relu", + name="conv2")), ("conv3", ConvBNLayer( + num_channels=stem_width, + num_filters=stem_width * 2, + filter_size=3, + stride=1, + act="relu", + name="conv3"))) + else: + self.stem = ConvBNLayer( + num_channels=3, + num_filters=stem_width, + filter_size=7, + stride=2, + act="relu", + name="conv1") + + self.max_pool2d = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.layer1 = ResNeStLayer( + inplanes=self.stem_width * 2 + if self.deep_stem else self.stem_width, + planes=64, + blocks=self.layers[0], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=1, + dilation=1, + is_first=False, + name="layer1") + + # return + + self.layer2 = ResNeStLayer( + inplanes=256, + planes=128, + blocks=self.layers[1], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=2, + name="layer2") + + if self.dilated or self.dilation == 4: + self.layer3 = ResNeStLayer( + inplanes=512, + planes=256, + blocks=self.layers[2], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=1, + dilation=2, + name="layer3") + self.layer4 = ResNeStLayer( + inplanes=1024, + planes=512, + blocks=self.layers[3], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=1, + dilation=4, + name="layer4") + elif self.dilation == 2: + self.layer3 = ResNeStLayer( + inplanes=512, + planes=256, + blocks=self.layers[2], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=2, + dilation=1, + name="layer3") + self.layer4 = ResNeStLayer( + inplanes=1024, + planes=512, + blocks=self.layers[3], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=1, + dilation=2, + name="layer4") + else: + self.layer3 = ResNeStLayer( + inplanes=512, + planes=256, + blocks=self.layers[2], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + last_gamma=last_gamma, + stride=2, + name="layer3") + self.layer4 = ResNeStLayer( + inplanes=1024, + planes=512, + blocks=self.layers[3], + radix=radix, + cardinality=self.cardinality, + bottleneck_width=bottleneck_width, + avg_down=self.avg_down, + avd=avd, + avd_first=avd_first, + rectify_avg=rectify_avg, + 
last_gamma=last_gamma, + stride=2, + name="layer4") + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.out_channels = 2048 + + stdv = 1.0 / math.sqrt(self.out_channels * 1.0) + + self.out = Linear( + self.out_channels, + class_num, + weight_attr=ParamAttr( + initializer=nn.initializer.Uniform(-stdv, stdv), + name="fc_weights"), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, x): + x = self.stem(x) + x = self.max_pool2d(x) + x = self.layer1(x) + x = self.layer2(x) + + x = self.layer3(x) + + x = self.layer4(x) + x = self.pool2d_avg(x) + x = paddle.reshape(x, shape=[-1, self.out_channels]) + x = self.out(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNeSt50_fast_1s1x64d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeSt( + layers=[3, 4, 6, 3], + radix=1, + groups=1, + bottleneck_width=64, + deep_stem=True, + stem_width=32, + avg_down=True, + avd=True, + avd_first=True, + final_drop=0.0, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeSt50_fast_1s1x64d"], + use_ssld=use_ssld) + return model + + +def ResNeSt50(pretrained=False, use_ssld=False, **kwargs): + model = ResNeSt( + layers=[3, 4, 6, 3], + radix=2, + groups=1, + bottleneck_width=64, + deep_stem=True, + stem_width=32, + avg_down=True, + avd=True, + avd_first=False, + final_drop=0.0, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeSt50"], use_ssld=use_ssld) + return model + + +def ResNeSt101(pretrained=False, use_ssld=False, **kwargs): + model = ResNeSt( + layers=[3, 4, 23, 3], + radix=2, + groups=1, + bottleneck_width=64, + deep_stem=True, + stem_width=64, + avg_down=True, + avd=True, + avd_first=False, + final_drop=0.0, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeSt101"], use_ssld=use_ssld) + return model + + +def ResNeSt200(pretrained=False, use_ssld=False, **kwargs): + model = ResNeSt( + layers=[3, 24, 36, 3], + radix=2, + groups=1, + bottleneck_width=64, + deep_stem=True, + stem_width=64, + avg_down=True, + avd=True, + avd_first=False, + final_drop=0.0, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeSt200"], use_ssld=use_ssld) + return model + + +def ResNeSt269(pretrained=False, use_ssld=False, **kwargs): + model = ResNeSt( + layers=[3, 30, 48, 8], + radix=2, + groups=1, + bottleneck_width=64, + deep_stem=True, + stem_width=64, + avg_down=True, + avd=True, + avd_first=False, + final_drop=0.0, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeSt269"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnet_vc.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnet_vc.py new file mode 100644 index 000000000..ba44a2ce0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnet_vc.py @@ -0,0 +1,311 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1812.01187 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "ResNet50_vc": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vc_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride, + name=name + "_branch1") + + self.shortcut = shortcut + + self._num_channels_out = num_filters * 4 + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class BasicBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + name=None): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + act=None, + name=name + "_branch2b") + + if 
not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=stride, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=conv1) + y = F.relu(y) + return y + + +class ResNet_vc(nn.Layer): + def __init__(self, layers=50, class_num=1000): + super(ResNet_vc, self).__init__() + + self.layers = layers + supported_layers = [18, 34, 50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_channels = [64, 256, 512, + 1024] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512] + + self.conv1_1 = ConvBNLayer( + num_channels=3, + num_filters=32, + filter_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + num_channels=32, + num_filters=32, + filter_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + num_channels=32, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name="conv1_3") + + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_list = [] + if layers >= 50: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + name=conv_name)) + self.block_list.append(bottleneck_block) + shortcut = True + else: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BasicBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block], + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + name=conv_name)) + self.block_list.append(basic_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_0.w_0"), + bias_attr=ParamAttr(name="fc_0.b_0")) + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise 
RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNet50_vc(pretrained=False, use_ssld=False, **kwargs): + model = ResNet_vc(layers=50, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNet50_vc"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnext.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnext.py new file mode 100644 index 000000000..53f65b631 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnext.py @@ -0,0 +1,303 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1611.05431 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "ResNeXt50_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_32x4d_pretrained.pdparams", + "ResNeXt50_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_64x4d_pretrained.pdparams", + "ResNeXt101_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x4d_pretrained.pdparams", + "ResNeXt101_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_64x4d_pretrained.pdparams", + "ResNeXt152_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_32x4d_pretrained.pdparams", + "ResNeXt152_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_64x4d_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None, + data_format="NCHW"): + super(ConvBNLayer, self).__init__() + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False, + data_format=data_format) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance', + data_layout=data_format) + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def 
__init__(self, + num_channels, + num_filters, + stride, + cardinality, + shortcut=True, + name=None, + data_format="NCHW"): + super(BottleneckBlock, self).__init__() + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a", + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + groups=cardinality, + stride=stride, + act='relu', + name=name + "_branch2b", + data_format=data_format) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 2 if cardinality == 32 else num_filters, + filter_size=1, + act=None, + name=name + "_branch2c", + data_format=data_format) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 2 + if cardinality == 32 else num_filters, + filter_size=1, + stride=stride, + name=name + "_branch1", + data_format=data_format) + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class ResNeXt(nn.Layer): + def __init__(self, + layers=50, + class_num=1000, + cardinality=32, + input_image_channel=3, + data_format="NCHW"): + super(ResNeXt, self).__init__() + + self.layers = layers + self.data_format = data_format + self.input_image_channel = input_image_channel + self.cardinality = cardinality + supported_layers = [50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + supported_cardinality = [32, 64] + assert cardinality in supported_cardinality, \ + "supported cardinality is {} but input cardinality is {}" \ + .format(supported_cardinality, cardinality) + if layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_channels = [64, 256, 512, 1024] + num_filters = [128, 256, 512, + 1024] if cardinality == 32 else [256, 512, 1024, 2048] + + self.conv = ConvBNLayer( + num_channels=self.input_image_channel, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + name="res_conv1", + data_format=self.data_format) + self.pool2d_max = MaxPool2D( + kernel_size=3, stride=2, padding=1, data_format=self.data_format) + + self.block_list = [] + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] if i == 0 else + num_filters[block] * int(64 // self.cardinality), + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=self.cardinality, + shortcut=shortcut, + name=conv_name, + data_format=self.data_format)) + self.block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1, data_format=self.data_format) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), 
name="fc_weights"), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, inputs): + with paddle.static.amp.fp16_guard(): + return self._forward(inputs) + + def _forward(self, inputs): + if self.data_format == "NHWC": + inputs = paddle.tensor.transpose(inputs, [0, 2, 3, 1]) + inputs.stop_gradient = True + y = self.conv(inputs) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNeXt50_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=50, cardinality=32, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeXt50_32x4d"], use_ssld=use_ssld) + return model + + +def ResNeXt50_64x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=50, cardinality=64, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeXt50_64x4d"], use_ssld=use_ssld) + return model + + +def ResNeXt101_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=101, cardinality=32, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeXt101_32x4d"], use_ssld=use_ssld) + return model + + +def ResNeXt101_64x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=101, cardinality=64, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeXt101_64x4d"], use_ssld=use_ssld) + return model + + +def ResNeXt152_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=152, cardinality=32, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeXt152_32x4d"], use_ssld=use_ssld) + return model + + +def ResNeXt152_64x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=152, cardinality=64, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeXt152_64x4d"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnext101_wsl.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnext101_wsl.py new file mode 100644 index 000000000..a4478e70a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnext101_wsl.py @@ -0,0 +1,506 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# reference: https://arxiv.org/abs/1805.00932 + +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "ResNeXt101_32x8d_wsl": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x8d_wsl_pretrained.pdparams", + "ResNeXt101_32x16d_wsl": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x16_wsl_pretrained.pdparams", + "ResNeXt101_32x32d_wsl": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x32d_wsl_pretrained.pdparams", + "ResNeXt101_32x48d_wsl": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x48d_wsl_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + input_channels, + output_channels, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + if "downsample" in name: + conv_name = name + ".0" + else: + conv_name = name + self._conv = Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=conv_name + ".weight"), + bias_attr=False) + if "downsample" in name: + bn_name = name[:9] + "downsample.1" + else: + if "conv1" == name: + bn_name = "bn" + name[-1] + else: + bn_name = (name[:10] if name[7:9].isdigit() else name[:9] + ) + "bn" + name[-1] + self._bn = BatchNorm( + num_channels=output_channels, + act=act, + param_attr=ParamAttr(name=bn_name + ".weight"), + bias_attr=ParamAttr(name=bn_name + ".bias"), + moving_mean_name=bn_name + ".running_mean", + moving_variance_name=bn_name + ".running_var") + + def forward(self, inputs): + x = self._conv(inputs) + x = self._bn(x) + return x + + +class ShortCut(nn.Layer): + def __init__(self, input_channels, output_channels, stride, name=None): + super(ShortCut, self).__init__() + + self.input_channels = input_channels + self.output_channels = output_channels + self.stride = stride + if input_channels != output_channels or stride != 1: + self._conv = ConvBNLayer( + input_channels, + output_channels, + filter_size=1, + stride=stride, + name=name) + + def forward(self, inputs): + if self.input_channels != self.output_channels or self.stride != 1: + return self._conv(inputs) + return inputs + + +class BottleneckBlock(nn.Layer): + def __init__(self, input_channels, output_channels, stride, cardinality, + width, name): + super(BottleneckBlock, self).__init__() + + self._conv0 = ConvBNLayer( + input_channels, + output_channels, + filter_size=1, + act="relu", + name=name + ".conv1") + self._conv1 = ConvBNLayer( + output_channels, + output_channels, + filter_size=3, + act="relu", + stride=stride, + groups=cardinality, + name=name + ".conv2") + self._conv2 = ConvBNLayer( + output_channels, + output_channels // (width // 8), + filter_size=1, + act=None, + name=name + ".conv3") + self._short = ShortCut( + input_channels, + output_channels // (width // 8), + stride=stride, + name=name + ".downsample") + + def forward(self, inputs): + x = self._conv0(inputs) + x = self._conv1(x) + x = self._conv2(x) + y = self._short(inputs) + y = paddle.add(x, y) + y = F.relu(y) + return y + + +class ResNeXt101WSL(nn.Layer): + def __init__(self, 
layers=101, cardinality=32, width=48, class_num=1000): + super(ResNeXt101WSL, self).__init__() + + self.class_num = class_num + + self.layers = layers + self.cardinality = cardinality + self.width = width + self.scale = width // 8 + + self.depth = [3, 4, 23, 3] + self.base_width = cardinality * width + num_filters = [self.base_width * i + for i in [1, 2, 4, 8]] # [256, 512, 1024, 2048] + self._conv_stem = ConvBNLayer( + 3, 64, 7, stride=2, act="relu", name="conv1") + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self._conv1_0 = BottleneckBlock( + 64, + num_filters[0], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer1.0") + self._conv1_1 = BottleneckBlock( + num_filters[0] // (width // 8), + num_filters[0], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer1.1") + self._conv1_2 = BottleneckBlock( + num_filters[0] // (width // 8), + num_filters[0], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer1.2") + + self._conv2_0 = BottleneckBlock( + num_filters[0] // (width // 8), + num_filters[1], + stride=2, + cardinality=self.cardinality, + width=self.width, + name="layer2.0") + self._conv2_1 = BottleneckBlock( + num_filters[1] // (width // 8), + num_filters[1], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer2.1") + self._conv2_2 = BottleneckBlock( + num_filters[1] // (width // 8), + num_filters[1], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer2.2") + self._conv2_3 = BottleneckBlock( + num_filters[1] // (width // 8), + num_filters[1], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer2.3") + + self._conv3_0 = BottleneckBlock( + num_filters[1] // (width // 8), + num_filters[2], + stride=2, + cardinality=self.cardinality, + width=self.width, + name="layer3.0") + self._conv3_1 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.1") + self._conv3_2 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.2") + self._conv3_3 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.3") + self._conv3_4 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.4") + self._conv3_5 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.5") + self._conv3_6 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.6") + self._conv3_7 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.7") + self._conv3_8 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.8") + self._conv3_9 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.9") + self._conv3_10 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + 
name="layer3.10") + self._conv3_11 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.11") + self._conv3_12 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.12") + self._conv3_13 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.13") + self._conv3_14 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.14") + self._conv3_15 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.15") + self._conv3_16 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.16") + self._conv3_17 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.17") + self._conv3_18 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.18") + self._conv3_19 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.19") + self._conv3_20 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.20") + self._conv3_21 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.21") + self._conv3_22 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[2], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer3.22") + + self._conv4_0 = BottleneckBlock( + num_filters[2] // (width // 8), + num_filters[3], + stride=2, + cardinality=self.cardinality, + width=self.width, + name="layer4.0") + self._conv4_1 = BottleneckBlock( + num_filters[3] // (width // 8), + num_filters[3], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer4.1") + self._conv4_2 = BottleneckBlock( + num_filters[3] // (width // 8), + num_filters[3], + stride=1, + cardinality=self.cardinality, + width=self.width, + name="layer4.2") + + self._avg_pool = AdaptiveAvgPool2D(1) + self._out = Linear( + num_filters[3] // (width // 8), + class_num, + weight_attr=ParamAttr(name="fc.weight"), + bias_attr=ParamAttr(name="fc.bias")) + + def forward(self, inputs): + x = self._conv_stem(inputs) + x = self._pool(x) + + x = self._conv1_0(x) + x = self._conv1_1(x) + x = self._conv1_2(x) + + x = self._conv2_0(x) + x = self._conv2_1(x) + x = self._conv2_2(x) + x = self._conv2_3(x) + + x = self._conv3_0(x) + x = self._conv3_1(x) + x = self._conv3_2(x) + x = self._conv3_3(x) + x = self._conv3_4(x) + x = self._conv3_5(x) + x = self._conv3_6(x) + x = self._conv3_7(x) + x = self._conv3_8(x) + x = self._conv3_9(x) + x = self._conv3_10(x) + x = self._conv3_11(x) + x = self._conv3_12(x) + x = self._conv3_13(x) + x = self._conv3_14(x) + x = self._conv3_15(x) + x = self._conv3_16(x) + x = self._conv3_17(x) + x = self._conv3_18(x) + x = self._conv3_19(x) + x = self._conv3_20(x) + x = self._conv3_21(x) + 
x = self._conv3_22(x) + + x = self._conv4_0(x) + x = self._conv4_1(x) + x = self._conv4_2(x) + + x = self._avg_pool(x) + x = paddle.squeeze(x, axis=[2, 3]) + x = self._out(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNeXt101_32x8d_wsl(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt101WSL(cardinality=32, width=8, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeXt101_32x8d_wsl"], + use_ssld=use_ssld) + return model + + +def ResNeXt101_32x16d_wsl(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt101WSL(cardinality=32, width=16, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeXt101_32x16d_wsl"], + use_ssld=use_ssld) + return model + + +def ResNeXt101_32x32d_wsl(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt101WSL(cardinality=32, width=32, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeXt101_32x32d_wsl"], + use_ssld=use_ssld) + return model + + +def ResNeXt101_32x48d_wsl(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt101WSL(cardinality=32, width=48, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeXt101_32x48d_wsl"], + use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnext_vd.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnext_vd.py new file mode 100644 index 000000000..b2c7d7777 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/resnext_vd.py @@ -0,0 +1,319 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
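For reference, the ResNeXt101-WSL variants above scale the grouped-convolution width by cardinality * width and then divide each bottleneck's output channels by width // 8, so every width setting (8, 16, 32, 48) lands on the familiar 256/512/1024/2048 stage widths. A small standalone sketch of that arithmetic (plain Python, no framework required; variable names are local to the sketch):

cardinality, width = 32, 48                    # e.g. ResNeXt101_32x48d_wsl
base_width = cardinality * width               # 1536
num_filters = [base_width * m for m in (1, 2, 4, 8)]
scale = width // 8                             # each block divides its output by this
block_out = [c // scale for c in num_filters]
print(num_filters)   # [1536, 3072, 6144, 12288]  grouped 3x3 conv widths
print(block_out)     # [256, 512, 1024, 2048]     channels fed to the next stage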
+ +# reference: https://arxiv.org/abs/1611.05431 & https://arxiv.org/abs/1812.01187 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "ResNeXt50_vd_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_vd_32x4d_pretrained.pdparams", + "ResNeXt50_vd_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_vd_64x4d_pretrained.pdparams", + "ResNeXt101_vd_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_32x4d_pretrained.pdparams", + "ResNeXt101_vd_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_64x4d_pretrained.pdparams", + "ResNeXt152_vd_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_vd_32x4d_pretrained.pdparams", + "ResNeXt152_vd_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_vd_64x4d_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + name=None, ): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + cardinality, + shortcut=True, + if_first=False, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + groups=cardinality, + stride=stride, + act='relu', + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 2 if cardinality == 32 else num_filters, + filter_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 2 + if cardinality == 32 else num_filters, + filter_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = 
inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class ResNeXt(nn.Layer): + def __init__(self, layers=50, class_num=1000, cardinality=32): + super(ResNeXt, self).__init__() + + self.layers = layers + self.cardinality = cardinality + supported_layers = [50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + supported_cardinality = [32, 64] + assert cardinality in supported_cardinality, \ + "supported cardinality is {} but input cardinality is {}" \ + .format(supported_cardinality, cardinality) + if layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_channels = [64, 256, 512, 1024] + num_filters = [128, 256, 512, + 1024] if cardinality == 32 else [256, 512, 1024, 2048] + + self.conv1_1 = ConvBNLayer( + num_channels=3, + num_filters=32, + filter_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + num_channels=32, + num_filters=32, + filter_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + num_channels=32, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name="conv1_3") + + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_list = [] + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] if i == 0 else + num_filters[block] * int(64 // self.cardinality), + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=self.cardinality, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name)) + self.block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc_weights"), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def ResNeXt50_vd_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=50, cardinality=32, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeXt50_vd_32x4d"], use_ssld=use_ssld) + return model + + +def ResNeXt50_vd_64x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=50, cardinality=64, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ResNeXt50_vd_64x4d"], use_ssld=use_ssld) + return model + + +def ResNeXt101_vd_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=101, cardinality=32, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeXt101_vd_32x4d"], + use_ssld=use_ssld) + return model + + +def ResNeXt101_vd_64x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=101, cardinality=64, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeXt101_vd_64x4d"], + use_ssld=use_ssld) + return model + + +def ResNeXt152_vd_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=152, cardinality=32, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeXt152_vd_32x4d"], + use_ssld=use_ssld) + return model + + +def ResNeXt152_vd_64x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=152, cardinality=64, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ResNeXt152_vd_64x4d"], + use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/rexnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/rexnet.py new file mode 100644 index 000000000..098fb2876 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/rexnet.py @@ -0,0 +1,283 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
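The *_vd variants above differ from plain ResNeXt in two ways: a deep stem of three 3x3 convolutions, and an is_vd_mode shortcut that average-pools before a stride-1 1x1 projection instead of using a strided 1x1 convolution. A minimal sketch of that downsampling shortcut, using only public paddle.nn layers (shapes chosen for illustration):

import paddle
import paddle.nn as nn

# "vd"-style downsampling shortcut: 2x2 average pool, then a stride-1 1x1
# projection, rather than a single strided 1x1 convolution.
x = paddle.randn([1, 256, 56, 56])
pool = nn.AvgPool2D(kernel_size=2, stride=2, padding=0, ceil_mode=True)
proj = nn.Conv2D(256, 512, kernel_size=1, stride=1, bias_attr=False)
print(proj(pool(x)).shape)   # [1, 512, 28, 28]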
+ +# reference: https://arxiv.org/abs/2007.00992 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from math import ceil + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "ReXNet_1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_0_pretrained.pdparams", + "ReXNet_1_3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_3_pretrained.pdparams", + "ReXNet_1_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_5_pretrained.pdparams", + "ReXNet_2_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_2_0_pretrained.pdparams", + "ReXNet_3_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_3_0_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +def conv_bn_act(out, + in_channels, + channels, + kernel=1, + stride=1, + pad=0, + num_group=1, + active=True, + relu6=False): + out.append( + nn.Conv2D( + in_channels, + channels, + kernel, + stride, + pad, + groups=num_group, + bias_attr=False)) + out.append(nn.BatchNorm2D(channels)) + if active: + out.append(nn.ReLU6() if relu6 else nn.ReLU()) + + +def conv_bn_swish(out, + in_channels, + channels, + kernel=1, + stride=1, + pad=0, + num_group=1): + out.append( + nn.Conv2D( + in_channels, + channels, + kernel, + stride, + pad, + groups=num_group, + bias_attr=False)) + out.append(nn.BatchNorm2D(channels)) + out.append(nn.Swish()) + + +class SE(nn.Layer): + def __init__(self, in_channels, channels, se_ratio=12): + super(SE, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2D(1) + self.fc = nn.Sequential( + nn.Conv2D( + in_channels, channels // se_ratio, kernel_size=1, padding=0), + nn.BatchNorm2D(channels // se_ratio), + nn.ReLU(), + nn.Conv2D( + channels // se_ratio, channels, kernel_size=1, padding=0), + nn.Sigmoid()) + + def forward(self, x): + y = self.avg_pool(x) + y = self.fc(y) + return x * y + + +class LinearBottleneck(nn.Layer): + def __init__(self, + in_channels, + channels, + t, + stride, + use_se=True, + se_ratio=12, + **kwargs): + super(LinearBottleneck, self).__init__(**kwargs) + self.use_shortcut = stride == 1 and in_channels <= channels + self.in_channels = in_channels + self.out_channels = channels + + out = [] + if t != 1: + dw_channels = in_channels * t + conv_bn_swish(out, in_channels=in_channels, channels=dw_channels) + else: + dw_channels = in_channels + + conv_bn_act( + out, + in_channels=dw_channels, + channels=dw_channels, + kernel=3, + stride=stride, + pad=1, + num_group=dw_channels, + active=False) + + if use_se: + out.append(SE(dw_channels, dw_channels, se_ratio)) + + out.append(nn.ReLU6()) + conv_bn_act( + out, + in_channels=dw_channels, + channels=channels, + active=False, + relu6=True) + self.out = nn.Sequential(*out) + + def forward(self, x): + out = self.out(x) + if self.use_shortcut: + out[:, 0:self.in_channels] += x + + return out + + +class ReXNetV1(nn.Layer): + def __init__(self, + input_ch=16, + final_ch=180, + width_mult=1.0, + depth_mult=1.0, + class_num=1000, + use_se=True, + se_ratio=12, + dropout_ratio=0.2, + bn_momentum=0.9): + super(ReXNetV1, self).__init__() + + layers = [1, 2, 2, 3, 3, 5] + strides = [1, 2, 2, 2, 1, 2] + use_ses = [False, False, True, True, True, True] + + layers = [ceil(element * depth_mult) for element in layers] + strides = sum([[element] + [1] * (layers[idx] - 1) + for idx, element in 
enumerate(strides)], []) + if use_se: + use_ses = sum([[element] * layers[idx] + for idx, element in enumerate(use_ses)], []) + else: + use_ses = [False] * sum(layers[:]) + ts = [1] * layers[0] + [6] * sum(layers[1:]) + + self.depth = sum(layers[:]) * 3 + stem_channel = 32 / width_mult if width_mult < 1.0 else 32 + inplanes = input_ch / width_mult if width_mult < 1.0 else input_ch + + features = [] + in_channels_group = [] + channels_group = [] + + # The following channel configuration is a simple instance to make each layer become an expand layer. + for i in range(self.depth // 3): + if i == 0: + in_channels_group.append(int(round(stem_channel * width_mult))) + channels_group.append(int(round(inplanes * width_mult))) + else: + in_channels_group.append(int(round(inplanes * width_mult))) + inplanes += final_ch / (self.depth // 3 * 1.0) + channels_group.append(int(round(inplanes * width_mult))) + + conv_bn_swish( + features, + 3, + int(round(stem_channel * width_mult)), + kernel=3, + stride=2, + pad=1) + + for block_idx, (in_c, c, t, s, se) in enumerate( + zip(in_channels_group, channels_group, ts, strides, use_ses)): + features.append( + LinearBottleneck( + in_channels=in_c, + channels=c, + t=t, + stride=s, + use_se=se, + se_ratio=se_ratio)) + + pen_channels = int(1280 * width_mult) + conv_bn_swish(features, c, pen_channels) + + features.append(nn.AdaptiveAvgPool2D(1)) + self.features = nn.Sequential(*features) + self.output = nn.Sequential( + nn.Dropout(dropout_ratio), + nn.Conv2D( + pen_channels, class_num, 1, bias_attr=True)) + + def forward(self, x): + x = self.features(x) + x = self.output(x).squeeze(axis=-1).squeeze(axis=-1) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def ReXNet_1_0(pretrained=False, use_ssld=False, **kwargs): + model = ReXNetV1(width_mult=1.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ReXNet_1_0"], use_ssld=use_ssld) + return model + + +def ReXNet_1_3(pretrained=False, use_ssld=False, **kwargs): + model = ReXNetV1(width_mult=1.3, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ReXNet_1_3"], use_ssld=use_ssld) + return model + + +def ReXNet_1_5(pretrained=False, use_ssld=False, **kwargs): + model = ReXNetV1(width_mult=1.5, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ReXNet_1_5"], use_ssld=use_ssld) + return model + + +def ReXNet_2_0(pretrained=False, use_ssld=False, **kwargs): + model = ReXNetV1(width_mult=2.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ReXNet_2_0"], use_ssld=use_ssld) + return model + + +def ReXNet_3_0(pretrained=False, use_ssld=False, **kwargs): + model = ReXNetV1(width_mult=3.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ReXNet_3_0"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/se_resnet_vd.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/se_resnet_vd.py new file mode 100644 index 000000000..e08399eac --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/se_resnet_vd.py @@ -0,0 +1,392 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
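The ReXNetV1 constructor above derives a linear channel schedule: starting from input_ch, every LinearBottleneck's output grows by final_ch divided by the number of blocks, all scaled by width_mult. The standalone sketch below mirrors that loop so the resulting (in_channels, out_channels) pairs can be inspected; it is an illustration only, and its names are local to the sketch.

from math import ceil

def rexnet_channels(input_ch=16, final_ch=180, width_mult=1.0, depth_mult=1.0):
    # Same schedule as ReXNetV1.__init__: channels grow by final_ch / num_blocks.
    layers = [ceil(e * depth_mult) for e in [1, 2, 2, 3, 3, 5]]
    num_blocks = sum(layers)
    stem = 32 / width_mult if width_mult < 1.0 else 32
    inplanes = input_ch / width_mult if width_mult < 1.0 else input_ch
    pairs = []
    for i in range(num_blocks):
        if i == 0:
            pairs.append((int(round(stem * width_mult)),
                          int(round(inplanes * width_mult))))
        else:
            in_c = int(round(inplanes * width_mult))
            inplanes += final_ch / num_blocks
            pairs.append((in_c, int(round(inplanes * width_mult))))
    return pairs

print(rexnet_channels()[:4])   # first few LinearBottleneck (in, out) pairs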
+ +# reference: https://arxiv.org/abs/1812.01187 & https://arxiv.org/abs/1709.01507 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "SE_ResNet18_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet18_vd_pretrained.pdparams", + "SE_ResNet34_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet34_vd_pretrained.pdparams", + "SE_ResNet50_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet50_vd_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + name=None, ): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + reduction_ratio=16, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + name=name + "_branch2c") + self.scale = SELayer( + num_channels=num_filters * 4, + num_filters=num_filters * 4, + reduction_ratio=reduction_ratio, + name='fc_' + name) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + scale = self.scale(conv2) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=scale) + y = F.relu(y) + return y + + +class BasicBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + reduction_ratio=16, + name=None): + super(BasicBlock, self).__init__() + self.stride = stride + 
self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + act=None, + name=name + "_branch2b") + + self.scale = SELayer( + num_channels=num_filters, + num_filters=num_filters, + reduction_ratio=reduction_ratio, + name='fc_' + name) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + scale = self.scale(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=scale) + y = F.relu(y) + return y + + +class SELayer(nn.Layer): + def __init__(self, num_channels, num_filters, reduction_ratio, name=None): + super(SELayer, self).__init__() + + self.pool2d_gap = AdaptiveAvgPool2D(1) + + self._num_channels = num_channels + + med_ch = int(num_channels / reduction_ratio) + stdv = 1.0 / math.sqrt(num_channels * 1.0) + self.squeeze = Linear( + num_channels, + med_ch, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_sqz_weights"), + bias_attr=ParamAttr(name=name + '_sqz_offset')) + + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.excitation = Linear( + med_ch, + num_filters, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_exc_weights"), + bias_attr=ParamAttr(name=name + '_exc_offset')) + + def forward(self, input): + pool = self.pool2d_gap(input) + pool = paddle.squeeze(pool, axis=[2, 3]) + squeeze = self.squeeze(pool) + squeeze = F.relu(squeeze) + excitation = self.excitation(squeeze) + excitation = F.sigmoid(excitation) + excitation = paddle.unsqueeze(excitation, axis=[2, 3]) + out = input * excitation + return out + + +class SE_ResNet_vd(nn.Layer): + def __init__(self, layers=50, class_num=1000): + super(SE_ResNet_vd, self).__init__() + + self.layers = layers + supported_layers = [18, 34, 50, 101, 152, 200] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + elif layers == 200: + depth = [3, 12, 48, 3] + num_channels = [64, 256, 512, + 1024] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512] + + self.conv1_1 = ConvBNLayer( + num_channels=3, + num_filters=32, + filter_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + num_channels=32, + num_filters=32, + filter_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + num_channels=32, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name="conv1_3") + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_list = [] + if layers >= 50: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + 
num_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name)) + self.block_list.append(bottleneck_block) + shortcut = True + else: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BasicBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block], + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name)) + self.block_list.append(basic_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc6_weights"), + bias_attr=ParamAttr(name="fc6_offset")) + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def SE_ResNet18_vd(pretrained=False, use_ssld=False, **kwargs): + model = SE_ResNet_vd(layers=18, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SE_ResNet18_vd"], use_ssld=use_ssld) + return model + + +def SE_ResNet34_vd(pretrained=False, use_ssld=False, **kwargs): + model = SE_ResNet_vd(layers=34, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SE_ResNet34_vd"], use_ssld=use_ssld) + return model + + +def SE_ResNet50_vd(pretrained=False, use_ssld=False, **kwargs): + model = SE_ResNet_vd(layers=50, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SE_ResNet50_vd"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/se_resnext.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/se_resnext.py new file mode 100644 index 000000000..79556356f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/se_resnext.py @@ -0,0 +1,369 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
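The SE_ResNet_vd blocks above pass each residual branch through an SELayer: a global average pool squeezes every channel to a scalar, two small Linear layers produce per-channel gates, and the input is rescaled channel-wise. A self-contained sketch of that computation with paddle (layer sizes chosen for illustration):

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

channels, reduction = 64, 16
squeeze = nn.Linear(channels, channels // reduction)
excite = nn.Linear(channels // reduction, channels)

x = paddle.randn([2, channels, 56, 56])
w = paddle.squeeze(nn.AdaptiveAvgPool2D(1)(x), axis=[2, 3])   # [2, 64] pooled stats
w = F.sigmoid(excite(F.relu(squeeze(w))))                     # per-channel gates in (0, 1)
out = x * paddle.unsqueeze(w, axis=[2, 3])                    # channel-wise rescaling
print(out.shape)                                              # [2, 64, 56, 56]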
+ +# reference: https://arxiv.org/abs/1611.05431 & https://arxiv.org/abs/1709.01507 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "SE_ResNeXt50_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt50_32x4d_pretrained.pdparams", + "SE_ResNeXt101_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt101_32x4d_pretrained.pdparams", + "SE_ResNeXt152_64x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt152_64x4d_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None, + data_format='NCHW'): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False, + data_format=data_format) + bn_name = name + '_bn' + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance', + data_layout=data_format) + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + cardinality, + reduction_ratio, + shortcut=True, + if_first=False, + name=None, + data_format="NCHW"): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name='conv' + name + '_x1', + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + groups=cardinality, + stride=stride, + act='relu', + name='conv' + name + '_x2', + data_format=data_format) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 2 if cardinality == 32 else num_filters, + filter_size=1, + act=None, + name='conv' + name + '_x3', + data_format=data_format) + self.scale = SELayer( + num_channels=num_filters * 2 if cardinality == 32 else num_filters, + num_filters=num_filters * 2 if cardinality == 32 else num_filters, + reduction_ratio=reduction_ratio, + name='fc' + name, + data_format=data_format) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 2 + if cardinality == 32 else num_filters, + filter_size=1, + stride=stride, + name='conv' + name + '_prj', + data_format=data_format) + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + scale = self.scale(conv2) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=scale) + y = F.relu(y) + return y + + +class SELayer(nn.Layer): + def __init__(self, + 
num_channels, + num_filters, + reduction_ratio, + name=None, + data_format="NCHW"): + super(SELayer, self).__init__() + + self.data_format = data_format + self.pool2d_gap = AdaptiveAvgPool2D(1, data_format=self.data_format) + + self._num_channels = num_channels + + med_ch = int(num_channels / reduction_ratio) + stdv = 1.0 / math.sqrt(num_channels * 1.0) + self.squeeze = Linear( + num_channels, + med_ch, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_sqz_weights"), + bias_attr=ParamAttr(name=name + '_sqz_offset')) + self.relu = nn.ReLU() + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.excitation = Linear( + med_ch, + num_filters, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_exc_weights"), + bias_attr=ParamAttr(name=name + '_exc_offset')) + self.sigmoid = nn.Sigmoid() + + def forward(self, input): + pool = self.pool2d_gap(input) + if self.data_format == "NHWC": + pool = paddle.squeeze(pool, axis=[1, 2]) + else: + pool = paddle.squeeze(pool, axis=[2, 3]) + squeeze = self.squeeze(pool) + squeeze = self.relu(squeeze) + excitation = self.excitation(squeeze) + excitation = self.sigmoid(excitation) + if self.data_format == "NHWC": + excitation = paddle.unsqueeze(excitation, axis=[1, 2]) + else: + excitation = paddle.unsqueeze(excitation, axis=[2, 3]) + out = input * excitation + return out + + +class ResNeXt(nn.Layer): + def __init__(self, + layers=50, + class_num=1000, + cardinality=32, + input_image_channel=3, + data_format="NCHW"): + super(ResNeXt, self).__init__() + + self.layers = layers + self.cardinality = cardinality + self.reduction_ratio = 16 + self.data_format = data_format + self.input_image_channel = input_image_channel + + supported_layers = [50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + supported_cardinality = [32, 64] + assert cardinality in supported_cardinality, \ + "supported cardinality is {} but input cardinality is {}" \ + .format(supported_cardinality, cardinality) + if layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_channels = [64, 256, 512, 1024] + num_filters = [128, 256, 512, + 1024] if cardinality == 32 else [256, 512, 1024, 2048] + if layers < 152: + self.conv = ConvBNLayer( + num_channels=self.input_image_channel, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + name="conv1", + data_format=self.data_format) + else: + self.conv1_1 = ConvBNLayer( + num_channels=self.input_image_channel, + num_filters=64, + filter_size=3, + stride=2, + act='relu', + name="conv1", + data_format=self.data_format) + self.conv1_2 = ConvBNLayer( + num_channels=64, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name="conv2", + data_format=self.data_format) + self.conv1_3 = ConvBNLayer( + num_channels=64, + num_filters=128, + filter_size=3, + stride=1, + act='relu', + name="conv3", + data_format=self.data_format) + + self.pool2d_max = MaxPool2D( + kernel_size=3, stride=2, padding=1, data_format=self.data_format) + + self.block_list = [] + n = 1 if layers == 50 or layers == 101 else 3 + for block in range(len(depth)): + n += 1 + shortcut = False + for i in range(depth[block]): + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] if i == 0 else + num_filters[block] * int(64 // self.cardinality), + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 
else 1, + cardinality=self.cardinality, + reduction_ratio=self.reduction_ratio, + shortcut=shortcut, + if_first=block == 0, + name=str(n) + '_' + str(i + 1), + data_format=self.data_format)) + self.block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1, data_format=self.data_format) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc6_weights"), + bias_attr=ParamAttr(name="fc6_offset")) + + def forward(self, inputs): + with paddle.static.amp.fp16_guard(): + return self._forward(inputs) + + def _forward(self, inputs): + if self.data_format == "NHWC": + inputs = paddle.tensor.transpose(inputs, [0, 2, 3, 1]) + inputs.stop_gradient = True + if self.layers < 152: + y = self.conv(inputs) + else: + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for i, block in enumerate(self.block_list): + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def SE_ResNeXt50_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=50, cardinality=32, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SE_ResNeXt50_32x4d"], use_ssld=use_ssld) + return model + + +def SE_ResNeXt101_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=101, cardinality=32, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SE_ResNeXt101_32x4d"], + use_ssld=use_ssld) + return model + + +def SE_ResNeXt152_64x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=152, cardinality=64, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SE_ResNeXt152_64x4d"], + use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/se_resnext_vd.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/se_resnext_vd.py new file mode 100644 index 000000000..659e190c8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/se_resnext_vd.py @@ -0,0 +1,311 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
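In the SE_ResNeXt bottleneck blocks above (se_resnext.py), the width of the final 1x1 convolution and of the SE layer depends on the cardinality: the 32x4d variants double num_filters, while the 64x4d variant keeps it, because its num_filters list is already twice as wide. A small sketch of that rule, for illustration only:

def block_out_channels(num_filters, cardinality):
    # mirrors `num_filters * 2 if cardinality == 32 else num_filters` above
    return num_filters * 2 if cardinality == 32 else num_filters

# first-stage widths as configured above
print(block_out_channels(128, 32))   # 256 for SE_ResNeXt50_32x4d
print(block_out_channels(256, 64))   # 256 for SE_ResNeXt152_64x4d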
+ +# reference: https://arxiv.org/abs/1611.05431 & https://arxiv.org/abs/1812.01187 & https://arxiv.org/abs/1709.01507 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform + +import math + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "SE_ResNeXt50_vd_32x4d": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt50_vd_32x4d_pretrained.pdparams", + "SENet154_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SENet154_vd_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + bn_name = name + '_bn' + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride, + cardinality, + reduction_ratio, + shortcut=True, + if_first=False, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + name='conv' + name + '_x1') + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + groups=cardinality, + stride=stride, + act='relu', + name='conv' + name + '_x2') + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 2 if cardinality == 32 else num_filters, + filter_size=1, + act=None, + name='conv' + name + '_x3') + self.scale = SELayer( + num_channels=num_filters * 2 if cardinality == 32 else num_filters, + num_filters=num_filters * 2 if cardinality == 32 else num_filters, + reduction_ratio=reduction_ratio, + name='fc' + name) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 2 + if cardinality == 32 else num_filters, + filter_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name='conv' + name + '_prj') + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + scale = self.scale(conv2) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=scale) + y = F.relu(y) + return y + + +class SELayer(nn.Layer): + def __init__(self, num_channels, num_filters, reduction_ratio, name=None): + super(SELayer, self).__init__() + + self.pool2d_gap = 
AdaptiveAvgPool2D(1) + + self._num_channels = num_channels + + med_ch = int(num_channels / reduction_ratio) + stdv = 1.0 / math.sqrt(num_channels * 1.0) + self.squeeze = Linear( + num_channels, + med_ch, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_sqz_weights"), + bias_attr=ParamAttr(name=name + '_sqz_offset')) + self.relu = nn.ReLU() + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.excitation = Linear( + med_ch, + num_filters, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name=name + "_exc_weights"), + bias_attr=ParamAttr(name=name + '_exc_offset')) + self.sigmoid = nn.Sigmoid() + + def forward(self, input): + pool = self.pool2d_gap(input) + pool = paddle.squeeze(pool, axis=[2, 3]) + squeeze = self.squeeze(pool) + squeeze = self.relu(squeeze) + excitation = self.excitation(squeeze) + excitation = self.sigmoid(excitation) + excitation = paddle.unsqueeze(excitation, axis=[2, 3]) + out = paddle.multiply(input, excitation) + return out + + +class ResNeXt(nn.Layer): + def __init__(self, layers=50, class_num=1000, cardinality=32): + super(ResNeXt, self).__init__() + + self.layers = layers + self.cardinality = cardinality + self.reduction_ratio = 16 + supported_layers = [50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + supported_cardinality = [32, 64] + assert cardinality in supported_cardinality, \ + "supported cardinality is {} but input cardinality is {}" \ + .format(supported_cardinality, cardinality) + if layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_channels = [128, 256, 512, 1024] + num_filters = [128, 256, 512, + 1024] if cardinality == 32 else [256, 512, 1024, 2048] + + self.conv1_1 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + num_channels=64, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + num_channels=64, + num_filters=128, + filter_size=3, + stride=1, + act='relu', + name="conv1_3") + + self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) + + self.block_list = [] + n = 1 if layers == 50 or layers == 101 else 3 + for block in range(len(depth)): + n += 1 + shortcut = False + for i in range(depth[block]): + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + num_channels=num_channels[block] if i == 0 else + num_filters[block] * int(64 // self.cardinality), + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=self.cardinality, + reduction_ratio=self.reduction_ratio, + shortcut=shortcut, + if_first=block == 0, + name=str(n) + '_' + str(i + 1))) + self.block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = AdaptiveAvgPool2D(1) + + self.pool2d_avg_channels = num_channels[-1] * 2 + + stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0) + + self.out = Linear( + self.pool2d_avg_channels, + class_num, + weight_attr=ParamAttr( + initializer=Uniform(-stdv, stdv), name="fc6_weights"), + bias_attr=ParamAttr(name="fc6_offset")) + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.pool2d_avg(y) + y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels]) + y = self.out(y) + return y + + 
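The SELayer above implements the usual squeeze-and-excite gating: a global average pool produces one descriptor per channel, a bottleneck MLP controlled by reduction_ratio compresses and re-expands it, and the resulting sigmoid gate rescales the feature map. A shape-only sketch, for illustration (not part of the patch):

import paddle
import paddle.nn.functional as F

N, C, H, W, r = 2, 256, 14, 14, 16
feat = paddle.randn([N, C, H, W])

vec = paddle.squeeze(paddle.nn.AdaptiveAvgPool2D(1)(feat), axis=[2, 3])  # [N, C]
hidden = F.relu(paddle.nn.Linear(C, C // r)(vec))                        # [N, C // r]
gate = F.sigmoid(paddle.nn.Linear(C // r, C)(hidden))                    # [N, C]
out = feat * paddle.unsqueeze(gate, axis=[2, 3])                         # [N, C, H, W]
print(out.shape)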
+def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def SE_ResNeXt50_vd_32x4d(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=50, cardinality=32, **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SE_ResNeXt50_vd_32x4d"], + use_ssld=use_ssld) + return model + + +def SENet154_vd(pretrained=False, use_ssld=False, **kwargs): + model = ResNeXt(layers=152, cardinality=64, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SENet154_vd"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/shufflenet_v2.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/shufflenet_v2.py new file mode 100644 index 000000000..e1058bb08 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/shufflenet_v2.py @@ -0,0 +1,364 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
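_load_pretrained, repeated in each of these backbone files, accepts exactly three kinds of `pretrained` values and raises a RuntimeError otherwise. A short sketch of that contract, for illustration only (the import path and local weight path are hypothetical):

from se_resnext_vd import SENet154_vd  # hypothetical import path

m1 = SENet154_vd(pretrained=False)                     # random initialization
m2 = SENet154_vd(pretrained=True)                      # fetch weights from MODEL_URLS
m3 = SENet154_vd(pretrained="./SENet154_vd.pdparams")  # load a local checkpoint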
+ +# reference: https://arxiv.org/abs/1807.11164 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +from paddle import ParamAttr, reshape, transpose, concat, split +from paddle.nn import Layer, Conv2D, MaxPool2D, AdaptiveAvgPool2D, BatchNorm, Linear +from paddle.nn.initializer import KaimingNormal +from paddle.nn.functional import swish + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "ShuffleNetV2_x0_25": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_25_pretrained.pdparams", + "ShuffleNetV2_x0_33": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_33_pretrained.pdparams", + "ShuffleNetV2_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_5_pretrained.pdparams", + "ShuffleNetV2_x1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_0_pretrained.pdparams", + "ShuffleNetV2_x1_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_5_pretrained.pdparams", + "ShuffleNetV2_x2_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x2_0_pretrained.pdparams", + "ShuffleNetV2_swish": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_swish_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +def channel_shuffle(x, groups): + batch_size, num_channels, height, width = x.shape[0:4] + channels_per_group = num_channels // groups + + # reshape + x = reshape( + x=x, shape=[batch_size, groups, channels_per_group, height, width]) + + # transpose + x = transpose(x=x, perm=[0, 2, 1, 3, 4]) + + # flatten + x = reshape(x=x, shape=[batch_size, num_channels, height, width]) + return x + + +class ConvBNLayer(Layer): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + groups=1, + act=None, + name=None, ): + super(ConvBNLayer, self).__init__() + self._conv = Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + weight_attr=ParamAttr( + initializer=KaimingNormal(), name=name + "_weights"), + bias_attr=False) + + self._batch_norm = BatchNorm( + out_channels, + param_attr=ParamAttr(name=name + "_bn_scale"), + bias_attr=ParamAttr(name=name + "_bn_offset"), + act=act, + moving_mean_name=name + "_bn_mean", + moving_variance_name=name + "_bn_variance") + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class InvertedResidual(Layer): + def __init__(self, + in_channels, + out_channels, + stride, + act="relu", + name=None): + super(InvertedResidual, self).__init__() + self._conv_pw = ConvBNLayer( + in_channels=in_channels // 2, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + groups=1, + act=act, + name='stage_' + name + '_conv1') + self._conv_dw = ConvBNLayer( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=3, + stride=stride, + padding=1, + groups=out_channels // 2, + act=None, + name='stage_' + name + '_conv2') + self._conv_linear = ConvBNLayer( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + groups=1, + act=act, + name='stage_' + name + '_conv3') + + def forward(self, inputs): + x1, x2 = split( + inputs, + num_or_sections=[inputs.shape[1] // 2, inputs.shape[1] // 2], + axis=1) + x2 = self._conv_pw(x2) + x2 = 
self._conv_dw(x2) + x2 = self._conv_linear(x2) + out = concat([x1, x2], axis=1) + return channel_shuffle(out, 2) + + +class InvertedResidualDS(Layer): + def __init__(self, + in_channels, + out_channels, + stride, + act="relu", + name=None): + super(InvertedResidualDS, self).__init__() + + # branch1 + self._conv_dw_1 = ConvBNLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=stride, + padding=1, + groups=in_channels, + act=None, + name='stage_' + name + '_conv4') + self._conv_linear_1 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + groups=1, + act=act, + name='stage_' + name + '_conv5') + # branch2 + self._conv_pw_2 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + groups=1, + act=act, + name='stage_' + name + '_conv1') + self._conv_dw_2 = ConvBNLayer( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=3, + stride=stride, + padding=1, + groups=out_channels // 2, + act=None, + name='stage_' + name + '_conv2') + self._conv_linear_2 = ConvBNLayer( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + groups=1, + act=act, + name='stage_' + name + '_conv3') + + def forward(self, inputs): + x1 = self._conv_dw_1(inputs) + x1 = self._conv_linear_1(x1) + x2 = self._conv_pw_2(inputs) + x2 = self._conv_dw_2(x2) + x2 = self._conv_linear_2(x2) + out = concat([x1, x2], axis=1) + + return channel_shuffle(out, 2) + + +class ShuffleNet(Layer): + def __init__(self, class_num=1000, scale=1.0, act="relu"): + super(ShuffleNet, self).__init__() + self.scale = scale + self.class_num = class_num + stage_repeats = [4, 8, 4] + + if scale == 0.25: + stage_out_channels = [-1, 24, 24, 48, 96, 512] + elif scale == 0.33: + stage_out_channels = [-1, 24, 32, 64, 128, 512] + elif scale == 0.5: + stage_out_channels = [-1, 24, 48, 96, 192, 1024] + elif scale == 1.0: + stage_out_channels = [-1, 24, 116, 232, 464, 1024] + elif scale == 1.5: + stage_out_channels = [-1, 24, 176, 352, 704, 1024] + elif scale == 2.0: + stage_out_channels = [-1, 24, 244, 488, 976, 2048] + else: + raise NotImplementedError("This scale size:[" + str(scale) + + "] is not implemented!") + # 1. conv1 + self._conv1 = ConvBNLayer( + in_channels=3, + out_channels=stage_out_channels[1], + kernel_size=3, + stride=2, + padding=1, + act=act, + name='stage1_conv') + self._max_pool = MaxPool2D(kernel_size=3, stride=2, padding=1) + + # 2. bottleneck sequences + self._block_list = [] + for stage_id, num_repeat in enumerate(stage_repeats): + for i in range(num_repeat): + if i == 0: + block = self.add_sublayer( + name=str(stage_id + 2) + '_' + str(i + 1), + sublayer=InvertedResidualDS( + in_channels=stage_out_channels[stage_id + 1], + out_channels=stage_out_channels[stage_id + 2], + stride=2, + act=act, + name=str(stage_id + 2) + '_' + str(i + 1))) + else: + block = self.add_sublayer( + name=str(stage_id + 2) + '_' + str(i + 1), + sublayer=InvertedResidual( + in_channels=stage_out_channels[stage_id + 2], + out_channels=stage_out_channels[stage_id + 2], + stride=1, + act=act, + name=str(stage_id + 2) + '_' + str(i + 1))) + self._block_list.append(block) + # 3. last_conv + self._last_conv = ConvBNLayer( + in_channels=stage_out_channels[-2], + out_channels=stage_out_channels[-1], + kernel_size=1, + stride=1, + padding=0, + act=act, + name='conv5') + # 4. 
pool + self._pool2d_avg = AdaptiveAvgPool2D(1) + self._out_c = stage_out_channels[-1] + # 5. fc + self._fc = Linear( + stage_out_channels[-1], + class_num, + weight_attr=ParamAttr(name='fc6_weights'), + bias_attr=ParamAttr(name='fc6_offset')) + + def forward(self, inputs): + y = self._conv1(inputs) + y = self._max_pool(y) + for inv in self._block_list: + y = inv(y) + y = self._last_conv(y) + y = self._pool2d_avg(y) + y = paddle.flatten(y, start_axis=1, stop_axis=-1) + y = self._fc(y) + return y + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ShuffleNetV2_x0_25(pretrained=False, use_ssld=False, **kwargs): + model = ShuffleNet(scale=0.25, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ShuffleNetV2_x0_25"], use_ssld=use_ssld) + return model + + +def ShuffleNetV2_x0_33(pretrained=False, use_ssld=False, **kwargs): + model = ShuffleNet(scale=0.33, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ShuffleNetV2_x0_33"], use_ssld=use_ssld) + return model + + +def ShuffleNetV2_x0_5(pretrained=False, use_ssld=False, **kwargs): + model = ShuffleNet(scale=0.5, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ShuffleNetV2_x0_5"], use_ssld=use_ssld) + return model + + +def ShuffleNetV2_x1_0(pretrained=False, use_ssld=False, **kwargs): + model = ShuffleNet(scale=1.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ShuffleNetV2_x1_0"], use_ssld=use_ssld) + return model + + +def ShuffleNetV2_x1_5(pretrained=False, use_ssld=False, **kwargs): + model = ShuffleNet(scale=1.5, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ShuffleNetV2_x1_5"], use_ssld=use_ssld) + return model + + +def ShuffleNetV2_x2_0(pretrained=False, use_ssld=False, **kwargs): + model = ShuffleNet(scale=2.0, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ShuffleNetV2_x2_0"], use_ssld=use_ssld) + return model + + +def ShuffleNetV2_swish(pretrained=False, use_ssld=False, **kwargs): + model = ShuffleNet(scale=1.0, act="swish", **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ShuffleNetV2_swish"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/squeezenet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/squeezenet.py new file mode 100644 index 000000000..5edfaa0ad --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/squeezenet.py @@ -0,0 +1,196 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
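channel_shuffle in shufflenet_v2.py above mixes the two branch outputs with a reshape-transpose-reshape; for groups=2 this interleaves the first and second halves of the channel axis. A worked example, for illustration only (not part of the patch):

import paddle

x = paddle.arange(8, dtype="float32").reshape([1, 8, 1, 1])  # channels 0..7
b, c, h, w = x.shape
groups = 2
y = (x.reshape([b, groups, c // groups, h, w])
      .transpose([0, 2, 1, 3, 4])
      .reshape([b, c, h, w]))
print(y.flatten().numpy())  # [0. 4. 1. 5. 2. 6. 3. 7.]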
+ +# reference: https://arxiv.org/abs/1709.01507 + +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "SqueezeNet1_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_0_pretrained.pdparams", + "SqueezeNet1_1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_1_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class MakeFireConv(nn.Layer): + def __init__(self, + input_channels, + output_channels, + filter_size, + padding=0, + name=None): + super(MakeFireConv, self).__init__() + self._conv = Conv2D( + input_channels, + output_channels, + filter_size, + padding=padding, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=ParamAttr(name=name + "_offset")) + + def forward(self, x): + x = self._conv(x) + x = F.relu(x) + return x + + +class MakeFire(nn.Layer): + def __init__(self, + input_channels, + squeeze_channels, + expand1x1_channels, + expand3x3_channels, + name=None): + super(MakeFire, self).__init__() + self._conv = MakeFireConv( + input_channels, squeeze_channels, 1, name=name + "_squeeze1x1") + self._conv_path1 = MakeFireConv( + squeeze_channels, expand1x1_channels, 1, name=name + "_expand1x1") + self._conv_path2 = MakeFireConv( + squeeze_channels, + expand3x3_channels, + 3, + padding=1, + name=name + "_expand3x3") + + def forward(self, inputs): + x = self._conv(inputs) + x1 = self._conv_path1(x) + x2 = self._conv_path2(x) + return paddle.concat([x1, x2], axis=1) + + +class SqueezeNet(nn.Layer): + def __init__(self, version, class_num=1000): + super(SqueezeNet, self).__init__() + self.version = version + + if self.version == "1.0": + self._conv = Conv2D( + 3, + 96, + 7, + stride=2, + weight_attr=ParamAttr(name="conv1_weights"), + bias_attr=ParamAttr(name="conv1_offset")) + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0) + self._conv1 = MakeFire(96, 16, 64, 64, name="fire2") + self._conv2 = MakeFire(128, 16, 64, 64, name="fire3") + self._conv3 = MakeFire(128, 32, 128, 128, name="fire4") + + self._conv4 = MakeFire(256, 32, 128, 128, name="fire5") + self._conv5 = MakeFire(256, 48, 192, 192, name="fire6") + self._conv6 = MakeFire(384, 48, 192, 192, name="fire7") + self._conv7 = MakeFire(384, 64, 256, 256, name="fire8") + + self._conv8 = MakeFire(512, 64, 256, 256, name="fire9") + else: + self._conv = Conv2D( + 3, + 64, + 3, + stride=2, + padding=1, + weight_attr=ParamAttr(name="conv1_weights"), + bias_attr=ParamAttr(name="conv1_offset")) + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0) + self._conv1 = MakeFire(64, 16, 64, 64, name="fire2") + self._conv2 = MakeFire(128, 16, 64, 64, name="fire3") + + self._conv3 = MakeFire(128, 32, 128, 128, name="fire4") + self._conv4 = MakeFire(256, 32, 128, 128, name="fire5") + + self._conv5 = MakeFire(256, 48, 192, 192, name="fire6") + self._conv6 = MakeFire(384, 48, 192, 192, name="fire7") + self._conv7 = MakeFire(384, 64, 256, 256, name="fire8") + self._conv8 = MakeFire(512, 64, 256, 256, name="fire9") + + self._drop = Dropout(p=0.5, mode="downscale_in_infer") + self._conv9 = Conv2D( + 512, + class_num, + 1, + weight_attr=ParamAttr(name="conv10_weights"), + bias_attr=ParamAttr(name="conv10_offset")) + self._avg_pool = AdaptiveAvgPool2D(1) + + def forward(self, inputs): + x = self._conv(inputs) + x = F.relu(x) + 
x = self._pool(x) + if self.version == "1.0": + x = self._conv1(x) + x = self._conv2(x) + x = self._conv3(x) + x = self._pool(x) + x = self._conv4(x) + x = self._conv5(x) + x = self._conv6(x) + x = self._conv7(x) + x = self._pool(x) + x = self._conv8(x) + else: + x = self._conv1(x) + x = self._conv2(x) + x = self._pool(x) + x = self._conv3(x) + x = self._conv4(x) + x = self._pool(x) + x = self._conv5(x) + x = self._conv6(x) + x = self._conv7(x) + x = self._conv8(x) + x = self._drop(x) + x = self._conv9(x) + x = F.relu(x) + x = self._avg_pool(x) + x = paddle.squeeze(x, axis=[2, 3]) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def SqueezeNet1_0(pretrained=False, use_ssld=False, **kwargs): + model = SqueezeNet(version="1.0", **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SqueezeNet1_0"], use_ssld=use_ssld) + return model + + +def SqueezeNet1_1(pretrained=False, use_ssld=False, **kwargs): + model = SqueezeNet(version="1.1", **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["SqueezeNet1_1"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/starnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/starnet.py new file mode 100644 index 000000000..4832be783 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/starnet.py @@ -0,0 +1,197 @@ +# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
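Each MakeFire block in squeezenet.py above first squeezes the input with a 1x1 convolution and then concatenates a 1x1 and a 3x3 expand path, so its output width is expand1x1_channels + expand3x3_channels regardless of the squeeze width. A quick sanity check of the fire2 configuration, for illustration only:

# fire2 in SqueezeNet 1.0 above: MakeFire(96, 16, 64, 64)
in_ch, squeeze_ch, expand1x1, expand3x3 = 96, 16, 64, 64
out_ch = expand1x1 + expand3x3  # channels after paddle.concat([x1, x2], axis=1)
print(out_ch)                   # 128, which is why fire3 is built with 128 input channels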
+ +# reference: https://arxiv.org/abs/2403.19967 + +import paddle +import paddle.nn as nn + +from ....utils.save_load import load_dygraph_pretrain +from ..model_zoo.vision_transformer import DropPath + +MODEL_URLS = { + "StarNet_S1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/StarNet_S1_pretrained.pdparams", + "StarNet_S2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/StarNet_S2_pretrained.pdparams", + "StarNet_S3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/StarNet_S3_pretrained.pdparams", + "StarNet_S4": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/StarNet_S4_pretrained.pdparams", +} + +__all__ = MODEL_URLS.keys() + +NET_CONFIG = { + "StarNet_S1": [24, [2, 2, 8, 3]], + "StarNet_S2": [32, [1, 2, 6, 2]], + "StarNet_S3": [32, [2, 2, 8, 4]], + "StarNet_S4": [32, [3, 3, 12, 5]], +} + + +class ConvBN(nn.Sequential): + def __init__(self, + in_planes, + out_planes, + kernel_size=1, + stride=1, + padding=0, + dilation=1, + groups=1, + with_bn=True): + super().__init__() + self.add_sublayer( + name='conv', + sublayer=nn.Conv2D( + in_channels=in_planes, + out_channels=out_planes, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups)) + if with_bn: + self.add_sublayer( + name='bn', sublayer=nn.BatchNorm2D(num_features=out_planes)) + init_Constant = nn.initializer.Constant(value=1) + init_Constant(self.bn.weight) + init_Constant = nn.initializer.Constant(value=0) + init_Constant(self.bn.bias) + + +class Block(nn.Layer): + def __init__(self, dim, mlp_ratio=3, drop_path=0.0): + super().__init__() + self.dwconv = ConvBN( + dim, dim, 7, 1, (7 - 1) // 2, groups=dim, with_bn=True) + self.f1 = ConvBN(dim, mlp_ratio * dim, 1, with_bn=False) + self.f2 = ConvBN(dim, mlp_ratio * dim, 1, with_bn=False) + self.g = ConvBN(mlp_ratio * dim, dim, 1, with_bn=True) + self.dwconv2 = ConvBN( + dim, dim, 7, 1, (7 - 1) // 2, groups=dim, with_bn=False) + self.act = nn.ReLU6() + self.drop_path = (DropPath(drop_path) + if drop_path > 0. else nn.Identity()) + + def forward(self, x): + input = x + x = self.dwconv(x) + x1, x2 = self.f1(x), self.f2(x) + x = self.act(x1) * x2 + x = self.dwconv2(self.g(x)) + x = input + self.drop_path(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError("pretrained type is not available. ") + + +class StarNet(nn.Layer): + """ + StarNet: StarNet for Image Classification + Args: + base_dim: int, base dimension of the model, default 32. + depths: list, number of blocks in each stage, default [3, 3, 12, 5]. + mlp_ratio: int, ratio of hidden dim to mlp_dim, default 4. + drop_path_rate: float, default 0.0, stochastic depth rate. + class_num: int, default 1000, number of classes. 
+ """ + def __init__(self, + base_dim=32, + depths=[3, 3, 12, 5], + mlp_ratio=4, + drop_path_rate=0.0, + class_num=1000, + **kwargs): + super().__init__() + self.class_num = class_num + self.in_channel = 32 + self.stem = nn.Sequential( + ConvBN( + 3, self.in_channel, kernel_size=3, stride=2, padding=1), + nn.ReLU6()) + dpr = [ + x.item() + for x in paddle.linspace( + start=0, stop=drop_path_rate, num=sum(depths)) + ] + self.stages = nn.LayerList() + cur = 0 + for i_layer in range(len(depths)): + embed_dim = base_dim * 2**i_layer + down_sampler = ConvBN(self.in_channel, embed_dim, 3, 2, 1) + self.in_channel = embed_dim + blocks = [ + Block(self.in_channel, mlp_ratio, dpr[cur + i]) + for i in range(depths[i_layer]) + ] + cur += depths[i_layer] + self.stages.append(nn.Sequential(down_sampler, *blocks)) + self.norm = nn.BatchNorm2D(num_features=self.in_channel) + self.avgpool = nn.AdaptiveAvgPool2D(output_size=1) + self.head = nn.Linear( + in_features=self.in_channel, out_features=class_num) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear or nn.Conv2D): + pass + if isinstance(m, nn.Linear) and m.bias is not None: + init_Constant = nn.initializer.Constant(value=0) + init_Constant(m.bias) + elif isinstance(m, nn.LayerNorm or nn.BatchNorm2D): + init_Constant = nn.initializer.Constant(value=0) + init_Constant(m.bias) + init_Constant = nn.initializer.Constant(value=1.0) + init_Constant(m.weight) + + def forward(self, x): + x = self.stem(x) + for stage in self.stages: + x = stage(x) + x = paddle.flatten(x=self.avgpool(self.norm(x)), start_axis=1) + return self.head(x) + + +def StarNet_S1(pretrained=False, use_ssld=False, **kwargs): + model = StarNet(*NET_CONFIG["StarNet_S1"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["StarNet_S1"], use_ssld) + return model + + +def StarNet_S2(pretrained=False, use_ssld=False, **kwargs): + model = StarNet(*NET_CONFIG["StarNet_S2"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["StarNet_S2"], use_ssld) + return model + + +def StarNet_S3(pretrained=False, use_ssld=False, **kwargs): + model = StarNet(*NET_CONFIG["StarNet_S3"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["StarNet_S3"], use_ssld) + return model + + +def StarNet_S4(pretrained=False, use_ssld=False, **kwargs): + model = StarNet(*NET_CONFIG["StarNet_S4"], **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["StarNet_S4"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/svtrnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/svtrnet.py new file mode 100644 index 000000000..1ee4ab9fc --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/svtrnet.py @@ -0,0 +1,699 @@ +from paddle import ParamAttr +from paddle.nn.initializer import KaimingNormal +import numpy as np +import paddle +import paddle.nn as nn +from paddle.nn.initializer import TruncatedNormal, Constant, Normal +from paddle.nn import functional as F + +trunc_normal_ = TruncatedNormal(std=.02) +normal_ = Normal +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) + +def resize_pos_embed(pos_embed, + src_shape, + dst_shape, + mode='bicubic', + num_extra_tokens=1): + """Resize pos_embed weights. + + Args: + pos_embed (paddle.Tensor): Position embedding weights with shape + [1, L, C]. + src_shape (tuple): The resolution of downsampled origin training + image, in format (H, W). 
+ dst_shape (tuple): The resolution of downsampled new training + image, in format (H, W). + mode (str): Algorithm used for upsampling. Choose one from 'nearest', + 'linear', 'bilinear', 'bicubic' and 'trilinear'. + Defaults to 'bicubic'. + num_extra_tokens (int): The number of extra tokens, such as cls_token. + Defaults to 1. + + Returns: + paddle.Tensor: The resized pos_embed of shape [1, L_new, C] + """ + if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1]: + return pos_embed + assert pos_embed.ndim == 3, 'shape of pos_embed must be [1, L, C]' + _, L, C = pos_embed.shape + src_h, src_w = src_shape + assert L == src_h * src_w + num_extra_tokens, \ + f"The length of `pos_embed` ({L}) doesn't match the expected " \ + f'shape ({src_h}*{src_w}+{num_extra_tokens}). Please check the' \ + '`img_size` argument.' + extra_tokens = pos_embed[:, :num_extra_tokens] + + src_weight = pos_embed[:, num_extra_tokens:] + src_weight = src_weight.reshape([-1, src_h, src_w, C]).transpose( + [0, 3, 1, 2]) + + # The cubic interpolate algorithm only accepts float32 + dst_weight = F.interpolate( + paddle.cast(src_weight, paddle.float32), + size=dst_shape, + align_corners=False, + mode=mode) + dst_weight = paddle.flatten(dst_weight, 2).transpose([0, 2, 1]) + dst_weight = paddle.cast(dst_weight, src_weight.dtype) + + return paddle.concat((extra_tokens, dst_weight), axis=1) + +def pading_for_not_divisible(pixel_values, + height, + width, + patch_size, + format="NCHW", + function="split"): + if isinstance(patch_size, int): + patch_size = (patch_size, patch_size) + if height % patch_size[0] == 0 and width % patch_size[1] == 0: + return pixel_values, (0, 0, 0, 0, 0, 0, 0, 0) + if function == "split": + pading_width = patch_size[1] - width % patch_size[1] + pading_height = patch_size[0] - height % patch_size[0] + elif function == "merge": + pading_width = width % 2 + pading_height = height % 2 + if format == "NCHW": + pad_index = [0, 0, 0, 0, 0, pading_height, 0, pading_width] + elif format == "NHWC": + pad_index = [0, 0, 0, pading_height, 0, pading_width, 0, 0] + else: + assert ("vaild format") + + return F.pad(pixel_values, pad_index), pad_index + +def drop_path(x, drop_prob=0., training=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + """ + if drop_prob == 0. 
or not training: + return x + keep_prob = paddle.to_tensor(1 - drop_prob, dtype=x.dtype) + shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) + random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype) + random_tensor = paddle.floor(random_tensor) # binarize + output = x.divide(keep_prob) * random_tensor + return output + + +class ConvBNLayer(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=0, + bias_attr=False, + groups=1, + act=nn.GELU): + super().__init__() + self.conv = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + weight_attr=paddle.ParamAttr( + initializer=nn.initializer.KaimingUniform()), + bias_attr=bias_attr) + self.norm = nn.BatchNorm2D(out_channels) + self.act = act() + + def forward(self, inputs): + out = self.conv(inputs) + out = self.norm(out) + out = self.act(out) + return out + + +class DropPath(nn.Layer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class Identity(nn.Layer): + def __init__(self): + super(Identity, self).__init__() + + def forward(self, input): + return input + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class ConvMixer(nn.Layer): + def __init__( + self, + dim, + num_heads=8, + HW=[8, 25], + local_k=[3, 3], ): + super().__init__() + self.HW = HW + self.dim = dim + self.local_mixer = nn.Conv2D( + dim, + dim, + local_k, + 1, [local_k[0] // 2, local_k[1] // 2], + groups=num_heads, + weight_attr=ParamAttr(initializer=KaimingNormal())) + + def forward(self, x, input_dimension): + h, w = input_dimension + x = x.transpose([0, 2, 1]).reshape([0, self.dim, h, w]) + x = self.local_mixer(x) + x = x.flatten(2).transpose([0, 2, 1]) + return x + + +class Attention(nn.Layer): + def __init__(self, + dim, + num_heads=8, + mixer='Global', + HW=None, + local_k=[7, 11], + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.num_heads = num_heads + self.dim = dim + self.head_dim = dim // num_heads + self.scale = qk_scale or self.head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.HW = HW + self.local_k = local_k + self.mixer = mixer + def get_mask(self,input_dimension): + if self.HW is not None: + H = input_dimension[0] + W = input_dimension[1] + self.N = H * W + self.C = self.dim + if self.mixer == 'Local' and self.HW is not None: + hk = self.local_k[0] + wk = self.local_k[1] + mask = paddle.ones( + [H * W, H + hk - 1, W + wk - 1], dtype='float32') + for h in range(0, H): + for w in range(0, W): + mask[h * W + w, h:h + hk, w:w + wk] = 0. 
+ mask_paddle = mask[:, hk // 2:H + hk // 2, wk // 2:W + wk // + 2].flatten(1) + mask_inf = paddle.full([H * W, H * W], '-inf', dtype='float32') + mask = paddle.where(mask_paddle < 1, mask_paddle, mask_inf) + return mask + return None + def forward(self, x, input_dimension): + qkv = self.qkv(x).reshape( + (0, -1, 3, self.num_heads, self.head_dim)).transpose( + (2, 0, 3, 1, 4)) + q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] + + attn = (q.matmul(k.transpose((0, 1, 3, 2)))) + if self.mixer == 'Local': + attn += self.get_mask(input_dimension) + attn = nn.functional.softmax(attn, axis=-1) + attn = self.attn_drop(attn) + + x = (attn.matmul(v)).transpose((0, 2, 1, 3)).reshape((0, -1, self.dim)) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Layer): + def __init__(self, + dim, + num_heads, + mixer='Global', + local_mixer=[7, 11], + HW=None, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer='nn.LayerNorm', + epsilon=1e-6, + prenorm=True): + super().__init__() + if isinstance(norm_layer, str): + self.norm1 = eval(norm_layer)(dim, epsilon=epsilon) + else: + self.norm1 = norm_layer(dim) + if mixer == 'Global' or mixer == 'Local': + self.mixer = Attention( + dim, + num_heads=num_heads, + mixer=mixer, + HW=HW, + local_k=local_mixer, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop) + elif mixer == 'Conv': + self.mixer = ConvMixer( + dim, num_heads=num_heads, HW=HW, local_k=local_mixer) + else: + raise TypeError("The mixer must be one of [Global, Local, Conv]") + + self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity() + if isinstance(norm_layer, str): + self.norm2 = eval(norm_layer)(dim, epsilon=epsilon) + else: + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp_ratio = mlp_ratio + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + self.prenorm = prenorm + + def forward(self, x, input_dimension): + if self.prenorm: + x = self.norm1(x + self.drop_path(self.mixer(x,input_dimension))) + x = self.norm2(x + self.drop_path(self.mlp(x))) + else: + x = x + self.drop_path(self.mixer(self.norm1(x),input_dimension)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Layer): + """ Image to Patch Embedding + """ + + def __init__(self, + img_size=[32, 100], + in_channels=3, + embed_dim=768, + sub_num=2, + patch_size=[4, 4], + mode='pope'): + super().__init__() + num_patches = (img_size[1] // (2 ** sub_num)) * \ + (img_size[0] // (2 ** sub_num)) + self.img_size = img_size + self.num_patches = num_patches + self.embed_dim = embed_dim + self.patch_size = patch_size + self.window_size = ((img_size[0] // (2 ** sub_num), (img_size[1] // (2 ** sub_num)))) + self.norm = None + if mode == 'pope': + if sub_num == 2: + self.proj = nn.Sequential( + ConvBNLayer( + in_channels=in_channels, + out_channels=embed_dim // 2, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=None), + ConvBNLayer( + in_channels=embed_dim // 2, + out_channels=embed_dim, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=None)) + if sub_num == 3: + self.proj = nn.Sequential( + ConvBNLayer( + in_channels=in_channels, + out_channels=embed_dim // 4, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=None), + ConvBNLayer( + in_channels=embed_dim // 4, + out_channels=embed_dim // 2, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, 
+ bias_attr=None), + ConvBNLayer( + in_channels=embed_dim // 2, + out_channels=embed_dim, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=None)) + elif mode == 'linear': + self.proj = nn.Conv2D( + 1, embed_dim, kernel_size=patch_size, stride=patch_size) + self.num_patches = img_size[0] // patch_size[0] * img_size[ + 1] // patch_size[1] + + def forward(self, x): + B, C, H, W = x.shape + + x, _ = pading_for_not_divisible(x, H, W, self.patch_size, "BCHW") + x = self.proj(x) + _, _, height, width = x.shape + output_dimensions = (height, width) + x = x.flatten(2).transpose((0, 2, 1)) + return x, output_dimensions + + +class SubSample(nn.Layer): + def __init__(self, + in_channels, + out_channels, + types='Pool', + stride=[2, 1], + sub_norm='nn.LayerNorm', + act=None): + super().__init__() + self.types = types + if types == 'Pool': + self.avgpool = nn.AvgPool2D( + kernel_size=[3, 5], stride=stride, padding=[1, 2]) + self.maxpool = nn.MaxPool2D( + kernel_size=[3, 5], stride=stride, padding=[1, 2]) + self.proj = nn.Linear(in_channels, out_channels) + else: + self.conv = nn.Conv2D( + in_channels, + out_channels, + kernel_size=3, + stride=stride, + padding=1, + weight_attr=ParamAttr(initializer=KaimingNormal())) + self.norm = eval(sub_norm)(out_channels) + if act is not None: + self.act = act() + else: + self.act = None + + def forward(self, x): + + if self.types == 'Pool': + x1 = self.avgpool(x) + x2 = self.maxpool(x) + x = (x1 + x2) * 0.5 + output_dimension = (x.shape[2],x.shape[3]) + out = self.proj(x.flatten(2).transpose((0, 2, 1))) + else: + x = self.conv(x) + output_dimension = (x.shape[2],x.shape[3]) + out = x.flatten(2).transpose((0, 2, 1)) + out = self.norm(out) + if self.act is not None: + out = self.act(out) + + return out, output_dimension + + +class SVTRNet(nn.Layer): + def __init__( + self, + class_num=1000, + img_size=[48, 320], + in_channels=3, + embed_dim=[192, 256, 512], + depth=[6, 6, 9], + num_heads=[6, 8, 16], + mixer=['Conv'] * 9 + ['Global'] * + 12, # Local atten, Global atten, Conv + local_mixer=[[5, 5], [5, 5], [5, 5]], + patch_merging='Conv', # Conv, Pool, None + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + last_drop=0.1, + attn_drop_rate=0., + drop_path_rate=0.1, + norm_layer='nn.LayerNorm', + sub_norm='nn.LayerNorm', + epsilon=1e-6, + out_channels=512, + out_char_num=40, + block_unit='Block', + act='nn.GELU', + last_stage=False, + sub_num=2, + prenorm=True, + use_lenhead=False, + **kwargs): + super().__init__() + self.img_size = img_size + self.embed_dim = embed_dim + self.out_channels = out_channels + self.prenorm = prenorm + patch_merging = None if patch_merging != 'Conv' and patch_merging != 'Pool' else patch_merging + self.patch_embed = PatchEmbed( + img_size=img_size, + in_channels=in_channels, + embed_dim=embed_dim[0], + sub_num=sub_num) + num_patches = self.patch_embed.num_patches + self.HW = [img_size[0] // (2**sub_num), img_size[1] // (2**sub_num)] + self.pos_embed = self.create_parameter( + shape=[1, num_patches, embed_dim[0]], default_initializer=zeros_) + self.add_parameter("pos_embed", self.pos_embed) + self.pos_drop = nn.Dropout(p=drop_rate) + Block_unit = eval(block_unit) + + dpr = np.linspace(0, drop_path_rate, sum(depth)) + self.blocks1 = nn.LayerList([ + Block_unit( + dim=embed_dim[0], + num_heads=num_heads[0], + mixer=mixer[0:depth[0]][i], + HW=self.HW, + local_mixer=local_mixer[0], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=eval(act), + attn_drop=attn_drop_rate, 
+ drop_path=dpr[0:depth[0]][i], + norm_layer=norm_layer, + epsilon=epsilon, + prenorm=prenorm) for i in range(depth[0]) + ]) + if patch_merging is not None: + self.sub_sample1 = SubSample( + embed_dim[0], + embed_dim[1], + sub_norm=sub_norm, + stride=[2, 1], + types=patch_merging) + HW = [self.HW[0] // 2, self.HW[1]] + else: + HW = self.HW + self.patch_merging = patch_merging + self.blocks2 = nn.LayerList([ + Block_unit( + dim=embed_dim[1], + num_heads=num_heads[1], + mixer=mixer[depth[0]:depth[0] + depth[1]][i], + HW=HW, + local_mixer=local_mixer[1], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=eval(act), + attn_drop=attn_drop_rate, + drop_path=dpr[depth[0]:depth[0] + depth[1]][i], + norm_layer=norm_layer, + epsilon=epsilon, + prenorm=prenorm) for i in range(depth[1]) + ]) + if patch_merging is not None: + self.sub_sample2 = SubSample( + embed_dim[1], + embed_dim[2], + sub_norm=sub_norm, + stride=[2, 1], + types=patch_merging) + HW = [self.HW[0] // 4, self.HW[1]] + else: + HW = self.HW + self.blocks3 = nn.LayerList([ + Block_unit( + dim=embed_dim[2], + num_heads=num_heads[2], + mixer=mixer[depth[0] + depth[1]:][i], + HW=HW, + local_mixer=local_mixer[2], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=eval(act), + attn_drop=attn_drop_rate, + drop_path=dpr[depth[0] + depth[1]:][i], + norm_layer=norm_layer, + epsilon=epsilon, + prenorm=prenorm) for i in range(depth[2]) + ]) + self.flatten = nn.Flatten(start_axis=0, stop_axis=1) + self.fc = nn.Linear(embed_dim[2], class_num) + + self.last_stage = last_stage + if last_stage: + self.avg_pool = nn.AdaptiveAvgPool2D([1, out_char_num]) + self.last_conv = nn.Conv2D( + in_channels=embed_dim[2], + out_channels=self.out_channels, + kernel_size=1, + stride=1, + padding=0, + bias_attr=False) + self.hardswish = nn.Hardswish() + self.dropout = nn.Dropout(p=last_drop, mode="downscale_in_infer") + if not prenorm: + self.norm = eval(norm_layer)(embed_dim[-1], epsilon=epsilon) + self.use_lenhead = use_lenhead + if use_lenhead: + self.len_conv = nn.Linear(embed_dim[2], self.out_channels) + self.hardswish_len = nn.Hardswish() + self.dropout_len = nn.Dropout( + p=last_drop, mode="downscale_in_infer") + + trunc_normal_(self.pos_embed) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + x,output_dimensions = self.patch_embed(x) + x = x + resize_pos_embed(self.pos_embed,self.patch_embed.window_size,output_dimensions,num_extra_tokens=0) + x = self.pos_drop(x) + for blk in self.blocks1: + x = blk(x, output_dimensions) + if self.patch_merging is not None: + x, output_dimensions = self.sub_sample1( + x.transpose([0, 2, 1]).reshape( + [0, self.embed_dim[0], output_dimensions[0], output_dimensions[1]])) + for blk in self.blocks2: + x = blk(x, output_dimensions) + if self.patch_merging is not None: + x, output_dimensions = self.sub_sample2( + x.transpose([0, 2, 1]).reshape( + [0, self.embed_dim[1], output_dimensions[0], output_dimensions[1]])) + for blk in self.blocks3: + x = blk(x, output_dimensions) + if not self.prenorm: + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = x.mean(1) + x = self.fc(x) + return x + + +def SVTR_tiny(pretrained=False, use_ssld=False, **kwargs): + model = 
SVTRNet( + img_size=[48, 320], + embed_dim=[64, 128, 256], + depth=[3, 6, 3], + num_heads=[2, 4, 8], + mixer=['Conv'] * 6 + ['Global'] * 6, + local_mixer=[[5, 5], [5, 5], [5, 5]], + mlp_ratio=4, + qkv_bias=True, + out_channels=256, + out_char_num=40, + epsilon=1e-6, + **kwargs) + return model + + +def SVTR_base(pretrained=False, use_ssld=False, **kwargs): + model = SVTRNet( + img_size=[48, 320], + embed_dim=[128, 256, 384], + depth=[6, 6, 6], + num_heads=[4, 8, 12], + mixer=['Conv'] * 9 + ['Global'] * 12, + local_mixer=[[5, 5], [5, 5], [5, 5]], + mlp_ratio=4, + qkv_bias=True, + out_channels=384, + out_char_num=40, + epsilon=1e-6, + **kwargs) + return model + + +def SVTR_large(pretrained=False, use_ssld=False, **kwargs): + model = SVTRNet( + img_size=[48, 320], + embed_dim=[192, 256, 512], + depth=[6, 6, 9], + num_heads=[6, 8, 16], + mixer=['Conv'] * 9 + ['Global'] * 12, + local_mixer=[[5, 5], [5, 5], [5, 5]], + mlp_ratio=4, + qkv_bias=True, + out_channels=512, + out_char_num=40, + epsilon=1e-6, + **kwargs) + return model \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/swin_transformer_v2.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/swin_transformer_v2.py new file mode 100644 index 000000000..d2f89446a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/swin_transformer_v2.py @@ -0,0 +1,1061 @@ +# copyright (c) 2023 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
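The SVTR_tiny / SVTR_base / SVTR_large factories above pair a per-block mixer schedule with the three stage depths; each stage slices its own segment of the mixer list, so the schedule must provide at least sum(depth) entries. A quick consistency check of the configurations above, for illustration only:

svtr_configs = {
    # name: (stage depths, mixer schedule length: Conv entries + Global entries)
    "SVTR_tiny":  ([3, 6, 3],  6 + 6),
    "SVTR_base":  ([6, 6, 6],  9 + 12),
    "SVTR_large": ([6, 6, 9],  9 + 12),
}
for name, (depth, n_mixer) in svtr_configs.items():
    assert n_mixer >= sum(depth), name
    print(name, "blocks:", sum(depth))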
+ +# Code was based on https://github.com/microsoft/Swin-Transformer +# reference: https://arxiv.org/abs/2111.09883 + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.initializer import TruncatedNormal, Constant, Normal +import numpy as np +import math + +from .vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity +from ..base.theseus_layer import TheseusLayer +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "SwinTransformerV2_tiny_patch4_window8_256": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformerV2_tiny_patch4_window8_256_pretrained.pdparams", + "SwinTransformerV2_tiny_patch4_window16_256": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformerV2_tiny_patch4_window16_256_pretrained.pdparams", + "SwinTransformerV2_small_patch4_window8_256": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformerV2_small_patch4_window8_256_pretrained.pdparams", + "SwinTransformerV2_small_patch4_window16_256": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformerV2_small_patch4_window16_256_pretrained.pdparams", + "SwinTransformerV2_base_patch4_window8_256": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformerV2_base_patch4_window8_256_pretrained.pdparams", + "SwinTransformerV2_base_patch4_window16_256": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformerV2_base_patch4_window16_256_pretrained.pdparams", + "SwinTransformerV2_base_patch4_window24_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformerV2_base_patch4_window24_384_pretrained.pdparams", + "SwinTransformerV2_large_patch4_window16_256": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformerV2_large_patch4_window16_256_pretrained.pdparams", + "SwinTransformerV2_large_patch4_window24_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformerV2_large_patch4_window24_384_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +def masked_fill(x, mask, value): + y = paddle.full(x.shape, value, x.dtype) + return paddle.where(mask, y, x) + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def masked_fill(x, mask, value): + y = paddle.full(x.shape, value, x.dtype) + return paddle.where(mask, y, x) + + +def pading_for_not_divisible(pixel_values, + height, + width, + patch_size, + format="BCHW", + function="split"): + if isinstance(patch_size, int): + patch_size = (patch_size, patch_size) + if function == "split": + pading_width = patch_size[1] - width % patch_size[1] + pading_height = patch_size[0] - height % patch_size[0] + elif function == "merge": + pading_width = width % 2 + pading_height = height % 2 + if format == "BCHW": + pad_index = (0, 0, 0, 0, 0, pading_height, 0, pading_width) + elif format == "BHWC": + pad_index = (0, 0, 0, pading_height, 0, pading_width, 0, 0) + else: + assert ("vaild format") + + return F.pad(pixel_values, 
pad_index), pad_index + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.reshape( + [B, H // window_size, window_size, W // window_size, window_size, C]) + windows = x.transpose(perm=[0, 1, 3, 2, 4, 5]).reshape( + [-1, window_size, window_size, C]) + return windows + + +def pad_patch(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.reshape( + [B, H // window_size, window_size, W // window_size, window_size, C]) + windows = x.transpose(perm=[0, 1, 3, 2, 4, 5]).reshape( + [-1, window_size, window_size, C]) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + C = windows.shape[-1] + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.reshape( + [-1, H // window_size, W // window_size, window_size, window_size, C]) + x = x.transpose(perm=[0, 1, 3, 2, 4, 5]).reshape([-1, H, W, C]) + return x + + +class WindowAttention(nn.Layer): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + pretrained_window_size (tuple[int]): The height and width of the window in pre-training. 
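+
+    Example (illustrative sketch; dim=96, window_size=(8, 8) and num_heads=3
+        are assumed values chosen only for the shapes, not defaults):
+
+        attn = WindowAttention(96, window_size=(8, 8), num_heads=3)
+        windows = paddle.randn([4, 8 * 8, 96])   # (num_windows*B, Wh*Ww, C)
+        out = attn(windows)                      # -> same shape, [4, 64, 96]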
+ """ + + def __init__(self, + dim, + window_size, + num_heads, + qkv_bias=True, + attn_drop=0., + proj_drop=0., + pretrained_window_size=[0, 0]): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.pretrained_window_size = pretrained_window_size + self.num_heads = num_heads + + self.logit_scale = self.create_parameter( + [num_heads, 1, 1], + dtype='float32', + default_initializer=Constant(math.log(10.))) + + # mlp to generate continuous relative position bias + self.cpb_mlp = nn.Sequential( + nn.Linear( + 2, 512, bias_attr=True), + nn.ReLU(), + nn.Linear( + 512, num_heads, bias_attr=False)) + + # get relative_coords_table + relative_coords_h = paddle.arange( + -(self.window_size[0] - 1), self.window_size[0], dtype='float32') + relative_coords_w = paddle.arange( + -(self.window_size[1] - 1), self.window_size[1], dtype='float32') + relative_coords_table = paddle.stack( + paddle.meshgrid([relative_coords_h, relative_coords_w])).transpose( + perm=[1, 2, 0]).unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 + if pretrained_window_size[0] > 0: + relative_coords_table[:, :, :, 0] /= ( + pretrained_window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= ( + pretrained_window_size[1] - 1) + else: + relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1) + relative_coords_table *= 8 # normalize to -8, 8 + relative_coords_table = paddle.sign( + relative_coords_table) * paddle.log2( + paddle.abs(relative_coords_table) + 1.0) / np.log2(8) + + self.register_buffer("relative_coords_table", relative_coords_table) + + # get pair-wise relative position index for each token inside the window + coords_h = paddle.arange(self.window_size[0]) + coords_w = paddle.arange(self.window_size[1]) + coords = paddle.stack(paddle.meshgrid( + [coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = paddle.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, + None] - coords_flatten[:, + None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.transpose( + perm=[1, 2, 0]) # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[ + 0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", + relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias_attr=False) + if qkv_bias: + self.q_bias = self.create_parameter( + [dim], dtype='float32', default_initializer=zeros_) + self.v_bias = self.create_parameter( + [dim], dtype='float32', default_initializer=zeros_) + else: + self.q_bias = None + self.v_bias = None + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.softmax = nn.Softmax(axis=-1) + + def forward(self, x, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = paddle.concat( + x=[self.q_bias, paddle.zeros_like(self.v_bias), self.v_bias]) + qkv = F.linear(x=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(shape=[ + B_, N, 3, self.num_heads, qkv.shape[-1] // (3 * self.num_heads) + ]).transpose(perm=[2, 0, 3, 1, 4]) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make paddlescript happy (cannot use tensor as tuple) + + # cosine 
attention + attn = (F.normalize( + q, axis=-1) @F.normalize( + k, axis=-1).transpose(perm=[0, 1, 3, 2])) + logit_scale = paddle.clip( + self.logit_scale, max=math.log(1. / 0.01)).exp() + attn = attn * logit_scale + + relative_position_bias_table = self.cpb_mlp( + self.relative_coords_table).reshape([-1, self.num_heads]) + relative_position_bias = relative_position_bias_table[ + self.relative_position_index.reshape([-1])].reshape([ + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], -1 + ]) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.transpose( + perm=[2, 0, 1]) # nH, Wh*Ww, Wh*Ww + relative_position_bias = 16 * F.sigmoid(relative_position_bias) + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.reshape([B_ // nW, nW, self.num_heads, N, N + ]) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.reshape([-1, self.num_heads, N, N]) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @v).transpose(perm=[0, 2, 1, 3]).reshape(shape=[B_, N, C]) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self): + return f'dim={self.dim}, window_size={self.window_size}, ' \ + f'pretrained_window_size={self.pretrained_window_size}, num_heads={self.num_heads}' + + def flops(self, N): + # calculate flops for 1 window with token length of N + flops = 0 + flops += N * self.dim * 3 * self.dim + flops += self.num_heads * N * (self.dim // self.num_heads) * N + flops += self.num_heads * N * N * (self.dim // self.num_heads) + flops += N * self.dim * self.dim + return flops + + +class SwinTransformerBlock(nn.Layer): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resulotion. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + pretrained_window_size (int): Window size in pre-training. 
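+
+    Intended call pattern (shapes only; dim=96, input_resolution=(8, 8) and
+        num_heads=3 are illustrative values, not defaults):
+
+        blk = SwinTransformerBlock(96, input_resolution=(8, 8), num_heads=3,
+                                   window_size=8)
+        out = blk(paddle.randn([2, 8 * 8, 96]))  # (B, H*W, C) in, same shape out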
+ """ + + def __init__(self, + dim, + input_resolution, + num_heads, + window_size=8, + shift_size=0, + mlp_ratio=4., + qkv_bias=True, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + pretrained_window_size=0): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, + window_size=to_2tuple(self.window_size), + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop, + pretrained_window_size=to_2tuple(pretrained_window_size)) + + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + """ + if self.shift_size > 0: + # calculate attention mask for SW-MSA + H, W = self.input_resolution + img_mask = paddle.zeros([1, H, W, 1]) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition( + img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.reshape( + shape=[-1, self.window_size * self.window_size]) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = masked_fill(attn_mask, attn_mask != 0, float(-100.0)) + attn_mask = masked_fill(attn_mask, attn_mask == 0, float(0.0)) + else: + """ + H, W = self.input_resolution + attn_mask = paddle.zeros([1, H, W, 1]) + + self.register_buffer("attn_mask", attn_mask) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + shortcut = x + + x = x.reshape([B, H, W, C]) + + # cyclic shift + if self.shift_size > 0: + shifted_x = paddle.roll( + x, shifts=(-self.shift_size, -self.shift_size), axis=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition( + shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.reshape( + [-1, self.window_size * self.window_size, + C]) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_mask = self.get_attn_mask(height_pad, width_pad, x.dtype) + attn_windows = self.attn( + x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.reshape( + [-1, self.window_size, self.window_size, C]) + shifted_x = window_reverse(attn_windows, self.window_size, height_pad, + width_pad) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = paddle.roll( + shifted_x, + shifts=(self.shift_size, self.shift_size), + axis=(1, 2)) + else: + x = shifted_x + + was_padded = pad_values[3] > 0 or pad_values[5] > 0 + if was_padded: + x = x[:, :H, :W, :] + + x = x.reshape([B, H * W, C]) + x = shortcut + 
self.drop_path(self.norm1(x)) + + # FFN + x = x + self.drop_path(self.norm2(self.mlp(x))) + return x + + def extra_repr(self): + return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ + f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # W-MSA/SW-MSA + nW = H * W / self.window_size / self.window_size + flops += nW * self.attn.flops(self.window_size * self.window_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + + +class PatchMerging(nn.Layer): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias_attr=False) + self.norm = norm_layer(2 * dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = input_dimensions + B, L, C = x.shape + + x = x.reshape([B, H // 2, 2, W // 2, 2, C]) + x = x.transpose((0, 1, 3, 4, 2, 5)) + x = x.reshape([B, H * W // 4, 4 * C]) # B H/2*W/2 4*C + x = self.reduction(x) + x = self.norm(x) + return x + + def extra_repr(self): + return f"input_resolution={self.input_resolution}, dim={self.dim}" + + def flops(self): + H, W = self.input_resolution + flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim + flops += H * W * self.dim // 2 + return flops + + +class BasicLayer(nn.Layer): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + pretrained_window_size (int): Local window size in pre-training. 
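+
+    Intended call pattern (shapes only; dim=96, input_resolution=(8, 8) and
+        depth=2 are illustrative values, not defaults):
+
+        layer = BasicLayer(dim=96, input_resolution=(8, 8), depth=2,
+                           num_heads=3, window_size=8)
+        x, (H, W) = layer(paddle.randn([2, 8 * 8, 96]), input_dimensions=(8, 8))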
+ """ + + def __init__(self, + dim, + input_resolution, + depth, + num_heads, + window_size, + mlp_ratio=4., + qkv_bias=True, + drop=0., + attn_drop=0., + drop_path=0., + norm_layer=nn.LayerNorm, + downsample=None, + pretrained_window_size=0): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + + # build blocks + self.blocks = nn.LayerList([ + SwinTransformerBlock( + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] + if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + pretrained_window_size=pretrained_window_size) + for i in range(depth) + ]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x, input_dimensions): + H, W = input_dimensions + for blk in self.blocks: + x = blk(x, input_dimensions) + if self.downsample is not None: + H, W = (H + 1) // 2, (W + 1) // 2 + x = self.downsample(x, input_dimensions) + + return x, (H, W) + + def extra_repr(self): + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + def flops(self): + flops = 0 + for blk in self.blocks: + flops += blk.flops() + if self.downsample is not None: + flops += self.downsample.flops() + return flops + + +class PatchEmbed(nn.Layer): + r""" Image to Patch Embedding + + Args: + img_size (int): Image size. Default: 256. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + + def __init__(self, + img_size=256, + patch_size=4, + in_chans=3, + embed_dim=96, + norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [ + img_size[0] // patch_size[0], img_size[1] // patch_size[1] + ] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv2D( + in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def maybe_pad(self, pixel_values, height, width): + if width % self.patch_size[1] != 0: + pad_values = (0, 0, 0, 0, 0, 0, 0, + self.patch_size[1] - width % self.patch_size[1]) + pixel_values = nn.functional.pad(pixel_values, pad_values) + if height % self.patch_size[0] != 0: + pad_values = ( + 0, + 0, + 0, + 0, + 0, + self.patch_size[0] - height % self.patch_size[0], + 0, + 0, ) + pixel_values = nn.functional.pad(pixel_values, pad_values) + return pixel_values + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
+ x = self.proj(x).flatten(2).transpose([0, 2, 1]) # B Ph*Pw C + if self.norm is not None: + x = self.norm(x) + return x, output_dimensions + + def flops(self): + Ho, Wo = self.patches_resolution + flops = Ho * Wo * self.embed_dim * self.in_chans * ( + self.patch_size[0] * self.patch_size[1]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops + + +class SwinTransformerV2(nn.Layer): + r""" Swin TransformerV2 + A PaddlePaddle impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` - + https://arxiv.org/abs/2111.09883 + + Args: + img_size (int | tuple(int)): Input image size. Default 256 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. Default: 3 + class_num (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Layer): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + pretrained_window_sizes (tuple(int)): Pretrained window sizes of each layer. + """ + + def __init__(self, + img_size=256, + patch_size=4, + in_chans=3, + class_num=1000, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4., + qkv_bias=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + norm_layer=nn.LayerNorm, + ape=False, + patch_norm=True, + pretrained_window_sizes=[0, 0, 0, 0], + **kwargs): + super().__init__() + + self.class_num = class_num + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.img_size = img_size + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2**(self.num_layers - 1)) + self.mlp_ratio = mlp_ratio + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = self.create_parameter( + shape=(1, num_patches, embed_dim), default_initializer=zeros_) + trunc_normal_(self.absolute_pos_embed) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [ + x.item() for x in paddle.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + + # build layers + self.layers = nn.LayerList() + for i_layer in range(self.num_layers): + layer = BasicLayer( + dim=int(embed_dim * 2**i_layer), + input_resolution=(patches_resolution[0] // (2**i_layer), + patches_resolution[1] // (2**i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, + 
attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging + if (i_layer < self.num_layers - 1) else None, + pretrained_window_size=pretrained_window_sizes[i_layer]) + self.layers.append(layer) + + self.norm = norm_layer(self.num_features) + self.avgpool = nn.AdaptiveAvgPool1D(1) + self.head = nn.Linear(self.num_features, + class_num) if class_num > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + x, output_dimensions = self.patch_embed(x) + if self.ape: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + + for layer in self.layers: + x, output_dimensions = layer(x, input_dimensions=output_dimensions) + + x = self.norm(x) # B L C + x = self.avgpool(x.transpose([0, 2, 1])) # B C 1 + x = paddle.flatten(x, 1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + def flops(self): + flops = 0 + flops += self.patch_embed.flops() + for i, layer in enumerate(self.layers): + flops += layer.flops() + flops += self.num_features * self.patches_resolution[ + 0] * self.patches_resolution[1] // (2**self.num_layers) + flops += self.num_features * self.class_num + return flops + + +def _load_pretrained(pretrained, + model, + model_url, + use_ssld=False, + use_imagenet22k_pretrained=False, + use_imagenet22kto1k_pretrained=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain( + model, + model_url, + use_ssld=use_ssld, + use_imagenet22k_pretrained=use_imagenet22k_pretrained, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained, **kwargs) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def SwinTransformerV2_tiny_patch4_window8_256(pretrained=False, + use_ssld=False, + **kwargs): + model = SwinTransformerV2( + img_size=256, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=8, + drop_path_rate=0.2, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformerV2_tiny_patch4_window8_256"], + use_ssld=use_ssld) + return model + + +def SwinTransformerV2_tiny_patch4_window16_256(pretrained=False, + use_ssld=False, + **kwargs): + model = SwinTransformerV2( + img_size=256, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=16, + drop_path_rate=0.2, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformerV2_tiny_patch4_window16_256"], + use_ssld=use_ssld) + return model + + +def SwinTransformerV2_small_patch4_window8_256(pretrained=False, + use_ssld=False, + **kwargs): + model = SwinTransformerV2( + img_size=256, + embed_dim=96, + depths=[2, 2, 18, 2], + num_heads=[3, 6, 12, 24], + window_size=8, + drop_path_rate=0.3, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformerV2_small_patch4_window8_256"], + use_ssld=use_ssld) + return model + + +def SwinTransformerV2_small_patch4_window16_256(pretrained=False, + use_ssld=False, + **kwargs): + model = SwinTransformerV2( + img_size=256, + embed_dim=96, + depths=[2, 2, 18, 2], + num_heads=[3, 6, 12, 24], + window_size=16, + drop_path_rate=0.3, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformerV2_small_patch4_window16_256"], + use_ssld=use_ssld) + return model + + +def SwinTransformerV2_base_patch4_window8_256(pretrained=False, + use_ssld=False, + **kwargs): + model = SwinTransformerV2( + img_size=256, + embed_dim=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=8, + drop_path_rate=0.5, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformerV2_base_patch4_window8_256"], + use_ssld=use_ssld) + return model + + +def SwinTransformerV2_base_patch4_window16_256( + pretrained=False, + use_ssld=False, + use_imagenet22k_pretrained=False, + use_imagenet22kto1k_pretrained=False, + **kwargs): + model = SwinTransformerV2( + img_size=256, + embed_dim=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=16, + drop_path_rate=0.5, # if use imagenet22k or imagenet22kto1k, drop_path_rate=0.2 + **kwargs + ) # if use imagenet22k, set pretrained_window_sizes=[12, 12, 12, 6] + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformerV2_base_patch4_window16_256"], + use_ssld=use_ssld, + use_imagenet22k_pretrained=use_imagenet22k_pretrained, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + return model + + +def SwinTransformerV2_base_patch4_window24_384( + pretrained=False, + use_ssld=False, + use_imagenet22k_pretrained=False, + use_imagenet22kto1k_pretrained=True, + **kwargs): + model = SwinTransformerV2( + img_size=384, + embed_dim=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=24, + drop_path_rate=0.2, + pretrained_window_sizes=[12, 12, 12, 6], + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformerV2_base_patch4_window24_384"], + use_ssld=use_ssld, + use_imagenet22k_pretrained=use_imagenet22k_pretrained, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + return model + + +def SwinTransformerV2_large_patch4_window16_256( + pretrained=False, + use_ssld=False, + use_imagenet22k_pretrained=False, + 
use_imagenet22kto1k_pretrained=True, + **kwargs): + model = SwinTransformerV2( + img_size=256, + embed_dim=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=16, + drop_path_rate=0.2, + pretrained_window_sizes=[12, 12, 12, 6], + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformerV2_large_patch4_window16_256"], + use_ssld=use_ssld, + use_imagenet22k_pretrained=use_imagenet22k_pretrained, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + return model + + +def SwinTransformerV2_large_patch4_window24_384( + pretrained=False, + use_ssld=False, + use_imagenet22k_pretrained=False, + use_imagenet22kto1k_pretrained=True, + **kwargs): + model = SwinTransformerV2( + img_size=384, + embed_dim=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=24, + drop_path_rate=0.2, + pretrained_window_sizes=[12, 12, 12, 6], + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["SwinTransformerV2_large_patch4_window24_384"], + use_ssld=use_ssld, + use_imagenet22k_pretrained=use_imagenet22k_pretrained, + use_imagenet22kto1k_pretrained=use_imagenet22kto1k_pretrained) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/tinynet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/tinynet.py new file mode 100644 index 000000000..484004615 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/tinynet.py @@ -0,0 +1,196 @@ +# copyright (c) 2023 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was based on https://gitee.com/mindspore/models/tree/master/research/cv/tinynet +# reference: https://arxiv.org/abs/2010.14819 + +import paddle.nn as nn + +from .efficientnet import EfficientNet, efficientnet +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "TinyNet_A": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TinyNet_A_pretrained.pdparams", + "TinyNet_B": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TinyNet_B_pretrained.pdparams", + "TinyNet_C": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TinyNet_C_pretrained.pdparams", + "TinyNet_D": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TinyNet_D_pretrained.pdparams", + "TinyNet_E": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TinyNet_E_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +def tinynet_params(model_name): + """ Map TinyNet model name to parameter coefficients. 
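+
+    Illustrative lookup (values come straight from params_dict below):
+
+        width, depth, resolution, dropout = tinynet_params("tinynet-b")
+        # -> (0.75, 1.100, 188, 0.2)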
""" + params_dict = { + # Coefficients: width,depth,resolution,dropout + "tinynet-a": (1.00, 1.200, 192, 0.2), + "tinynet-b": (0.75, 1.100, 188, 0.2), + "tinynet-c": (0.54, 0.850, 184, 0.2), + "tinynet-d": (0.54, 0.695, 152, 0.2), + "tinynet-e": (0.51, 0.600, 106, 0.2), + } + return params_dict[model_name] + + +def get_model_params(model_name, override_params): + """ Get the block args and global params for a given model """ + if model_name.startswith('tinynet'): + w, d, _, p = tinynet_params(model_name) + blocks_args, global_params = efficientnet( + width_coefficient=w, depth_coefficient=d, dropout_rate=p) + else: + raise NotImplementedError('model name is not pre-defined: %s' % + model_name) + if override_params: + global_params = global_params._replace(**override_params) + return blocks_args, global_params + + +class TinyNet(EfficientNet): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Conv2D): + fin_in = m.weight.shape[1] * m.weight.shape[2] * m.weight.shape[3] + std = (2 / fin_in)**0.5 + nn.initializer.Normal(std=std)(m.weight) + if m.bias is not None: + nn.initializer.Constant(0)(m.bias) + elif isinstance(m, nn.Linear): + fin_in = m.weight.shape[0] + bound = 1 / fin_in**0.5 + nn.initializer.Uniform(-bound, bound)(m.weight) + if m.bias is not None: + nn.initializer.Constant(0)(m.bias) + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def TinyNet_A(padding_type='DYNAMIC', + override_params=None, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("tinynet-a", override_params) + model = TinyNet( + block_args, + global_params, + name='a', + padding_type=padding_type, + use_se=use_se, + fix_stem=True, + num_features=1280, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["TinyNet_A"], use_ssld) + return model + + +def TinyNet_B(padding_type='DYNAMIC', + override_params=None, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("tinynet-b", override_params) + model = TinyNet( + block_args, + global_params, + name='b', + padding_type=padding_type, + use_se=use_se, + fix_stem=True, + num_features=1280, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["TinyNet_B"], use_ssld) + return model + + +def TinyNet_C(padding_type='DYNAMIC', + override_params=None, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("tinynet-c", override_params) + model = TinyNet( + block_args, + global_params, + name='c', + padding_type=padding_type, + use_se=use_se, + fix_stem=True, + num_features=1280, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["TinyNet_C"], use_ssld) + return model + + +def TinyNet_D(padding_type='DYNAMIC', + override_params=None, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("tinynet-d", override_params) + model = TinyNet( + block_args, + global_params, + name='d', + padding_type=padding_type, + use_se=use_se, + fix_stem=True, + num_features=1280, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["TinyNet_D"], use_ssld) + return model + + +def TinyNet_E(padding_type='DYNAMIC', + override_params=None, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs): + block_args, global_params = get_model_params("tinynet-e", override_params) + model = TinyNet( + block_args, + global_params, + name='e', + padding_type=padding_type, + use_se=use_se, + fix_stem=True, + num_features=1280, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["TinyNet_E"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/tnt.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/tnt.py new file mode 100644 index 000000000..a1841d367 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/tnt.py @@ -0,0 +1,410 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
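+
+# TNT ("Transformer in Transformer") keeps two token streams per image patch: an
+# inner transformer attends over the pixel-level sub-patches inside each patch,
+# while an outer transformer attends over the patch-level embeddings; each Block
+# below projects the inner tokens and adds them back into the outer tokens before
+# the outer attention step.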
+ +# Code was based on https://github.com/huawei-noah/CV-Backbones/tree/master/tnt_pytorch +# reference: https://arxiv.org/abs/2103.00112 + +import math +import numpy as np + +import paddle +import paddle.nn as nn +from paddle.nn.initializer import TruncatedNormal, Constant + +from ..base.theseus_layer import Identity +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "TNT_small": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TNT_small_pretrained.pdparams", + "TNT_base": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TNT_base_pretrained.pdparams" +} + +__all__ = MODEL_URLS.keys() + +trunc_normal_ = TruncatedNormal(std=.02) +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) + + +class Identity(nn.Layer): + def __init__(self): + super(Identity, self).__init__() + + def forward(self, inputs): + return inputs + + +def drop_path(x, drop_prob=0., training=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + """ + if drop_prob == 0. or not training: + return x + keep_prob = paddle.to_tensor(1 - drop_prob, dtype=x.dtype) + shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) + random_tensor = paddle.add(keep_prob, paddle.rand(shape, dtype=x.dtype)) + random_tensor = paddle.floor(random_tensor) # binarize + output = x.divide(keep_prob) * random_tensor + return output + + +class DropPath(nn.Layer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Layer): + def __init__(self, + dim, + hidden_dim, + num_heads=8, + qkv_bias=False, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.hidden_dim = hidden_dim + self.num_heads = num_heads + head_dim = hidden_dim // num_heads + self.head_dim = head_dim + self.scale = head_dim**-0.5 + + self.qk = nn.Linear(dim, hidden_dim * 2, bias_attr=qkv_bias) + self.v = nn.Linear(dim, dim, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qk = self.qk(x).reshape( + (B, N, 2, self.num_heads, self.head_dim)).transpose( + (2, 0, 3, 1, 4)) + + q, k = qk[0], qk[1] + v = self.v(x).reshape( + (B, N, self.num_heads, x.shape[-1] // self.num_heads)).transpose( + (0, 2, 1, 3)) + + attn = paddle.matmul(q, k.transpose((0, 1, 3, 2))) * self.scale + attn = nn.functional.softmax(attn, axis=-1) + attn = self.attn_drop(attn) + + x = paddle.matmul(attn, v) + x = x.transpose((0, 2, 1, 3)).reshape( + (B, N, x.shape[-1] * x.shape[-3])) + x = self.proj(x) + x = 
self.proj_drop(x) + return x + + +class Block(nn.Layer): + def __init__(self, + dim, + in_dim, + num_pixel, + num_heads=12, + in_num_head=4, + mlp_ratio=4., + qkv_bias=False, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm): + super().__init__() + # Inner transformer + self.norm_in = norm_layer(in_dim) + self.attn_in = Attention( + in_dim, + in_dim, + num_heads=in_num_head, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop) + + self.norm_mlp_in = norm_layer(in_dim) + self.mlp_in = Mlp(in_features=in_dim, + hidden_features=int(in_dim * 4), + out_features=in_dim, + act_layer=act_layer, + drop=drop) + + self.norm1_proj = norm_layer(in_dim * num_pixel) + self.proj = nn.Linear(in_dim * num_pixel, dim, bias_attr=False) + self.norm2_proj = norm_layer(in_dim * num_pixel) + + # Outer transformer + self.norm_out = norm_layer(dim) + self.attn_out = Attention( + dim, + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity() + + self.norm_mlp = norm_layer(dim) + self.mlp = Mlp(in_features=dim, + hidden_features=int(dim * mlp_ratio), + out_features=dim, + act_layer=act_layer, + drop=drop) + + def forward(self, pixel_embed, patch_embed): + # inner + pixel_embed = paddle.add( + pixel_embed, + self.drop_path(self.attn_in(self.norm_in(pixel_embed)))) + pixel_embed = paddle.add( + pixel_embed, + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed)))) + # outer + B, N, C = patch_embed.shape + norm1_proj = pixel_embed.reshape(shape=[B, N - 1, C]) + norm1_proj = self.norm1_proj(norm1_proj) + patch_embed[:, 1:] = paddle.add( + patch_embed[:, 1:], self.norm2_proj(self.proj(norm1_proj))) + patch_embed = paddle.add( + patch_embed, + self.drop_path(self.attn_out(self.norm_out(patch_embed)))) + patch_embed = paddle.add( + patch_embed, self.drop_path(self.mlp(self.norm_mlp(patch_embed)))) + return pixel_embed, patch_embed + + +class PixelEmbed(nn.Layer): + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + in_dim=48, + stride=4): + super().__init__() + self.patch_size = patch_size + num_patches = (img_size // patch_size)**2 + self.img_size = img_size + self.num_patches = num_patches + self.in_dim = in_dim + new_patch_size = math.ceil(patch_size / stride) + self.new_patch_size = new_patch_size + + self.proj = nn.Conv2D( + in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride) + + def forward(self, x, pixel_pos): + B, C, H, W = x.shape + assert H == self.img_size and W == self.img_size, f"Input image size ({H}*{W}) doesn't match model ({self.img_size}*{self.img_size})." 
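+        # unfold cuts the image into non-overlapping patch_size x patch_size tiles;
+        # each tile is then treated as a tiny image and convolved (kernel 7, stride
+        # `stride`) down to a grid of pixel tokens of width self.in_dim, to which
+        # the learned pixel_pos embedding is added.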
+ x = nn.functional.unfold(x, self.patch_size, self.patch_size) + x = x.transpose((0, 2, 1)).reshape( + (-1, C, self.patch_size, self.patch_size)) + x = self.proj(x) + x = x.reshape((-1, self.in_dim, self.patch_size)).transpose((0, 2, 1)) + x = x + pixel_pos + return x + + +class TNT(nn.Layer): + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + in_dim=48, + depth=12, + num_heads=12, + in_num_head=4, + mlp_ratio=4., + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + first_stride=4, + class_num=1000): + super().__init__() + self.class_num = class_num + # num_features for consistency with other models + self.num_features = self.embed_dim = embed_dim + + self.pixel_embed = PixelEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + in_dim=in_dim, + stride=first_stride) + num_patches = self.pixel_embed.num_patches + self.num_patches = num_patches + new_patch_size = self.pixel_embed.new_patch_size + num_pixel = new_patch_size**2 + + self.norm1_proj = norm_layer(num_pixel * in_dim) + self.proj = nn.Linear(num_pixel * in_dim, embed_dim) + self.norm2_proj = norm_layer(embed_dim) + + self.cls_token = self.create_parameter( + shape=(1, 1, embed_dim), default_initializer=zeros_) + self.add_parameter("cls_token", self.cls_token) + + self.patch_pos = self.create_parameter( + shape=(1, num_patches + 1, embed_dim), default_initializer=zeros_) + self.add_parameter("patch_pos", self.patch_pos) + + self.pixel_pos = self.create_parameter( + shape=(1, patch_size, in_dim), default_initializer=zeros_) + self.add_parameter("pixel_pos", self.pixel_pos) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, depth) + + blocks = [] + for i in range(depth): + blocks.append( + Block( + dim=embed_dim, + in_dim=in_dim, + num_pixel=num_pixel, + num_heads=num_heads, + in_num_head=in_num_head, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer)) + self.blocks = nn.LayerList(blocks) + self.norm = norm_layer(embed_dim) + + if class_num > 0: + self.head = nn.Linear(embed_dim, class_num) + + trunc_normal_(self.cls_token) + trunc_normal_(self.patch_pos) + trunc_normal_(self.pixel_pos) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + B = x.shape[0] + pixel_embed = self.pixel_embed(x, self.pixel_pos) + + patch_embed = self.norm2_proj( + self.proj( + self.norm1_proj( + pixel_embed.reshape((-1, self.num_patches, pixel_embed. 
+ shape[-1] * pixel_embed.shape[-2]))))) + patch_embed = paddle.concat( + (self.cls_token.expand((B, -1, -1)).astype(patch_embed.dtype), + patch_embed), + axis=1) + patch_embed = patch_embed + self.patch_pos + patch_embed = self.pos_drop(patch_embed) + for blk in self.blocks: + pixel_embed, patch_embed = blk(pixel_embed, patch_embed) + + patch_embed = self.norm(patch_embed) + return patch_embed[:, 0] + + def forward(self, x): + x = self.forward_features(x) + + if self.class_num > 0: + x = self.head(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def TNT_small(pretrained=False, use_ssld=False, **kwargs): + model = TNT(patch_size=16, + embed_dim=384, + in_dim=24, + depth=12, + num_heads=6, + in_num_head=4, + qkv_bias=False, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["TNT_small"], use_ssld=use_ssld) + return model + + +def TNT_base(pretrained=False, use_ssld=False, **kwargs): + model = TNT(patch_size=16, + embed_dim=640, + in_dim=40, + depth=12, + num_heads=10, + in_num_head=4, + qkv_bias=False, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["TNT_base"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/twins.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/twins.py new file mode 100644 index 000000000..3f983d962 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/twins.py @@ -0,0 +1,692 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
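+
+# Twins provides two families of backbones: PCPVT, a PVT-style pyramid transformer
+# whose absolute position embeddings are replaced by a convolutional position
+# encoding (PosCNN, the PEG below), and ALT-GVT, which interleaves locally-grouped
+# self-attention (GroupAttention, "LSA") with global sub-sampled attention
+# (Attention with a spatial-reduction ratio, "GSA") inside GroupBlock.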
+ +# Code was based on https://github.com/Meituan-AutoML/Twins +# reference: https://arxiv.org/abs/2104.13840 + +from functools import partial + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.regularizer import L2Decay + +from .vision_transformer import trunc_normal_, normal_, zeros_, ones_, to_2tuple, DropPath, Identity, Mlp +from .vision_transformer import Block as ViTBlock + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "pcpvt_small": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_small_pretrained.pdparams", + "pcpvt_base": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_base_pretrained.pdparams", + "pcpvt_large": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_large_pretrained.pdparams", + "alt_gvt_small": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_small_pretrained.pdparams", + "alt_gvt_base": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_base_pretrained.pdparams", + "alt_gvt_large": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_large_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class GroupAttention(nn.Layer): + """LSA: self attention within a group. + """ + + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + ws=1): + super().__init__() + if ws == 1: + raise Exception("ws {ws} should not be 1") + if dim % num_heads != 0: + raise Exception( + "dim {dim} should be divided by num_heads {num_heads}.") + + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.ws = ws + + def forward(self, x, H, W): + B, N, C = x.shape + h_group, w_group = H // self.ws, W // self.ws + total_groups = h_group * w_group + x = x.reshape([B, h_group, self.ws, w_group, self.ws, C]).transpose( + [0, 1, 3, 2, 4, 5]) + qkv = self.qkv(x).reshape([ + B, total_groups, self.ws**2, 3, self.num_heads, C // self.num_heads + ]).transpose([3, 0, 1, 4, 2, 5]) + q, k, v = qkv[0], qkv[1], qkv[2] + attn = paddle.matmul(q, k.transpose([0, 1, 2, 4, 3])) * self.scale + + attn = nn.Softmax(axis=-1)(attn) + attn = self.attn_drop(attn) + attn = paddle.matmul(attn, v).transpose([0, 1, 3, 2, 4]).reshape( + [B, h_group, w_group, self.ws, self.ws, C]) + + x = attn.transpose([0, 1, 3, 2, 4, 5]).reshape([B, N, C]) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Attention(nn.Layer): + """GSA: using a key to summarize the information for a group to be efficient. + """ + + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + sr_ratio=1): + super().__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
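+        # GSA: queries keep full resolution, while keys/values are computed on a
+        # feature map first downsampled by a conv with kernel = stride = sr_ratio,
+        # shrinking the attention matrix from (H*W) x (H*W) to
+        # (H*W) x (H*W / sr_ratio**2).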
+ + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.q = nn.Linear(dim, dim, bias_attr=qkv_bias) + self.kv = nn.Linear(dim, dim * 2, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = nn.Conv2D( + dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + + def forward(self, x, H, W): + B, N, C = x.shape + q = self.q(x).reshape( + [B, N, self.num_heads, C // self.num_heads]).transpose( + [0, 2, 1, 3]) + + if self.sr_ratio > 1: + x_ = x.transpose([0, 2, 1]).reshape([B, C, H, W]) + tmp_n = H * W // self.sr_ratio**2 + x_ = self.sr(x_).reshape([B, C, tmp_n]).transpose([0, 2, 1]) + x_ = self.norm(x_) + kv = self.kv(x_).reshape( + [B, tmp_n, 2, self.num_heads, C // self.num_heads]).transpose( + [2, 0, 3, 1, 4]) + else: + kv = self.kv(x).reshape( + [B, N, 2, self.num_heads, C // self.num_heads]).transpose( + [2, 0, 3, 1, 4]) + k, v = kv[0], kv[1] + + attn = paddle.matmul(q, k.transpose([0, 1, 3, 2])) * self.scale + attn = nn.Softmax(axis=-1)(attn) + attn = self.attn_drop(attn) + + x = paddle.matmul(attn, v).transpose([0, 2, 1, 3]).reshape([B, N, C]) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Layer): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + sr_ratio=1): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + sr_ratio=sr_ratio) + self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + def forward(self, x, H, W): + x = x + self.drop_path(self.attn(self.norm1(x), H, W)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class SBlock(ViTBlock): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + sr_ratio=1): + super().__init__(dim, num_heads, mlp_ratio, qkv_bias, qk_scale, drop, + attn_drop, drop_path, act_layer, norm_layer) + + def forward(self, x, H, W): + return super().forward(x) + + +class GroupBlock(ViTBlock): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + sr_ratio=1, + ws=1): + super().__init__(dim, num_heads, mlp_ratio, qkv_bias, qk_scale, drop, + attn_drop, drop_path, act_layer, norm_layer) + del self.attn + if ws == 1: + self.attn = Attention(dim, num_heads, qkv_bias, qk_scale, + attn_drop, drop, sr_ratio) + else: + self.attn = GroupAttention(dim, num_heads, qkv_bias, qk_scale, + attn_drop, drop, ws) + + def forward(self, x, H, W): + x = x + self.drop_path(self.attn(self.norm1(x), H, W)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Layer): + """ Image to Patch Embedding. 
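+
+    Illustrative shapes (img_size=224, patch_size=4, embed_dim=64 match the
+        smallest configs below, but any divisible combination works):
+
+        embed = PatchEmbed(img_size=224, patch_size=4, in_chans=3, embed_dim=64)
+        x, (H, W) = embed(paddle.randn([2, 3, 224, 224]))
+        # x: [2, 56 * 56, 64]; (H, W) == (56, 56)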
+ """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + if img_size % patch_size != 0: + raise Exception( + f"img_size {img_size} should be divided by patch_size {patch_size}." + ) + + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + self.img_size = img_size + self.patch_size = patch_size + self.H, self.W = img_size[0] // patch_size[0], img_size[ + 1] // patch_size[1] + self.num_patches = self.H * self.W + self.proj = nn.Conv2D( + in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = nn.LayerNorm(embed_dim) + + def forward(self, x): + B, C, H, W = x.shape + x = self.proj(x).flatten(2).transpose([0, 2, 1]) + x = self.norm(x) + H, W = H // self.patch_size[0], W // self.patch_size[1] + return x, (H, W) + + +# borrow from PVT https://github.com/whai362/PVT.git +class PyramidVisionTransformer(nn.Layer): + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + class_num=1000, + embed_dims=[64, 128, 256, 512], + num_heads=[1, 2, 4, 8], + mlp_ratios=[4, 4, 4, 4], + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + block_cls=Block): + super().__init__() + self.class_num = class_num + self.depths = depths + + # patch_embed + self.patch_embeds = nn.LayerList() + self.pos_embeds = nn.ParameterList() + self.pos_drops = nn.LayerList() + self.blocks = nn.LayerList() + + for i in range(len(depths)): + if i == 0: + self.patch_embeds.append( + PatchEmbed(img_size, patch_size, in_chans, embed_dims[i])) + else: + self.patch_embeds.append( + PatchEmbed(img_size // patch_size // 2**(i - 1), 2, + embed_dims[i - 1], embed_dims[i])) + patch_num = self.patch_embeds[i].num_patches + 1 if i == len( + embed_dims) - 1 else self.patch_embeds[i].num_patches + self.pos_embeds.append( + self.create_parameter( + shape=[1, patch_num, embed_dims[i]], + default_initializer=zeros_)) + self.pos_drops.append(nn.Dropout(p=drop_rate)) + + dpr = [ + float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + + cur = 0 + for k in range(len(depths)): + _block = nn.LayerList([ + block_cls( + dim=embed_dims[k], + num_heads=num_heads[k], + mlp_ratio=mlp_ratios[k], + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[cur + i], + norm_layer=norm_layer, + sr_ratio=sr_ratios[k]) for i in range(depths[k]) + ]) + self.blocks.append(_block) + cur += depths[k] + + self.norm = norm_layer(embed_dims[-1]) + + # cls_token + self.cls_token = self.create_parameter( + shape=[1, 1, embed_dims[-1]], + default_initializer=zeros_, + attr=paddle.ParamAttr(regularizer=L2Decay(0.0))) + + # classification head + self.head = nn.Linear(embed_dims[-1], + class_num) if class_num > 0 else Identity() + + # init weights + for pos_emb in self.pos_embeds: + trunc_normal_(pos_emb) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + B = x.shape[0] + for i in range(len(self.depths)): + x, (H, W) = self.patch_embeds[i](x) + if i == len(self.depths) - 1: + cls_tokens = self.cls_token.expand([B, -1, -1]).astype(x.dtype) + x = paddle.concat([cls_tokens, x], dim=1) + x = x + self.pos_embeds[i] + x = 
self.pos_drops[i](x) + for blk in self.blocks[i]: + x = blk(x, H, W) + if i < len(self.depths) - 1: + x = x.reshape([B, H, W, -1]).transpose( + [0, 3, 1, 2]).contiguous() + x = self.norm(x) + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +# PEG from https://arxiv.org/abs/2102.10882 +class PosCNN(nn.Layer): + def __init__(self, in_chans, embed_dim=768, s=1): + super().__init__() + self.proj = nn.Sequential( + nn.Conv2D( + in_chans, + embed_dim, + 3, + s, + 1, + bias_attr=paddle.ParamAttr(regularizer=L2Decay(0.0)), + groups=embed_dim, + weight_attr=paddle.ParamAttr(regularizer=L2Decay(0.0)), )) + self.s = s + + def forward(self, x, H, W): + B, N, C = x.shape + feat_token = x + cnn_feat = feat_token.transpose([0, 2, 1]).reshape([B, C, H, W]) + if self.s == 1: + x = self.proj(cnn_feat) + cnn_feat + else: + x = self.proj(cnn_feat) + x = x.flatten(2).transpose([0, 2, 1]) + return x + + +class CPVTV2(PyramidVisionTransformer): + """ + Use useful results from CPVT. PEG and GAP. + Therefore, cls token is no longer required. + PEG is used to encode the absolute position on the fly, which greatly affects the performance when input resolution + changes during the training (such as segmentation, detection) + """ + + def __init__(self, + img_size=224, + patch_size=4, + in_chans=3, + class_num=1000, + embed_dims=[64, 128, 256, 512], + num_heads=[1, 2, 4, 8], + mlp_ratios=[4, 4, 4, 4], + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + block_cls=Block): + super().__init__(img_size, patch_size, in_chans, class_num, embed_dims, + num_heads, mlp_ratios, qkv_bias, qk_scale, drop_rate, + attn_drop_rate, drop_path_rate, norm_layer, depths, + sr_ratios, block_cls) + del self.pos_embeds + del self.cls_token + self.pos_block = nn.LayerList( + [PosCNN(embed_dim, embed_dim) for embed_dim in embed_dims]) + self.apply(self._init_weights) + + def _init_weights(self, m): + import math + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + elif isinstance(m, nn.Conv2D): + fan_out = m._kernel_size[0] * m._kernel_size[1] * m._out_channels + fan_out //= m._groups + normal_(0, math.sqrt(2.0 / fan_out))(m.weight) + if m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2D): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + + def forward_features(self, x): + B = x.shape[0] + + for i in range(len(self.depths)): + x, (H, W) = self.patch_embeds[i](x) + x = self.pos_drops[i](x) + + for j, blk in enumerate(self.blocks[i]): + x = blk(x, H, W) + if j == 0: + x = self.pos_block[i](x, H, W) # PEG here + + if i < len(self.depths) - 1: + x = x.reshape([B, H, W, x.shape[-1]]).transpose([0, 3, 1, 2]) + + x = self.norm(x) + return x.mean(axis=1) # GAP here + + +class PCPVT(CPVTV2): + def __init__(self, + img_size=224, + patch_size=4, + in_chans=3, + class_num=1000, + embed_dims=[64, 128, 256], + num_heads=[1, 2, 4], + mlp_ratios=[4, 4, 4], + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + depths=[4, 4, 4], + sr_ratios=[4, 2, 1], + block_cls=SBlock): + super().__init__(img_size, patch_size, in_chans, class_num, embed_dims, + num_heads, mlp_ratios, qkv_bias, qk_scale, drop_rate, + attn_drop_rate, drop_path_rate, norm_layer, depths, + 
sr_ratios, block_cls) + + +class ALTGVT(PCPVT): + """ + alias Twins-SVT + """ + + def __init__(self, + img_size=224, + patch_size=4, + in_chans=3, + class_num=1000, + embed_dims=[64, 128, 256], + num_heads=[1, 2, 4], + mlp_ratios=[4, 4, 4], + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + depths=[4, 4, 4], + sr_ratios=[4, 2, 1], + block_cls=GroupBlock, + wss=[7, 7, 7]): + super().__init__(img_size, patch_size, in_chans, class_num, embed_dims, + num_heads, mlp_ratios, qkv_bias, qk_scale, drop_rate, + attn_drop_rate, drop_path_rate, norm_layer, depths, + sr_ratios, block_cls) + del self.blocks + self.wss = wss + # transformer encoder + dpr = [ + float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + cur = 0 + self.blocks = nn.LayerList() + for k in range(len(depths)): + _block = nn.LayerList([ + block_cls( + dim=embed_dims[k], + num_heads=num_heads[k], + mlp_ratio=mlp_ratios[k], + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[cur + i], + norm_layer=norm_layer, + sr_ratio=sr_ratios[k], + ws=1 if i % 2 == 1 else wss[k]) for i in range(depths[k]) + ]) + self.blocks.append(_block) + cur += depths[k] + self.apply(self._init_weights) + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def pcpvt_small(pretrained=False, use_ssld=False, **kwargs): + model = CPVTV2( + patch_size=4, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["pcpvt_small"], use_ssld=use_ssld) + return model + + +def pcpvt_base(pretrained=False, use_ssld=False, **kwargs): + model = CPVTV2( + patch_size=4, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 4, 18, 3], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["pcpvt_base"], use_ssld=use_ssld) + return model + + +def pcpvt_large(pretrained=False, use_ssld=False, **kwargs): + model = CPVTV2( + patch_size=4, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 8, 27, 3], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["pcpvt_large"], use_ssld=use_ssld) + return model + + +def alt_gvt_small(pretrained=False, use_ssld=False, **kwargs): + model = ALTGVT( + patch_size=4, + embed_dims=[64, 128, 256, 512], + num_heads=[2, 4, 8, 16], + mlp_ratios=[4, 4, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[2, 2, 10, 4], + wss=[7, 7, 7, 7], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["alt_gvt_small"], use_ssld=use_ssld) + return model + + +def alt_gvt_base(pretrained=False, use_ssld=False, **kwargs): + model = ALTGVT( + patch_size=4, + embed_dims=[96, 192, 384, 768], + 
num_heads=[3, 6, 12, 24], + mlp_ratios=[4, 4, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[2, 2, 18, 2], + wss=[7, 7, 7, 7], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["alt_gvt_base"], use_ssld=use_ssld) + return model + + +def alt_gvt_large(pretrained=False, use_ssld=False, **kwargs): + model = ALTGVT( + patch_size=4, + embed_dims=[128, 256, 512, 1024], + num_heads=[4, 8, 16, 32], + mlp_ratios=[4, 4, 4, 4], + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[2, 2, 18, 2], + wss=[7, 7, 7, 7], + sr_ratios=[8, 4, 2, 1], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["alt_gvt_large"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/uniformer.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/uniformer.py new file mode 100644 index 000000000..13e579c36 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/uniformer.py @@ -0,0 +1,552 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was based on https://github.com/Sense-X/UniFormer +# reference: https://arxiv.org/abs/2201.09450 + +from collections import OrderedDict +from functools import partial +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +import math +from .vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity, Mlp + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "UniFormer_small": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/UniFormer_small_pretrained.pdparams", + "UniFormer_small_plus": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/UniFormer_small_plus_pretrained.pdparams", + "UniFormer_small_plus_dim64": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/UniFormer_small_plus_dim64_pretrained.pdparams", + "UniFormer_base": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/UniFormer_base_pretrained.pdparams", + "UniFormer_base_ls": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/UniFormer_base_ls_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + +layer_scale = False +init_value = 1e-6 + + +class CMlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1_conv = nn.Conv2D(in_features, hidden_features, 1) + self.act = act_layer() + self.fc2_conv = nn.Conv2D(hidden_features, out_features, 1) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1_conv(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2_conv(x) + x = self.drop(x) + return x + + +class Attention(nn.Layer): + def __init__(self, + dim, + num_heads=8, + 
qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape( + shape=[B, N, 3, self.num_heads, C // self.num_heads]).transpose( + perm=[2, 0, 3, 1, 4]) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn = (q @k.transpose(perm=[0, 1, 3, 2])) * self.scale + attn = nn.Softmax(axis=-1)(attn) + attn = self.attn_drop(attn) + + x = (attn @v).transpose(perm=[0, 2, 1, 3]).reshape(shape=[B, N, C]) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class CBlock(nn.Layer): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm): + super().__init__() + self.pos_embed = nn.Conv2D(dim, dim, 3, padding=1, groups=dim) + self.norm1 = nn.BatchNorm2D(dim) + self.conv1 = nn.Conv2D(dim, dim, 1) + self.conv2 = nn.Conv2D(dim, dim, 1) + self.attn = nn.Conv2D(dim, dim, 5, padding=2, groups=dim) + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = nn.BatchNorm2D(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = CMlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + def forward(self, x): + x = x + self.pos_embed(x) + x = x + self.drop_path( + self.conv2(self.attn(self.conv1(self.norm1(x))))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class SABlock(nn.Layer): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm): + super().__init__() + self.pos_embed = nn.Conv2D(dim, dim, 3, padding=1, groups=dim) + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop) + self.drop_path = DropPath( + drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + global layer_scale + self.ls = layer_scale + if self.ls: + global init_value + print(f"Use layer_scale: {layer_scale}, init_values: {init_value}") + self.gamma_1 = self.create_parameter( + [dim], + dtype='float32', + default_initializer=nn.initializer.Constant(value=init_value)) + self.gamma_2 = self.create_parameter( + [dim], + dtype='float32', + default_initializer=nn.initializer.Constant(value=init_value)) + + def forward(self, x): + x = x + self.pos_embed(x) + B, N, H, W = x.shape + x = x.flatten(2).transpose(perm=[0, 2, 1]) + if self.ls: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + x = x.transpose(perm=[0, 2, 1]).reshape(shape=[B, N, H, W]) + return x + + +class HeadEmbedding(nn.Layer): + def __init__(self, in_channels, out_channels): + super().__init__() + + self.proj = nn.Sequential( + nn.Conv2D( + in_channels, + out_channels // 2, + kernel_size=(3, 3), + stride=(2, 2), + padding=(1, 1)), + nn.BatchNorm2D(out_channels // 2), + nn.GELU(), + nn.Conv2D( + out_channels // 2, + out_channels, + kernel_size=(3, 3), + stride=(2, 2), + padding=(1, 1)), + nn.BatchNorm2D(out_channels)) + + def forward(self, x): + x = self.proj(x) + return x + + +class MiddleEmbedding(nn.Layer): + def __init__(self, in_channels, out_channels): + super().__init__() + + self.proj = nn.Sequential( + nn.Conv2D( + in_channels, + out_channels, + kernel_size=(3, 3), + stride=(2, 2), + padding=(1, 1)), + nn.BatchNorm2D(out_channels)) + + def forward(self, x): + x = self.proj(x) + return x + + +class PatchEmbed(nn.Layer): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // + patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + self.norm = nn.LayerNorm(embed_dim) + self.proj_conv = nn.Conv2D( + in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
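+        # The stem below projects the image with a strided Conv2D, flattens the
+        # spatial grid into tokens so LayerNorm normalizes over the embedding dim,
+        # and then restores the B x C x H' x W' layout expected by the following blocks.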
+ x = self.proj_conv(x) + B, C, H, W = x.shape + x = x.flatten(2).transpose(perm=[0, 2, 1]) + x = self.norm(x) + x = x.reshape(shape=[B, H, W, C]).transpose(perm=[0, 3, 1, 2]) + return x + + +class UniFormer(nn.Layer): + """ UniFormer + A PaddlePaddle impl of : `UniFormer: Unifying Convolution and Self-attention for Visual Recognition` - + https://arxiv.org/abs/2201.09450 + """ + + def __init__(self, + depth=[3, 4, 8, 3], + img_size=224, + in_chans=3, + class_num=1000, + embed_dim=[64, 128, 320, 512], + head_dim=64, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + representation_size=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=None, + conv_stem=False): + """ + Args: + depth (list): depth of each stage + img_size (int, tuple): input image size + in_chans (int): number of input channels + class_num (int): number of classes for classification head + embed_dim (list): embedding dimension of each stage + head_dim (int): head dimension + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + qk_scale (float): override default qk scale of head_dim ** -0.5 if set + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer (nn.Module): normalization layer + conv_stem (bool): whether use overlapped patch stem + """ + super().__init__() + self.class_num = class_num + self.num_features = self.embed_dim = embed_dim + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + if conv_stem: + self.patch_embed1 = HeadEmbedding( + in_channels=in_chans, out_channels=embed_dim[0]) + self.patch_embed2 = MiddleEmbedding( + in_channels=embed_dim[0], out_channels=embed_dim[1]) + self.patch_embed3 = MiddleEmbedding( + in_channels=embed_dim[1], out_channels=embed_dim[2]) + self.patch_embed4 = MiddleEmbedding( + in_channels=embed_dim[2], out_channels=embed_dim[3]) + else: + self.patch_embed1 = PatchEmbed( + img_size=img_size, + patch_size=4, + in_chans=in_chans, + embed_dim=embed_dim[0]) + self.patch_embed2 = PatchEmbed( + img_size=img_size // 4, + patch_size=2, + in_chans=embed_dim[0], + embed_dim=embed_dim[1]) + self.patch_embed3 = PatchEmbed( + img_size=img_size // 8, + patch_size=2, + in_chans=embed_dim[1], + embed_dim=embed_dim[2]) + self.patch_embed4 = PatchEmbed( + img_size=img_size // 16, + patch_size=2, + in_chans=embed_dim[2], + embed_dim=embed_dim[3]) + + self.pos_drop = nn.Dropout(p=drop_rate) + dpr = [ + x.item() for x in paddle.linspace(0, drop_path_rate, sum(depth)) + ] # stochastic depth decay rule + num_heads = [dim // head_dim for dim in embed_dim] + self.blocks1 = nn.LayerList([ + CBlock( + dim=embed_dim[0], + num_heads=num_heads[0], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer) for i in range(depth[0]) + ]) + self.blocks2 = nn.LayerList([ + CBlock( + dim=embed_dim[1], + num_heads=num_heads[1], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i + depth[0]], + norm_layer=norm_layer) for i in range(depth[1]) + ]) + self.blocks3 = nn.LayerList([ + SABlock( + dim=embed_dim[2], + num_heads=num_heads[2], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i + 
depth[0] + depth[1]], + norm_layer=norm_layer) for i in range(depth[2]) + ]) + self.blocks4 = nn.LayerList([ + SABlock( + dim=embed_dim[3], + num_heads=num_heads[3], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i + depth[0] + depth[1] + depth[2]], + norm_layer=norm_layer) for i in range(depth[3]) + ]) + self.norm = nn.BatchNorm2D(embed_dim[-1]) + + # Representation layer + if representation_size: + self.num_features = representation_size + self.pre_logits = nn.Sequential( + OrderedDict([('fc', nn.Linear(embed_dim, representation_size)), + ('act', nn.Tanh())])) + else: + self.pre_logits = nn.Identity() + + # Classifier head + self.head = nn.Linear(embed_dim[-1], + class_num) if class_num > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + x = self.patch_embed1(x) + x = self.pos_drop(x) + for blk in self.blocks1: + x = blk(x) + x = self.patch_embed2(x) + for blk in self.blocks2: + x = blk(x) + x = self.patch_embed3(x) + for blk in self.blocks3: + x = blk(x) + x = self.patch_embed4(x) + for blk in self.blocks4: + x = blk(x) + x = self.norm(x) + x = self.pre_logits(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = x.flatten(2).mean(-1) + x = self.head(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def UniFormer_small(pretrained=True, use_ssld=False, **kwargs): + model = UniFormer( + depth=[3, 4, 8, 3], + embed_dim=[64, 128, 320, 512], + head_dim=64, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + drop_path_rate=0.1, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["UniFormer_small"], use_ssld=use_ssld) + return model + + +def UniFormer_small_plus(pretrained=True, use_ssld=False, **kwargs): + model = UniFormer( + depth=[3, 5, 9, 3], + conv_stem=True, + embed_dim=[64, 128, 320, 512], + head_dim=32, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + drop_path_rate=0.1, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["UniFormer_small_plus"], + use_ssld=use_ssld) + return model + + +def UniFormer_small_plus_dim64(pretrained=True, use_ssld=False, **kwargs): + model = UniFormer( + depth=[3, 5, 9, 3], + conv_stem=True, + embed_dim=[64, 128, 320, 512], + head_dim=64, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + drop_path_rate=0.1, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["UniFormer_small_plus_dim64"], + use_ssld=use_ssld) + return model + + +def UniFormer_base(pretrained=True, use_ssld=False, **kwargs): + model = UniFormer( + depth=[5, 8, 20, 7], + embed_dim=[64, 128, 320, 512], + head_dim=64, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + drop_path_rate=0.3, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["UniFormer_base"], use_ssld=use_ssld) + return model + + +def UniFormer_base_ls(pretrained=True, use_ssld=False, **kwargs): + global layer_scale + layer_scale = True + model = UniFormer( + depth=[5, 8, 20, 7], + embed_dim=[64, 128, 320, 512], + head_dim=64, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + drop_path_rate=0.3, + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["UniFormer_base_ls"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/van.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/van.py new file mode 100644 index 000000000..18ac7f7a1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/van.py @@ -0,0 +1,362 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
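+# Usage note for the UniFormer_* factories in uniformer.py above: UniFormer_base_ls
+# flips the module-level `layer_scale` flag before the model is built, so every
+# SABlock constructed afterwards adds the learnable gamma_1 / gamma_2 per-channel
+# scales. A minimal inference sketch (illustration only, assuming paddlepaddle is
+# installed and skipping the pretrained weight download):
+#
+#     import paddle
+#     model = UniFormer_small(pretrained=False, class_num=1000)
+#     model.eval()
+#     with paddle.no_grad():
+#         logits = model(paddle.randn([1, 3, 224, 224]))   # shape [1, 1000]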
+ +# Code was heavily based on https://github.com/Visual-Attention-Network/VAN-Classification +# reference: https://arxiv.org/abs/2202.09741 + +from functools import partial +import math +import paddle +import paddle.nn as nn +from paddle.nn.initializer import TruncatedNormal, Constant + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "VAN_B0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/VAN_B0_pretrained.pdparams", + "VAN_B1": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/VAN_B1_pretrained.pdparams", + "VAN_B2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/VAN_B2_pretrained.pdparams", + "VAN_B3": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/VAN_B3_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + +trunc_normal_ = TruncatedNormal(std=.02) +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) + + +def drop_path(x, drop_prob=0., training=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + """ + if drop_prob == 0. or not training: + return x + keep_prob = paddle.to_tensor(1 - drop_prob, dtype=x.dtype) + shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) + random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype) + random_tensor = paddle.floor(random_tensor) # binarize + output = x.divide(keep_prob) * random_tensor + return output + + +class DropPath(nn.Layer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +@paddle.jit.not_to_static +def swapdim(x, dim1, dim2): + a = list(range(len(x.shape))) + a[dim1], a[dim2] = a[dim2], a[dim1] + return x.transpose(a) + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2D(in_features, hidden_features, 1) + self.dwconv = DWConv(hidden_features) + self.act = act_layer() + self.fc2 = nn.Conv2D(hidden_features, out_features, 1) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.dwconv(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class LKA(nn.Layer): + def __init__(self, dim): + super().__init__() + self.conv0 = nn.Conv2D(dim, dim, 5, padding=2, groups=dim) + self.conv_spatial = nn.Conv2D( + dim, dim, 7, stride=1, padding=9, groups=dim, dilation=3) + self.conv1 = nn.Conv2D(dim, dim, 1) + + def forward(self, x): + attn = self.conv0(x) + attn = self.conv_spatial(attn) + attn = self.conv1(attn) + return x * attn + + +class Attention(nn.Layer): + def __init__(self, d_model): + super().__init__() + self.proj_1 = nn.Conv2D(d_model, d_model, 1) + self.activation = nn.GELU() + self.spatial_gating_unit = LKA(d_model) + self.proj_2 = nn.Conv2D(d_model, d_model, 1) + + def forward(self, x): + shorcut = x + x = self.proj_1(x) + x = self.activation(x) + x = self.spatial_gating_unit(x) + x = self.proj_2(x) + x = x + shorcut + return x + + +class Block(nn.Layer): + def __init__(self, + dim, + 
mlp_ratio=4., + drop=0., + drop_path=0., + act_layer=nn.GELU): + super().__init__() + self.norm1 = nn.BatchNorm2D(dim) + self.attn = Attention(dim) + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = nn.BatchNorm2D(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + layer_scale_init_value = 1e-2 + self.layer_scale_1 = self.create_parameter( + shape=[dim, 1, 1], + default_initializer=Constant(value=layer_scale_init_value)) + self.layer_scale_2 = self.create_parameter( + shape=[dim, 1, 1], + default_initializer=Constant(value=layer_scale_init_value)) + + def forward(self, x): + x = x + self.drop_path(self.layer_scale_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.layer_scale_2 * self.mlp(self.norm2(x))) + return x + + +class OverlapPatchEmbed(nn.Layer): + """ Image to Patch Embedding + """ + + def __init__(self, + img_size=224, + patch_size=7, + stride=4, + in_chans=3, + embed_dim=768): + super().__init__() + self.proj = nn.Conv2D( + in_chans, + embed_dim, + kernel_size=patch_size, + stride=stride, + padding=patch_size // 2) + self.norm = nn.BatchNorm2D(embed_dim) + + def forward(self, x): + x = self.proj(x) + _, _, H, W = x.shape + x = self.norm(x) + return x, H, W + + +class VAN(nn.Layer): + r""" VAN + A PaddlePaddle impl of : `Visual Attention Network` - + https://arxiv.org/pdf/2202.09741.pdf + """ + + def __init__(self, + img_size=224, + in_chans=3, + class_num=1000, + embed_dims=[64, 128, 256, 512], + mlp_ratios=[4, 4, 4, 4], + drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + depths=[3, 4, 6, 3], + num_stages=4, + flag=False): + super().__init__() + if flag == False: + self.class_num = class_num + self.depths = depths + self.num_stages = num_stages + + dpr = [x for x in paddle.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + cur = 0 + + for i in range(num_stages): + patch_embed = OverlapPatchEmbed( + img_size=img_size if i == 0 else img_size // (2**(i + 1)), + patch_size=7 if i == 0 else 3, + stride=4 if i == 0 else 2, + in_chans=in_chans if i == 0 else embed_dims[i - 1], + embed_dim=embed_dims[i]) + + block = nn.LayerList([ + Block( + dim=embed_dims[i], + mlp_ratio=mlp_ratios[i], + drop=drop_rate, + drop_path=dpr[cur + j]) for j in range(depths[i]) + ]) + norm = norm_layer(embed_dims[i]) + cur += depths[i] + + setattr(self, f"patch_embed{i + 1}", patch_embed) + setattr(self, f"block{i + 1}", block) + setattr(self, f"norm{i + 1}", norm) + + # classification head + self.head = nn.Linear(embed_dims[3], + class_num) if class_num > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + elif isinstance(m, nn.Conv2D): + fan_out = m._kernel_size[0] * m._kernel_size[1] * m._out_channels + fan_out //= m._groups + m.weight.set_value( + paddle.normal( + std=math.sqrt(2.0 / fan_out), shape=m.weight.shape)) + if m.bias is not None: + zeros_(m.bias) + + def forward_features(self, x): + B = x.shape[0] + + for i in range(self.num_stages): + patch_embed = getattr(self, f"patch_embed{i + 1}") + block = getattr(self, f"block{i + 1}") + norm = getattr(self, f"norm{i + 1}") + x, H, W = patch_embed(x) + for blk in block: + x = blk(x) + + x = x.flatten(2) + x = swapdim(x, 1, 2) + x = 
norm(x) + if i != self.num_stages - 1: + x = x.reshape([B, H, W, x.shape[2]]).transpose([0, 3, 1, 2]) + + return x.mean(axis=1) + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + + return x + + +class DWConv(nn.Layer): + def __init__(self, dim=768): + super().__init__() + self.dwconv = nn.Conv2D(dim, dim, 3, 1, 1, bias_attr=True, groups=dim) + + def forward(self, x): + x = self.dwconv(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def VAN_B0(pretrained=False, use_ssld=False, **kwargs): + model = VAN(embed_dims=[32, 64, 160, 256], + mlp_ratios=[8, 8, 4, 4], + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 3, 5, 2], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["VAN_B0"], use_ssld=use_ssld) + return model + + +def VAN_B1(pretrained=False, use_ssld=False, **kwargs): + model = VAN(embed_dims=[64, 128, 320, 512], + mlp_ratios=[8, 8, 4, 4], + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[2, 2, 4, 2], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["VAN_B1"], use_ssld=use_ssld) + return model + + +def VAN_B2(pretrained=False, use_ssld=False, **kwargs): + model = VAN(embed_dims=[64, 128, 320, 512], + mlp_ratios=[8, 8, 4, 4], + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 3, 12, 3], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["VAN_B2"], use_ssld=use_ssld) + return model + + +def VAN_B3(pretrained=False, use_ssld=False, **kwargs): + model = VAN(embed_dims=[64, 128, 320, 512], + mlp_ratios=[8, 8, 4, 4], + norm_layer=partial( + nn.LayerNorm, epsilon=1e-6), + depths=[3, 5, 27, 3], + **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["VAN_B3"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/vision_transformer.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/vision_transformer.py new file mode 100644 index 000000000..5a015702c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/vision_transformer.py @@ -0,0 +1,459 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
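+# Note on the drop_path / DropPath helpers that van.py (above) and
+# vision_transformer.py (below) both define: paddle.floor(keep_prob + paddle.rand(shape))
+# draws a Bernoulli(keep_prob) mask per example, and the kept activations are divided
+# by keep_prob, so the residual branch keeps its expectation:
+# E[out] = keep_prob * (x / keep_prob) + (1 - keep_prob) * 0 = x.
+# A rough sketch of the same computation (illustration only, outside of any model):
+#
+#     keep_prob = 0.9                                                # drop_prob = 0.1
+#     mask = paddle.floor(keep_prob + paddle.rand([4, 1, 1, 1]))     # 0. or 1. per example
+#     out = x / keep_prob * mask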
+ +# Code was based on https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +# reference: https://arxiv.org/abs/2010.11929 + +from collections.abc import Callable + +import numpy as np +import paddle +import paddle.nn as nn +from paddle.nn.initializer import TruncatedNormal, Constant, Normal + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "ViT_small_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_small_patch16_224_pretrained.pdparams", + "ViT_base_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch16_224_pretrained.pdparams", + "ViT_base_patch16_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch16_384_pretrained.pdparams", + "ViT_base_patch32_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch32_384_pretrained.pdparams", + "ViT_large_patch16_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch16_224_pretrained.pdparams", + "ViT_large_patch16_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch16_384_pretrained.pdparams", + "ViT_large_patch32_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch32_384_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + +trunc_normal_ = TruncatedNormal(std=.02) +normal_ = Normal +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) + + +def to_2tuple(x): + return tuple([x] * 2) + + +def drop_path(x, drop_prob=0., training=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + """ + if drop_prob == 0. or not training: + return x + keep_prob = paddle.full(shape=[], fill_value=1 - drop_prob, dtype=x.dtype) + shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) + random_tensor = keep_prob + paddle.rand(shape).astype(x.dtype) + random_tensor = paddle.floor(random_tensor) # binarize + output = x.divide(keep_prob) * random_tensor + return output + + +class DropPath(nn.Layer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
+ """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class Identity(nn.Layer): + def __init__(self): + super(Identity, self).__init__() + + def forward(self, input): + return input + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Layer): + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + # B= x.shape[0] + N, C = x.shape[1:] + qkv = self.qkv(x).reshape((-1, N, 3, self.num_heads, C // + self.num_heads)).transpose((2, 0, 3, 1, 4)) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn = (q.matmul(k.transpose((0, 1, 3, 2)))) * self.scale + attn = nn.functional.softmax(attn, axis=-1) + attn = self.attn_drop(attn) + + x = (attn.matmul(v)).transpose((0, 2, 1, 3)).reshape((-1, N, C)) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Layer): + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer='nn.LayerNorm', + epsilon=1e-5): + super().__init__() + if isinstance(norm_layer, str): + self.norm1 = eval(norm_layer)(dim, epsilon=epsilon) + elif isinstance(norm_layer, Callable): + self.norm1 = norm_layer(dim) + else: + raise TypeError( + "The norm_layer must be str or paddle.nn.layer.Layer class") + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else Identity() + if isinstance(norm_layer, str): + self.norm2 = eval(norm_layer)(dim, epsilon=epsilon) + elif isinstance(norm_layer, Callable): + self.norm2 = norm_layer(dim) + else: + raise TypeError( + "The norm_layer must be str or paddle.nn.layer.Layer class") + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Layer): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * \ + (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = nn.Conv2D( + in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + + x = self.proj(x).flatten(2).transpose((0, 2, 1)) + return x + + +class VisionTransformer(nn.Layer): + """ Vision Transformer with support for patch input + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + class_num=1000, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer='nn.LayerNorm', + epsilon=1e-5, + **kwargs): + super().__init__() + self.class_num = class_num + + self.num_features = self.embed_dim = embed_dim + + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.pos_embed = self.create_parameter( + shape=(1, num_patches + 1, embed_dim), default_initializer=zeros_) + self.add_parameter("pos_embed", self.pos_embed) + self.cls_token = self.create_parameter( + shape=(1, 1, embed_dim), default_initializer=zeros_) + self.add_parameter("cls_token", self.cls_token) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = np.linspace(0, drop_path_rate, depth) + + self.blocks = nn.LayerList([ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + epsilon=epsilon) for i in range(depth) + ]) + + self.norm = eval(norm_layer)(embed_dim, epsilon=epsilon) + + # Classifier head + self.head = nn.Linear(embed_dim, + class_num) if class_num > 0 else Identity() + + trunc_normal_(self.pos_embed) + trunc_normal_(self.cls_token) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + # B = x.shape[0] + B = x.shape[0] + x = self.patch_embed(x) + cls_tokens = self.cls_token.expand((B, -1, -1)).astype(x.dtype) + x = paddle.concat((cls_tokens, x), axis=1) + x = x + self.pos_embed + x = self.pos_drop(x) + for blk in self.blocks: + x = blk(x) + x = self.norm(x) + return x[:, 0] + + def forward(self, x): + x = 
self.forward_features(x) + x = self.head(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ViT_small_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=768, + depth=8, + num_heads=8, + mlp_ratio=3, + qk_scale=768**-0.5, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_small_patch16_224"], + use_ssld=use_ssld) + return model + + +def ViT_base_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_base_patch16_224"], + use_ssld=use_ssld) + return model + + +def ViT_base_patch16_384(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + img_size=384, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_base_patch16_384"], + use_ssld=use_ssld) + return model + + +def ViT_base_patch32_384(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + img_size=384, + patch_size=32, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_base_patch32_384"], + use_ssld=use_ssld) + return model + + +def ViT_large_patch16_224(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_large_patch16_224"], + use_ssld=use_ssld) + return model + + +def ViT_large_patch16_384(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + img_size=384, + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_large_patch16_384"], + use_ssld=use_ssld) + return model + + +def ViT_large_patch32_384(pretrained=False, use_ssld=False, **kwargs): + model = VisionTransformer( + img_size=384, + patch_size=32, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + epsilon=1e-6, + **kwargs) + _load_pretrained( + pretrained, + model, + MODEL_URLS["ViT_large_patch32_384"], + use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/wideresnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/wideresnet.py new file mode 100644 index 000000000..8efd3220b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/wideresnet.py @@ -0,0 +1,236 @@ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle import ParamAttr +from ..base.theseus_layer import TheseusLayer +""" +backbone option "WideResNet" +code in this file is adpated from 
+https://github.com/kekmodel/FixMatch-pytorch/blob/master/models/wideresnet.py +thanks! +""" + + +def mish(x): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function (https://arxiv.org/abs/1908.08681)""" + return x * paddle.tanh(F.softplus(x)) + + +class PSBatchNorm2D(nn.BatchNorm2D): + """How Does BN Increase Collapsed Neural Network Filters? (https://arxiv.org/abs/2001.11216)""" + + def __init__(self, + num_features, + alpha=0.1, + eps=1e-05, + momentum=0.999, + weight_attr=None, + bias_attr=None): + super().__init__(num_features, momentum, eps, weight_attr, bias_attr) + self.alpha = alpha + + def forward(self, x): + return super().forward(x) + self.alpha + + +class BasicBlock(nn.Layer): + def __init__(self, + in_planes, + out_planes, + stride, + drop_rate=0.0, + activate_before_residual=False): + super(BasicBlock, self).__init__() + self.bn1 = nn.BatchNorm2D(in_planes, momentum=0.999) + self.relu1 = nn.LeakyReLU(negative_slope=0.1) + self.conv1 = nn.Conv2D( + in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=1, + bias_attr=False) + self.bn2 = nn.BatchNorm2D(out_planes, momentum=0.999) + self.relu2 = nn.LeakyReLU(negative_slope=0.1) + self.conv2 = nn.Conv2D( + out_planes, + out_planes, + kernel_size=3, + stride=1, + padding=1, + bias_attr=False) + self.drop_rate = drop_rate + self.equalInOut = (in_planes == out_planes) + self.convShortcut = (not self.equalInOut) and nn.Conv2D( + in_planes, + out_planes, + kernel_size=1, + stride=stride, + padding=0, + bias_attr=False) or None + self.activate_before_residual = activate_before_residual + + def forward(self, x): + if not self.equalInOut and self.activate_before_residual == True: + x = self.relu1(self.bn1(x)) + else: + out = self.relu1(self.bn1(x)) + out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x))) + if self.drop_rate > 0: + out = F.dropout(out, p=self.drop_rate, training=self.training) + out = self.conv2(out) + return paddle.add(x if self.equalInOut else self.convShortcut(x), out) + + +class NetworkBlock(nn.Layer): + def __init__(self, + nb_layers, + in_planes, + out_planes, + block, + stride, + drop_rate=0.0, + activate_before_residual=False): + super(NetworkBlock, self).__init__() + self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, + stride, drop_rate, + activate_before_residual) + + def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, + drop_rate, activate_before_residual): + layers = [] + for i in range(int(nb_layers)): + layers.append( + block(i == 0 and in_planes or out_planes, out_planes, i == 0 + and stride or 1, drop_rate, activate_before_residual)) + return nn.Sequential(*layers) + + def forward(self, x): + return self.layer(x) + + +class Normalize(nn.Layer): + """ Ln normalization copied from + https://github.com/salesforce/CoMatch + """ + + def __init__(self, power=2): + super(Normalize, self).__init__() + self.power = power + + def forward(self, x): + norm = x.pow(self.power).sum(1, keepdim=True).pow(1. 
/ self.power) + out = x.divide(norm) + return out + + +class Wide_ResNet(TheseusLayer): + def __init__(self, + num_classes, + depth=28, + widen_factor=2, + drop_rate=0.0, + proj=False, + proj_after=False, + low_dim=64): + super(Wide_ResNet, self).__init__() + # prepare self values + self.widen_factor = widen_factor + self.depth = depth + self.drop_rate = drop_rate + # if use projection head + self.proj = proj + # if use the output of projection head for classification + self.proj_after = proj_after + self.low_dim = low_dim + channels = [ + 16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor + ] + assert ((depth - 4) % 6 == 0) + n = (depth - 4) / 6 + block = BasicBlock + # 1st conv before any network block + self.conv1 = nn.Conv2D( + 3, + channels[0], + kernel_size=3, + stride=1, + padding=1, + bias_attr=False) + # 1st block + self.block1 = NetworkBlock( + n, + channels[0], + channels[1], + block, + 1, + drop_rate, + activate_before_residual=True) + # 2nd block + self.block2 = NetworkBlock(n, channels[1], channels[2], block, 2, + drop_rate) + # 3rd block + self.block3 = NetworkBlock(n, channels[2], channels[3], block, 2, + drop_rate) + # global average pooling and classifier + self.bn1 = nn.BatchNorm2D(channels[3], momentum=0.999) + self.relu = nn.LeakyReLU(negative_slope=0.1) + + # if proj after means we classify after projection head + # so we must change the in channel to low_dim of laster fc + if self.proj_after: + self.fc = nn.Linear(self.low_dim, num_classes) + else: + self.fc = nn.Linear(channels[3], num_classes) + self.channels = channels[3] + # projection head + if self.proj: + self.l2norm = Normalize(2) + + self.fc1 = nn.Linear(64 * self.widen_factor, + 64 * self.widen_factor) + self.relu_mlp = nn.LeakyReLU(negative_slope=0.1) + self.fc2 = nn.Linear(64 * self.widen_factor, self.low_dim) + + def forward(self, x): + feat = self.conv1(x) + feat = self.block1(feat) + feat = self.block2(feat) + feat = self.block3(feat) + feat = self.relu(self.bn1(feat)) + feat = F.adaptive_avg_pool2d(feat, 1) + feat = paddle.reshape(feat, [-1, self.channels]) + if self.proj: + pfeat = self.fc1(feat) + pfeat = self.relu_mlp(pfeat) + pfeat = self.fc2(pfeat) + pfeat = self.l2norm(pfeat) + + # if projection after classifiy, we classify last + if self.proj_after: + out = self.fc(pfeat) + else: + out = self.fc(feat) + + return out, pfeat + + # output + out = self.fc(feat) + return out + + +def WideResNet(depth, + widen_factor, + dropout, + num_classes, + proj=False, + low_dim=64, + **kwargs): + return Wide_ResNet( + depth=depth, + widen_factor=widen_factor, + drop_rate=dropout, + num_classes=num_classes, + proj=proj, + low_dim=low_dim, + **kwargs) \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/xception.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/xception.py new file mode 100644 index 000000000..7615cd706 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/xception.py @@ -0,0 +1,393 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1610.02357 + +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math +import sys + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "Xception41": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_pretrained.pdparams", + "Xception65": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_pretrained.pdparams", + "Xception71": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception71_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + bn_name = "bn_" + name + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=ParamAttr(name=bn_name + "_scale"), + bias_attr=ParamAttr(name=bn_name + "_offset"), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + return y + + +class SeparableConv(nn.Layer): + def __init__(self, input_channels, output_channels, stride=1, name=None): + super(SeparableConv, self).__init__() + + self._pointwise_conv = ConvBNLayer( + input_channels, output_channels, 1, name=name + "_sep") + self._depthwise_conv = ConvBNLayer( + output_channels, + output_channels, + 3, + stride=stride, + groups=output_channels, + name=name + "_dw") + + def forward(self, inputs): + x = self._pointwise_conv(inputs) + x = self._depthwise_conv(x) + return x + + +class EntryFlowBottleneckBlock(nn.Layer): + def __init__(self, + input_channels, + output_channels, + stride=2, + name=None, + relu_first=False): + super(EntryFlowBottleneckBlock, self).__init__() + self.relu_first = relu_first + + self._short = Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=1, + stride=stride, + padding=0, + weight_attr=ParamAttr(name + "_branch1_weights"), + bias_attr=False) + self._conv1 = SeparableConv( + input_channels, + output_channels, + stride=1, + name=name + "_branch2a_weights") + self._conv2 = SeparableConv( + output_channels, + output_channels, + stride=1, + name=name + "_branch2b_weights") + self._pool = MaxPool2D(kernel_size=3, stride=stride, padding=1) + + def forward(self, inputs): + conv0 = inputs + short = self._short(inputs) + if self.relu_first: + conv0 = F.relu(conv0) + conv1 = self._conv1(conv0) + conv2 = F.relu(conv1) + conv2 = self._conv2(conv2) + pool = self._pool(conv2) + return paddle.add(x=short, y=pool) + + +class EntryFlow(nn.Layer): + def __init__(self, 
block_num=3): + super(EntryFlow, self).__init__() + + name = "entry_flow" + self.block_num = block_num + self._conv1 = ConvBNLayer( + 3, 32, 3, stride=2, act="relu", name=name + "_conv1") + self._conv2 = ConvBNLayer(32, 64, 3, act="relu", name=name + "_conv2") + if block_num == 3: + self._conv_0 = EntryFlowBottleneckBlock( + 64, 128, stride=2, name=name + "_0", relu_first=False) + self._conv_1 = EntryFlowBottleneckBlock( + 128, 256, stride=2, name=name + "_1", relu_first=True) + self._conv_2 = EntryFlowBottleneckBlock( + 256, 728, stride=2, name=name + "_2", relu_first=True) + elif block_num == 5: + self._conv_0 = EntryFlowBottleneckBlock( + 64, 128, stride=2, name=name + "_0", relu_first=False) + self._conv_1 = EntryFlowBottleneckBlock( + 128, 256, stride=1, name=name + "_1", relu_first=True) + self._conv_2 = EntryFlowBottleneckBlock( + 256, 256, stride=2, name=name + "_2", relu_first=True) + self._conv_3 = EntryFlowBottleneckBlock( + 256, 728, stride=1, name=name + "_3", relu_first=True) + self._conv_4 = EntryFlowBottleneckBlock( + 728, 728, stride=2, name=name + "_4", relu_first=True) + else: + sys.exit(-1) + + def forward(self, inputs): + x = self._conv1(inputs) + x = self._conv2(x) + + if self.block_num == 3: + x = self._conv_0(x) + x = self._conv_1(x) + x = self._conv_2(x) + elif self.block_num == 5: + x = self._conv_0(x) + x = self._conv_1(x) + x = self._conv_2(x) + x = self._conv_3(x) + x = self._conv_4(x) + return x + + +class MiddleFlowBottleneckBlock(nn.Layer): + def __init__(self, input_channels, output_channels, name): + super(MiddleFlowBottleneckBlock, self).__init__() + + self._conv_0 = SeparableConv( + input_channels, + output_channels, + stride=1, + name=name + "_branch2a_weights") + self._conv_1 = SeparableConv( + output_channels, + output_channels, + stride=1, + name=name + "_branch2b_weights") + self._conv_2 = SeparableConv( + output_channels, + output_channels, + stride=1, + name=name + "_branch2c_weights") + + def forward(self, inputs): + conv0 = F.relu(inputs) + conv0 = self._conv_0(conv0) + conv1 = F.relu(conv0) + conv1 = self._conv_1(conv1) + conv2 = F.relu(conv1) + conv2 = self._conv_2(conv2) + return paddle.add(x=inputs, y=conv2) + + +class MiddleFlow(nn.Layer): + def __init__(self, block_num=8): + super(MiddleFlow, self).__init__() + + self.block_num = block_num + self._conv_0 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_0") + self._conv_1 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_1") + self._conv_2 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_2") + self._conv_3 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_3") + self._conv_4 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_4") + self._conv_5 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_5") + self._conv_6 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_6") + self._conv_7 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_7") + if block_num == 16: + self._conv_8 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_8") + self._conv_9 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_9") + self._conv_10 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_10") + self._conv_11 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_11") + self._conv_12 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_12") + self._conv_13 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_13") + self._conv_14 = MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_14") + self._conv_15 = 
MiddleFlowBottleneckBlock( + 728, 728, name="middle_flow_15") + + def forward(self, inputs): + x = self._conv_0(inputs) + x = self._conv_1(x) + x = self._conv_2(x) + x = self._conv_3(x) + x = self._conv_4(x) + x = self._conv_5(x) + x = self._conv_6(x) + x = self._conv_7(x) + if self.block_num == 16: + x = self._conv_8(x) + x = self._conv_9(x) + x = self._conv_10(x) + x = self._conv_11(x) + x = self._conv_12(x) + x = self._conv_13(x) + x = self._conv_14(x) + x = self._conv_15(x) + return x + + +class ExitFlowBottleneckBlock(nn.Layer): + def __init__(self, input_channels, output_channels1, output_channels2, + name): + super(ExitFlowBottleneckBlock, self).__init__() + + self._short = Conv2D( + in_channels=input_channels, + out_channels=output_channels2, + kernel_size=1, + stride=2, + padding=0, + weight_attr=ParamAttr(name + "_branch1_weights"), + bias_attr=False) + self._conv_1 = SeparableConv( + input_channels, + output_channels1, + stride=1, + name=name + "_branch2a_weights") + self._conv_2 = SeparableConv( + output_channels1, + output_channels2, + stride=1, + name=name + "_branch2b_weights") + self._pool = MaxPool2D(kernel_size=3, stride=2, padding=1) + + def forward(self, inputs): + short = self._short(inputs) + conv0 = F.relu(inputs) + conv1 = self._conv_1(conv0) + conv2 = F.relu(conv1) + conv2 = self._conv_2(conv2) + pool = self._pool(conv2) + return paddle.add(x=short, y=pool) + + +class ExitFlow(nn.Layer): + def __init__(self, class_num): + super(ExitFlow, self).__init__() + + name = "exit_flow" + + self._conv_0 = ExitFlowBottleneckBlock( + 728, 728, 1024, name=name + "_1") + self._conv_1 = SeparableConv(1024, 1536, stride=1, name=name + "_2") + self._conv_2 = SeparableConv(1536, 2048, stride=1, name=name + "_3") + self._pool = AdaptiveAvgPool2D(1) + stdv = 1.0 / math.sqrt(2048 * 1.0) + self._out = Linear( + 2048, + class_num, + weight_attr=ParamAttr( + name="fc_weights", initializer=Uniform(-stdv, stdv)), + bias_attr=ParamAttr(name="fc_offset")) + + def forward(self, inputs): + conv0 = self._conv_0(inputs) + conv1 = self._conv_1(conv0) + conv1 = F.relu(conv1) + conv2 = self._conv_2(conv1) + conv2 = F.relu(conv2) + pool = self._pool(conv2) + pool = paddle.flatten(pool, start_axis=1, stop_axis=-1) + out = self._out(pool) + return out + + +class Xception(nn.Layer): + def __init__(self, + entry_flow_block_num=3, + middle_flow_block_num=8, + class_num=1000): + super(Xception, self).__init__() + self.entry_flow_block_num = entry_flow_block_num + self.middle_flow_block_num = middle_flow_block_num + self._entry_flow = EntryFlow(entry_flow_block_num) + self._middle_flow = MiddleFlow(middle_flow_block_num) + self._exit_flow = ExitFlow(class_num) + + def forward(self, inputs): + x = self._entry_flow(inputs) + x = self._middle_flow(x) + x = self._exit_flow(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def Xception41(pretrained=False, use_ssld=False, **kwargs): + model = Xception(entry_flow_block_num=3, middle_flow_block_num=8, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["Xception41"], use_ssld=use_ssld) + return model + + +def Xception65(pretrained=False, use_ssld=False, **kwargs): + model = Xception( + entry_flow_block_num=3, middle_flow_block_num=16, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["Xception65"], use_ssld=use_ssld) + return model + + +def Xception71(pretrained=False, use_ssld=False, **kwargs): + model = Xception( + entry_flow_block_num=5, middle_flow_block_num=16, **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["Xception71"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/xception_deeplab.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/xception_deeplab.py new file mode 100644 index 000000000..f5a7fa529 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/model_zoo/xception_deeplab.py @@ -0,0 +1,423 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1706.05587 + +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D + +from ....utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "Xception41_deeplab": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_deeplab_pretrained.pdparams", + "Xception65_deeplab": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_deeplab_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +def check_data(data, number): + if type(data) == int: + return [data] * number + assert len(data) == number + return data + + +def check_stride(s, os): + if s <= os: + return True + else: + return False + + +def check_points(count, points): + if points is None: + return False + else: + if isinstance(points, list): + return (True if count in points else False) + else: + return (True if count == points else False) + + +def gen_bottleneck_params(backbone='xception_65'): + if backbone == 'xception_65': + bottleneck_params = { + "entry_flow": (3, [2, 2, 2], [128, 256, 728]), + "middle_flow": (16, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + elif backbone == 'xception_41': + bottleneck_params = { + "entry_flow": (3, [2, 2, 2], [128, 256, 728]), + "middle_flow": (8, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + elif backbone == 'xception_71': + bottleneck_params = { + "entry_flow": (5, [2, 1, 2, 1, 2], [128, 256, 256, 728, 728]), + "middle_flow": (16, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + else: + raise Exception( + "xception backbont 
only support xception_41/xception_65/xception_71" + ) + return bottleneck_params + + +class ConvBNLayer(nn.Layer): + def __init__(self, + input_channels, + output_channels, + filter_size, + stride=1, + padding=0, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=filter_size, + stride=stride, + padding=padding, + weight_attr=ParamAttr(name=name + "/weights"), + bias_attr=False) + self._bn = BatchNorm( + num_channels=output_channels, + act=act, + epsilon=1e-3, + momentum=0.99, + param_attr=ParamAttr(name=name + "/BatchNorm/gamma"), + bias_attr=ParamAttr(name=name + "/BatchNorm/beta"), + moving_mean_name=name + "/BatchNorm/moving_mean", + moving_variance_name=name + "/BatchNorm/moving_variance") + + def forward(self, inputs): + return self._bn(self._conv(inputs)) + + +class Seperate_Conv(nn.Layer): + def __init__(self, + input_channels, + output_channels, + stride, + filter, + dilation=1, + act=None, + name=None): + super(Seperate_Conv, self).__init__() + + self._conv1 = Conv2D( + in_channels=input_channels, + out_channels=input_channels, + kernel_size=filter, + stride=stride, + groups=input_channels, + padding=(filter) // 2 * dilation, + dilation=dilation, + weight_attr=ParamAttr(name=name + "/depthwise/weights"), + bias_attr=False) + self._bn1 = BatchNorm( + input_channels, + act=act, + epsilon=1e-3, + momentum=0.99, + param_attr=ParamAttr(name=name + "/depthwise/BatchNorm/gamma"), + bias_attr=ParamAttr(name=name + "/depthwise/BatchNorm/beta"), + moving_mean_name=name + "/depthwise/BatchNorm/moving_mean", + moving_variance_name=name + "/depthwise/BatchNorm/moving_variance") + self._conv2 = Conv2D( + input_channels, + output_channels, + 1, + stride=1, + groups=1, + padding=0, + weight_attr=ParamAttr(name=name + "/pointwise/weights"), + bias_attr=False) + self._bn2 = BatchNorm( + output_channels, + act=act, + epsilon=1e-3, + momentum=0.99, + param_attr=ParamAttr(name=name + "/pointwise/BatchNorm/gamma"), + bias_attr=ParamAttr(name=name + "/pointwise/BatchNorm/beta"), + moving_mean_name=name + "/pointwise/BatchNorm/moving_mean", + moving_variance_name=name + "/pointwise/BatchNorm/moving_variance") + + def forward(self, inputs): + x = self._conv1(inputs) + x = self._bn1(x) + x = self._conv2(x) + x = self._bn2(x) + return x + + +class Xception_Block(nn.Layer): + def __init__(self, + input_channels, + output_channels, + strides=1, + filter_size=3, + dilation=1, + skip_conv=True, + has_skip=True, + activation_fn_in_separable_conv=False, + name=None): + super(Xception_Block, self).__init__() + + repeat_number = 3 + output_channels = check_data(output_channels, repeat_number) + filter_size = check_data(filter_size, repeat_number) + strides = check_data(strides, repeat_number) + + self.has_skip = has_skip + self.skip_conv = skip_conv + self.activation_fn_in_separable_conv = activation_fn_in_separable_conv + if not activation_fn_in_separable_conv: + self._conv1 = Seperate_Conv( + input_channels, + output_channels[0], + stride=strides[0], + filter=filter_size[0], + dilation=dilation, + name=name + "/separable_conv1") + self._conv2 = Seperate_Conv( + output_channels[0], + output_channels[1], + stride=strides[1], + filter=filter_size[1], + dilation=dilation, + name=name + "/separable_conv2") + self._conv3 = Seperate_Conv( + output_channels[1], + output_channels[2], + stride=strides[2], + filter=filter_size[2], + dilation=dilation, + name=name + "/separable_conv3") + else: + self._conv1 = 
Seperate_Conv( + input_channels, + output_channels[0], + stride=strides[0], + filter=filter_size[0], + act="relu", + dilation=dilation, + name=name + "/separable_conv1") + self._conv2 = Seperate_Conv( + output_channels[0], + output_channels[1], + stride=strides[1], + filter=filter_size[1], + act="relu", + dilation=dilation, + name=name + "/separable_conv2") + self._conv3 = Seperate_Conv( + output_channels[1], + output_channels[2], + stride=strides[2], + filter=filter_size[2], + act="relu", + dilation=dilation, + name=name + "/separable_conv3") + + if has_skip and skip_conv: + self._short = ConvBNLayer( + input_channels, + output_channels[-1], + 1, + stride=strides[-1], + padding=0, + name=name + "/shortcut") + + def forward(self, inputs): + if not self.activation_fn_in_separable_conv: + x = F.relu(inputs) + x = self._conv1(x) + x = F.relu(x) + x = self._conv2(x) + x = F.relu(x) + x = self._conv3(x) + else: + x = self._conv1(inputs) + x = self._conv2(x) + x = self._conv3(x) + if self.has_skip: + if self.skip_conv: + skip = self._short(inputs) + else: + skip = inputs + return paddle.add(x, skip) + else: + return x + + +class XceptionDeeplab(nn.Layer): + def __init__(self, backbone, class_num=1000): + super(XceptionDeeplab, self).__init__() + + bottleneck_params = gen_bottleneck_params(backbone) + self.backbone = backbone + + self._conv1 = ConvBNLayer( + 3, + 32, + 3, + stride=2, + padding=1, + act="relu", + name=self.backbone + "/entry_flow/conv1") + self._conv2 = ConvBNLayer( + 32, + 64, + 3, + stride=1, + padding=1, + act="relu", + name=self.backbone + "/entry_flow/conv2") + + self.block_num = bottleneck_params["entry_flow"][0] + self.strides = bottleneck_params["entry_flow"][1] + self.chns = bottleneck_params["entry_flow"][2] + self.strides = check_data(self.strides, self.block_num) + self.chns = check_data(self.chns, self.block_num) + + self.entry_flow = [] + self.middle_flow = [] + + self.stride = 2 + self.output_stride = 32 + s = self.stride + + for i in range(self.block_num): + stride = self.strides[i] if check_stride(s * self.strides[i], + self.output_stride) else 1 + xception_block = self.add_sublayer( + self.backbone + "/entry_flow/block" + str(i + 1), + Xception_Block( + input_channels=64 if i == 0 else self.chns[i - 1], + output_channels=self.chns[i], + strides=[1, 1, self.stride], + name=self.backbone + "/entry_flow/block" + str(i + 1))) + self.entry_flow.append(xception_block) + s = s * stride + self.stride = s + + self.block_num = bottleneck_params["middle_flow"][0] + self.strides = bottleneck_params["middle_flow"][1] + self.chns = bottleneck_params["middle_flow"][2] + self.strides = check_data(self.strides, self.block_num) + self.chns = check_data(self.chns, self.block_num) + s = self.stride + + for i in range(self.block_num): + stride = self.strides[i] if check_stride(s * self.strides[i], + self.output_stride) else 1 + xception_block = self.add_sublayer( + self.backbone + "/middle_flow/block" + str(i + 1), + Xception_Block( + input_channels=728, + output_channels=728, + strides=[1, 1, self.strides[i]], + skip_conv=False, + name=self.backbone + "/middle_flow/block" + str(i + 1))) + self.middle_flow.append(xception_block) + s = s * stride + self.stride = s + + self.block_num = bottleneck_params["exit_flow"][0] + self.strides = bottleneck_params["exit_flow"][1] + self.chns = bottleneck_params["exit_flow"][2] + self.strides = check_data(self.strides, self.block_num) + self.chns = check_data(self.chns, self.block_num) + s = self.stride + stride = self.strides[0] if check_stride(s 
* self.strides[0], + self.output_stride) else 1 + self._exit_flow_1 = Xception_Block( + 728, + self.chns[0], [1, 1, stride], + name=self.backbone + "/exit_flow/block1") + s = s * stride + stride = self.strides[1] if check_stride(s * self.strides[1], + self.output_stride) else 1 + self._exit_flow_2 = Xception_Block( + self.chns[0][-1], + self.chns[1], [1, 1, stride], + dilation=2, + has_skip=False, + activation_fn_in_separable_conv=True, + name=self.backbone + "/exit_flow/block2") + s = s * stride + + self.stride = s + + self._drop = Dropout(p=0.5, mode="downscale_in_infer") + self._pool = AdaptiveAvgPool2D(1) + self._fc = Linear( + self.chns[1][-1], + class_num, + weight_attr=ParamAttr(name="fc_weights"), + bias_attr=ParamAttr(name="fc_bias")) + + def forward(self, inputs): + x = self._conv1(inputs) + x = self._conv2(x) + for ef in self.entry_flow: + x = ef(x) + for mf in self.middle_flow: + x = mf(x) + x = self._exit_flow_1(x) + x = self._exit_flow_2(x) + x = self._drop(x) + x = self._pool(x) + x = paddle.squeeze(x, axis=[2, 3]) + x = self._fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def Xception41_deeplab(pretrained=False, use_ssld=False, **kwargs): + model = XceptionDeeplab('xception_41', **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["Xception41_deeplab"], use_ssld=use_ssld) + return model + + +def Xception65_deeplab(pretrained=False, use_ssld=False, **kwargs): + model = XceptionDeeplab("xception_65", **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["Xception65_deeplab"], use_ssld=use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/__init__.py new file mode 100644 index 000000000..4a2716208 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/__init__.py @@ -0,0 +1,5 @@ +from .resnet_variant import ResNet50_last_stage_stride1, ResNet50_metabin +from .vgg_variant import VGG19Sigmoid +from .pp_lcnet_variant import PPLCNet_x2_5_Tanh +from .pp_lcnetv2_variant import PPLCNetV2_base_ShiTu +from .swin_transformer_variant import SwinTransformer_base_patch4_window7_224_SOLIDER,SwinTransformer_small_patch4_window7_224_SOLIDER,SwinTransformer_tiny_patch4_window7_224_SOLIDER diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/efficientnet_variant.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/efficientnet_variant.py new file mode 100644 index 000000000..86701d4b4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/efficientnet_variant.py @@ -0,0 +1,44 @@ +import paddle +import paddle.nn as nn +from ..model_zoo.efficientnet import EfficientNetB3, _load_pretrained + +MODEL_URLS = { + "EfficientNetB3_watermark": + "https://paddleclas.bj.bcebos.com/models/practical/pretrained/EfficientNetB3_watermark_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +def EfficientNetB3_watermark(padding_type='DYNAMIC', + override_params={"batch_norm_epsilon": 0.00001}, + use_se=True, + 
pretrained=False, + use_ssld=False, + **kwargs): + def replace_function(_fc, pattern): + classifier = nn.Sequential( + # 1536 is the orginal in_features + nn.Linear( + in_features=1536, out_features=625), + nn.ReLU(), + nn.Dropout(p=0.3), + nn.Linear( + in_features=625, out_features=256), + nn.ReLU(), + nn.Linear( + in_features=256, out_features=2), ) + return classifier + + pattern = "_fc" + model = EfficientNetB3( + padding_type=padding_type, + override_params=override_params, + use_se=True, + pretrained=False, + use_ssld=False, + **kwargs) + model.upgrade_sublayer(pattern, replace_function) + _load_pretrained(pretrained, model, MODEL_URLS["EfficientNetB3_watermark"], + use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/foundation_vit_variant.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/foundation_vit_variant.py new file mode 100644 index 000000000..7f79dcaf3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/foundation_vit_variant.py @@ -0,0 +1,52 @@ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from ..model_zoo.foundation_vit import CLIP_vit_large_patch14_224, _load_pretrained + +MODEL_URLS = { + "CLIP_large_patch14_224_aesthetic": + "https://paddleclas.bj.bcebos.com/models/practical/pretrained/CLIP_large_patch14_224_aesthetic_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class MLP(nn.Layer): + def __init__(self, input_size): + super().__init__() + self.input_size = input_size + self.layers = nn.Sequential( + nn.Linear(self.input_size, 1024), + nn.Dropout(0.2), + nn.Linear(1024, 128), + nn.Dropout(0.2), + nn.Linear(128, 64), + nn.Dropout(0.1), nn.Linear(64, 16), nn.Linear(16, 1)) + + def forward(self, x): + return self.layers(x) + + +class Aesthetic_Score_Predictor(nn.Layer): + def __init__(self): + super().__init__() + self.model = CLIP_vit_large_patch14_224() + self.fc_head = nn.Linear(1024, 768, bias_attr=False) + self.mlp = MLP(768) + + def forward(self, x): + x = self.model(x) + x = x[:, 0, :] + x = self.fc_head(x) + x = F.normalize(x, p=2, axis=-1) + x = self.mlp(x) + return x + + +def CLIP_large_patch14_224_aesthetic(pretrained=False, + use_ssld=False, + **kwargs): + model = Aesthetic_Score_Predictor() + _load_pretrained(pretrained, model, + MODEL_URLS["CLIP_large_patch14_224_aesthetic"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/pp_lcnet_variant.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/pp_lcnet_variant.py new file mode 100644 index 000000000..e4c25c4c6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/pp_lcnet_variant.py @@ -0,0 +1,29 @@ +import paddle +from paddle.nn import Sigmoid +from paddle.nn import Tanh +from ..legendary_models.pp_lcnet import PPLCNet_x2_5 + +__all__ = ["PPLCNet_x2_5_Tanh"] + + +class TanhSuffix(paddle.nn.Layer): + def __init__(self, origin_layer): + super(TanhSuffix, self).__init__() + self.origin_layer = origin_layer + self.tanh = Tanh() + + def forward(self, input, res_dict=None, **kwargs): + x = self.origin_layer(input) + x = self.tanh(x) + return x + + +def PPLCNet_x2_5_Tanh(pretrained=False, use_ssld=False, **kwargs): + def replace_function(origin_layer, pattern): + new_layer = TanhSuffix(origin_layer) + return new_layer + + pattern = "fc" + model = PPLCNet_x2_5(pretrained=pretrained, 
use_ssld=use_ssld, **kwargs) + model.upgrade_sublayer(pattern, replace_function) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/pp_lcnetv2_variant.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/pp_lcnetv2_variant.py new file mode 100644 index 000000000..6acccdc8e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/pp_lcnetv2_variant.py @@ -0,0 +1,56 @@ +from paddle.nn import Conv2D, Identity + +from ..legendary_models.pp_lcnet_v2 import MODEL_URLS, PPLCNetV2_base, RepDepthwiseSeparable, _load_pretrained + +__all__ = ["PPLCNetV2_base_ShiTu"] + + +def PPLCNetV2_base_ShiTu(pretrained=False, use_ssld=False, **kwargs): + """ + An variant network of PPLCNetV2_base + 1. remove ReLU layer after last_conv + 2. add bias to last_conv + 3. change stride to 1 in last two RepDepthwiseSeparable Block + """ + model = PPLCNetV2_base(pretrained=False, use_ssld=use_ssld, **kwargs) + + def remove_ReLU_function(conv, pattern): + new_conv = Identity() + return new_conv + + def add_bias_last_conv(conv, pattern): + new_conv = Conv2D( + in_channels=conv._in_channels, + out_channels=conv._out_channels, + kernel_size=conv._kernel_size, + stride=conv._stride, + padding=conv._padding, + groups=conv._groups, + bias_attr=True) + return new_conv + + def last_stride_function(rep_block, pattern): + new_conv = RepDepthwiseSeparable( + in_channels=rep_block.in_channels, + out_channels=rep_block.out_channels, + stride=1, + dw_size=rep_block.dw_size, + split_pw=rep_block.split_pw, + use_rep=rep_block.use_rep, + use_se=rep_block.use_se, + use_shortcut=rep_block.use_shortcut) + return new_conv + + pattern_act = ["act"] + pattern_lastconv = ["last_conv"] + pattern_last_stride = [ + "stages[3][0]", + "stages[3][1]", + ] + model.upgrade_sublayer(pattern_act, remove_ReLU_function) + model.upgrade_sublayer(pattern_lastconv, add_bias_last_conv) + model.upgrade_sublayer(pattern_last_stride, last_stride_function) + + # load params again after upgrade some layers + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNetV2_base"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/resnet_variant.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/resnet_variant.py new file mode 100644 index 000000000..3569cd206 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/resnet_variant.py @@ -0,0 +1,203 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
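The variant factories collected above (VGG19Sigmoid, PPLCNet_x2_5_Tanh, PPLCNetV2_base_ShiTu, EfficientNetB3_watermark) share one pattern: build the stock backbone, call model.upgrade_sublayer(pattern, replace_function) to swap the sublayers matched by pattern, and in several cases defer _load_pretrained until after the upgrade. A minimal usage sketch for one of them, assuming the ppcls package from this patch is importable as "ppcls" (the import path is an assumption, not part of the patch):

    import paddle
    # assumed import path; adjust to however ppcls_2.6 is exposed on PYTHONPATH
    from ppcls.arch.backbone.variant_models import PPLCNet_x2_5_Tanh

    model = PPLCNet_x2_5_Tanh(pretrained=False)   # backbone with Tanh appended after the `fc` layer
    model.eval()
    x = paddle.randn([1, 3, 224, 224])            # dummy NCHW input
    y = model(x)                                  # tanh-squashed logits, shape [1, class_num]
    print(y.shape)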
+ +from collections import defaultdict +import copy +import paddle +from paddle import nn +from paddle.nn import functional as F +from ..legendary_models.resnet import ResNet50, MODEL_URLS, _load_pretrained + +__all__ = [ + "ResNet50_last_stage_stride1", "ResNet50_adaptive_max_pool2d", + 'ResNet50_metabin' +] + + +def ResNet50_last_stage_stride1(pretrained=False, use_ssld=False, **kwargs): + def replace_function(conv, pattern): + new_conv = nn.Conv2D( + in_channels=conv._in_channels, + out_channels=conv._out_channels, + kernel_size=conv._kernel_size, + stride=1, + padding=conv._padding, + groups=conv._groups, + bias_attr=conv._bias_attr) + return new_conv + + pattern = ["blocks[13].conv1.conv", "blocks[13].short.conv"] + model = ResNet50(pretrained=False, use_ssld=use_ssld, **kwargs) + model.upgrade_sublayer(pattern, replace_function) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50"], use_ssld) + return model + + +def ResNet50_adaptive_max_pool2d(pretrained=False, use_ssld=False, **kwargs): + def replace_function(pool, pattern): + new_pool = nn.AdaptiveMaxPool2D(output_size=1) + return new_pool + + pattern = ["avg_pool"] + model = ResNet50(pretrained=False, use_ssld=use_ssld, **kwargs) + model.upgrade_sublayer(pattern, replace_function) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50"], use_ssld) + return model + + +def ResNet50_metabin(pretrained=False, + use_ssld=False, + bias_lr_factor=1.0, + **kwargs): + """ + ResNet50 which replaces all `bn` layers with MetaBIN + reference: https://arxiv.org/abs/2011.14670 + """ + + class BINGate(nn.Layer): + def __init__(self, num_features): + super().__init__() + self.gate = self.create_parameter( + shape=[num_features], + default_initializer=nn.initializer.Constant(1.0)) + self.add_parameter("gate", self.gate) + + def forward(self, opt={}): + flag_update = 'lr_gate' in opt and \ + opt.get('enable_inside_update', False) + if flag_update and self.gate.grad is not None: # update gate + lr = opt['lr_gate'] * self.gate.optimize_attr.get( + 'learning_rate', 1.0) + gate = self.gate - lr * self.gate.grad + gate.clip_(min=0, max=1) + else: + gate = self.gate + return gate + + def clip_gate(self): + self.gate.set_value(self.gate.clip(0, 1)) + + class MetaBN(nn.BatchNorm2D): + def forward(self, inputs, opt={}): + mode = opt.get("bn_mode", "general") if self.training else "eval" + if mode == "general": # update, but not apply running_mean/var + result = F.batch_norm(inputs, self._mean, self._variance, + self.weight, self.bias, self.training, + self._momentum, self._epsilon) + elif mode == "hold": # not update, not apply running_mean/var + result = F.batch_norm( + inputs, + paddle.mean( + inputs, axis=(0, 2, 3)), + paddle.var(inputs, axis=(0, 2, 3)), + self.weight, + self.bias, + self.training, + self._momentum, + self._epsilon) + elif mode == "eval": # fix and apply running_mean/var, + if self._mean is None: + result = F.batch_norm( + inputs, + paddle.mean( + inputs, axis=(0, 2, 3)), + paddle.var(inputs, axis=(0, 2, 3)), + self.weight, + self.bias, + True, + self._momentum, + self._epsilon) + else: + result = F.batch_norm(inputs, self._mean, self._variance, + self.weight, self.bias, False, + self._momentum, self._epsilon) + return result + + class MetaBIN(nn.Layer): + """ + MetaBIN (Meta Batch-Instance Normalization) + reference: https://arxiv.org/abs/2011.14670 + """ + + def __init__(self, num_features): + super().__init__() + self.batch_norm = MetaBN( + num_features=num_features, use_global_stats=True) + self.instance_norm = 
nn.InstanceNorm2D(num_features=num_features) + self.gate = BINGate(num_features=num_features) + self.opt = defaultdict() + + def forward(self, inputs): + out_bn = self.batch_norm(inputs, self.opt) + out_in = self.instance_norm(inputs) + gate = self.gate(self.opt) + gate = gate.unsqueeze([0, -1, -1]) + out = out_bn * gate + out_in * (1 - gate) + return out + + def reset_opt(self): + self.opt = defaultdict() + + def setup_opt(self, opt): + """ + Arg: + opt (dict): Optional setting to change the behavior of MetaBIN during training. + It includes three settings which are `enable_inside_update`, `lr_gate` and `bn_mode`. + """ + self.check_opt(opt) + self.opt = copy.deepcopy(opt) + + @classmethod + def check_opt(cls, opt): + assert isinstance(opt, dict), \ + TypeError('Got the wrong type of `opt`. Please use `dict` type.') + + if opt.get('enable_inside_update', False) and 'lr_gate' not in opt: + raise RuntimeError('Missing `lr_gate` in opt.') + + assert isinstance(opt.get('lr_gate', 1.0), float), \ + TypeError('Got the wrong type of `lr_gate`. Please use `float` type.') + assert isinstance(opt.get('enable_inside_update', True), bool), \ + TypeError('Got the wrong type of `enable_inside_update`. Please use `bool` type.') + assert opt.get('bn_mode', "general") in ["general", "hold", "eval"], \ + TypeError('Got the wrong value of `bn_mode`.') + + def bn2metabin(bn, pattern): + metabin = MetaBIN(bn.weight.shape[0]) + metabin.batch_norm.weight.set_value(bn.weight) + metabin.batch_norm.bias.set_value(bn.bias) + metabin.batch_norm._variance.set_value(bn._variance) + metabin.batch_norm._mean.set_value(bn._mean) + return metabin + + def setup_optimize_attr(model, bias_lr_factor): + for name, params in model.named_parameters(): + if params.stop_gradient: + continue + if "bias" in name: + params.optimize_attr['learning_rate'] = bias_lr_factor + + pattern = [] + pattern.extend(["blocks[{}].conv{}.bn".format(i, j) \ + for i in range(16) for j in range(3)]) + pattern.extend(["blocks[{}].short.bn".format(i) for i in [0, 3, 7, 13]]) + pattern.append("stem[0].bn") + + model = ResNet50_last_stage_stride1( + pretrained=pretrained, use_ssld=use_ssld, **kwargs) + + model.upgrade_sublayer(pattern, bn2metabin) + setup_optimize_attr(model=model, bias_lr_factor=bias_lr_factor) + + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/swin_transformer_variant.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/swin_transformer_variant.py new file mode 100644 index 000000000..1e6632f6e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/swin_transformer_variant.py @@ -0,0 +1,355 @@ +import numpy as np +import paddle +import paddle.nn as nn +from ..legendary_models.swin_transformer import SwinTransformer, _load_pretrained, \ + PatchEmbed, BasicLayer, SwinTransformerBlock + +MODEL_URLS_SOLIDER = { + "SwinTransformer_tiny_patch4_window7_224_SOLIDER": + 'https://paddleclas.bj.bcebos.com/models/SOLIDER/SwinTransformer_tiny_patch4_window7_224_pretrained.pdparams', + "SwinTransformer_small_patch4_window7_224_SOLIDER": + 'https://paddleclas.bj.bcebos.com/models/SOLIDER/SwinTransformer_small_patch4_window7_224_pretrained.pdparams', + "SwinTransformer_base_patch4_window7_224_SOLIDER": + 'https://paddleclas.bj.bcebos.com/models/SOLIDER/SwinTransformer_base_patch4_window7_224_pretrained.pdparams' +} + +__all__ = list(MODEL_URLS_SOLIDER.keys()) + + +class PatchEmbed_SOLIDER(PatchEmbed): + def forward(self, 
x): + x = self.proj(x) + out_size = (x.shape[2], x.shape[3]) + x = x.flatten(2).transpose([0, 2, 1]) # B Ph*Pw C + if self.norm is not None: + x = self.norm(x) + return x, out_size + + +class SwinTransformerBlock_SOLIDER(SwinTransformerBlock): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resulotion. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Layer, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, + dim, + input_resolution, + num_heads, + window_size=7, + shift_size=0, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm): + super(SwinTransformerBlock_SOLIDER, self).__init__( + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + shift_size=shift_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path, + act_layer=act_layer, + norm_layer=norm_layer, + ) + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + self.check_condition() + + def check_condition(self): + if min(self.input_resolution) < self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + +class BasicLayer_SOLIDER(BasicLayer): + def __init__(self, + dim, + input_resolution, + depth, + num_heads, + window_size, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + norm_layer=nn.LayerNorm, + downsample=None, + use_checkpoint=False): + + super(BasicLayer_SOLIDER, self).__init__( + dim=dim, + input_resolution=input_resolution, + depth=depth, + num_heads=num_heads, + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path, + norm_layer=norm_layer, + downsample=downsample, + use_checkpoint=use_checkpoint + ) + # build blocks + self.blocks = nn.LayerList([ + SwinTransformerBlock_SOLIDER( + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] + if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer) for i in range(depth) + ]) + + def forward(self, x): + for blk in self.blocks: + x = blk(x) + + if self.downsample is not None: + x_down = self.downsample(x) + return x_down, x + else: + return x, x + + +class PatchMerging_SOLIDER(nn.Layer): + r""" Patch 
Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.sampler = nn.Unfold(kernel_sizes=2, strides=2) + self.norm = norm_layer(4 * dim) + self.reduction = nn.Linear(4 * dim, 2 * dim, bias_attr=False) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + assert H % 2 == 0 and W % 2 == 0, "x size ({}*{}) are not even.".format( + H, W) + + x = x.reshape([B, H, W, C]).transpose([0, 3, 1, 2]) + + x = self.sampler(x) + x = x.transpose([0, 2, 1]) + x = self.norm(x) + x = self.reduction(x) + return x + + +class SwinTransformer_SOLIDER(SwinTransformer): + def __init__(self, + embed_dim=96, + img_size=224, + patch_size=4, + in_chans=3, + class_num=1000, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + norm_layer=nn.LayerNorm, + out_indices=(0, 1, 2, 3), + semantic_weight=1.0, + use_checkpoint=False, + **kwargs): + super(SwinTransformer_SOLIDER, self).__init__() + patches_resolution = self.patch_embed.patches_resolution + self.num_classes = num_classes = class_num + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + # stochastic depth + dpr = np.linspace(0, drop_path_rate, + sum(depths)).tolist() # stochastic depth decay rule + self.patch_embed = PatchEmbed_SOLIDER( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + self.out_indices = out_indices + # build layers + self.layers = nn.LayerList() + for i_layer in range(self.num_layers): + layer = BasicLayer_SOLIDER( + dim=int(embed_dim * 2 ** i_layer), + input_resolution=(patches_resolution[0] // (2 ** i_layer), + patches_resolution[1] // (2 ** i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging_SOLIDER + if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + self.layers.append(layer) + + self.num_features_s = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] + for i in out_indices: + layer = norm_layer(self.num_features_s[i]) + layer_name = f'norm{i}' + self.add_sublayer(layer_name, layer) + self.avgpool = nn.AdaptiveAvgPool2D(1) + + # semantic embedding + self.semantic_weight = semantic_weight + if self.semantic_weight >= 0: + self.semantic_embed_w = nn.LayerList() + self.semantic_embed_b = nn.LayerList() + for i in range(len(depths)): + if i >= len(depths) - 1: + i = len(depths) - 2 + semantic_embed_w = nn.Linear(2, self.num_features_s[i + 1]) + semantic_embed_b = nn.Linear(2, self.num_features_s[i + 1]) + self._init_weights(semantic_embed_w) + self._init_weights(semantic_embed_b) + self.semantic_embed_w.append(semantic_embed_w) + self.semantic_embed_b.append(semantic_embed_b) + self.softplus = nn.Softplus() + self.head = nn.Linear( + self.num_features, + num_classes) if self.num_classes > 0 else nn.Identity() + + 
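+    # Note on the SOLIDER semantic weighting configured above: when self.semantic_weight >= 0,
+    # forward_features builds w = [semantic_weight, 1 - semantic_weight] for every sample and,
+    # after each stage i, modulates the tokens as
+    #     x = x * softplus(semantic_embed_w[i](w)) + semantic_embed_b[i](w),
+    # so a single scalar controls how strongly the semantic branch rescales and shifts each
+    # stage's features.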
def forward_features(self, x, semantic_weight=None): + if self.semantic_weight >= 0 and semantic_weight is None: + w = paddle.ones((x.shape[0], 1)) * self.semantic_weight + w = paddle.concat([w, 1 - w], axis=-1) + semantic_weight = w.cuda() + x, hw_shape = self.patch_embed(x) + if self.ape: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + outs = [] + + for i, layer in enumerate(self.layers): + x, out = layer(x) + if self.semantic_weight >= 0: + sw = self.semantic_embed_w[i](semantic_weight).unsqueeze(1) + sb = self.semantic_embed_b[i](semantic_weight).unsqueeze(1) + x = x * self.softplus(sw) + sb + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(out) + out = out.reshape([-1, *hw_shape, + self.num_features_s[i]]).transpose([0, 3, 1, 2]) + hw_shape = [item // 2 for item in hw_shape] + outs.append(out) + + x = self.avgpool(outs[-1]) # B C 1 + x = paddle.flatten(x, 1) + + return x + + +def SwinTransformer_tiny_patch4_window7_224_SOLIDER( + pretrained=False, + **kwargs): + model = SwinTransformer_SOLIDER( + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + drop_path_rate=0.2, # if imagenet22k or imagenet22kto1k, set drop_path_rate=0.1 + **kwargs) + _load_pretrained( + pretrained, + model=model, + model_url=MODEL_URLS_SOLIDER["SwinTransformer_tiny_patch4_window7_224_SOLIDER"], + **kwargs) + return model + + +def SwinTransformer_small_patch4_window7_224_SOLIDER( + pretrained=False, + **kwargs): + model = SwinTransformer_SOLIDER( + embed_dim=96, + depths=[2, 2, 18, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + drop_path_rate=0.3, # if imagenet22k or imagenet22kto1k, set drop_path_rate=0.2 + **kwargs) + _load_pretrained( + pretrained, + model=model, + model_url=MODEL_URLS_SOLIDER["SwinTransformer_small_patch4_window7_224_SOLIDER"], + **kwargs) + return model + + +def SwinTransformer_base_patch4_window7_224_SOLIDER( + pretrained=False, + **kwargs): + model = SwinTransformer_SOLIDER( + embed_dim=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=7, + drop_path_rate=0.5, # if imagenet22k or imagenet22kto1k, set drop_path_rate=0.2 + **kwargs) + _load_pretrained( + pretrained, + model=model, + model_url=MODEL_URLS_SOLIDER["SwinTransformer_base_patch4_window7_224_SOLIDER"], + **kwargs) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/vgg_variant.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/vgg_variant.py new file mode 100644 index 000000000..36ce47768 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/backbone/variant_models/vgg_variant.py @@ -0,0 +1,28 @@ +import paddle +from paddle.nn import Sigmoid +from ..legendary_models.vgg import VGG19 + +__all__ = ["VGG19Sigmoid"] + + +class SigmoidSuffix(paddle.nn.Layer): + def __init__(self, origin_layer): + super().__init__() + self.origin_layer = origin_layer + self.sigmoid = Sigmoid() + + def forward(self, input, res_dict=None, **kwargs): + x = self.origin_layer(input) + x = self.sigmoid(x) + return x + + +def VGG19Sigmoid(pretrained=False, use_ssld=False, **kwargs): + def replace_function(origin_layer, pattern): + new_layer = SigmoidSuffix(origin_layer) + return new_layer + + pattern = "fc2" + model = VGG19(pretrained=pretrained, use_ssld=use_ssld, **kwargs) + model.upgrade_sublayer(pattern, replace_function) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/distill/afd_attention.py 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/distill/afd_attention.py new file mode 100644 index 000000000..63b094f31 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/distill/afd_attention.py @@ -0,0 +1,123 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import paddle.nn as nn +import paddle.nn.functional as F +import paddle +import numpy as np + + +class LinearBNReLU(nn.Layer): + def __init__(self, nin, nout): + super().__init__() + self.linear = nn.Linear(nin, nout) + self.bn = nn.BatchNorm1D(nout) + self.relu = nn.ReLU() + + def forward(self, x, relu=True): + if relu: + return self.relu(self.bn(self.linear(x))) + return self.bn(self.linear(x)) + + +def unique_shape(s_shapes): + n_s = [] + unique_shapes = [] + n = -1 + for s_shape in s_shapes: + if s_shape not in unique_shapes: + unique_shapes.append(s_shape) + n += 1 + n_s.append(n) + return n_s, unique_shapes + + +class LinearTransformTeacher(nn.Layer): + def __init__(self, qk_dim, t_shapes, keys): + super().__init__() + self.teacher_keys = keys + self.t_shapes = [[1] + t_i for t_i in t_shapes] + self.query_layer = nn.LayerList( + [LinearBNReLU(t_shape[1], qk_dim) for t_shape in self.t_shapes]) + + def forward(self, t_features_dict): + g_t = [t_features_dict[key] for key in self.teacher_keys] + bs = g_t[0].shape[0] + channel_mean = [f_t.mean(3).mean(2) for f_t in g_t] + spatial_mean = [] + for i in range(len(g_t)): + c, h, w = g_t[i].shape[1:] + spatial_mean.append(g_t[i].pow(2).mean(1).reshape([bs, h * w])) + query = paddle.stack( + [ + query_layer( + f_t, relu=False) + for f_t, query_layer in zip(channel_mean, self.query_layer) + ], + axis=1) + value = [F.normalize(f_s, axis=1) for f_s in spatial_mean] + return {"query": query, "value": value} + + +class LinearTransformStudent(nn.Layer): + def __init__(self, qk_dim, t_shapes, s_shapes, keys): + super().__init__() + self.student_keys = keys + self.t_shapes = [[1] + t_i for t_i in t_shapes] + self.s_shapes = [[1] + s_i for s_i in s_shapes] + self.t = len(self.t_shapes) + self.s = len(self.s_shapes) + self.qk_dim = qk_dim + self.n_t, self.unique_t_shapes = unique_shape(self.t_shapes) + self.relu = nn.ReLU() + self.samplers = nn.LayerList( + [Sample(t_shape) for t_shape in self.unique_t_shapes]) + self.key_layer = nn.LayerList([ + LinearBNReLU(s_shape[1], self.qk_dim) for s_shape in self.s_shapes + ]) + self.bilinear = LinearBNReLU(qk_dim, qk_dim * len(self.t_shapes)) + + def forward(self, s_features_dict): + g_s = [s_features_dict[key] for key in self.student_keys] + bs = g_s[0].shape[0] + channel_mean = [f_s.mean(3).mean(2) for f_s in g_s] + spatial_mean = [sampler(g_s, bs) for sampler in self.samplers] + + key = paddle.stack( + [ + key_layer(f_s) + for key_layer, f_s in zip(self.key_layer, channel_mean) + ], + axis=1).reshape([-1, self.qk_dim]) # Bs x h + bilinear_key = self.bilinear( + key, relu=False).reshape([bs, self.s, self.t, self.qk_dim]) + value = [F.normalize(s_m, axis=2) for s_m in 
spatial_mean] + return {"bilinear_key": bilinear_key, "value": value} + + +class Sample(nn.Layer): + def __init__(self, t_shape): + super().__init__() + self.t_N, self.t_C, self.t_H, self.t_W = t_shape + self.sample = nn.AdaptiveAvgPool2D((self.t_H, self.t_W)) + + def forward(self, g_s, bs): + g_s = paddle.stack( + [ + self.sample(f_s.pow(2).mean( + 1, keepdim=True)).reshape([bs, self.t_H * self.t_W]) + for f_s in g_s + ], + axis=1) + return g_s diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/__init__.py new file mode 100644 index 000000000..b2a6f46a0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/__init__.py @@ -0,0 +1,74 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .arcmargin import ArcMargin +from .cosmargin import CosMargin +from .circlemargin import CircleMargin +from .fc import FC +from .vehicle_neck import VehicleNeck +from paddle.nn import Tanh, Identity +from .bnneck import BNNeck +from .adamargin import AdaMargin +from .frfn_neck import FRFNNeck +from .metabnneck import MetaBNNeck +from .ml_decoder import MLDecoder + +__all__ = ['build_gear', 'add_ml_decoder_head'] + + +def build_gear(config): + support_dict = [ + 'ArcMargin', 'CosMargin', 'CircleMargin', 'FC', 'VehicleNeck', 'Tanh', + 'BNNeck', 'AdaMargin', 'FRFNNeck', 'MetaBNNeck' + ] + module_name = config.pop('name') + assert module_name in support_dict, Exception( + 'head only support {}'.format(support_dict)) + module_class = eval(module_name)(**config) + return module_class + + +def add_ml_decoder_head(model, config): + if 'class_num' not in config: + if hasattr(model, 'class_num'): + config['class_num'] = model.class_num + else: + raise AttributeError( + 'Please manually add parameter `class_num` ' + 'for MLDecoder in the config file.') + + # remove_layers: list of layer names that need to be deleted from backbone + if 'remove_layers' in config: + remove_layers = config.pop('remove_layers') + else: + remove_layers = ['avg_pool', 'flatten'] + for remove_layer in remove_layers: + if hasattr(model, remove_layer): + delattr(model, remove_layer) + setattr(model, remove_layer, Identity()) + else: + raise AttributeError( + f"{remove_layer} does not have attribute the model.") + + # replace_layer: layer name that need to be replaced in backbone + if 'replace_layer' in config: + replace_layer = config.pop('replace_layer') + else: + replace_layer = 'fc' + if hasattr(model, replace_layer): + delattr(model, replace_layer) + setattr(model, replace_layer, MLDecoder(**config)) + else: + raise AttributeError( + f"{replace_layer} does not have attribute the model.") diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/adamargin.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/adamargin.py new file mode 100644 index 000000000..98e963341 --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/adamargin.py @@ -0,0 +1,113 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This code is based on AdaFace(https://github.com/mk-minchul/AdaFace) +# Paper: AdaFace: Quality Adaptive Margin for Face Recognition +from paddle.nn import Layer +import math +import paddle + + +def l2_norm(input, axis=1): + norm = paddle.norm(input, 2, axis, True) + output = paddle.divide(input, norm) + return output + + +class AdaMargin(Layer): + def __init__( + self, + embedding_size=512, + class_num=70722, + m=0.4, + h=0.333, + s=64., + t_alpha=1.0, ): + super(AdaMargin, self).__init__() + self.classnum = class_num + kernel_weight = paddle.uniform( + [embedding_size, class_num], min=-1, max=1) + kernel_weight_norm = paddle.norm( + kernel_weight, p=2, axis=0, keepdim=True) + kernel_weight_norm = paddle.where(kernel_weight_norm > 1e-5, + kernel_weight_norm, + paddle.ones_like(kernel_weight_norm)) + kernel_weight = kernel_weight / kernel_weight_norm + self.kernel = self.create_parameter( + [embedding_size, class_num], + attr=paddle.nn.initializer.Assign(kernel_weight)) + + # initial kernel + # self.kernel.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5) + self.m = m + self.eps = 1e-3 + self.h = h + self.s = s + + # ema prep + self.t_alpha = t_alpha + self.register_buffer('t', paddle.zeros([1]), persistable=True) + self.register_buffer( + 'batch_mean', paddle.ones([1]) * 20, persistable=True) + self.register_buffer( + 'batch_std', paddle.ones([1]) * 100, persistable=True) + + def forward(self, embbedings, label): + if not self.training: + return embbedings + + norms = paddle.norm(embbedings, 2, 1, True) + embbedings = paddle.divide(embbedings, norms) + kernel_norm = l2_norm(self.kernel, axis=0) + cosine = paddle.mm(embbedings, kernel_norm) + cosine = paddle.clip(cosine, -1 + self.eps, + 1 - self.eps) # for stability + + safe_norms = paddle.clip(norms, min=0.001, max=100) # for stability + safe_norms = safe_norms.clone().detach() + + # update batchmean batchstd + with paddle.no_grad(): + mean = safe_norms.mean().detach() + std = safe_norms.std().detach() + self.batch_mean = mean * self.t_alpha + (1 - self.t_alpha + ) * self.batch_mean + self.batch_std = std * self.t_alpha + (1 - self.t_alpha + ) * self.batch_std + + margin_scaler = (safe_norms - self.batch_mean) / ( + self.batch_std + self.eps) # 66% between -1, 1 + margin_scaler = margin_scaler * self.h # 68% between -0.333 ,0.333 when h:0.333 + margin_scaler = paddle.clip(margin_scaler, -1, 1) + + # g_angular + m_arc = paddle.nn.functional.one_hot( + label.reshape([-1]), self.classnum) + g_angular = self.m * margin_scaler * -1 + m_arc = m_arc * g_angular + theta = paddle.acos(cosine) + theta_m = paddle.clip( + theta + m_arc, min=self.eps, max=math.pi - self.eps) + cosine = paddle.cos(theta_m) + + # g_additive + m_cos = paddle.nn.functional.one_hot( + label.reshape([-1]), self.classnum) + g_add = self.m + (self.m * margin_scaler) + 
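+        # margin_scaler is the feature norm standardized with the EMA batch statistics, scaled
+        # by h and clipped to [-1, 1]; it drives both margins: the angular margin g_angular
+        # applied above and the additive margin g_add applied at the one-hot positions below
+        # (cosine = cosine - m_cos * g_add), before the final scaling by self.s.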
m_cos = m_cos * g_add + cosine = cosine - m_cos + + # scale + scaled_cosine_m = cosine * self.s + return scaled_cosine_m diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/arcmargin.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/arcmargin.py new file mode 100644 index 000000000..6c72a71a2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/arcmargin.py @@ -0,0 +1,74 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1801.07698 + +import paddle +import paddle.nn as nn +import math + + +class ArcMargin(nn.Layer): + def __init__(self, + embedding_size, + class_num, + margin=0.5, + scale=80.0, + easy_margin=False): + super().__init__() + self.embedding_size = embedding_size + self.class_num = class_num + self.margin = margin + self.scale = scale + self.easy_margin = easy_margin + self.weight = self.create_parameter( + shape=[self.embedding_size, self.class_num], + is_bias=False, + default_initializer=paddle.nn.initializer.XavierNormal()) + + def forward(self, input, label=None): + input_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + input = paddle.divide(input, input_norm) + + weight_norm = paddle.sqrt( + paddle.sum(paddle.square(self.weight), axis=0, keepdim=True)) + weight = paddle.divide(self.weight, weight_norm) + + cos = paddle.matmul(input, weight) + if not self.training or label is None: + return cos + sin = paddle.sqrt(1.0 - paddle.square(cos) + 1e-6) + cos_m = math.cos(self.margin) + sin_m = math.sin(self.margin) + phi = cos * cos_m - sin * sin_m + + th = math.cos(self.margin) * (-1) + mm = math.sin(self.margin) * self.margin + if self.easy_margin: + phi = self._paddle_where_more_than(cos, 0, phi, cos) + else: + phi = self._paddle_where_more_than(cos, th, phi, cos - mm) + + one_hot = paddle.nn.functional.one_hot(label, self.class_num) + one_hot = paddle.squeeze(one_hot, axis=[1]) + output = paddle.multiply(one_hot, phi) + paddle.multiply( + (1.0 - one_hot), cos) + output = output * self.scale + return output + + def _paddle_where_more_than(self, target, limit, x, y): + mask = paddle.cast(x=(target > limit), dtype='float32') + output = paddle.multiply(mask, x) + paddle.multiply((1.0 - mask), y) + return output diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/bnneck.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/bnneck.py new file mode 100644 index 000000000..e7abb8921 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/bnneck.py @@ -0,0 +1,56 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +import paddle +import paddle.nn as nn + +from ..utils import get_param_attr_dict + + +class BNNeck(nn.Layer): + def __init__(self, num_features, **kwargs): + super().__init__() + weight_attr = paddle.ParamAttr( + initializer=paddle.nn.initializer.Constant(value=1.0)) + bias_attr = paddle.ParamAttr( + initializer=paddle.nn.initializer.Constant(value=0.0), + trainable=False) + + if 'weight_attr' in kwargs: + weight_attr = get_param_attr_dict(kwargs['weight_attr']) + + bias_attr = None + if 'bias_attr' in kwargs: + bias_attr = get_param_attr_dict(kwargs['bias_attr']) + + use_global_stats = None + if 'use_global_stats' in kwargs: + use_global_stats = get_param_attr_dict(kwargs['use_global_stats']) + + self.feat_bn = nn.BatchNorm1D( + num_features, + momentum=0.9, + epsilon=1e-05, + weight_attr=weight_attr, + bias_attr=bias_attr, + use_global_stats=use_global_stats) + + self.flatten = nn.Flatten() + + def forward(self, x): + x = self.flatten(x) + x = self.feat_bn(x) + return x diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/circlemargin.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/circlemargin.py new file mode 100644 index 000000000..c04d6618b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/circlemargin.py @@ -0,0 +1,61 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/2002.10857 + +import math +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class CircleMargin(nn.Layer): + def __init__(self, embedding_size, class_num, margin, scale): + super(CircleMargin, self).__init__() + self.scale = scale + self.margin = margin + self.embedding_size = embedding_size + self.class_num = class_num + + self.weight = self.create_parameter( + shape=[self.embedding_size, self.class_num], + is_bias=False, + default_initializer=paddle.nn.initializer.XavierNormal()) + + def forward(self, input, label): + feat_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + input = paddle.divide(input, feat_norm) + + weight_norm = paddle.sqrt( + paddle.sum(paddle.square(self.weight), axis=0, keepdim=True)) + weight = paddle.divide(self.weight, weight_norm) + + logits = paddle.matmul(input, weight) + if not self.training or label is None: + return logits + + alpha_p = paddle.clip(-logits.detach() + 1 + self.margin, min=0.) + alpha_n = paddle.clip(logits.detach() + self.margin, min=0.) 
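+        # Circle-loss weights: alpha_p / alpha_n grow with the gap between each logit and its
+        # optimum (O_p = 1 + margin for positives, O_n = -margin for negatives); delta_p and
+        # delta_n below are the corresponding decision margins from the paper (1 - m and m).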
+ delta_p = 1 - self.margin + delta_n = self.margin + + m_hot = F.one_hot(label.reshape([-1]), num_classes=logits.shape[1]) + + logits_p = alpha_p * (logits - delta_p) + logits_n = alpha_n * (logits - delta_n) + pre_logits = logits_p * m_hot + logits_n * (1 - m_hot) + pre_logits = self.scale * pre_logits + + return pre_logits diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/cosmargin.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/cosmargin.py new file mode 100644 index 000000000..d420c0ace --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/cosmargin.py @@ -0,0 +1,57 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/1801.09414 + +import paddle +import math +import paddle.nn as nn + + +class CosMargin(paddle.nn.Layer): + def __init__(self, embedding_size, class_num, margin=0.35, scale=64.0): + super(CosMargin, self).__init__() + self.scale = scale + self.margin = margin + self.embedding_size = embedding_size + self.class_num = class_num + + self.weight = self.create_parameter( + shape=[self.embedding_size, self.class_num], + is_bias=False, + default_initializer=paddle.nn.initializer.XavierNormal()) + + def forward(self, input, label): + label.stop_gradient = True + + input_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + input = paddle.divide(input, input_norm) + + weight_norm = paddle.sqrt( + paddle.sum(paddle.square(self.weight), axis=0, keepdim=True)) + weight = paddle.divide(self.weight, weight_norm) + + cos = paddle.matmul(input, weight) + if not self.training or label is None: + return cos + + cos_m = cos - self.margin + + one_hot = paddle.nn.functional.one_hot(label, self.class_num) + one_hot = paddle.squeeze(one_hot, axis=[1]) + output = paddle.multiply(one_hot, cos_m) + paddle.multiply( + (1.0 - one_hot), cos) + output = output * self.scale + return output diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/fc.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/fc.py new file mode 100644 index 000000000..622b0d37d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/fc.py @@ -0,0 +1,48 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
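# Illustrative sketch (not part of ppcls): unlike ArcMargin, the CosMargin head
# above subtracts the margin from the cosine itself (CosFace-style), so the
# target-class logit becomes s * (cos(theta) - m). Made-up numbers:
import math

theta, m, s = 0.9, 0.35, 64.0          # defaults used by CosMargin above
cos_t = math.cos(theta)
target_logit = s * (cos_t - m)         # what the one-hot mixing keeps for the label
other_logit = s * cos_t                # all remaining classes are left untouched
print(target_logit, other_logit)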
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +import paddle.nn as nn + +from ..utils import get_param_attr_dict + + +class FC(nn.Layer): + def __init__(self, embedding_size, class_num, **kwargs): + super(FC, self).__init__() + self.embedding_size = embedding_size + self.class_num = class_num + + weight_attr = paddle.ParamAttr( + initializer=paddle.nn.initializer.XavierNormal()) + if 'weight_attr' in kwargs: + weight_attr = get_param_attr_dict(kwargs['weight_attr']) + + bias_attr = None + if 'bias_attr' in kwargs: + bias_attr = get_param_attr_dict(kwargs['bias_attr']) + + self.fc = nn.Linear( + self.embedding_size, + self.class_num, + weight_attr=weight_attr, + bias_attr=bias_attr) + + def forward(self, input, label=None): + out = self.fc(input) + return out diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/frfn_neck.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/frfn_neck.py new file mode 100644 index 000000000..43eadbab4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/frfn_neck.py @@ -0,0 +1,32 @@ +import paddle.nn as nn + + +class Normalize(nn.Layer): + """ Ln normalization copied from + https://github.com/salesforce/CoMatch + """ + + def __init__(self, power=2): + super(Normalize, self).__init__() + self.power = power + + def forward(self, x): + norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power) + out = x.divide(norm) + return out + + +class FRFNNeck(nn.Layer): + def __init__(self, num_features, low_dim, **kwargs): + super(FRFNNeck, self).__init__() + self.l2norm = Normalize(2) + self.fc1 = nn.Linear(num_features, num_features) + self.relu_mlp = nn.LeakyReLU(negative_slope=0.1) + self.fc2 = nn.Linear(num_features, low_dim) + + def forward(self, x): + x = self.fc1(x) + x = self.relu_mlp(x) + x = self.fc2(x) + x = self.l2norm(x) + return x \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/identity_head.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/identity_head.py new file mode 100644 index 000000000..7d11e5742 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/identity_head.py @@ -0,0 +1,9 @@ +from paddle import nn + + +class IdentityHead(nn.Layer): + def __init__(self): + super(IdentityHead, self).__init__() + + def forward(self, x, label=None): + return {"features": x, "logits": None} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/metabnneck.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/metabnneck.py new file mode 100644 index 000000000..a27a7a852 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/metabnneck.py @@ -0,0 +1,122 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
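# Illustrative sketch (not part of ppcls): the Normalize layer in FRFNNeck above
# divides each row by its L-p norm (p=2 by default). Same arithmetic in plain
# Python for one made-up 3-d feature vector:
x = [3.0, 4.0, 0.0]
p = 2
norm = sum(v ** p for v in x) ** (1.0 / p)   # -> 5.0
print([v / norm for v in x])                  # -> [0.6, 0.8, 0.0], unit L2 norm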
+ +from __future__ import absolute_import, division, print_function + +from collections import defaultdict +import copy +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from ..utils import get_param_attr_dict + + +class MetaBN1D(nn.BatchNorm1D): + def forward(self, inputs, opt={}): + mode = opt.get("bn_mode", "general") if self.training else "eval" + if mode == "general": # update, but not apply running_mean/var + result = F.batch_norm(inputs, self._mean, self._variance, + self.weight, self.bias, self.training, + self._momentum, self._epsilon) + elif mode == "hold": # not update, not apply running_mean/var + result = F.batch_norm( + inputs, + paddle.mean( + inputs, axis=0), + paddle.var(inputs, axis=0), + self.weight, + self.bias, + self.training, + self._momentum, + self._epsilon) + elif mode == "eval": # fix and apply running_mean/var, + if self._mean is None: + result = F.batch_norm( + inputs, + paddle.mean( + inputs, axis=0), + paddle.var(inputs, axis=0), + self.weight, + self.bias, + True, + self._momentum, + self._epsilon) + else: + result = F.batch_norm(inputs, self._mean, self._variance, + self.weight, self.bias, False, + self._momentum, self._epsilon) + return result + + +class MetaBNNeck(nn.Layer): + def __init__(self, num_features, **kwargs): + super(MetaBNNeck, self).__init__() + weight_attr = paddle.ParamAttr( + initializer=paddle.nn.initializer.Constant(value=1.0)) + bias_attr = paddle.ParamAttr( + initializer=paddle.nn.initializer.Constant(value=0.0), + trainable=False) + + if 'weight_attr' in kwargs: + weight_attr = get_param_attr_dict(kwargs['weight_attr']) + + bias_attr = None + if 'bias_attr' in kwargs: + bias_attr = get_param_attr_dict(kwargs['bias_attr']) + + use_global_stats = None + if 'use_global_stats' in kwargs: + use_global_stats = get_param_attr_dict(kwargs['use_global_stats']) + + self.feat_bn = MetaBN1D( + num_features, + momentum=0.9, + epsilon=1e-05, + weight_attr=weight_attr, + bias_attr=bias_attr, + use_global_stats=use_global_stats) + self.flatten = nn.Flatten() + self.opt = {} + + def forward(self, x): + x = self.flatten(x) + x = self.feat_bn(x, self.opt) + return x + + def reset_opt(self): + self.opt = defaultdict() + + def setup_opt(self, opt): + """ + Arg: + opt (dict): Optional setting to change the behavior of MetaBIN during training. + It includes three settings which are `enable_inside_update`, `lr_gate` and `bn_mode`. + """ + self.check_opt(opt) + self.opt = copy.deepcopy(opt) + + @classmethod + def check_opt(cls, opt): + assert isinstance(opt, dict), \ + TypeError('Got the wrong type of `opt`. Please use `dict` type.') + + if opt.get('enable_inside_update', False) and 'lr_gate' not in opt: + raise RuntimeError('Missing `lr_gate` in opt.') + + assert isinstance(opt.get('lr_gate', 1.0), float), \ + TypeError('Got the wrong type of `lr_gate`. Please use `float` type.') + assert isinstance(opt.get('enable_inside_update', True), bool), \ + TypeError('Got the wrong type of `enable_inside_update`. Please use `bool` type.') + assert opt.get('bn_mode', "general") in ["general", "hold", "eval"], \ + TypeError('Got the wrong value of `bn_mode`.') diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/ml_decoder.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/ml_decoder.py new file mode 100644 index 000000000..7f24bac2f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/ml_decoder.py @@ -0,0 +1,124 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
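# Illustrative sketch (not part of ppcls): the `opt` dict accepted by
# MetaBNNeck.setup_opt above, shaped to satisfy check_opt(). Values here are
# made up; the MetaBIN training code supplies its own.
example_opt = {
    "bn_mode": "hold",             # one of "general", "hold", "eval"
    "enable_inside_update": True,  # if True, an "lr_gate" entry is required
    "lr_gate": 0.1,                # must be a float
}
# neck = MetaBNNeck(num_features=2048)
# neck.setup_opt(example_opt)   # validates the dict, then deep-copies it
# neck.reset_opt()              # clears it back to an empty mapping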
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.initializer import XavierNormal, Constant, Normal + +xavier_normal_ = XavierNormal() +normal_ = Normal +zero_ = Constant(value=0.0) + + +class MLDecoder(nn.Layer): + """ + ML-Decoder is an attention-based classification head, + which introduced by Tal Ridnik et al. in https://arxiv.org/pdf/2111.12933.pdf. + """ + + def __init__(self, + class_num=80, + in_channels=2048, + query_num=80, + embed_dim=768, + depth=1, + num_heads=8, + mlp_hidden_dim=2048, + dropout=0.1, + activation="relu", + freeze_query_embed=True, + remove_self_attn=True): + super().__init__() + self.class_num = class_num + self.in_channels = in_channels + + # 1 <= query_num <= class_num + query_num = min(max(query_num, 1), class_num) + + self.input_proj = nn.Conv2D( + in_channels=in_channels, + out_channels=embed_dim, + kernel_size=1, + stride=1) + + self.query_pos_embed = nn.Embedding( + num_embeddings=query_num, + embedding_dim=embed_dim) + if freeze_query_embed: + self.query_pos_embed.weight.stop_gradient = True + + decoder_layer = nn.TransformerDecoderLayer( + d_model=embed_dim, + nhead=num_heads, + dim_feedforward=mlp_hidden_dim, + dropout=dropout, + activation=activation, + attn_dropout=dropout, + act_dropout=dropout) + if remove_self_attn: + del decoder_layer.self_attn + decoder_layer.self_attn = self.self_attn_identity + self.decoder = nn.TransformerDecoder( + decoder_layer=decoder_layer, + num_layers=depth) + + group_factor = math.ceil(class_num / query_num) + self.group_conv = nn.Conv2D( + in_channels=query_num * embed_dim, + out_channels=query_num * group_factor, + kernel_size=1, + stride=1, + groups=query_num) + + self._init_weights() + + def _init_weights(self): + normal_(self.query_pos_embed.weight) + xavier_normal_(self.group_conv.weight) + zero_(self.group_conv.bias) + + @staticmethod + def self_attn_identity(*args): + return args[0] + + def group_fc_pool(self, x): + x = x.flatten(1)[..., None, None] + x = self.group_conv(x) + x = x.flatten(1)[:, :self.class_num] + return x + + def forward(self, x): + if x.ndim == 2: + assert x.shape[1] % self.in_channels == 0, "Wrong `in_channels` value!!!" + x = x.reshape([x.shape[0], self.in_channels, -1, 1]) + elif x.ndim == 3: + assert x.shape[1] == self.in_channels, "Wrong input shape!!!" + x = x.unsqueeze(-1) + else: + assert x.ndim == 4 and x.shape[1] == self.in_channels, "Wrong input shape!!!" 
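        # At this point x is guaranteed to be NCHW-like: 2-D inputs were reshaped
        # to [N, in_channels, -1, 1], 3-D inputs got a trailing spatial axis, and
        # 4-D inputs were checked against in_channels. The 1x1 input_proj below
        # maps the channel dim to embed_dim before the transformer decoder runs.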
+ + feat_proj = F.relu(self.input_proj(x)) + feat_flatten = feat_proj.flatten(2).transpose([0, 2, 1]) + + query_pos_embed = self.query_pos_embed.weight[None].tile([x.shape[0], 1, 1]) + out_embed = self.decoder(query_pos_embed, feat_flatten) + + logit = self.group_fc_pool(out_embed) + return logit diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/vehicle_neck.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/vehicle_neck.py new file mode 100644 index 000000000..05f4e333f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/gears/vehicle_neck.py @@ -0,0 +1,52 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +import paddle +import paddle.nn as nn + + +class VehicleNeck(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros', + weight_attr=None, + bias_attr=None, + data_format='NCHW'): + super().__init__() + self.conv = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + padding_mode=padding_mode, + weight_attr=weight_attr, + bias_attr=weight_attr, + data_format=data_format) + self.flatten = nn.Flatten() + + def forward(self, x): + x = self.conv(x) + x = self.flatten(x) + return x diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/slim/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/slim/__init__.py new file mode 100644 index 000000000..734bfefad --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/slim/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .prune import prune_model +from .quant import quantize_model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/slim/prune.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/slim/prune.py new file mode 100644 index 000000000..59cd411a1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/slim/prune.py @@ -0,0 +1,64 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
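# Illustrative sketch (not part of ppcls): how MLDecoder's grouped "pool" above
# sizes its 1x1 group convolution. With the defaults class_num=80, query_num=80
# the grouping is trivial; the second, made-up pair shows the trimming step.
import math

for class_num, query_num in [(80, 80), (1000, 100)]:   # second pair is hypothetical
    group_factor = math.ceil(class_num / query_num)
    conv_out = query_num * group_factor                 # channels produced by group_conv
    print(class_num, query_num, group_factor, conv_out,
          "-> keep first", class_num, "channels")       # forward() slices [:, :class_num]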
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +import paddle +from ...utils import logger + + +def prune_model(config, model): + if config.get("Slim", False) and config["Slim"].get("prune", False): + import paddleslim + prune_method_name = config["Slim"]["prune"]["name"].lower() + assert prune_method_name in [ + "fpgm", "l1_norm" + ], "The prune methods only support 'fpgm' and 'l1_norm'" + if prune_method_name == "fpgm": + model.pruner = paddleslim.dygraph.FPGMFilterPruner( + model, [1] + config["Global"]["image_shape"]) + else: + model.pruner = paddleslim.dygraph.L1NormFilterPruner( + model, [1] + config["Global"]["image_shape"]) + + # prune model + _prune_model(config, model) + else: + model.pruner = None + + +def _prune_model(config, model): + from paddleslim.analysis import dygraph_flops as flops + logger.info("FLOPs before pruning: {}GFLOPs".format( + flops(model, [1] + config["Global"]["image_shape"]) / 1e9)) + model.eval() + + params = [] + for sublayer in model.sublayers(): + for param in sublayer.parameters(include_sublayers=False): + if isinstance(sublayer, paddle.nn.Conv2D): + params.append(param.name) + ratios = {} + for param in params: + ratios[param] = config["Slim"]["prune"]["pruned_ratio"] + plan = model.pruner.prune_vars(ratios, [0]) + + logger.info("FLOPs after pruning: {}GFLOPs; pruned ratio: {}".format( + flops(model, [1] + config["Global"]["image_shape"]) / 1e9, + plan.pruned_flops)) + + for param in model.parameters(): + if "conv2d" in param.name: + logger.info("{}\t{}".format(param.name, param.shape)) + + model.train() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/slim/quant.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/slim/quant.py new file mode 100644 index 000000000..3e31d9d53 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/slim/quant.py @@ -0,0 +1,63 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +import paddle +from ...utils import logger + +QUANT_CONFIG = { + # weight preprocess type, default is None and no preprocessing is performed. + 'weight_preprocess_type': None, + # activation preprocess type, default is None and no preprocessing is performed. 
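# Illustrative sketch (not part of ppcls): the config shape that prune_model()
# above expects. Values are made up; only the keys are taken from the code.
example_config = {
    "Global": {"image_shape": [3, 224, 224]},   # prepended with a batch dim [1, ...]
    "Slim": {
        "prune": {
            "name": "fpgm",          # or "l1_norm"; anything else trips the assert
            "pruned_ratio": 0.3,     # applied to every Conv2D parameter found
        }
    },
}
# prune_model(example_config, model)   # requires paddleslim to be installed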
+ 'activation_preprocess_type': None, + # weight quantize type, default is 'channel_wise_abs_max' + 'weight_quantize_type': 'channel_wise_abs_max', + # activation quantize type, default is 'moving_average_abs_max' + 'activation_quantize_type': 'moving_average_abs_max', + # weight quantize bit num, default is 8 + 'weight_bits': 8, + # activation quantize bit num, default is 8 + 'activation_bits': 8, + # data type after quantization, such as 'uint8', 'int8', etc. default is 'int8' + 'dtype': 'int8', + # window size for 'range_abs_max' quantization. default is 10000 + 'window_size': 10000, + # The decay coefficient of moving average, default is 0.9 + 'moving_rate': 0.9, + # for dygraph quantization, layers of type in quantizable_layer_type will be quantized + 'quantizable_layer_type': ['Conv2D', 'Linear'], +} + + +def quantize_model(config, model, mode="train"): + if config.get("Slim", False) and config["Slim"].get("quant", False): + from paddleslim.dygraph.quant import QAT + assert config["Slim"]["quant"]["name"].lower( + ) == 'pact', 'Only PACT quantization method is supported now' + QUANT_CONFIG["activation_preprocess_type"] = "PACT" + if mode in ["infer", "export"]: + QUANT_CONFIG['activation_preprocess_type'] = None + + # for re-parameterization nets, convert to reparameterized model first + for layer in model.sublayers(): + if hasattr(layer, "re_parameterize"): + layer.re_parameterize() + + model.quanter = QAT(config=QUANT_CONFIG) + model.quanter.quantize(model) + logger.info("QAT model summary:") + paddle.summary(model, (1, 3, 224, 224)) + else: + model.quanter = None + return diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/utils.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/utils.py new file mode 100644 index 000000000..785b7fbbe --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/arch/utils.py @@ -0,0 +1,99 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import types +import paddle +from difflib import SequenceMatcher + +from . 
import backbone +from typing import Any, Dict, Union + + +def get_architectures(): + """ + get all of model architectures + """ + names = [] + for k, v in backbone.__dict__.items(): + if isinstance(v, (types.FunctionType, six.class_types)): + names.append(k) + return names + + +def get_blacklist_model_in_static_mode(): + from ppcls.arch.backbone import distilled_vision_transformer + from ppcls.arch.backbone import vision_transformer + blacklist = distilled_vision_transformer.__all__ + vision_transformer.__all__ + return blacklist + + +def similar_architectures(name='', names=[], thresh=0.1, topk=10): + """ + inferred similar architectures + """ + scores = [] + for idx, n in enumerate(names): + if n.startswith('__'): + continue + score = SequenceMatcher(None, n.lower(), name.lower()).quick_ratio() + if score > thresh: + scores.append((idx, score)) + scores.sort(key=lambda x: x[1], reverse=True) + similar_names = [names[s[0]] for s in scores[:min(topk, len(scores))]] + return similar_names + + +def get_param_attr_dict(ParamAttr_config: Union[None, bool, Dict[str, Dict]] + ) -> Union[None, bool, paddle.ParamAttr]: + """parse ParamAttr from an dict + + Args: + ParamAttr_config (Union[None, bool, Dict[str, Dict]]): ParamAttr configure + + Returns: + Union[None, bool, paddle.ParamAttr]: Generated ParamAttr + """ + if ParamAttr_config is None: + return None + if isinstance(ParamAttr_config, bool): + return ParamAttr_config + ParamAttr_dict = {} + if 'initializer' in ParamAttr_config: + initializer_cfg = ParamAttr_config.get('initializer') + if 'name' in initializer_cfg: + initializer_name = initializer_cfg.pop('name') + ParamAttr_dict['initializer'] = getattr( + paddle.nn.initializer, initializer_name)(**initializer_cfg) + else: + raise ValueError(f"'name' must specified in initializer_cfg") + if 'learning_rate' in ParamAttr_config: + # NOTE: only support an single value now + learning_rate_value = ParamAttr_config.get('learning_rate') + if isinstance(learning_rate_value, (int, float)): + ParamAttr_dict['learning_rate'] = learning_rate_value + else: + raise ValueError( + f"learning_rate_value must be float or int, but got {type(learning_rate_value)}" + ) + if 'regularizer' in ParamAttr_config: + regularizer_cfg = ParamAttr_config.get('regularizer') + if 'name' in regularizer_cfg: + # L1Decay or L2Decay + regularizer_name = regularizer_cfg.pop('name') + ParamAttr_dict['regularizer'] = getattr( + paddle.regularizer, regularizer_name)(**regularizer_cfg) + else: + raise ValueError(f"'name' must specified in regularizer_cfg") + return paddle.ParamAttr(**ParamAttr_dict) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Attr/PPLCNet_x1_0_pedestrian_attribute.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Attr/PPLCNet_x1_0_pedestrian_attribute.yaml new file mode 100644 index 000000000..0aa50f88d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Attr/PPLCNet_x1_0_pedestrian_attribute.yaml @@ -0,0 +1,148 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "PPLCNet_x1_0" + pretrained: True + use_ssld: True + class_num: 26 + + +# loss function config for traing/eval process +Loss: + Train: + - 
MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pedestrian_attribute/data" + cls_label_path: "dataset/pedestrian_attribute/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - TimmAutoAugment: + prob: 0.8 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [192, 256] + - Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.4 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pedestrian_attribute/data" + cls_label_path: "dataset/pedestrian_attribute/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: MultiLabelThreshOutput + threshold: 0.5 + class_id_map_file: ppcls/utils/pedestrian_attribute_label_list.txt + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Attr/PPLCNet_x1_0_vehicle_attribute.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Attr/PPLCNet_x1_0_vehicle_attribute.yaml new file mode 100644 index 000000000..c9a902562 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Attr/PPLCNet_x1_0_vehicle_attribute.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 256] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "PPLCNet_x1_0" + pretrained: True + class_num: 19 + use_ssld: True + lr_mult_list: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0125 + 
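# Illustrative sketch (not taken from this config's implementation): the rough
# shape a Cosine schedule with linear warmup typically follows. The exact
# ppcls/Paddle scheduler may differ in details; the numbers mirror this file.
import math

base_lr, warmup_epochs, total_epochs = 0.0125, 5, 30

def approx_lr(epoch):
    if epoch < warmup_epochs:                       # linear ramp from 0 to base_lr
        return base_lr * (epoch + 1) / warmup_epochs
    progress = (epoch - warmup_epochs) / max(total_epochs - warmup_epochs, 1)
    return 0.5 * base_lr * (1 + math.cos(math.pi * progress))

print([round(approx_lr(e), 5) for e in range(total_epochs)])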
warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [256, 192] + - Padv2: + size: [276, 212] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [256, 192] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/test_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: ./deploy/images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: MultiLabelThreshOutput + threshold: 0.5 + class_id_map_file: ppcls/utils/vehicle_attribute_label_list.txt + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Attr/StrongBaselineAttr.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Attr/StrongBaselineAttr.yaml new file mode 100644 index 000000000..2324015d6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Attr/StrongBaselineAttr.yaml @@ -0,0 +1,113 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "ResNet50" + pretrained: True + class_num: 26 + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Adam + lr: + name: Piecewise + decay_epochs: [12, 18, 24, 28] + values: [0.0001, 0.00001, 0.000001, 0.0000001] + regularizer: + name: 'L2' + coeff: 0.0005 + clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/attribute/data/" + cls_label_path: "dataset/attribute/trainval.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + 
size: [192, 256] + - Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/attribute/data/" + cls_label_path: "dataset/attribute/test.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + +Metric: + Eval: + - ATTRMetric: diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CAE/cae_base_patch16_224_finetune.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CAE/cae_base_patch16_224_finetune.yaml new file mode 100644 index 000000000..899deab20 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CAE/cae_base_patch16_224_finetune.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 20 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: cae_base_patch16_224 + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.1 + attn_drop_rate: 0.0 + + use_mean_pooling: True + init_scale: 0.001 + use_rel_pos_bias: True + use_abs_pos_emb: False + init_values: 0.1 + lin_probe: False + + sin_pos_emb: True + + enable_linear_eval: False + model_key: model|module|state_dict + model_ema: + enable_model_ema: False + model_ema_decay: 0.9999 + model_ema_force_cpu: False + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - SoftTargetCrossEntropy: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamWDL + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + layerwise_decay: 0.65 + lr: + name: Cosine + learning_rate: 0.001 + eta_min: 1e-6 + warmup_epoch: 10 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + batch_transform_ops: + - MixupCutmixHybrid: + mixup_alpha: 0.8 + cutmix_alpha: 1.0 + switch_prob: 0.5 + num_classes: 1000 + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandomResizedCrop: + size: 224 + - RandomHorizontalFlip: + prob: 0.5 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [ 0.5, 0.5, 0.5 ] + std: [ 0.5, 0.5, 0.5 ] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 16 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + 
cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [ 0.5, 0.5, 0.5 ] + std: [ 0.5, 0.5, 0.5 ] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 16 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CAE/cae_large_patch16_224_finetune.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CAE/cae_large_patch16_224_finetune.yaml new file mode 100644 index 000000000..a7fbe9002 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CAE/cae_large_patch16_224_finetune.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 20 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: cae_large_patch16_224 + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.2 + attn_drop_rate: 0.0 + + use_mean_pooling: True + init_scale: 0.001 + use_rel_pos_bias: True + use_abs_pos_emb: False + init_values: 0.1 + lin_probe: False + + sin_pos_emb: True + + enable_linear_eval: False + model_key: model|module|state_dict + model_ema: + enable_model_ema: False + model_ema_decay: 0.9999 + model_ema_force_cpu: False + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - SoftTargetCrossEntropy: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamWDL + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + layerwise_decay: 0.75 + lr: + name: Cosine + learning_rate: 0.001 + eta_min: 1e-6 + warmup_epoch: 10 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + batch_transform_ops: + - MixupCutmixHybrid: + mixup_alpha: 0.8 + cutmix_alpha: 1.0 + switch_prob: 0.5 + num_classes: 1000 + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandomResizedCrop: + size: 224 + - RandomHorizontalFlip: + prob: 0.5 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [ 0.5, 0.5, 0.5 ] + std: [ 0.5, 0.5, 0.5 ] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 16 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [ 0.5, 0.5, 0.5 ] + std: [ 0.5, 0.5, 0.5 ] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 16 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CLIP/CLIP_vit_base_patch16_224_finetune.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CLIP/CLIP_vit_base_patch16_224_finetune.yaml new file mode 100644 index 000000000..aaddd1b15 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CLIP/CLIP_vit_base_patch16_224_finetune.yaml @@ -0,0 +1,162 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 1 + epochs: 50 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O2 + use_fp16_test: True + +# model architecture +Arch: + name: CLIP_vit_base_patch16_224 + class_num: 1000 + return_embed: False + pretrained: True + # fused op can be used in AMP O2 mode only + use_fused_attn: False + use_fused_linear: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamWDL + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + layerwise_decay: 0.6 + filter_bias_and_bn: True + lr: + name: Cosine + learning_rate: 0.0003 + eta_min: 1e-6 + warmup_epoch: 10 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: True + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 224 + interpolation: bicubic + backend: pil 
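# Illustrative sketch (not from this repo): roughly how an AMP block like the
# one above (scale_loss: 128.0, level: O2) maps onto Paddle's dynamic-graph AMP
# API. The ppcls engine wires this up itself; this is only a toy stand-in model.
import paddle

model = paddle.nn.Linear(8, 2)                     # stand-in for the real backbone
opt = paddle.optimizer.Momentum(parameters=model.parameters(), learning_rate=0.01)
model, opt = paddle.amp.decorate(models=model, optimizers=opt, level="O2")
scaler = paddle.amp.GradScaler(init_loss_scaling=128.0)   # cf. scale_loss above

x = paddle.randn([4, 8])
with paddle.amp.auto_cast(level="O2"):
    loss = model(x).mean()
scaled = scaler.scale(loss)
scaled.backward()
scaler.minimize(opt, scaled)                       # unscales, steps, updates the scale
opt.clear_grad()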
+ - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 224 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CLIP/CLIP_vit_large_patch14_224_finetune.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CLIP/CLIP_vit_large_patch14_224_finetune.yaml new file mode 100644 index 000000000..c9c236799 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/CLIP/CLIP_vit_large_patch14_224_finetune.yaml @@ -0,0 +1,162 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 1 + epochs: 50 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O2 + use_fp16_test: True + +# model architecture +Arch: + name: CLIP_vit_large_patch14_224 + class_num: 1000 + return_embed: False + pretrained: True + # fused op can be used in AMP O2 mode only + use_fused_attn: False + use_fused_linear: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamWDL + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + layerwise_decay: 0.6 + filter_bias_and_bn: True + lr: + name: Cosine + learning_rate: 0.0003 + eta_min: 1e-6 + warmup_epoch: 10 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: True + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 224 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: 
[0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 224 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Cartoonface/ResNet50_icartoon.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Cartoonface/ResNet50_icartoon.yaml new file mode 100644 index 000000000..3d1b99378 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Cartoonface/ResNet50_icartoon.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_mode: "retrieval" + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + image_shape: [3, 224, 224] + infer_imgs: + save_inference_dir: "./inference" + feature_normalize: True + +Arch: + name: "RecModel" + Backbone: + name: "ResNet50" + pretrained: True + BackboneStopLayer: + name: "flatten" + output_dim: 2048 + Head: + name: "FC" + class_num: 5013 + embedding_size: 2048 + # margin: 0.5 + # scale: 80 + infer_output_key: "features" + infer_add_softmax: "false" + +Loss: + Train: + - CELoss: + weight: 1.0 + # - TripletLoss: + # margin: 0.1 + # weight: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + +DataLoader: + Train: + dataset: + name: ICartoonDataset + image_root: "./dataset/iCartoonFace" + cls_label_path: "./dataset/iCartoonFace/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + #num_instances: 2 + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + + Eval: + Query: + dataset: + name: ICartoonDataset + image_root: "./dataset/iCartoonFace" + cls_label_path: "./dataset/iCartoonFace/query.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + + Gallery: + dataset: + name: ICartoonDataset + image_root: "./dataset/iCartoonFace" + cls_label_path: "./dataset/iCartoonFace/gallery.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - 
NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - Recallk: + topk: [1] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/DeepHash/DCH.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/DeepHash/DCH.yaml new file mode 100644 index 000000000..363d7b406 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/DeepHash/DCH.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 15 + eval_during_train: True + eval_interval: 15 + epochs: 150 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + use_dali: False + to_static: False + + #feature postprocess + feature_normalize: False + feature_binarize: "sign" + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + is_rec: True + + Backbone: + name: AlexNet + pretrained: True + class_num: 48 + +# loss function config for train/eval process +Loss: + Train: + - DCHLoss: + weight: 1.0 + gamma: 20.0 + _lambda: 0.1 + n_class: 10 + Eval: + - DCHLoss: + weight: 1.0 + gamma: 20.0 + _lambda: 0.1 + n_class: 10 + +Optimizer: + name: SGD + lr: + name: Piecewise + learning_rate: 0.005 + decay_epochs: [200] + values: [0.005, 0.0005] + regularizer: + name: 'L2' + coeff: 0.00001 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR10/ + cls_label_path: ./dataset/CIFAR10/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR10/ + cls_label_path: ./dataset/CIFAR10/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR10/ + cls_label_path: ./dataset/CIFAR10/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - mAP: {} + - Recallk: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/DeepHash/DSHSD.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/DeepHash/DSHSD.yaml new file mode 100644 index 000000000..e5b45d64a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/DeepHash/DSHSD.yaml @@ -0,0 +1,142 @@ +# global configs 
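# Illustrative sketch (not part of ppcls): these DeepHash configs keep raw 48-d
# features (feature_normalize: False) and binarize them with sign() at retrieval
# time (feature_binarize: "sign"). Toy version of that post-processing step:
import paddle

features = paddle.randn([4, 48])   # stand-in for the 48-d AlexNet outputs
codes = paddle.sign(features)      # +1 / -1 entries, one 48-bit hash code per image
print(codes.shape)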
+Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 15 + eval_during_train: True + eval_interval: 15 + epochs: 150 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + use_dali: False + to_static: False + + #feature postprocess + feature_normalize: False + feature_binarize: "sign" + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + is_rec: True + + Backbone: + name: AlexNet + pretrained: True + class_num: 48 + Neck: + name: Tanh + Head: + name: FC + class_num: 10 + embedding_size: 48 + +# loss function config for train/eval process +Loss: + Train: + - DSHSDLoss: + weight: 1.0 + alpha: 0.05 + Eval: + - DSHSDLoss: + weight: 1.0 + alpha: 0.05 + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Piecewise + learning_rate: 0.00001 + decay_epochs: [200] + values: [0.00001, 0.000001] + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR10/ + cls_label_path: ./dataset/CIFAR10/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR10/ + cls_label_path: ./dataset/CIFAR10/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR10/ + cls_label_path: ./dataset/CIFAR10/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - mAP: {} + - Recallk: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/DeepHash/LCDSH.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/DeepHash/LCDSH.yaml new file mode 100644 index 000000000..5a3349d9d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/DeepHash/LCDSH.yaml @@ -0,0 +1,138 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 15 + eval_during_train: True + eval_interval: 15 + epochs: 150 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + use_dali: False + to_static: False + + #feature postprocess + feature_normalize: False + feature_binarize: "sign" + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + is_rec: True + + Backbone: + name: AlexNet + pretrained: True + class_num: 48 + +# loss function config for 
train/eval process +Loss: + Train: + - LCDSHLoss: + weight: 1.0 + _lambda: 3 + n_class: 10 + Eval: + - LCDSHLoss: + weight: 1.0 + _lambda: 3 + n_class: 10 + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Piecewise + learning_rate: 0.00001 + decay_epochs: [200] + values: [0.00001, 0.000001] + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR10/ + cls_label_path: ./dataset/CIFAR10/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR10/ + cls_label_path: ./dataset/CIFAR10/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR10/ + cls_label_path: ./dataset/CIFAR10/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - mAP: {} + - Recallk: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Face_Recognition/FaceRecognition_ArcFace_MobileFaceNet.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Face_Recognition/FaceRecognition_ArcFace_MobileFaceNet.yaml new file mode 100644 index 000000000..e0045ac51 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Face_Recognition/FaceRecognition_ArcFace_MobileFaceNet.yaml @@ -0,0 +1,128 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: output/MobileFaceNet + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 25 + print_batch_step: 20 + use_visualdl: False + eval_mode: face_recognition + retrieval_feature_from: backbone + flip_test: True + feature_normalize: False + re_ranking: False + use_dali: False + # used for static mode and model export + image_shape: [3, 112, 112] + save_inference_dir: ./inference + +AMP: + scale_loss: 27648 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: MobileFaceNet + Head: + name: ArcMargin + embedding_size: 128 + class_num: 93431 + margin: 0.5 + scale: 64 +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 4e-3 # lr 4e-3 for total_batch_size 1024 + eta_min: 1e-6 + warmup_epoch: 1 + warmup_start_lr: 0 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + 
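(Aside: the ArcMargin head configured above for face recognition — embedding_size: 128, class_num: 93431, margin: 0.5, scale: 64 — applies an additive angular margin to the target-class logit before softmax. The NumPy sketch below only illustrates that formula; it is not ppcls's actual ArcMargin layer, and the function and argument names are invented for the example.)

import numpy as np

def arc_margin_logits(embeddings, weights, labels, margin=0.5, scale=64.0):
    """Illustrative additive-angular-margin (ArcFace-style) logits.

    embeddings: (N, D) features, weights: (C, D) class centers,
    labels: (N,) integer class ids. Both are L2-normalized so their dot
    product is cos(theta); the true-class angle is shifted by `margin`
    and everything is rescaled by `scale`."""
    emb = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
    w = weights / np.linalg.norm(weights, axis=1, keepdims=True)
    cos = np.clip(emb @ w.T, -1.0, 1.0)              # (N, C) cosine similarities
    theta = np.arccos(cos)
    margins = np.zeros_like(cos)
    margins[np.arange(len(labels)), labels] = margin  # margin only on the true class
    return scale * np.cos(theta + margins)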
image_root: dataset/MS1M_v3/ + cls_label_path: dataset/MS1M_v3/label.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: cv2 + - RandFlipImage: + flip_code: 1 + - ResizeImage: + size: [112, 112] + return_numpy: False + interpolation: bilinear + backend: cv2 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: FiveFaceEvalDataset + val_data_path: dataset/MS1M_v3/ + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: cv2 + - ResizeImage: + size: [112, 112] + return_numpy: False + interpolation: bilinear + backend: cv2 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - FaceAccOnFiveDatasets: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Face_Recognition/FaceRecognition_ArcFace_ResNet50.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Face_Recognition/FaceRecognition_ArcFace_ResNet50.yaml new file mode 100644 index 000000000..fcfd0cd87 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Face_Recognition/FaceRecognition_ArcFace_ResNet50.yaml @@ -0,0 +1,131 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: output/ResNet50_face + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 25 + print_batch_step: 20 + use_visualdl: False + eval_mode: face_recognition + retrieval_feature_from: backbone + flip_test: True + feature_normalize: False + re_ranking: False + use_dali: False + # used for static mode and model export + image_shape: [3, 112, 112] + save_inference_dir: ./inference + +AMP: + scale_loss: 27648.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: ResNet50 + max_pool: False + stride_list: [1, 2, 2, 2, 2] + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 93431 + margin: 0.5 + scale: 64 +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 4e-3 # lr 4e-3 for total_batch_size 1024 + eta_min: 1e-6 + warmup_epoch: 1 + warmup_start_lr: 0 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: dataset/MS1M_v3/ + cls_label_path: dataset/MS1M_v3/label.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: cv2 + - RandFlipImage: + flip_code: 1 + - ResizeImage: + size: [112, 112] + return_numpy: False + interpolation: bilinear + backend: cv2 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: FiveFaceEvalDataset + val_data_path: 
dataset/MS1M_v3/ + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: cv2 + - ResizeImage: + size: [112, 112] + return_numpy: False + interpolation: bilinear + backend: cv2 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - BestAccOnFiveDatasets: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/Gallery2FC_PPLCNet_x2_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/Gallery2FC_PPLCNet_x2_5.yaml new file mode 100644 index 000000000..fbaefdcf5 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/Gallery2FC_PPLCNet_x2_5.yaml @@ -0,0 +1,51 @@ +# global configs +Global: + pretrained_model: ./pretrained/general_PPLCNet_x2_5_pretrained_v1.0_quant + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference/general_PPLCNet_x2_5_quant/inference + +# for quantizaiton or prune model +Slim: + ## for prune + quant: + name: pact + +# model architecture +Arch: + name: RecModel + + Backbone: + name: PPLCNet_x2_5 + pretrained: False + use_ssld: True + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 1280 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 185341 + margin: 0.2 + scale: 30 + +# indexing engine config +IndexProcess: + image_root: "./drink_dataset_v1.0/gallery/" + data_file: "./drink_dataset_v1.0/gallery/drink_label.txt" + delimiter: "\t" + batch_size: 2 + transform_ops: + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [ 0.485, 0.456, 0.406 ] + std: [ 0.229, 0.224, 0.225 ] + order: '' + - ToCHWImage: diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml new file mode 100644 index 000000000..70daa639b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml @@ -0,0 +1,148 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + use_dali: False + to_static: False + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: PPLCNet_x2_5 + pretrained: True + use_ssld: True + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 1280 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 185341 + margin: 0.2 + scale: 30 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + warmup_epoch: 5 + regularizer: + name: "L2" + coeff: 0.00001 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ 
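(Aside: the Optimizer block of GeneralRecognition_PPLCNet_x2_5.yaml pairs Momentum with a Cosine schedule — learning_rate: 0.04, warmup_epoch: 5, over epochs: 100. The sketch below approximates the shape of that schedule per epoch; the real ppcls scheduler steps per iteration and its exact warmup formula may differ, and the function name here is illustrative only.)

import math

def cosine_lr_with_warmup(epoch, base_lr=0.04, total_epochs=100,
                          warmup_epochs=5, warmup_start_lr=0.0, eta_min=0.0):
    """Rough per-epoch value of a linear-warmup + cosine-decay schedule:
    warm up to base_lr over warmup_epochs, then decay toward eta_min."""
    if epoch < warmup_epochs:
        return warmup_start_lr + (base_lr - warmup_start_lr) * (epoch + 1) / warmup_epochs
    progress = (epoch - warmup_epochs) / max(1, total_epochs - warmup_epochs)
    return eta_min + 0.5 * (base_lr - eta_min) * (1 + math.cos(math.pi * progress))

# e.g. epoch 0 -> 0.008, epoch 5 -> 0.04, epoch 99 -> close to 0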
+ cls_label_path: ./dataset/train_reg_all_data.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_binary.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_binary.yaml new file mode 100644 index 000000000..728942fe3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_binary.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + use_dali: False + to_static: False + + #feature postprocess + feature_normalize: False + feature_binarize: "sign" + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: PPLCNet_x2_5_Tanh + pretrained: True + use_ssld: True + class_num: 512 + Head: + name: FC + embedding_size: 512 + class_num: 185341 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/all_data + cls_label_path: ./dataset/all_data/train_reg_all_data.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: 
True + + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_dml.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_dml.yaml new file mode 100644 index 000000000..b6c45363b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_dml.yaml @@ -0,0 +1,188 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: true + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + use_dali: False + to_static: False + +# model architecture +Arch: + name: "DistillationModel" + infer_output_key: features + infer_add_softmax: False + is_rec: True + infer_model_name: "Student" + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - False + - False + models: + - Teacher: + name: RecModel + infer_output_key: features + infer_add_softmax: False + Backbone: + name: PPLCNet_x2_5 + pretrained: True + use_ssld: True + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 1280 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 185341 + margin: 0.2 + scale: 30 + - Student: + name: RecModel + infer_output_key: features + infer_add_softmax: False + Backbone: + name: PPLCNet_x2_5 + pretrained: True + use_ssld: True + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 1280 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 185341 + margin: 0.2 + scale: 30 + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + key: "logits" + model_names: ["Student", "Teacher"] + - DistillationDMLLoss: + weight: 1.0 + key: "logits" + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.02 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: 
./dataset/train_reg_all_data.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_udml.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_udml.yaml new file mode 100644 index 000000000..bcaea03b8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_udml.yaml @@ -0,0 +1,193 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: true + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + use_dali: False + to_static: False + +# model architecture +Arch: + name: "DistillationModel" + infer_output_key: features + infer_add_softmax: False + is_rec: True + infer_model_name: "Student" + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - False + - False + models: + - Teacher: + name: RecModel + infer_output_key: features + infer_add_softmax: False + Backbone: + name: PPLCNet_x2_5 + pretrained: True + use_ssld: True + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 1280 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 185341 + margin: 0.2 + scale: 30 + - Student: + name: RecModel + infer_output_key: features + infer_add_softmax: False + Backbone: + name: PPLCNet_x2_5 + pretrained: True + use_ssld: True + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 1280 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 185341 + margin: 0.2 + scale: 30 + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + key: "logits" + model_names: ["Student", "Teacher"] + - DistillationDMLLoss: + weight: 1.0 + 
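(Aside: in the *_dml.yaml and *_udml.yaml configs the DistillationDMLLoss term trains Student and Teacher on each other's predictions. Conceptually it is a symmetric KL divergence between the two softmax outputs, sketched below in NumPy; the exact reduction in ppcls may differ, and the helper names are invented for the example.)

import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def dml_loss(student_logits, teacher_logits, eps=1e-12):
    """Conceptual deep-mutual-learning term: average of KL(p||q) and
    KL(q||p) between the two models' predicted distributions."""
    p = softmax(student_logits)
    q = softmax(teacher_logits)
    kl_pq = np.sum(p * (np.log(p + eps) - np.log(q + eps)), axis=-1)
    kl_qp = np.sum(q * (np.log(q + eps) - np.log(p + eps)), axis=-1)
    return np.mean(0.5 * (kl_pq + kl_qp))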
key: "logits" + model_name_pairs: + - ["Student", "Teacher"] + - DistillationDistanceLoss: + weight: 1.0 + key: "backbone" + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.02 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/train_reg_all_data.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognitionV2/GeneralRecognitionV2_CLIP_vit_base.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognitionV2/GeneralRecognitionV2_CLIP_vit_base.yaml new file mode 100644 index 000000000..912c3cd39 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognitionV2/GeneralRecognitionV2_CLIP_vit_base.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + use_dali: False + to_static: False + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: CLIP_vit_base_patch16_224 + pretrained: True + return_embed: True + return_mean_embed: True + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 512 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 192613 + margin: 0.2 + scale: 30 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - 
CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.02 + warmup_epoch: 1 + regularizer: + name: "L2" + coeff: 0.00002 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/train_reg_all_data_v2.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - RandomRotation: + prob: 0.3 + degrees: 90 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/ppshitu_traindata/Aliproduct + cls_label_path: ./dataset/ppshitu_traindata/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: False + loader: + num_workers: 12 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/ppshitu_traindata/Aliproduct/ + cls_label_path: ./dataset/ppshitu_traindata/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: False + loader: + num_workers: 12 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognitionV2/GeneralRecognitionV2_CLIP_vit_large.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognitionV2/GeneralRecognitionV2_CLIP_vit_large.yaml new file mode 100644 index 000000000..f7e33b3f8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognitionV2/GeneralRecognitionV2_CLIP_vit_large.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 10 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + use_dali: False + to_static: False + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: CLIP_vit_large_patch14_224 + pretrained: True + return_embed: True + return_mean_embed: True + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 512 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 192613 + margin: 0.2 + scale: 30 + +# loss function 
config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0025 + warmup_epoch: 1 + regularizer: + name: "L2" + coeff: 0.00002 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/train_reg_all_data_v2.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - RandomRotation: + prob: 0.3 + degrees: 90 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/ppshitu_traindata/Aliproduct + cls_label_path: ./dataset/ppshitu_traindata/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: False + loader: + num_workers: 12 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/ppshitu_traindata/Aliproduct/ + cls_label_path: ./dataset/ppshitu_traindata/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: False + loader: + num_workers: 12 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml new file mode 100644 index 000000000..61eadb48a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/GeneralRecognitionV2/GeneralRecognitionV2_PPLCNetV2_base.yaml @@ -0,0 +1,209 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 20 + use_visualdl: False + eval_mode: retrieval + retrieval_feature_from: features # 'backbone' or 'features' + re_ranking: False + use_dali: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: PPLCNetV2_base_ShiTu + pretrained: True + use_ssld: True + class_expand: &feat_dim 512 + BackboneStopLayer: + name: flatten + Neck: + name: BNNeck + 
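(Aside: the PKSampler used for PPLCNetV2_base_ShiTu — batch_size: 256, sample_per_id: 4 — builds each batch from P identities with K images each, so the TripletAngularMarginLoss always has in-batch positives. The snippet below sketches only that core idea; it ignores the id_avg_prob sampling method and the id_list/ratio grouping shown in the config, and its function name is invented.)

import random
from collections import defaultdict

def pk_batch(labels, batch_size=256, sample_per_id=4):
    """Conceptual P x K sampling: pick batch_size // sample_per_id identities,
    then sample_per_id images for each of them."""
    by_id = defaultdict(list)
    for idx, label in enumerate(labels):
        by_id[label].append(idx)
    num_ids = min(batch_size // sample_per_id, len(by_id))
    chosen = random.sample(list(by_id), num_ids)
    batch = []
    for pid in chosen:
        # sample with replacement when an identity has fewer than K images
        batch.extend(random.choices(by_id[pid], k=sample_per_id))
    return batch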
num_features: *feat_dim + weight_attr: + initializer: + name: Constant + value: 1.0 + bias_attr: + initializer: + name: Constant + value: 0.0 + learning_rate: 1.0e-20 # NOTE: Temporarily set lr small enough to freeze the bias to zero + Head: + name: FC + embedding_size: *feat_dim + class_num: 192612 + weight_attr: + initializer: + name: Normal + std: 0.001 + bias_attr: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + - TripletAngularMarginLoss: + weight: 1.0 + feature_from: features + margin: 0.5 + reduction: mean + add_absolute: True + absolute_loss_weight: 0.1 + normalize_feature: True + ap_value: 0.8 + an_value: 0.4 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.06 # for 8gpu x 256bs + warmup_epoch: 5 + regularizer: + name: L2 + coeff: 0.00001 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/train_reg_all_data_v2.txt + relabel: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + return_numpy: False + interpolation: bilinear + backend: cv2 + - RandFlipImage: + flip_code: 1 + - Pad: + padding: 10 + backend: cv2 + - RandCropImageV2: + size: [224, 224] + - RandomRotation: + prob: 0.5 + degrees: 90 + interpolation: bilinear + - ResizeImage: + size: [224, 224] + return_numpy: False + interpolation: bilinear + backend: cv2 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: hwc + sampler: + name: PKSampler + batch_size: 256 + sample_per_id: 4 + drop_last: False + shuffle: True + sample_method: "id_avg_prob" + id_list: [50030, 80700, 92019, 96015] # be careful when set relabel=True + ratio: [4, 4] + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + return_numpy: False + interpolation: bilinear + backend: cv2 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + return_numpy: False + interpolation: bilinear + backend: cv2 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSPNet/CSPDarkNet53.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSPNet/CSPDarkNet53.yaml new file mode 100644 index 000000000..29ca02e25 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSPNet/CSPDarkNet53.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: 
null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: CSPDarkNet53 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml new file mode 100644 index 000000000..5f116f11a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + 
level: O1 + + +# model architecture +Arch: + name: CSWinTransformer_base_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 2.5e-4 + eta_min: 2.5e-6 + warmup_epoch: 20 + warmup_start_lr: 2.5e-7 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml new file mode 100644 index 000000000..d845d8f4d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml @@ -0,0 +1,173 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + 
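(Aside: in the AMP blocks, scale_loss: 128.0 with use_dynamic_loss_scaling: True means the loss is multiplied by a scale factor before backward so small fp16 gradients do not underflow, and that factor is adapted as training runs. The sketch below shows the usual update rule at a pseudocode level; the parameter names and thresholds are illustrative, not Paddle's exact API.)

def update_loss_scale(scale, grads_are_finite, good_steps,
                      growth_interval=1000, growth=2.0, backoff=0.5):
    """Conceptual dynamic loss scaling: back off and skip the step on
    overflow, grow the scale after a run of clean steps."""
    if not grads_are_finite:
        return scale * backoff, 0      # overflow detected: shrink and reset
    good_steps += 1
    if good_steps >= growth_interval:
        return scale * growth, 0       # stable for a while: grow the scale
    return scale, good_steps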
use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: CSWinTransformer_base_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1.25e-4 + eta_min: 1.25e-6 + warmup_epoch: 20 + warmup_start_lr: 1.25e-7 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 16 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml new file mode 100644 index 000000000..9cadcc901 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml @@ -0,0 +1,173 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# 
mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: CSWinTransformer_large_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 2.5e-4 + eta_min: 2.5e-6 + warmup_epoch: 20 + warmup_start_lr: 2.5e-7 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml new file mode 100644 index 000000000..1e01bb0bb --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml @@ -0,0 +1,173 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + + image_shape: [3, 384, 384] + 
save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: CSWinTransformer_large_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 6.25e-5 + eta_min: 6.25e-7 + warmup_epoch: 20 + warmup_start_lr: 6.25e-8 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 8 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml new file mode 100644 index 000000000..2c182bb53 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml @@ -0,0 +1,173 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: 
False + # used for static mode and model export + + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: CSWinTransformer_small_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 5e-4 + eta_min: 5e-6 + warmup_epoch: 20 + warmup_start_lr: 5e-7 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml new file mode 100644 index 000000000..fa8986f2c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml @@ -0,0 +1,173 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + 
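(Aside: the OpSampler batch transform in the CSWinTransformer configs applies MixupOperator with alpha: 0.8 or CutmixOperator with alpha: 1.0, each with prob: 0.5. The sketch below shows the mixup half only, assuming one-hot labels; the names are illustrative rather than ppcls's operator API.)

import numpy as np

def mixup_batch(images, one_hot_labels, alpha=0.8):
    """Conceptual mixup step: draw lam ~ Beta(alpha, alpha) and blend each
    image and its one-hot label with a randomly shuffled partner."""
    lam = np.random.beta(alpha, alpha)
    perm = np.random.permutation(len(images))
    mixed_images = lam * images + (1 - lam) * images[perm]
    mixed_labels = lam * one_hot_labels + (1 - lam) * one_hot_labels[perm]
    return mixed_images, mixed_labels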
eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: CSWinTransformer_tiny_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_base_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_base_224.yaml new file mode 100644 index 000000000..591afe390 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_base_224.yaml @@ -0,0 +1,182 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + 
eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + update_freq: 4 # for 8 cards + +# model ema +EMA: + decay: 0.9999 + + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ConvNeXt_base_224 + class_num: 1000 + drop_path_rate: 0.1 + layer_scale_init_value: 1e-6 + head_init_scale: 1.0 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 4e-3 # lr 4e-3 for total_batch_size 4096 + eta_min: 1e-6 + warmup_epoch: 20 + warmup_start_lr: 0 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_base_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_base_384.yaml new file mode 100644 index 000000000..0adec4be5 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_base_384.yaml @@ -0,0 
+1,182 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + update_freq: 4 # for 8 cards + +# model ema +EMA: + decay: 0.9999 + + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ConvNeXt_base_384 + class_num: 1000 + drop_path_rate: 0.1 + layer_scale_init_value: 1e-6 + head_init_scale: 1.0 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 4e-3 # lr 4e-3 for total_batch_size 4096 + eta_min: 1e-6 + warmup_epoch: 20 + warmup_start_lr: 0 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_large_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_large_224.yaml new 
file mode 100644 index 000000000..6f5b23e10 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_large_224.yaml @@ -0,0 +1,182 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + update_freq: 4 # for 8 cards + +# model ema +EMA: + decay: 0.9999 + + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ConvNeXt_large_224 + class_num: 1000 + drop_path_rate: 0.1 + layer_scale_init_value: 1e-6 + head_init_scale: 1.0 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 4e-3 # lr 4e-3 for total_batch_size 4096 + eta_min: 1e-6 + warmup_epoch: 20 + warmup_start_lr: 0 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git 
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_large_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_large_384.yaml new file mode 100644 index 000000000..63a4aa1a0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_large_384.yaml @@ -0,0 +1,182 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + update_freq: 4 # for 8 cards + +# model ema +EMA: + decay: 0.9999 + + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ConvNeXt_large_384 + class_num: 1000 + drop_path_rate: 0.1 + layer_scale_init_value: 1e-6 + head_init_scale: 1.0 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 4e-3 # lr 4e-3 for total_batch_size 4096 + eta_min: 1e-6 + warmup_epoch: 20 + warmup_start_lr: 0 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + 
PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_small.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_small.yaml new file mode 100644 index 000000000..d6c0551df --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_small.yaml @@ -0,0 +1,182 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + update_freq: 4 # for 8 cards + +# model ema +EMA: + decay: 0.9999 + + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ConvNeXt_small + class_num: 1000 + drop_path_rate: 0.1 + layer_scale_init_value: 1e-6 + head_init_scale: 1.0 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 4e-3 # lr 4e-3 for total_batch_size 4096 + eta_min: 1e-6 + warmup_epoch: 20 + warmup_start_lr: 0 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil 
+ - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_tiny.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_tiny.yaml new file mode 100644 index 000000000..4d705857b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ConvNeXt/ConvNeXt_tiny.yaml @@ -0,0 +1,182 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + update_freq: 4 # for 8 cards + +# model ema +EMA: + decay: 0.9999 + + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ConvNeXt_tiny + class_num: 1000 + drop_path_rate: 0.1 + layer_scale_init_value: 1e-6 + head_init_scale: 1.0 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 4e-3 # lr 4e-3 for total_batch_size 4096 + eta_min: 1e-6 + warmup_epoch: 20 + warmup_start_lr: 0 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_13_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_13_224.yaml new file mode 100644 index 000000000..b211c0cd1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_13_224.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 50 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + update_freq: 2 # for 8 cards + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: CvT_13_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 2e-3 # lr 2e-3 for total_batch_size 2048 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + by_epoch: True + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: 
docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_13_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_13_384.yaml new file mode 100644 index 000000000..14e2b9d9e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_13_384.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 50 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + update_freq: 2 # for 8 cards + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: CvT_13_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 2e-3 # lr 2e-3 for total_batch_size 2048 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + by_epoch: True + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + size: 384 + interpolation: bicubic + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 
+ use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + size: 384 + interpolation: bicubic + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_21_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_21_224.yaml new file mode 100644 index 000000000..8274a582e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_21_224.yaml @@ -0,0 +1,154 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 50 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + update_freq: 2 # for 8 cards + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: CvT_21_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.1 + no_weight_decay_name: pos_embed cls_token .bias + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 # lr 1e-3 for total_batch_size 1024 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + by_epoch: True + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: RASampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - 
NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_21_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_21_384.yaml new file mode 100644 index 000000000..4aa2e27ca --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_21_384.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 50 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + update_freq: 2 # for 8 cards + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: CvT_21_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.1 + no_weight_decay_name: pos_embed cls_token .bias + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 # lr 1e-3 for total_batch_size 1024 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + by_epoch: True + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + size: 384 + interpolation: bicubic + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + size: 384 + interpolation: bicubic + 
backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_W24_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_W24_384.yaml new file mode 100644 index 000000000..18b6d7ff6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/CvT/CvT_W24_384.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 50 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + update_freq: 2 # for 8 cards + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: CvT_W24_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.1 + no_weight_decay_name: pos_embed cls_token .bias + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 # lr 1e-3 for total_batch_size 1024 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + by_epoch: True + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + size: 384 + interpolation: bicubic + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + size: 384 + 
interpolation: bicubic + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA102.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA102.yaml new file mode 100644 index 000000000..c87635763 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA102.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DLA102 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA102x.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA102x.yaml new file mode 100644 index 000000000..580c8ce4c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA102x.yaml 
@@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DLA102x + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA102x2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA102x2.yaml new file mode 100644 index 000000000..0691a2afc --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA102x2.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DLA102x2 + class_num: 1000 + +# loss function 
config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA169.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA169.yaml new file mode 100644 index 000000000..7731d361b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA169.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DLA169 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - 
NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA34.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA34.yaml new file mode 100644 index 000000000..555716033 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA34.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DLA34 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + 
name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA46_c.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA46_c.yaml new file mode 100644 index 000000000..1fef5b861 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA46_c.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DLA46_c + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - 
TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA46x_c.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA46x_c.yaml new file mode 100644 index 000000000..a88a940d2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA46x_c.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DLA46x_c + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA60.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA60.yaml new file mode 100644 index 000000000..0a82f7d2c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA60.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and 
model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DLA60 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA60x.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA60x.yaml new file mode 100644 index 000000000..19dc8ef32 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA60x.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DLA60x + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' 
+ coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA60x_c.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA60x_c.yaml new file mode 100644 index 000000000..ebf247840 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DLA/DLA60x_c.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DLA60x_c + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: 
ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN107.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN107.yaml new file mode 100644 index 000000000..a18c341f6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN107.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DPN107 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + 
channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN131.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN131.yaml new file mode 100644 index 000000000..68e9479ff --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN131.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DPN131 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN68.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN68.yaml new file mode 100644 index 000000000..33a0e416e --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN68.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DPN68 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN92.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN92.yaml new file mode 100644 index 000000000..583079845 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN92.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DPN92 
+ class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN98.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN98.yaml new file mode 100644 index 000000000..f3bb99423 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DPN/DPN98.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DPN98 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 
0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DSNet/DSNet_base.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DSNet/DSNet_base.yaml new file mode 100644 index 000000000..7d4ffcb10 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DSNet/DSNet_base.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DSNet_base + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + 
sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DSNet/DSNet_small.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DSNet/DSNet_small.yaml new file mode 100644 index 000000000..e006104c9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DSNet/DSNet_small.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DSNet_small + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: 
DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DSNet/DSNet_tiny.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DSNet/DSNet_tiny.yaml new file mode 100644 index 000000000..884620582 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DSNet/DSNet_tiny.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DSNet_tiny + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 
128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DarkNet/DarkNet53.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DarkNet/DarkNet53.yaml new file mode 100644 index 000000000..ccf05adaa --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DarkNet/DarkNet53.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DarkNet53 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + 
loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml new file mode 100644 index 000000000..127cf91e8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml new file mode 100644 index 000000000..542ce15f8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Baseline.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml new file mode 100644 index 000000000..21ec5f88b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Cutmix.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and 
model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - CutmixOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml new file mode 100644 index 000000000..5f9286b82 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval 
+DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - Cutout: + n_holes: 1 + length: 112 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml new file mode 100644 index 000000000..8e14546a3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_GridMask.yaml @@ -0,0 +1,146 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - GridMask: + d1: 96 + d2: 224 + rotate: 1 + ratio: 0.5 + mode: 0 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + 
Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml new file mode 100644 index 000000000..b8bdeba2c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_HideAndSeek.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - HideAndSeek: + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml new file mode 100644 index 000000000..176acaf3e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_Mixup.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml new file mode 100644 index 000000000..c1f8c14f6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_RandAugment.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - RandAugment: + num_layers: 2 + magnitude: 5 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml new file mode 100644 index 000000000..1788e529d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DataAugment/ResNet50_RandomErasing.yaml @@ -0,0 +1,146 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 
224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_224.yaml new file mode 100644 index 000000000..39087d340 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_224.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DeiT_base_distilled_patch16_224 + drop_path_rate : 0.1 + drop_rate : 0.0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 
+ beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 2e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_384.yaml new file mode 100644 index 000000000..bf0ac2b7a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_384.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DeiT_base_distilled_patch16_384 + drop_path_rate : 0.1 + drop_rate : 0.0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 
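+# Note on the optimizer block below: the "# for 8 cards" hint together with the per-card
+# batch_size of 256 used later in this file implies a global batch of 2048 behind the
+# 2e-3 AdamW learning rate. If fewer devices are used, the learning rate is commonly
+# scaled down linearly (an assumption to verify for DeiT, not something this config states).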
+ +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 2e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml new file mode 100644 index 000000000..cf3f1dc5e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DeiT_base_patch16_224 + drop_path_rate : 0.1 + drop_rate : 0.0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 
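+# In the Loss block above, only the training CELoss carries epsilon: 0.1, so label
+# smoothing is applied during training while evaluation reports plain cross-entropy
+# (epsilon is read here as the label-smoothing factor of CELoss, the usual PaddleClas meaning).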
+ +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 2e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml new file mode 100644 index 000000000..8f4a9a9d2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DeiT_base_patch16_384 + drop_path_rate : 0.1 + drop_rate : 0.0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 
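+# Schedule sketch for the settings below: the learning rate warms up from 2e-6 to 2e-3
+# over the first 5 epochs, then follows a cosine decay down to eta_min 2e-5 over the
+# remaining epochs of the 300-epoch run.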
+ +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 2e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_small_distilled_patch16_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_small_distilled_patch16_224.yaml new file mode 100644 index 000000000..0db9532e3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_small_distilled_patch16_224.yaml @@ -0,0 +1,168 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DeiT_small_distilled_patch16_224 + drop_path_rate : 0.1 + drop_rate : 0.0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + 
epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 2e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml new file mode 100644 index 000000000..5e91973b1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DeiT_small_patch16_224 + drop_path_rate : 0.1 + drop_rate : 0.0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 
0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 2e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_tiny_distilled_patch16_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_tiny_distilled_patch16_224.yaml new file mode 100644 index 000000000..3068ada56 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_tiny_distilled_patch16_224.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DeiT_tiny_distilled_patch16_224 + drop_path_rate : 0.1 + drop_rate : 0.0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + 
Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 2e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml new file mode 100644 index 000000000..3cd8cd06c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DeiT_tiny_patch16_224 + drop_path_rate : 0.1 + drop_rate : 0.0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + 
Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token pos_embed dist_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 2e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet121.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet121.yaml new file mode 100644 index 000000000..13e57d675 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet121.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DenseNet121 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + 
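+# The Piecewise schedule below keeps the learning rate at 0.1 for epochs 0-29, 0.01 for
+# 30-59, 0.001 for 60-89, and 0.0001 from epoch 90 to the end of the 120-epoch run
+# (decay_epochs mark the boundaries, values list the corresponding rates).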
+ +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet161.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet161.yaml new file mode 100644 index 000000000..a3fa6b1cf --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet161.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DenseNet161 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 
0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet169.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet169.yaml new file mode 100644 index 000000000..5eb27d4dd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet169.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DenseNet169 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + 
batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet201.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet201.yaml new file mode 100644 index 000000000..6b7aad5c1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet201.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DenseNet201 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - 
TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet264.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet264.yaml new file mode 100644 index 000000000..046e3a83c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/DenseNet/DenseNet264.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: DenseNet264 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x1_0_ssld.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x1_0_ssld.yaml new file mode 100644 index 000000000..a9f28674e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x1_0_ssld.yaml @@ -0,0 +1,161 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output_r50_vd_distill + device: gpu + save_interval: 1 + eval_during_train: True + 
eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + to_static: True + +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + infer_model_name: "Student" + models: + - Teacher: + name: ResNet50_vd + class_num: *class_num + pretrained: True + use_ssld: True + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: False + dropout_prob: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.4 # for bs 1024 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x2_5_dml.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x2_5_dml.yaml new file mode 100644 index 000000000..4fb768098 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x2_5_dml.yaml @@ -0,0 +1,161 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output_lcnet_x2_5_dml + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + 
use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - False + - False + infer_model_name: "Student" + models: + - Teacher: + name: PPLCNet_x2_5 + class_num: *class_num + pretrained: False + - Student: + name: PPLCNet_x2_5 + class_num: *class_num + pretrained: False + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student", "Teacher"] + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.4 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x2_5_ssld.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x2_5_ssld.yaml new file mode 100644 index 000000000..e778cb8d8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x2_5_ssld.yaml @@ -0,0 +1,160 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output_r50_vd_distill + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode 
and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + to_static: True + +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + infer_model_name: "Student" + models: + - Teacher: + name: ResNet50_vd + class_num: *class_num + pretrained: True + use_ssld: True + - Student: + name: PPLCNet_x2_5 + class_num: *class_num + pretrained: False + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.2 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x2_5_udml.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x2_5_udml.yaml new file mode 100644 index 000000000..17e020e9a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/PPLCNet_x2_5_udml.yaml @@ -0,0 +1,171 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output_lcnet_x2_5_udml + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +AMP: 
+ use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - False + - False + infer_model_name: "Student" + models: + - Teacher: + name: PPLCNet_x2_5 + class_num: *class_num + pretrained: False + return_patterns: ["blocks3", "blocks4", "blocks5", "blocks6"] + - Student: + name: PPLCNet_x2_5 + class_num: *class_num + pretrained: False + return_patterns: ["blocks3", "blocks4", "blocks5", "blocks6"] + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + key: logits + model_names: ["Student", "Teacher"] + - DistillationDMLLoss: + weight: 1.0 + key: logits + model_name_pairs: + - ["Student", "Teacher"] + - DistillationDistanceLoss: + weight: 1.0 + key: "blocks5" + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.4 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/mv3_large_x1_0_distill_mv3_small_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/mv3_large_x1_0_distill_mv3_small_x1_0.yaml new file mode 100644 index 000000000..7cc99b64a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/mv3_large_x1_0_distill_mv3_small_x1_0.yaml @@ -0,0 +1,167 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + 
output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + use_dali: false + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: MobileNetV3_large_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + dropout_prob: null + - Student: + name: MobileNetV3_small_x1_0 + class_num: *class_num + pretrained: False + dropout_prob: null + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationCELoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.65 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/val_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + +Infer: + infer_imgs: "docs/images/inference_deployment/whl_demo.jpg" + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt" + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/res2net200_vd_distill_pphgnet_base.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/res2net200_vd_distill_pphgnet_base.yaml new file mode 100644 index 000000000..acc0aa380 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/res2net200_vd_distill_pphgnet_base.yaml @@ -0,0 +1,171 @@ +# global configs 
+Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + use_dali: false + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: Res2Net200_vd_26w_4s + class_num: *class_num + pretrained: True + use_ssld: True + - Student: + name: PPHGNet_base + class_num: *class_num + pretrained: False + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationCELoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/val_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: "docs/images/inference_deployment/whl_demo.jpg" + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt" + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_afd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_afd.yaml new file mode 100644 index 000000000..6816cd257 --- 
/dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_afd.yaml @@ -0,0 +1,211 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: "DistillationModel" + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + models: + - Teacher: + name: AttentionModel + pretrained_list: + freeze_params_list: + - True + - False + models: + - ResNet34: + name: ResNet34 + pretrained: True + return_patterns: &t_keys ["blocks[0]", "blocks[1]", "blocks[2]", "blocks[3]", + "blocks[4]", "blocks[5]", "blocks[6]", "blocks[7]", + "blocks[8]", "blocks[9]", "blocks[10]", "blocks[11]", + "blocks[12]", "blocks[13]", "blocks[14]", "blocks[15]"] + - LinearTransformTeacher: + name: LinearTransformTeacher + qk_dim: 128 + keys: *t_keys + t_shapes: &t_shapes [[64, 56, 56], [64, 56, 56], [64, 56, 56], [128, 28, 28], + [128, 28, 28], [128, 28, 28], [128, 28, 28], [256, 14, 14], + [256, 14, 14], [256, 14, 14], [256, 14, 14], [256, 14, 14], + [256, 14, 14], [512, 7, 7], [512, 7, 7], [512, 7, 7]] + + - Student: + name: AttentionModel + pretrained_list: + freeze_params_list: + - False + - False + models: + - ResNet18: + name: ResNet18 + pretrained: False + return_patterns: &s_keys ["blocks[0]", "blocks[1]", "blocks[2]", "blocks[3]", + "blocks[4]", "blocks[5]", "blocks[6]", "blocks[7]"] + - LinearTransformStudent: + name: LinearTransformStudent + qk_dim: 128 + keys: *s_keys + s_shapes: &s_shapes [[64, 56, 56], [64, 56, 56], [128, 28, 28], [128, 28, 28], + [256, 14, 14], [256, 14, 14], [512, 7, 7], [512, 7, 7]] + t_shapes: *t_shapes + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + key: logits + - DistillationKLDivLoss: + weight: 0.9 + model_name_pairs: [["Student", "Teacher"]] + temperature: 4 + key: logits + - AFDLoss: + weight: 50.0 + model_name_pair: ["Student", "Teacher"] + student_keys: ["bilinear_key", "value"] + teacher_keys: ["query", "value"] + s_shapes: *s_shapes + t_shapes: *t_shapes + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + weight_decay: 1e-4 + lr: + name: MultiStepDecay + learning_rate: 0.1 + milestones: [30, 60, 90] + step_each_epoch: 1 + gamma: 0.1 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + 
name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/val_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: "docs/images/inference_deployment/whl_demo.jpg" + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt" + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_dist.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_dist.yaml new file mode 100644 index 000000000..a689f6172 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_dist.yaml @@ -0,0 +1,164 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/r34_r18_dist + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + infer_model_name: "Student" + models: + - Teacher: + name: ResNet34 + class_num: *class_num + pretrained: True + - Student: + name: ResNet18 + class_num: *class_num + pretrained: False + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + - DistillationDISTLoss: + weight: 2.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + weight_decay: 1e-4 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + 
batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml new file mode 100644 index 000000000..19c684535 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml @@ -0,0 +1,166 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: "DistillationModel" + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: ResNet34 + pretrained: True + + - Student: + name: ResNet18 + pretrained: False + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + - DistillationDKDLoss: + weight: 1.0 + model_name_pairs: [["Student", "Teacher"]] + temperature: 1 + alpha: 1.0 + beta: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + weight_decay: 1e-4 + lr: + name: MultiStepDecay + learning_rate: 0.2 + milestones: [30, 60, 90] + step_each_epoch: 1 + gamma: 0.1 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 
128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/val_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: "docs/images/inference_deployment/whl_demo.jpg" + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt" + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_mgd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_mgd.yaml new file mode 100644 index 000000000..d501f3dd5 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_mgd.yaml @@ -0,0 +1,171 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/r34_r18_mgd + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + infer_model_name: "Student" + models: + - Teacher: + name: ResNet34 + class_num: *class_num + pretrained: True + return_patterns: &t_stages ["blocks[2]", "blocks[6]", "blocks[12]", "blocks[15]"] + - Student: + name: ResNet18 + class_num: *class_num + pretrained: False + return_patterns: &s_stages ["blocks[1]", "blocks[3]", "blocks[5]", "blocks[7]"] + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + - DistillationPairLoss: + weight: 1.0 + base_loss_name: MGDLoss + model_name_pairs: [["Student", "Teacher"]] + s_key: "blocks[7]" + t_key: "blocks[15]" + name: "loss_mgd" + student_channels: 512 + teacher_channels: 512 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + weight_decay: 1e-4 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + 
cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_pefd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_pefd.yaml new file mode 100644 index 000000000..1c87f03ec --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_pefd.yaml @@ -0,0 +1,171 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/r34_r18_pefd + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + infer_model_name: "Student" + models: + - Teacher: + name: ResNet34 + class_num: *class_num + pretrained: True + return_patterns: &t_stages ["avg_pool"] + - Student: + name: ResNet18 + class_num: *class_num + pretrained: False + return_patterns: &s_stages ["avg_pool"] + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + - DistillationPairLoss: + weight: 25.0 + base_loss_name: PEFDLoss + model_name_pairs: [["Student", "Teacher"]] + s_key: "avg_pool" + t_key: "avg_pool" + name: "loss_pefd" + student_channel: 512 + teacher_channel: 512 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + 
weight_decay: 1e-4 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_skd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_skd.yaml new file mode 100644 index 000000000..d1100a1b7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_skd.yaml @@ -0,0 +1,163 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: "DistillationModel" + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: ResNet34 + pretrained: True + + - Student: + name: ResNet18 + pretrained: False + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationSKDLoss: + weight: 1.0 + model_name_pairs: [["Student", "Teacher"]] + temperature: 1.0 + multiplier: 2.0 + alpha: 0.9 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + weight_decay: 1e-4 + lr: + name: MultiStepDecay + learning_rate: 0.1 + 
milestones: [30, 60, 90] + step_each_epoch: 1 + gamma: 0.1 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/val_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: "docs/images/inference_deployment/whl_demo.jpg" + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt" + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_wsl.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_wsl.yaml new file mode 100644 index 000000000..adabc5ae0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Distillation/resnet34_distill_resnet18_wsl.yaml @@ -0,0 +1,164 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/r34_r18_wsl + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: "DistillationModel" + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: ResNet34 + pretrained: True + + - Student: + name: ResNet18 + pretrained: False + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + - DistillationWSLLoss: + weight: 2.5 + model_name_pairs: [["Student", "Teacher"]] + temperature: 2 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + weight_decay: 1e-4 + lr: + name: MultiStepDecay + learning_rate: 0.1 + milestones: [30, 60, 
90] + step_each_epoch: 1 + gamma: 0.1 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/val_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: "docs/images/inference_deployment/whl_demo.jpg" + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt" + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x0_25.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x0_25.yaml new file mode 100644 index 000000000..a1ae01f2e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x0_25.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ESNet_x0_25 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + 
loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x0_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x0_5.yaml new file mode 100644 index 000000000..bf806d9cd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x0_5.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ESNet_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + 
batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x0_75.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x0_75.yaml new file mode 100644 index 000000000..566560795 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x0_75.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ESNet_x0_75 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x1_0.yaml new 
file mode 100644 index 000000000..e0b32d5c5 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ESNet/ESNet_x1_0.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ESNet_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB0.yaml new file mode 100644 index 000000000..8f8ce1825 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB0.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: 
True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: EfficientNetB0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB1.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB1.yaml new file mode 100644 index 000000000..33cdcff94 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB1.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 240, 240] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: EfficientNetB1 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + 
image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 240 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 272 + - CropImage: + size: 240 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 272 + - CropImage: + size: 240 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB2.yaml new file mode 100644 index 000000000..3d2e15f85 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB2.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 260, 260] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: EfficientNetB2 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 260 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: 
./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + - CropImage: + size: 260 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + - CropImage: + size: 260 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB3.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB3.yaml new file mode 100644 index 000000000..4dd71da17 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB3.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 300, 300] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: EfficientNetB3 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 300 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 332 + - CropImage: + size: 300 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + 
- DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 332 + - CropImage: + size: 300 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB4.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB4.yaml new file mode 100644 index 000000000..a123c1267 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB4.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 380, 380] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: EfficientNetB4 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 380 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 412 + - CropImage: + size: 380 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 412 + - CropImage: + size: 380 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB5.yaml 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB5.yaml new file mode 100644 index 000000000..8c163d64f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB5.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 456, 456] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: EfficientNetB5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 456 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 488 + - CropImage: + size: 456 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 488 + - CropImage: + size: 456 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB6.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB6.yaml new file mode 100644 index 000000000..9897673a3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB6.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + 
image_shape: [3, 528, 528] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: EfficientNetB6 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 528 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 560 + - CropImage: + size: 528 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 560 + - CropImage: + size: 528 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB7.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB7.yaml new file mode 100644 index 000000000..8f02cd278 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNet/EfficientNetB7.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 600, 600] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: EfficientNetB7 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + lr: + name: Cosine + 
learning_rate: 0.032 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 600 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 632 + - CropImage: + size: 600 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 632 + - CropImage: + size: 600 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNetV2/EfficientNetV2_S.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNetV2/EfficientNetV2_S.yaml new file mode 100644 index 000000000..48257bc13 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/EfficientNetV2/EfficientNetV2_S.yaml @@ -0,0 +1,147 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 350 + print_batch_step: 20 + use_visualdl: False + train_mode: progressive # progressive training + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +EMA: + decay: 0.9999 + +# model architecture +Arch: + name: EfficientNetV2_S + class_num: 1000 + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.65 # 8gpux128bs + warmup_epoch: 5 + regularizer: + name: L2 + coeff: 0.00001 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 171 + progress_size: [171, 214, 257, 300] + scale: [0.05, 1.0] + - RandFlipImage: + flip_code: 1 + - RandAugmentV2: + num_layers: 2 + 
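A small observation on the input resolutions in the EfficientNet configs above: each B-series variant's eval pipeline resizes the short side to the train crop size plus 32 before center-cropping (B3 300/332, B4 380/412, B5 456/488, B6 528/560, B7 600/632), while EfficientNetV2_S instead grows its train crop progressively ([171, 214, 257, 300]) and evaluates at a fixed 384 input. A quick sketch of the +32 convention, using only values visible in these files:

    # Train crop size -> eval resize_short for the EfficientNet B-series configs
    # in this patch; the eval short side is consistently crop + 32.
    train_crop = {"B3": 300, "B4": 380, "B5": 456, "B6": 528, "B7": 600}

    for name, crop in train_crop.items():
        resize_short = crop + 32  # matches the ResizeImage/CropImage pairs above
        print(f"EfficientNet{name}: RandCropImage size={crop}, eval resize_short={resize_short}")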
magnitude: 5.0 + progress_magnitude: [5.0, 8.3333333333, 11.66666666667, 15.0] + - NormalizeImage: + scale: 1.0 + mean: [128.0, 128.0, 128.0] + std: [128.0, 128.0, 128.0] + order: "" + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - CropImageAtRatio: + size: 384 + pad: 32 + interpolation: bilinear + - NormalizeImage: + scale: 1.0 + mean: [128.0, 128.0, 128.0] + std: [128.0, 128.0, 128.0] + order: "" + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - CropImageAtRatio: + size: 384 + pad: 32 + interpolation: bilinear + - NormalizeImage: + scale: 1.0 + mean: [128.0, 128.0, 128.0] + std: [128.0, 128.0, 128.0] + order: "" + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_L.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_L.yaml new file mode 100644 index 000000000..e8182839e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_L.yaml @@ -0,0 +1,163 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: False + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: FasterNet_L + class_num: 1000 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + clip_grad: 0.01 + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.002 + warmup_start_lr: 0.000001 + warmup_epoch: 20 + eta_min: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + backend: pil + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.7 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + 
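The NormalizeImage settings also differ between families: most configs here scale pixels by 1.0/255.0 and then apply the ImageNet mean/std in [0, 1], whereas EfficientNetV2_S keeps scale at 1.0 with mean = std = 128, which maps uint8 input to roughly [-1, 1]. A minimal NumPy sketch of the two conventions, for illustration only (the real operator lives in PaddleClas's preprocessing code):

    import numpy as np

    pixels = np.array([0, 128, 255], dtype=np.float32)  # a few raw uint8 values

    # Convention used by most configs in this patch: scale to [0, 1], then
    # normalize with the ImageNet statistics (red-channel values shown).
    imagenet = (pixels / 255.0 - 0.485) / 0.229

    # EfficientNetV2_S convention: scale 1.0 with mean = std = 128.
    v2s = (pixels - 128.0) / 128.0

    print(imagenet)  # roughly [-2.12, 0.07, 2.25]
    print(v2s)       # [-1.0, 0.0, ~0.99]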
num_workers: 12 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + backend: pil + to_np: False + channel_first: False + - ResizeImage: + interpolation: bicubic + backend: pil + resize_short: 248 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + sampler: + name: DistributedBatchSampler + batch_size: 1 + drop_last: False + shuffle: False + loader: + num_workers: 0 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_M.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_M.yaml new file mode 100644 index 000000000..a08f6005e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_M.yaml @@ -0,0 +1,163 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: False + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: FasterNet_M + class_num: 1000 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + clip_grad: 1 + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.002 + warmup_start_lr: 0.000001 + warmup_epoch: 20 + eta_min: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + backend: pil + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.5 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 12 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + backend: pil + 
to_np: False + channel_first: False + - ResizeImage: + interpolation: bicubic + backend: pil + resize_short: 248 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + sampler: + name: DistributedBatchSampler + batch_size: 1 + drop_last: False + shuffle: False + loader: + num_workers: 0 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_S.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_S.yaml new file mode 100644 index 000000000..5d43a911a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_S.yaml @@ -0,0 +1,163 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: False + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: FasterNet_S + class_num: 1000 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.03 + clip_grad: null + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.004 + warmup_start_lr: 0.000001 + warmup_epoch: 20 + eta_min: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + backend: pil + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.3 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 12 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + backend: pil + to_np: False + channel_first: False + - ResizeImage: ##RandomResized + interpolation: bicubic + backend: pil + resize_short: 248 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] 
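The FasterNet configs above apply batch-level augmentation through an OpSampler that picks MixupOperator or CutmixOperator, each with prob 0.5 (only the Mixup alpha changes per model). The sketch below is one way to read that selection logic, under the assumption that the listed prob values act as sampling weights for mutually exclusive ops and that any leftover probability mass means "leave the batch unchanged"; the actual PaddleClas operator may handle this differently.

    import random

    def sample_batch_op(op_probs):
        """Pick one batch transform per iteration according to its weight."""
        names = list(op_probs)
        weights = [op_probs[n] for n in names]
        leftover = max(0.0, 1.0 - sum(weights))
        names.append(None)            # None: apply no batch transform
        weights.append(leftover)
        return random.choices(names, weights=weights, k=1)[0]

    # Weights taken from the FasterNet_S Train section above.
    counts = {"MixupOperator": 0, "CutmixOperator": 0, None: 0}
    for _ in range(10000):
        counts[sample_batch_op({"MixupOperator": 0.5, "CutmixOperator": 0.5})] += 1
    print(counts)  # roughly an even Mixup/CutMix split, no pass-through batches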
+ std: [0.229, 0.224, 0.225] + order: 'hwc' + sampler: + name: DistributedBatchSampler + batch_size: 1 + drop_last: False + shuffle: False + loader: + num_workers: 0 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_T0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_T0.yaml new file mode 100644 index 000000000..6c3c0d66e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_T0.yaml @@ -0,0 +1,163 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: False + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: FasterNet_T0 + class_num: 1000 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.005 + clip_grad: null + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.004 + warmup_start_lr: 0.000001 + warmup_epoch: 20 + eta_min: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + backend: pil + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: null + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.05 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 12 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + backend: pil + to_np: False + channel_first: False + - ResizeImage: + interpolation: bicubic + backend: pil + resize_short: 248 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + sampler: + name: DistributedBatchSampler + batch_size: 1 + drop_last: False + shuffle: False + loader: + num_workers: 0 + use_shared_memory: True + + +Infer: + infer_imgs: 
docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_T1.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_T1.yaml new file mode 100644 index 000000000..436f61eca --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_T1.yaml @@ -0,0 +1,163 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: False + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: FasterNet_T1 + class_num: 1000 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + clip_grad: null + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.004 + warmup_start_lr: 0.000001 + warmup_epoch: 20 + eta_min: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + backend: pil + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m3-mstd0.5-inc1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.05 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 12 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + backend: pil + to_np: False + channel_first: False + - ResizeImage: + interpolation: bicubic + backend: pil + resize_short: 248 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + sampler: + name: DistributedBatchSampler + batch_size: 1 + drop_last: False + shuffle: False + loader: + num_workers: 0 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + 
scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_T2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_T2.yaml new file mode 100644 index 000000000..eba5d0d49 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/FasterNet/FasterNet_T2.yaml @@ -0,0 +1,162 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: False + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: FasterNet_T2 + class_num: 1000 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.02 + clip_grad: null + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.004 + warmup_start_lr: 0.000001 + warmup_epoch: 20 + eta_min: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + backend: pil + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m5-mstd0.5-inc1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.1 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 12 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + backend: pil + to_np: False + channel_first: False + - ResizeImage: + interpolation: bicubic + backend: pil + resize_short: 248 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + sampler: + name: DistributedBatchSampler + batch_size: 1 + drop_last: False + shuffle: False + loader: + num_workers: 0 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + 
- TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml new file mode 100644 index 000000000..fe56eb53d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: GhostNet_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml new file mode 100644 index 000000000..e063ec2b7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 
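All of these configs report TopkAcc with topk [1, 5]: a sample counts as correct if the ground-truth class appears among the k highest-scoring logits. A small NumPy sketch of the metric (an illustration, not the PaddleClas implementation):

    import numpy as np

    def topk_accuracy(logits, labels, k):
        """Fraction of samples whose true label is among the top-k predictions."""
        topk = np.argsort(logits, axis=1)[:, -k:]   # indices of the k largest logits
        hits = (topk == labels[:, None]).any(axis=1)
        return hits.mean()

    logits = np.array([[0.1, 0.7, 0.2],
                       [0.2, 0.5, 0.3],
                       [0.4, 0.5, 0.1]])
    labels = np.array([1, 2, 0])
    print(topk_accuracy(logits, labels, 1))  # 0.333..., only the first row is a top-1 hit
    print(topk_accuracy(logits, labels, 2))  # 1.0, every true label is in the top 2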
+ print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: GhostNet_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml new file mode 100644 index 000000000..40572d4ab --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: GhostNet_x1_3 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + 
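Most of the training losses above are CELoss with epsilon 0.1, i.e. cross entropy with label smoothing (a few configs, such as the HarDNet ones, use plain cross entropy without it). Under the standard smoothing formulation, the one-hot target is mixed with a uniform distribution, so the true class gets 1 - epsilon + epsilon/C and every other class gets epsilon/C. A compact NumPy sketch for one sample, as an illustration rather than the PaddleClas op:

    import numpy as np

    def smoothed_ce(logits, label, epsilon=0.1):
        """Cross entropy against a label-smoothed target (single sample)."""
        num_classes = logits.shape[0]
        log_probs = logits - np.log(np.exp(logits).sum())      # log-softmax
        target = np.full(num_classes, epsilon / num_classes)   # uniform part
        target[label] += 1.0 - epsilon                         # true-class part
        return -(target * log_probs).sum()

    logits = np.array([2.0, 0.5, -1.0, 0.0])
    print(smoothed_ce(logits, label=0, epsilon=0.1))  # slightly above plain CE
    print(smoothed_ce(logits, label=0, epsilon=0.0))  # plain cross entropy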
learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet39_ds.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet39_ds.yaml new file mode 100644 index 000000000..d15ba0c75 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet39_ds.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: HarDNet39_ds + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: 
True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet68.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet68.yaml new file mode 100644 index 000000000..f0d8d8e26 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet68.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: HarDNet68 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: 
docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet68_ds.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet68_ds.yaml new file mode 100644 index 000000000..dc003a88f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet68_ds.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: HarDNet68_ds + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet85.yaml 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet85.yaml new file mode 100644 index 000000000..f69bc650c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/HarDNet/HarDNet85.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: HarDNet85 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Inception/GoogLeNet.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Inception/GoogLeNet.yaml new file mode 100644 index 000000000..6709433f9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Inception/GoogLeNet.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# 
mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: GoogLeNet + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - GoogLeNetLoss: + weight: 1.0 + Eval: + - GoogLeNetLoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - GoogLeNetTopkAcc: + topk: [1, 5] + Eval: + - GoogLeNetTopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Inception/InceptionV3.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Inception/InceptionV3.yaml new file mode 100644 index 000000000..fe6c66a34 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Inception/InceptionV3.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: InceptionV3 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset 
+ image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Inception/InceptionV4.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Inception/InceptionV4.yaml new file mode 100644 index 000000000..996049490 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Inception/InceptionV4.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: InceptionV4 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + 
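The InceptionV3 and InceptionV4 configs above add a batch-level MixupOperator with alpha 0.2: each batch is blended with a shuffled copy of itself using a coefficient drawn from Beta(alpha, alpha), and a mixup-aware loss interpolates between the two label sets. A minimal NumPy sketch of that idea, kept independent of the actual PaddleClas operator:

    import numpy as np

    rng = np.random.default_rng(0)

    def mixup_batch(images, labels, alpha=0.2):
        """Blend a batch with a shuffled copy of itself (classic mixup).

        Returns the mixed images plus both label sets and the mixing weight;
        the loss is then lam * CE(y_a) + (1 - lam) * CE(y_b).
        """
        lam = rng.beta(alpha, alpha)
        perm = rng.permutation(len(images))
        mixed = lam * images + (1.0 - lam) * images[perm]
        return mixed, labels, labels[perm], lam

    # Shapes follow the 299x299 crop used by the Inception configs above.
    batch = rng.random((8, 3, 299, 299), dtype=np.float32)
    targets = rng.integers(0, 1000, size=8)
    mixed, y_a, y_b, lam = mixup_batch(batch, targets)
    print(mixed.shape, float(lam))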
cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_128.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_128.yaml new file mode 100644 index 000000000..496155274 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_128.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: LeViT_128 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + 
size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_128S.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_128S.yaml new file mode 100644 index 000000000..27798a673 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_128S.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: LeViT_128S + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_192.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_192.yaml new file mode 100644 index 000000000..7e045c65d --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_192.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: LeViT_192 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_256.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_256.yaml new file mode 100644 index 000000000..2b764daed --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_256.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + 
level: O1 + + +# model architecture +Arch: + name: LeViT_256 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_384.yaml new file mode 100644 index 000000000..6751e066c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/LeViT/LeViT_384.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: LeViT_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - 
DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M0.yaml new file mode 100644 index 000000000..fe64441f8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M0.yaml @@ -0,0 +1,147 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 600 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MicroNet_M0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.2 # for total batch size 512 + regularizer: + name: 'L2' + coeff: 3e-5 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bilinear + backend: pil + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 # for 4 gpus + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + 
to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bilinear + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bilinear + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M1.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M1.yaml new file mode 100644 index 000000000..694793e2a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M1.yaml @@ -0,0 +1,147 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 600 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MicroNet_M1 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.2 # for total batch size 512 + regularizer: + name: 'L2' + coeff: 3e-5 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bilinear + backend: pil + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 # for 4 gpus + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bilinear + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: 
docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bilinear + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M2.yaml new file mode 100644 index 000000000..379445787 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M2.yaml @@ -0,0 +1,147 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 600 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MicroNet_M2 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.2 # for total batch size 512 + regularizer: + name: 'L2' + coeff: 3e-5 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bilinear + backend: pil + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 # for 4 gpus + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bilinear + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bilinear + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + 
+Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M3.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M3.yaml new file mode 100644 index 000000000..7a41c05ba --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MicroNet/MicroNet_M3.yaml @@ -0,0 +1,152 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 600 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MicroNet_M3 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.2 # for total batch size 512 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 4e-5 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 224 + interpolation: bilinear + use_log_aspect: True + backend: pil + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 # for 4 gpus + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + resize_short: 256 + interpolation: bilinear + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + resize_short: 256 + interpolation: bilinear + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MixNet/MixNet_L.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MixNet/MixNet_L.yaml new file mode 100644 index 000000000..fe75f2cc8 --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MixNet/MixNet_L.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MixNet_L + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MixNet/MixNet_M.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MixNet/MixNet_M.yaml new file mode 100644 index 000000000..62c039bda --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MixNet/MixNet_M.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + 
scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MixNet_M + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MixNet/MixNet_S.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MixNet/MixNet_S.yaml new file mode 100644 index 000000000..f56f40eb1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MixNet/MixNet_S.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MixNet_S + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + 
Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNeXt/MobileNeXt_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNeXt/MobileNeXt_x1_0.yaml new file mode 100644 index 000000000..04772014a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNeXt/MobileNeXt_x1_0.yaml @@ -0,0 +1,160 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 50 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNeXt_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + use_nesterov: True + no_weight_decay_name: .bias + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.1 # for total batch size 512 + eta_min: 1e-5 + warmup_epoch: 3 + warmup_start_lr: 1e-4 + by_epoch: True + regularizer: + name: 'L2' + coeff: 1e-4 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 224 + interpolation: random + backend: pil + - RandFlipImage: + flip_code: 1 + - ColorJitter: + brightness: 0.4 + contrast: 0.4 + saturation: 0.4 + hue: 0 
+ - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 # for 4 gpus + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1.yaml new file mode 100644 index 000000000..66fa53ec7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV1 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml new file mode 100644 index 000000000..364ed851a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV1_x0_25 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - 
CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml new file mode 100644 index 000000000..46fc98859 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV1_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml new file mode 100644 index 
000000000..180e97c8c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV1_x0_75 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2.yaml new file mode 100644 index 000000000..12ba90772 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: 
False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV2 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml new file mode 100644 index 000000000..c8897d8fc --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV2_x0_25 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml new file mode 100644 index 000000000..d6c761ba4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV2_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - 
ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml new file mode 100644 index 000000000..ac33c49d4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV2_x0_75 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + 
order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml new file mode 100644 index 000000000..98fc61db4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV2_x1_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml new file mode 100644 index 000000000..1fd273469 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + 
checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV2_x2_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml new file mode 100644 index 000000000..e8c400d1d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV3_large_x0_35 + class_num: 1000 + +# loss function config for traing/eval 
process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml new file mode 100644 index 000000000..b357cc1ae --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV3_large_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - 
NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml new file mode 100644 index 000000000..b9bb092fa --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV3_large_x0_75 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 
0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml new file mode 100644 index 000000000..e3b54a66b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV3_large_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.65 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: 
Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml new file mode 100644 index 000000000..7eab8f904 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV3_large_x1_25 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml new file mode 100644 index 000000000..8957dc785 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,142 @@ +# 
global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml new file mode 100644 index 000000000..8d59196e0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV3_small_x0_5 + 
class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml new file mode 100644 index 000000000..2ccb6faee --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV3_small_x0_75 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: 
+ size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml new file mode 100644 index 000000000..08fb96996 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV3_small_x1_0 + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 
224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_ampo2_ultra.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_ampo2_ultra.yaml new file mode 100644 index 000000000..2880583b9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_ampo2_ultra.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + use_dali: True + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O2 + +# model architecture +Arch: + name: MobileNetV3_small_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 5.2 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 1024 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: 
[0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra.yaml new file mode 100644 index 000000000..f2df21425 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0_fp32_ultra.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + use_dali: True + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV3_small_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 5.2 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 1024 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml new file mode 100644 index 
000000000..b401bd168 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV3_small_x1_25 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_conv_large.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_conv_large.yaml new file mode 100644 index 000000000..a1aa14dd1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_conv_large.yaml @@ -0,0 +1,181 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 600 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 448, 448] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 
128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV4_conv_large + drop_rate: 0.2 + drop_path_rate: 0.35 + class_num: 1000 + + +# loss function config for training/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.2 + clip_grad: 5.0 + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 0.003 ##null + eta_min: 1.0e-06 + warmup_epoch: 20 + warmup_start_lr: 0 + + +EMA: + decay: 0.9998 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m8-inc1-mstd1.0 + interpolation: bicubic + img_size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: # TODO: confirm whether this is needed + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 12 + use_shared_memory: True + + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + backend: pil + interpolation: bicubic + resize_short: 448 + - CropImage: + size: 448 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 2 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_conv_medium.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_conv_medium.yaml new file mode 100644 index 000000000..fcfe07dcb --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_conv_medium.yaml @@ -0,0 +1,181 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 500 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 320, 320] + 
save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV4_conv_medium + drop_rate: 0.2 + drop_path_rate: 0.1 + class_num: 1000 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.1 + clip_grad: 5.0 + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 0.002 + eta_min: 0 + warmup_epoch: 20 + warmup_start_lr: 0 + + +EMA: + decay: 0.9998 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m8-inc1-mstd1.0 + interpolation: bicubic + img_size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 12 + use_shared_memory: True + + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + backend: pil + interpolation: bicubic + resize_short: 320 + - CropImage: + size: 320 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 12 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_conv_small.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_conv_small.yaml new file mode 100644 index 000000000..b1a0ee229 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_conv_small.yaml @@ -0,0 +1,181 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 2400 + print_batch_step: 10 + use_visualdl: False + # 
used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV4_conv_small + drop_rate: 0.25 + drop_path_rate: 0.03 + class_num: 1000 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.06 + clip_grad: 5.0 + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 0.002 ##null + eta_min: 0 + warmup_epoch: 5 + warmup_start_lr: 0 + + +EMA: + decay: 0.9998 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m8-inc1-mstd1.0 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 12 + use_shared_memory: True + + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + backend: pil + interpolation: bicubic + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 12 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_hybrid_large.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_hybrid_large.yaml new file mode 100644 index 000000000..da7da794a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_hybrid_large.yaml @@ -0,0 +1,182 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True 
+ eval_interval: 1 + epochs: 600 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 448, 448] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: True + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV4_hybrid_large + drop_rate: 0.2 + drop_path_rate: 0.35 + use_fused_attn: False + class_num: 1000 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.1 + clip_grad: 5.0 + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 0.001 + eta_min: 1.0e-06 + warmup_epoch: 20 + warmup_start_lr: 0 + + +EMA: + decay: 0.9998 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-inc1-mstd1.0 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 12 + use_shared_memory: True + + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + backend: pil + interpolation: bicubic + resize_short: 448 + - CropImage: + size: 448 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 12 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 448 + - CropImage: + size: 448 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_hybrid_medium.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_hybrid_medium.yaml new file mode 100644 index 000000000..dbe7be411 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileNetV4/MobileNetV4_hybrid_medium.yaml @@ -0,0 +1,176 @@ +# global configs +Global: + checkpoints: null 
+ pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 500 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: True + use_fp16_test: True + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileNetV4_hybrid_medium + drop_rate: 0.2 + drop_path_rate: 0.1 + use_fused_attn: False + class_num: 1000 + + +# loss function config for training/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.1 + clip_grad: 5.0 + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 0.001 + eta_min: 0 + warmup_epoch: 20 + warmup_start_lr: 0 + +EMA: + decay: 0.9998 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-inc1-mstd1.0 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: # todo: confirm whether this is needed + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 12 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + backend: pil + interpolation: bicubic + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 0 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViT/MobileViT_S.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViT/MobileViT_S.yaml new file mode 100644 index 000000000..bf39753e7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViT/MobileViT_S.yaml @@ -0,0
+1,151 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileViT_S + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + no_weight_decay_name: .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 0.002 + eta_min: 0.0002 + warmup_epoch: 5 + warmup_start_lr: 0.0002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bilinear + backend: pil + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: False + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bilinear + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViT/MobileViT_XS.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViT/MobileViT_XS.yaml new file mode 100644 index 000000000..c4b6804e5 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViT/MobileViT_XS.yaml @@ -0,0 +1,151 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + 
scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileViT_XS + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + no_weight_decay_name: .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 0.002 + eta_min: 0.0002 + warmup_epoch: 5 + warmup_start_lr: 0.0002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bilinear + backend: pil + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: False + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bilinear + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViT/MobileViT_XXS.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViT/MobileViT_XXS.yaml new file mode 100644 index 000000000..a3611f4e3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViT/MobileViT_XXS.yaml @@ -0,0 +1,151 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: MobileViT_XXS + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + 
no_weight_decay_name: .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 0.002 + eta_min: 0.0002 + warmup_epoch: 5 + warmup_start_lr: 0.0002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bilinear + backend: pil + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: False + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bilinear + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x0_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x0_5.yaml new file mode 100644 index 000000000..12891491a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x0_5.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTV2_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.004 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.009 # for total batch size 1024 + eta_min: 0.0009 + warmup_epoch: 16 # 20000 iterations + warmup_start_lr: 1e-6 + clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - RandAugmentV3: + num_layers: 2 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: const + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.25 + CutmixOperator: + alpha: 1.0 + prob: 0.25 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x1_0.yaml new file mode 100644 index 000000000..aeee054da --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x1_0.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTV2_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.013 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.0075 # for total batch size 1024 + eta_min: 0.00075 + warmup_epoch: 16 # 20000 iterations + warmup_start_lr: 1e-6 + clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + 
image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - RandAugmentV3: + num_layers: 2 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: const + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.25 + CutmixOperator: + alpha: 1.0 + prob: 0.25 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x1_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x1_5.yaml new file mode 100644 index 000000000..198e4333f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x1_5.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTV2_x1_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.029 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.0035 # for total batch size 1024 + eta_min: 0.00035 + warmup_epoch: 16 # 20000 iterations + warmup_start_lr: 1e-6 + clip_norm: 10 + +# data loader for train and eval +DataLoader: 
+ Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - RandAugmentV3: + num_layers: 2 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: const + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.25 + CutmixOperator: + alpha: 1.0 + prob: 0.25 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x2_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x2_0.yaml new file mode 100644 index 000000000..2739ea9d1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV2/MobileViTV2_x2_0.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTV2_x2_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.002 # for total batch size 1024 + eta_min: 0.0002 + warmup_epoch: 16 # 20000 iterations + warmup_start_lr: 1e-6 + clip_norm: 10 + +# 
data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - RandAugmentV3: + num_layers: 2 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: const + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.25 + CutmixOperator: + alpha: 1.0 + prob: 0.25 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_S.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_S.yaml new file mode 100644 index 000000000..ba49dbbc2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_S.yaml @@ -0,0 +1,153 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTV3_S + class_num: 1000 + dropout: 0.1 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + lr: + name: Cosine + learning_rate: 0.002 # for total batch size 384 + eta_min: 0.0002 + warmup_epoch: 1 # 3000 iterations + warmup_start_lr: 0.0002 + +# data 
loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bilinear + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + # support to specify width and height respectively: + # scales: [(256,256) (160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [256, 160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 48 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 48 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_S_L2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_S_L2.yaml new file mode 100644 index 000000000..4a9ba8b01 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_S_L2.yaml @@ -0,0 +1,153 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTV3_S_L2 + class_num: 1000 + dropout: 0.1 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + lr: + name: Cosine + learning_rate: 0.002 # for total batch size 384 + eta_min: 0.0002 + warmup_epoch: 1 # 3000 iterations + warmup_start_lr: 0.0002 + +# data loader for train and eval +DataLoader: + 
Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bilinear + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + # support to specify width and height respectively: + # scales: [(256,256) (160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [256, 160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 48 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 48 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XS.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XS.yaml new file mode 100644 index 000000000..3ac7264ac --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XS.yaml @@ -0,0 +1,153 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTV3_XS + class_num: 1000 + dropout: 0.1 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + lr: + name: Cosine + learning_rate: 0.002 # for total batch size 384 + eta_min: 0.0002 + warmup_epoch: 1 # 3000 iterations + warmup_start_lr: 0.0002 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + 
image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bilinear + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + # support to specify width and height respectively: + # scales: [(256,256) (160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [256, 160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 48 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 48 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XS_L2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XS_L2.yaml new file mode 100644 index 000000000..10b3c7f9e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XS_L2.yaml @@ -0,0 +1,153 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTV3_XS_L2 + class_num: 1000 + dropout: 0.1 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + lr: + name: Cosine + learning_rate: 0.002 # for total batch size 384 + eta_min: 0.0002 + warmup_epoch: 1 # 3000 iterations + warmup_start_lr: 0.0002 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + 
cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bilinear + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + # support to specify width and height respectively: + # scales: [(256,256) (160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [256, 160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 48 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 48 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XXS.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XXS.yaml new file mode 100644 index 000000000..719a20068 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XXS.yaml @@ -0,0 +1,153 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTV3_XXS + class_num: 1000 + dropout: 0.05 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + lr: + name: Cosine + learning_rate: 0.002 # for total batch size 384 + eta_min: 0.0002 + warmup_epoch: 1 # 3000 iterations + warmup_start_lr: 0.0002 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bilinear + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + # support to specify width and height respectively: + # scales: [(256,256) (160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [256, 160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 48 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 48 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XXS_L2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XXS_L2.yaml new file mode 100644 index 000000000..ff84fadd7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_XXS_L2.yaml @@ -0,0 +1,153 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTV3_XXS_L2 + class_num: 1000 + dropout: 0.1 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + lr: + name: Cosine + learning_rate: 0.002 # for total batch size 384 + eta_min: 0.0002 + warmup_epoch: 1 # 3000 iterations + warmup_start_lr: 0.0002 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + 
transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bilinear + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + # support to specify width and height respectively: + # scales: [(256,256) (160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [256, 160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 48 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 48 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_x0_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_x0_5.yaml new file mode 100644 index 000000000..31eef0c35 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_x0_5.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTV3_x0_5 + class_num: 1000 + classifier_dropout: 0. 
+ +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.002 # for total batch size 1020 by referring to official + eta_min: 0.0002 + warmup_epoch: 16 # 20000 iterations + warmup_start_lr: 1e-6 + clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - RandAugmentV3: + num_layers: 2 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: const + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.25 + CutmixOperator: + alpha: 1.0 + prob: 0.25 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_x0_75.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_x0_75.yaml new file mode 100644 index 000000000..eb4e3a3bd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_x0_75.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + 
+# model architecture +Arch: + name: MobileViTV3_x0_75 + class_num: 1000 + classifier_dropout: 0. + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.002 # for total batch size 1020 by referring to official + eta_min: 0.0002 + warmup_epoch: 16 # 20000 iterations + warmup_start_lr: 1e-6 + clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - RandAugmentV3: + num_layers: 2 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: const + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.25 + CutmixOperator: + alpha: 1.0 + prob: 0.25 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_x1_0.yaml new file mode 100644 index 000000000..e6b437bb4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/MobileViTV3/MobileViTV3_x1_0.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 65536 + use_dynamic_loss_scaling: True + 
use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTV3_x1_0 + class_num: 1000 + classifier_dropout: 0. + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 0.002 # for total batch size 1020 by referring to official + eta_min: 0.0002 + warmup_epoch: 16 # 20000 iterations + warmup_start_lr: 1e-6 + clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - RandAugmentV3: + num_layers: 2 + interpolation: bicubic + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: const + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.25 + CutmixOperator: + alpha: 1.0 + prob: 0.25 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_base_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_base_224.yaml new file mode 100644 index 000000000..66317732e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_base_224.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed 
precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: NextViT_base_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.1 + no_weight_decay_name: .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: ./deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_base_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_base_384.yaml new file mode 100644 index 000000000..d013e6470 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_base_384.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed 
precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: NextViT_base_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.1 + no_weight_decay_name: .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: ./deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_large_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_large_224.yaml new file mode 100644 index 000000000..33b8dabfa --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_large_224.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# 
mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: NextViT_large_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.1 + no_weight_decay_name: .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: ./deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_large_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_large_384.yaml new file mode 100644 index 000000000..a3eaf4c58 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_large_384.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + 
+ +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: NextViT_large_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.1 + no_weight_decay_name: .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: ./deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_small_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_small_224.yaml new file mode 100644 index 000000000..d16151df2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_small_224.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: 
False + + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: NextViT_small_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.1 + no_weight_decay_name: .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: ./deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_small_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_small_384.yaml new file mode 100644 index 000000000..daf727621 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/NextViT/NextViT_small_384.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + 
to_static: False + + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: NextViT_small_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.1 + no_weight_decay_name: .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: ./deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNet/PPHGNet_base.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNet/PPHGNet_base.yaml new file mode 100644 index 000000000..873b027a9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNet/PPHGNet_base.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 600 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: 
False + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNet_base + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m15-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.4 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.4 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNet/PPHGNet_small.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNet/PPHGNet_small.yaml new file mode 100644 index 000000000..9cc9dd9cd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNet/PPHGNet_small.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 600 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False 
+ scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNet_small + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml new file mode 100644 index 000000000..77f1e7a5f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 600 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: 
mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNet_tiny + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B0.yaml new file mode 100644 index 000000000..d5a7ab49e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B0.yaml @@ -0,0 +1,164 @@ +## Note: This config is only used for finetune training. The ImageNet metrics in PaddleClas are not trained through this config. 
+# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNetV2_B0 + class_num: 1000 + pretrained: True # ssld pretrained + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + # for global bs 1024, when finetune training, you need to reduce learning_rate manually + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B1.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B1.yaml new file mode 100644 index 000000000..bdae7decb --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B1.yaml @@ -0,0 +1,164 @@ +## Note: This config is only used for finetune training. 
The ImageNet metrics in PaddleClas are not trained through this config. +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNetV2_B1 + class_num: 1000 + pretrained: True # ssld pretrained + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + # for global bs 1024, when finetune training, you need to reduce learning_rate manually + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B2.yaml new file mode 100644 index 000000000..a30d1dd65 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B2.yaml @@ -0,0 +1,164 @@ +## Note: This 
config is only used for finetune training. The ImageNet metrics in PaddleClas are not trained through this config. +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNetV2_B2 + class_num: 1000 + pretrained: True # ssld pretrained + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + # for global bs 1024, when finetune training, you need to reduce learning_rate manually + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B3.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B3.yaml new file mode 100644 index 000000000..536659111 --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B3.yaml @@ -0,0 +1,164 @@ +## Note: This config is only used for finetune training. The ImageNet metrics in PaddleClas are not trained through this config. +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNetV2_B3 + class_num: 1000 + pretrained: True # ssld pretrained + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + # for global bs 1024, when finetune training, you need to reduce learning_rate manually + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B4.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B4.yaml new file mode 100644 index 
000000000..167a48092 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B4.yaml @@ -0,0 +1,164 @@ +## Note: This config is only used for finetune training. The ImageNet metrics in PaddleClas are not trained through this config. +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNetV2_B4 + class_num: 1000 + pretrained: True # ssld pretrained + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + # for global bs 1024, when finetune training, you need to reduce learning_rate manually + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B4_ssld_stage1.yaml 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B4_ssld_stage1.yaml new file mode 100644 index 000000000..c8cd5fcf7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B4_ssld_stage1.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + use_dali: false + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: PPHGNet_small + class_num: *class_num + pretrained: True + use_ssld: True + - Student: + name: PPHGNetV2_B4 + class_num: *class_num + pretrained: False + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationCELoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "./dataset/" + # ImageNet_5M label path, the training process does not use real labels. 
+ cls_label_path: "./dataset/train_list_imagenet_5M.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/val_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: "docs/images/inference_deployment/whl_demo.jpg" + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt" + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B4_ssld_stage2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B4_ssld_stage2.yaml new file mode 100644 index 000000000..074a77b42 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B4_ssld_stage2.yaml @@ -0,0 +1,173 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + use_dali: false + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: PPHGNet_small + class_num: *class_num + pretrained: True + use_ssld: True + - Student: + name: PPHGNetV2_B4 + class_num: *class_num + pretrained: path/to/stage1_best_model_student + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationCELoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + # stage2 should reduce learning rate + 
learning_rate: 0.005 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + # ImageNet-1k label path, the training process does not use real labels + cls_label_path: "./dataset/ILSVRC2012/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/val_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: "docs/images/inference_deployment/whl_demo.jpg" + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt" + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B5.yaml new file mode 100644 index 000000000..57ca91199 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B5.yaml @@ -0,0 +1,164 @@ +## Note: This config is only used for finetune training. The ImageNet metrics in PaddleClas are not trained through this config. 
+# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNetV2_B5 + class_num: 1000 + pretrained: True # ssld pretrained + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + # for global bs 1024, when finetune training, you need to reduce learning_rate manually + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B6.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B6.yaml new file mode 100644 index 000000000..da9cb04bd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPHGNetV2/PPHGNetV2_B6.yaml @@ -0,0 +1,164 @@ +## Note: This config is only used for finetune training. 
The ImageNet metrics in PaddleClas are not trained through this config. +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNetV2_B6 + class_num: 1000 + pretrained: True # ssld pretrained + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + # for global bs 1024, when finetune training, you need to reduce learning_rate manually + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml new file mode 100644 index 000000000..f700f6c6e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml @@ -0,0 +1,141 @@ +# global configs 
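+# note: the non-ultra PPLCNet_x* configs in this directory share this 360-epoch cosine-LR recipe; they differ mainly in Arch.name, the L2 coeff, and an extra AutoAugment op for x2_5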
+Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PPLCNet_x0_25 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml new file mode 100644 index 000000000..c83c504a7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PPLCNet_x0_35 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + 
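# CELoss with epsilon 0.1 applies label smoothing during training; eval uses plain cross-entropy +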
Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml new file mode 100644 index 000000000..00eae55c4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PPLCNet_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 
0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml new file mode 100644 index 000000000..96a43650b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PPLCNet_x0_75 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + 
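# batch_size is per card; evaluation keeps sample order and the final partial batch +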
shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..97291077f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_0_ampo2_ultra.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_0_ampo2_ultra.yaml new file mode 100644 index 000000000..336fb3345 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_0_ampo2_ultra.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + use_dali: True + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O2 + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 3.2 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 1024 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml new file mode 100644 index 000000000..9f7abadd9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_0_fp32_ultra.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + 
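# like the ampo2_ultra variant above, this "ultra" config enables DALI for data loading (use_dali: True below) +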
epochs: 360 + print_batch_step: 10 + use_visualdl: False + use_dali: True + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.6 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml new file mode 100644 index 000000000..367689197 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PPLCNet_x1_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + 
name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml new file mode 100644 index 000000000..87fb92eee --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PPLCNet_x2_0 + class_num: 1000 +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + 
num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml new file mode 100644 index 000000000..762d21fed --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PPLCNet_x2_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: 
docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml new file mode 100644 index 000000000..4195efebc --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 480 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PPLCNetV2_base + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + # support to specify width and height respectively: + # scales: [(160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 500 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + 
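# scale 1.0/255.0 maps uint8 pixel values into [0, 1] before the ImageNet mean/std are applied +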
order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNetV2/PPLCNetV2_large.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNetV2/PPLCNetV2_large.yaml new file mode 100644 index 000000000..1551e673f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNetV2/PPLCNetV2_large.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 480 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PPLCNetV2_large + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.4 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + # support to specify width and height respectively: + # scales: [(160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 250 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNetV2/PPLCNetV2_small.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNetV2/PPLCNetV2_small.yaml new file mode 100644 index 000000000..bb937ba78 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PPLCNetV2/PPLCNetV2_small.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 480 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PPLCNetV2_small + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + # support to specify width and height respectively: + # scales: [(160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 500 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B0.yaml new file mode 100644 index 000000000..447326ac7 --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B0.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PVT_V2_B0 + class_num: 1000 + drop_path_rate: 0.1 + drop_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B1.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B1.yaml new file mode 100644 index 
000000000..35ab6507a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B1.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PVT_V2_B1 + class_num: 1000 + drop_path_rate: 0.1 + drop_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B2.yaml 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B2.yaml new file mode 100644 index 000000000..ec93edc94 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B2.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PVT_V2_B2 + class_num: 1000 + drop_path_rate: 0.1 + drop_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml new file mode 100644 index 000000000..77cfbad84 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PVT_V2_B2_Linear + class_num: 1000 + drop_path_rate: 0.1 + drop_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: 
ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B3.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B3.yaml new file mode 100644 index 000000000..9d4443491 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B3.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PVT_V2_B3 + class_num: 1000 + drop_path_rate: 0.3 + drop_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + clip_grad: 1.0 + no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + 
PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B4.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B4.yaml new file mode 100644 index 000000000..78e08fa7a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B4.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PVT_V2_B4 + class_num: 1000 + drop_path_rate: 0.3 + drop_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + clip_grad: 1.0 + no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: 
[0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B5.yaml new file mode 100644 index 000000000..b8f4da0d3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PVTV2/PVT_V2_B5.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PVT_V2_B5 + class_num: 1000 + drop_path_rate: 0.3 + drop_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + clip_grad: 1.0 + no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + 
scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PeleeNet/PeleeNet.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PeleeNet/PeleeNet.yaml new file mode 100644 index 000000000..06b0059bc --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/PeleeNet/PeleeNet.yaml @@ -0,0 +1,148 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: PeleeNet + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.18 # for total batch size 512 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bilinear + backend: pil + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bilinear + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 # for 2 cards + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bilinear + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_1_0.yaml new file mode 100644 
index 000000000..c4fa39e9b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_1_0.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ReXNet_1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_1_3.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_1_3.yaml new file mode 100644 index 000000000..8bfe5c3c0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_1_3.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed 
precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ReXNet_1_3 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_1_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_1_5.yaml new file mode 100644 index 000000000..66a8497cb --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_1_5.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ReXNet_1_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + 
coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_2_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_2_0.yaml new file mode 100644 index 000000000..80a80c179 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_2_0.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ReXNet_2_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + 
num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_3_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_3_0.yaml new file mode 100644 index 000000000..7b896d20f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ReXNet/ReXNet_3_0.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ReXNet_3_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: 
True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet101.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet101.yaml new file mode 100644 index 000000000..fa793e4d4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet101.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RedNet101 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet152.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet152.yaml new file mode 100644 index 000000000..1b7bc86e9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet152.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RedNet152 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet26.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet26.yaml new file mode 100644 index 000000000..d8e88eb90 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet26.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model 
export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RedNet26 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet38.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet38.yaml new file mode 100644 index 000000000..b1326d647 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet38.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RedNet38 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: 
+ name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet50.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet50.yaml new file mode 100644 index 000000000..5e2a5cf06 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RedNet/RedNet50.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RedNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + 
dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0 + mean: [123.675, 116.28, 103.53] + std: [58.395, 57.12, 57.375] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_12GF.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_12GF.yaml new file mode 100644 index 000000000..7d9135427 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_12GF.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RegNetX_12GF + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + 
transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_1600MF.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_1600MF.yaml new file mode 100644 index 000000000..b14298f5e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_1600MF.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RegNetX_1600MF + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_16GF.yaml 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_16GF.yaml new file mode 100644 index 000000000..7de217298 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_16GF.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RegNetX_16GF + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_200MF.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_200MF.yaml new file mode 100644 index 000000000..1b87cabc0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_200MF.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: 
./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RegNetX_200MF + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_3200MF.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_3200MF.yaml new file mode 100644 index 000000000..702296394 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_3200MF.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RegNetX_3200MF + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 
0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_32GF.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_32GF.yaml new file mode 100644 index 000000000..0c770b0c3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_32GF.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RegNetX_32GF + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: 
+ name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_400MF.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_400MF.yaml new file mode 100644 index 000000000..aad3bb23f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_400MF.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RegNetX_400MF + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + 
transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_600MF.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_600MF.yaml new file mode 100644 index 000000000..54dbbcca4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_600MF.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RegNetX_600MF + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_6400MF.yaml 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_6400MF.yaml new file mode 100644 index 000000000..eae2c6a4d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_6400MF.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RegNetX_6400MF + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_800MF.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_800MF.yaml new file mode 100644 index 000000000..8dd6d19db --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_800MF.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: 
./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RegNetX_800MF + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_8GF.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_8GF.yaml new file mode 100644 index 000000000..3cb65e961 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RegNet/RegNetX_8GF.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RegNetX_8GF + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# 
data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_A0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_A0.yaml new file mode 100644 index 000000000..ecec64644 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_A0.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RepVGG_A0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_A1.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_A1.yaml new file mode 100644 index 000000000..8ce5a8ad4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_A1.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RepVGG_A1 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + 
mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_A2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_A2.yaml new file mode 100644 index 000000000..7c8e4bc61 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_A2.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RepVGG_A2 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B0.yaml new file mode 100644 index 000000000..5449d698d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B0.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + 
pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RepVGG_B0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B1.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B1.yaml new file mode 100644 index 000000000..37f926299 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B1.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RepVGG_B1 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + 
name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B1g2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B1g2.yaml new file mode 100644 index 000000000..a78d60d6d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B1g2.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RepVGG_B1g2 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + 
use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B1g4.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B1g4.yaml new file mode 100644 index 000000000..4b2678409 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B1g4.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RepVGG_B1g4 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True 
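All of the Eval and Infer pipelines in these configs follow the same ImageNet recipe: decode to RGB, resize the short side, center-crop, scale by 1/255, normalize with the ImageNet mean/std, then transpose HWC to CHW. For reference, a rough NumPy/PIL equivalent of what that chain computes; this is only a sketch, not the PaddleClas operators, and the preprocess() helper name is mine:

# Rough stand-in for the DecodeImage/ResizeImage/CropImage/NormalizeImage/ToCHWImage
# chain in these configs; a sketch only, not the ppcls operator implementations.
import numpy as np
from PIL import Image

MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

def preprocess(path, resize_short=256, crop=224):
    img = Image.open(path).convert("RGB")          # DecodeImage: to_rgb=True
    w, h = img.size
    scale = resize_short / min(w, h)               # ResizeImage: resize_short
    img = img.resize((round(w * scale), round(h * scale)), Image.BILINEAR)
    w, h = img.size
    left, top = (w - crop) // 2, (h - crop) // 2   # CropImage: center crop
    img = img.crop((left, top, left + crop, top + crop))
    x = np.asarray(img, dtype=np.float32) * (1.0 / 255.0)   # NormalizeImage: scale
    x = (x - MEAN) / STD
    return x.transpose(2, 0, 1)                    # ToCHWImage: HWC -> CHW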
+ channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B2.yaml new file mode 100644 index 000000000..d6c3f36a3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B2.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RepVGG_B2 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B2g4.yaml 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B2g4.yaml new file mode 100644 index 000000000..d596211bb --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B2g4.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RepVGG_B2g4 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B3.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B3.yaml new file mode 100644 index 000000000..a15cbdc6c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B3.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + 
save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RepVGG_B3 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B3g4.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B3g4.yaml new file mode 100644 index 000000000..d2921bdb5 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_B3g4.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RepVGG_B3g4 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine 
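The RepVGG_B2/B3 and Res2Net configs pair name: Cosine with warmup_epoch: 5 under Optimizer.lr, while the RegNet configs earlier use a Piecewise step schedule instead. A per-epoch sketch of the cosine-with-warmup variant, assuming linear warmup followed by cosine decay to zero; lr_at_epoch() is an illustrative helper, not the ppcls LRScheduler API:

import math

def lr_at_epoch(epoch, base_lr=0.1, total_epochs=200, warmup_epoch=5):
    """Sketch of Cosine decay with linear warmup as configured above."""
    if epoch < warmup_epoch:
        # Linear warmup from 0 up to base_lr over the first warmup_epoch epochs.
        return base_lr * (epoch + 1) / warmup_epoch
    # Cosine decay from base_lr down to 0 over the remaining epochs.
    progress = (epoch - warmup_epoch) / (total_epochs - warmup_epoch)
    return 0.5 * base_lr * (1.0 + math.cos(math.pi * progress))

# e.g. lr_at_epoch(0) == 0.02, lr_at_epoch(4) == 0.1, lr_at_epoch(199) ~ 0.0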
+ learning_rate: 0.1 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_D2se.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_D2se.yaml new file mode 100644 index 000000000..0b7f105f1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/RepVGG/RepVGG_D2se.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 320, 320] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: RepVGG_D2se + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: 
[0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + interpolation: bicubic + backend: pil + - CropImage: + size: 320 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + interpolation: bicubic + backend: pil + - CropImage: + size: 320 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml new file mode 100644 index 000000000..155a5caee --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: Res2Net101_vd_26w_4s + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: 
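From RepVGG_B2 onward, and in every Res2Net config, the Train pipeline adds batch_transform_ops with MixupOperator (alpha: 0.2), which blends pairs of samples within each batch. A minimal NumPy sketch of that operation; mixup_batch() is illustrative only, not the ppcls operator:

import numpy as np

def mixup_batch(images, labels, alpha=0.2, rng=None):
    """Sketch of mixup: blend each sample with a shuffled partner in the batch.

    images: float array [N, C, H, W]; labels: int array [N].
    Returns blended images plus (labels_a, labels_b, lam) for the mixed loss
    lam * CE(pred, labels_a) + (1 - lam) * CE(pred, labels_b).
    """
    rng = rng or np.random.default_rng()
    lam = rng.beta(alpha, alpha)                   # mixing coefficient
    perm = rng.permutation(len(images))            # shuffled partners
    mixed = lam * images + (1.0 - lam) * images[perm]
    return mixed, labels, labels[perm], lam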
+ size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml new file mode 100644 index 000000000..db0076cf3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: Res2Net200_vd_26w_4s + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + 
- ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml new file mode 100644 index 000000000..e90812e03 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: Res2Net50_14w_8s + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml new file mode 100644 index 000000000..eabf48041 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + 
pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: Res2Net50_26w_4s + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml new file mode 100644 index 000000000..4f72ecbb4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: Res2Net50_vd_26w_4s + class_num: 1000 + +# loss function config for traing/eval process 
+Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt101.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt101.yaml new file mode 100644 index 000000000..315b7c74e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt101.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNeSt101 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: 
[0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt200.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt200.yaml new file mode 100644 index 000000000..cb3721264 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt200.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 320, 320] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNeSt200 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 320 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 320 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + 
sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 320 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt269.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt269.yaml new file mode 100644 index 000000000..985ff98e7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt269.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 416, 416] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNeSt269 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 416 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 416 + - CropImage: + size: 416 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 416 + - CropImage: + size: 416 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - 
TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt50.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt50.yaml new file mode 100644 index 000000000..50529aecd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt50.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNeSt50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml new file mode 100644 index 000000000..c5db97b10 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + 
print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNeSt50_fast_1s1x64d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x16d_wsl.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x16d_wsl.yaml new file mode 100644 index 000000000..e0d496e83 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x16d_wsl.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNeXt101_32x16d_wsl + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: 
+ name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x32d_wsl.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x32d_wsl.yaml new file mode 100644 index 000000000..40f82b5fd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x32d_wsl.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNeXt101_32x32d_wsl + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + 
mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x48d_wsl.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x48d_wsl.yaml new file mode 100644 index 000000000..5974ca719 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x48d_wsl.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNeXt101_32x48d_wsl + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 
0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x8d_wsl.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x8d_wsl.yaml new file mode 100644 index 000000000..d2f4cd852 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNeXt101_wsl/ResNeXt101_32x8d_wsl.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNeXt101_32x8d_wsl + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + 
PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet101.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet101.yaml new file mode 100644 index 000000000..afd6329b3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet101.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet101 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet101_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet101_vd.yaml new file mode 100644 index 000000000..748dfc12f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet101_vd.yaml @@ -0,0 +1,142 @@ +# global 
configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet101_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet152.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet152.yaml new file mode 100644 index 000000000..993d52e5d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet152.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet152 + class_num: 1000 + +# 
loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet152_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet152_vd.yaml new file mode 100644 index 000000000..8daf0f806 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet152_vd.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet152_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - 
NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet18.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet18.yaml new file mode 100644 index 000000000..2c0c4c231 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet18.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet18 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: 
[0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet18_dbb.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet18_dbb.yaml new file mode 100644 index 000000000..35250d83a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet18_dbb.yaml @@ -0,0 +1,153 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet18 + class_num: 1000 + layer_type: DiverseBranchBlock + use_first_short_conv: False + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + backend: pil + interpolation: bilinear + - RandFlipImage: + flip_code: 1 + - ColorJitter: + brightness: 0.4 + saturation: 0.4 + hue: 0.4 + - PCALighting: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + backend: pil + interpolation: bilinear + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: 
[0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet18_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet18_vd.yaml new file mode 100644 index 000000000..591ef505b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet18_vd.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet18_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet200_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet200_vd.yaml new file mode 100644 index 000000000..135388ae7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet200_vd.yaml @@ -0,0 +1,142 @@ +# 
global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet200_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet34.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet34.yaml new file mode 100644 index 000000000..88af8fe0e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet34.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet34 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + 
Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet34_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet34_vd.yaml new file mode 100644 index 000000000..feb6c4266 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet34_vd.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet34_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: 
[0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50.yaml new file mode 100644 index 000000000..a6dd0f5f4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + 
std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_amp_O1.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_amp_O1.yaml new file mode 100644 index 000000000..4981efe91 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_amp_O1.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: True + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + multi_precision: True + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] 
+ order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_amp_O1_ultra.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_amp_O1_ultra.yaml new file mode 100644 index 000000000..47aa4c0e4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_amp_O1_ultra.yaml @@ -0,0 +1,150 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + image_channel: &image_channel 4 + # used for static mode and model export + image_shape: [*image_channel, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: True + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + input_image_channel: *image_channel + data_format: "NHWC" + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + multi_precision: True + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: *image_channel + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: *image_channel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: *image_channel + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
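The ResNet50 configs above all share the same step ("Piecewise") learning-rate policy: decay_epochs [30, 60, 90] with values [0.1, 0.01, 0.001, 0.0001]. As a rough illustration of how such a schedule maps an epoch index to a learning rate, here is a minimal sketch; it is not the ppcls LRScheduler implementation, and the function name and bisect-based lookup are illustrative only.

from bisect import bisect_right

def piecewise_lr(epoch, decay_epochs=(30, 60, 90),
                 values=(0.1, 0.01, 0.001, 0.0001)):
    """Return the learning rate for a given epoch under a step schedule.

    values has exactly one more entry than decay_epochs: the i-th value is
    used while epoch < decay_epochs[i], and the last value afterwards.
    """
    return values[bisect_right(decay_epochs, epoch)]

# Epochs 0-29 train at 0.1, 30-59 at 0.01, 60-89 at 0.001, 90+ at 0.0001.
assert piecewise_lr(0) == 0.1
assert piecewise_lr(30) == 0.01
assert piecewise_lr(89) == 0.001
assert piecewise_lr(119) == 0.0001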
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml new file mode 100644 index 000000000..7f4de6640 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml @@ -0,0 +1,150 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + image_channel: &image_channel 4 + # used for static mode and model export + image_shape: [*image_channel, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: True + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O2 + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + input_image_channel: *image_channel + data_format: "NHWC" + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + multi_precision: True + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + output_fp16: True + channel_num: *image_channel + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: *image_channel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: *image_channel + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_ampo2_ultra.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_ampo2_ultra.yaml new file mode 100644 index 000000000..ee606707a --- /dev/null +++ 
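The *_ultra variants rely on a plain YAML anchor: "image_channel: &image_channel 4" defines the value once, and every "*image_channel" alias (image_shape, input_image_channel, the channel_num of each NormalizeImage op) reuses it, so switching between 3- and 4-channel NHWC/DALI pipelines is a one-line change. A small sketch of how the alias resolves, assuming PyYAML is available; the snippet below is a cut-down illustration, not one of the config files in this patch.

import yaml  # PyYAML

snippet = """
Global:
  image_channel: &image_channel 4
  image_shape: [*image_channel, 224, 224]
Arch:
  input_image_channel: *image_channel
"""

cfg = yaml.safe_load(snippet)
# Every alias resolves to the single anchored value.
assert cfg["Global"]["image_shape"] == [4, 224, 224]
assert cfg["Arch"]["input_image_channel"] == 4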
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_ampo2_ultra.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + use_dali: True + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O2 + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.8 + decay_epochs: [30, 60, 90] + values: [0.8, 0.08, 0.008, 0.0008] + warmup_epoch : 5 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_fp32_ultra.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_fp32_ultra.yaml new file mode 100644 index 000000000..9fb8da0d6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_fp32_ultra.yaml @@ -0,0 +1,146 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + use_dali: True + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model 
under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet50 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.8 + decay_epochs: [30, 60, 90] + values: [0.8, 0.08, 0.008, 0.0008] + warmup_epoch : 5 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_vd.yaml new file mode 100644 index 000000000..5f30746b1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ResNet/ResNet50_vd.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ResNet50_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for 
train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SENet154_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SENet154_vd.yaml new file mode 100644 index 000000000..33e72300a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SENet154_vd.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SENet154_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: 
ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml new file mode 100644 index 000000000..df3d9c561 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SE_ResNeXt101_32x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml new file mode 100644 index 000000000..835782185 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_channel: &image_channel 4 + image_shape: [*image_channel, 224, 224] + save_inference_dir: ./inference + +# mixed precision training +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O2 + +# model architecture +Arch: + name: SE_ResNeXt101_32x4d + class_num: 1000 + input_image_channel: *image_channel + data_format: "NHWC" + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + multi_precision: True + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + output_fp16: True + channel_num: *image_channel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: *image_channel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: *image_channel + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + 
Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml new file mode 100644 index 000000000..192b5aafd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SE_ResNeXt50_32x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml new file mode 100644 index 000000000..e82bfe42c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + 
epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SE_ResNeXt50_vd_32x4d + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNet18_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNet18_vd.yaml new file mode 100644 index 000000000..5cfed65d2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNet18_vd.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SE_ResNet18_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + 
name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNet34_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNet34_vd.yaml new file mode 100644 index 000000000..857300d51 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNet34_vd.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SE_ResNet34_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + 
drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNet50_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNet50_vd.yaml new file mode 100644 index 000000000..e7b94138c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SENet/SE_ResNet50_vd.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SE_ResNet50_vd + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + 
infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml new file mode 100644 index 000000000..4b0393cce --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ShuffleNetV2_swish + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml 
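Every Eval/Infer section in these configs uses the same preprocessing chain: decode to RGB, resize the short side to 256, center-crop 224, scale by 1/255, normalize with the ImageNet mean/std, then move channels first. The following NumPy-only sketch mirrors that chain; it uses nearest-neighbour resizing for brevity and is only an approximation of the real DecodeImage/ResizeImage operators in ppcls, which differ in decoding and interpolation details.

import numpy as np

MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

def resize_short(img, target=256):
    """Nearest-neighbour resize so the shorter side equals `target`."""
    h, w = img.shape[:2]
    scale = target / min(h, w)
    new_h, new_w = int(round(h * scale)), int(round(w * scale))
    rows = (np.arange(new_h) * h / new_h).astype(int)
    cols = (np.arange(new_w) * w / new_w).astype(int)
    return img[rows][:, cols]

def center_crop(img, size=224):
    h, w = img.shape[:2]
    top, left = (h - size) // 2, (w - size) // 2
    return img[top:top + size, left:left + size]

def preprocess(img_hwc_uint8):
    """HWC uint8 RGB image -> CHW float32 array, as in the Eval transform_ops."""
    x = center_crop(resize_short(img_hwc_uint8)).astype(np.float32)
    x = (x / 255.0 - MEAN) / STD   # scale: 1.0/255.0, then mean/std
    return x.transpose(2, 0, 1)    # ToCHWImage

# Example on a dummy image.
dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
print(preprocess(dummy).shape)  # (3, 224, 224)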
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml new file mode 100644 index 000000000..a00c648cf --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ShuffleNetV2_x0_25 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml new file mode 100644 index 000000000..e3b82cf90 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: 
./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ShuffleNetV2_x0_33 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml new file mode 100644 index 000000000..c228b57a6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ShuffleNetV2_x0_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + 
dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml new file mode 100644 index 000000000..f0d5d46f0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ShuffleNetV2_x1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml new file mode 100644 index 000000000..202514c99 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ShuffleNetV2_x1_5 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.25 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - 
CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml new file mode 100644 index 000000000..d633a754e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 240 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ShuffleNetV2_x2_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.25 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml new file mode 100644 index 000000000..7224467c7 --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SqueezeNet1_0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.02 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml new file mode 100644 index 000000000..ed5ef8ecb --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml @@ -0,0 +1,140 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model 
architecture +Arch: + name: SqueezeNet1_1 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.02 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S1.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S1.yaml new file mode 100644 index 000000000..a25df46d4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S1.yaml @@ -0,0 +1,165 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: StarNet_S1 + drop_rate: 0 + drop_path_rate: 0 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + clip_grad: None + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 3e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: 
./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m1-mstd0.5-inc1 + interpolation: random + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 0.2 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: False + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + backend: pil + channel_first: False + - ResizeImage: + interpolation: bicubic + backend: pil + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + sampler: + name: DistributedBatchSampler + batch_size: 20 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: False + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 224 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S2.yaml new file mode 100644 index 000000000..f57360e59 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S2.yaml @@ -0,0 +1,165 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: StarNet_S2 + drop_rate: 0 + drop_path_rate: 0.01 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + clip_grad: None + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 3e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + 
- TimmAutoAugment: + config_str: rand-m1-mstd0.5-inc1 + interpolation: random + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 0.2 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: False + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + backend: pil + channel_first: False + - ResizeImage: + interpolation: bicubic + backend: pil + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + sampler: + name: DistributedBatchSampler + batch_size: 20 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: False + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 224 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S3.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S3.yaml new file mode 100644 index 000000000..af99efbc4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S3.yaml @@ -0,0 +1,166 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: StarNet_S3 + drop_rate: 0 + drop_path_rate: 0.01 + class_num: 1000 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + clip_grad: None + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 3e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list_smallbatch.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m1-mstd0.5-inc1 + interpolation: random + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + 
std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 0.2 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: False + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + backend: pil + channel_first: False + - ResizeImage: + interpolation: bicubic + backend: pil + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + sampler: + name: DistributedBatchSampler + batch_size: 20 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: False + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 224 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S4.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S4.yaml new file mode 100644 index 000000000..608f76c7f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/StarNet/StarNet_S4.yaml @@ -0,0 +1,165 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: StarNet_S4 + drop_rate: 0 + drop_path_rate: 0.02 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + clip_grad: None + no_weight_decay_name: null + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 3e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m1-mstd0.5-inc1 + interpolation: random + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + 
CutmixOperator: + alpha: 0.2 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: False + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + backend: pil + channel_first: False + - ResizeImage: + interpolation: bicubic + backend: pil + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + sampler: + name: DistributedBatchSampler + batch_size: 20 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: False + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 224 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml new file mode 100644 index 000000000..c212dab12 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformer_base_patch4_window12_384 + class_num: 1000 + # fused op can be used in AMP O2 mode only + use_fused_attn: False + use_fused_linear: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 2e-5 + warmup_epoch: 20 + warmup_start_lr: 2e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 
0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml new file mode 100644 index 000000000..6941543d8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml @@ -0,0 +1,176 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformer_base_patch4_window7_224 + class_num: 1000 + pretrained: True + # fused op can be used in AMP O2 mode only + use_fused_attn: False + use_fused_linear: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 2e-5 + warmup_epoch: 20 + warmup_start_lr: 2e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False 
+ - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml new file mode 100644 index 000000000..3e335b24e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformer_large_patch4_window12_384 + class_num: 1000 + # fused op can be used in AMP O2 mode only + use_fused_attn: False + use_fused_linear: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 2e-5 + warmup_epoch: 20 + warmup_start_lr: 2e-6 + + 
+# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 438 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml new file mode 100644 index 000000000..89bdd0108 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformer_large_patch4_window7_224 + class_num: 1000 + # fused op can be used in AMP O2 mode only + use_fused_attn: False + use_fused_linear: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 
0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 2e-5 + warmup_epoch: 20 + warmup_start_lr: 2e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml new file mode 100644 index 000000000..ee134d5f0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformer_small_patch4_window7_224 + class_num: 1000 + # fused op can be used in AMP O2 mode only + use_fused_attn: False + 
use_fused_linear: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 2e-5 + warmup_epoch: 20 + warmup_start_lr: 2e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 000000000..e359afcef --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: True + use_fp16_test: True + scale_loss: 128.0 + use_dynamic_loss_scaling: 
True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O2 + + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 1000 + # fused op can be used in AMP O2 mode only + use_fused_attn: False + use_fused_linear: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 2e-5 + warmup_epoch: 20 + warmup_start_lr: 2e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_base_patch4_window16_256.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_base_patch4_window16_256.yaml new file mode 100644 index 000000000..966bb2cd1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_base_patch4_window16_256.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for 
static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformerV2_base_patch4_window16_256 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_base_patch4_window24_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_base_patch4_window24_384.yaml new file mode 100644 index 000000000..b71ad0ec4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_base_patch4_window24_384.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + 
output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformerV2_base_patch4_window24_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_base_patch4_window8_256.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_base_patch4_window8_256.yaml new file mode 100644 index 000000000..2337b5d8d --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_base_patch4_window8_256.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformerV2_base_patch4_window8_256 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_large_patch4_window16_256.yaml 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_large_patch4_window16_256.yaml new file mode 100644 index 000000000..ed60a4b82 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_large_patch4_window16_256.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformerV2_large_patch4_window16_256 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - 
TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_large_patch4_window24_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_large_patch4_window24_384.yaml new file mode 100644 index 000000000..48972c48f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_large_patch4_window24_384.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformerV2_large_patch4_window24_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + interpolation: bicubic + backend: pil + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + 
std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_small_patch4_window16_256.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_small_patch4_window16_256.yaml new file mode 100644 index 000000000..e8c4c7c4c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_small_patch4_window16_256.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformerV2_small_patch4_window16_256 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - 
ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_small_patch4_window8_256.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_small_patch4_window8_256.yaml new file mode 100644 index 000000000..c0e1e0c7e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_small_patch4_window8_256.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformerV2_small_patch4_window8_256 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + 
use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_tiny_patch4_window16_256.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_tiny_patch4_window16_256.yaml new file mode 100644 index 000000000..18678441e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_tiny_patch4_window16_256.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformerV2_tiny_patch4_window16_256 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] 
+ std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_tiny_patch4_window8_256.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_tiny_patch4_window8_256.yaml new file mode 100644 index 000000000..a1b69b3d7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/SwinTransformerV2/SwinTransformerV2_tiny_patch4_window8_256.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SwinTransformerV2_tiny_patch4_window8_256 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - 
ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 292 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TNT/TNT_base.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TNT/TNT_base.yaml new file mode 100644 index 000000000..c2c7766be --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TNT/TNT_base.yaml @@ -0,0 +1,146 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: TNT_base + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 
224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TNT/TNT_small.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TNT/TNT_small.yaml new file mode 100644 index 000000000..2ab6cb60f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TNT/TNT_small.yaml @@ -0,0 +1,146 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: TNT_small + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_A.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_A.yaml new file mode 100644 index 000000000..4e6c365bd --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_A.yaml @@ -0,0 +1,167 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 450 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 192] + save_inference_dir: ./inference + +# model ema +EMA: + decay: 0.9999 + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: TinyNet_A + class_num: 1000 + override_params: + batch_norm_momentum: 0.9 + batch_norm_epsilon: 1e-5 + depth_trunc: round + drop_connect_rate: 0.1 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + one_dim_param_no_weight_decay: True + lr: + name: Step + learning_rate: 0.048 + step_size: 2.4 + gamma: 0.97 + warmup_epoch: 3 + warmup_start_lr: 1e-6 + regularizer: + name: 'L2' + coeff: 1e-5 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 192 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - ColorJitter: + brightness: 0.4 + contrast: 0.4 + saturation: 0.4 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 219 + interpolation: bicubic + backend: pil + - CropImage: + size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + - ResizeImage: + resize_short: 219 + interpolation: bicubic + backend: pil + - CropImage: + size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_B.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_B.yaml new file mode 100644 index 000000000..96fd0d06b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_B.yaml @@ -0,0 +1,167 @@ +# global configs +Global: + 
checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 450 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 188, 188] + save_inference_dir: ./inference + +# model ema +EMA: + decay: 0.9999 + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: TinyNet_B + class_num: 1000 + override_params: + batch_norm_momentum: 0.9 + batch_norm_epsilon: 1e-5 + depth_trunc: round + drop_connect_rate: 0.1 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + one_dim_param_no_weight_decay: True + lr: + name: Step + learning_rate: 0.048 + step_size: 2.4 + gamma: 0.97 + warmup_epoch: 3 + warmup_start_lr: 1e-6 + regularizer: + name: 'L2' + coeff: 1e-5 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 188 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - ColorJitter: + brightness: 0.4 + contrast: 0.4 + saturation: 0.4 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 214 + interpolation: bicubic + backend: pil + - CropImage: + size: 188 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + - ResizeImage: + resize_short: 214 + interpolation: bicubic + backend: pil + - CropImage: + size: 188 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_C.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_C.yaml new file mode 100644 index 000000000..addaec8c1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_C.yaml @@ -0,0 +1,167 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 
+ epochs: 450 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 184, 184] + save_inference_dir: ./inference + +# model ema +EMA: + decay: 0.9999 + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: TinyNet_C + class_num: 1000 + override_params: + batch_norm_momentum: 0.9 + batch_norm_epsilon: 1e-5 + depth_trunc: round + drop_connect_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + one_dim_param_no_weight_decay: True + lr: + name: Step + learning_rate: 0.048 + step_size: 2.4 + gamma: 0.97 + warmup_epoch: 3 + warmup_start_lr: 1e-6 + regularizer: + name: 'L2' + coeff: 1e-5 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 184 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - ColorJitter: + brightness: 0.4 + contrast: 0.4 + saturation: 0.4 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 210 + interpolation: bicubic + backend: pil + - CropImage: + size: 184 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + - ResizeImage: + resize_short: 210 + interpolation: bicubic + backend: pil + - CropImage: + size: 184 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_D.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_D.yaml new file mode 100644 index 000000000..a868af59b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_D.yaml @@ -0,0 +1,167 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 450 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 152, 152] + 
save_inference_dir: ./inference + +# model ema +EMA: + decay: 0.9999 + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: TinyNet_D + class_num: 1000 + override_params: + batch_norm_momentum: 0.9 + batch_norm_epsilon: 1e-5 + depth_trunc: round + drop_connect_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + one_dim_param_no_weight_decay: True + lr: + name: Step + learning_rate: 0.048 + step_size: 2.4 + gamma: 0.97 + warmup_epoch: 3 + warmup_start_lr: 1e-6 + regularizer: + name: 'L2' + coeff: 1e-5 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 152 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - ColorJitter: + brightness: 0.4 + contrast: 0.4 + saturation: 0.4 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 173 + interpolation: bicubic + backend: pil + - CropImage: + size: 152 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + - ResizeImage: + resize_short: 173 + interpolation: bicubic + backend: pil + - CropImage: + size: 152 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_E.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_E.yaml new file mode 100644 index 000000000..02617db16 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/TinyNet/TinyNet_E.yaml @@ -0,0 +1,167 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 450 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 106, 106] + save_inference_dir: ./inference + +# model ema +EMA: + decay: 0.9999 + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + 
scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: TinyNet_E + class_num: 1000 + override_params: + batch_norm_momentum: 0.9 + batch_norm_epsilon: 1e-5 + depth_trunc: round + drop_connect_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: RMSProp + momentum: 0.9 + rho: 0.9 + epsilon: 0.001 + one_dim_param_no_weight_decay: True + lr: + name: Step + learning_rate: 0.048 + step_size: 2.4 + gamma: 0.97 + warmup_epoch: 3 + warmup_start_lr: 1e-6 + regularizer: + name: 'L2' + coeff: 1e-5 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 106 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - ColorJitter: + brightness: 0.4 + contrast: 0.4 + saturation: 0.4 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 121 + interpolation: bicubic + backend: pil + - CropImage: + size: 106 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + - ResizeImage: + resize_short: 121 + interpolation: bicubic + backend: pil + - CropImage: + size: 106 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/alt_gvt_base.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/alt_gvt_base.yaml new file mode 100644 index 000000000..216bda288 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/alt_gvt_base.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + 
+# model architecture +Arch: + name: alt_gvt_base + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.3 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/alt_gvt_large.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/alt_gvt_large.yaml new file mode 100644 index 000000000..ff2e62520 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/alt_gvt_large.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: 
True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: alt_gvt_large + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.5 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/alt_gvt_small.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/alt_gvt_small.yaml new file mode 100644 index 000000000..7de1133c1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/alt_gvt_small.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: 
False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: alt_gvt_small + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.2 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/pcpvt_base.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/pcpvt_base.yaml new file mode 100644 index 000000000..b3fbeca13 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/pcpvt_base.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under 
@to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: pcpvt_base + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.3 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/pcpvt_large.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/pcpvt_large.yaml new file mode 100644 index 000000000..4d91ea255 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/pcpvt_large.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 
224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: pcpvt_large + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.5 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/pcpvt_small.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/pcpvt_small.yaml new file mode 100644 index 000000000..97d5f1e19 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Twins/pcpvt_small.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: 
False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: pcpvt_small + class_num: 1000 + drop_rate: 0.0 + drop_path_rate: 0.2 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 2e-5 + warmup_epoch: 5 + warmup_start_lr: 2e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_base.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_base.yaml new file mode 100644 index 000000000..58ef6931f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_base.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + 
eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: UniFormer_base + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_base_ls.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_base_ls.yaml new file mode 100644 index 000000000..29a3d4261 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_base_ls.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + 
device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: UniFormer_base_ls + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_small.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_small.yaml new file mode 100644 index 000000000..5d1860d89 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_small.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: 
null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: UniFormer_small + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_small_plus.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_small_plus.yaml new file mode 100644 index 000000000..f08fb716a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_small_plus.yaml @@ -0,0 +1,174 @@ +# global configs 
+Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: UniFormer_small_plus + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_small_plus_dim64.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_small_plus_dim64.yaml new file mode 100644 index 000000000..bff77c140 --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/UniFormer/UniFormer_small_plus_dim64.yaml @@ -0,0 +1,174 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: UniFormer_small_plus_dim64 + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: pos_embed cls_token .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B0.yaml new file mode 100644 index 
000000000..f121dc57d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B0.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: VAN_B0 + class_num: 1000 + drop_path_rate: 0.1 + drop_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-6 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: random + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: random + img_size: 224 + mean: [0.5, 0.5, 0.5] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B1.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B1.yaml new file mode 100644 index 000000000..fbc30f6f5 --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B1.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: VAN_B1 + class_num: 1000 + drop_path_rate: 0.1 + drop_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-6 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: random + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: random + img_size: 224 + mean: [0.5, 0.5, 0.5] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B2.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B2.yaml new file mode 100644 index 000000000..1dca03a2d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B2.yaml @@ -0,0 +1,170 
@@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: VAN_B2 + class_num: 1000 + drop_path_rate: 0.1 + drop_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-6 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: random + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: random + img_size: 224 + mean: [0.5, 0.5, 0.5] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B3.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B3.yaml new file mode 100644 index 000000000..6bdef9a20 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VAN/VAN_B3.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + 
device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: VAN_B3 + class_num: 1000 + drop_path_rate: 0.1 + drop_rate: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-3 + eta_min: 1e-6 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: random + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: random + img_size: 224 + mean: [0.5, 0.5, 0.5] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 248 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml new file mode 100644 index 000000000..5b9c518a7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + 
save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ViT_base_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml new file mode 100644 index 000000000..a8792a036 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ViT_base_patch16_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 
1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml new file mode 100644 index 000000000..477d9edd0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ViT_base_patch32_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + - RandFlipImage: + flip_code: 1 + - 
NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml new file mode 100644 index 000000000..7174f151f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ViT_large_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + 
std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml new file mode 100644 index 000000000..195cd2293 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ViT_large_patch16_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: 
ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml new file mode 100644 index 000000000..afa78dacf --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 384, 384] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ViT_large_patch32_384 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 384 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 384 + - CropImage: + size: 384 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml new file mode 100644 index 000000000..7eafe2acd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml @@ -0,0 +1,142 @@ +# global configs 
+Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: ViT_small_patch16_224 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception41.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception41.yaml new file mode 100644 index 000000000..c622617f7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception41.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: Xception41 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + 
Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception41_deeplab.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception41_deeplab.yaml new file mode 100644 index 000000000..d03b6bc77 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception41_deeplab.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: Xception41_deeplab + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 
0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception65.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception65.yaml new file mode 100644 index 000000000..c134331a0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception65.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: Xception65 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + 
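An aside on the `MixupOperator` batch transform (alpha: 0.2) in the Xception65 training pipeline above: a small NumPy sketch of the mixup idea, with assumed array shapes. It is not the PaddleClas operator itself.

```python
# Mixup sketch: blend each image with a shuffled partner and mix the label
# vectors with the same coefficient lambda ~ Beta(alpha, alpha).
import numpy as np

def mixup_batch(images, labels, alpha=0.2, seed=None):
    """images: [N, C, H, W] float array; labels: [N, num_classes] one-hot array."""
    rng = np.random.default_rng(seed)
    lam = rng.beta(alpha, alpha)            # mixing coefficient
    perm = rng.permutation(len(images))     # random partner for every sample
    mixed_images = lam * images + (1.0 - lam) * images[perm]
    mixed_labels = lam * labels + (1.0 - lam) * labels[perm]
    return mixed_images, mixed_labels

images = np.random.rand(4, 3, 299, 299).astype("float32")
labels = np.eye(1000, dtype="float32")[np.random.randint(0, 1000, size=4)]
mixed_images, mixed_labels = mixup_batch(images, labels, alpha=0.2)
```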
drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception65_deeplab.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception65_deeplab.yaml new file mode 100644 index 000000000..05e88336e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception65_deeplab.yaml @@ -0,0 +1,141 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: Xception65_deeplab + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.045 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception71.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception71.yaml new file mode 100644 index 000000000..3fabd6595 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ImageNet/Xception/Xception71.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 299, 299] + save_inference_dir: ./inference + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: Xception71 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0225 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 299 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 320 + - CropImage: + size: 299 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Logo/ResNet50_ReID.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Logo/ResNet50_ReID.yaml new file mode 100644 index 000000000..bfbedf8f9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Logo/ResNet50_ReID.yaml @@ -0,0 +1,151 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 
224, 224] + save_inference_dir: "./inference" + eval_mode: "retrieval" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "avg_pool" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "CircleMargin" + margin: 0.35 + scale: 64 + embedding_size: 512 + class_num: 3000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - PairwiseCosface: + margin: 0.35 + gamma: 64 + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + regularizer: + name: "L2" + coeff: 0.0001 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "dataset/LogoDet-3K-crop/train/" + cls_label_path: "dataset/LogoDet-3K-crop/train_list.txt" + relabel: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + - RandomErasing: + EPSILON: 0.5 + sampler: + name: PKSampler + batch_size: 128 + sample_per_id: 2 + drop_last: True + + loader: + num_workers: 6 + use_shared_memory: True + Eval: + Query: + dataset: + name: ImageNetDataset + image_root: "dataset/LogoDet-3K-crop/val/" + cls_label_path: "dataset/LogoDet-3K-crop/query_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + + Gallery: + dataset: + name: ImageNetDataset + image_root: "dataset/LogoDet-3K-crop/train/" + cls_label_path: "dataset/LogoDet-3K-crop/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/CLIP_vit_base_patch16_448_ml_decoder_448.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/CLIP_vit_base_patch16_448_ml_decoder_448.yaml new file mode 100644 index 000000000..435842f89 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/CLIP_vit_base_patch16_448_ml_decoder_448.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 448, 448] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_multilabel: True + +# model ema +EMA: + decay: 0.9997 + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 
128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O2 + +# model architecture +Arch: + name: CLIP_vit_base_patch16_224 + class_num: 80 + return_embed: False + use_fused_attn: False # fused attn can be used in AMP O2 mode only + pretrained: True + use_ml_decoder: True + +# ml-decoder head +MLDecoder: + query_num: 80 # default: 80, query_num <= class_num + in_channels: 768 + remove_layers: [] + replace_layer: 'head' + +# loss function config for training/eval process +Loss: + Train: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + + Eval: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 1e-4 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-5 + eta_min: 1e-10 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: dataset/coco_ml/images + cls_label_path: dataset/coco_ml/train.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - Cutout: + length: 224 + fill_value: none + - RandAugmentV4: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 16 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: MultiLabelDataset + image_root: dataset/coco_ml/images + cls_label_path: dataset/coco_ml/val.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 8 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/coco_000000570688.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: MultiLabelThreshOutput + threshold: 0.5 + class_id_map_file: ppcls/utils/COCO2017_label_list.txt + +Metric: + Train: + Eval: + - MultiLabelMAP: + # support list: integral, 11point + # default: integral + map_type: integral diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-HGNetV2-B0_ml_decoder_448.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-HGNetV2-B0_ml_decoder_448.yaml new file mode 100644 index 000000000..00b3abb16 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-HGNetV2-B0_ml_decoder_448.yaml @@ -0,0 +1,168 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 448, 448] + save_inference_dir: 
./inference + # training model under @to_static + to_static: False + use_multilabel: True + +# model ema +EMA: + decay: 0.9997 + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNetV2_B0 + class_num: 80 + pretrained: True # ssld pretrained + use_ml_decoder: True + +# ml-decoder head +MLDecoder: + query_num: 80 # default: 80, query_num <= class_num + in_channels: 2048 + +# loss function config for training/eval process +Loss: + Train: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + + Eval: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 1e-4 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-4 + eta_min: 1e-10 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: dataset/coco_ml/images + cls_label_path: dataset/coco_ml/train.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - Cutout: + length: 224 + fill_value: none + - RandAugmentV4: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: MultiLabelDataset + image_root: dataset/coco_ml/images + cls_label_path: dataset/coco_ml/val.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/coco_000000570688.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: MultiLabelThreshOutput + threshold: 0.5 + class_id_map_file: ppcls/utils/COCO2017_label_list.txt + +Metric: + Train: + Eval: + - MultiLabelMAP: + # support list: integral, 11point + # default: integral + map_type: integral diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-HGNetV2-B4_ml_decoder_448.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-HGNetV2-B4_ml_decoder_448.yaml new file mode 100644 index 000000000..a92c89bc1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-HGNetV2-B4_ml_decoder_448.yaml @@ -0,0 +1,168 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for 
static mode and model export + image_shape: [3, 448, 448] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_multilabel: True + +# model ema +EMA: + decay: 0.9997 + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNetV2_B4 + class_num: 80 + pretrained: True # ssld pretrained + use_ml_decoder: True + +# ml-decoder head +MLDecoder: + query_num: 80 # default: 80, query_num <= class_num + in_channels: 2048 + +# loss function config for training/eval process +Loss: + Train: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + + Eval: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 1e-4 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-4 + eta_min: 1e-10 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: dataset/coco_ml/images + cls_label_path: dataset/coco_ml/train.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - Cutout: + length: 224 + fill_value: none + - RandAugmentV4: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: MultiLabelDataset + image_root: dataset/coco_ml/images + cls_label_path: dataset/coco_ml/val.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 16 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/coco_000000570688.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: MultiLabelThreshOutput + threshold: 0.5 + class_id_map_file: ppcls/utils/COCO2017_label_list.txt + +Metric: + Train: + Eval: + - MultiLabelMAP: + # support list: integral, 11point + # default: integral + map_type: integral diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-HGNetV2-B6_ml_decoder_448.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-HGNetV2-B6_ml_decoder_448.yaml new file mode 100644 index 000000000..d21453c31 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-HGNetV2-B6_ml_decoder_448.yaml @@ -0,0 +1,168 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + 
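An aside on the `MultiLabelAsymmetricLoss` settings used throughout these ML-Decoder configs (gamma_pos: 0, gamma_neg: 4, clip: 0.05): a simplified NumPy sketch of the asymmetric focusing idea behind those parameters. The reduction and numerical details are assumptions, not the PaddleClas implementation.

```python
# Asymmetric multi-label loss sketch: positives use a (1-p)^gamma_pos focusing
# term, negatives use a shifted probability max(p - clip, 0) raised to gamma_neg.
import numpy as np

def asymmetric_loss(logits, targets, gamma_pos=0.0, gamma_neg=4.0,
                    clip=0.05, eps=1e-8):
    """logits, targets: [N, num_classes]; targets are 0/1 multi-hot labels."""
    p = 1.0 / (1.0 + np.exp(-logits))          # per-class sigmoid probability
    p_neg = np.clip(p - clip, 0.0, 1.0)        # probability margin for negatives
    loss_pos = targets * ((1.0 - p) ** gamma_pos) * np.log(p + eps)
    loss_neg = (1.0 - targets) * (p_neg ** gamma_neg) * np.log(1.0 - p_neg + eps)
    # Reduction (sum over classes, mean over batch) is an illustrative choice.
    return -(loss_pos + loss_neg).sum(axis=1).mean()

logits = np.random.randn(2, 80)
targets = (np.random.rand(2, 80) > 0.9).astype("float64")
print(asymmetric_loss(logits, targets))
```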
eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 448, 448] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_multilabel: True + +# model ema +EMA: + decay: 0.9997 + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNetV2_B6 + class_num: 80 + pretrained: True # ssld pretrained + use_ml_decoder: True + +# ml-decoder head +MLDecoder: + query_num: 80 # default: 80, query_num <= class_num + in_channels: 2048 + +# loss function config for training/eval process +Loss: + Train: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + + Eval: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 1e-4 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-4 + eta_min: 1e-10 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: dataset/coco_ml/images + cls_label_path: dataset/coco_ml/train.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - Cutout: + length: 224 + fill_value: none + - RandAugmentV4: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: MultiLabelDataset + image_root: dataset/coco_ml/images + cls_label_path: dataset/coco_ml/val.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 8 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/coco_000000570688.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: MultiLabelThreshOutput + threshold: 0.5 + class_id_map_file: ppcls/utils/COCO2017_label_list.txt + +Metric: + Train: + Eval: + - MultiLabelMAP: + # support list: integral, 11point + # default: integral + map_type: integral diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-LCNet_x1_0_ml_decoder_448.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-LCNet_x1_0_ml_decoder_448.yaml new file mode 100644 index 000000000..15aaee0a2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/PP-LCNet_x1_0_ml_decoder_448.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + 
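An aside on the cosine learning-rate schedule with warmup used by these ML-Decoder configs (learning_rate 1e-4, eta_min 1e-10, warmup_epoch 5, warmup_start_lr 1e-6, epochs 40): a rough Python sketch of the resulting curve. Epoch-level granularity and endpoint handling are assumptions, not Paddle's scheduler.

```python
# Linear warmup to the base LR, then cosine decay towards eta_min.
import math

def cosine_warmup_lr(epoch, total_epochs=40, base_lr=1e-4,
                     warmup_epochs=5, warmup_start_lr=1e-6, eta_min=1e-10):
    if epoch < warmup_epochs:
        frac = epoch / float(warmup_epochs)
        return warmup_start_lr + frac * (base_lr - warmup_start_lr)
    progress = (epoch - warmup_epochs) / float(max(1, total_epochs - warmup_epochs))
    return eta_min + 0.5 * (base_lr - eta_min) * (1.0 + math.cos(math.pi * progress))

print([round(cosine_warmup_lr(e), 8) for e in (0, 5, 20, 39)])
```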
output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 448, 448] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_multilabel: True + +# model ema +EMA: + decay: 0.9997 + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 80 + pretrained: True + use_ml_decoder: True + +# ml-decoder head +MLDecoder: + query_num: 80 # default: 80, query_num <= class_num + class_num: 80 + in_channels: 1280 + + +# loss function config for training/eval process +Loss: + Train: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + + Eval: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 1e-4 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-4 + eta_min: 1e-10 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: dataset/coco_ml/images + cls_label_path: dataset/coco_ml/train.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - Cutout: + length: 224 + fill_value: none + - RandAugmentV4: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: MultiLabelDataset + image_root: dataset/coco_ml/images + cls_label_path: dataset/coco_ml/val.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/coco_000000570688.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: MultiLabelThreshOutput + threshold: 0.5 + class_id_map_file: ppcls/utils/COCO2017_label_list.txt + +Metric: + Train: + Eval: + - MultiLabelMAP: + # support list: integral, 11point + # default: integral + map_type: integral diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/README.md b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/README.md new file mode 100644 index 000000000..e4e4263c1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/README.md @@ -0,0 +1,272 @@ +# ML-Decoder多标签分类 + +## 目录 + +* [1. 模型介绍](#1) +* [2. 
Data and Model Preparation](#2)
+* [3. Model Training](#3)
+* [4. Model Evaluation](#4)
+* [5. Model Prediction](#5)
+* [6. Inference Based on the Prediction Engine](#6)
+  * [6.1 Export the inference model](#6.1)
+  * [6.2 Inference with the Python prediction engine](#6.2)
+* [7. Citation](#7)
+
+
+## 1. Model Introduction
+
+ML-Decoder is a new attention-based classification head. It predicts the presence of class labels through queries and makes better use of spatial information than global average pooling. Its main characteristics are:
+
+1. ML-Decoder redesigns the decoder architecture and uses a novel group-decoding scheme, which makes it efficient and scalable and lets it handle classification tasks with thousands of classes.
+2. ML-Decoder offers a consistent speed-accuracy trade-off and can deliver better performance than simply switching to a larger backbone.
+3. ML-Decoder is also versatile: it can serve as a drop-in replacement for various classification heads, and when word queries are used it generalizes to unseen classes. With a novel query-augmentation method, its generalization ability improves further.
+
+With ML-Decoder, the authors achieved state-of-the-art results on several classification tasks:
+1. 91.4% mAP on MS-COCO multi-label classification;
+2. 31.1% ZSL mAP on NUS-WIDE zero-shot classification;
+3. a new high of 80.7% on ImageNet single-label classification with a plain ResNet50 backbone, without extra data or distillation.
+
+`PaddleClas` currently supports ML-Decoder in both single-label and multi-label classification tasks, and enabling it is straightforward: set use_ml_decoder=True under the Arch scope of the config file and provide the ML-Decoder parameter settings:
+```yaml
+# model architecture
+Arch:
+  name: ResNet101
+  class_num: 80
+  pretrained: True
+  # use ml-decoder head to replace avg_pool and fc
+  use_ml_decoder: True
+
+# ml-decoder head
+MLDecoder:
+  query_num: 80 # default: 80, query_num <= class_num
+  in_channels: 2048
+  # optional args
+  # class_num: 80
+  # remove_layers: ['avg_pool', 'flatten']
+  # replace_layer: 'fc'
+```
+Notes:
+1. When the chosen backbone does not expose a class_num attribute, MLDecoder needs an explicit `class_num` entry in the config file.
+2. In practice, adjust `remove_layers` and `replace_layer` according to the actual layer names of the chosen backbone. Those names can be found by looking up the backbone's implementation under `ppcls/arch/backbone` for the selected model and checking the names of its output layers.
+
+Taking `RepVGG_A0` as an example, after checking `ppcls/arch/backbone/model_zoo/repvgg.py` and adapting `class_num`, `remove_layers` and `replace_layer` accordingly, the MLDecoder config looks like:
+```yaml
+# model architecture
+Arch:
+  name: RepVGG_A0
+  class_num: 80
+  pretrained: True
+  # use ml-decoder head to replace avg_pool and fc
+  use_ml_decoder: True
+
+# ml-decoder head
+MLDecoder:
+  query_num: 80 # default: 80, query_num <= class_num
+  in_channels: 1280
+  # optional args
+  class_num: 80
+  remove_layers: ['gap']
+  replace_layer: 'linear'
+```
+
+Developers are encouraged to try different backbones in combination with ML-Decoder.
+
+The current results of combining various backbones with ML-Decoder on the COCO2017 multi-label classification task are as follows:
+
+| Model | Backbone | Resolution | mAP | Links |
+|:--------------------:|:---------:|:----------:|:---:|:-----------------------------------------:|
+| PP-LCNet_x1_0_ml_decoder_448 | PP-LCNet_x1_0 | 448x448 | 77.96% | [config](./PP-LCNet_x1_0_ml_decoder_448.yaml) |
+| PP-HGNetV2-B0_ml_decoder_448 | PP-HGNetV2-B0 | 448x448 | 80.98% | [config](./PP-HGNetV2-B0_ml_decoder_448.yaml) |
+| PP-HGNetV2-B4_ml_decoder_448 | PP-HGNetV2-B4 | 448x448 | 87.96% | [config](./PP-HGNetV2-B4_ml_decoder_448.yaml) |
+| PP-HGNetV2-B6_ml_decoder_448 | PP-HGNetV2-B6 | 448x448 | 91.25% | [config](./PP-HGNetV2-B6_ml_decoder_448.yaml) |
+| ResNet50_ml_decoder_448 | ResNet50 | 448x448 | 83.50% | [config](./ResNet50_ml_decoder_448.yaml) |
+| ResNet101_ml_decoder | ResNet101 | 448x448 | 91.40% | [config](./ResNet101_ml_decoder_448.yaml) |
+| CLIP_vit_base_patch16_448_ml_decoder_448 | CLIP_vit_base_patch16_448 | 448x448 | 89.15% | [config](./CLIP_vit_base_patch16_448_ml_decoder_448.yaml) |
+
+Based on the [COCO2017](https://cocodataset.org/) dataset, the following sections describe training, evaluation and prediction for multi-label classification with ML-Decoder. Please install PaddlePaddle and PaddleClas first; see [Environment Preparation](../installation.md) for the detailed steps.
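As an aside to the description above, here is a deliberately small PaddlePaddle sketch of the core ML-Decoder idea: learned class queries cross-attend over the backbone's spatial features and each query produces one logit. It is an illustration under simplifying assumptions only (a single attention block, no group decoding, query_num equal to class_num, invented class and argument names), not the PaddleClas implementation enabled by `use_ml_decoder: True`.

```python
import paddle
import paddle.nn as nn


class TinyMLDecoderHead(nn.Layer):
    """Illustrative attention-based head replacing avg_pool + fc."""

    def __init__(self, in_channels=2048, embed_dim=768, query_num=80, num_heads=8):
        super().__init__()
        self.proj = nn.Linear(in_channels, embed_dim)          # project CNN channels to decoder width
        self.query_embed = nn.Embedding(query_num, embed_dim)  # one learned query per class (group)
        self.cross_attn = nn.MultiHeadAttention(embed_dim, num_heads)
        self.classifier = nn.Linear(embed_dim, 1)              # one logit per query

    def forward(self, feat):
        # feat: [N, C, H, W] backbone feature map (what avg_pool + fc would normally consume)
        n, c, h, w = feat.shape
        tokens = feat.flatten(start_axis=2).transpose([0, 2, 1])  # [N, H*W, C]
        tokens = self.proj(tokens)                                # [N, H*W, D]
        queries = self.query_embed.weight.unsqueeze(0).expand([n, -1, -1])  # [N, Q, D]
        attended = self.cross_attn(queries, tokens, tokens)       # queries attend over spatial tokens
        return self.classifier(attended).squeeze(axis=-1)         # [N, Q] multi-label logits


if __name__ == "__main__":
    head = TinyMLDecoderHead()
    logits = head(paddle.randn([2, 2048, 14, 14]))
    print(logits.shape)  # [2, 80]
```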
+
+
+## 2. Data and Model Preparation
+
+* Enter the `PaddleClas` directory.
+
+```
+cd path_to_PaddleClas
+```
+
+* Create and enter the `dataset/COCO2017` directory, then download and extract the COCO2017 dataset.
+
+```shell
+mkdir dataset/COCO2017 && cd dataset/COCO2017
+wget http://images.cocodataset.org/zips/train2017.zip -O t.zip && unzip t.zip -d . && rm t.zip
+wget http://images.cocodataset.org/zips/val2017.zip -O t.zip && unzip t.zip -d . && rm t.zip
+wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip -O t.zip && unzip t.zip -d . && rm t.zip
+```
+
+* Return to the `PaddleClas` root directory
+
+
+```shell
+cd ../../
+# convert the training set and generate `COCO2017_labels.txt`
+python3 ./ppcls/utils/create_coco_multilabel_lists.py \
+        --dataset_dir dataset/COCO2017 \
+        --image_dir train2017 \
+        --anno_path annotations/instances_train2017.json \
+        --save_name multilabel_train_list --save_label_name
+# convert the validation set
+python3 ./ppcls/utils/create_coco_multilabel_lists.py \
+        --dataset_dir dataset/COCO2017 \
+        --image_dir val2017 \
+        --anno_path annotations/instances_val2017.json \
+        --save_name multilabel_val_list
+```
+
+## 3. Model Training
+
+```shell
+# multi-GPU
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/MultiLabelCOCO/MLDecoder/ResNet101_ml_decoder_448.yaml
+# single GPU
+python3 tools/train.py \
+        -c ./ppcls/configs/MultiLabelCOCO/MLDecoder/ResNet101_ml_decoder_448.yaml
+```
+
+**Note:**
+1. The loss function for multi-label classification currently defaults to `MultiLabelAsymmetricLoss`.
+2. The evaluation metric for multi-label classification currently defaults to `MultiLabelMAP(integral)`.
+
+
+## 4. Model Evaluation
+
+```bash
+python3 tools/eval.py \
+    -c ./ppcls/configs/MultiLabelCOCO/MLDecoder/ResNet101_ml_decoder_448.yaml \
+    -o Global.pretrained_model="./output/ResNet101_ml_decoder_448/best_model"
+```
+
+## 5. Model Prediction
+
+```bash
+python3 tools/infer.py \
+    -c ./ppcls/configs/MultiLabelCOCO/MLDecoder/ResNet101_ml_decoder_448.yaml \
+    -o Global.pretrained_model="./output/ResNet101_ml_decoder_448/best_model"
+```
+
+You should get output similar to the following:
+```
+[{'class_ids': [0, 2, 7, 24, 25, 26, 33, 56], 'scores': [0.99998, 0.52104, 0.51953, 0.59292, 0.64329, 0.63605, 0.99994, 0.7054], 'label_names': ['person', 'car', 'truck', 'backpack', 'umbrella', 'handbag', 'kite', 'chair'], 'file_name': 'deploy/images/coco_000000570688.jpg'}]
+```
+
+## 6. Inference Based on the Prediction Engine
+
+
+### 6.1 Export the inference model
+
+```bash
+python3 tools/export_model.py \
+    -c ./ppcls/configs/MultiLabelCOCO/MLDecoder/ResNet101_ml_decoder_448.yaml \
+    -o Global.pretrained_model="./output/ResNet101_ml_decoder_448/best_model"
+```
+By default the inference model is saved under `./inference` in the current directory.
+The `./inference` folder should have the following structure:
+
+```
+├── inference
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+
+### 6.2 Inference with the Python prediction engine
+
+Switch to the deploy directory. Before running the scripts in deploy for inference, make sure paddleclas is installed in non-local (non-editable) mode; if it is not, switch the installation, otherwise package import errors will occur.
+
+```shell
+# local (editable) install
+pip install -e .
+# 非本地安装 +python setup.py install + +# 进入deploy目录下 +cd deploy +``` + + + +#### 6.2.1 预测单张图像 + +运行下面的命令,对图像 `./images/coco_000000570688.jpg` 进行分类。 + +```shell +# linux使用`python3`,windows使用`python (-m)`来执行脚本 +# 使用下面的命令使用 GPU 进行预测 +python3 python/predict_cls.py \ + -c configs/inference_cls_multilabel.yaml \ + -o Global.inference_model_dir=../inference/ \ + -o Global.infer_imgs=images/coco_000000570688.jpg \ + -o PostProcess.MultiLabelThreshOutput.class_id_map_file=../ppcls/utils/COCO2017_label_list.txt +# 使用下面的命令使用 CPU 进行预测 +python3 python/predict_cls.py \ + -c configs/inference_cls_multilabel.yaml \ + -o Global.inference_model_dir=../inference/ \ + -o Global.infer_imgs=images/coco_000000570688.jpg \ + -o PostProcess.MultiLabelThreshOutput.class_id_map_file=../ppcls/utils/COCO2017_label_list.txt \ + -o Global.use_gpu=False +``` + +输出结果如下: + +``` +coco_000000570688.jpg: class id(s): [0, 2, 3, 4, 7, 9, 21, 22, 23, 24, 25, 27, 28, 29, 30, 33, 38, 39, 45, 46, 47, 48, 49, 51, 52, 53, 54, 57, 58, 60, 61, 62, 63, 64, 65, 67, 69, 70, 71, 72, 73, 75], score(s): [0.84, 0.68, 0.93, 0.54, 0.74, 0.90, 0.56, 0.60, 0.63, 0.77, 0.64, 0.70, 0.94, 0.82, 0.99, 0.71, 0.86, 0.81, 0.81, 0.65, 0.65, 0.92, 0.67, 0.53, 0.83, 0.63, 0.58, 0.52, 0.83, 0.55, 0.92, 0.72, 0.74, 0.59, 0.82, 0.50, 0.62, 0.77, 0.87, 0.64, 0.84, 0.67], label_name(s): ['person', 'car', 'motorcycle', 'airplane', 'truck', 'traffic light', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'tie', 'suitcase', 'frisbee', 'skis', 'kite', 'tennis racket', 'bottle', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'carrot', 'hot dog', 'pizza', 'donut', 'couch', 'potted plant', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'cell phone', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'vase'] +``` + + + +#### 6.2.2 基于文件夹的批量预测 + +如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# linux使用`python3`,windows使用`python (-m)`来执行脚本 +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3 python/predict_cls.py \ + -c configs/inference_cls_multilabel.yaml \ + -o Global.inference_model_dir=../inference/ \ + -o PostProcess.MultiLabelThreshOutput.class_id_map_file=../ppcls/utils/COCO2017_label_list.txt \ + -o Global.infer_imgs=images/ImageNet/ +``` + +终端中会输出该文件夹内所有图像的分类结果,如下所示。 + +``` +ILSVRC2012_val_00000010.jpeg: class id(s): [0, 2, 3, 7, 9, 21, 22, 23, 24, 25, 27, 28, 29, 30, 33, 38, 39, 40, 41, 45, 46, 47, 48, 49, 52, 53, 54, 58, 60, 61, 62, 63, 64, 65, 69, 70, 71, 72, 73, 75], score(s): [0.80, 0.58, 0.89, 0.74, 0.86, 0.66, 0.56, 0.60, 0.81, 0.64, 0.73, 0.94, 0.75, 0.99, 0.70, 0.86, 0.78, 0.63, 0.57, 0.76, 0.66, 0.60, 0.94, 0.65, 0.90, 0.63, 0.52, 0.79, 0.50, 0.93, 0.72, 0.70, 0.60, 0.83, 0.61, 0.75, 0.86, 0.67, 0.87, 0.64], label_name(s): ['person', 'car', 'motorcycle', 'truck', 'traffic light', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'tie', 'suitcase', 'frisbee', 'skis', 'kite', 'tennis racket', 'bottle', 'wine glass', 'cup', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'hot dog', 'pizza', 'donut', 'potted plant', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'vase'] +ILSVRC2012_val_00010010.jpeg: class id(s): [0, 2, 3, 6, 7, 8, 9, 21, 22, 23, 24, 25, 27, 28, 29, 30, 33, 38, 39, 40, 45, 46, 47, 48, 49, 51, 52, 53, 54, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 69, 70, 71, 72, 73, 75], score(s): [0.78, 0.66, 0.93, 0.54, 0.77, 0.50, 0.86, 0.69, 0.63, 0.50, 0.78, 0.56, 0.71, 0.93, 0.78, 0.99, 0.64, 0.85, 
0.80, 0.53, 0.85, 0.71, 0.66, 0.96, 0.70, 0.62, 0.85, 0.58, 0.57, 0.57, 0.78, 0.50, 0.92, 0.64, 0.73, 0.71, 0.77, 0.53, 0.66, 0.52, 0.73, 0.87, 0.69, 0.85, 0.66], label_name(s): ['person', 'car', 'motorcycle', 'train', 'truck', 'boat', 'traffic light', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'tie', 'suitcase', 'frisbee', 'skis', 'kite', 'tennis racket', 'bottle', 'wine glass', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'carrot', 'hot dog', 'pizza', 'donut', 'couch', 'potted plant', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'vase'] +ILSVRC2012_val_00020010.jpeg: class id(s): [0, 2, 3, 7, 9, 21, 22, 23, 24, 25, 27, 28, 29, 30, 33, 38, 39, 40, 41, 45, 46, 47, 48, 49, 51, 52, 53, 54, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 69, 70, 71, 72, 73, 75], score(s): [0.85, 0.62, 0.94, 0.69, 0.89, 0.56, 0.56, 0.62, 0.77, 0.67, 0.71, 0.93, 0.78, 0.99, 0.65, 0.86, 0.75, 0.57, 0.60, 0.76, 0.66, 0.58, 0.95, 0.73, 0.50, 0.88, 0.63, 0.59, 0.62, 0.84, 0.59, 0.82, 0.74, 0.74, 0.62, 0.86, 0.53, 0.53, 0.60, 0.73, 0.88, 0.65, 0.85, 0.70], label_name(s): ['person', 'car', 'motorcycle', 'truck', 'traffic light', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'tie', 'suitcase', 'frisbee', 'skis', 'kite', 'tennis racket', 'bottle', 'wine glass', 'cup', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'carrot', 'hot dog', 'pizza', 'donut', 'couch', 'potted plant', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'vase'] +ILSVRC2012_val_00030010.jpeg: class id(s): [0, 2, 3, 4, 7, 9, 21, 22, 23, 24, 25, 27, 28, 29, 30, 33, 38, 39, 40, 41, 45, 46, 47, 48, 49, 51, 52, 53, 58, 60, 61, 62, 63, 64, 65, 69, 70, 71, 72, 73, 75], score(s): [0.82, 0.60, 0.92, 0.54, 0.67, 0.87, 0.57, 0.63, 0.57, 0.84, 0.70, 0.80, 0.92, 0.82, 0.99, 0.72, 0.86, 0.80, 0.59, 0.55, 0.84, 0.73, 0.60, 0.94, 0.75, 0.53, 0.89, 0.51, 0.84, 0.56, 0.90, 0.87, 0.67, 0.70, 0.85, 0.59, 0.82, 0.91, 0.62, 0.89, 0.67], label_name(s): ['person', 'car', 'motorcycle', 'airplane', 'truck', 'traffic light', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'tie', 'suitcase', 'frisbee', 'skis', 'kite', 'tennis racket', 'bottle', 'wine glass', 'cup', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'carrot', 'hot dog', 'pizza', 'potted plant', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'vase'] +``` + + +## 7. 
引用 +``` +@misc{ridnik2021mldecoder, + title={ML-Decoder: Scalable and Versatile Classification Head}, + author={Tal Ridnik and Gilad Sharir and Avi Ben-Cohen and Emanuel Ben-Baruch and Asaf Noy}, + year={2021}, + eprint={2111.12933}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/ResNet101_ml_decoder_448.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/ResNet101_ml_decoder_448.yaml new file mode 100644 index 000000000..8bafbb23d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/ResNet101_ml_decoder_448.yaml @@ -0,0 +1,168 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 448, 448] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_multilabel: True + +# model ema +EMA: + decay: 0.9997 + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: ResNet101 + class_num: 80 + pretrained: True + # use ml-decoder head to replace avg_pool and fc + use_ml_decoder: True + +# ml-decoder head +MLDecoder: + query_num: 80 # default: 80, query_num <= class_num + in_channels: 2048 + +# loss function config for training/eval process +Loss: + Train: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + + Eval: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + +Optimizer: + name: AdamW + one_dim_param_no_weight_decay: True + weight_decay: 0.0001 # 1e-4 + lr: + name: OneCycleLR + max_learning_rate: 0.0001 # 1e-4 + divide_factor: 25.0 + end_learning_rate: 0.0000000001 # 1e-10 + phase_pct: 0.2 + anneal_strategy: cos + three_phase: False + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: ./dataset/COCO2017/train2017 + cls_label_path: ./dataset/COCO2017/multilabel_train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - Cutout: + length: 224 + fill_value: none + - RandAugmentV4: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 56 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: MultiLabelDataset + image_root: ./dataset/COCO2017/val2017 + cls_label_path: ./dataset/COCO2017/multilabel_val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/coco_000000570688.jpg + batch_size: 10 
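An aside on the `EMA: decay: 0.9997` entry in these ML-Decoder configs: a tiny Python sketch of the exponential-moving-average bookkeeping it implies, where shadow weights track the trained weights and are typically what gets evaluated or exported. Class and method names are invented for illustration; this is not the PaddleClas EMA code.

```python
class EMASketch:
    """Keep an exponential moving average of scalar parameters."""

    def __init__(self, params, decay=0.9997):
        self.decay = decay
        self.shadow = {k: float(v) for k, v in params.items()}

    def update(self, params):
        # shadow <- decay * shadow + (1 - decay) * current value
        for k, v in params.items():
            self.shadow[k] = self.decay * self.shadow[k] + (1.0 - self.decay) * float(v)


ema = EMASketch({"w": 1.0})
ema.update({"w": 0.0})
print(ema.shadow["w"])  # 0.9997
```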
+ transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: MultiLabelThreshOutput + threshold: 0.5 + class_id_map_file: ppcls/utils/COCO2017_label_list.txt + +Metric: + Train: + Eval: + - MultiLabelMAP: + # support list: integral, 11point + # default: integral + map_type: integral diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/ResNet50_ml_decoder_448.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/ResNet50_ml_decoder_448.yaml new file mode 100644 index 000000000..067c5be1c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/MultiLabelCOCO/MLDecoder/ResNet50_ml_decoder_448.yaml @@ -0,0 +1,168 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 448, 448] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_multilabel: True + +# model ema +EMA: + decay: 0.9997 + +# mixed precision +AMP: + use_amp: True + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + +# model architecture +Arch: + name: ResNet50 + class_num: 80 + pretrained: True + use_ml_decoder: True + +# ml-decoder head +MLDecoder: + query_num: 80 # default: 80, query_num <= class_num + in_channels: 2048 + +# loss function config for training/eval process +Loss: + Train: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + + Eval: + - MultiLabelAsymmetricLoss: + weight: 1.0 + gamma_pos: 0 + gamma_neg: 4 + clip: 0.05 + disable_focal_loss_grad: True + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 1e-4 + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-4 + eta_min: 1e-10 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: dataset/coco_ml/images + cls_label_path: dataset/coco_ml/train.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - Cutout: + length: 224 + fill_value: none + - RandAugmentV4: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: MultiLabelDataset + image_root: dataset/coco_ml/images + cls_label_path: dataset/coco_ml/val.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 16 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: 
deploy/images/coco_000000570688.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 448 + interpolation: bilinear + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: MultiLabelThreshOutput + threshold: 0.5 + class_id_map_file: ppcls/utils/COCO2017_label_list.txt + +Metric: + Train: + Eval: + - MultiLabelMAP: + # support list: integral, 11point + # default: integral + map_type: integral diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/MobileNetV3_small_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/MobileNetV3_small_x0_35.yaml new file mode 100644 index 000000000..911b8edec --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,139 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + class_num: 2 + pretrained: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.05 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/car_exists/objects365_00001507.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: no_car + label_1: contains_car + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + max_fpr: 0.01 + - TopkAcc: + topk: [1, 2] diff --git 
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..247f655b5 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/PPLCNet_x1_0.yaml @@ -0,0 +1,152 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0125 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.5 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists + cls_label_path: ./dataset/car_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/car_exists/objects365_00001507.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.9 + label_0: no_car + label_1: contains_car + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + max_fpr: 0.01 + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/PPLCNet_x1_0_distillation.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 000000000..4c11802d6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + 
checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 1 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 2 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/train_list_for_distill.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.1 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/car_exists/objects365_00001507.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: no_car + label_1: contains_car + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 2] + Eval: + - TprAtFpr: + max_fpr: 0.01 + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/PPLCNet_x1_0_search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/PPLCNet_x1_0_search.yaml new file mode 100644 index 000000000..c263f2309 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/PPLCNet_x1_0_search.yaml @@ -0,0 +1,152 
@@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/car_exists/objects365_00001507.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: no_car + label_1: contains_car + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + max_fpr: 0.01 + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/SwinTransformer_tiny_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 000000000..a75fda4b4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: 
./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 2 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-4 + eta_min: 2e-6 + warmup_epoch: 5 + warmup_start_lr: 2e-7 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/car_exists/objects365_00001507.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: no_car + label_1: contains_car + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + max_fpr: 0.01 + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/search.yaml new file mode 100644 index 000000000..820337c02 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/car_exists/search.yaml @@ -0,0 +1,40 @@ +base_config_file: ppcls/configs/PULC/person_exists/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_person_cls +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.0075, 0.01, 0.0125] + - 
search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size + search_values: [176, 192, 224] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob + search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/clarity_assessment/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/clarity_assessment/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..0f766380f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/clarity_assessment/PPLCNet_x1_0.yaml @@ -0,0 +1,133 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: PPLCNet_x1_0 + pretrained: True + use_ssld: True + class_num: 2 + use_last_conv: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.14 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: CustomLabelDataset + image_root: ./dataset/ + sample_list_path: ./dataset/ImageNet_OCR_det.txt + label_key: blur_image + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - BlurImage: + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 12 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/blur/ + cls_label_path: ./dataset/blur/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: ./test_img/ + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + +Metric: + Train: + - TopkAcc: + topk: [1] + Eval: + - TopkAcc: + 
topk: [1] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/MobileNetV3_small_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/MobileNetV3_small_x0_35.yaml new file mode 100644 index 000000000..8566b0c5c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,137 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + class_num: 2 + pretrained: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.05 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/code_exists/ + cls_label_path: ./dataset/code_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/code_exists/ + cls_label_path: ./dataset/code_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: test_images/ + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: nobody + label_1: someone + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..02a960116 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/PPLCNet_x1_0.yaml @@ -0,0 +1,145 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 5 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + 
image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/code_exists/ + cls_label_path: ./dataset/code_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.1 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/code_exists/ + cls_label_path: ./dataset/code_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: test_imags/ + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: no_code + label_1: contains_code + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/PPLCNet_x1_0_distillation.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 000000000..2ba2dcd1b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,167 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 1 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 2 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + + 
infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/code_exists/ + cls_label_path: ./dataset/code_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.1 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/code_exists/ + cls_label_path: ./dataset/code_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/code_exists/objects365_02035329.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: no_code + label_1: contains_code + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/PPLCNet_x1_0_search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/PPLCNet_x1_0_search.yaml new file mode 100644 index 000000000..b6b627e10 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/PPLCNet_x1_0_search.yaml @@ -0,0 +1,150 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 5 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and 
eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/code_exists/ + cls_label_path: ./dataset/code_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/code_exists/ + cls_label_path: ./dataset/code_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_exists/objects365_02035329.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: no_code + label_1: contains_code + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/SwinTransformer_tiny_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 000000000..7600a4361 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,167 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 2 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-5 + eta_min: 1e-6 + warmup_epoch: 5 + warmup_start_lr: 1e-7 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: 
./dataset/code_exists/ + cls_label_path: ./dataset/code_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/code_exists/ + cls_label_path: ./dataset/code_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: test_images + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: no_code + label_1: contains_code + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/search.yaml new file mode 100644 index 000000000..585450e5b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/code_exists/search.yaml @@ -0,0 +1,40 @@ +base_config_file: ppcls/configs/PULC/code_exists/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/code_exists/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_code_exists +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.005, 0.0075, 0.01, 0.015, 0.02] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size + search_values: [176, 192, 224] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob + search_values: [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON + search_values: [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git 
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/image_orientation/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/image_orientation/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..c55ad1a92 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/image_orientation/PPLCNet_x1_0.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 10 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: PPLCNet_x1_0 + pretrained: True + use_ssld: True + class_num: 4 + use_last_conv: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.14 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: CustomLabelDataset + image_root: ./dataset/OrientationDataset/ + sample_list_path: ./dataset/OrientationDataset/train_list.txt + label_key: random_rot90_orientation + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + - RandomRot90: + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 12 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/OrientationDataset/ + cls_label_path: ./dataset/OrientationDataset/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: ./test_img/ + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + +Metric: + Train: + - TopkAcc: + topk: [1] + Eval: + - TopkAcc: + topk: [1] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/MobileNetV3_small_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/MobileNetV3_small_x0_35.yaml new file mode 100644 index 000000000..c3973ff42 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + 
eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + start_eval_epoch: 20 + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + class_num: 10 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/language_classification_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..081d8d23f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/PPLCNet_x1_0.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 80, 160] + save_inference_dir: ./inference + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 10 + pretrained: True + use_ssld: True + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + lr_mult_list : [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + 
regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 1.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 1.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/language_classification/word_35404.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/language_classification_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 000000000..d792c573d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,164 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 10 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + lr_mult_list : [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - 
CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/train_list_for_distill.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 1.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 1.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/language_classification/word_35404.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/language_classification_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/PPLCNet_x1_0_search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/PPLCNet_x1_0_search.yaml new file mode 100644 index 000000000..49a5f1702 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/PPLCNet_x1_0_search.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 48, 192] + save_inference_dir: ./inference + start_eval_epoch: 20 + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 10 + pretrained: True + use_ssld: True + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.4 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: 
./dataset/language_classification/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 48] + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [192, 48] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 48] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 32 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/language_classification/word_35404.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 48] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/language_classification_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/SwinTransformer_tiny_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 000000000..4e1a45a9e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,160 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 10 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + 
backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/language_classification/word_35404.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/language_classification_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/search.yaml new file mode 100644 index 000000000..a4b3dde56 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/language_classification/search.yaml @@ -0,0 +1,40 @@ +base_config_file: ppcls/configs/PULC/language_classification/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_language_classification +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.2, 0.4, 0.8] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.ResizeImage.size + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size + - DataLoader.Eval.dataset.transform_ops.1.ResizeImage.size + search_values: [[192, 48], [180, 60], [160, 80]] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob + search_values: [0.0, 0.5, 1.0] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON + search_values: [0.0, 0.5, 1.0] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list \ No newline at end of 
file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/MobileNetV3_small_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/MobileNetV3_small_x0_35.yaml new file mode 100644 index 000000000..94b443832 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,135 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "MobileNetV3_small_x0_35" + pretrained: True + class_num: 26 + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + #clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: PersonAttribute + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..b042ad757 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/PPLCNet_x1_0.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 
+ eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "PPLCNet_x1_0" + pretrained: True + use_ssld: True + class_num: 26 + + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - TimmAutoAugment: + prob: 0.8 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [192, 256] + - Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.4 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: PersonAttribute + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml new file mode 100644 index 000000000..bd6503488 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 1 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + 
use_multilabel: True + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 26 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + - DistillationMultiLabelLoss: + weight: 1.0 + weight_ratio: True + model_names: ["Student"] + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - TimmAutoAugment: + prob: 0.8 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [192, 256] + - Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.4 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: PersonAttribute + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + +Metric: + Eval: + - ATTRMetric: diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/PPLCNet_x1_0_search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/PPLCNet_x1_0_search.yaml new file mode 100644 index 000000000..8f6b0d7fe --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/PPLCNet_x1_0_search.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + 
epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "PPLCNet_x1_0" + pretrained: True + use_ssld: True + class_num: 26 + + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [192, 256] + - Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k" + cls_label_path: "dataset/pa100k/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: PersonAttribute + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/Res2Net200_vd_26w_4s.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/Res2Net200_vd_26w_4s.yaml new file mode 100644 index 000000000..4f7dc273c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/Res2Net200_vd_26w_4s.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "Res2Net200_vd_26w_4s" + pretrained: True + class_num: 26 + +# loss function config for 
traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: PersonAttribute + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/SwinTransformer_tiny_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 000000000..36c3d6aae --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,135 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "SwinTransformer_tiny_patch4_window7_224" + pretrained: True + class_num: 26 + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + #clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + 
cls_label_path: "dataset/pa100k/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + - Padv2: + size: [244, 244] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [224, 224] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: PersonAttribute + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/search.yaml new file mode 100644 index 000000000..78192d113 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_attribute/search.yaml @@ -0,0 +1,41 @@ +base_config_file: ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_attr +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.0001, 0.005, 0.01, 0.02, 0.05] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.ResizeImage.size + - DataLoader.Train.dataset.transform_ops.4.RandomCropImage.size + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.img_size + search_values: [[192, 256]] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.prob + search_values: [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.7.RandomErasing.EPSILON + search_values: [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/MobileNetV3_small_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/MobileNetV3_small_x0_35.yaml new file mode 100644 
index 000000000..9510ec258 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,138 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + class_num: 2 + pretrained: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.05 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_exists/objects365_02035329.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: nobody + label_1: someone + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..93e9841d9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/PPLCNet_x1_0.yaml @@ -0,0 +1,151 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + name: 
PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.1 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_exists/objects365_02035329.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.9 + label_0: nobody + label_1: someone + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 000000000..3d3aa3258 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,168 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 1 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 2 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + + 
infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/train_list_for_distill.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.1 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_exists/objects365_02035329.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: nobody + label_1: someone + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 2] + Eval: + - TprAtFpr: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/PPLCNet_x1_0_search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/PPLCNet_x1_0_search.yaml new file mode 100644 index 000000000..86c25a05b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/PPLCNet_x1_0_search.yaml @@ -0,0 +1,151 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 
0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_exists/objects365_02035329.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: nobody + label_1: someone + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/SwinTransformer_tiny_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 000000000..be10d67b7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,168 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 2 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-5 + eta_min: 1e-6 + warmup_epoch: 5 + warmup_start_lr: 1e-7 + + +# data loader for train and eval +DataLoader: + Train: + 
dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_exists/objects365_02035329.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: nobody + label_1: someone + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/search.yaml new file mode 100644 index 000000000..820337c02 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/person_exists/search.yaml @@ -0,0 +1,40 @@ +base_config_file: ppcls/configs/PULC/person_exists/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_person_cls +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.0075, 0.01, 0.0125] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size + search_values: [176, 192, 224] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob + search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: 
Arch.models.1.Student.lr_mult_list + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/MobileNetV3_small_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/MobileNetV3_small_x0_35.yaml new file mode 100644 index 000000000..9ef4beb79 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + pretrained: True + class_num: 2 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.08 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/safety_helmet/safety_helmet_test_1.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + +Metric: + Train: + - TopkAcc: + topk: [1] + Eval: + - TprAtFpr: + max_fpr: 0.0001 + - TopkAcc: + topk: [1] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..4c3c8642d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml @@ -0,0 +1,148 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + 
image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: PPLCNet_x1_0 + pretrained: True + use_ssld: True + class_num: 2 + use_sync_bn : True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.025 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 176 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob : 0.5 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size : 176 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON : 0.1 + r1 : 0.3 + sh : 1.0/3.0 + sl : 0.02 + attempt : 10 + use_log_aspect : True + mode : pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/safety_helmet/safety_helmet_test_1.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + +Metric: + Train: + - TopkAcc: + topk: [1] + Eval: + - TprAtFpr: + max_fpr: 0.0001 + - TopkAcc: + topk: [1] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 000000000..254db5df4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,185 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 1 + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 2 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - False + - False + use_sync_bn: True + models: 
+ - Teacher: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + return_stages: True + return_patterns: ["blocks3", "blocks4", "blocks5", "blocks6"] + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + return_stages: True + return_patterns: ["blocks3", "blocks4", "blocks5", "blocks6"] + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + key: logits + model_names: ["Student", "Teacher"] + - DistillationDMLLoss: + weight: 1.0 + key: logits + model_name_pairs: + - ["Student", "Teacher"] + - DistillationDistanceLoss: + weight: 1.0 + key: "blocks4" + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.015 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.5 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/safety_helmet/safety_helmet_test_1.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1] + Eval: + - TprAtFpr: + max_fpr: 0.0001 + - TopkAcc: + topk: [1] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/PPLCNet_x1_0_search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/PPLCNet_x1_0_search.yaml new file mode 100644 index 000000000..98f63a613 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/PPLCNet_x1_0_search.yaml @@ -0,0 +1,148 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used 
for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference +# model architecture +Arch: + name: PPLCNet_x1_0 + pretrained: True + use_ssld: True + class_num: 2 + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.10 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/safety_helmet/safety_helmet_test_1.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + +Metric: + Train: + - TopkAcc: + topk: [1] + Eval: + - TprAtFpr: + max_fpr: 0.0001 + - TopkAcc: + topk: [1] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/Res2Net200_vd_26w_4s.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/Res2Net200_vd_26w_4s.yaml new file mode 100644 index 000000000..5b987d510 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/Res2Net200_vd_26w_4s.yaml @@ -0,0 +1,137 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: Res2Net200_vd_26w_4s + class_num: 2 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.005 + regularizer: + name: 'L2' + coeff: 0.0001 + 
+ +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/safety_helmet/safety_helmet_test_1.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + +Metric: + Train: + - TopkAcc: + topk: [1] + Eval: + - TprAtFpr: + max_fpr: 0.0001 + - TopkAcc: + topk: [1] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/SwinTransformer_tiny_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 000000000..5863ee17e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,159 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + pretrained: True + class_num: 2 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-5 + eta_min: 1e-7 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + 
interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/safety_helmet/safety_helmet_test_1.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + +Metric: + Eval: + - TprAtFpr: + max_fpr: 0.0001 + - TopkAcc: + topk: [1] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/search.yaml new file mode 100644 index 000000000..e8c1c933d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/safety_helmet/search.yaml @@ -0,0 +1,36 @@ +base_config_file: ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_safety_helmet +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.11, 0.12] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size + search_values: [176, 192, 224] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob + search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + algorithm: "udml" +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/table_attribute/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/table_attribute/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..2c1e9b253 --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/table_attribute/PPLCNet_x1_0.yaml @@ -0,0 +1,133 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "PPLCNet_x1_0" + pretrained: True + use_ssld: True + class_num: 6 + + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/table_attribute/" + cls_label_path: "dataset/table_attribute/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/table_attribute/" + cls_label_path: "dataset/table_attribute/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/table_attribute/val_3610.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: TableAttribute + source_threshold: 0.5 + number_threshold: 0.5 + color_threshold: 0.5 + clarity_threshold : 0.5 + obstruction_threshold: 0.5 + angle_threshold: 0.5 + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/table_attribute/PPLCNet_x1_0_distillation.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/table_attribute/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 000000000..5b48f4a1f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/table_attribute/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,155 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 6 + # if not null, its 
lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + infer_model_name: "Student" + freeze_params_list: + - True + - False + use_ssld: True + models: + - Teacher: + name: ResNet50_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + +# loss function config for traing/eval process +Loss: + Train: + - DistillationMultiLabelLoss: + weight: 1.0 + model_names: ["Student"] + weight_ratio: True + size_sum: True + - DistillationDMLLoss: + weight: 1.0 + weight_ratio: True + sum_across_class_dim: False + model_name_pairs: + - ["Student", "Teacher"] + + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.02 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/table_attribute/" + cls_label_path: "dataset/table_attribute/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/table_attribute/" + cls_label_path: "dataset/table_attribute/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/table_attribute/val_3253.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: TableAttribute + source_threshold: 0.5 + number_threshold: 0.5 + color_threshold: 0.5 + clarity_threshold : 0.5 + obstruction_threshold: 0.5 + angle_threshold: 0.5 + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/MobileNetV3_small_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/MobileNetV3_small_x0_35.yaml new file mode 100644 index 000000000..7eaff9768 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + start_eval_epoch: 40 + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + class_num: 4 + pretrained: True + +# loss function config for traing/eval process +Loss: + 
Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..c8ded908e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 4 + pretrained: True + use_ssld: True + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.4 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - 
NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 000000000..b8fd0b108 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,164 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 4 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.4 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/train_list_for_distill.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - TimmAutoAugment: + prob: 0.0 + config_str: 
rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/PPLCNet_x1_0_search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/PPLCNet_x1_0_search.yaml new file mode 100644 index 000000000..0ba788156 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/PPLCNet_x1_0_search.yaml @@ -0,0 +1,146 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + start_eval_epoch: 40 + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 4 + pretrained: True + use_ssld: True + + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: 
+ num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/SwinTransformer_tiny_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 000000000..4d123cd4b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,157 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 4 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 2.5e-4 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + 
use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/search.yaml new file mode 100644 index 000000000..d8e65f5f0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/text_image_orientation/search.yaml @@ -0,0 +1,41 @@ +base_config_file: ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_text_image_orientation +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.1, 0.2, 0.4, 0.8] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.img_size + search_values: [176, 192, 224] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.prob + search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.4.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.0, 0.3, 0.5, 0.8, 1.0] + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/MobileNetV3_small_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/MobileNetV3_small_x0_35.yaml new file mode 100644 index 000000000..040868378 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 18 + epochs: 20 + print_batch_step: 10 + use_visualdl: 
False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + class_num: 2 + pretrained: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.13 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + class_id_map_file: ppcls/utils/PULC_label_list/textline_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..3ab3657d8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 18 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 80, 160] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + 
coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - TimmAutoAugment: + prob: 1.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + class_id_map_file: ppcls/utils/PULC_label_list/textline_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0_224x224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0_224x224.yaml new file mode 100644 index 000000000..17b9cbb15 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0_224x224.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 18 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + 
sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + class_id_map_file: ppcls/utils/PULC_label_list/textline_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0_distillation.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 000000000..2cc57e637 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,162 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 18 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 80, 160] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 2 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + pretrained: True + use_ssld: True + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - TimmAutoAugment: + prob: 1.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 
0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + class_id_map_file: ppcls/utils/PULC_label_list/textline_orientation_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0_search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0_search.yaml new file mode 100644 index 000000000..e9e186377 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/PPLCNet_x1_0_search.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 18 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 48, 192] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 48] + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [192, 48] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + 
name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 48] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 48] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + class_id_map_file: ppcls/utils/PULC_label_list/textline_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/SwinTransformer_tiny_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 000000000..a466d5e08 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,164 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 2 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-4 + eta_min: 2e-6 + warmup_epoch: 5 + warmup_start_lr: 2e-7 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True 
+ loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + class_id_map_file: ppcls/utils/PULC_label_list/textline_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/search.yaml new file mode 100644 index 000000000..4419949bc --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/textline_orientation/search.yaml @@ -0,0 +1,41 @@ +base_config_file: ppcls/configs/PULC/text_direction/PPLCNet_x1_0.yaml +distill_config_file: ppcls/configs/PULC/text_direction/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_text +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.ResizeImage.size + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.img_size + - DataLoader.Eval.dataset.transform_ops.1.ResizeImage.size + search_values: [[192, 48], [180, 60], [160, 80]] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.prob + search_values: [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.4.RandomErasing.EPSILON + search_values: [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/MobileNetV3_samll_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/MobileNetV3_samll_x0_35.yaml new file mode 100644 index 000000000..5ebe7441e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/MobileNetV3_samll_x0_35.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 10 + print_batch_step: 10 + use_visualdl: False + # used 
for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + class_num: 232 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_train.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_test.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/PULC_label_list/traffic_sign_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..5362d07b7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml @@ -0,0 +1,148 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 0 + epochs: 10 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 232 + pretrained: True + use_ssld: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.02 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_train.txt + delimiter: "\t" + 
transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - TimmAutoAugment: + prob: 0.5 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_test.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/traffic_sign/99603_17806.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/PULC_label_list/traffic_sign_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 000000000..b00c250e1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 10 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 232 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + pretrained: False + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: 
./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_train_for_distillation.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_test.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/PULC_label_list/traffic_sign_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/PPLCNet_x1_0_search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/PPLCNet_x1_0_search.yaml new file mode 100644 index 000000000..27fbc4b86 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/PPLCNet_x1_0_search.yaml @@ -0,0 +1,148 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 0 + epochs: 10 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 232 + pretrained: True + # use_ssld: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_train.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 
0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_test.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/traffic_sign/99603_17806.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/PULC_label_list/traffic_sign_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/SwinTransformer_tiny_patch4_window7_224.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 000000000..ae86ae622 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 0 + epochs: 10 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 232 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 2e-4 + eta_min: 2e-6 + warmup_epoch: 5 + warmup_start_lr: 2e-7 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_train.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - 
RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_test.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/PULC_label_list/traffic_sign_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/search.yaml new file mode 100644 index 000000000..029d042df --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/traffic_sign/search.yaml @@ -0,0 +1,41 @@ +base_config_file: ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_traffic_sign +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.0075, 0.01, 0.0125] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.img_size + search_values: [176, 192, 224] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.prob + search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.4.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + algorithm: "skl-ugi" + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/MobileNetV3_small_x0_35.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/MobileNetV3_small_x0_35.yaml new file mode 100644 index 000000000..a35bc6114 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,115 @@ +# global configs +Global: + 
checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 256] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "MobileNetV3_small_x0_35" + pretrained: True + class_num: 19 + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - Padv2: + size: [276, 212] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [256, 192] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/test_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml new file mode 100644 index 000000000..a3369a9ee --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 256] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "PPLCNet_x1_0" + pretrained: True + class_num: 19 + use_ssld: True + lr_mult_list: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0125 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: 
"dataset/VeRi/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [256, 192] + - Padv2: + size: [276, 212] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [256, 192] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/test_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: ./deploy/images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: VehicleAttribute + color_threshold: 0.5 + type_threshold: 0.5 + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/PPLCNet_x1_0_distillation.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 000000000..d098ca81f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,171 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 256] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 19 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + infer_model_name: "Student" + freeze_params_list: + - True + - False + use_ssld: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + +# loss function config for traing/eval process +Loss: + Train: + - DistillationMultiLabelLoss: + weight: 1.0 + model_names: ["Student"] + weight_ratio: True + size_sum: True + - DistillationDMLLoss: + weight: 1.0 + weight_ratio: True + sum_across_class_dim: False + model_name_pairs: + - ["Student", "Teacher"] + + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + 
coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [256, 192] + - Padv2: + size: [276, 212] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [256, 192] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/test_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: ./deploy/images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: VehicleAttribute + color_threshold: 0.5 + type_threshold: 0.5 + + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/PPLCNet_x1_0_search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/PPLCNet_x1_0_search.yaml new file mode 100644 index 000000000..5f84c2a65 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/PPLCNet_x1_0_search.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 256] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "PPLCNet_x1_0" + pretrained: True + use_ssld: True + class_num: 19 + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - TimmAutoAugment: + prob: 0.0 + config_str: 
rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [256, 192] + - Padv2: + size: [276, 212] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [256, 192] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/test_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/Res2Net200_vd_26w_4s.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/Res2Net200_vd_26w_4s.yaml new file mode 100644 index 000000000..c6618f960 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/Res2Net200_vd_26w_4s.yaml @@ -0,0 +1,122 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/mo" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 256] + save_inference_dir: "./inference" + use_multilabel: True + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: "Res2Net200_vd_26w_4s" + pretrained: True + class_num: 19 + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - Padv2: + size: [276, 212] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [256, 192] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/test_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: 
[0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/ResNet50.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/ResNet50.yaml new file mode 100644 index 000000000..9218769c6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/ResNet50.yaml @@ -0,0 +1,116 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 256] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "ResNet50" + pretrained: True + class_num: 19 + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - Padv2: + size: [276, 212] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [256, 192] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/test_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + + +Metric: + Eval: + - ATTRMetric: + + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/search.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/search.yaml new file mode 100644 index 000000000..2a16266bf --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/PULC/vehicle_attribute/search.yaml @@ -0,0 +1,35 @@ +base_config_file: ppcls/configs/PULC/vehicle_attr/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/vehicle_attr/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_vehicle_attr +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.0075, 0.01, 0.0125] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.prob + 
search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.7.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + algorithm: "skl-ugi" + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/MV3_Large_1x_Aliproduct_DLBHC.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/MV3_Large_1x_Aliproduct_DLBHC.yaml new file mode 100644 index 000000000..ad77ea950 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/MV3_Large_1x_Aliproduct_DLBHC.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output_dlbhc/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + #eval_mode: "retrieval" + print_batch_step: 10 + use_visualdl: False + + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + #feature postprocess + feature_normalize: False + feature_binarize: "round" + +# model architecture +Arch: + name: "RecModel" + Backbone: + name: "MobileNetV3_large_x1_0" + pretrained: True + class_num: 512 + Head: + name: "FC" + class_num: 50030 + embedding_size: 512 + + infer_output_key: "features" + infer_add_softmax: "false" + +# loss function config for train/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [50, 150] + values: [0.1, 0.01, 0.001] + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 256 + - RandCropImage: + size: 227 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2023, 0.1994, 0.2010] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 227 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2023, 0.1994, 0.2010] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 227 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2023, 0.1994, 0.2010] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + 
topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + +# switch to metric below when eval by retrieval +# - Recallk: +# topk: [1] +# - mAP: +# - Precisionk: +# topk: [1] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/ResNet50_vd_Aliproduct.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/ResNet50_vd_Aliproduct.yaml new file mode 100644 index 000000000..70f805647 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/ResNet50_vd_Aliproduct.yaml @@ -0,0 +1,119 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 10 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: classification + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: ResNet50_vd + pretrained: True + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 2048 + class_num: 512 + Head: + name: FC + embedding_size: 512 + class_num: 50030 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.05 + regularizer: + name: 'L2' + coeff: 0.00007 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/ResNet50_vd_Inshop.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/ResNet50_vd_Inshop.yaml new file mode 100644 index 000000000..18ddfa3a8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/ResNet50_vd_Inshop.yaml @@ -0,0 +1,157 @@ +# global configs +Global: + checkpoints: null + pretrained_model: "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/product_ResNet50_vd_Aliproduct_v1.0_pretrained.pdparams" + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 10 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: 
retrieval + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: ResNet50_vd + pretrained: False + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 2048 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 3997 + margin: 0.15 + scale: 30 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - TripletLossV2: + weight: 1.0 + margin: 0.5 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: MultiStepDecay + learning_rate: 0.04 + milestones: [30, 60, 70, 80, 90, 100] + gamma: 0.5 + verbose: False + last_epoch: -1 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/Inshop/ + cls_label_path: ./dataset/Inshop/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + sampler: + name: PKSampler + batch_size: 64 + sample_per_id: 2 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: ImageNetDataset + image_root: ./dataset/Inshop/ + cls_label_path: ./dataset/Inshop/query_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: ImageNetDataset + image_root: ./dataset/Inshop/ + cls_label_path: ./dataset/Inshop/gallery_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/ResNet50_vd_SOP.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/ResNet50_vd_SOP.yaml new file mode 100644 index 000000000..7728a6678 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Products/ResNet50_vd_SOP.yaml @@ -0,0 +1,156 @@ +# global configs +Global: + checkpoints: null + pretrained_model: "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/product_ResNet50_vd_Aliproduct_v1.0_pretrained.pdparams" + output_dir: ./output/ + device: gpu + save_interval: 10 + eval_during_train: True + eval_interval: 10 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + +# model architecture +Arch: + name: RecModel + Backbone: + name: ResNet50_vd + pretrained: False + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 2048 + class_num: 512 + Head: + name: ArcMargin + 
embedding_size: 512 + class_num: 11319 + margin: 0.15 + scale: 30 + infer_output_key: features + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - TripletLossV2: + weight: 1.0 + margin: 0.5 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: MultiStepDecay + learning_rate: 0.01 + milestones: [30, 60, 70, 80, 90, 100] + gamma: 0.5 + verbose: False + last_epoch: -1 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: VeriWild + image_root: ./dataset/SOP/ + cls_label_path: ./dataset/SOP/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + + sampler: + name: PKSampler + batch_size: 64 + sample_per_id: 2 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/SOP/ + cls_label_path: ./dataset/SOP/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/SOP/ + cls_label_path: ./dataset/SOP/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ResNet50_UReID_infer.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ResNet50_UReID_infer.yaml new file mode 100644 index 000000000..750851d7c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ResNet50_UReID_infer.yaml @@ -0,0 +1,152 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + # pretrained_model: "./pd_model_trace/ISE/ISE_M_model" # pretrained ISE model for Market1501 + # pretrained_model: "./pd_model_trace/ISE/ISE_MS_model" # pretrained ISE model for MSMT17 + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 128, 256] + save_inference_dir: "./inference" + eval_mode: "retrieval" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "avg_pool" + Neck: + name: "BNNeck" + num_features: 2048 + Head: + name: "FC" + embedding_size: 2048 + class_num: 751 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 
2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "Market1501" # ["Market1501", "MSMT17"] + image_root: "./dataset" + cls_label_path: "bounding_box_train" + transform_ops: + - ResizeImage: + size: [128, 256] + interpolation: 'bicubic' + backend: 'pil' + - RandFlipImage: + flip_code: 1 + - Pad: + padding: 10 + fill: 0 + - RandomCrop: + size: [128, 256] + pad_if_needed: False + - NormalizeImage: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0.485, 0.456, 0.406] + + sampler: + name: PKSampler + batch_size: 16 + sample_per_id: 4 + drop_last: True + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + Eval: + Query: + dataset: + name: "Market1501" # ["Market1501", "MSMT17"] + image_root: "./dataset" + cls_label_path: "query" + transform_ops: + - ResizeImage: + size: [128, 256] + interpolation: 'bicubic' + backend: 'pil' + - NormalizeImage: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + + Gallery: + dataset: + name: "Market1501" # ["Market1501", "MSMT17"] + image_root: "./dataset" + cls_label_path: "bounding_box_test" + transform_ops: + - ResizeImage: + size: [128, 256] + interpolation: 'bicubic' + backend: 'pil' + - NormalizeImage: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/SVTR/svtr_base.yml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/SVTR/svtr_base.yml new file mode 100644 index 000000000..9ab15d5c4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/SVTR/svtr_base.yml @@ -0,0 +1,146 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 50 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 48, 320] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SVTR_base + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: False + channel_first: False + - ResizeImage: + size: + - 320 + - 48 + - RandFlipImage: + flip_code: 
1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: + - 320 + - 48 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/SVTR/svtr_large.yml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/SVTR/svtr_large.yml new file mode 100644 index 000000000..a3b556e97 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/SVTR/svtr_large.yml @@ -0,0 +1,146 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 50 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 48, 320] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SVTR_large + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: False + channel_first: False + - ResizeImage: + size: + - 320 + - 48 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: + - 320 + - 48 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' 
+ sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/SVTR/svtr_tiny.yml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/SVTR/svtr_tiny.yml new file mode 100644 index 000000000..21d6788af --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/SVTR/svtr_tiny.yml @@ -0,0 +1,146 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 50 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 48, 320] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + + +# mixed precision +AMP: + use_amp: False + use_fp16_test: False + scale_loss: 128.0 + use_dynamic_loss_scaling: True + use_promote: False + # O1: mixed fp16, O2: pure fp16 + level: O1 + + +# model architecture +Arch: + name: SVTR_tiny + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: False + channel_first: False + - ResizeImage: + size: + - 320 + - 48 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: + - 320 + - 48 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - 
TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/StrategySearch/person.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/StrategySearch/person.yaml new file mode 100644 index 000000000..906635595 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/StrategySearch/person.yaml @@ -0,0 +1,40 @@ +base_config_file: ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/person/Distillation/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_person +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.0075, 0.01, 0.0125] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size + search_values: [176, 192, 224] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob + search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Vehicle/PPLCNet_2.5x_ReID.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Vehicle/PPLCNet_2.5x_ReID.yaml new file mode 100644 index 000000000..eb9f145a1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Vehicle/PPLCNet_2.5x_ReID.yaml @@ -0,0 +1,158 @@ +# global configs +# pretrained_model: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/vehicle_reid_PPLCNet2.5x_VERIWild_v1.0_pretrained.pdparams +# VeriWild v1 small: recall1: 0.93736, recall5: 0.98427, mAP: 0.82125 +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output_reid/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 160 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + eval_mode: "retrieval" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "PPLCNet_x2_5" + pretrained: True + use_ssld: True + BackboneStopLayer: + name: "flatten" + Neck: + name: "FC" + embedding_size: 1280 + class_num: 512 + Head: + name: "ArcMargin" + embedding_size: 512 + class_num: 30671 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images/" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/train_list_start0.txt" + transform_ops: + - DecodeImage: + to_rgb: True + 
channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + + sampler: + name: PKSampler + batch_size: 128 + sample_per_id: 2 + drop_last: True + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + Eval: + Query: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id_query.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + + Gallery: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Vehicle/ResNet50.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Vehicle/ResNet50.yaml new file mode 100644 index 000000000..6b6172475 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Vehicle/ResNet50.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 160 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "avg_pool" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "ArcMargin" + embedding_size: 512 + class_num: 431 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + regularizer: + name: 'L2' + coeff: 0.0005 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "CompCars" + image_root: "./dataset/CompCars/image/" + label_root: "./dataset/CompCars/label/" + bbox_crop: True + cls_label_path: "./dataset/CompCars/train_test_split/classification/train_label.txt" + transform_ops: + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] 
+ + sampler: + name: PKSampler + batch_size: 128 + sample_per_id: 2 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: "CompCars" + image_root: "./dataset/CompCars/image/" + label_root: "./dataset/CompCars/label/" + cls_label_path: "./dataset/CompCars/train_test_split/classification/test_label.txt" + bbox_crop: True + transform_ops: + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Vehicle/ResNet50_ReID.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Vehicle/ResNet50_ReID.yaml new file mode 100644 index 000000000..c13d59afd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/Vehicle/ResNet50_ReID.yaml @@ -0,0 +1,155 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 160 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + eval_mode: "retrieval" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "avg_pool" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "ArcMargin" + embedding_size: 512 + class_num: 30671 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images/" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/train_list_start0.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] 
+ + sampler: + name: PKSampler + batch_size: 128 + sample_per_id: 2 + drop_last: True + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + Eval: + Query: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id_query.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + + Gallery: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/metric_learning/adaface_ir18.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/metric_learning/adaface_ir18.yaml new file mode 100644 index 000000000..2cbfe5da4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/metric_learning/adaface_ir18.yaml @@ -0,0 +1,105 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 26 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 112, 112] + save_inference_dir: "./inference" + eval_mode: "adaface" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "AdaFace_IR_18" + input_size: [112, 112] + Head: + name: "AdaMargin" + embedding_size: 512 + class_num: 70722 + m: 0.4 + s: 64 + h: 0.333 + t_alpha: 0.01 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [12, 20, 24] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "AdaFaceDataset" + root_dir: "dataset/face/" + label_path: "dataset/face/train_filter_label.txt" + transform: + - CropWithPadding: + prob: 0.2 + padding_num: 0 + size: [112, 112] + scale: [0.2, 1.0] + ratio: [0.75, 1.3333333333333333] + - RandomInterpolationAugment: + prob: 0.2 + - ColorJitter: + prob: 0.2 + brightness: 0.5 + contrast: 0.5 + saturation: 0.5 + hue: 0 + - RandomHorizontalFlip: + - ToTensor: + - Normalize: + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + + Eval: + dataset: + name: FiveValidationDataset + val_data_path: dataset/face/faces_emore + concat_mem_file_name: dataset/face/faces_emore/concat_validation_memfile + sampler: + name: BatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + 
num_workers: 6 + use_shared_memory: True +Metric: + Train: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/metric_learning/xbm_resnet50.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/metric_learning/xbm_resnet50.yaml new file mode 100644 index 000000000..44dcfe065 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/metric_learning/xbm_resnet50.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 35 + iter_per_epoch: &iter_per_epoch 1000 + print_batch_step: 20 + use_visualdl: False + eval_mode: retrieval + retrieval_feature_from: features # 'backbone' or 'features' + re_ranking: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + Backbone: + name: ResNet50_adaptive_max_pool2d + pretrained: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/resnet50-19c8e357_torch2paddle.pdparams + stem_act: null + BackboneStopLayer: + name: flatten + Neck: + name: FC + embedding_size: 2048 + class_num: &feat_dim 128 + weight_attr: + initializer: + name: KaimingNormal + fan_in: *feat_dim + negative_slope: 0.0 + nonlinearity: leaky_relu + bias_attr: + initializer: + name: Constant + value: 0.0 + +# loss function config for traing/eval process +Loss: + Train: + - ContrastiveLoss_XBM: + weight: 1.0 + xbm_size: 55000 + xbm_weight: 1.0 + start_iter: 1000 + margin: 0.5 + embedding_size: *feat_dim + epsilon: 1.0e-5 + normalize_feature: True + feature_from: features + Eval: + - ContrastiveLoss: + weight: 1.0 + margin: 0.5 + embedding_size: *feat_dim + normalize_feature: True + epsilon: 1.0e-5 + feature_from: features + +Optimizer: + name: Adam + lr: + name: ReduceOnPlateau + learning_rate: 0.0001 + mode: max + factor: 0.1 + patience: 4 + threshold: 0.001 + threshold_mode: rel + cooldown: 2 + min_lr: 0.000005 + epsilon: 1e-8 + by_epoch: True + regularizer: + name: L2 + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: VeriWild + image_root: ./dataset/SOP + cls_label_path: ./dataset/SOP/train_list.txt + backend: pil + transform_ops: + - Resize: + size: 256 + - RandomResizedCrop: + scale: [0.2, 1] + size: 224 + - RandomHorizontalFlip: + prob: 0.5 + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedRandomIdentitySampler + batch_size: 64 + num_instances: 4 + drop_last: False + shuffle: True + max_iters: *iter_per_epoch + loader: + num_workers: 8 + use_shared_memory: True + Eval: + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/SOP + cls_label_path: ./dataset/SOP/test_list.txt + backend: pil + transform_ops: + - Resize: + size: 256 + - CenterCrop: + size: 224 + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Query: + dataset: + name: VeriWild + image_root: ./dataset/SOP + cls_label_path: ./dataset/SOP/test_list.txt + backend: pil + transform_ops: + - Resize: + size: 256 + - CenterCrop: + size: 224 + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: 
[0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/multi_scale/MobileNetV1_multi_scale.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/multi_scale/MobileNetV1_multi_scale.yaml new file mode 100644 index 000000000..530e75075 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/multi_scale/MobileNetV1_multi_scale.yaml @@ -0,0 +1,138 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: MobileNetV1 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + # support to specify width and height respectively: + # scales: [(160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 64 + divided_factor: 32 + is_training: True + + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/.gitkeep b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/.gitkeep 
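For readers skimming the MobileNetV1_multi_scale recipe above, the MultiScaleSampler comments on first_bs and divided_factor are easier to see in code. The sketch below illustrates one common policy (shrink the batch as the image area grows so per-batch memory stays roughly level, and round each side to the downsampling multiple); it is an assumption about intent, not the MultiScaleSampler implementation, and the helper name is made up.

def batch_size_for_scale(scale, scales=(160, 192, 224, 288, 320),
                         first_bs=64, divided_factor=32):
    # Round the side length to a multiple of the downsampling factor, then
    # scale the batch size inversely with image area relative to the first
    # (smallest) resolution, mirroring the first_bs comment above.
    base = scales[0]
    side = (scale // divided_factor) * divided_factor
    return side, max(1, int(first_bs * (base * base) / (side * side)))

print([batch_size_for_scale(s) for s in (160, 192, 224, 288, 320)])
# e.g. [(160, 64), (192, 44), (224, 32), (288, 19), (320, 16)]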
new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/.gitkeep @@ -0,0 +1 @@ + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/CLIP_large_patch14_224_aesthetic.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/CLIP_large_patch14_224_aesthetic.yaml new file mode 100644 index 000000000..8d0a72371 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/CLIP_large_patch14_224_aesthetic.yaml @@ -0,0 +1,78 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 50 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: CLIP_large_patch14_224_aesthetic + pretrained: True + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: False + +Infer: + infer_imgs: deploy/images/practical/aesthetic_score_predictor/Highscore.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ScoreOutput + decimal_places: 2 + +Metric: + Eval: + - TopkAcc: + topk: [1, 2] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/EfficientNetB3_watermark.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/EfficientNetB3_watermark.yaml new file mode 100644 index 000000000..2fd142ae7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/EfficientNetB3_watermark.yaml @@ -0,0 +1,81 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 50 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: EfficientNetB3_watermark + pretrained: True + class_num: 2 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + 
drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: False + +Infer: + infer_imgs: deploy/images/practical/watermark_exists/watermark_example.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: contains_watermark + label_1: no_watermark + +Metric: + Eval: + - TopkAcc: + topk: [1, 2] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml new file mode 100644 index 000000000..c6415cd47 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml @@ -0,0 +1,150 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 50 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: PPHGNet_tiny + class_num: 2 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.05 + warmup_epoch: 3 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/phone_train_list_halfbody.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 2 + use_shared_memory: False + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/phone_val_list_halfbody.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + interpolation: bicubic + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 2 + use_shared_memory: False + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 
0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: dataset/phone_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 1] + Eval: + - TopkAcc: + topk: [1, 1] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/MobileNetV1_retrieval.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/MobileNetV1_retrieval.yaml new file mode 100644 index 000000000..bac477392 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/MobileNetV1_retrieval.yaml @@ -0,0 +1,157 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 50 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: MobileNetV1 + pretrained: False + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 1024 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 101 + margin: 0.15 + scale: 30 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - TripletLossV2: + weight: 1.0 + margin: 0.5 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: MultiStepDecay + learning_rate: 0.01 + milestones: [20, 30, 40] + gamma: 0.5 + verbose: False + last_epoch: -1 + regularizer: + name: "L2" + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: VeriWild + image_root: ./dataset/CUB_200_2011/ + cls_label_path: ./dataset/CUB_200_2011/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] 
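A quick aside on the ArcMargin head in the MobileNetV1_retrieval config above (margin 0.15, scale 30): it applies an additive angular margin to the ground-truth class before the softmax cross-entropy. The NumPy sketch below shows that idea only; it is not the ppcls ArcMargin layer, and the function name is illustrative.

import numpy as np

def arc_margin_logits(embeddings, class_weights, labels, margin=0.15, scale=30.0):
    # Cosine similarity between L2-normalized embeddings and class weights.
    e = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
    w = class_weights / np.linalg.norm(class_weights, axis=0, keepdims=True)
    cos = np.clip(e @ w, -1.0, 1.0)                      # (N, class_num)
    theta = np.arccos(cos)
    one_hot = np.zeros_like(cos)
    one_hot[np.arange(len(labels)), labels] = 1.0
    # Add the angular margin only on the target class, then rescale.
    return scale * np.cos(theta + margin * one_hot)

logits = arc_margin_logits(np.random.randn(4, 512), np.random.randn(512, 101),
                           np.array([0, 3, 7, 9]))       # fed into CELoss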
+ sampler: + name: DistributedRandomIdentitySampler + batch_size: 64 + num_instances: 2 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/CUB_200_2011/ + cls_label_path: ./dataset/CUB_200_2011/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/CUB_200_2011/ + cls_label_path: ./dataset/CUB_200_2011/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: "" + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/MobileNetV3_large_x1_0.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/MobileNetV3_large_x1_0.yaml new file mode 100644 index 000000000..d87dc0991 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/MobileNetV3_large_x1_0.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_large_x1_0 + class_num: 102 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.00375 + warmup_epoch: 5 + last_epoch: -1 + regularizer: + name: 'L2' + coeff: 0.000001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/flowers102/ + cls_label_path: ./dataset/flowers102/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/flowers102/ + cls_label_path: ./dataset/flowers102/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: 
+ - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ./dataset/flowers102/flowers102_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/ResNet50_vd.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/ResNet50_vd.yaml new file mode 100644 index 000000000..90b2c88d6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/ResNet50_vd.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50_vd + class_num: 102 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0125 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/flowers102/ + cls_label_path: ./dataset/flowers102/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/flowers102/ + cls_label_path: ./dataset/flowers102/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ./dataset/flowers102/flowers102_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/HRNet_W18_C_finetune_kunlun.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/HRNet_W18_C_finetune_kunlun.yaml new file mode 100644 index 000000000..6a461ccfa --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/HRNet_W18_C_finetune_kunlun.yaml @@ -0,0 
+1,68 @@ +mode: 'train' +ARCHITECTURE: + name: 'HRNet_W18_C' +pretrained_model: "./pretrained/HRNet_W18_C_pretrained" +model_save_dir: "./output/" +classes_num: 102 +total_images: 1020 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 10 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.00375 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000001 + +TRAIN: + batch_size: 20 + num_workers: 0 + file_list: "./dataset/flowers102/train_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 20 + num_workers: 0 + file_list: "./dataset/flowers102/val_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/ResNet50_vd_finetune_kunlun.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/ResNet50_vd_finetune_kunlun.yaml new file mode 100644 index 000000000..7fad5eebe --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/ResNet50_vd_finetune_kunlun.yaml @@ -0,0 +1,69 @@ +mode: 'train' +ARCHITECTURE: + name: 'ResNet50_vd' +pretrained_model: "./pretrained/ResNet50_vd_pretrained" +load_static_weights: true +model_save_dir: "./output/" +classes_num: 102 +total_images: 1020 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 20 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.00375 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.000001 + +TRAIN: + batch_size: 20 + num_workers: 1 + file_list: "./dataset/flowers102/train_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 20 + num_workers: 1 + file_list: "./dataset/flowers102/val_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/VGG16_finetune_kunlun.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/VGG16_finetune_kunlun.yaml new file mode 100644 index 000000000..389a5f35f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/VGG16_finetune_kunlun.yaml @@ -0,0 +1,70 @@ +mode: 'train' +ARCHITECTURE: + name: 'VGG16' + params: + stop_grad_layers: 5 +pretrained_model: "./pretrained/VGG16_pretrained" +model_save_dir: "./output/" +classes_num: 102 +total_images: 1020 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 20 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.0005 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00001 + +TRAIN: + batch_size: 20 + num_workers: 0 + file_list: "./dataset/flowers102/train_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 20 + num_workers: 0 + file_list: "./dataset/flowers102/val_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/VGG19_finetune_kunlun.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/VGG19_finetune_kunlun.yaml new file mode 100644 index 000000000..6ba38b974 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/kunlun/VGG19_finetune_kunlun.yaml @@ -0,0 +1,70 @@ +mode: 'train' +ARCHITECTURE: + name: 'VGG19' + params: + stop_grad_layers: 5 +pretrained_model: "./pretrained/VGG19_pretrained" +model_save_dir: "./output/" +classes_num: 102 +total_images: 1020 +save_interval: 1 +validate: True +valid_interval: 1 +epochs: 20 +topk: 5 +image_shape: [3, 224, 224] + +LEARNING_RATE: + function: 'Cosine' + params: + lr: 0.0005 + +OPTIMIZER: + function: 'Momentum' + params: + momentum: 0.9 + regularizer: + function: 'L2' + factor: 0.00001 + +TRAIN: + batch_size: 20 + num_workers: 0 + file_list: "./dataset/flowers102/train_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + +VALID: + batch_size: 20 + num_workers: 0 + file_list: "./dataset/flowers102/val_list.txt" + data_dir: "./dataset/flowers102/" + shuffle_seed: 0 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/new_user/ShuffleNetV2_x0_25.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/new_user/ShuffleNetV2_x0_25.yaml new file mode 100644 index 000000000..124636690 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/new_user/ShuffleNetV2_x0_25.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: cpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ShuffleNetV2_x0_25 + class_num: 102 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0125 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/flowers102/ + cls_label_path: ./dataset/flowers102/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/flowers102/ + cls_label_path: ./dataset/flowers102/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ./dataset/flowers102/flowers102_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/MobileNetV1_multilabel.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/MobileNetV1_multilabel.yaml new file mode 100644 index 000000000..7c64ae3b3 --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/MobileNetV1_multilabel.yaml @@ -0,0 +1,130 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 10 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + use_multilabel: True +# model architecture +Arch: + name: MobileNetV1 + class_num: 33 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + Eval: + - MultiLabelLoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: ./dataset/NUS-WIDE-SCENE/NUS-SCENE-dataset/images/ + cls_label_path: ./dataset/NUS-WIDE-SCENE/NUS-SCENE-dataset/multilabel_train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: MultiLabelDataset + image_root: ./dataset/NUS-WIDE-SCENE/NUS-SCENE-dataset/images/ + cls_label_path: ./dataset/NUS-WIDE-SCENE/NUS-SCENE-dataset/multilabel_test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/0517_2715693311.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: MultiLabelThreshOutput + threshold: 0.5 + class_id_map_file: "ppcls/utils/NUS-WIDE-SCENE_label_list.txt" + delimiter: " " + +Metric: + Train: + - AccuracyScore: + - HammingDistance: + Eval: + - AccuracyScore: + - HammingDistance: diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/MobileNetV3_large_x1_0_CIFAR100_finetune.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/MobileNetV3_large_x1_0_CIFAR100_finetune.yaml new file mode 100644 index 000000000..423a45389 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/MobileNetV3_large_x1_0_CIFAR100_finetune.yaml @@ -0,0 +1,127 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 32, 32] + save_inference_dir: ./inference + +# model architecture +Arch: + name: 
MobileNetV3_large_x1_0 + class_num: 100 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR100/ + cls_label_path: ./dataset/CIFAR100/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 32 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR100/ + cls_label_path: ./dataset/CIFAR100/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/R50_vd_distill_MV3_large_x1_0_CIFAR100.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/R50_vd_distill_MV3_large_x1_0_CIFAR100.yaml new file mode 100644 index 000000000..a27068d16 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/R50_vd_distill_MV3_large_x1_0_CIFAR100.yaml @@ -0,0 +1,151 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 32, 32] + save_inference_dir: "./inference" + +# model architecture +Arch: + name: "DistillationModel" + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: ResNet50_vd + class_num: 100 + pretrained: "./pretrained/best_model" + - Student: + name: MobileNetV3_large_x1_0 + class_num: 100 + pretrained: True + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationCELoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval 
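On the R50_vd_distill_MV3_large_x1_0_CIFAR100 recipe above: freeze_params_list keeps the ResNet50_vd teacher fixed, and model_name_pairs [["Student", "Teacher"]] trains the student against the teacher's softened outputs. The snippet below is a generic soft-target cross-entropy sketch of that pairing, not the DistillationCELoss source; details such as a temperature term are omitted.

import numpy as np

def soft_cross_entropy(student_logits, teacher_logits):
    # Teacher probabilities act as soft labels for the student.
    def softmax(x):
        x = x - x.max(axis=-1, keepdims=True)
        e = np.exp(x)
        return e / e.sum(axis=-1, keepdims=True)
    soft_targets = softmax(teacher_logits)               # teacher stays frozen
    log_probs = np.log(softmax(student_logits) + 1e-12)
    return float(-(soft_targets * log_probs).sum(axis=-1).mean())

loss = soft_cross_entropy(np.random.randn(8, 100), np.random.randn(8, 100))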
+DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "./dataset/CIFAR100/" + cls_label_path: "./dataset/CIFAR100/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 32 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: "./dataset/CIFAR100/" + cls_label_path: "./dataset/CIFAR100/test_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + +Infer: + infer_imgs: "docs/images/inference_deployment/whl_demo.jpg" + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: DistillationPostProcess + func: Topk + topk: 5 + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/ResNet50_vd_CIFAR100.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/ResNet50_vd_CIFAR100.yaml new file mode 100644 index 000000000..10c326a40 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/ResNet50_vd_CIFAR100.yaml @@ -0,0 +1,127 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 32, 32] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50_vd + class_num: 100 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR100/ + cls_label_path: ./dataset/CIFAR100/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 32 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR100/ + cls_label_path: ./dataset/CIFAR100/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + 
- CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/ResNet50_vd_mixup_CIFAR100_finetune.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/ResNet50_vd_mixup_CIFAR100_finetune.yaml new file mode 100644 index 000000000..d8ff817f8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/ResNet50_vd_mixup_CIFAR100_finetune.yaml @@ -0,0 +1,127 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 32, 32] + save_inference_dir: ./inference + +# model architecture +Arch: + name: ResNet50_vd + class_num: 100 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR100/ + cls_label_path: ./dataset/CIFAR100/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 32 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/CIFAR100/ + cls_label_path: ./dataset/CIFAR100/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 36 + - CropImage: + size: 32 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git 
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/VGG19_CIFAR10_DeepHash.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/VGG19_CIFAR10_DeepHash.yaml new file mode 100644 index 000000000..97228828a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/quick_start/professional/VGG19_CIFAR10_DeepHash.yaml @@ -0,0 +1,147 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + eval_mode: "retrieval" + epochs: 128 + print_batch_step: 10 + use_visualdl: False + + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + + #feature postprocess + feature_normalize: False + feature_binarize: "round" + +# model architecture +Arch: + name: "RecModel" + Backbone: + name: "VGG19Sigmoid" + pretrained: True + class_num: 48 + Head: + name: "FC" + class_num: 10 + embedding_size: 48 + + infer_output_key: "features" + infer_add_softmax: "false" + +# loss function config for train/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.01 + decay_epochs: [200] + values: [0.01, 0.001] + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/cifar10/ + cls_label_path: ./dataset/cifar10/cifar10-2/train.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 256 + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2023, 0.1994, 0.2010] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: ImageNetDataset + image_root: ./dataset/cifar10/ + cls_label_path: ./dataset/cifar10/cifar10-2/test.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2023, 0.1994, 0.2010] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: ImageNetDataset + image_root: ./dataset/cifar10/ + cls_label_path: ./dataset/cifar10/cifar10-2/database.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2023, 0.1994, 0.2010] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - mAP: + - Precisionk: + topk: [1, 5] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/MetaBIN_ResNet50_cross_domain.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/MetaBIN_ResNet50_cross_domain.yaml new file mode 100644 index 000000000..19c9adafc --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/MetaBIN_ResNet50_cross_domain.yaml @@ -0,0 +1,277 @@ +# global configs +Global: + checkpoints: null 
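One note on the VGG19_CIFAR10_DeepHash config above: with feature_binarize set to "round", the 48-dimensional sigmoid feature is rounded into a binary hash code, and retrieval compares query and gallery codes by Hamming distance. The sketch below illustrates that post-processing under those assumptions; the helper names are hypothetical, not ppcls functions.

import numpy as np

def to_hash_code(features):
    # Sigmoid outputs in [0, 1] are rounded to a 48-bit binary code.
    return np.round(features).astype(np.uint8)

def hamming_distance(code_a, code_b):
    return int(np.count_nonzero(code_a != code_b))

query = to_hash_code(np.random.rand(1, 48))[0]
gallery = to_hash_code(np.random.rand(100, 48))
ranking = np.argsort([hamming_distance(query, g) for g in gallery])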
+ pretrained_model: null + output_dir: "./output/" + device: "gpu" + iter_per_epoch: &iter_per_epoch 50 + warmup_iter: 10 + save_interval: 8 + eval_during_train: True + eval_interval: 8 + epochs: &epochs 348 # 348*50 = 120*145 = 17400 iters + print_batch_step: 25 + use_visualdl: False + eval_mode: "retrieval" + retrieval_feature_from: "features" # 'backbone' or 'features' + re_ranking: False + # used for static mode and model export + image_shape: [3, 256, 128] + save_inference_dir: "./inference" + train_mode: "metabin" + +AMP: + scale_loss: 65536 + use_dynamic_loss_scaling: True + +# model architecture +Arch: + name: "RecModel" + Backbone: + name: "ResNet50_metabin" + pretrained: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/resnet50-19c8e357_torch2paddle.pdparams + bias_lr_factor: 2.0 + BackboneStopLayer: + name: "flatten" + Neck: + name: MetaBNNeck + num_features: &feat_dim 2048 + use_global_stats: True + Head: + name: "FC" + embedding_size: *feat_dim + class_num: 751 + weight_attr: + initializer: + name: KaimingUniform + negative_slope: 2.23606 # math.sqrt(5) + nonlinearity: "leaky_relu" + bias_attr: False + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_train" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bicubic" + backend: "pil" + - RandFlipImage: + flip_code: 1 + - Pad: + padding: 10 + - RandCropImageV2: + size: [128, 256] + - ColorJitter: + brightness: 0.15 + contrast: 0.15 + saturation: 0.1 + hue: 0.1 + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: NaiveIdentityBatchSampler + batch_size: 96 + num_instances: 4 + drop_last: True + loader: + num_workers: 4 + use_shared_memory: True + + Metalearning: + Train: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_train" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bicubic" + backend: "pil" + - RandFlipImage: + flip_code: 1 + - Pad: + padding: 10 + - RandCropImageV2: + size: [128, 256] + - ColorJitter: + brightness: 0.15 + contrast: 0.15 + saturation: 0.1 + hue: 0.1 + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DomainShuffleBatchSampler + batch_size: 96 + num_instances: 4 + drop_last: True + camera_to_domain: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: "DukeMTMC" + image_root: "./dataset/" + cls_label_path: "query" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bicubic" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: "DukeMTMC" + image_root: "./dataset/" + cls_label_path: "bounding_box_test" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bicubic" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +# loss function config for 
traing/eval process +Loss: + Train: + - CELossForMetaBIN: + weight: 1.0 + epsilon: 0.1 + - TripletLossForMetaBIN: + weight: 1.0 + margin: 0.3 + feature_from: "backbone" + - IntraDomainScatterLoss: + weight: 1.0 + normalize_feature: True + feature_from: "backbone" + - InterDomainShuffleLoss: + weight: 1.0 + normalize_feature: False + feature_from: "backbone" + Basic: + - CELossForMetaBIN: + weight: 1.0 + epsilon: 0.1 + - TripletLossForMetaBIN: + weight: 1.0 + margin: 0.3 + feature_from: "backbone" + MetaTrain: + - CELossForMetaBIN: + weight: 1.0 + epsilon: 0.1 + - TripletLossForMetaBIN: + weight: 1.0 + margin: 0.3 + feature_from: "backbone" + - IntraDomainScatterLoss: + weight: 1.0 + normalize_feature: True + feature_from: "backbone" + - InterDomainShuffleLoss: + weight: 1.0 + normalize_feature: False + feature_from: "backbone" + MetaTest: + - CELossForMetaBIN: + weight: 1.0 + epsilon: 0.1 + - TripletLossForMetaBIN: + weight: 1.0 + margin: 0.3 + feature_from: "backbone" + Eval: + - TripletLossForMetaBIN: + weight: 1.0 + margin: 0.3 + feature_from: "backbone" + +Optimizer: + - Momentum: + scope: ".*(conv|batch_norm|instance_norm|feat_bn|fc)" + lr: + name: MultiStepDecay + epochs: *epochs + learning_rate: 0.01 + step_each_epoch: *iter_per_epoch + milestones: [145, 261] + gamma: 0.1 + warmup_epoch: 29 + warmup_start_lr: 0.0001 + by_epoch: False + last_epoch: -1 + momentum: 0.9 + regularizer: + name: "L2" + coeff: 0.0005 + - SGD: + scope: "backbone.*gate" + lr: + name: Constant + learning_rate: 0.2 + last_epoch: -1 + - SGD: + scope: "RecModel" + lr: + name: Cyclic + epochs: *epochs + step_each_epoch: *iter_per_epoch + base_learning_rate: 0.001 + max_learning_rate: 0.1 + warmup_epoch: 0 + warmup_start_lr: 1 + step_size_up: 1095 + step_size_down: 1095 + by_epoch: False + last_epoch: 0 + +Metric: + Eval: + - Recallk: + topk: [1, 5, 10] + - mAP: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/strong_baseline/baseline.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/strong_baseline/baseline.yaml new file mode 100644 index 000000000..5c83b8da8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/strong_baseline/baseline.yaml @@ -0,0 +1,158 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 40 + eval_during_train: True + eval_interval: 10 + epochs: 120 + print_batch_step: 20 + use_visualdl: False + eval_mode: "retrieval" + retrieval_feature_from: "backbone" # 'backbone' or 'neck' + re_ranking: False + # used for static mode and model export + image_shape: [3, 256, 128] + save_inference_dir: "./inference" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50" + pretrained: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/resnet50-19c8e357_torch2paddle.pdparams + stem_act: null + BackboneStopLayer: + name: "flatten" + Head: + name: "FC" + embedding_size: 2048 + class_num: 751 + weight_attr: + initializer: + name: KaimingUniform + fan_in: 12288 # 6*embedding_size + bias_attr: + initializer: + name: KaimingUniform + fan_in: 12288 # 6*embedding_size + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - TripletLossV2: + weight: 1.0 + margin: 0.3 + normalize_feature: False + feature_from: "backbone" + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Adam + lr: + name: Piecewise + decay_epochs: [40, 70] 
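Worth calling out in the MetaBIN_ResNet50_cross_domain recipe above: Optimizer is a list, and each entry carries a scope regex (".*(conv|batch_norm|instance_norm|feat_bn|fc)", "backbone.*gate", "RecModel") so that different parameter groups get their own optimizer and schedule. The sketch below only illustrates regex-based routing by parameter name; ppcls resolves scopes against its own module naming, so treat the matching rule here as an assumption.

import re

def group_params_by_scope(named_params, scopes):
    # Assign each named parameter to the first scope regex it matches.
    groups = {scope: [] for scope in scopes}
    for name, param in named_params:
        for scope in scopes:
            if re.match(scope, name):
                groups[scope].append(param)
                break
    return groups

scopes = [r".*(conv|batch_norm|instance_norm|feat_bn|fc)", r"backbone.*gate", r"RecModel"]
demo = [("backbone.conv1.weight", None), ("backbone.layer4.gate.alpha", None)]
print({s: len(p) for s, p in group_params_by_scope(demo, scopes).items()})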
+ values: [0.00035, 0.000035, 0.0000035] + by_epoch: True + last_epoch: 0 + regularizer: + name: "L2" + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_train" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - RandFlipImage: + flip_code: 1 + - Pad: + padding: 10 + - RandCropImageV2: + size: [128, 256] + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedRandomIdentitySampler + batch_size: 64 + num_instances: 4 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + Query: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "query" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_test" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/strong_baseline/softmax_triplet.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/strong_baseline/softmax_triplet.yaml new file mode 100644 index 000000000..43f1de62f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/strong_baseline/softmax_triplet.yaml @@ -0,0 +1,176 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 40 + eval_during_train: True + eval_interval: 10 + epochs: 120 + print_batch_step: 20 + use_visualdl: False + eval_mode: "retrieval" + retrieval_feature_from: "features" # 'backbone' or 'features' + re_ranking: False + # used for static mode and model export + image_shape: [3, 256, 128] + save_inference_dir: "./inference" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/resnet50-19c8e357_torch2paddle.pdparams + stem_act: null + BackboneStopLayer: + name: "flatten" + Neck: + name: BNNeck + num_features: &feat_dim 2048 + weight_attr: + initializer: + name: Constant + value: 1.0 + bias_attr: + initializer: + name: Constant + value: 0.0 + learning_rate: 1.0e-20 # NOTE: Temporarily set lr small enough to freeze the bias to zero + Head: + name: "FC" + embedding_size: *feat_dim + class_num: 751 + weight_attr: + initializer: + name: Normal + std: 0.001 + bias_attr: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + - TripletLossV2: + weight: 1.0 + margin: 0.3 + 
normalize_feature: False + feature_from: "backbone" + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Adam + lr: + name: Piecewise + decay_epochs: [30, 60] + values: [0.00035, 0.000035, 0.0000035] + warmup_epoch: 10 + warmup_start_lr: 0.0000035 + by_epoch: True + last_epoch: 0 + regularizer: + name: "L2" + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_train" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - RandFlipImage: + flip_code: 1 + - Pad: + padding: 10 + - RandCropImageV2: + size: [128, 256] + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0.485, 0.456, 0.406] + sampler: + name: DistributedRandomIdentitySampler + batch_size: 64 + num_instances: 4 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + Query: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "query" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_test" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/strong_baseline/softmax_triplet_with_center.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/strong_baseline/softmax_triplet_with_center.yaml new file mode 100644 index 000000000..70c70a99b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/reid/strong_baseline/softmax_triplet_with_center.yaml @@ -0,0 +1,187 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 40 + eval_during_train: True + eval_interval: 10 + epochs: 120 + print_batch_step: 20 + use_visualdl: False + eval_mode: "retrieval" + retrieval_feature_from: "features" # 'backbone' or 'features' + re_ranking: False + # used for static mode and model export + image_shape: [3, 256, 128] + save_inference_dir: "./inference" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/resnet50-19c8e357_torch2paddle.pdparams + stem_act: null + BackboneStopLayer: + name: "flatten" + Neck: + name: BNNeck + num_features: &feat_dim 2048 + weight_attr: + initializer: + name: Constant + value: 1.0 + bias_attr: + initializer: + name: Constant + value: 0.0 + learning_rate: 1.0e-20 # NOTE: Temporarily set lr small 
enough to freeze the bias to zero + Head: + name: "FC" + embedding_size: *feat_dim + class_num: &class_num 751 + weight_attr: + initializer: + name: Normal + std: 0.001 + bias_attr: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + - TripletLossV2: + weight: 1.0 + margin: 0.3 + normalize_feature: False + feature_from: "backbone" + - CenterLoss: + weight: 0.0005 + num_classes: *class_num + feat_dim: *feat_dim + feature_from: "backbone" + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + - Adam: + scope: RecModel + lr: + name: Piecewise + decay_epochs: [30, 60] + values: [0.00035, 0.000035, 0.0000035] + warmup_epoch: 10 + warmup_start_lr: 0.0000035 + by_epoch: True + last_epoch: 0 + regularizer: + name: "L2" + coeff: 0.0005 + - SGD: + scope: CenterLoss + lr: + name: ConstLR + learning_rate: 1000.0 # NOTE: set to ori_lr*(1/centerloss_weight) to avoid manually scaling centers' gradidents. + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_train" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - RandFlipImage: + flip_code: 1 + - Pad: + padding: 10 + - RandCropImageV2: + size: [128, 256] + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0.485, 0.456, 0.406] + sampler: + name: DistributedRandomIdentitySampler + batch_size: 64 + num_instances: 4 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + Query: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "query" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_test" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/GeneralRecognition_PPLCNet_x2_5_quantization.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/GeneralRecognition_PPLCNet_x2_5_quantization.yaml new file mode 100644 index 000000000..7b21d0ba8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/GeneralRecognition_PPLCNet_x2_5_quantization.yaml @@ -0,0 +1,154 @@ +# global configs +Global: + checkpoints: null + pretrained_model: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/general_PPLCNet_x2_5_pretrained_v1.0.pdparams + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 10 + use_visualdl: False + # used for static mode and 
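Editorial note: the ConstLR of 1000.0 on the CenterLoss scope follows the in-file note ori_lr * (1 / centerloss_weight). The value of ori_lr is not stated in this file; 0.5, the value commonly used for center updates in strong-baseline re-ID implementations, is assumed below and reproduces the configured number:

```python
# Assumption: ori_lr = 0.5 (not stated in the config above).
centerloss_weight = 0.0005
ori_lr = 0.5
print(ori_lr * (1 / centerloss_weight))  # 1000.0 -> the ConstLR learning_rate
```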
model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + eval_mode: retrieval + use_dali: False + to_static: False + +# for quantizaiton or prune model +Slim: + ## for prune + quant: + name: pact + +# model architecture +Arch: + name: RecModel + infer_output_key: features + infer_add_softmax: False + + Backbone: + name: PPLCNet_x2_5 + pretrained: False + use_ssld: True + BackboneStopLayer: + name: "flatten" + Neck: + name: FC + embedding_size: 1280 + class_num: 512 + Head: + name: ArcMargin + embedding_size: 512 + class_num: 185341 + margin: 0.2 + scale: 30 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.002 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/train_reg_all_data.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + Query: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ + cls_label_path: ./dataset/Aliproduct/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/MobileNetV3_large_x1_0_prune.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/MobileNetV3_large_x1_0_prune.yaml new file mode 100644 index 000000000..6655c3a0a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/MobileNetV3_large_x1_0_prune.yaml @@ -0,0 +1,139 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# for quantization or prune model +Slim: + ## for prune + prune: + name: fpgm + pruned_ratio: 0.3 + +# model architecture +Arch: + name: MobileNetV3_large_x1_0 + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: 
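Editorial note: Slim.prune with name fpgm and pruned_ratio 0.3 refers to Filter Pruning via Geometric Median: within each conv layer, the 30% of filters closest to the layer's geometric median are treated as redundant and removed. A small numpy sketch of that criterion (an illustration of the idea, not the PaddleSlim code):

```python
# Sketch only: score each filter by its summed distance to all other filters;
# the smallest scores sit closest to the geometric median and get pruned.
import numpy as np

def fpgm_keep_mask(conv_weight, pruned_ratio=0.3):
    # conv_weight: (out_channels, in_channels, kh, kw)
    flat = conv_weight.reshape(conv_weight.shape[0], -1)
    dists = np.linalg.norm(flat[:, None, :] - flat[None, :, :], axis=2)
    scores = dists.sum(axis=1)
    n_prune = int(pruned_ratio * len(scores))
    keep = np.ones(len(scores), dtype=bool)
    keep[np.argsort(scores)[:n_prune]] = False
    return keep
```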
+ name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.65 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/MobileNetV3_large_x1_0_quantization.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/MobileNetV3_large_x1_0_quantization.yaml new file mode 100644 index 000000000..517c4677c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/MobileNetV3_large_x1_0_quantization.yaml @@ -0,0 +1,138 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# for quantalization or prune model +Slim: + ## for quantization + quant: + name: pact + +# model architecture +Arch: + name: MobileNetV3_large_x1_0 + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.065 + warmup_epoch: 0 + regularizer: + name: 'L2' + coeff: 0.00002 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: 
False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/PPLCNet_x1_0_quantization.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/PPLCNet_x1_0_quantization.yaml new file mode 100644 index 000000000..40111a036 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/PPLCNet_x1_0_quantization.yaml @@ -0,0 +1,138 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# for quantalization or prune model +Slim: + ## for quantization + quant: + name: pact + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.02 + warmup_epoch: 0 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AutoAugment: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: 
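Editorial note: the quantization configs here all set Slim.quant to pact. PACT clips activations to a learnable bound alpha before quantizing them; the clipping itself is just clip(x, 0, alpha), commonly written as below (sketch only, with alpha fixed for illustration rather than learned):

```python
import numpy as np

def pact(x, alpha=6.0):
    # Equivalent to np.clip(x, 0, alpha); in real PACT, alpha is a trainable
    # parameter regularized toward small values.
    return 0.5 * (np.abs(x) - np.abs(x - alpha) + alpha)

print(pact(np.array([-1.0, 2.0, 10.0])))  # [0. 2. 6.]
```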
+ - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vd_prune.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vd_prune.yaml new file mode 100644 index 000000000..7bfc537b1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vd_prune.yaml @@ -0,0 +1,138 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 200 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# for quantization or prune model +Slim: + ## for prune + prune: + name: fpgm + pruned_ratio: 0.3 + +# model architecture +Arch: + name: ResNet50_vd + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.1 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vd_quantization.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vd_quantization.yaml new file mode 100644 index 000000000..f9db41020 --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vd_quantization.yaml @@ -0,0 +1,137 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# for quantalization or prune model +Slim: + ## for quantization + quant: + name: pact + +# model architecture +Arch: + name: ResNet50_vd + class_num: 1000 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + regularizer: + name: 'L2' + coeff: 0.00007 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_cls_prune.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_cls_prune.yaml new file mode 100644 index 000000000..1f6fea887 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_cls_prune.yaml @@ -0,0 +1,135 @@ +# global configs +Global: + checkpoints: null + pretrained_model: "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/vehicle_cls_ResNet50_CompCars_v1.2_pretrained.pdparams" + output_dir: "./output_vehicle_cls_prune/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 160 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + +Slim: + prune: + name: fpgm + pruned_ratio: 0.3 + +# model architecture +Arch: + name: "RecModel" + 
infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "avg_pool" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "ArcMargin" + embedding_size: 512 + class_num: 431 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + regularizer: + name: 'L2' + coeff: 0.0005 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "CompCars" + image_root: "./dataset/CompCars/image/" + label_root: "./dataset/CompCars/label/" + bbox_crop: True + cls_label_path: "./dataset/CompCars/train_test_split/classification/train_label.txt" + transform_ops: + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + + sampler: + name: PKSampler + batch_size: 128 + sample_per_id: 2 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: "CompCars" + image_root: "./dataset/CompCars/image/" + label_root: "./dataset/CompCars/label/" + cls_label_path: "./dataset/CompCars/train_test_split/classification/test_label.txt" + bbox_crop: True + transform_ops: + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_cls_quantization.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_cls_quantization.yaml new file mode 100644 index 000000000..026b86547 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_cls_quantization.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/vehicle_cls_ResNet50_CompCars_v1.2_pretrained.pdparams" + output_dir: "./output_vehicle_cls_pact/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 80 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + +Slim: + quant: + name: pact + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "avg_pool" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "ArcMargin" + embedding_size: 512 + class_num: 431 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.001 
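Editorial note: both vehicle configs use an ArcMargin head with margin 0.15 and scale 32, i.e. an additive angular margin on the target-class cosine logit followed by scaling before softmax cross-entropy. A simplified numpy version of that transform (it omits the easy-margin edge cases a full implementation handles):

```python
import numpy as np

def arc_margin_logits(cosine, labels, margin=0.15, scale=32.0):
    # cosine: (N, C) similarities between L2-normalized features and class
    # weights; labels: (N,) integer class ids.
    theta = np.arccos(np.clip(cosine, -1.0, 1.0))
    with_margin = np.cos(theta + margin)
    out = cosine.copy()
    rows = np.arange(len(labels))
    out[rows, labels] = with_margin[rows, labels]
    return scale * out  # fed to softmax cross-entropy
```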
+ regularizer: + name: 'L2' + coeff: 0.0005 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "CompCars" + image_root: "./dataset/CompCars/image/" + label_root: "./dataset/CompCars/label/" + bbox_crop: True + cls_label_path: "./dataset/CompCars/train_test_split/classification/train_label.txt" + transform_ops: + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + + sampler: + name: PKSampler + batch_size: 64 + sample_per_id: 2 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: "CompCars" + image_root: "./dataset/CompCars/image/" + label_root: "./dataset/CompCars/label/" + cls_label_path: "./dataset/CompCars/train_test_split/classification/test_label.txt" + bbox_crop: True + transform_ops: + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_reid_prune.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_reid_prune.yaml new file mode 100644 index 000000000..63b87f1ca --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_reid_prune.yaml @@ -0,0 +1,162 @@ +# global configs +Global: + checkpoints: null + pretrained_model: "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/vehicle_reid_ResNet50_VERIWild_v1.1_pretrained.pdparams" + output_dir: "./output_vehicle_reid_prune/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 160 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + eval_mode: "retrieval" + +# for quantizaiton or prune model +Slim: + ## for prune + prune: + name: fpgm + pruned_ratio: 0.3 + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "avg_pool" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "ArcMargin" + embedding_size: 512 + class_num: 30671 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images/" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/train_list_start0.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 
0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + + sampler: + name: PKSampler + batch_size: 128 + sample_per_id: 2 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + Eval: + Query: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id_query.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + + Gallery: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_reid_quantization.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_reid_quantization.yaml new file mode 100644 index 000000000..cca9915e2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/slim/ResNet50_vehicle_reid_quantization.yaml @@ -0,0 +1,161 @@ +# global configs +Global: + checkpoints: null + pretrained_model: "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/vehicle_reid_ResNet50_VERIWild_v1.1_pretrained.pdparams" + output_dir: "./output_vehicle_reid_pact/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + eval_mode: "retrieval" + +# for quantizaiton or prune model +Slim: + ## for prune + quant: + name: pact + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: True + BackboneStopLayer: + name: "avg_pool" + Neck: + name: "VehicleNeck" + in_channels: 2048 + out_channels: 512 + Head: + name: "ArcMargin" + embedding_size: 512 + class_num: 30671 + margin: 0.15 + scale: 32 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - SupConLoss: + weight: 1.0 + views: 2 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images/" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/train_list_start0.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - AugMix: + prob: 0.5 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - 
RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0., 0., 0.] + + sampler: + name: PKSampler + batch_size: 64 + sample_per_id: 2 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + Eval: + Query: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id_query.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + + Gallery: + dataset: + name: "VeriWild" + image_root: "./dataset/VeRI-Wild/images" + cls_label_path: "./dataset/VeRI-Wild/train_test_split/test_3000_id.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 6 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} + diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/CCSSL/FixMatchCCSSL_cifar100_10000_4gpu.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/CCSSL/FixMatchCCSSL_cifar100_10000_4gpu.yaml new file mode 100644 index 000000000..a2382817d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/CCSSL/FixMatchCCSSL_cifar100_10000_4gpu.yaml @@ -0,0 +1,209 @@ +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: -1 + eval_during_train: true + eval_interval: 1 + epochs: 1024 + iter_per_epoch: 1024 + print_batch_step: 20 + use_visualdl: false + use_dali: false + train_mode: fixmatch_ccssl + image_shape: [3, 32, 32] + save_inference_dir: ./inference + +SSL: + T: 1 + threshold: 0.95 + +EMA: + decay: 0.999 + +Arch: + name: RecModel + infer_output_key: logits + infer_add_softmax: false + head_feature_from: backbone + Backbone: + name: WideResNet + widen_factor: 8 + depth: 28 + dropout: 0 + num_classes: 100 + low_dim: 64 + proj: false + proj_after: false + BackboneStopLayer: + name: bn1 + Neck: + name: FRFNNeck + num_features: 512 + low_dim: 64 + Head: + name: FC + embedding_size: 512 + class_num: 100 + + use_sync_bn: true + +Loss: + Train: + - CELoss: + weight: 1.0 + reduction: "mean" + Eval: + - CELoss: + weight: 1.0 + +UnLabelLoss: + Train: + - CCSSLCELoss: + weight: 1. 
+ - SoftSupConLoss: + weight: 1.0 + temperature: 0.07 + +Optimizer: + name: Momentum + momentum: 0.9 + use_nesterov: true + weight_decay: 0.001 + lr: + name: 'CosineFixmatch' + learning_rate: 0.03 + num_warmup_steps: 0 + +DataLoader: + mean: [0.5071, 0.4867, 0.4408] + std: [0.2675, 0.2565, 0.2761] + Train: + dataset: + name: Cifar100 + data_file: null + mode: 'train' + download: true + backend: 'pil' + sample_per_label: 100 + expand_labels: 1 + transform_ops: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5071, 0.4867, 0.4408] + std: [0.2675, 0.2565, 0.2761] + order: hwc + + sampler: + name: DistributedBatchSampler + batch_size: 16 + drop_last: true + shuffle: true + loader: + num_workers: 4 + use_shared_memory: true + + UnLabelTrain: + dataset: + name: Cifar100 + data_file: null + mode: 'train' + backend: 'pil' + download: true + + transform_ops_weak: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5071, 0.4867, 0.4408] + std: [0.2675, 0.2565, 0.2761] + order: hwc + + transform_ops_strong: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - RandAugment: + num_layers: 2 + magnitude: 10 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5071, 0.4867, 0.4408] + std: [0.2675, 0.2565, 0.2761] + order: hwc + + transform_ops_strong2: + - RandomResizedCrop: + size: [32, 32] + - RandFlipImage: + flip_code: 1 + - RandomApply: + transforms: + - RawColorJitter: + brightness: 0.4 + contrast: 0.4 + saturation: 0.4 + hue: 0.1 + p: 1.0 # refer to official settings + - RandomGrayscale: + p: 0.2 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0., 0., 0.] + std: [1., 1., 1.] 
+ order: hwc + + + sampler: + name: DistributedBatchSampler + batch_size: 112 + drop_last: true + shuffle: true + loader: + num_workers: 4 + use_shared_memory: true + + Eval: + dataset: + name: Cifar100 + mode: 'test' + backend: 'pil' + download: true + data_file: null + transform_ops: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.5071, 0.4867, 0.4408] + std: [0.2675, 0.2565, 0.2761] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 16 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: true + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/CCSSL/FixMatchCCSSL_cifar10_4000_4gpu.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/CCSSL/FixMatchCCSSL_cifar10_4000_4gpu.yaml new file mode 100644 index 000000000..79667edc6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/CCSSL/FixMatchCCSSL_cifar10_4000_4gpu.yaml @@ -0,0 +1,208 @@ +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: -1 + eval_during_train: true + eval_interval: 1 + epochs: 1024 + iter_per_epoch: 1024 + print_batch_step: 20 + use_visualdl: false + use_dali: false + train_mode: fixmatch_ccssl + image_shape: [3, 32, 32] + save_inference_dir: ./inference + +SSL: + T: 1 + threshold: 0.95 + +EMA: + decay: 0.999 + +Arch: + name: RecModel + infer_output_key: logits + infer_add_softmax: false + head_feature_from: backbone + Backbone: + name: WideResNet + widen_factor: 2 + depth: 28 + dropout: 0 + num_classes: 10 + low_dim: 64 + proj: false + proj_after: false + BackboneStopLayer: + name: bn1 + Neck: + name: FRFNNeck + num_features: 128 + low_dim: 64 + Head: + name: FC + embedding_size: 128 + class_num: 10 + + use_sync_bn: true + +Loss: + Train: + - CELoss: + weight: 1.0 + reduction: "mean" + Eval: + - CELoss: + weight: 1.0 + +UnLabelLoss: + Train: + - CCSSLCELoss: + weight: 1. 
+ - SoftSupConLoss: + weight: 1.0 + temperature: 0.07 + +Optimizer: + name: Momentum + momentum: 0.9 + use_nesterov: true + weight_decay: 0.001 + lr: + name: 'CosineFixmatch' + learning_rate: 0.03 + num_warmup_steps: 0 + +DataLoader: + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + Train: + dataset: + name: Cifar10 + data_file: null + mode: 'train' + download: true + backend: 'pil' + sample_per_label: 400 + expand_labels: 1 + transform_ops: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: true + shuffle: true + loader: + num_workers: 4 + use_shared_memory: true + + UnLabelTrain: + dataset: + name: Cifar10 + data_file: null + mode: 'train' + backend: 'pil' + download: true + + transform_ops_weak: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + + transform_ops_strong: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - RandAugment: + num_layers: 2 + magnitude: 10 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + + transform_ops_strong2: + - RandCropImageV2: + size: [32, 32] + - RandFlipImage: + flip_code: 1 + - RandomApply: + transforms: + - RawColorJitter: + brightness: 0.4 + contrast: 0.4 + saturation: 0.4 + hue: 0.1 + p: 1.0 # refer to official settings + - RandomGrayscale: + p: 0.2 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + + sampler: + name: DistributedBatchSampler + batch_size: 448 + drop_last: true + shuffle: true + loader: + num_workers: 4 + use_shared_memory: true + + Eval: + dataset: + name: Cifar10 + mode: 'test' + backend: 'pil' + download: true + data_file: null + transform_ops: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: true + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_250.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_250.yaml new file mode 100644 index 000000000..e8e00e8c8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_250.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: '../test/torch2paddle_cifar10' + output_dir: ./output_25 + device: gpu + save_interval: -1 + eval_during_train: True + eval_interval: 1 + epochs: 1024 + iter_per_epoch: 1024 + print_batch_step: 20 + use_visualdl: False + use_dali: False + train_mode: fixmatch + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +SSL: + tempture: 1 + threshold: 0.95 + +EMA: + decay: 0.999 + +# AMP: +# scale_loss: 65536 +# use_dynamic_loss_scaling: True +# # O1: mixed fp16 +# 
level: O1 + +# model architecture +Arch: + name: WideResNet + depth: 28 + widen_factor: 2 + dropout: 0 + num_classes: 10 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + reduction: "mean" + Eval: + - CELoss: + weight: 1.0 +UnLabelLoss: + Train: + - CELoss: + weight: 1.0 + reduction: "none" + +Optimizer: + name: Momentum + momentum: 0.9 + use_nesterov: True + no_weight_decay_name: bn bias + weight_decay: 0.0005 + lr: + name: CosineFixmatch + learning_rate: 0.03 + num_warmup_steps: 0 + num_cycles: 0.4375 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: Cifar10 + data_file: None + mode: 'train' + download: True + backend: 'pil' + sample_per_label: 25 + expand_labels: 263 + transform_ops: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + UnLabelTrain: + dataset: + name: Cifar10 + data_file: None + mode: 'train' + download: True + backend: 'pil' + sample_per_label: None + transform_ops_weak: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + transform_ops_strong: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - RandAugment: + num_layers: 2 + magnitude: 10 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 448 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + + Eval: + dataset: + name: Cifar10 + data_file: None + mode: 'test' + download: True + backend: 'pil' + sample_per_label: None + transform_ops: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_40.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_40.yaml new file mode 100644 index 000000000..0327fcd9c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_40.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/torch2paddle_weight/torch2paddle_initialize_cifar10_WideResNet_depth28_widenfactor2_classnum10.pdparams' + output_dir: ./output + device: gpu + save_interval: -1 + eval_during_train: True + eval_interval: 1 + epochs: 1024 + iter_per_epoch: 1024 + print_batch_step: 20 + use_visualdl: False + use_dali: False + train_mode: fixmatch + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +SSL: + tempture: 1 + threshold: 0.95 + +EMA: + decay: 0.999 + 
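Editorial note: these FixMatch configs share SSL.threshold 0.95 and an UnLabelLoss with reduction "none": pseudo-labels are taken from the weakly augmented view, kept only when the model is confident, and then used to supervise the strongly augmented view. A compact sketch of that masking rule (illustrative, not the ppcls fixmatch trainer):

```python
import numpy as np

def fixmatch_unlabeled_loss(probs_weak, logits_strong, threshold=0.95):
    pseudo = probs_weak.argmax(axis=1)
    mask = (probs_weak.max(axis=1) >= threshold).astype(np.float64)
    z = logits_strong - logits_strong.max(axis=1, keepdims=True)  # stability
    log_probs = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    ce = -log_probs[np.arange(len(pseudo)), pseudo]  # reduction: "none"
    return (ce * mask).mean()
```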
+# AMP: +# scale_loss: 65536 +# use_dynamic_loss_scaling: True +# # O1: mixed fp16 +# level: O1 + +# model architecture +Arch: + name: WideResNet + depth: 28 + widen_factor: 2 + dropout: 0 + num_classes: 10 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + reduction: "mean" + Eval: + - CELoss: + weight: 1.0 +UnLabelLoss: + Train: + - CELoss: + weight: 1.0 + reduction: "none" + +Optimizer: + name: Momentum + momentum: 0.9 + use_nesterov: True + no_weight_decay_name: bn bias + weight_decay: 0.0005 + lr: + name: CosineFixmatch + learning_rate: 0.03 + num_warmup_steps: 0 + num_cycles: 0.4375 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: Cifar10 + data_file: None + mode: 'train' + download: True + backend: 'pil' + sample_per_label: 4 + expand_labels: 1639 + transform_ops: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + UnLabelTrain: + dataset: + name: Cifar10 + data_file: None + mode: 'train' + download: True + backend: 'pil' + sample_per_label: None + transform_ops_weak: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + transform_ops_strong: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - RandAugment: + num_layers: 2 + magnitude: 10 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 448 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + + Eval: + dataset: + name: Cifar10 + data_file: None + mode: 'test' + download: True + backend: 'pil' + sample_per_label: None + transform_ops: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_4000.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_4000.yaml new file mode 100644 index 000000000..0989fc89e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_4000.yaml @@ -0,0 +1,175 @@ +# global configs +Global: + checkpoints: null + pretrained_model: 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/torch2paddle_weight/torch2paddle_initialize_cifar10_WideResNet_depth28_widenfactor2_classnum10.pdparams' + output_dir: ./output + device: gpu + save_interval: -1 + eval_during_train: True + eval_interval: 1 + epochs: 1024 + iter_per_epoch: 1024 + print_batch_step: 20 + use_visualdl: False + use_dali: False + train_mode: fixmatch + # used for static mode and model export + image_shape: [3, 224, 224] + 
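Editorial note: the sample_per_label / expand_labels pairs in the two configs above (25/263 and 4/1639) are consistent with replicating the tiny labeled split until it covers one full epoch of batch_size x iter_per_epoch samples; this is a reading of the numbers, not something the files state:

```python
# Back-of-envelope check with values copied from the configs above.
import math

batch_size, iter_per_epoch, num_classes = 64, 1024, 10
for sample_per_label, expand_labels in [(25, 263), (4, 1639)]:
    num_labeled = sample_per_label * num_classes
    print(expand_labels, math.ceil(batch_size * iter_per_epoch / num_labeled))
# prints 263 263 and 1639 1639
```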
save_inference_dir: ./inference + +SSL: + tempture: 1 + threshold: 0.95 + +EMA: + decay: 0.999 + +# AMP: +# scale_loss: 65536 +# use_dynamic_loss_scaling: True +# # O1: mixed fp16 +# level: O1 + +# model architecture +Arch: + name: WideResNet + depth: 28 + widen_factor: 2 + dropout: 0 + num_classes: 10 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + reduction: "mean" + Eval: + - CELoss: + weight: 1.0 +UnLabelLoss: + Train: + - CELoss: + weight: 1.0 + reduction: "none" + +Optimizer: + name: Momentum + momentum: 0.9 + use_nesterov: True + no_weight_decay_name: bn bias + weight_decay: 0.0005 + lr: + name: CosineFixmatch + learning_rate: 0.03 + num_warmup_steps: 0 + num_cycles: 0.4375 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: Cifar10 + data_file: None + mode: 'train' + download: True + backend: 'pil' + sample_per_label: 400 + expand_labels: 17 + transform_ops: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + UnLabelTrain: + dataset: + name: Cifar10 + data_file: None + mode: 'train' + download: True + backend: 'pil' + sample_per_label: None + transform_ops_weak: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + transform_ops_strong: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - RandAugment: + num_layers: 2 + magnitude: 10 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 448 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + + Eval: + dataset: + name: Cifar10 + data_file: None + mode: 'test' + download: True + backend: 'pil' + sample_per_label: None + transform_ops: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_40_4gpu.yaml b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_40_4gpu.yaml new file mode 100644 index 000000000..feeb1a9e8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/configs/ssl/FixMatch/FixMatch_cifar10_40_4gpu.yaml @@ -0,0 +1,176 @@ +# global configs +Global: + checkpoints: null + pretrained_model: 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/torch2paddle_weight/torch2paddle_initialize_cifar10_WideResNet_depth28_widenfactor2_classnum10.pdparams' + output_dir: ./output + device: gpu + save_interval: -1 + eval_during_train: True + eval_interval: 1 + epochs: 1024 + iter_per_epoch: 1024 + print_batch_step: 20 + use_visualdl: False + use_dali: 
False + train_mode: fixmatch + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +SSL: + tempture: 1 + threshold: 0.95 + +EMA: + decay: 0.999 + +# AMP: +# scale_loss: 65536 +# use_dynamic_loss_scaling: True +# # O1: mixed fp16 +# level: O1 + +# model architecture +Arch: + name: WideResNet + depth: 28 + widen_factor: 2 + dropout: 0 + num_classes: 10 + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + reduction: "mean" + Eval: + - CELoss: + weight: 1.0 +UnLabelLoss: + Train: + - CELoss: + weight: 1.0 + reduction: "none" + +Optimizer: + name: Momentum + momentum: 0.9 + use_nesterov: True + no_weight_decay_name: bn bias + weight_decay: 0.0005 + lr: + name: CosineFixmatch + learning_rate: 0.03 + num_warmup_steps: 0 + num_cycles: 0.4375 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: Cifar10 + data_file: None + mode: 'train' + download: True + backend: 'pil' + sample_per_label: 4 + expand_labels: 1639 + transform_ops: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 16 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + UnLabelTrain: + dataset: + name: Cifar10 + data_file: None + mode: 'train' + download: True + backend: 'pil' + sample_per_label: None + transform_ops_weak: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + transform_ops_strong: + - RandFlipImage: + flip_code: 1 + - Pad_paddle_vision: + padding: 4 + padding_mode: reflect + - RandCropImageV2: + size: [32, 32] + - RandAugment: + num_layers: 2 + magnitude: 10 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 112 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + + Eval: + dataset: + name: Cifar10 + data_file: None + mode: 'test' + download: True + backend: 'pil' + sample_per_label: None + transform_ops: + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.4914, 0.4822, 0.4465] + std: [0.2471, 0.2435, 0.2616] + order: hwc + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/engine.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/engine.py new file mode 100644 index 000000000..7cce65daf --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/engine.py @@ -0,0 +1,717 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import gc +import shutil +import copy +import platform +import paddle +import paddle.distributed as dist +from visualdl import LogWriter +from paddle import nn +import numpy as np +import random + +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger +from ppcls.utils.logger import init_logger +from ppcls.utils.config import print_config, dump_infer_config +from ppcls.data import build_dataloader +from ppcls.arch import build_model, RecModel, DistillationModel, TheseusLayer +from ppcls.arch import apply_to_static +from ppcls.loss import build_loss +from ppcls.metric import build_metrics +from ppcls.optimizer import build_optimizer +from ppcls.utils.amp import AutoCast, build_scaler +from ppcls.utils.ema import ExponentialMovingAverage +from ppcls.utils.save_load import load_dygraph_pretrain +from ppcls.utils.save_load import init_model +from ppcls.utils.save_result import update_train_results +from ppcls.utils import save_load, save_predict_result + +from ppcls.data.utils.get_image_list import get_image_list +from ppcls.data.postprocess import build_postprocess +from ppcls.data import create_operators +from ppcls.engine import train as train_method +from ppcls.engine.train.utils import type_name +from ppcls.engine import evaluation +from ppcls.arch.gears.identity_head import IdentityHead + + +class Engine(object): + def __init__(self, config, mode="train"): + assert mode in ["train", "eval", "infer", "export"] + self.mode = mode + self.config = config + self.eval_mode = self.config["Global"].get("eval_mode", + "classification") + self.train_mode = self.config["Global"].get("train_mode", None) + if "Head" in self.config["Arch"] or self.config["Arch"].get("is_rec", + False): + self.is_rec = True + else: + self.is_rec = False + + # set seed + seed = self.config["Global"].get("seed", False) + if seed or seed == 0: + assert isinstance(seed, int), "The 'seed' must be a integer!" 
+ paddle.seed(seed) + np.random.seed(seed) + random.seed(seed) + + # init logger + self.output_dir = self.config['Global']['output_dir'] + log_file = os.path.join(self.output_dir, f"{mode}.log") + log_ranks = self.config['Global'].get("log_ranks", "0") + init_logger(log_file=log_file, log_ranks=log_ranks) + print_config(config) + + # init train_func and eval_func + assert self.eval_mode in [ + "classification", "retrieval", "adaface", "face_recognition" + ], logger.error("Invalid eval mode: {}".format(self.eval_mode)) + if self.train_mode is None: + self.train_epoch_func = train_method.train_epoch + else: + self.train_epoch_func = getattr(train_method, + "train_epoch_" + self.train_mode) + self.eval_func = getattr(evaluation, self.eval_mode + "_eval") + + self.use_dali = self.config['Global'].get("use_dali", False) + + # for visualdl + self.vdl_writer = None + if self.config['Global'][ + 'use_visualdl'] and mode == "train" and dist.get_rank() == 0: + vdl_writer_path = self.output_dir + if not os.path.exists(vdl_writer_path): + os.makedirs(vdl_writer_path) + self.vdl_writer = LogWriter(logdir=vdl_writer_path) + + # set device + assert self.config["Global"]["device"] in [ + "cpu", "gpu", "xpu", "npu", "mlu", "ascend", "intel_gpu", "mps" + ] + self.device = paddle.set_device(self.config["Global"]["device"]) + logger.info('train with paddle {} and device {}'.format( + paddle.__version__, self.device)) + + # gradient accumulation + self.update_freq = self.config["Global"].get("update_freq", 1) + + if "class_num" in config["Global"]: + global_class_num = config["Global"]["class_num"] + if "class_num" not in config["Arch"]: + config["Arch"]["class_num"] = global_class_num + msg = f"The Global.class_num will be deprecated. Please use Arch.class_num instead. Arch.class_num has been set to {global_class_num}." + else: + msg = "The Global.class_num will be deprecated. Please use Arch.class_num instead. The Global.class_num has been ignored." + logger.warning(msg) + #TODO(gaotingquan): support rec + class_num = config["Arch"].get("class_num", None) + self.config["DataLoader"].update({"class_num": class_num}) + self.config["DataLoader"].update({ + "epochs": self.config["Global"]["epochs"] + }) + + # build dataloader + if self.mode == 'train': + self.train_dataloader = build_dataloader( + self.config["DataLoader"], "Train", self.device, self.use_dali) + if self.config["DataLoader"].get('UnLabelTrain', None) is not None: + self.unlabel_train_dataloader = build_dataloader( + self.config["DataLoader"], "UnLabelTrain", self.device, + self.use_dali) + else: + self.unlabel_train_dataloader = None + + self.iter_per_epoch = len( + self.train_dataloader) - 1 if platform.system( + ) == "Windows" else len(self.train_dataloader) + if self.config["Global"].get("iter_per_epoch", None): + # set max iteration per epoch mannualy, when training by iteration(s), such as XBM, FixMatch. + self.iter_per_epoch = self.config["Global"].get( + "iter_per_epoch") + if self.iter_per_epoch < self.update_freq: + logger.warning( + "The arg Global.update_freq greater than iter_per_epoch and has been set to 1. This may be caused by too few of batches." 
+ ) + self.update_freq = 1 + self.iter_per_epoch = self.iter_per_epoch // self.update_freq * self.update_freq + + if self.mode == "eval" or (self.mode == "train" and + self.config["Global"]["eval_during_train"]): + if self.eval_mode in ["classification", "adaface", "face_recognition"]: + self.eval_dataloader = build_dataloader( + self.config["DataLoader"], "Eval", self.device, + self.use_dali) + elif self.eval_mode == "retrieval": + self.gallery_query_dataloader = None + if len(self.config["DataLoader"]["Eval"].keys()) == 1: + key = list(self.config["DataLoader"]["Eval"].keys())[0] + self.gallery_query_dataloader = build_dataloader( + self.config["DataLoader"]["Eval"], key, self.device, + self.use_dali) + else: + self.gallery_dataloader = build_dataloader( + self.config["DataLoader"]["Eval"], "Gallery", + self.device, self.use_dali) + self.query_dataloader = build_dataloader( + self.config["DataLoader"]["Eval"], "Query", self.device, + self.use_dali) + + # build loss + if self.mode == "train": + label_loss_info = self.config["Loss"]["Train"] + self.train_loss_func = build_loss(label_loss_info) + unlabel_loss_info = self.config.get("UnLabelLoss", {}).get("Train", + None) + self.unlabel_train_loss_func = build_loss(unlabel_loss_info) + if self.mode == "eval" or (self.mode == "train" and + self.config["Global"]["eval_during_train"]): + loss_config = self.config.get("Loss", None) + if loss_config is not None: + loss_config = loss_config.get("Eval") + if loss_config is not None: + self.eval_loss_func = build_loss(loss_config) + else: + self.eval_loss_func = None + else: + self.eval_loss_func = None + + # build metric + if self.mode == 'train' and "Metric" in self.config and "Train" in self.config[ + "Metric"] and self.config["Metric"]["Train"]: + metric_config = self.config["Metric"]["Train"] + if hasattr(self.train_dataloader, "collate_fn" + ) and self.train_dataloader.collate_fn is not None: + for m_idx, m in enumerate(metric_config): + if "TopkAcc" in m: + msg = f"Unable to calculate accuracy when using \"batch_transform_ops\". The metric \"{m}\" has been removed." + logger.warning(msg) + metric_config.pop(m_idx) + self.train_metric_func = build_metrics(metric_config) + else: + self.train_metric_func = None + + if self.mode == "eval" or (self.mode == "train" and + self.config["Global"]["eval_during_train"]): + if self.eval_mode == "classification": + if "Metric" in self.config and "Eval" in self.config["Metric"]: + self.eval_metric_func = build_metrics(self.config["Metric"][ + "Eval"]) + else: + self.eval_metric_func = None + elif self.eval_mode == "retrieval": + if "Metric" in self.config and "Eval" in self.config["Metric"]: + metric_config = self.config["Metric"]["Eval"] + else: + metric_config = [{"name": "Recallk", "topk": (1, 5)}] + self.eval_metric_func = build_metrics(metric_config) + elif self.eval_mode == "face_recognition": + if "Metric" in self.config and "Eval" in self.config["Metric"]: + self.eval_metric_func = build_metrics(self.config["Metric"] + ["Eval"]) + else: + self.eval_metric_func = None + + # build model + self.model = build_model(self.config, self.mode) + # set @to_static for benchmark, skip this by default. 
+ apply_to_static(self.config, self.model, is_rec=self.is_rec) + + # load_pretrain + if self.config["Global"]["pretrained_model"] is not None: + load_dygraph_pretrain( + [self.model, getattr(self, 'train_loss_func', None)], + self.config["Global"]["pretrained_model"]) + + # build optimizer + if self.mode == 'train': + self.optimizer, self.lr_sch = build_optimizer( + self.config["Optimizer"], self.config["Global"]["epochs"], + self.iter_per_epoch // self.update_freq, + [self.model, self.train_loss_func]) + # amp + self._init_amp() + + # build EMA model + self.ema = "EMA" in self.config and self.mode == "train" + if self.ema: + self.model_ema = ExponentialMovingAverage( + self.model, self.config['EMA'].get("decay", 0.9999)) + + # check the gpu num + world_size = dist.get_world_size() + self.config["Global"]["distributed"] = world_size != 1 + if self.mode == "train": + std_gpu_num = 8 if isinstance( + self.config["Optimizer"], + dict) and self.config["Optimizer"]["name"] == "AdamW" else 4 + if world_size != std_gpu_num: + msg = f"The training strategy provided by PaddleClas is based on {std_gpu_num} gpus. But the number of gpu is {world_size} in current training. Please modify the stategy (learning rate, batch size and so on) if use this config to train." + logger.warning(msg) + + # for distributed + if self.config["Global"]["distributed"]: + dist.init_parallel_env() + self.model = paddle.DataParallel(self.model) + if self.mode == 'train' and len(self.train_loss_func.parameters( + )) > 0: + self.train_loss_func = paddle.DataParallel(self.train_loss_func) + + # set different seed in different GPU manually in distributed environment + if seed is None: + logger.warning( + "The random seed cannot be None in a distributed environment. Global.seed has been set to 42 by default" + ) + self.config["Global"]["seed"] = seed = 42 + logger.info( + f"Set random seed to ({int(seed)} + $PADDLE_TRAINER_ID) for different trainer" + ) + paddle.seed(int(seed) + dist.get_rank()) + np.random.seed(int(seed) + dist.get_rank()) + random.seed(int(seed) + dist.get_rank()) + + # build postprocess for infer + if self.mode == 'infer': + self.preprocess_func = create_operators(self.config["Infer"][ + "transforms"]) + self.postprocess_func = build_postprocess(self.config["Infer"][ + "PostProcess"]) + + def train(self): + assert self.mode == "train" + print_batch_step = self.config['Global']['print_batch_step'] + save_interval = self.config["Global"]["save_interval"] + best_metric = { + "metric": -1.0, + "epoch": 0, + } + acc_ema = -1.0 + best_metric_ema = -1.0 + ema_module = None + if self.ema: + ema_module = self.model_ema.module + # key: + # val: metrics list word + self.output_info = dict() + self.time_info = { + "batch_cost": AverageMeter( + "batch_cost", '.5f', postfix=" s,"), + "reader_cost": AverageMeter( + "reader_cost", ".5f", postfix=" s,"), + } + # global iter counter + self.global_step = 0 + uniform_output_enabled = self.config['Global'].get( + "uniform_output_enabled", False) + + if self.config.Global.checkpoints is not None: + metric_info = init_model(self.config.Global, self.model, + self.optimizer, self.train_loss_func, + ema_module) + if metric_info is not None: + best_metric.update(metric_info) + if hasattr(self.train_dataloader.batch_sampler, "set_epoch"): + self.train_dataloader.batch_sampler.set_epoch(best_metric[ + "epoch"]) + + for epoch_id in range(best_metric["epoch"] + 1, + self.config["Global"]["epochs"] + 1): + acc = 0.0 + # for one epoch train + self.train_epoch_func(self, epoch_id, 
print_batch_step) + + if self.use_dali: + self.train_dataloader.reset() + metric_msg = ", ".join( + [self.output_info[key].avg_info for key in self.output_info]) + logger.info("[Train][Epoch {}/{}][Avg]{}".format( + epoch_id, self.config["Global"]["epochs"], metric_msg)) + self.output_info.clear() + + # eval model and save model if possible + start_eval_epoch = self.config["Global"].get("start_eval_epoch", + 0) - 1 + if self.config["Global"][ + "eval_during_train"] and epoch_id % self.config["Global"][ + "eval_interval"] == 0 and epoch_id > start_eval_epoch: + acc = self.eval(epoch_id) + + # step lr (by epoch) according to given metric, such as acc + for i in range(len(self.lr_sch)): + if getattr(self.lr_sch[i], "by_epoch", False) and \ + type_name(self.lr_sch[i]) == "ReduceOnPlateau": + self.lr_sch[i].step(acc) + + # update best_metric + if acc >= best_metric["metric"]: + best_metric["metric"] = acc + best_metric["epoch"] = epoch_id + logger.info("[Eval][Epoch {}][best metric: {}]".format( + epoch_id, best_metric["metric"])) + logger.scaler( + name="eval_acc", + value=acc, + step=epoch_id, + writer=self.vdl_writer) + + if self.ema: + ori_model, self.model = self.model, ema_module + acc_ema = self.eval(epoch_id) + self.model = ori_model + ema_module.eval() + + # update best_ema + if acc_ema > best_metric_ema: + best_metric_ema = acc_ema + logger.info("[Eval][Epoch {}][best metric ema: {}]".format( + epoch_id, best_metric_ema)) + logger.scaler( + name="eval_acc_ema", + value=acc_ema, + step=epoch_id, + writer=self.vdl_writer) + + # save best model from best_acc or best_ema_acc + if max(acc, acc_ema) >= max(best_metric["metric"], + best_metric_ema): + metric_info = { + "metric": max(acc, acc_ema), + "epoch": epoch_id + } + prefix = "best_model" + save_load.save_model( + self.model, + self.optimizer, + metric_info, + os.path.join(self.output_dir, prefix) + if uniform_output_enabled else self.output_dir, + ema=ema_module, + model_name=self.config["Arch"]["name"], + prefix=prefix, + loss=self.train_loss_func, + save_student_model=True) + if uniform_output_enabled: + save_path = os.path.join(self.output_dir, prefix, + "inference") + self.export(save_path, uniform_output_enabled) + gc.collect() + if self.ema: + ema_save_path = os.path.join( + self.output_dir, prefix, "inference_ema") + self.export(ema_save_path, uniform_output_enabled) + gc.collect() + update_train_results( + self.config, prefix, metric_info, ema=self.ema) + save_load.save_model_info(metric_info, self.output_dir, + prefix) + + self.model.train() + + # save model + if save_interval > 0 and epoch_id % save_interval == 0: + metric_info = {"metric": acc, "epoch": epoch_id} + prefix = "epoch_{}".format(epoch_id) + save_load.save_model( + self.model, + self.optimizer, + metric_info, + os.path.join(self.output_dir, prefix) + if uniform_output_enabled else self.output_dir, + ema=ema_module, + model_name=self.config["Arch"]["name"], + prefix=prefix, + loss=self.train_loss_func) + if uniform_output_enabled: + save_path = os.path.join(self.output_dir, prefix, + "inference") + self.export(save_path, uniform_output_enabled) + gc.collect() + if self.ema: + ema_save_path = os.path.join(self.output_dir, prefix, + "inference_ema") + self.export(ema_save_path, uniform_output_enabled) + gc.collect() + update_train_results( + self.config, + prefix, + metric_info, + done_flag=epoch_id == self.config["Global"]["epochs"], + ema=self.ema) + save_load.save_model_info(metric_info, self.output_dir, + prefix) + # save the latest model + metric_info = 
{"metric": acc, "epoch": epoch_id} + prefix = "latest" + save_load.save_model( + self.model, + self.optimizer, + metric_info, + os.path.join(self.output_dir, prefix) + if uniform_output_enabled else self.output_dir, + ema=ema_module, + model_name=self.config["Arch"]["name"], + prefix=prefix, + loss=self.train_loss_func) + if uniform_output_enabled: + save_path = os.path.join(self.output_dir, prefix, "inference") + self.export(save_path, uniform_output_enabled) + gc.collect() + if self.ema: + ema_save_path = os.path.join(self.output_dir, prefix, + "inference_ema") + self.export(ema_save_path, uniform_output_enabled) + gc.collect() + save_load.save_model_info(metric_info, self.output_dir, prefix) + self.model.train() + + if self.vdl_writer is not None: + self.vdl_writer.close() + + @paddle.no_grad() + def eval(self, epoch_id=0): + assert self.mode in ["train", "eval"] + self.model.eval() + eval_result = self.eval_func(self, epoch_id) + self.model.train() + return eval_result + + @paddle.no_grad() + def infer(self): + assert self.mode == "infer" and self.eval_mode == "classification" + results = [] + total_trainer = dist.get_world_size() + local_rank = dist.get_rank() + infer_imgs = self.config["Infer"]["infer_imgs"] + infer_list = self.config["Infer"].get("infer_list", None) + image_list = get_image_list(infer_imgs, infer_list=infer_list) + # data split + image_list = image_list[local_rank::total_trainer] + + batch_size = self.config["Infer"]["batch_size"] + self.model.eval() + batch_data = [] + image_file_list = [] + save_path = self.config["Infer"].get("save_dir", None) + for idx, image_file in enumerate(image_list): + with open(image_file, 'rb') as f: + x = f.read() + try: + for process in self.preprocess_func: + x = process(x) + batch_data.append(x) + image_file_list.append(image_file) + if len(batch_data) >= batch_size or idx == len(image_list) - 1: + batch_tensor = paddle.to_tensor(batch_data) + + with self.auto_cast(is_eval=True): + out = self.model(batch_tensor) + + if isinstance(out, list): + out = out[0] + if isinstance(out, dict) and "Student" in out: + out = out["Student"] + if isinstance(out, dict) and "logits" in out: + out = out["logits"] + if isinstance(out, dict) and "output" in out: + out = out["output"] + + result = self.postprocess_func(out, image_file_list) + if not save_path: + logger.info(result) + results.extend(result) + batch_data.clear() + image_file_list.clear() + except Exception as ex: + logger.error( + "Exception occured when parse line: {} with msg: {}".format( + image_file, ex)) + continue + if save_path: + save_predict_result(save_path, results) + return results + + def export(self, + save_path=None, + uniform_output_enabled=False, + ema_module=None): + assert self.mode == "export" or uniform_output_enabled + if paddle.distributed.get_rank() != 0: + return + use_multilabel = self.config["Global"].get( + "use_multilabel", + False) or "ATTRMetric" in self.config["Metric"]["Eval"][0] + model = self.model_ema.module if self.ema else self.model + if hasattr(model, '_layers'): + model = copy.deepcopy(model._layers) + else: + model = copy.deepcopy(model) + model = ExportModel(self.config["Arch"], model + if not ema_module else ema_module, use_multilabel) + if self.config["Global"][ + "pretrained_model"] is not None and not uniform_output_enabled: + load_dygraph_pretrain(model.base_model, + self.config["Global"]["pretrained_model"]) + model.eval() + # for re-parameterization nets + for layer in model.sublayers(): + if hasattr(layer, "re_parameterize") and not 
getattr(layer, + "is_repped"): + layer.re_parameterize() + if not save_path: + save_path = os.path.join( + self.config["Global"]["save_inference_dir"], "inference") + else: + save_path = os.path.join(save_path, "inference") + + model = paddle.jit.to_static( + model, + input_spec=[ + paddle.static.InputSpec( + shape=[None] + self.config["Global"]["image_shape"], + dtype='float32') + ]) + if hasattr(model.base_model, + "quanter") and model.base_model.quanter is not None: + model.base_model.quanter.save_quantized_model(model, + save_path + "_int8") + else: + paddle.jit.save(model, save_path) + if self.config["Global"].get("export_for_fd", + False) or uniform_output_enabled: + dst_path = os.path.join(os.path.dirname(save_path), 'inference.yml') + dump_infer_config(self.config, dst_path) + logger.info( + f"Export succeeded! The inference model exported has been saved in \"{save_path}\"." + ) + + def _init_amp(self): + if self.mode == "export": + return + + amp_config = self.config.get("AMP", None) + use_amp = True if amp_config and amp_config.get("use_amp", + True) else False + + if not use_amp: + self.auto_cast = AutoCast(use_amp) + self.scaler = build_scaler(use_amp) + else: + AMP_RELATED_FLAGS_SETTING = {'FLAGS_max_inplace_grad_add': 8, } + if paddle.is_compiled_with_cuda(): + AMP_RELATED_FLAGS_SETTING.update({ + 'FLAGS_cudnn_batchnorm_spatial_persistent': 1 + }) + paddle.set_flags(AMP_RELATED_FLAGS_SETTING) + + use_promote = amp_config.get("use_promote", False) + amp_level = amp_config.get("level", "O1") + if amp_level not in ["O1", "O2"]: + msg = "[Parameter Error]: The optimize level of AMP only support 'O1' and 'O2'. The level has been set 'O1'." + logger.warning(msg) + amp_level = amp_config["level"] = "O1" + + amp_eval = self.config["AMP"].get("use_fp16_test", False) + # TODO(gaotingquan): Paddle not yet support FP32 evaluation when training with AMPO2 + if self.mode == "train" and self.config["Global"].get( + "eval_during_train", + True) and amp_level == "O2" and amp_eval == False: + msg = "PaddlePaddle only support FP16 evaluation when training with AMP O2 now. 
" + logger.warning(msg) + self.config["AMP"]["use_fp16_test"] = True + amp_eval = True + + self.auto_cast = AutoCast( + use_amp, + amp_level=amp_level, + use_promote=use_promote, + amp_eval=amp_eval) + + scale_loss = amp_config.get("scale_loss", 1.0) + use_dynamic_loss_scaling = amp_config.get( + "use_dynamic_loss_scaling", False) + self.scaler = build_scaler( + use_amp, + scale_loss=scale_loss, + use_dynamic_loss_scaling=use_dynamic_loss_scaling) + + if self.mode == "train": + self.model, self.optimizer = paddle.amp.decorate( + models=self.model, + optimizers=self.optimizer, + level=amp_level, + save_dtype='float32') + elif amp_eval: + self.model = paddle.amp.decorate( + models=self.model, level=amp_level, save_dtype='float32') + + if self.mode == "train" and len(self.train_loss_func.parameters( + )) > 0: + self.train_loss_func = paddle.amp.decorate( + models=self.train_loss_func, + level=self.amp_level, + save_dtype='float32') + + +class ExportModel(TheseusLayer): + """ + ExportModel: add softmax onto the model + """ + + def __init__(self, config, model, use_multilabel): + super().__init__() + self.base_model = model + # we should choose a final model to export + if isinstance(self.base_model, DistillationModel): + self.infer_model_name = config["infer_model_name"] + else: + self.infer_model_name = None + + self.infer_output_key = config.get("infer_output_key", None) + if self.infer_output_key == "features" and isinstance(self.base_model, + RecModel): + self.base_model.head = IdentityHead() + if use_multilabel: + self.out_act = nn.Sigmoid() + else: + if config.get("infer_add_softmax", True): + self.out_act = nn.Softmax(axis=-1) + else: + self.out_act = None + + def eval(self): + self.training = False + for layer in self.sublayers(): + layer.training = False + layer.eval() + + def forward(self, x): + x = self.base_model(x) + if isinstance(x, list): + x = x[0] + if self.infer_model_name is not None: + x = x[self.infer_model_name] + if self.infer_output_key is not None: + x = x[self.infer_output_key] + if self.out_act is not None: + if isinstance(x, dict): + x = x["logits"] + x = self.out_act(x) + return x diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/__init__.py new file mode 100644 index 000000000..0ac783e4c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ppcls.engine.evaluation.classification import classification_eval +from ppcls.engine.evaluation.retrieval import retrieval_eval +from ppcls.engine.evaluation.adaface import adaface_eval +from ppcls.engine.evaluation.face_recognition import face_recognition_eval diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/adaface.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/adaface.py new file mode 100644 index 000000000..e62144b5c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/adaface.py @@ -0,0 +1,260 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import time +import numpy as np +import platform +import paddle +import sklearn +from sklearn.model_selection import KFold +from sklearn.decomposition import PCA + +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger + + +def fuse_features_with_norm(stacked_embeddings, stacked_norms): + assert stacked_embeddings.ndim == 3 # (n_features_to_fuse, batch_size, channel) + assert stacked_norms.ndim == 3 # (n_features_to_fuse, batch_size, 1) + pre_norm_embeddings = stacked_embeddings * stacked_norms + fused = pre_norm_embeddings.sum(axis=0) + norm = paddle.norm(fused, 2, 1, True) + fused = paddle.divide(fused, norm) + return fused, norm + + +def adaface_eval(engine, epoch_id=0): + output_info = dict() + time_info = { + "batch_cost": AverageMeter( + "batch_cost", '.5f', postfix=" s,"), + "reader_cost": AverageMeter( + "reader_cost", ".5f", postfix=" s,"), + } + print_batch_step = engine.config["Global"]["print_batch_step"] + + metric_key = None + tic = time.time() + unique_dict = {} + for iter_id, batch in enumerate(engine.eval_dataloader): + images, labels, dataname, image_index = batch + if iter_id == 5: + for key in time_info: + time_info[key].reset() + time_info["reader_cost"].update(time.time() - tic) + batch_size = images.shape[0] + batch[0] = paddle.to_tensor(images) + embeddings = engine.model(images, labels)['features'] + norms = paddle.divide(embeddings, paddle.norm(embeddings, 2, 1, True)) + embeddings = paddle.divide(embeddings, norms) + fliped_images = paddle.flip(images, axis=[3]) + flipped_embeddings = engine.model(fliped_images, labels)['features'] + flipped_norms = paddle.divide( + flipped_embeddings, paddle.norm(flipped_embeddings, 2, 1, True)) + flipped_embeddings = paddle.divide(flipped_embeddings, flipped_norms) + stacked_embeddings = paddle.stack( + [embeddings, flipped_embeddings], axis=0) + stacked_norms = paddle.stack([norms, flipped_norms], axis=0) + embeddings, norms = fuse_features_with_norm(stacked_embeddings, + stacked_norms) + + for out, nor, label, data, idx in zip(embeddings, norms, labels, + dataname, image_index): + unique_dict[int(idx.numpy())] = { + 'output': out, + 'norm': nor, + 'target': label, + 'dataname': data + } + # 
calc metric + time_info["batch_cost"].update(time.time() - tic) + if iter_id % print_batch_step == 0: + time_msg = "s, ".join([ + "{}: {:.5f}".format(key, time_info[key].avg) + for key in time_info + ]) + + ips_msg = "ips: {:.5f} images/sec".format( + batch_size / time_info["batch_cost"].avg) + + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].val) + for key in output_info + ]) + logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format( + epoch_id, iter_id, + len(engine.eval_dataloader), metric_msg, time_msg, ips_msg)) + + tic = time.time() + + unique_keys = sorted(unique_dict.keys()) + all_output_tensor = paddle.stack( + [unique_dict[key]['output'] for key in unique_keys], axis=0) + all_norm_tensor = paddle.stack( + [unique_dict[key]['norm'] for key in unique_keys], axis=0) + all_target_tensor = paddle.stack( + [unique_dict[key]['target'] for key in unique_keys], axis=0) + all_dataname_tensor = paddle.stack( + [unique_dict[key]['dataname'] for key in unique_keys], axis=0) + + eval_result = cal_metric(all_output_tensor, all_norm_tensor, + all_target_tensor, all_dataname_tensor) + + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].avg) for key in output_info + ]) + face_msg = ", ".join([ + "{}: {:.5f}".format(key, eval_result[key]) + for key in eval_result.keys() + ]) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg + ", " + + face_msg)) + + # return 1st metric in the dict + return eval_result['all_test_acc'] + + +def cal_metric(all_output_tensor, all_norm_tensor, all_target_tensor, + all_dataname_tensor): + all_target_tensor = all_target_tensor.reshape([-1]) + all_dataname_tensor = all_dataname_tensor.reshape([-1]) + dataname_to_idx = { + "agedb_30": 0, + "cfp_fp": 1, + "lfw": 2, + "cplfw": 3, + "calfw": 4 + } + idx_to_dataname = {val: key for key, val in dataname_to_idx.items()} + test_logs = {} + # _, indices = paddle.unique(all_dataname_tensor, return_index=True, return_inverse=False, return_counts=False) + for dataname_idx in all_dataname_tensor.unique(): + dataname = idx_to_dataname[dataname_idx.item()] + # per dataset evaluation + embeddings = all_output_tensor[all_dataname_tensor == + dataname_idx].numpy() + labels = all_target_tensor[all_dataname_tensor == dataname_idx].numpy() + issame = labels[0::2] + tpr, fpr, accuracy, best_thresholds = evaluate_face( + embeddings, issame, nrof_folds=10) + acc, best_threshold = accuracy.mean(), best_thresholds.mean() + + num_test_samples = len(embeddings) + test_logs[f'{dataname}_test_acc'] = acc + test_logs[f'{dataname}_test_best_threshold'] = best_threshold + test_logs[f'{dataname}_num_test_samples'] = num_test_samples + + test_acc = np.mean([ + test_logs[f'{dataname}_test_acc'] + for dataname in dataname_to_idx.keys() + if f'{dataname}_test_acc' in test_logs + ]) + + test_logs['all_test_acc'] = test_acc + return test_logs + + +def evaluate_face(embeddings, actual_issame, nrof_folds=10, pca=0): + # Calculate evaluation metrics + thresholds = np.arange(0, 4, 0.01) + embeddings1 = embeddings[0::2] + embeddings2 = embeddings[1::2] + tpr, fpr, accuracy, best_thresholds = calculate_roc( + thresholds, + embeddings1, + embeddings2, + np.asarray(actual_issame), + nrof_folds=nrof_folds, + pca=pca) + return tpr, fpr, accuracy, best_thresholds + + +def calculate_roc(thresholds, + embeddings1, + embeddings2, + actual_issame, + nrof_folds=10, + pca=0): + assert (embeddings1.shape[0] == embeddings2.shape[0]) + assert (embeddings1.shape[1] == embeddings2.shape[1]) + nrof_pairs = 
min(len(actual_issame), embeddings1.shape[0]) + nrof_thresholds = len(thresholds) + k_fold = KFold(n_splits=nrof_folds, shuffle=False) + + tprs = np.zeros((nrof_folds, nrof_thresholds)) + fprs = np.zeros((nrof_folds, nrof_thresholds)) + accuracy = np.zeros((nrof_folds)) + best_thresholds = np.zeros((nrof_folds)) + indices = np.arange(nrof_pairs) + # print('pca', pca) + dist = None + + if pca == 0: + diff = np.subtract(embeddings1, embeddings2) + dist = np.sum(np.square(diff), 1) + + for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)): + # print('train_set', train_set) + # print('test_set', test_set) + if pca > 0: + print('doing pca on', fold_idx) + embed1_train = embeddings1[train_set] + embed2_train = embeddings2[train_set] + _embed_train = np.concatenate((embed1_train, embed2_train), axis=0) + # print(_embed_train.shape) + pca_model = PCA(n_components=pca) + pca_model.fit(_embed_train) + embed1 = pca_model.transform(embeddings1) + embed2 = pca_model.transform(embeddings2) + embed1 = sklearn.preprocessing.normalize(embed1) + embed2 = sklearn.preprocessing.normalize(embed2) + # print(embed1.shape, embed2.shape) + diff = np.subtract(embed1, embed2) + dist = np.sum(np.square(diff), 1) + + # Find the best threshold for the fold + acc_train = np.zeros((nrof_thresholds)) + for threshold_idx, threshold in enumerate(thresholds): + _, _, acc_train[threshold_idx] = calculate_accuracy( + threshold, dist[train_set], actual_issame[train_set]) + best_threshold_index = np.argmax(acc_train) + best_thresholds[fold_idx] = thresholds[best_threshold_index] + for threshold_idx, threshold in enumerate(thresholds): + tprs[fold_idx, threshold_idx], fprs[ + fold_idx, threshold_idx], _ = calculate_accuracy( + threshold, dist[test_set], actual_issame[test_set]) + _, _, accuracy[fold_idx] = calculate_accuracy( + thresholds[best_threshold_index], dist[test_set], + actual_issame[test_set]) + + tpr = np.mean(tprs, 0) + fpr = np.mean(fprs, 0) + return tpr, fpr, accuracy, best_thresholds + + +def calculate_accuracy(threshold, dist, actual_issame): + predict_issame = np.less(dist, threshold) + tp = np.sum(np.logical_and(predict_issame, actual_issame)) + fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame))) + tn = np.sum( + np.logical_and( + np.logical_not(predict_issame), np.logical_not(actual_issame))) + fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame)) + + tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn) + fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn) + acc = float(tp + tn) / dist.size + return tpr, fpr, acc diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/classification.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/classification.py new file mode 100644 index 000000000..d802eeeda --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/classification.py @@ -0,0 +1,175 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
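calculate_accuracy above thresholds the squared L2 distance of each face pair and counts TP/FP/TN/FN against the is-same labels. A toy NumPy walk-through of the same arithmetic (the distances and labels are made up, purely to show what the function returns):

import numpy as np

dist = np.array([0.2, 1.5, 0.4, 3.0])                 # squared L2 distances of 4 pairs
actual_issame = np.array([True, True, False, False])  # ground-truth same-identity flags
threshold = 1.0

predict_issame = np.less(dist, threshold)             # [True, False, True, False]
tp = np.sum(np.logical_and(predict_issame, actual_issame))                                   # 1
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))                   # 1
tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))   # 1
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))                   # 1
tpr = tp / (tp + fn)          # 0.5: half of the genuine pairs fall under the threshold
fpr = fp / (fp + tn)          # 0.5: half of the impostor pairs are wrongly accepted
acc = (tp + tn) / dist.size   # 0.5 overall verification accuracy at this threshold
print(tpr, fpr, acc)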
+# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import time +import platform +import paddle + +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger + + +def classification_eval(engine, epoch_id=0): + if hasattr(engine.eval_metric_func, "reset"): + engine.eval_metric_func.reset() + output_info = dict() + time_info = { + "batch_cost": AverageMeter( + "batch_cost", '.5f', postfix=" s,"), + "reader_cost": AverageMeter( + "reader_cost", ".5f", postfix=" s,"), + } + print_batch_step = engine.config["Global"]["print_batch_step"] + + tic = time.time() + accum_samples = 0 + total_samples = len( + engine.eval_dataloader. + dataset) if not engine.use_dali else engine.eval_dataloader.size + max_iter = len(engine.eval_dataloader) - 1 if platform.system( + ) == "Windows" else len(engine.eval_dataloader) + for iter_id, batch in enumerate(engine.eval_dataloader): + if iter_id >= max_iter: + break + if iter_id == 5: + for key in time_info: + time_info[key].reset() + + time_info["reader_cost"].update(time.time() - tic) + batch_size = batch[0].shape[0] + batch[0] = paddle.to_tensor(batch[0]) + if not engine.config["Global"].get("use_multilabel", False): + batch[1] = batch[1].reshape([-1, 1]).astype("int64") + + # image input + with engine.auto_cast(is_eval=True): + if engine.is_rec: + out = engine.model(batch[0], batch[1]) + else: + out = engine.model(batch[0]) + + # just for DistributedBatchSampler issue: repeat sampling + current_samples = batch_size * paddle.distributed.get_world_size() + accum_samples += current_samples + + if isinstance(out, dict) and "Student" in out: + out = out["Student"] + if isinstance(out, dict) and "logits" in out: + out = out["logits"] + + # gather Tensor when distributed + if paddle.distributed.get_world_size() > 1: + label_list = [] + device_id = paddle.distributed.ParallelEnv().device_id + label = batch[1].cuda(device_id) if engine.config["Global"][ + "device"] == "gpu" else batch[1] + paddle.distributed.all_gather(label_list, label) + labels = paddle.concat(label_list, 0) + + if isinstance(out, list): + preds = [] + for x in out: + pred_list = [] + paddle.distributed.all_gather(pred_list, x) + pred_x = paddle.concat(pred_list, 0) + preds.append(pred_x) + else: + pred_list = [] + paddle.distributed.all_gather(pred_list, out) + preds = paddle.concat(pred_list, 0) + + if accum_samples > total_samples and not engine.use_dali: + if isinstance(preds, list): + preds = [ + pred[:total_samples + current_samples - accum_samples] + for pred in preds + ] + else: + preds = preds[:total_samples + current_samples - + accum_samples] + labels = labels[:total_samples + current_samples - + accum_samples] + current_samples = total_samples + current_samples - accum_samples + else: + labels = batch[1] + preds = out + + # calc loss + if engine.eval_loss_func is not None: + with engine.auto_cast(is_eval=True): + loss_dict = engine.eval_loss_func(preds, labels) + + for key in loss_dict: + if key not in output_info: + output_info[key] = AverageMeter(key, '7.5f') + output_info[key].update(float(loss_dict[key]), current_samples) + + # calc metric + if engine.eval_metric_func is not None: + engine.eval_metric_func(preds, labels) + time_info["batch_cost"].update(time.time() - tic) + + if iter_id % print_batch_step == 0: + time_msg = "s, ".join([ + "{}: {:.5f}".format(key, time_info[key].avg) + for key in time_info + 
]) + + ips_msg = "ips: {:.5f} images/sec".format( + batch_size / time_info["batch_cost"].avg) + + if "ATTRMetric" in engine.config["Metric"]["Eval"][0]: + metric_msg = "" + else: + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].val) + for key in output_info + ]) + if "MultiLabelMAP" not in engine.config["Metric"]["Eval"][0]: + metric_msg += ", {}".format(engine.eval_metric_func.avg_info) + logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format( + epoch_id, iter_id, + len(engine.eval_dataloader), metric_msg, time_msg, ips_msg)) + + tic = time.time() + if engine.use_dali: + engine.eval_dataloader.reset() + + if "ATTRMetric" in engine.config["Metric"]["Eval"][0]: + metric_msg = ", ".join([ + "evalres: ma: {:.5f} label_f1: {:.5f} label_pos_recall: {:.5f} label_neg_recall: {:.5f} instance_f1: {:.5f} instance_acc: {:.5f} instance_prec: {:.5f} instance_recall: {:.5f}". + format(*engine.eval_metric_func.attr_res()) + ]) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg)) + + # do not try to save best eval.model + if engine.eval_metric_func is None: + return -1 + # return 1st metric in the dict + return engine.eval_metric_func.attr_res()[0] + else: + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].avg) + for key in output_info + ]) + metric_msg += ", {}".format(engine.eval_metric_func.avg_info) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg)) + + # do not try to save best eval.model + if engine.eval_metric_func is None: + return -1 + # return 1st metric in the dict + return engine.eval_metric_func.avg diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/face_recognition.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/face_recognition.py new file mode 100644 index 000000000..fcedca3c1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/face_recognition.py @@ -0,0 +1,152 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import time +import platform +import paddle +import paddle.nn.functional as F + +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger, all_gather + + +def face_recognition_eval(engine, epoch_id=0): + # reset metric on beginning of eval + if hasattr(engine.eval_metric_func, "reset"): + engine.eval_metric_func.reset() + output_info = dict() + + # log time_info for each batch + time_info = { + "batch_cost": AverageMeter( + "batch_cost", '.5f', postfix=" s,"), + "reader_cost": AverageMeter( + "reader_cost", ".5f", postfix=" s,"), + } + print_batch_step = engine.config["Global"]["print_batch_step"] + + tic = time.time() + accum_samples = 0 + total_samples = len( + engine.eval_dataloader. 
+ dataset) if not engine.use_dali else engine.eval_dataloader.size + max_iter = len(engine.eval_dataloader) - 1 if platform.system( + ) == "Windows" else len(engine.eval_dataloader) + flip_test = engine.config["Global"].get("flip_test", False) + feature_normalize = engine.config["Global"].get("feature_normalize", False) + for iter_id, batch in enumerate(engine.eval_dataloader): + if iter_id >= max_iter: + break + if iter_id == 5: + for key in time_info: + time_info[key].reset() + time_info["reader_cost"].update(time.time() - tic) + + images_left, images_right, labels = [ + paddle.to_tensor(x) for x in batch[:3]] + batch_remains = [paddle.to_tensor(x) for x in batch[3:]] + labels = labels.astype('int64') + batch_size = images_left.shape[0] + + # flip images + if flip_test: + images_left = paddle.concat( + [images_left, paddle.flip(images_left, axis=-1)], 0) + images_right = paddle.concat( + [images_right, paddle.flip(images_right, axis=-1)], 0) + + with engine.auto_cast(is_eval=True): + out_left = engine.model(images_left) + out_right = engine.model(images_right) + + # get features + if engine.config["Global"].get("retrieval_feature_from", + "features") == "features": + # use output from neck as feature + embeddings_left = out_left["features"] + embeddings_right = out_right["features"] + else: + # use output from backbone as feature + embeddings_left = out_left["backbone"] + embeddings_right = out_right["backbone"] + + # normalize features + if feature_normalize: + embeddings_left = F.normalize(embeddings_left, p=2, axis=1) + embeddings_right = F.normalize(embeddings_right, p=2, axis=1) + + # fuse features by sum up if flip_test is True + if flip_test: + embeddings_left = embeddings_left[:batch_size] + \ + embeddings_left[batch_size:] + embeddings_right = embeddings_right[:batch_size] + \ + embeddings_right[batch_size:] + + # just for DistributedBatchSampler issue: repeat sampling + current_samples = batch_size * paddle.distributed.get_world_size() + accum_samples += current_samples + + # gather Tensor when distributed + if paddle.distributed.get_world_size() > 1: + embeddings_left = all_gather(embeddings_left) + embeddings_right = all_gather(embeddings_right) + labels = all_gather(labels) + batch_remains = [all_gather(x) for x in batch_remains] + + # discard redundant padding sample(s) in the last batch + if accum_samples > total_samples and not engine.use_dali: + rest_num = total_samples + current_samples - accum_samples + embeddings_left = embeddings_left[:rest_num] + embeddings_right = embeddings_right[:rest_num] + labels = labels[:rest_num] + batch_remains = [x[:rest_num] for x in batch_remains] + + # calc metric + if engine.eval_metric_func is not None: + engine.eval_metric_func(embeddings_left, embeddings_right, labels, + *batch_remains) + time_info["batch_cost"].update(time.time() - tic) + + if iter_id % print_batch_step == 0: + time_msg = "s, ".join([ + "{}: {:.5f}".format(key, time_info[key].avg) + for key in time_info + ]) + + ips_msg = "ips: {:.5f} images/sec".format( + batch_size / time_info["batch_cost"].avg) + + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].val) + for key in output_info + ]) + logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format( + epoch_id, iter_id, + len(engine.eval_dataloader), metric_msg, time_msg, ips_msg)) + + tic = time.time() + if engine.use_dali: + engine.eval_dataloader.reset() + + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].avg) + for key in output_info + ]) + metric_msg += ", 
{}".format(engine.eval_metric_func.avg_info) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg)) + + # do not try to save best eval.model + if engine.eval_metric_func is None: + return -1 + # return 1st metric in the dict + return engine.eval_metric_func.avg \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/retrieval.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/retrieval.py new file mode 100644 index 000000000..53f744f1a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/evaluation/retrieval.py @@ -0,0 +1,327 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import defaultdict + +import numpy as np +import paddle +import scipy + +from ppcls.utils import all_gather, logger + + +def retrieval_eval(engine, epoch_id=0): + engine.model.eval() + # step1. prepare query and gallery features + if engine.gallery_query_dataloader is not None: + gallery_feat, gallery_label, gallery_camera = compute_feature( + engine, "gallery_query") + query_feat, query_label, query_camera = gallery_feat, gallery_label, gallery_camera + else: + gallery_feat, gallery_label, gallery_camera = compute_feature( + engine, "gallery") + query_feat, query_label, query_camera = compute_feature(engine, + "query") + + # step2. split features into feature blocks for saving memory + num_query = len(query_feat) + block_size = engine.config["Global"].get("sim_block_size", 64) + sections = [block_size] * (num_query // block_size) + if num_query % block_size > 0: + sections.append(num_query % block_size) + + query_feat_blocks = paddle.split(query_feat, sections) + query_label_blocks = paddle.split(query_label, sections) + query_camera_blocks = paddle.split( + query_camera, sections) if query_camera is not None else None + metric_key = None + + # step3. 
compute metric + if engine.eval_loss_func is None: + metric_dict = {metric_key: 0.0} + else: + use_reranking = engine.config["Global"].get("re_ranking", False) + logger.info(f"re_ranking={use_reranking}") + if use_reranking: + # compute distance matrix + distmat = compute_re_ranking_dist( + query_feat, gallery_feat, engine.config["Global"].get( + "feature_normalize", True), 20, 6, 0.3) + # exclude illegal distance + if query_camera is not None: + camera_mask = query_camera != gallery_camera.t() + label_mask = query_label != gallery_label.t() + keep_mask = label_mask | camera_mask + distmat = keep_mask.astype(query_feat.dtype) * distmat + ( + ~keep_mask).astype(query_feat.dtype) * (distmat.max() + 1) + else: + keep_mask = None + # compute metric with all samples + metric_dict = engine.eval_metric_func(-distmat, query_label, + gallery_label, keep_mask) + else: + metric_dict = defaultdict(float) + for block_idx, block_feat in enumerate(query_feat_blocks): + # compute distance matrix + distmat = paddle.matmul( + block_feat, gallery_feat, transpose_y=True) + # exclude illegal distance + if query_camera is not None: + camera_mask = query_camera_blocks[ + block_idx] != gallery_camera.t() + label_mask = query_label_blocks[ + block_idx] != gallery_label.t() + keep_mask = label_mask | camera_mask + distmat = keep_mask.astype(query_feat.dtype) * distmat + else: + keep_mask = None + # compute metric by block + metric_block = engine.eval_metric_func( + distmat, query_label_blocks[block_idx], gallery_label, + keep_mask) + # accumulate metric + for key in metric_block: + metric_dict[key] += metric_block[key] * block_feat.shape[ + 0] / num_query + + metric_info_list = [] + for key, value in metric_dict.items(): + metric_info_list.append(f"{key}: {value:.5f}") + if metric_key is None: + metric_key = key + metric_msg = ", ".join(metric_info_list) + logger.info(f"[Eval][Epoch {epoch_id}][Avg]{metric_msg}") + + return metric_dict[metric_key] + + +def compute_feature(engine, name="gallery"): + if name == "gallery": + dataloader = engine.gallery_dataloader + elif name == "query": + dataloader = engine.query_dataloader + elif name == "gallery_query": + dataloader = engine.gallery_query_dataloader + else: + raise ValueError( + f"Only support gallery or query or gallery_query dataset, but got {name}" + ) + + all_feat = [] + all_label = [] + all_camera = [] + has_camera = False + for idx, batch in enumerate(dataloader): # load is very time-consuming + if idx % engine.config["Global"]["print_batch_step"] == 0: + logger.info( + f"{name} feature calculation process: [{idx}/{len(dataloader)}]" + ) + + batch = [paddle.to_tensor(x) for x in batch] + batch[1] = batch[1].reshape([-1, 1]).astype("int64") + if len(batch) >= 3: + has_camera = True + batch[2] = batch[2].reshape([-1, 1]).astype("int64") + with engine.auto_cast(is_eval=True): + if engine.is_rec: + out = engine.model(batch[0], batch[1]) + else: + out = engine.model(batch[0]) + if "Student" in out: + out = out["Student"] + + # get features + if engine.config["Global"].get("retrieval_feature_from", + "features") == "features": + # use output from neck as feature + batch_feat = out["features"] + else: + # use output from backbone as feature + batch_feat = out["backbone"] + + # do norm(optional) + if engine.config["Global"].get("feature_normalize", True): + batch_feat = paddle.nn.functional.normalize(batch_feat, p=2) + + # do binarize(optional) + if engine.config["Global"].get("feature_binarize") == "round": + batch_feat = paddle.round(batch_feat).astype("float32") * 
2.0 - 1.0 + elif engine.config["Global"].get("feature_binarize") == "sign": + batch_feat = paddle.sign(batch_feat).astype("float32") + + if paddle.distributed.get_world_size() > 1: + all_feat.append(all_gather(batch_feat)) + all_label.append(all_gather(batch[1])) + if has_camera: + all_camera.append(all_gather(batch[2])) + else: + all_feat.append(batch_feat) + all_label.append(batch[1]) + if has_camera: + all_camera.append(batch[2]) + + if engine.use_dali: + dataloader.reset() + + all_feat = paddle.concat(all_feat) + all_label = paddle.concat(all_label) + if has_camera: + all_camera = paddle.concat(all_camera) + else: + all_camera = None + # discard redundant padding sample(s) at the end + total_samples = dataloader.size if engine.use_dali else len( + dataloader.dataset) + all_feat = all_feat[:total_samples] + all_label = all_label[:total_samples] + if has_camera: + all_camera = all_camera[:total_samples] + + logger.info(f"Build {name} done, all feat shape: {all_feat.shape}") + return all_feat, all_label, all_camera + + +def k_reciprocal_neighbor(rank: np.ndarray, p: int, k: int) -> np.ndarray: + """Implementation of k-reciprocal nearest neighbors, i.e. R(p, k) + + Args: + rank (np.ndarray): Rank mat with shape of [N, N]. + p (int): Probe index. + k (int): Parameter k for k-reciprocal nearest neighbors algorithm. + + Returns: + np.ndarray: K-reciprocal nearest neighbors of probe p with shape of [M, ]. + """ + # use k+1 for excluding probe index itself + forward_k_neigh_index = rank[p, :k + 1] + backward_k_neigh_index = rank[forward_k_neigh_index, :k + 1] + candidate = np.where(backward_k_neigh_index == p)[0] + return forward_k_neigh_index[candidate] + + +def compute_re_ranking_dist(query_feat: paddle.Tensor, + gallery_feat: paddle.Tensor, + feature_normed: bool=True, + k1: int=20, + k2: int=6, + lamb: float=0.5) -> paddle.Tensor: + """ + Re-ranking Person Re-identification with k-reciprocal Encoding + Reference: https://arxiv.org/abs/1701.08398 + Code refernence: https://github.com/michuanhaohao/reid-strong-baseline/blob/master/utils/re_ranking.py + + Args: + query_feat (paddle.Tensor): Query features with shape of [num_query, feature_dim]. + gallery_feat (paddle.Tensor): Gallery features with shape of [num_gallery, feature_dim]. + feature_normed (bool, optional): Whether input features are normalized. + k1 (int, optional): Parameter for K-reciprocal nearest neighbors. Defaults to 20. + k2 (int, optional): Parameter for K-nearest neighbors. Defaults to 6. + lamb (float, optional): Penalty factor. Defaults to 0.5. + + Returns: + paddle.Tensor: (1 - lamb) x Dj + lamb x D, with shape of [num_query, num_gallery]. 
+ """ + num_query = query_feat.shape[0] + num_gallery = gallery_feat.shape[0] + num_all = num_query + num_gallery + feat = paddle.concat([query_feat, gallery_feat], 0) + logger.info("Using GPU to compute original distance matrix") + # use L2 distance + if feature_normed: + original_dist = 2 - 2 * paddle.matmul(feat, feat, transpose_y=True) + else: + original_dist = paddle.pow(feat, 2).sum(axis=1, keepdim=True).expand([num_all, num_all]) + \ + paddle.pow(feat, 2).sum(axis=1, keepdim=True).expand([num_all, num_all]).t() + original_dist = original_dist.addmm(feat, feat.t(), -2.0, 1.0) + original_dist = original_dist.numpy() + del feat + + original_dist = np.transpose(original_dist / np.max(original_dist, axis=0)) + V = np.zeros_like(original_dist).astype(np.float16) + initial_rank = np.argpartition(original_dist, range(1, k1 + 1)) + logger.info("Start re-ranking...") + + for p in range(num_all): + # compute R(p,k1) + p_k_reciprocal_ind = k_reciprocal_neighbor(initial_rank, p, k1) + + # compute R*(p,k1)=R(p,k1)∪R(q,k1/2) + # s.t. |R(p,k1)∩R(q,k1/2)|>=2/3|R(q,k1/2)|, ∀q∈R(p,k1) + p_k_reciprocal_exp_ind = p_k_reciprocal_ind + for _, q in enumerate(p_k_reciprocal_ind): + q_k_reciprocal_ind = k_reciprocal_neighbor(initial_rank, q, + int(np.around(k1 / 2))) + if len( + np.intersect1d( + p_k_reciprocal_ind, + q_k_reciprocal_ind, + assume_unique=True)) > 2 / 3 * len(q_k_reciprocal_ind): + p_k_reciprocal_exp_ind = np.append(p_k_reciprocal_exp_ind, + q_k_reciprocal_ind) + p_k_reciprocal_exp_ind = np.unique(p_k_reciprocal_exp_ind) + # reweight distance using gaussian kernel + weight = np.exp(-original_dist[p, p_k_reciprocal_exp_ind]) + V[p, p_k_reciprocal_exp_ind] = weight / np.sum(weight) + + # local query expansion + original_dist = original_dist[:num_query, ] + if k2 > 1: + try: + # use sparse tensor to speed up query expansion + indices = (np.repeat(np.arange(num_all), k2), + initial_rank[:, :k2].reshape([-1, ])) + values = np.array( + [1 / k2 for _ in range(num_all * k2)], dtype="float16") + V = scipy.sparse.coo_matrix( + (values, indices), V.shape, + dtype="float16") @V.astype("float16") + except Exception as e: + logger.info( + f"Failed to do local query expansion with sparse tensor for reason: \n{e}\n" + f"now use for-loop instead") + # use vanilla for-loop + V_qe = np.zeros_like(V, dtype=np.float16) + for i in range(num_all): + V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0) + V = V_qe + del V_qe + del initial_rank + + # cache k-reciprocal sets which contains gj + invIndex = [] + for gj in range(num_all): + invIndex.append(np.nonzero(V[:, gj])[0]) + + # compute jaccard distance + jaccard_dist = np.zeros_like(original_dist, dtype=np.float16) + for p in range(num_query): + sum_min = np.zeros(shape=[1, num_all], dtype=np.float16) + gj_ind = np.nonzero(V[p, :])[0] + gj_ind_inv = [invIndex[gj] for gj in gj_ind] + for j, gj in enumerate(gj_ind): + gi = gj_ind_inv[j] + sum_min[0, gi] += np.minimum(V[p, gj], V[gi, gj]) + jaccard_dist[p] = 1 - sum_min / (2 - sum_min) + + # fuse jaccard distance with original distance + final_dist = (1 - lamb) * jaccard_dist + lamb * original_dist + del original_dist + del V + del jaccard_dist + final_dist = final_dist[:num_query, num_query:] + final_dist = paddle.to_tensor(final_dist) + return final_dist diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/__init__.py new file mode 100644 index 000000000..50bf9037f --- /dev/null +++ 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from ppcls.engine.train.train import train_epoch +from ppcls.engine.train.train_fixmatch import train_epoch_fixmatch +from ppcls.engine.train.train_fixmatch_ccssl import train_epoch_fixmatch_ccssl +from ppcls.engine.train.train_progressive import train_epoch_progressive +from ppcls.engine.train.train_metabin import train_epoch_metabin diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train.py new file mode 100644 index 000000000..cf7f65734 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train.py @@ -0,0 +1,100 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
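Illustrative sketch of the k-reciprocal neighbour rule R(p, k) implemented by k_reciprocal_neighbor above, reduced to a tiny NumPy-only example (the 3x3 distance matrix and the value of k are made up for illustration):

import numpy as np

def k_reciprocal_neighbor(rank, p, k):
    # keep q among p's top-k only if p is also among q's top-k (mutual neighbours)
    forward_k = rank[p, :k + 1]
    backward_k = rank[forward_k, :k + 1]
    keep = np.where(backward_k == p)[0]
    return forward_k[keep]

dist = np.array([[0.0, 0.2, 0.9],
                 [0.2, 0.0, 0.8],
                 [0.9, 0.8, 0.0]])
rank = np.argsort(dist, axis=1)            # row-wise ranking by ascending distance
print(k_reciprocal_neighbor(rank, 0, 1))   # [0 1]: the probe itself plus its mutual neighbour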
+from __future__ import absolute_import, division, print_function + +import time +import paddle +from ppcls.engine.train.utils import update_loss, update_metric, log_info, type_name +from ppcls.utils import profiler + + +def train_epoch(engine, epoch_id, print_batch_step): + tic = time.time() + + if not hasattr(engine, "train_dataloader_iter"): + engine.train_dataloader_iter = iter(engine.train_dataloader) + + for iter_id in range(engine.iter_per_epoch): + # fetch data batch from dataloader + try: + batch = next(engine.train_dataloader_iter) + except Exception: + # NOTE: reset DALI dataloader manually + if engine.use_dali: + engine.train_dataloader.reset() + engine.train_dataloader_iter = iter(engine.train_dataloader) + batch = next(engine.train_dataloader_iter) + + profiler.add_profiler_step(engine.config["profiler_options"]) + if iter_id == 5: + for key in engine.time_info: + engine.time_info[key].reset() + engine.time_info["reader_cost"].update(time.time() - tic) + + batch_size = batch[0].shape[0] + if not engine.config["Global"].get("use_multilabel", False): + batch[1] = batch[1].reshape([batch_size, -1]) + engine.global_step += 1 + + # image input + with engine.auto_cast(is_eval=False): + out = forward(engine, batch) + loss_dict = engine.train_loss_func(out, batch[1]) + + # loss + loss = loss_dict["loss"] / engine.update_freq + + # backward & step opt + scaled = engine.scaler.scale(loss) + scaled.backward() + if (iter_id + 1) % engine.update_freq == 0: + for i in range(len(engine.optimizer)): + # optimizer.step() with auto amp + engine.scaler.step(engine.optimizer[i]) + engine.scaler.update() + + if (iter_id + 1) % engine.update_freq == 0: + # clear grad + for i in range(len(engine.optimizer)): + engine.optimizer[i].clear_grad() + # step lr(by step) + for i in range(len(engine.lr_sch)): + if not getattr(engine.lr_sch[i], "by_epoch", False): + engine.lr_sch[i].step() + # update ema + if engine.ema: + engine.model_ema.update(engine.model) + + # below code just for logging + # update metric_for_logger + update_metric(engine, out, batch, batch_size) + # update_loss_for_logger + update_loss(engine, loss_dict, batch_size) + engine.time_info["batch_cost"].update(time.time() - tic) + if iter_id % print_batch_step == 0: + log_info(engine, batch_size, epoch_id, iter_id) + tic = time.time() + + # step lr(by epoch) + for i in range(len(engine.lr_sch)): + if getattr(engine.lr_sch[i], "by_epoch", False) and \ + type_name(engine.lr_sch[i]) != "ReduceOnPlateau": + engine.lr_sch[i].step() + + +def forward(engine, batch): + if not engine.is_rec: + return engine.model(batch[0]) + else: + return engine.model(batch[0], batch[1]) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_fixmatch.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_fixmatch.py new file mode 100644 index 000000000..26a3daa74 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_fixmatch.py @@ -0,0 +1,152 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import, division, print_function + +import time +import paddle +from ppcls.engine.train.utils import update_loss, update_metric, log_info +from ppcls.utils import profiler +from paddle.nn import functional as F +import numpy as np + + +def train_epoch_fixmatch(engine, epoch_id, print_batch_step): + tic = time.time() + if not hasattr(engine, "train_dataloader_iter"): + engine.train_dataloader_iter = iter(engine.train_dataloader) + engine.unlabel_train_dataloader_iter = iter( + engine.unlabel_train_dataloader) + temperture = engine.config["SSL"].get("temperture", 1) + threshold = engine.config["SSL"].get("threshold", 0.95) + assert engine.iter_per_epoch is not None, "Global.iter_per_epoch need to be set." + threshold = paddle.to_tensor(threshold) + for iter_id in range(engine.iter_per_epoch): + if iter_id >= engine.iter_per_epoch: + break + if iter_id == 5: + for key in engine.time_info: + engine.time_info[key].reset() + try: + label_data_batch = engine.train_dataloader_iter.next() + except Exception: + engine.train_dataloader_iter = iter(engine.train_dataloader) + label_data_batch = engine.train_dataloader_iter.next() + try: + unlabel_data_batch = engine.unlabel_train_dataloader_iter.next() + except Exception: + engine.unlabel_train_dataloader_iter = iter( + engine.unlabel_train_dataloader) + unlabel_data_batch = engine.unlabel_train_dataloader_iter.next() + assert len(unlabel_data_batch) == 3 + assert unlabel_data_batch[0].shape == unlabel_data_batch[1].shape + engine.time_info["reader_cost"].update(time.time() - tic) + batch_size = label_data_batch[0].shape[0] + unlabel_data_batch[0].shape[0] \ + + unlabel_data_batch[1].shape[0] + engine.global_step += 1 + + # make inputs + inputs_x, targets_x = label_data_batch + inputs_u_w, inputs_u_s, targets_u = unlabel_data_batch + batch_size_label = inputs_x.shape[0] + inputs = paddle.concat([inputs_x, inputs_u_w, inputs_u_s], axis=0) + + # image input + with engine.auto_cast(is_eval=False): + loss_dict, logits_label = get_loss(engine, inputs, + batch_size_label, temperture, + threshold, targets_x) + + # loss + loss = loss_dict["loss"] + + # backward & step opt + scaled = engine.scaler.scale(loss) + scaled.backward() + + for i in range(len(engine.optimizer)): + # optimizer.step() with auto amp + engine.scaler.step(engine.optimizer[i]) + engine.scaler.update() + + # step lr(by step) + for i in range(len(engine.lr_sch)): + if not getattr(engine.lr_sch[i], "by_epoch", False): + engine.lr_sch[i].step() + # clear grad + for i in range(len(engine.optimizer)): + engine.optimizer[i].clear_grad() + + # update ema + if engine.ema: + engine.model_ema.update(engine.model) + + # below code just for logging + # update metric_for_logger + update_metric(engine, logits_label, label_data_batch, batch_size) + # update_loss_for_logger + update_loss(engine, loss_dict, batch_size) + engine.time_info["batch_cost"].update(time.time() - tic) + if iter_id % print_batch_step == 0: + log_info(engine, batch_size, epoch_id, iter_id) + tic = time.time() + + # step lr(by epoch) + for i in range(len(engine.lr_sch)): + if getattr(engine.lr_sch[i], "by_epoch", False): + engine.lr_sch[i].step() + + +def get_loss(engine, inputs, batch_size_label, temperture, threshold, + targets_x): + # For pytroch version, inputs need to use interleave and de_interleave + # to reshape and transpose inputs and logits, but it dosen't affect the + # result. 
So this paddle version dose not use the two transpose func. + # inputs = interleave(inputs, inputs.shape[0] // batch_size_label) + logits = engine.model(inputs) + # logits = de_interleave(logits, inputs.shape[0] // batch_size_label) + logits_x = logits[:batch_size_label] + logits_u_w, logits_u_s = logits[batch_size_label:].chunk(2) + loss_dict_label = engine.train_loss_func(logits_x, targets_x) + probs_u_w = F.softmax(logits_u_w.detach() / temperture, axis=-1) + p_targets_u, mask = get_psuedo_label_and_mask(probs_u_w, threshold) + unlabel_celoss = engine.unlabel_train_loss_func(logits_u_s, + p_targets_u)["CELoss"] + unlabel_celoss = (unlabel_celoss * mask).mean() + loss_dict = dict() + for k, v in loss_dict_label.items(): + if k != "loss": + loss_dict[k + "_label"] = v + loss_dict["CELoss_unlabel"] = unlabel_celoss + loss_dict["loss"] = loss_dict_label['loss'] + unlabel_celoss + return loss_dict, logits_x + + +def get_psuedo_label_and_mask(probs_u_w, threshold): + max_probs = paddle.max(probs_u_w, axis=-1) + p_targets_u = paddle.argmax(probs_u_w, axis=-1) + + mask = paddle.greater_equal(max_probs, threshold).astype('float') + return p_targets_u, mask + + +def interleave(x, size): + s = list(x.shape) + return x.reshape([-1, size] + s[1:]).transpose( + [1, 0, 2, 3, 4]).reshape([-1] + s[1:]) + + +def de_interleave(x, size): + s = list(x.shape) + return x.reshape([size, -1] + s[1:]).transpose( + [1, 0, 2]).reshape([-1] + s[1:]) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_fixmatch_ccssl.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_fixmatch_ccssl.py new file mode 100644 index 000000000..43d20519f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_fixmatch_ccssl.py @@ -0,0 +1,125 @@ +from __future__ import absolute_import, division, print_function +import time +import paddle +from ppcls.engine.train.train_fixmatch import get_loss +from ppcls.engine.train.utils import update_loss, update_metric, log_info +from ppcls.utils import profiler +from paddle.nn import functional as F +import numpy as np +import paddle + + +def train_epoch_fixmatch_ccssl(engine, epoch_id, print_batch_step): + tic = time.time() + if not hasattr(engine, 'train_dataloader_iter'): + engine.train_dataloader_iter = iter(engine.train_dataloader) + engine.unlabel_train_dataloader_iter = iter(engine.unlabel_train_dataloader) + + temperture = engine.config['SSL'].get("T", 1) + threshold = engine.config['SSL'].get("threshold", 0.95) + assert engine.iter_per_epoch is not None, "Global.iter_per_epoch need to be set" + threshold = paddle.to_tensor(threshold) + + for iter_id in range(engine.iter_per_epoch): + if iter_id >= engine.iter_per_epoch: + break + + if iter_id == 5: + for key in engine.time_info: + engine.time_info[key].reset() + + try: + label_data_batch = engine.train_dataloader_iter.next() + except Exception: + engine.train_dataloader_iter = iter(engine.train_dataloader) + label_data_batch = engine.train_dataloader_iter.next() + + try: + unlabel_data_batch = engine.unlabel_train_dataloader_iter.next() + except Exception: + engine.unlabel_train_dataloader_iter = iter(engine.unlabel_train_dataloader) + unlabel_data_batch = engine.unlabel_train_dataloader_iter.next() + + assert len(unlabel_data_batch) in [3, 4] + assert unlabel_data_batch[0].shape == unlabel_data_batch[1].shape == unlabel_data_batch[2].shape + + engine.time_info['reader_cost'].update(time.time() - tic) + batch_size = label_data_batch[0].shape[0] \ + + 
unlabel_data_batch[0].shape[0] \ + + unlabel_data_batch[1].shape[0] \ + + unlabel_data_batch[2].shape[0] + engine.global_step += 1 + + inputs_x, targets_x = label_data_batch + inputs_w, inputs_s1, inputs_s2 = unlabel_data_batch[:3] + batch_size_label = inputs_x.shape[0] + inputs = paddle.concat([inputs_x, inputs_w, inputs_s1, inputs_s2], axis=0) + + loss_dict, logits_label = get_loss(engine, inputs, batch_size_label, + temperture, threshold, targets_x, + ) + loss = loss_dict['loss'] + loss.backward() + + for i in range(len(engine.optimizer)): + engine.optimizer[i].step() + + for i in range(len(engine.lr_sch)): + if not getattr(engine.lr_sch[i], 'by_epoch', False): + engine.lr_sch[i].step() + + for i in range(len(engine.optimizer)): + engine.optimizer[i].clear_grad() + + if engine.ema: + engine.model_ema.update(engine.model) + update_metric(engine, logits_label, label_data_batch, batch_size) + update_loss(engine, loss_dict, batch_size) + engine.time_info['batch_cost'].update(time.time() - tic) + if iter_id % print_batch_step == 0: + log_info(engine, batch_size, epoch_id, iter_id) + + tic = time.time() + + for i in range(len(engine.lr_sch)): + if getattr(engine.lr_sch[i], 'by_epoch', False): + engine.lr_sch[i].step() + +def get_loss(engine, + inputs, + batch_size_label, + temperture, + threshold, + targets_x, + **kwargs + ): + out = engine.model(inputs) + + logits, feats = out['logits'], out['features'] + feat_w, feat_s1, feat_s2 = feats[batch_size_label:].chunk(3) + feat_x = feats[:batch_size_label] + logits_x = logits[:batch_size_label] + logits_w, logits_s1, logits_s2 = logits[batch_size_label:].chunk(3) + loss_dict_label = engine.train_loss_func(logits_x, targets_x) + probs_u_w = F.softmax(logits_w.detach() / temperture, axis=-1) + max_probs, p_targets_u_w = probs_u_w.max(axis=-1), probs_u_w.argmax(axis=-1) + mask = paddle.greater_equal(max_probs, threshold).astype('float') + + feats = paddle.concat([feat_s1.unsqueeze(1), feat_s2.unsqueeze(1)], axis=1) + batch = {'logits_w': logits_w, + 'logits_s1': logits_s1, + 'p_targets_u_w': p_targets_u_w, + 'mask': mask, + 'max_probs': max_probs, + } + unlabel_loss = engine.unlabel_train_loss_func(feats, batch) + loss_dict = {} + for k, v in loss_dict_label.items(): + if k != 'loss': + loss_dict[k] = v + for k, v in unlabel_loss.items(): + if k != 'loss': + loss_dict[k] = v + loss_dict['loss'] = loss_dict_label['loss'] + unlabel_loss['loss'] + + return loss_dict, logits_x diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_metabin.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_metabin.py new file mode 100644 index 000000000..d1d50f1d4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_metabin.py @@ -0,0 +1,251 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
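A minimal sketch of the FixMatch pseudo-labelling step performed by get_loss and get_psuedo_label_and_mask above, run on random logits; the batch size, class count, temperature and confidence threshold here are illustrative values, not taken from any config:

import paddle
import paddle.nn.functional as F

paddle.seed(0)
logits_u_w = paddle.randn([4, 10])          # weak-augmentation logits
logits_u_s = paddle.randn([4, 10])          # strong-augmentation logits
threshold = paddle.to_tensor(0.95)

probs = F.softmax(logits_u_w.detach() / 1.0, axis=-1)               # temperature T = 1
p_targets = paddle.argmax(probs, axis=-1)                           # hard pseudo labels
mask = paddle.greater_equal(paddle.max(probs, axis=-1), threshold).astype('float32')

# per-sample CE on the strong view, kept only where confidence >= threshold
ce = F.cross_entropy(logits_u_s, p_targets, reduction='none').flatten()
loss_u = (ce * mask).mean()
print(float(loss_u))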
+ +# reference: https://arxiv.org/abs/2011.14670v2 + +from __future__ import absolute_import, division, print_function + +import time +import paddle +import numpy as np +from collections import defaultdict + +from ppcls.engine.train.utils import update_loss, update_metric, log_info, type_name +from ppcls.utils import profiler +from ppcls.data import build_dataloader +from ppcls.loss import build_loss + + +def train_epoch_metabin(engine, epoch_id, print_batch_step): + tic = time.time() + + if not hasattr(engine, "train_dataloader_iter"): + engine.train_dataloader_iter = iter(engine.train_dataloader) + + if not hasattr(engine, "meta_dataloader"): + engine.meta_dataloader = build_dataloader( + config=engine.config['DataLoader']['Metalearning'], + mode='Train', + device=engine.device) + engine.meta_dataloader_iter = iter(engine.meta_dataloader) + + num_domain = engine.train_dataloader.dataset.num_cams + for iter_id in range(engine.iter_per_epoch): + # fetch data batch from dataloader + try: + train_batch = next(engine.train_dataloader_iter) + except Exception: + engine.train_dataloader_iter = iter(engine.train_dataloader) + train_batch = next(engine.train_dataloader_iter) + + try: + mtrain_batch, mtest_batch = get_meta_data( + engine.meta_dataloader_iter, num_domain) + except Exception: + engine.meta_dataloader_iter = iter(engine.meta_dataloader) + mtrain_batch, mtest_batch = get_meta_data( + engine.meta_dataloader_iter, num_domain) + + profiler.add_profiler_step(engine.config["profiler_options"]) + if iter_id == 5: + for key in engine.time_info: + engine.time_info[key].reset() + engine.time_info["reader_cost"].update(time.time() - tic) + + train_batch_size = train_batch[0].shape[0] + mtrain_batch_size = mtrain_batch[0].shape[0] + mtest_batch_size = mtest_batch[0].shape[0] + if not engine.config["Global"].get("use_multilabel", False): + train_batch[1] = train_batch[1].reshape([train_batch_size, -1]) + mtrain_batch[1] = mtrain_batch[1].reshape([mtrain_batch_size, -1]) + mtest_batch[1] = mtest_batch[1].reshape([mtest_batch_size, -1]) + + engine.global_step += 1 + + if engine.global_step == 1: # update model (execpt gate) to warmup + for i in range(engine.config["Global"]["warmup_iter"] - 1): + out, basic_loss_dict = basic_update(engine, train_batch) + loss_dict = basic_loss_dict + try: + train_batch = next(engine.train_dataloader_iter) + except Exception: + engine.train_dataloader_iter = iter( + engine.train_dataloader) + train_batch = next(engine.train_dataloader_iter) + + out, basic_loss_dict = basic_update(engine=engine, batch=train_batch) + mtrain_loss_dict, mtest_loss_dict = metalearning_update( + engine=engine, mtrain_batch=mtrain_batch, mtest_batch=mtest_batch) + loss_dict = { + ** + {"train_" + key: value + for key, value in basic_loss_dict.items()}, ** { + "mtrain_" + key: value + for key, value in mtrain_loss_dict.items() + }, ** + {"mtest_" + key: value + for key, value in mtest_loss_dict.items()} + } + # step lr (by iter) + for i in range(len(engine.lr_sch)): + if not getattr(engine.lr_sch[i], "by_epoch", False): + engine.lr_sch[i].step() + # update ema + if engine.ema: + engine.model_ema.update(engine.model) + + # below code just for logging + # update metric_for_logger + update_metric(engine, out, train_batch, train_batch_size) + # update_loss_for_logger + update_loss(engine, loss_dict, train_batch_size) + engine.time_info["batch_cost"].update(time.time() - tic) + if iter_id % print_batch_step == 0: + log_info(engine, train_batch_size, epoch_id, iter_id) + tic = time.time() + + # 
step lr(by epoch) + for i in range(len(engine.lr_sch)): + if getattr(engine.lr_sch[i], "by_epoch", False) and \ + type_name(engine.lr_sch[i]) != "ReduceOnPlateau": + engine.lr_sch[i].step() + + +def setup_opt(engine, stage): + assert stage in ["train", "mtrain", "mtest"] + opt = defaultdict() + if stage == "train": + opt["bn_mode"] = "general" + opt["enable_inside_update"] = False + opt["lr_gate"] = 0.0 + elif stage == "mtrain": + opt["bn_mode"] = "hold" + opt["enable_inside_update"] = False + opt["lr_gate"] = 0.0 + elif stage == "mtest": + norm_lr = engine.lr_sch[1].last_lr + cyclic_lr = engine.lr_sch[2].get_lr() + opt["bn_mode"] = "hold" + opt["enable_inside_update"] = True + opt["lr_gate"] = norm_lr * cyclic_lr + for layer in engine.model.backbone.sublayers(): + if type_name(layer) == "MetaBIN": + layer.setup_opt(opt) + engine.model.neck.setup_opt(opt) + + +def reset_opt(model): + for layer in model.backbone.sublayers(): + if type_name(layer) == "MetaBIN": + layer.reset_opt() + model.neck.reset_opt() + + +def get_meta_data(meta_dataloader_iter, num_domain): + """ + fetch data batch from dataloader then divide the batch by domains + """ + list_all = np.random.permutation(num_domain) + list_mtrain = list(list_all[:num_domain // 2]) + batch = next(meta_dataloader_iter) + domain_idx = batch[2] + cnt = 0 + for sample in list_mtrain: + if cnt == 0: + is_mtrain_domain = domain_idx == sample + else: + is_mtrain_domain = paddle.logical_or(is_mtrain_domain, + domain_idx == sample) + cnt += 1 + + # mtrain_batch + if not any(is_mtrain_domain): + mtrain_batch = None + raise RuntimeError + else: + mtrain_batch = [batch[i][is_mtrain_domain] for i in range(len(batch))] + + # mtest_batch + is_mtest_domains = is_mtrain_domain == False + if not any(is_mtest_domains): + mtest_batch = None + raise RuntimeError + else: + mtest_batch = [batch[i][is_mtest_domains] for i in range(len(batch))] + return mtrain_batch, mtest_batch + + +def forward(engine, batch, loss_func): + batch_info = defaultdict() + batch_info = {"label": batch[1], "domain": batch[2]} + + with engine.auto_cast(is_eval=False): + out = engine.model(batch[0], batch[1]) + loss_dict = loss_func(out, batch_info) + + return out, loss_dict + + +def backward(engine, loss, optimizer): + optimizer.clear_grad() + scaled = engine.scaler.scale(loss) + scaled.backward() + + # optimizer.step() with auto amp + engine.scaler.step(optimizer) + engine.scaler.update() + + for name, layer in engine.model.backbone.named_sublayers(): + if "gate" == name.split('.')[-1]: + layer.clip_gate() + + +def basic_update(engine, batch): + setup_opt(engine, "train") + train_loss_func = build_loss(engine.config["Loss"]["Basic"]) + out, train_loss_dict = forward(engine, batch, train_loss_func) + train_loss = train_loss_dict["loss"] + backward(engine, train_loss, engine.optimizer[0]) + engine.optimizer[0].clear_grad() + reset_opt(engine.model) + return out, train_loss_dict + + +def metalearning_update(engine, mtrain_batch, mtest_batch): + # meta train + mtrain_loss_func = build_loss(engine.config["Loss"]["MetaTrain"]) + setup_opt(engine, "mtrain") + + mtrain_batch_info = defaultdict() + mtrain_batch_info = {"label": mtrain_batch[1], "domain": mtrain_batch[2]} + out = engine.model(mtrain_batch[0], mtrain_batch[1]) + mtrain_loss_dict = mtrain_loss_func(out, mtrain_batch_info) + mtrain_loss = mtrain_loss_dict["loss"] + engine.optimizer[1].clear_grad() + mtrain_loss.backward() + + # meta test + mtest_loss_func = build_loss(engine.config["Loss"]["MetaTest"]) + setup_opt(engine, "mtest") 
+ + out, mtest_loss_dict = forward(engine, mtest_batch, mtest_loss_func) + engine.optimizer[1].clear_grad() + mtest_loss = mtest_loss_dict["loss"] + backward(engine, mtest_loss, engine.optimizer[1]) + + engine.optimizer[0].clear_grad() + engine.optimizer[1].clear_grad() + reset_opt(engine.model) + + return mtrain_loss_dict, mtest_loss_dict diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_progressive.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_progressive.py new file mode 100644 index 000000000..9999024b9 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/train_progressive.py @@ -0,0 +1,72 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import, division, print_function + +from ppcls.data import build_dataloader +from ppcls.engine.train.utils import type_name +from ppcls.utils import logger + +from .train import train_epoch + + +def train_epoch_progressive(engine, epoch_id, print_batch_step): + # 1. Build training hyper-parameters for different training stage + num_stage = 4 + ratio_list = [(i + 1) / num_stage for i in range(num_stage)] + stones = [ + int(engine.config["Global"]["epochs"] * ratio_list[i]) + for i in range(num_stage) + ] + stage_id = 0 + for i in range(num_stage): + if epoch_id > stones[i]: + stage_id = i + 1 + + # 2. Adjust training hyper-parameters for different training stage + if not hasattr(engine, 'last_stage') or engine.last_stage < stage_id: + cur_dropout_rate = 0.0 + + def _change_dp_func(m): + global cur_dropout_rate + if type_name(m) == "Head" and hasattr(m, "_dropout"): + m._dropout.p = m.dropout_rate[stage_id] + cur_dropout_rate = m.dropout_rate[stage_id] + + engine.model.apply(_change_dp_func) + + cur_image_size = engine.config["DataLoader"]["Train"]["dataset"][ + "transform_ops"][1]["RandCropImage"]["progress_size"][stage_id] + cur_magnitude = engine.config["DataLoader"]["Train"]["dataset"][ + "transform_ops"][3]["RandAugmentV2"]["progress_magnitude"][ + stage_id] + engine.config["DataLoader"]["Train"]["dataset"]["transform_ops"][1][ + "RandCropImage"]["size"] = cur_image_size + engine.config["DataLoader"]["Train"]["dataset"]["transform_ops"][3][ + "RandAugmentV2"]["magnitude"] = cur_magnitude + engine.train_dataloader = build_dataloader( + engine.config["DataLoader"], + "Train", + engine.device, + engine.use_dali, + seed=epoch_id) + engine.train_dataloader_iter = iter(engine.train_dataloader) + engine.last_stage = stage_id + logger.info(f"Training stage: [{stage_id+1}/{num_stage}](" + f"random_aug_magnitude={cur_magnitude}, " + f"train_image_size={cur_image_size}, " + f"dropout_rate={cur_dropout_rate}" + f")") + + # 3. 
Train one epoch as usual at current stage + train_epoch(engine, epoch_id, print_batch_step) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/utils.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/utils.py new file mode 100644 index 000000000..1d5c70b2e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/engine/train/utils.py @@ -0,0 +1,94 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import, division, print_function + +import paddle +import datetime +from ppcls.utils import logger +from ppcls.utils.misc import AverageMeter + + +def update_metric(trainer, out, batch, batch_size): + # calc metric + if trainer.train_metric_func is not None: + metric_dict = trainer.train_metric_func(out, batch[-1]) + for key in metric_dict: + if key not in trainer.output_info: + trainer.output_info[key] = AverageMeter(key, '7.5f') + trainer.output_info[key].update( + float(metric_dict[key]), batch_size) + + +def update_loss(trainer, loss_dict, batch_size): + # update_output_info + for key in loss_dict: + if key not in trainer.output_info: + trainer.output_info[key] = AverageMeter(key, '7.5f') + trainer.output_info[key].update(float(loss_dict[key]), batch_size) + + +def log_info(trainer, batch_size, epoch_id, iter_id): + lr_msg = ", ".join([ + "lr({}): {:.8f}".format(type_name(lr), lr.get_lr()) + for i, lr in enumerate(trainer.lr_sch) + ]) + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, trainer.output_info[key].avg) + for key in trainer.output_info + ]) + time_msg = "s, ".join([ + "{}: {:.5f}".format(key, trainer.time_info[key].avg) + for key in trainer.time_info + ]) + + ips_msg = "ips: {:.5f} samples/s".format( + batch_size / trainer.time_info["batch_cost"].avg) + + global_epochs = trainer.config["Global"]["epochs"] + eta_sec = ( + (trainer.config["Global"]["epochs"] - epoch_id + 1) * + trainer.iter_per_epoch - iter_id) * trainer.time_info["batch_cost"].avg + eta_msg = "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec)))) + max_mem_reserved_msg = "" + max_mem_allocated_msg = "" + max_mem_msg = "" + print_mem_info = trainer.config["Global"].get("print_mem_info", False) + if print_mem_info: + if paddle.device.is_compiled_with_cuda(): + max_mem_reserved_msg = f"max_mem_reserved: {format(paddle.device.cuda.max_memory_reserved() / (1024 ** 2), '.2f')} MB" + max_mem_allocated_msg = f"max_mem_allocated: {format(paddle.device.cuda.max_memory_allocated() / (1024 ** 2), '.2f')} MB" + max_mem_msg = f", {max_mem_reserved_msg}, {max_mem_allocated_msg}" + logger.info( + f"[Train][Epoch {epoch_id}/{global_epochs}][Iter: {iter_id}/{trainer.iter_per_epoch}]{lr_msg}, {metric_msg}, {time_msg}, {ips_msg}, {eta_msg}{max_mem_msg}" + ) + for key in trainer.time_info: + trainer.time_info[key].reset() + + for i, lr in enumerate(trainer.lr_sch): + logger.scaler( + name="lr({})".format(type_name(lr)), + value=lr.get_lr(), + step=trainer.global_step, + 
writer=trainer.vdl_writer) + for key in trainer.output_info: + logger.scaler( + name="train_{}".format(key), + value=trainer.output_info[key].avg, + step=trainer.global_step, + writer=trainer.vdl_writer) + + +def type_name(object: object) -> str: + """get class name of an object""" + return object.__class__.__name__ diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/__init__.py new file mode 100644 index 000000000..7ab8be4fa --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/__init__.py @@ -0,0 +1,91 @@ +import copy + +import paddle +import paddle.nn as nn +from ppcls.utils import logger + +from .celoss import CELoss, MixCELoss +from .googlenetloss import GoogLeNetLoss +from .centerloss import CenterLoss +from .contrasiveloss import ContrastiveLoss +from .contrasiveloss import ContrastiveLoss_XBM +from .emlloss import EmlLoss +from .msmloss import MSMLoss +from .npairsloss import NpairsLoss +from .trihardloss import TriHardLoss +from .triplet import TripletLoss, TripletLossV2 +from .tripletangularmarginloss import TripletAngularMarginLoss, TripletAngularMarginLoss_XBM +from .supconloss import SupConLoss +from .softsuploss import SoftSupConLoss +from .ccssl_loss import CCSSLCELoss +from .pairwisecosface import PairwiseCosface +from .dmlloss import DMLLoss +from .distanceloss import DistanceLoss +from .softtargetceloss import SoftTargetCrossEntropy +from .distillationloss import DistillationCELoss +from .distillationloss import DistillationGTCELoss +from .distillationloss import DistillationDMLLoss +from .distillationloss import DistillationDistanceLoss +from .distillationloss import DistillationRKDLoss +from .distillationloss import DistillationKLDivLoss +from .distillationloss import DistillationDKDLoss +from .distillationloss import DistillationWSLLoss +from .distillationloss import DistillationSKDLoss +from .distillationloss import DistillationMultiLabelLoss +from .distillationloss import DistillationDISTLoss +from .distillationloss import DistillationPairLoss + +from .multilabelloss import MultiLabelLoss, MultiLabelAsymmetricLoss +from .afdloss import AFDLoss + +from .deephashloss import DSHSDLoss +from .deephashloss import LCDSHLoss +from .deephashloss import DCHLoss + +from .metabinloss import CELossForMetaBIN +from .metabinloss import TripletLossForMetaBIN +from .metabinloss import InterDomainShuffleLoss +from .metabinloss import IntraDomainScatterLoss + + +class CombinedLoss(nn.Layer): + def __init__(self, config_list): + super().__init__() + self.loss_func = [] + self.loss_weight = [] + assert isinstance(config_list, list), ( + 'operator config should be a list') + for config in config_list: + assert isinstance(config, + dict) and len(config) == 1, "yaml format error" + name = list(config)[0] + param = config[name] + assert "weight" in param, "weight must be in param, but param just contains {}".format( + param.keys()) + self.loss_weight.append(param.pop("weight")) + self.loss_func.append(eval(name)(**param)) + self.loss_func = nn.LayerList(self.loss_func) + + def __call__(self, input, batch): + loss_dict = {} + # just for accelerate classification traing speed + if len(self.loss_func) == 1: + loss = self.loss_func[0](input, batch) + loss_dict.update(loss) + loss_dict["loss"] = list(loss.values())[0] + else: + for idx, loss_func in enumerate(self.loss_func): + loss = loss_func(input, batch) + weight = self.loss_weight[idx] + loss = {key: loss[key] * weight for key in loss} 
+ loss_dict.update(loss) + loss_dict["loss"] = paddle.add_n(list(loss_dict.values())) + return loss_dict + + +def build_loss(config): + if config is None: + return None + module_class = CombinedLoss(copy.deepcopy(config)) + logger.debug("build loss {} success.".format(module_class)) + return module_class diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/afdloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/afdloss.py new file mode 100644 index 000000000..e2f457451 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/afdloss.py @@ -0,0 +1,130 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import paddle.nn as nn +import paddle.nn.functional as F +import paddle +import numpy as np +import matplotlib.pyplot as plt +import cv2 +import warnings +warnings.filterwarnings('ignore') + + +class LinearBNReLU(nn.Layer): + def __init__(self, nin, nout): + super().__init__() + self.linear = nn.Linear(nin, nout) + self.bn = nn.BatchNorm1D(nout) + self.relu = nn.ReLU() + + def forward(self, x, relu=True): + if relu: + return self.relu(self.bn(self.linear(x))) + return self.bn(self.linear(x)) + + +def unique_shape(s_shapes): + n_s = [] + unique_shapes = [] + n = -1 + for s_shape in s_shapes: + if s_shape not in unique_shapes: + unique_shapes.append(s_shape) + n += 1 + n_s.append(n) + return n_s, unique_shapes + + +class AFDLoss(nn.Layer): + """ + AFDLoss + https://www.aaai.org/AAAI21Papers/AAAI-9785.JiM.pdf + https://github.com/clovaai/attention-feature-distillation + """ + + def __init__(self, + model_name_pair=["Student", "Teacher"], + student_keys=["bilinear_key", "value"], + teacher_keys=["query", "value"], + s_shapes=[[64, 16, 160], [128, 8, 160], [256, 4, 160], + [512, 2, 160]], + t_shapes=[[640, 48], [320, 96], [160, 192]], + qk_dim=128, + name="loss_afd"): + super().__init__() + assert isinstance(model_name_pair, list) + self.model_name_pair = model_name_pair + self.student_keys = student_keys + self.teacher_keys = teacher_keys + self.s_shapes = [[1] + s_i for s_i in s_shapes] + self.t_shapes = [[1] + t_i for t_i in t_shapes] + self.qk_dim = qk_dim + self.n_t, self.unique_t_shapes = unique_shape(self.t_shapes) + self.attention = Attention(self.qk_dim, self.t_shapes, self.s_shapes, + self.n_t, self.unique_t_shapes) + self.name = name + + def forward(self, predicts, batch): + s_features_dict = predicts[self.model_name_pair[0]] + t_features_dict = predicts[self.model_name_pair[1]] + + g_s = [s_features_dict[key] for key in self.student_keys] + g_t = [t_features_dict[key] for key in self.teacher_keys] + + loss = self.attention(g_s, g_t) + sum_loss = sum(loss) + + loss_dict = dict() + loss_dict[self.name] = sum_loss + + return loss_dict + + +class Attention(nn.Layer): + def __init__(self, qk_dim, t_shapes, s_shapes, n_t, unique_t_shapes): + super().__init__() + self.qk_dim = qk_dim + self.n_t = n_t + + self.p_t = self.create_parameter( + shape=[len(t_shapes), qk_dim], + 
default_initializer=nn.initializer.XavierNormal()) + self.p_s = self.create_parameter( + shape=[len(s_shapes), qk_dim], + default_initializer=nn.initializer.XavierNormal()) + + def forward(self, g_s, g_t): + bilinear_key, h_hat_s_all = g_s + query, h_t_all = g_t + + p_logit = paddle.matmul(self.p_t, self.p_s.t()) + + logit = paddle.add( + paddle.einsum('bstq,btq->bts', bilinear_key, query), + p_logit) / np.sqrt(self.qk_dim) + atts = F.softmax(logit, axis=2) # b x t x s + + loss = [] + + for i, (n, h_t) in enumerate(zip(self.n_t, h_t_all)): + h_hat_s = h_hat_s_all[n] + diff = self.cal_diff(h_hat_s, h_t, atts[:, i]) + loss.append(diff) + return loss + + def cal_diff(self, v_s, v_t, att): + diff = (v_s - v_t.unsqueeze(1)).pow(2).mean(2) + diff = paddle.multiply(diff, att).sum(1).mean() + return diff diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/ccssl_loss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/ccssl_loss.py new file mode 100644 index 000000000..1b3b71d56 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/ccssl_loss.py @@ -0,0 +1,19 @@ +from ppcls.engine.train.train import forward +from .softsuploss import SoftSupConLoss +import copy +import paddle.nn as nn + + +class CCSSLCELoss(nn.Layer): + def __init__(self, **kwargs): + super(CCSSLCELoss, self).__init__() + self.celoss = nn.CrossEntropyLoss(reduction='none') + + def forward(self, inputs, batch, **kwargs): + p_targets_u_w = batch['p_targets_u_w'] + logits_s1 = batch['logits_s1'] + mask = batch['mask'] + loss_u = self.celoss(logits_s1, p_targets_u_w) * mask + loss_u = loss_u.mean() + + return {'CCSSLCELoss': loss_u} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/celoss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/celoss.py new file mode 100644 index 000000000..2715dee19 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/celoss.py @@ -0,0 +1,76 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
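A short worked example of the label-smoothing path in CELoss below (one-hot encode, apply F.label_smooth, then take the dot product with -log_softmax); the logits, labels and epsilon are toy values:

import paddle
import paddle.nn.functional as F

logits = paddle.to_tensor([[2.0, 0.5, -1.0],
                           [0.1, 1.5,  0.3]])
labels = paddle.to_tensor([0, 1])
epsilon, class_num = 0.1, 3

one_hot = F.one_hot(labels, class_num)
soft = F.label_smooth(one_hot, epsilon=epsilon)   # (1 - eps) * one_hot + eps / class_num
loss = paddle.sum(-F.log_softmax(logits, axis=-1) * soft, axis=-1).mean()
print(float(loss))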
+ +import warnings + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from ppcls.utils import logger + + +class CELoss(nn.Layer): + """ + Cross entropy loss + """ + + def __init__(self, reduction="mean", epsilon=None): + super().__init__() + if epsilon is not None and (epsilon <= 0 or epsilon >= 1): + epsilon = None + self.epsilon = epsilon + assert reduction in ["mean", "sum", "none"] + self.reduction = reduction + + def _labelsmoothing(self, target, class_num): + if len(target.shape) == 1 or target.shape[-1] != class_num: + one_hot_target = F.one_hot(target, class_num) + else: + one_hot_target = target + soft_target = F.label_smooth(one_hot_target, epsilon=self.epsilon) + soft_target = paddle.reshape(soft_target, shape=[-1, class_num]) + return soft_target + + def forward(self, x, label): + if isinstance(x, dict): + x = x["logits"] + if self.epsilon is not None: + class_num = x.shape[-1] + label = self._labelsmoothing(label, class_num) + x = -F.log_softmax(x, axis=-1) + loss = paddle.sum(x * label, axis=-1) + if self.reduction == 'mean': + loss = loss.mean() + elif self.reduction == 'sum': + loss = loss.sum() + else: + if label.shape[-1] == x.shape[-1]: + label = F.softmax(label, axis=-1) + soft_label = True + else: + soft_label = False + loss = F.cross_entropy( + x, + label=label, + soft_label=soft_label, + reduction=self.reduction) + return {"CELoss": loss} + + +class MixCELoss(object): + def __init__(self, *args, **kwargs): + msg = "\"MixCELos\" is deprecated, please use \"CELoss\" instead." + logger.error(DeprecationWarning(msg)) + raise DeprecationWarning(msg) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/centerloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/centerloss.py new file mode 100644 index 000000000..23a86ee88 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/centerloss.py @@ -0,0 +1,80 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from typing import Dict + +import paddle +import paddle.nn as nn + + +class CenterLoss(nn.Layer): + """Center loss + paper : [A Discriminative Feature Learning Approach for Deep Face Recognition](https://link.springer.com/content/pdf/10.1007%2F978-3-319-46478-7_31.pdf) + code reference: https://github.com/michuanhaohao/reid-strong-baseline/blob/master/layers/center_loss.py#L7 + Args: + num_classes (int): number of classes. + feat_dim (int): number of feature dimensions. 
+ feature_from (str): feature from "backbone" or "features" + """ + + def __init__(self, + num_classes: int, + feat_dim: int, + feature_from: str="features"): + super(CenterLoss, self).__init__() + self.num_classes = num_classes + self.feat_dim = feat_dim + self.feature_from = feature_from + random_init_centers = paddle.randn( + shape=[self.num_classes, self.feat_dim]) + self.centers = self.create_parameter( + shape=(self.num_classes, self.feat_dim), + default_initializer=nn.initializer.Assign(random_init_centers)) + self.add_parameter("centers", self.centers) + + def __call__(self, input: Dict[str, paddle.Tensor], + target: paddle.Tensor) -> Dict[str, paddle.Tensor]: + """compute center loss. + + Args: + input (Dict[str, paddle.Tensor]): {'features': (batch_size, feature_dim), ...}. + target (paddle.Tensor): ground truth label with shape (batch_size, ). + + Returns: + Dict[str, paddle.Tensor]: {'CenterLoss': loss}. + """ + feats = input[self.feature_from] + labels = target + + # squeeze labels to shape (batch_size, ) + if labels.ndim >= 2 and labels.shape[-1] == 1: + labels = paddle.squeeze(labels, axis=[-1]) + + batch_size = feats.shape[0] + distmat = paddle.pow(feats, 2).sum(axis=1, keepdim=True).expand([batch_size, self.num_classes]) + \ + paddle.pow(self.centers, 2).sum(axis=1, keepdim=True).expand([self.num_classes, batch_size]).t() + distmat = distmat.addmm(x=feats, y=self.centers.t(), beta=1, alpha=-2) + + classes = paddle.arange(self.num_classes).astype(labels.dtype) + labels = labels.unsqueeze(1).expand([batch_size, self.num_classes]) + mask = labels.equal(classes.expand([batch_size, self.num_classes])) + + dist = distmat * mask.astype(feats.dtype) + loss = dist.clip(min=1e-12, max=1e+12).sum() / batch_size + # return loss + return {'CenterLoss': loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/comfunc.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/comfunc.py new file mode 100644 index 000000000..277bdd6b5 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/comfunc.py @@ -0,0 +1,45 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
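The distance matrix that CenterLoss above assembles with expand and addmm is a batched squared Euclidean distance to the class centers; a compact restatement on toy tensors (the shapes and labels are made up):

import paddle
import paddle.nn.functional as F

paddle.seed(0)
feats   = paddle.randn([4, 8])          # (batch_size, feat_dim)
centers = paddle.randn([3, 8])          # (num_classes, feat_dim)
labels  = paddle.to_tensor([0, 2, 1, 2])

# ||f - c||^2 = ||f||^2 + ||c||^2 - 2 * f.c for every (sample, center) pair
dist = (feats * feats).sum(1, keepdim=True) \
     + (centers * centers).sum(1).unsqueeze(0) \
     - 2.0 * paddle.matmul(feats, centers, transpose_y=True)      # shape (4, 3)

# keep only the distance to each sample's own class center, then average over the batch
mask = F.one_hot(labels, 3).astype(dist.dtype)
loss = (dist * mask).clip(min=1e-12).sum() / feats.shape[0]
print(float(loss))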
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + + +def rerange_index(batch_size, samples_each_class): + tmp = np.arange(0, batch_size * batch_size) + tmp = tmp.reshape(-1, batch_size) + rerange_index = [] + + for i in range(batch_size): + step = i // samples_each_class + start = step * samples_each_class + end = (step + 1) * samples_each_class + + pos_idx = [] + neg_idx = [] + for j, k in enumerate(tmp[i]): + if j >= start and j < end: + if j == i: + pos_idx.insert(0, k) + else: + pos_idx.append(k) + else: + neg_idx.append(k) + rerange_index += (pos_idx + neg_idx) + + rerange_index = np.array(rerange_index).astype(np.int32) + return rerange_index diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/contrasiveloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/contrasiveloss.py new file mode 100644 index 000000000..d27dbe22e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/contrasiveloss.py @@ -0,0 +1,152 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from typing import Dict + +import paddle +import paddle.nn as nn +from ppcls.loss.xbm import CrossBatchMemory + + +class ContrastiveLoss(nn.Layer): + """ContrastiveLoss + + Args: + margin (float): margin + embedding_size (int): number of embedding's dimension + normalize_feature (bool, optional): whether to normalize embedding. Defaults to True. + epsilon (float, optional): epsilon. Defaults to 1e-5. + feature_from (str, optional): which key embedding from input dict. Defaults to "features". 
+ """ + + def __init__(self, + margin: float, + embedding_size: int, + normalize_feature=True, + epsilon: float=1e-5, + feature_from: str="features"): + super(ContrastiveLoss, self).__init__() + self.margin = margin + self.embedding_size = embedding_size + self.normalize_feature = normalize_feature + self.epsilon = epsilon + self.feature_from = feature_from + + def forward(self, input: Dict[str, paddle.Tensor], + target: paddle.Tensor) -> Dict[str, paddle.Tensor]: + feats = input[self.feature_from] + labels = target + + # normalize along feature dim + if self.normalize_feature: + feats = nn.functional.normalize(feats, p=2, axis=1) + + # squeeze labels to shape (batch_size, ) + if labels.ndim >= 2 and labels.shape[-1] == 1: + labels = paddle.squeeze(labels, axis=[-1]) + + loss = self._compute_loss(feats, target, feats, target) + + return {'ContrastiveLoss': loss} + + def _compute_loss(self, + inputs_q: paddle.Tensor, + targets_q: paddle.Tensor, + inputs_k: paddle.Tensor, + targets_k: paddle.Tensor) -> paddle.Tensor: + batch_size = inputs_q.shape[0] + # Compute similarity matrix + sim_mat = paddle.matmul(inputs_q, inputs_k.t()) + + loss = [] + for i in range(batch_size): + pos_pair_ = paddle.masked_select(sim_mat[i], + targets_q[i] == targets_k) + pos_pair_ = paddle.masked_select(pos_pair_, + pos_pair_ < 1 - self.epsilon) + + neg_pair_ = paddle.masked_select(sim_mat[i], + targets_q[i] != targets_k) + neg_pair = paddle.masked_select(neg_pair_, neg_pair_ > self.margin) + + pos_loss = paddle.sum(-pos_pair_ + 1) + + if len(neg_pair) > 0: + neg_loss = paddle.sum(neg_pair) + else: + neg_loss = 0 + loss.append(pos_loss + neg_loss) + + loss = sum(loss) / batch_size + return loss + + +class ContrastiveLoss_XBM(ContrastiveLoss): + """ContrastiveLoss with CrossBatchMemory + + Args: + xbm_size (int): size of memory bank + xbm_weight (int): weight of CrossBatchMemory's loss + start_iter (int): store embeddings after start_iter + margin (float): margin + embedding_size (int): number of embedding's dimension + epsilon (float, optional): epsilon. Defaults to 1e-5. + normalize_feature (bool, optional): whether to normalize embedding. Defaults to True. + feature_from (str, optional): which key embedding from input dict. Defaults to "features". 
+ """ + + def __init__(self, + xbm_size: int, + xbm_weight: int, + start_iter: int, + margin: float, + embedding_size: int, + epsilon: float=1e-5, + normalize_feature=True, + feature_from: str="features"): + super(ContrastiveLoss_XBM, self).__init__( + margin, embedding_size, normalize_feature, epsilon, feature_from) + self.xbm = CrossBatchMemory(xbm_size, embedding_size) + self.xbm_weight = xbm_weight + self.start_iter = start_iter + self.iter = 0 + + def __call__(self, input: Dict[str, paddle.Tensor], + target: paddle.Tensor) -> Dict[str, paddle.Tensor]: + feats = input[self.feature_from] + labels = target + + # normalize along feature dim + if self.normalize_feature: + feats = nn.functional.normalize(feats, p=2, axis=1) + + # squeeze labels to shape (batch_size, ) + if labels.ndim >= 2 and labels.shape[-1] == 1: + labels = paddle.squeeze(labels, axis=[-1]) + + loss = self._compute_loss(feats, labels, feats, labels) + + # compute contrastive loss from memory bank + self.iter += 1 + if self.iter > self.start_iter: + self.xbm.enqueue_dequeue(feats.detach(), labels.detach()) + xbm_feats, xbm_labels = self.xbm.get() + xbm_loss = self._compute_loss(feats, labels, xbm_feats, xbm_labels) + loss = loss + self.xbm_weight * xbm_loss + + return {'ContrastiveLoss_XBM': loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/deephashloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/deephashloss.py new file mode 100644 index 000000000..7dda519a8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/deephashloss.py @@ -0,0 +1,149 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +import paddle +import paddle.nn as nn + + +class DSHSDLoss(nn.Layer): + """ + # DSHSD(IEEE ACCESS 2019) + # paper [Deep Supervised Hashing Based on Stable Distribution](https://ieeexplore.ieee.org/document/8648432/) + # code reference: https://github.com/swuxyj/DeepHash-pytorch/blob/master/DSHSD.py + """ + + def __init__(self, alpha, multi_label=False): + super(DSHSDLoss, self).__init__() + self.alpha = alpha + self.multi_label = multi_label + + def forward(self, input, label): + features = input["features"] + logits = input["logits"] + + features_temp1 = paddle.unsqueeze(features, 1) + features_temp2 = paddle.unsqueeze(features, 0) + dist = features_temp1 - features_temp2 + dist = paddle.square(dist) + dist = paddle.sum(dist, axis=2) + + n_class = logits.shape[1] + labels = paddle.nn.functional.one_hot(label, n_class) + labels = labels.squeeze().astype("float32") + + s = paddle.matmul(labels, labels, transpose_y=True) + s = (s == 0).astype("float32") + margin = 2 * features.shape[1] + Ld = (1 - s) / 2 * dist + s / 2 * (margin - dist).clip(min=0) + Ld = Ld.mean() + + if self.multi_label: + Lc_temp = (1 + (-logits).exp()).log() + Lc = (logits - labels * logits + Lc_temp).sum(axis=1) + else: + probs = paddle.nn.functional.softmax(logits) + Lc = (-probs.log() * labels).sum(axis=1) + Lc = Lc.mean() + + loss = Lc + Ld * self.alpha + return {"dshsdloss": loss} + + +class LCDSHLoss(nn.Layer): + """ + # paper [Locality-Constrained Deep Supervised Hashing for Image Retrieval](https://www.ijcai.org/Proceedings/2017/0499.pdf) + # code reference: https://github.com/swuxyj/DeepHash-pytorch/blob/master/LCDSH.py + """ + + def __init__(self, n_class, _lambda): + super(LCDSHLoss, self).__init__() + self._lambda = _lambda + self.n_class = n_class + + def forward(self, input, label): + features = input["features"] + labels = paddle.nn.functional.one_hot(label, self.n_class) + labels = labels.squeeze().astype("float32") + + s = paddle.matmul(labels, labels, transpose_y=True) + s = 2 * (s > 0).astype("float32") - 1 + + inner_product = paddle.matmul(features, features, transpose_y=True) + inner_product = inner_product * 0.5 + inner_product = inner_product.clip(min=-50, max=50) + L1 = paddle.log(1 + paddle.exp(-s * inner_product)) + L1 = L1.mean() + + binary_features = features.sign() + + inner_product_ = paddle.matmul( + binary_features, binary_features, transpose_y=True) + inner_product_ = inner_product_ * 0.5 + sigmoid = paddle.nn.Sigmoid() + L2 = (sigmoid(inner_product) - sigmoid(inner_product_)).pow(2) + L2 = L2.mean() + + loss = L1 + self._lambda * L2 + return {"lcdshloss": loss} + + +class DCHLoss(paddle.nn.Layer): + """ + # paper [Deep Cauchy Hashing for Hamming Space Retrieval] + URL:(http://ise.thss.tsinghua.edu.cn/~mlong/doc/deep-cauchy-hashing-cvpr18.pdf) + # code reference: https://github.com/swuxyj/DeepHash-pytorch/blob/master/DCH.py + """ + + def __init__(self, gamma, _lambda, n_class): + super(DCHLoss, self).__init__() + self.gamma = gamma + self._lambda = _lambda + self.n_class = n_class + + def distance(self, feature_i, feature_j): + assert feature_i.shape[1] == feature_j.shape[ + 1], "feature len of feature_i and feature_j is different, please check whether the featurs are right" + K = feature_i.shape[1] + inner_product = paddle.matmul(feature_i, feature_j, transpose_y=True) + + len_i = feature_i.pow(2).sum(axis=1, keepdim=True).pow(0.5) + len_j = feature_j.pow(2).sum(axis=1, keepdim=True).pow(0.5) + norm = paddle.matmul(len_i, len_j, transpose_y=True) + cos = inner_product / 
norm.clip(min=0.0001) + dist = (1 - cos.clip(max=0.99)) * K / 2 + return dist + + def forward(self, input, label): + features = input["features"] + labels = paddle.nn.functional.one_hot(label, self.n_class) + labels = labels.squeeze().astype("float32") + + s = paddle.matmul(labels, labels, transpose_y=True).astype("float32") + if (1 - s).sum() != 0 and s.sum() != 0: + positive_w = s * s.numel() / s.sum() + negative_w = (1 - s) * s.numel() / (1 - s).sum() + w = positive_w + negative_w + else: + w = 1 + + dist_matric = self.distance(features, features) + cauchy_loss = w * (s * paddle.log(dist_matric / self.gamma) + + paddle.log(1 + self.gamma / dist_matric)) + + all_one = paddle.ones_like(features, dtype="float32") + dist_to_one = self.distance(features.abs(), all_one) + quantization_loss = paddle.log(1 + dist_to_one / self.gamma) + + loss = cauchy_loss.mean() + self._lambda * quantization_loss.mean() + return {"dchloss": loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/dist_loss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/dist_loss.py new file mode 100644 index 000000000..78c8e12ff --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/dist_loss.py @@ -0,0 +1,52 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +def cosine_similarity(a, b, eps=1e-8): + return (a * b).sum(1) / (a.norm(axis=1) * b.norm(axis=1) + eps) + + +def pearson_correlation(a, b, eps=1e-8): + return cosine_similarity(a - a.mean(1).unsqueeze(1), + b - b.mean(1).unsqueeze(1), eps) + + +def inter_class_relation(y_s, y_t): + return 1 - pearson_correlation(y_s, y_t).mean() + + +def intra_class_relation(y_s, y_t): + return inter_class_relation(y_s.transpose([1, 0]), y_t.transpose([1, 0])) + + +class DISTLoss(nn.Layer): + # DISTLoss + # paper [Knowledge Distillation from A Stronger Teacher](https://arxiv.org/pdf/2205.10536v1.pdf) + # code reference: https://github.com/hunto/image_classification_sota/blob/d4f15a0494/lib/models/losses/dist_kd.py + def __init__(self, beta=1.0, gamma=1.0): + super().__init__() + self.beta = beta + self.gamma = gamma + + def forward(self, z_s, z_t): + y_s = F.softmax(z_s, axis=-1) + y_t = F.softmax(z_t, axis=-1) + inter_loss = inter_class_relation(y_s, y_t) + intra_loss = intra_class_relation(y_s, y_t) + kd_loss = self.beta * inter_loss + self.gamma * intra_loss + return kd_loss diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/distanceloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/distanceloss.py new file mode 100644 index 000000000..0a09f0cb2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/distanceloss.py @@ -0,0 +1,43 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. 
+#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from paddle.nn import L1Loss +from paddle.nn import MSELoss as L2Loss +from paddle.nn import SmoothL1Loss + + +class DistanceLoss(nn.Layer): + """ + DistanceLoss: + mode: loss mode + """ + + def __init__(self, mode="l2", **kargs): + super().__init__() + assert mode in ["l1", "l2", "smooth_l1"] + if mode == "l1": + self.loss_func = nn.L1Loss(**kargs) + elif mode == "l2": + self.loss_func = nn.MSELoss(**kargs) + elif mode == "smooth_l1": + self.loss_func = nn.SmoothL1Loss(**kargs) + self.mode = mode + + def forward(self, x, y): + loss = self.loss_func(x, y) + return {"loss_{}".format(self.mode): loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/distillationloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/distillationloss.py new file mode 100644 index 000000000..6ccbbb840 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/distillationloss.py @@ -0,0 +1,426 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
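+# The Distillation*Loss classes in this file share one wrapper pattern: each
+# holds a base loss, looks up the student/teacher outputs in `predicts` by the
+# names listed in `model_name_pairs` (optionally indexing a sub-dict via
+# `key`), and returns a dict of named loss terms. A minimal usage sketch, with
+# illustrative names that are not part of this patch:
+#
+#     loss_fn = DistillationCELoss(
+#         model_name_pairs=[["Student", "Teacher"]], key="logits")
+#     predicts = {"Student": {"logits": s_out}, "Teacher": {"logits": t_out}}
+#     loss_dict = loss_fn(predicts, batch=None)  # batch is unused by this wrapper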
+ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from .celoss import CELoss +from .dmlloss import DMLLoss +from .distanceloss import DistanceLoss +from .rkdloss import RKdAngle, RkdDistance +from .kldivloss import KLDivLoss +from .dkdloss import DKDLoss +from .wslloss import WSLLoss +from .dist_loss import DISTLoss +from .multilabelloss import MultiLabelLoss +from .mgd_loss import MGDLoss +from .skdloss import SKDLoss +from .pefdloss import PEFDLoss + + +class DistillationCELoss(CELoss): + """ + DistillationCELoss + """ + + def __init__(self, + model_name_pairs=[], + epsilon=None, + key=None, + name="loss_ce"): + super().__init__(epsilon=epsilon) + assert isinstance(model_name_pairs, list) + self.key = key + self.model_name_pairs = model_name_pairs + self.name = name + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + loss = super().forward(out1, out2) + for key in loss: + loss_dict["{}_{}_{}".format(key, pair[0], pair[1])] = loss[key] + return loss_dict + + +class DistillationGTCELoss(CELoss): + """ + DistillationGTCELoss + """ + + def __init__(self, + model_names=[], + epsilon=None, + key=None, + name="loss_gt_ce"): + super().__init__(epsilon=epsilon) + assert isinstance(model_names, list) + self.key = key + self.model_names = model_names + self.name = name + + def forward(self, predicts, batch): + loss_dict = dict() + for name in self.model_names: + out = predicts[name] + if self.key is not None: + out = out[self.key] + loss = super().forward(out, batch) + for key in loss: + loss_dict["{}_{}".format(key, name)] = loss[key] + return loss_dict + + +class DistillationDMLLoss(DMLLoss): + """ + """ + + def __init__(self, + model_name_pairs=[], + act="softmax", + weight_ratio=False, + sum_across_class_dim=False, + key=None, + name="loss_dml"): + super().__init__(act=act, sum_across_class_dim=sum_across_class_dim) + assert isinstance(model_name_pairs, list) + self.key = key + self.model_name_pairs = model_name_pairs + self.name = name + self.weight_ratio = weight_ratio + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + if self.weight_ratio is True: + loss = super().forward(out1, out2, batch) + else: + loss = super().forward(out1, out2) + if isinstance(loss, dict): + for key in loss: + loss_dict["{}_{}_{}_{}".format(key, pair[0], pair[1], + idx)] = loss[key] + else: + loss_dict["{}_{}".format(self.name, idx)] = loss + return loss_dict + + +class DistillationDistanceLoss(DistanceLoss): + """ + """ + + def __init__(self, + mode="l2", + model_name_pairs=[], + act=None, + key=None, + name="loss_", + **kargs): + super().__init__(mode=mode, **kargs) + assert isinstance(model_name_pairs, list) + self.key = key + self.model_name_pairs = model_name_pairs + self.name = name + mode + assert act in [None, "sigmoid", "softmax"] + if act == "sigmoid": + self.act = nn.Sigmoid() + elif act == "softmax": + self.act = nn.Softmax(axis=-1) + else: + self.act = None + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + 
if self.act is not None: + out1 = self.act(out1) + out2 = self.act(out2) + loss = super().forward(out1, out2) + for key in loss: + loss_dict["{}_{}_{}".format(self.name, key, idx)] = loss[key] + return loss_dict + + +class DistillationRKDLoss(nn.Layer): + def __init__(self, + target_size=None, + model_name_pairs=(["Student", "Teacher"], ), + student_keepkeys=[], + teacher_keepkeys=[]): + super().__init__() + self.student_keepkeys = student_keepkeys + self.teacher_keepkeys = teacher_keepkeys + self.model_name_pairs = model_name_pairs + assert len(self.student_keepkeys) == len(self.teacher_keepkeys) + + self.rkd_angle_loss = RKdAngle(target_size=target_size) + self.rkd_dist_loss = RkdDistance(target_size=target_size) + + def __call__(self, predicts, batch): + loss_dict = {} + for m1, m2 in self.model_name_pairs: + for idx, ( + student_name, teacher_name + ) in enumerate(zip(self.student_keepkeys, self.teacher_keepkeys)): + student_out = predicts[m1][student_name] + teacher_out = predicts[m2][teacher_name] + + loss_dict[f"loss_angle_{idx}_{m1}_{m2}"] = self.rkd_angle_loss( + student_out, teacher_out) + loss_dict[f"loss_dist_{idx}_{m1}_{m2}"] = self.rkd_dist_loss( + student_out, teacher_out) + + return loss_dict + + +class DistillationKLDivLoss(KLDivLoss): + """ + DistillationKLDivLoss + """ + + def __init__(self, + model_name_pairs=[], + temperature=4, + key=None, + name="loss_kl"): + super().__init__(temperature=temperature) + assert isinstance(model_name_pairs, list) + self.key = key + self.model_name_pairs = model_name_pairs + self.name = name + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + loss = super().forward(out1, out2) + for key in loss: + loss_dict["{}_{}_{}".format(key, pair[0], pair[1])] = loss[key] + return loss_dict + + +class DistillationDKDLoss(DKDLoss): + """ + DistillationDKDLoss + """ + + def __init__(self, + model_name_pairs=[], + key=None, + temperature=1.0, + alpha=1.0, + beta=1.0, + use_target_as_gt=False, + name="loss_dkd"): + super().__init__( + temperature=temperature, + alpha=alpha, + beta=beta, + use_target_as_gt=use_target_as_gt) + self.key = key + self.model_name_pairs = model_name_pairs + self.name = name + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + loss = super().forward(out1, out2, batch) + loss_dict[f"{self.name}_{pair[0]}_{pair[1]}"] = loss + return loss_dict + + +class DistillationWSLLoss(WSLLoss): + """ + DistillationWSLLoss + """ + + def __init__(self, + model_name_pairs=[], + key=None, + temperature=2.0, + name="wsl_loss"): + super().__init__(temperature) + self.model_name_pairs = model_name_pairs + self.key = key + self.name = name + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + loss = super().forward(out1, out2, batch) + loss_dict[f"{self.name}_{pair[0]}_{pair[1]}"] = loss + return loss_dict + + +class DistillationSKDLoss(SKDLoss): + """ + DistillationSKDLoss + """ + + def __init__(self, + model_name_pairs=[], + key=None, + temperature=1.0, + multiplier=2.0, + 
alpha=0.9, + use_target_as_gt=False, + name="skd_loss"): + super().__init__(temperature, multiplier, alpha, use_target_as_gt) + self.model_name_pairs = model_name_pairs + self.key = key + self.name = name + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + loss = super().forward(out1, out2, batch) + loss_dict[f"{self.name}_{pair[0]}_{pair[1]}"] = loss + return loss_dict + + +class DistillationMultiLabelLoss(MultiLabelLoss): + """ + DistillationMultiLabelLoss + """ + + def __init__(self, + model_names=[], + epsilon=None, + size_sum=False, + weight_ratio=False, + key=None, + name="loss_mll"): + super().__init__( + epsilon=epsilon, size_sum=size_sum, weight_ratio=weight_ratio) + assert isinstance(model_names, list) + self.key = key + self.model_names = model_names + self.name = name + + def forward(self, predicts, batch): + loss_dict = dict() + for name in self.model_names: + out = predicts[name] + if self.key is not None: + out = out[self.key] + loss = super().forward(out, batch) + for key in loss: + loss_dict["{}_{}".format(key, name)] = loss[key] + return loss_dict + + +class DistillationDISTLoss(DISTLoss): + """ + DistillationDISTLoss + """ + + def __init__(self, + model_name_pairs=[], + key=None, + beta=1.0, + gamma=1.0, + name="loss_dist"): + super().__init__(beta=beta, gamma=gamma) + self.key = key + self.model_name_pairs = model_name_pairs + self.name = name + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + loss = super().forward(out1, out2) + loss_dict[f"{self.name}_{pair[0]}_{pair[1]}"] = loss + return loss_dict + + +class DistillationPairLoss(nn.Layer): + """ + DistillationPairLoss + """ + + def __init__(self, + base_loss_name, + model_name_pairs=[], + s_key=None, + t_key=None, + name="loss", + **kwargs): + super().__init__() + self.loss_func = eval(base_loss_name)(**kwargs) + assert type(s_key) == type(t_key) + self.s_key = s_key + self.t_key = t_key + self.model_name_pairs = model_name_pairs + self.name = name + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if isinstance(self.s_key, str): + out1 = out1[self.s_key] + out2 = out2[self.t_key] + else: + out1 = [out1[k] if k is not None else out1 for k in self.s_key] + out2 = [out2[k] if k is not None else out2 for k in self.t_key] + + loss = self.loss_func.forward(out1, out2) + if isinstance(loss, dict): + for k in loss: + loss_dict[ + f"{self.name}_{idx}_{pair[0]}_{pair[1]}_{k}"] = loss[k] + else: + loss_dict[f"{self.name}_{idx}_{pair[0]}_{pair[1]}"] = loss + return loss_dict diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/dkdloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/dkdloss.py new file mode 100644 index 000000000..bf9224e31 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/dkdloss.py @@ -0,0 +1,68 @@ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class DKDLoss(nn.Layer): + """ + DKDLoss + Reference: https://arxiv.org/abs/2203.08679 + Code was heavily based on https://github.com/megvii-research/mdistiller + """ + + def __init__(self, + 
temperature=1.0, + alpha=1.0, + beta=1.0, + use_target_as_gt=False): + super().__init__() + self.temperature = temperature + self.alpha = alpha + self.beta = beta + self.use_target_as_gt = use_target_as_gt + + def forward(self, logits_student, logits_teacher, target=None): + if target is None or self.use_target_as_gt: + target = logits_teacher.argmax(axis=-1) + gt_mask = _get_gt_mask(logits_student, target) + other_mask = 1 - gt_mask + pred_student = F.softmax(logits_student / self.temperature, axis=1) + pred_teacher = F.softmax(logits_teacher / self.temperature, axis=1) + pred_student = cat_mask(pred_student, gt_mask, other_mask) + pred_teacher = cat_mask(pred_teacher, gt_mask, other_mask) + log_pred_student = paddle.log(pred_student) + tckd_loss = (F.kl_div( + log_pred_student, pred_teacher, + reduction='sum') * (self.temperature**2) / target.shape[0]) + pred_teacher_part2 = F.softmax( + logits_teacher / self.temperature - 1000.0 * gt_mask, axis=1) + log_pred_student_part2 = F.log_softmax( + logits_student / self.temperature - 1000.0 * gt_mask, axis=1) + nckd_loss = (F.kl_div( + log_pred_student_part2, pred_teacher_part2, + reduction='sum') * (self.temperature**2) / target.shape[0]) + return self.alpha * tckd_loss + self.beta * nckd_loss + + +def _get_gt_mask(logits, target): + target = target.reshape([-1]).unsqueeze(1) + updates = paddle.ones_like(target) + mask = scatter( + paddle.zeros_like(logits), target, updates.astype('float32')) + return mask + + +def cat_mask(t, mask1, mask2): + t1 = (t * mask1).sum(axis=1, keepdim=True) + t2 = (t * mask2).sum(axis=1, keepdim=True) + rt = paddle.concat([t1, t2], axis=1) + return rt + + +def scatter(x, index, updates): + i, j = index.shape + grid_x, grid_y = paddle.meshgrid(paddle.arange(i), paddle.arange(j)) + index = paddle.stack([grid_x.flatten(), index.flatten()], axis=1) + updates_index = paddle.stack([grid_x.flatten(), grid_y.flatten()], axis=1) + updates = paddle.gather_nd(updates, index=updates_index) + return paddle.scatter_nd_add(x, index, updates) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/dmlloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/dmlloss.py new file mode 100644 index 000000000..e8983ed08 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/dmlloss.py @@ -0,0 +1,62 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
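+# DMLLoss below is the symmetric KL term of Deep Mutual Learning: both inputs
+# go through `act` (softmax or sigmoid), then KL(x||target) and KL(target||x)
+# are averaged. A minimal sketch, with illustrative tensor names:
+#
+#     dml = DMLLoss(act="softmax")
+#     out = dml(student_logits, teacher_logits)  # {"DMLLoss": scalar tensor}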
+ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from ppcls.loss.multilabelloss import ratio2weight + + +class DMLLoss(nn.Layer): + """ + DMLLoss + """ + + def __init__(self, act="softmax", sum_across_class_dim=False, eps=1e-12): + super().__init__() + if act is not None: + assert act in ["softmax", "sigmoid"] + if act == "softmax": + self.act = nn.Softmax(axis=-1) + elif act == "sigmoid": + self.act = nn.Sigmoid() + else: + self.act = None + self.eps = eps + self.sum_across_class_dim = sum_across_class_dim + + def _kldiv(self, x, target): + class_num = x.shape[-1] + cost = target * paddle.log( + (target + self.eps) / (x + self.eps)) * class_num + return cost + + def forward(self, x, target, gt_label=None): + if self.act is not None: + x = self.act(x) + target = self.act(target) + loss = self._kldiv(x, target) + self._kldiv(target, x) + loss = loss / 2 + + # for multi-label dml loss + if gt_label is not None: + gt_label, label_ratio = gt_label[:, 0, :], gt_label[:, 1, :] + targets_mask = paddle.cast(gt_label > 0.5, 'float32') + weight = ratio2weight(targets_mask, paddle.to_tensor(label_ratio)) + weight = weight * (gt_label > -1) + loss = loss * weight + + loss = loss.sum(1).mean() if self.sum_across_class_dim else loss.mean() + return {"DMLLoss": loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/emlloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/emlloss.py new file mode 100644 index 000000000..38b707fe1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/emlloss.py @@ -0,0 +1,102 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
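+# EmlLoss below, like the other rerange_index-based metric losses in this
+# patch, assumes a P x K batch layout (batch_size = P * samples_each_class,
+# same-class samples adjacent). A minimal sketch with the default sizes:
+#
+#     eml = EmlLoss(batch_size=40, samples_each_class=2)
+#     out = eml({"features": feats})  # {"emlloss": scalar tensor}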
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import paddle +import numpy as np +from .comfunc import rerange_index + + +class EmlLoss(paddle.nn.Layer): + """Ensemble Metric Learning Loss + paper: [Large Scale Strongly Supervised Ensemble Metric Learning, with Applications to Face Verification and Retrieval](https://arxiv.org/pdf/1212.6094.pdf) + code reference: https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/metric_learning/losses/emlloss.py + """ + + def __init__(self, batch_size=40, samples_each_class=2): + super(EmlLoss, self).__init__() + assert (batch_size % samples_each_class == 0) + self.samples_each_class = samples_each_class + self.batch_size = batch_size + self.rerange_index = rerange_index(batch_size, samples_each_class) + self.thresh = 20.0 + self.beta = 100000 + + def surrogate_function(self, beta, theta, bias): + x = theta * paddle.exp(bias) + output = paddle.log(1 + beta * x) / math.log(1 + beta) + return output + + def surrogate_function_approximate(self, beta, theta, bias): + output = ( + paddle.log(theta) + bias + math.log(beta)) / math.log(1 + beta) + return output + + def surrogate_function_stable(self, beta, theta, target, thresh): + max_gap = paddle.to_tensor(thresh, dtype='float32') + max_gap.stop_gradient = True + + target_max = paddle.maximum(target, max_gap) + target_min = paddle.minimum(target, max_gap) + + loss1 = self.surrogate_function(beta, theta, target_min) + loss2 = self.surrogate_function_approximate(beta, theta, target_max) + bias = self.surrogate_function(beta, theta, max_gap) + loss = loss1 + loss2 - bias + return loss + + def forward(self, input, target=None): + features = input["features"] + samples_each_class = self.samples_each_class + batch_size = self.batch_size + rerange_index = self.rerange_index + + #calc distance + diffs = paddle.unsqueeze( + features, axis=1) - paddle.unsqueeze( + features, axis=0) + similary_matrix = paddle.sum(paddle.square(diffs), axis=-1) + + tmp = paddle.reshape(similary_matrix, shape=[-1, 1]) + rerange_index = paddle.to_tensor(rerange_index) + tmp = paddle.gather(tmp, index=rerange_index) + similary_matrix = paddle.reshape(tmp, shape=[-1, batch_size]) + + ignore, pos, neg = paddle.split( + similary_matrix, + num_or_sections=[ + 1, samples_each_class - 1, batch_size - samples_each_class + ], + axis=1) + ignore.stop_gradient = True + + pos_max = paddle.max(pos, axis=1, keepdim=True) + pos = paddle.exp(pos - pos_max) + pos_mean = paddle.mean(pos, axis=1, keepdim=True) + + neg_min = paddle.min(neg, axis=1, keepdim=True) + neg = paddle.exp(neg_min - neg) + neg_mean = paddle.mean(neg, axis=1, keepdim=True) + + bias = pos_max - neg_min + theta = paddle.multiply(neg_mean, pos_mean) + + loss = self.surrogate_function_stable(self.beta, theta, bias, + self.thresh) + loss = paddle.mean(loss) + return {"emlloss": loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/googlenetloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/googlenetloss.py new file mode 100644 index 000000000..491311831 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/googlenetloss.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class GoogLeNetLoss(nn.Layer): + """ + Cross entropy loss used after googlenet + reference paper: [https://arxiv.org/pdf/1409.4842v1.pdf](Going Deeper with Convolutions) + """ + + def __init__(self, epsilon=None): + super().__init__() + assert (epsilon is None or epsilon <= 0 or + epsilon >= 1), "googlenet is not support label_smooth" + + def forward(self, inputs, label): + input0, input1, input2 = inputs + if isinstance(input0, dict): + input0 = input0["logits"] + if isinstance(input1, dict): + input1 = input1["logits"] + if isinstance(input2, dict): + input2 = input2["logits"] + + loss0 = F.cross_entropy(input0, label=label, soft_label=False) + loss1 = F.cross_entropy(input1, label=label, soft_label=False) + loss2 = F.cross_entropy(input2, label=label, soft_label=False) + loss = loss0 + 0.3 * loss1 + 0.3 * loss2 + loss = loss.mean() + return {"GooleNetLoss": loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/kldivloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/kldivloss.py new file mode 100644 index 000000000..da6ab02fb --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/kldivloss.py @@ -0,0 +1,33 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class KLDivLoss(nn.Layer): + """ + Distilling the Knowledge in a Neural Network + """ + + def __init__(self, temperature=4): + super(KLDivLoss, self).__init__() + self.T = temperature + + def forward(self, y_s, y_t): + p_s = F.log_softmax(y_s / self.T, axis=1) + p_t = F.softmax(y_t / self.T, axis=1) + loss = F.kl_div(p_s, p_t, reduction='sum') * (self.T**2) / y_s.shape[0] + return {"loss_kldiv": loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/metabinloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/metabinloss.py new file mode 100644 index 000000000..34159bdcd --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/metabinloss.py @@ -0,0 +1,206 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# reference: https://arxiv.org/abs/2011.14670 + +import copy +import numpy as np +import paddle +from paddle import nn +from paddle.nn import functional as F + +from .dist_loss import cosine_similarity +from .celoss import CELoss + + +def euclidean_dist(x, y): + m, n = x.shape[0], y.shape[0] + xx = paddle.pow(x, 2).sum(1, keepdim=True).expand([m, n]) + yy = paddle.pow(y, 2).sum(1, keepdim=True).expand([n, m]).t() + dist = xx + yy - 2 * paddle.matmul(x, y.t()) + dist = dist.clip(min=1e-12).sqrt() # for numerical stability + return dist + + +def hard_example_mining(dist_mat, is_pos, is_neg): + """For each anchor, find the hardest positive and negative sample. + Args: + dist_mat: pairwise distance between samples, shape [N, M] + is_pos: positive index with shape [N, M] + is_neg: negative index with shape [N, M] + Returns: + dist_ap: distance(anchor, positive); shape [N, 1] + dist_an: distance(anchor, negative); shape [N, 1] + """ + + inf = float("inf") + + def _masked_max(tensor, mask, axis): + masked = paddle.multiply(tensor, mask.astype(tensor.dtype)) + neg_inf = paddle.zeros_like(tensor) + neg_inf.stop_gradient = True + neg_inf[paddle.logical_not(mask)] = -inf + return paddle.max(masked + neg_inf, axis=axis, keepdim=True) + + def _masked_min(tensor, mask, axis): + masked = paddle.multiply(tensor, mask.astype(tensor.dtype)) + pos_inf = paddle.zeros_like(tensor) + pos_inf.stop_gradient = True + pos_inf[paddle.logical_not(mask)] = inf + return paddle.min(masked + pos_inf, axis=axis, keepdim=True) + + assert len(dist_mat.shape) == 2 + dist_ap = _masked_max(dist_mat, is_pos, axis=1) + dist_an = _masked_min(dist_mat, is_neg, axis=1) + return dist_ap, dist_an + + +class IntraDomainScatterLoss(nn.Layer): + """ + IntraDomainScatterLoss + + enhance intra-domain diversity and disarrange inter-domain distributions like confusing multiple styles. + + reference: https://arxiv.org/abs/2011.14670 + """ + + def __init__(self, normalize_feature, feature_from): + super(IntraDomainScatterLoss, self).__init__() + self.normalize_feature = normalize_feature + self.feature_from = feature_from + + def forward(self, input, batch): + domains = batch["domain"] + inputs = input[self.feature_from] + + if self.normalize_feature: + inputs = 1. * inputs / (paddle.expand_as( + paddle.norm( + inputs, p=2, axis=-1, keepdim=True), inputs) + 1e-12) + + unique_label = paddle.unique(domains) + features_per_domain = list() + for i, x in enumerate(unique_label): + features_per_domain.append(inputs[x == domains]) + num_domain = len(features_per_domain) + losses = [] + for i in range(num_domain): + features_in_same_domain = features_per_domain[i] + center = paddle.mean(features_in_same_domain, 0) + cos_sim = cosine_similarity( + center.unsqueeze(0), features_in_same_domain) + losses.append(paddle.mean(cos_sim)) + loss = paddle.mean(paddle.stack(losses)) + return {"IntraDomainScatterLoss": loss} + + +class InterDomainShuffleLoss(nn.Layer): + """ + InterDomainShuffleLoss + + pull the negative sample of the interdomain and push the negative sample of the intra-domain, + so that the inter-domain distributions are shuffled. 
+ + reference: https://arxiv.org/abs/2011.14670 + """ + + def __init__(self, normalize_feature=True, feature_from="features"): + super(InterDomainShuffleLoss, self).__init__() + self.feature_from = feature_from + self.normalize_feature = normalize_feature + + def forward(self, input, batch): + target = batch["label"] + domains = batch["domain"] + inputs = input[self.feature_from] + bs = inputs.shape[0] + + if self.normalize_feature: + inputs = 1. * inputs / (paddle.expand_as( + paddle.norm( + inputs, p=2, axis=-1, keepdim=True), inputs) + 1e-12) + + # compute distance + dist_mat = euclidean_dist(inputs, inputs) + + is_same_img = np.zeros(shape=[bs, bs], dtype=bool) + np.fill_diagonal(is_same_img, True) + is_same_img = paddle.to_tensor(is_same_img) + is_diff_instance = target.reshape([bs, 1]).expand([bs, bs])\ + .not_equal(target.reshape([bs, 1]).expand([bs, bs]).t()) + is_same_domain = domains.reshape([bs, 1]).expand([bs, bs])\ + .equal(domains.reshape([bs, 1]).expand([bs, bs]).t()) + is_diff_domain = is_same_domain == False + + is_pos = paddle.logical_or(is_same_img, is_diff_domain) + is_neg = paddle.logical_and(is_diff_instance, is_same_domain) + + dist_ap, dist_an = hard_example_mining(dist_mat, is_pos, is_neg) + + y = paddle.ones_like(dist_an) + loss = F.soft_margin_loss(dist_an - dist_ap, y) + if loss == float('Inf'): + loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=0.3) + return {"InterDomainShuffleLoss": loss} + + +class CELossForMetaBIN(CELoss): + def _labelsmoothing(self, target, class_num): + if len(target.shape) == 1 or target.shape[-1] != class_num: + one_hot_target = F.one_hot(target, class_num) + else: + one_hot_target = target + # epsilon is different from the one in original CELoss + epsilon = class_num / (class_num - 1) * self.epsilon + soft_target = F.label_smooth(one_hot_target, epsilon=epsilon) + soft_target = paddle.reshape(soft_target, shape=[-1, class_num]) + return soft_target + + def forward(self, x, batch): + label = batch["label"] + return super().forward(x, label) + + +class TripletLossForMetaBIN(nn.Layer): + def __init__(self, + margin=1, + normalize_feature=False, + feature_from="feature"): + super(TripletLossForMetaBIN, self).__init__() + self.margin = margin + self.feature_from = feature_from + self.normalize_feature = normalize_feature + + def forward(self, input, batch): + inputs = input[self.feature_from] + targets = batch["label"] + bs = inputs.shape[0] + all_targets = targets + + if self.normalize_feature: + inputs = 1. * inputs / (paddle.expand_as( + paddle.norm( + inputs, p=2, axis=-1, keepdim=True), inputs) + 1e-12) + + dist_mat = euclidean_dist(inputs, inputs) + + is_pos = all_targets.reshape([bs, 1]).expand([bs, bs]).equal( + all_targets.reshape([bs, 1]).expand([bs, bs]).t()) + is_neg = all_targets.reshape([bs, 1]).expand([bs, bs]).not_equal( + all_targets.reshape([bs, 1]).expand([bs, bs]).t()) + dist_ap, dist_an = hard_example_mining(dist_mat, is_pos, is_neg) + + y = paddle.ones_like(dist_an) + loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=self.margin) + return {"TripletLoss": loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/mgd_loss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/mgd_loss.py new file mode 100644 index 000000000..799a91431 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/mgd_loss.py @@ -0,0 +1,84 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from ppcls.utils.initializer import kaiming_normal_ + + +class MGDLoss(nn.Layer): + """Paddle version of `Masked Generative Distillation` + MGDLoss + Reference: https://arxiv.org/abs/2205.01529 + Code was heavily based on https://github.com/yzd-v/MGD + """ + + def __init__( + self, + student_channels, + teacher_channels, + alpha_mgd=1.756, + lambda_mgd=0.15, ): + super().__init__() + self.alpha_mgd = alpha_mgd + self.lambda_mgd = lambda_mgd + + if student_channels != teacher_channels: + self.align = nn.Conv2D( + student_channels, + teacher_channels, + kernel_size=1, + stride=1, + padding=0) + else: + self.align = None + + self.generation = nn.Sequential( + nn.Conv2D( + teacher_channels, teacher_channels, kernel_size=3, padding=1), + nn.ReLU(), + nn.Conv2D( + teacher_channels, teacher_channels, kernel_size=3, padding=1)) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Conv2D): + kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + + def forward(self, pred_s, pred_t): + """Forward function. + Args: + pred_s(Tensor): Bs*C*H*W, student's feature map + pred_t(Tensor): Bs*C*H*W, teacher's feature map + """ + assert pred_s.shape[-2:] == pred_t.shape[-2:] + + if self.align is not None: + pred_s = self.align(pred_s) + + loss = self.get_dis_loss(pred_s, pred_t) * self.alpha_mgd + + return loss + + def get_dis_loss(self, pred_s, pred_t): + loss_mse = nn.MSELoss(reduction='mean') + N, C, _, _ = pred_t.shape + mat = paddle.rand([N, C, 1, 1]) + mat = paddle.where(mat < self.lambda_mgd, 0, 1).astype("float32") + masked_fea = paddle.multiply(pred_s, mat) + new_fea = self.generation(masked_fea) + dis_loss = loss_mse(new_fea, pred_t) + return dis_loss diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/msmloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/msmloss.py new file mode 100644 index 000000000..adf03ef8e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/msmloss.py @@ -0,0 +1,80 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
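+# MSMLoss below mines the single hardest positive pair and hardest negative
+# pair across the whole P x K batch (contrast with TriHardLoss, which mines
+# per anchor). A minimal sketch with the default arguments:
+#
+#     msm = MSMLoss(batch_size=120, samples_each_class=2, margin=0.1)
+#     out = msm({"features": feats})  # {"msmloss": scalar tensor}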
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import paddle +from .comfunc import rerange_index + + +class MSMLoss(paddle.nn.Layer): + """ + paper : [Margin Sample Mining Loss: A Deep Learning Based Method for Person Re-identification](https://arxiv.org/pdf/1710.00478.pdf) + code reference: https://github.com/michuanhaohao/keras_reid/blob/master/reid_tripletcls.py + Margin Sample Mining Loss, based on triplet loss. USE P * K samples. + the batch size is fixed. Batch_size = P * K; but the K may vary between batches. + same label gather together + + supported_metrics = [ + 'euclidean', + 'sqeuclidean', + 'cityblock', + ] + only consider samples_each_class = 2 + """ + + def __init__(self, batch_size=120, samples_each_class=2, margin=0.1): + super(MSMLoss, self).__init__() + self.margin = margin + self.samples_each_class = samples_each_class + self.batch_size = batch_size + self.rerange_index = rerange_index(batch_size, samples_each_class) + + def forward(self, input, target=None): + #normalization + features = input["features"] + features = self._nomalize(features) + samples_each_class = self.samples_each_class + rerange_index = paddle.to_tensor(self.rerange_index) + + #calc sm + diffs = paddle.unsqueeze( + features, axis=1) - paddle.unsqueeze( + features, axis=0) + similary_matrix = paddle.sum(paddle.square(diffs), axis=-1) + + #rerange + tmp = paddle.reshape(similary_matrix, shape=[-1, 1]) + tmp = paddle.gather(tmp, index=rerange_index) + similary_matrix = paddle.reshape(tmp, shape=[-1, self.batch_size]) + + #split + ignore, pos, neg = paddle.split( + similary_matrix, + num_or_sections=[1, samples_each_class - 1, -1], + axis=1) + ignore.stop_gradient = True + + hard_pos = paddle.max(pos) + hard_neg = paddle.min(neg) + + loss = hard_pos + self.margin - hard_neg + loss = paddle.nn.ReLU()(loss) + return {"msmloss": loss} + + def _nomalize(self, input): + input_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + return paddle.divide(input, input_norm) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/multilabelloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/multilabelloss.py new file mode 100644 index 000000000..51ae97838 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/multilabelloss.py @@ -0,0 +1,119 @@ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +def ratio2weight(targets, ratio): + pos_weights = targets * (1. - ratio) + neg_weights = (1. 
- targets) * ratio
+    weights = paddle.exp(neg_weights + pos_weights)
+
+    # for the RAP dataloader, a target element may be 2; with or without label
+    # smoothing, some elements can be greater than 1
+    weights = weights - weights * (targets > 1).astype(weights.dtype)
+
+    return weights
+
+
+class MultiLabelLoss(nn.Layer):
+    """
+    Multi-label loss
+    """
+
+    def __init__(self, epsilon=None, size_sum=False, weight_ratio=False):
+        super().__init__()
+        if epsilon is not None and (epsilon <= 0 or epsilon >= 1):
+            epsilon = None
+        self.epsilon = epsilon
+        self.weight_ratio = weight_ratio
+        self.size_sum = size_sum
+
+    def _labelsmoothing(self, target, class_num):
+        if target.ndim == 1 or target.shape[-1] != class_num:
+            one_hot_target = F.one_hot(target, class_num)
+        else:
+            one_hot_target = target
+        soft_target = F.label_smooth(one_hot_target, epsilon=self.epsilon)
+        soft_target = paddle.reshape(soft_target, shape=[-1, class_num])
+        return soft_target
+
+    def _binary_crossentropy(self, input, target, class_num):
+        if self.weight_ratio:
+            target, label_ratio = target[:, 0, :], target[:, 1, :]
+        elif target.ndim == 3:
+            target = target[:, 0, :]
+        if self.epsilon is not None:
+            target = self._labelsmoothing(target, class_num)
+        cost = F.binary_cross_entropy_with_logits(
+            logit=input, label=target, reduction='none')
+
+        if self.weight_ratio:
+            targets_mask = paddle.cast(target > 0.5, 'float32')
+            weight = ratio2weight(targets_mask, paddle.to_tensor(label_ratio))
+            weight = weight * (target > -1).astype(weight.dtype)
+            cost = cost * weight
+
+        if self.size_sum:
+            cost = cost.sum(1).mean()
+
+        return cost
+
+    def forward(self, x, target):
+        if isinstance(x, dict):
+            x = x["logits"]
+        class_num = x.shape[-1]
+        loss = self._binary_crossentropy(x, target, class_num)
+        loss = loss.mean()
+        return {"MultiLabelLoss": loss}
+
+
+class MultiLabelAsymmetricLoss(nn.Layer):
+    """
+    Multi-label asymmetric loss, introduced by
+    Emanuel Ben-Baruch et al. in https://arxiv.org/pdf/2009.14119v4.pdf.
+ """ + + def __init__(self, + gamma_pos=1, + gamma_neg=4, + clip=0.05, + epsilon=1e-8, + disable_focal_loss_grad=True, + reduction="sum"): + super().__init__() + self.gamma_pos = gamma_pos + self.gamma_neg = gamma_neg + self.clip = clip + self.epsilon = epsilon + self.disable_focal_loss_grad = disable_focal_loss_grad + assert reduction in ["mean", "sum", "none"] + self.reduction = reduction + + def forward(self, x, target): + if isinstance(x, dict): + x = x["logits"] + pred_sigmoid = F.sigmoid(x) + target = target.astype(pred_sigmoid.dtype) + + # Asymmetric Clipping and Basic CE calculation + if self.clip and self.clip > 0: + pt = (1 - pred_sigmoid + self.clip).clip(max=1) \ + * (1 - target) + pred_sigmoid * target + else: + pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target + + # Asymmetric Focusing + if self.disable_focal_loss_grad: + paddle.set_grad_enabled(False) + asymmetric_weight = ( + 1 - pt + ).pow(self.gamma_pos * target + self.gamma_neg * (1 - target)) + if self.disable_focal_loss_grad: + paddle.set_grad_enabled(True) + + loss = -paddle.log(pt.clip(min=self.epsilon)) * asymmetric_weight + + if self.reduction == 'mean': + loss = loss.mean() + elif self.reduction == 'sum': + loss = loss.sum() + return {"MultiLabelAsymmetricLoss": loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/npairsloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/npairsloss.py new file mode 100644 index 000000000..131c799a4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/npairsloss.py @@ -0,0 +1,43 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import paddle + + +class NpairsLoss(paddle.nn.Layer): + """Npair_loss_ + paper [Improved deep metric learning with multi-class N-pair loss objective](https://dl.acm.org/doi/10.5555/3157096.3157304) + code reference: https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/contrib/losses/metric_learning/npairs_loss + """ + + def __init__(self, reg_lambda=0.01): + super(NpairsLoss, self).__init__() + self.reg_lambda = reg_lambda + + def forward(self, input, target=None): + """ + anchor and positive(should include label) + """ + features = input["features"] + reg_lambda = self.reg_lambda + batch_size = features.shape[0] + fea_dim = features.shape[1] + num_class = batch_size // 2 + + #reshape + out_feas = paddle.reshape(features, shape=[-1, 2, fea_dim]) + anc_feas, pos_feas = paddle.split(out_feas, num_or_sections=2, axis=1) + anc_feas = paddle.squeeze(anc_feas, axis=1) + pos_feas = paddle.squeeze(pos_feas, axis=1) + + #get simi matrix + similarity_matrix = paddle.matmul( + anc_feas, pos_feas, transpose_y=True) #get similarity matrix + sparse_labels = paddle.arange(0, num_class, dtype='int64') + xentloss = paddle.nn.CrossEntropyLoss()( + similarity_matrix, sparse_labels) #by default: mean + + #l2 norm + reg = paddle.mean(paddle.sum(paddle.square(features), axis=1)) + l2loss = 0.5 * reg_lambda * reg + return {"npairsloss": xentloss + l2loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/pairwisecosface.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/pairwisecosface.py new file mode 100644 index 000000000..f1fc73024 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/pairwisecosface.py @@ -0,0 +1,64 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class PairwiseCosface(nn.Layer): + """ + paper: Circle Loss: A Unified Perspective of Pair Similarity Optimization + code reference: https://github.com/leoluopy/circle-loss-demonstration/blob/main/circle_loss.py + """ + + def __init__(self, margin, gamma): + super(PairwiseCosface, self).__init__() + self.margin = margin + self.gamma = gamma + + def forward(self, embedding, targets): + if isinstance(embedding, dict): + embedding = embedding['features'] + # Normalize embedding features + embedding = F.normalize(embedding, axis=1) + dist_mat = paddle.matmul(embedding, embedding, transpose_y=True) + + N = dist_mat.shape[0] + is_pos = targets.reshape([N, 1]).expand([N, N]).equal( + paddle.t(targets.reshape([N, 1]).expand([N, N]))).astype('float32') + is_neg = targets.reshape([N, 1]).expand([N, N]).not_equal( + paddle.t(targets.reshape([N, 1]).expand([N, N]))).astype('float32') + + # Mask scores related to itself + is_pos = is_pos - paddle.eye(N, N) + + s_p = dist_mat * is_pos + s_n = dist_mat * is_neg + + logit_p = -self.gamma * s_p + (-99999999.) * (1 - is_pos) + logit_n = self.gamma * (s_n + self.margin) + (-99999999.) * (1 - is_neg + ) + + loss = F.softplus( + paddle.logsumexp( + logit_p, axis=1) + paddle.logsumexp( + logit_n, axis=1)).mean() + + return {"PairwiseCosface": loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/pefdloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/pefdloss.py new file mode 100644 index 000000000..f16a8d5dc --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/pefdloss.py @@ -0,0 +1,83 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
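+# PEFDLoss below projects the student feature through an ensemble of linear
+# regressors, averages the projections, and penalizes 1 - cosine similarity to
+# the teacher feature ("flatten" uses raw flattened maps, "gap" pools first).
+# A minimal sketch; the channel sizes are illustrative:
+#
+#     pefd = PEFDLoss(student_channel=512, teacher_channel=2048, mode="gap")
+#     loss = pefd(student_fmap, teacher_fmap)  # scalar tensor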
+ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from ppcls.utils.initializer import kaiming_normal_, kaiming_uniform_ + + +class Regressor(nn.Layer): + """Linear regressor""" + + def __init__(self, dim_in=1024, dim_out=1024): + super(Regressor, self).__init__() + self.conv = nn.Linear(dim_in, dim_out) + + def forward(self, x): + x = self.conv(x) + x = F.relu(x) + return x + + +class PEFDLoss(nn.Layer): + """Improved Feature Distillation via Projector Ensemble + Reference: https://arxiv.org/pdf/2210.15274.pdf + Code reference: https://github.com/chenyd7/PEFD + """ + + def __init__(self, + student_channel, + teacher_channel, + num_projectors=3, + mode="flatten"): + super().__init__() + + if num_projectors <= 0: + raise ValueError("Number of projectors must be greater than 0.") + + if mode not in ["flatten", "gap"]: + raise ValueError("Mode must be \"flatten\" or \"gap\".") + + self.mode = mode + self.projectors = nn.LayerList() + + for _ in range(num_projectors): + self.projectors.append(Regressor(student_channel, teacher_channel)) + + def forward(self, student_feature, teacher_feature): + if self.mode == "gap": + student_feature = F.adaptive_avg_pool2d(student_feature, (1, 1)) + teacher_feature = F.adaptive_avg_pool2d(teacher_feature, (1, 1)) + + student_feature = student_feature.flatten(1) + f_t = teacher_feature.flatten(1) + + q = len(self.projectors) + f_s = 0.0 + for i in range(q): + f_s += self.projectors[i](student_feature) + f_s = f_s / q + + # inner product (normalize first and inner product) + normft = f_t.pow(2).sum(1, keepdim=True).pow(1. / 2) + outft = f_t / normft + normfs = f_s.pow(2).sum(1, keepdim=True).pow(1. / 2) + outfs = f_s / normfs + + cos_theta = (outft * outfs).sum(1, keepdim=True) + loss = paddle.mean(1 - cos_theta) + + return loss diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/rkdloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/rkdloss.py new file mode 100644 index 000000000..aa6ae2324 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/rkdloss.py @@ -0,0 +1,99 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
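+# RKdAngle and RkdDistance below distill pairwise relations rather than raw
+# activations: features are optionally pooled to `target_size`, flattened, and
+# the student's pairwise angles/distances are matched to the teacher's with a
+# smooth L1 loss. A minimal sketch with illustrative inputs:
+#
+#     angle = RKdAngle(target_size=1)
+#     loss = angle(student_fmap, teacher_fmap)  # scalar tensor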
+ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +def pdist(e, squared=False, eps=1e-12): + e_square = e.pow(2).sum(axis=1) + prod = paddle.mm(e, e.t()) + res = (e_square.unsqueeze(1) + e_square.unsqueeze(0) - 2 * prod).clip( + min=eps) + + if not squared: + res = res.sqrt() + return res + + +class RKdAngle(nn.Layer): + # paper : [Relational Knowledge Distillation](https://arxiv.org/abs/1904.05068?context=cs.LG) + # reference: https://github.com/lenscloth/RKD/blob/master/metric/loss.py + def __init__(self, target_size=None): + super().__init__() + if target_size is not None: + self.avgpool = paddle.nn.AdaptiveAvgPool2D(target_size) + else: + self.avgpool = None + + def forward(self, student, teacher): + # GAP to reduce memory + if self.avgpool is not None: + # NxC1xH1xW1 -> NxC1x1x1 + student = self.avgpool(student) + # NxC2xH2xW2 -> NxC2x1x1 + teacher = self.avgpool(teacher) + + # reshape for feature map distillation + bs = student.shape[0] + student = student.reshape([bs, -1]) + teacher = teacher.reshape([bs, -1]) + + td = (teacher.unsqueeze(0) - teacher.unsqueeze(1)) + norm_td = F.normalize(td, p=2, axis=2) + t_angle = paddle.bmm(norm_td, norm_td.transpose([0, 2, 1])).reshape( + [-1, 1]) + + sd = (student.unsqueeze(0) - student.unsqueeze(1)) + norm_sd = F.normalize(sd, p=2, axis=2) + s_angle = paddle.bmm(norm_sd, norm_sd.transpose([0, 2, 1])).reshape( + [-1, 1]) + loss = F.smooth_l1_loss(s_angle, t_angle, reduction='mean') + return loss + + +class RkdDistance(nn.Layer): + # paper : [Relational Knowledge Distillation](https://arxiv.org/abs/1904.05068?context=cs.LG) + # reference: https://github.com/lenscloth/RKD/blob/master/metric/loss.py + def __init__(self, eps=1e-12, target_size=1): + super().__init__() + self.eps = eps + if target_size is not None: + self.avgpool = paddle.nn.AdaptiveAvgPool2D(target_size) + else: + self.avgpool = None + + def forward(self, student, teacher): + # GAP to reduce memory + if self.avgpool is not None: + # NxC1xH1xW1 -> NxC1x1x1 + student = self.avgpool(student) + # NxC2xH2xW2 -> NxC2x1x1 + teacher = self.avgpool(teacher) + + bs = student.shape[0] + student = student.reshape([bs, -1]) + teacher = teacher.reshape([bs, -1]) + + t_d = pdist(teacher, squared=False) + mean_td = t_d.mean() + t_d = t_d / (mean_td + self.eps) + + d = pdist(student, squared=False) + mean_d = d.mean() + d = d / (mean_d + self.eps) + + loss = F.smooth_l1_loss(d, t_d, reduction="mean") + return loss diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/skdloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/skdloss.py new file mode 100644 index 000000000..fe8e8e14d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/skdloss.py @@ -0,0 +1,72 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
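+# SKDLoss below layer-normalizes both logits (the "spherical" projection),
+# scales them by `multiplier`, and mixes a temperature-scaled KD term with a
+# cross-entropy term via `alpha`. A minimal sketch; the temperature value is
+# illustrative:
+#
+#     skd = SKDLoss(temperature=4.0, multiplier=2.0, alpha=0.9)
+#     loss = skd(student_logits, teacher_logits, target=labels)  # scalar tensor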
+ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class SKDLoss(nn.Layer): + """ + Spherical Knowledge Distillation + paper: https://arxiv.org/pdf/2010.07485.pdf + code reference: https://github.com/forjiuzhou/Spherical-Knowledge-Distillation + """ + + def __init__(self, + temperature, + multiplier=2.0, + alpha=0.9, + use_target_as_gt=False): + super().__init__() + self.temperature = temperature + self.multiplier = multiplier + self.alpha = alpha + self.use_target_as_gt = use_target_as_gt + + def forward(self, logits_student, logits_teacher, target=None): + """Compute Spherical Knowledge Distillation loss. + Args: + logits_student: student's logits with shape (batch_size, num_classes) + logits_teacher: teacher's logits with shape (batch_size, num_classes) + """ + if target is None or self.use_target_as_gt: + target = logits_teacher.argmax(axis=-1) + + target = F.one_hot( + target.reshape([-1]), num_classes=logits_student[0].shape[0]) + + logits_student = F.layer_norm( + logits_student, + logits_student.shape[1:], + weight=None, + bias=None, + epsilon=1e-7) * self.multiplier + logits_teacher = F.layer_norm( + logits_teacher, + logits_teacher.shape[1:], + weight=None, + bias=None, + epsilon=1e-7) * self.multiplier + + kd_loss = -paddle.sum(F.softmax(logits_teacher / self.temperature) * + F.log_softmax(logits_student / self.temperature), + axis=1) + + kd_loss = paddle.mean(kd_loss) * self.temperature**2 + + ce_loss = paddle.mean(-paddle.sum( + target * F.log_softmax(logits_student), axis=1)) + + return kd_loss * self.alpha + ce_loss * (1 - self.alpha) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/softsuploss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/softsuploss.py new file mode 100644 index 000000000..b1389a0ba --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/softsuploss.py @@ -0,0 +1,75 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn + +class SoftSupConLoss(nn.Layer): + """ + Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf. + It also supports the unsupervised contrastive loss in SimCLR + """ + def __init__(self, temperature=0.07, contrast_mode='all', base_temperature=0.07): + super(SoftSupConLoss, self).__init__() + self.temperature = temperature + self.contrast_mode = contrast_mode + self.base_temperature = base_temperature + + def __call__(self, feat, batch, max_probs=None, labels=None, mask=None, reduction="mean", select_matrix=None): + """Compute loss for model. If both `labels` and `mask` are None, + it degenerates to SimCLR unsupervised loss: + https://arxiv.org/pdf/2002.05709.pdf + + Args: + feat: hidden vector of shape [batch_size, n_views, ...]. + labels: ground truth of shape [batch_size]. + mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j + has the same class as sample i. Can be asymmetric. + Returns: + A loss scalar. 
+ """ + max_probs = batch['max_probs'] + labels = batch['p_targets_u_w'] + # reduction = batch['reduction'] + batch_size = feat.shape[0] + if labels is not None: + labels = labels.reshape((-1, 1)) + mask = paddle.equal(labels, labels.T).astype('float32') + max_probs = max_probs.reshape((-1, 1)) + score_mask = paddle.matmul(max_probs, max_probs.T) + mask = paddle.multiply(mask, score_mask) + + contrast_count = feat.shape[1] + contrast_feat = paddle.concat(paddle.unbind(feat, axis=1), axis=0) # (2n, d) + if self.contrast_mode == 'all': + anchor_feat = contrast_feat + anchor_count = contrast_count + anchor_dot_contrast = paddle.matmul(anchor_feat, contrast_feat.T) / self.temperature + logits_max = anchor_dot_contrast.max(axis=1, keepdim=True) + logits = anchor_dot_contrast - logits_max.detach() + mask = paddle.concat([mask, mask], axis=0) + mask = paddle.concat([mask, mask], axis=1) + + logits_mask = 1 - paddle.eye(batch_size * contrast_count, dtype=paddle.float64) + mask = mask * logits_mask + exp_logits = paddle.exp(logits) * logits_mask + log_prob = logits - paddle.log(exp_logits.sum(axis=1, keepdim=True)) + + mean_log_prob_pos = (mask * log_prob).sum(axis=1) / mask.sum(axis=1) + loss = -(self.temperature / self.base_temperature) * mean_log_prob_pos + loss = loss.reshape((anchor_count, batch_size)) + if reduction == 'mean': + loss = loss.mean() + + return {"SoftSupConLoss": loss} \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/softtargetceloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/softtargetceloss.py new file mode 100644 index 000000000..351db50e3 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/softtargetceloss.py @@ -0,0 +1,16 @@ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class SoftTargetCrossEntropy(nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, x, target): + loss = paddle.sum(-target * F.log_softmax(x, axis=-1), axis=-1) + loss = loss.mean() + return {"SoftTargetCELoss": loss} + + def __str__(self, ): + return type(self).__name__ diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/supconloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/supconloss.py new file mode 100644 index 000000000..753ceaf41 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/supconloss.py @@ -0,0 +1,109 @@ +import paddle +from paddle import nn + + +class SupConLoss(nn.Layer): + """Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf. + code reference: https://github.com/HobbitLong/SupContrast/blob/master/losses.py + It also supports the unsupervised contrastive loss in SimCLR""" + + def __init__(self, + views=16, + temperature=0.07, + contrast_mode='all', + base_temperature=0.07, + normalize_feature=True): + super(SupConLoss, self).__init__() + self.temperature = paddle.to_tensor(temperature) + self.contrast_mode = contrast_mode + self.base_temperature = paddle.to_tensor(base_temperature) + self.num_ids = None + self.views = views + self.normalize_feature = normalize_feature + + def forward(self, features, labels, mask=None): + """Compute loss for model. If both `labels` and `mask` are None, + it degenerates to SimCLR unsupervised loss: + https://arxiv.org/pdf/2002.05709.pdf + Args: + features: hidden vector of shape [bsz, n_views, ...]. + labels: ground truth of shape [bsz]. + mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j + has the same class as sample i. 
Can be asymmetric. + Returns: + A loss scalar. + """ + features = features["features"] + if self.num_ids is None: + self.num_ids = int(features.shape[0] / self.views) + + if self.normalize_feature: + features = 1. * features / (paddle.expand_as( + paddle.norm( + features, p=2, axis=-1, keepdim=True), features) + 1e-12) + features = features.reshape([self.num_ids, self.views, -1]) + labels = labels.reshape([self.num_ids, self.views])[:, 0] + + if len(features.shape) < 3: + raise ValueError('`features` needs to be [bsz, n_views, ...],' + 'at least 3 dimensions are required') + if len(features.shape) > 3: + features = features.reshape( + [features.shape[0], features.shape[1], -1]) + + batch_size = features.shape[0] + if labels is not None and mask is not None: + raise ValueError('Cannot define both `labels` and `mask`') + elif labels is None and mask is None: + mask = paddle.eye(batch_size, dtype='float32') + elif labels is not None: + labels = labels.reshape([-1, 1]) + if labels.shape[0] != batch_size: + raise ValueError( + 'Num of labels does not match num of features') + mask = paddle.cast( + paddle.equal(labels, paddle.t(labels)), 'float32') + else: + mask = paddle.cast(mask, 'float32') + + contrast_count = features.shape[1] + contrast_feature = paddle.concat( + paddle.unbind( + features, axis=1), axis=0) + if self.contrast_mode == 'one': + anchor_feature = features[:, 0] + anchor_count = 1 + elif self.contrast_mode == 'all': + anchor_feature = contrast_feature + anchor_count = contrast_count + else: + raise ValueError('Unknown mode: {}'.format(self.contrast_mode)) + + # compute logits + anchor_dot_contrast = paddle.divide( + paddle.matmul(anchor_feature, paddle.t(contrast_feature)), + self.temperature) + # for numerical stability + logits_max = paddle.max(anchor_dot_contrast, axis=1, keepdim=True) + logits = anchor_dot_contrast - logits_max.detach() + + # tile mask + mask = paddle.tile(mask, [anchor_count, contrast_count]) + + logits_mask = 1 - paddle.eye(batch_size * anchor_count) + mask = mask * logits_mask + + # compute log_prob + exp_logits = paddle.exp(logits) * logits_mask + log_prob = logits - paddle.log( + paddle.sum(exp_logits, axis=1, keepdim=True)) + + # compute mean of log-likelihood over positive + mean_log_prob_pos = paddle.sum((mask * log_prob), + axis=1) / paddle.sum(mask, axis=1) + + # loss + loss = -(self.temperature / self.base_temperature) * mean_log_prob_pos + loss = paddle.mean(loss.reshape([anchor_count, batch_size])) + + return {"SupConLoss": loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/trihardloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/trihardloss.py new file mode 100644 index 000000000..96cb42cb4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/trihardloss.py @@ -0,0 +1,84 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
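The SupConLoss added above takes its embeddings wrapped in a dict and assumes the batch is laid out as `views` consecutive samples per identity, which is easy to get wrong when wiring it into a sampler. A minimal smoke test, assuming the file is importable as ppcls.loss.supconloss per the diff header; shapes and values are illustrative only:

import paddle

from ppcls.loss.supconloss import SupConLoss  # import path assumed from the diff header

num_ids, views, feat_dim = 4, 2, 8
# batch laid out identity-by-identity: [id0, id0, id1, id1, ...]
feats = paddle.randn([num_ids * views, feat_dim])
labels = paddle.to_tensor([i for i in range(num_ids) for _ in range(views)])

loss_fn = SupConLoss(views=views, temperature=0.07, normalize_feature=True)
out = loss_fn({"features": feats}, labels)
print(out["SupConLoss"])  # scalar tensor; losses in this module are returned as dicts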
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +from .comfunc import rerange_index + + +class TriHardLoss(paddle.nn.Layer): + """ + paper: In Defense of the Triplet Loss for Person Re-Identification + code reference: https://github.com/VisualComputingInstitute/triplet-reid/blob/master/loss.py + TriHard Loss, based on triplet loss. USE P * K samples. + the batch size is fixed. Batch_size = P * K; but the K may vary between batches. + same label gather together + + supported_metrics = [ + 'euclidean', + 'sqeuclidean', + 'cityblock', + ] + only consider samples_each_class = 2 + """ + + def __init__(self, batch_size=120, samples_each_class=2, margin=0.1): + super(TriHardLoss, self).__init__() + self.margin = margin + self.samples_each_class = samples_each_class + self.batch_size = batch_size + self.rerange_index = rerange_index(batch_size, samples_each_class) + + def forward(self, input, target=None): + features = input["features"] + assert (self.batch_size == features.shape[0]) + + #normalization + features = self._nomalize(features) + samples_each_class = self.samples_each_class + rerange_index = paddle.to_tensor(self.rerange_index) + + #calc sm + diffs = paddle.unsqueeze( + features, axis=1) - paddle.unsqueeze( + features, axis=0) + similary_matrix = paddle.sum(paddle.square(diffs), axis=-1) + + #rerange + tmp = paddle.reshape(similary_matrix, shape=[-1, 1]) + tmp = paddle.gather(tmp, index=rerange_index) + similary_matrix = paddle.reshape(tmp, shape=[-1, self.batch_size]) + + #split + ignore, pos, neg = paddle.split( + similary_matrix, + num_or_sections=[1, samples_each_class - 1, -1], + axis=1) + + ignore.stop_gradient = True + hard_pos = paddle.max(pos, axis=1) + hard_neg = paddle.min(neg, axis=1) + + loss = hard_pos + self.margin - hard_neg + loss = paddle.nn.ReLU()(loss) + loss = paddle.mean(loss) + return {"trihardloss": loss} + + def _nomalize(self, input): + input_norm = paddle.sqrt( + paddle.sum(paddle.square(input), axis=1, keepdim=True)) + return paddle.divide(input, input_norm) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/triplet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/triplet.py new file mode 100644 index 000000000..0da7cc5df --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/triplet.py @@ -0,0 +1,157 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +import paddle.nn as nn + + +class TripletLossV2(nn.Layer): + """Triplet loss with hard positive/negative mining. + paper : [Facenet: A unified embedding for face recognition and clustering](https://arxiv.org/pdf/1503.03832.pdf) + code reference: https://github.com/okzhili/Cartoon-face-recognition/blob/master/loss/triplet_loss.py + Args: + margin (float): margin for triplet. 
+ """ + + def __init__(self, + margin=0.5, + normalize_feature=True, + feature_from="features"): + super(TripletLossV2, self).__init__() + self.margin = margin + self.feature_from = feature_from + self.ranking_loss = paddle.nn.loss.MarginRankingLoss(margin=margin) + self.normalize_feature = normalize_feature + + def forward(self, input, target): + """ + Args: + inputs: feature matrix with shape (batch_size, feat_dim) + target: ground truth labels with shape (num_classes) + """ + inputs = input[self.feature_from] + + if self.normalize_feature: + inputs = 1. * inputs / (paddle.expand_as( + paddle.norm( + inputs, p=2, axis=-1, keepdim=True), inputs) + 1e-12) + + bs = inputs.shape[0] + + # compute distance + dist = paddle.pow(inputs, 2).sum(axis=1, keepdim=True).expand([bs, bs]) + dist = dist + dist.t() + dist = paddle.addmm( + input=dist, x=inputs, y=inputs.t(), alpha=-2.0, beta=1.0) + dist = paddle.clip(dist, min=1e-12).sqrt() + + # hard negative mining + is_pos = paddle.expand(target, ( + bs, bs)).equal(paddle.expand(target, (bs, bs)).t()) + is_neg = paddle.expand(target, ( + bs, bs)).not_equal(paddle.expand(target, (bs, bs)).t()) + + # `dist_ap` means distance(anchor, positive) + ## both `dist_ap` and `relative_p_inds` with shape [N, 1] + ''' + dist_ap, relative_p_inds = paddle.max( + paddle.reshape(dist[is_pos], (bs, -1)), axis=1, keepdim=True) + # `dist_an` means distance(anchor, negative) + # both `dist_an` and `relative_n_inds` with shape [N, 1] + dist_an, relative_n_inds = paddle.min( + paddle.reshape(dist[is_neg], (bs, -1)), axis=1, keepdim=True) + ''' + dist_ap = paddle.max(paddle.reshape( + paddle.masked_select(dist, is_pos), (bs, -1)), + axis=1, + keepdim=True) + # `dist_an` means distance(anchor, negative) + # both `dist_an` and `relative_n_inds` with shape [N, 1] + dist_an = paddle.min(paddle.reshape( + paddle.masked_select(dist, is_neg), (bs, -1)), + axis=1, + keepdim=True) + # shape [N] + dist_ap = paddle.squeeze(dist_ap, axis=1) + dist_an = paddle.squeeze(dist_an, axis=1) + + # Compute ranking hinge loss + y = paddle.ones_like(dist_an) + loss = self.ranking_loss(dist_an, dist_ap, y) + return {"TripletLossV2": loss} + + +class TripletLoss(nn.Layer): + """Triplet loss with hard positive/negative mining. + Reference: + Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737. + Code imported from https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py. + Args: + margin (float): margin for triplet. 
+ """ + + def __init__(self, margin=1.0): + super(TripletLoss, self).__init__() + self.margin = margin + self.ranking_loss = paddle.nn.loss.MarginRankingLoss(margin=margin) + + def forward(self, input, target): + """ + Args: + inputs: feature matrix with shape (batch_size, feat_dim) + target: ground truth labels with shape (num_classes) + """ + inputs = input["features"] + + bs = inputs.shape[0] + # Compute pairwise distance, replace by the official when merged + dist = paddle.pow(inputs, 2).sum(axis=1, keepdim=True).expand([bs, bs]) + dist = dist + dist.t() + dist = paddle.addmm( + input=dist, x=inputs, y=inputs.t(), alpha=-2.0, beta=1.0) + dist = paddle.clip(dist, min=1e-12).sqrt() + + mask = paddle.equal( + target.expand([bs, bs]), target.expand([bs, bs]).t()) + mask_numpy_idx = mask.numpy() + dist_ap, dist_an = [], [] + for i in range(bs): + # dist_ap_i = paddle.to_tensor(dist[i].numpy()[mask_numpy_idx[i]].max(),dtype='float64').unsqueeze(0) + # dist_ap_i.stop_gradient = False + # dist_ap.append(dist_ap_i) + dist_ap.append( + max([ + dist[i][j] if mask_numpy_idx[i][j] == True else float( + "-inf") for j in range(bs) + ]).unsqueeze(0)) + # dist_an_i = paddle.to_tensor(dist[i].numpy()[mask_numpy_idx[i] == False].min(), dtype='float64').unsqueeze(0) + # dist_an_i.stop_gradient = False + # dist_an.append(dist_an_i) + dist_an.append( + min([ + dist[i][k] if mask_numpy_idx[i][k] == False else float( + "inf") for k in range(bs) + ]).unsqueeze(0)) + + dist_ap = paddle.concat(dist_ap, axis=0) + dist_an = paddle.concat(dist_an, axis=0) + + # Compute ranking hinge loss + y = paddle.ones_like(dist_an) + loss = self.ranking_loss(dist_an, dist_ap, y) + return {"TripletLoss": loss} diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/tripletangularmarginloss.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/tripletangularmarginloss.py new file mode 100644 index 000000000..df03489f2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/tripletangularmarginloss.py @@ -0,0 +1,241 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +import paddle.nn as nn +from ppcls.loss.xbm import CrossBatchMemory + + +class TripletAngularMarginLoss(nn.Layer): + """A more robust triplet loss with hard positive/negative mining on angular margin instead of relative distance between d(a,p) and d(a,n). + + Args: + margin (float, optional): angular margin. Defaults to 0.5. + normalize_feature (bool, optional): whether to apply L2-norm in feature before computing distance(cos-similarity). Defaults to True. + reduction (str, optional): reducing option within an batch . Defaults to "mean". + add_absolute (bool, optional): whether add absolute loss within d(a,p) or d(a,n). Defaults to False. + absolute_loss_weight (float, optional): weight for absolute loss. Defaults to 1.0. 
+ ap_value (float, optional): weight for d(a, p). Defaults to 0.9. + an_value (float, optional): weight for d(a, n). Defaults to 0.5. + feature_from (str, optional): which key feature from. Defaults to "features". + """ + + def __init__(self, + margin=0.5, + normalize_feature=True, + reduction="mean", + add_absolute=False, + absolute_loss_weight=1.0, + ap_value=0.9, + an_value=0.5, + feature_from="features"): + super(TripletAngularMarginLoss, self).__init__() + self.margin = margin + self.feature_from = feature_from + self.ranking_loss = paddle.nn.loss.MarginRankingLoss( + margin=margin, reduction=reduction) + self.normalize_feature = normalize_feature + self.add_absolute = add_absolute + self.ap_value = ap_value + self.an_value = an_value + self.absolute_loss_weight = absolute_loss_weight + + def forward(self, input, target): + """ + Args: + inputs: feature matrix with shape (batch_size, feat_dim) + target: ground truth labels with shape (batch_size) + """ + inputs = input[self.feature_from] + + if self.normalize_feature: + inputs = paddle.divide( + inputs, paddle.norm( + inputs, p=2, axis=-1, keepdim=True)) + + bs = inputs.shape[0] + + # compute distance(cos-similarity) + dist = paddle.matmul(inputs, inputs.t()) + + # hard negative mining + is_pos = paddle.expand(target, ( + bs, bs)).equal(paddle.expand(target, (bs, bs)).t()) + is_neg = paddle.expand(target, ( + bs, bs)).not_equal(paddle.expand(target, (bs, bs)).t()) + + # `dist_ap` means distance(anchor, positive) + # both `dist_ap` and `relative_p_inds` with shape [N, 1] + dist_ap = paddle.min(paddle.reshape( + paddle.masked_select(dist, is_pos), (bs, -1)), + axis=1, + keepdim=True) + # `dist_an` means distance(anchor, negative) + # both `dist_an` and `relative_n_inds` with shape [N, 1] + dist_an = paddle.max(paddle.reshape( + paddle.masked_select(dist, is_neg), (bs, -1)), + axis=1, + keepdim=True) + # shape [N] + dist_ap = paddle.squeeze(dist_ap, axis=1) + dist_an = paddle.squeeze(dist_an, axis=1) + + # Compute ranking hinge loss + y = paddle.ones_like(dist_an) + loss = self.ranking_loss(dist_ap, dist_an, y) + + if self.add_absolute: + absolut_loss_ap = self.ap_value - dist_ap + absolut_loss_ap = paddle.where(absolut_loss_ap > 0, + absolut_loss_ap, + paddle.zeros_like(absolut_loss_ap)) + + absolut_loss_an = dist_an - self.an_value + absolut_loss_an = paddle.where(absolut_loss_an > 0, + absolut_loss_an, + paddle.ones_like(absolut_loss_an)) + + loss = (absolut_loss_an.mean() + absolut_loss_ap.mean() + ) * self.absolute_loss_weight + loss.mean() + + return {"TripletAngularMarginLoss": loss} + + +class TripletAngularMarginLoss_XBM(TripletAngularMarginLoss): + """TripletAngularMarginLoss combined with CrossBatchMemory + + Args: + start_iter: (int): from which step CrossBatchMemory is enabled + xbm_size: (int): Size of CrossBatchMemory + xbm_weight: (float): Weight of CrossBatchMemory loss + feat_dim: (int): Channels of features in CrossBatchMemory + margin (float, optional): angular margin. Defaults to 0.5. + normalize_feature (bool, optional): whether to apply L2-norm in feature before computing distance(cos-similarity). Defaults to True. + reduction (str, optional): reducing option within an batch . Defaults to "mean". + add_absolute (bool, optional): whether add absolute loss within d(a,p) or d(a,n). Defaults to False. + absolute_loss_weight (float, optional): weight for absolute loss. Defaults to 1.0. + ap_value (float, optional): weight for d(a, p). Defaults to 0.9. + an_value (float, optional): weight for d(a, n). Defaults to 0.5. 
+ feature_from (str, optional): which key feature from. Defaults to "features". + """ + + def __init__(self, + start_iter: int, + xbm_size: int, + xbm_weight: float, + feat_dim: int, + margin=0.5, + normalize_feature=True, + reduction="mean", + add_absolute=False, + absolute_loss_weight=1.0, + ap_value=0.9, + an_value=0.5, + feature_from="features"): + super(TripletAngularMarginLoss_XBM, self).__init__( + margin, normalize_feature, reduction, add_absolute, + absolute_loss_weight, ap_value, an_value, feature_from) + self.start_iter = start_iter + self.xbm = CrossBatchMemory(xbm_size, feat_dim) + self.xbm_weight = xbm_weight + self.inf = 10 # 10 is big enough as inf for cos-similarity + self.register_buffer("iter", paddle.to_tensor(0, dtype="int64")) + + def forward(self, input, target): + """ + Args: + inputs: feature matrix with shape (batch_size, feat_dim) + target: ground truth labels with shape (batch_size) + """ + feats = input[self.feature_from] + if self.normalize_feature: + feats = nn.functional.normalize(feats, p=2, axis=1) + + labels = target + if labels.ndim >= 2 and labels.shape[-1] == 1: + labels = paddle.squeeze(labels, axis=[-1]) + + loss = self._compute_loss(feats, labels, feats, labels) + + # XBM loss below + self.iter += 1 + if self.iter.item() > self.start_iter: + self.xbm.enqueue_dequeue(feats.detach(), labels.detach()) + xbm_feats, xbm_labels = self.xbm.get() + xbm_loss = self._compute_loss(feats, labels, xbm_feats, xbm_labels) + loss = loss + self.xbm_weight * xbm_loss + + return {"TripletAngularMarginLoss_XBM": loss} + + def _masked_max(self, tensor, mask, axis): + masked = paddle.multiply(tensor, mask.astype(tensor.dtype)) + neg_inf = paddle.zeros_like(tensor) + neg_inf.stop_gradient = True + neg_inf[paddle.logical_not(mask)] = -self.inf + return paddle.max(masked + neg_inf, axis=axis, keepdim=True) + + def _masked_min(self, tensor, mask, axis): + masked = paddle.multiply(tensor, mask.astype(tensor.dtype)) + pos_inf = paddle.zeros_like(tensor) + pos_inf.stop_gradient = True + pos_inf[paddle.logical_not(mask)] = self.inf + return paddle.min(masked + pos_inf, axis=axis, keepdim=True) + + def _compute_loss(self, + inputs_q: paddle.Tensor, + targets_q: paddle.Tensor, + inputs_k: paddle.Tensor, + targets_k: paddle.Tensor) -> paddle.Tensor: + Q = inputs_q.shape[0] + K = inputs_k.shape[0] + + # compute distance(cos-similarity) + dist = paddle.matmul(inputs_q, inputs_k.t()) # [Q, K] + + # hard negative mining + is_pos = paddle.expand(paddle.unsqueeze(targets_q, 1), (Q, K)).equal( + paddle.expand(paddle.unsqueeze(targets_k, 1), + (K, Q)).t()) # [Q, K] + is_neg = paddle.expand(paddle.unsqueeze(targets_q, 1), + (Q, K)).not_equal( + paddle.expand( + paddle.unsqueeze(targets_k, 1), + (K, Q)).t()) # [Q, K] + + dist_ap = self._masked_min(dist, is_pos, axis=1) # [Q, ] + dist_an = self._masked_max(dist, is_neg, axis=1) # [Q, ] + + # Compute ranking hinge loss + y = paddle.ones_like(dist_an) + loss = self.ranking_loss(dist_ap, dist_an, y) + + if self.add_absolute: + absolut_loss_ap = self.ap_value - dist_ap + absolut_loss_ap = paddle.where(absolut_loss_ap > 0, + absolut_loss_ap, + paddle.zeros_like(absolut_loss_ap)) + + absolut_loss_an = dist_an - self.an_value + absolut_loss_an = paddle.where(absolut_loss_an > 0, + absolut_loss_an, + paddle.ones_like(absolut_loss_an)) + + loss = (absolut_loss_an.mean() + absolut_loss_ap.mean() + ) * self.absolute_loss_weight + loss.mean() + + return loss diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/wslloss.py 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/wslloss.py new file mode 100644 index 000000000..8bdfaf8cc --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/wslloss.py @@ -0,0 +1,66 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +class WSLLoss(nn.Layer): + """ + Weighted Soft Labels Loss + paper: https://arxiv.org/pdf/2102.00650.pdf + code reference: https://github.com/bellymonster/Weighted-Soft-Label-Distillation + """ + + def __init__(self, temperature=2.0, use_target_as_gt=False): + super().__init__() + self.temperature = temperature + self.use_target_as_gt = use_target_as_gt + + def forward(self, logits_student, logits_teacher, target=None): + """Compute weighted soft labels loss. + Args: + logits_student: student's logits with shape (batch_size, num_classes) + logits_teacher: teacher's logits with shape (batch_size, num_classes) + target: ground truth labels with shape (batch_size) + """ + if target is None or self.use_target_as_gt: + target = logits_teacher.argmax(axis=-1) + + target = F.one_hot( + target.reshape([-1]), num_classes=logits_student[0].shape[0]) + + s_input_for_softmax = logits_student / self.temperature + t_input_for_softmax = logits_teacher / self.temperature + + ce_loss_s = -paddle.sum(target * + F.log_softmax(logits_student.detach()), + axis=1) + ce_loss_t = -paddle.sum(target * + F.log_softmax(logits_teacher.detach()), + axis=1) + + ratio = ce_loss_s / (ce_loss_t + 1e-7) + ratio = paddle.maximum(ratio, paddle.zeros_like(ratio)) + + kd_loss = -paddle.sum(F.softmax(t_input_for_softmax) * + F.log_softmax(s_input_for_softmax), + axis=1) + weight = 1 - paddle.exp(-ratio) + + weighted_kd_loss = (self.temperature**2) * paddle.mean(kd_loss * + weight) + + return weighted_kd_loss diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/xbm.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/xbm.py new file mode 100644 index 000000000..d63583e44 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/loss/xbm.py @@ -0,0 +1,89 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
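Unlike most losses in this module, the WSLLoss above returns a plain scalar rather than a dict, and it falls back to the teacher's argmax when no label is given. A quick sanity check, with the import path assumed from the diff header:

import paddle

from ppcls.loss.wslloss import WSLLoss  # assumed import path

batch_size, num_classes = 4, 10
logits_student = paddle.randn([batch_size, num_classes])
logits_teacher = paddle.randn([batch_size, num_classes])
target = paddle.randint(0, num_classes, [batch_size])

criterion = WSLLoss(temperature=2.0)
loss = criterion(logits_student, logits_teacher, target)  # scalar tensor
# with target=None (or use_target_as_gt=True) the teacher's argmax is used as ground truth
loss_no_gt = criterion(logits_student, logits_teacher)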
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from typing import Tuple + +import paddle + + +class CrossBatchMemory(paddle.nn.Layer): + """ + CrossBatchMemory Implementation. refer to "Cross-Batch Memory for Embedding Learning". + + code heavily based on https://github.com/msight-tech/research-xbm/blob/master/ret_benchmark/modeling/xbm.py + + Args: + size (int): Size of memory bank + embedding_size (int): number of embedding dimension for memory bank + """ + + def __init__(self, size: int, embedding_size: int): + super().__init__() + self.size = size + self.embedding_size = embedding_size + + # initialize and register feature queue for resume training + feats = paddle.zeros([self.size, self.embedding_size]) + self.register_buffer("feats", feats) + + # initialize and register label queue for resume training + targets = paddle.zeros([self.size, ], dtype="int64") + self.register_buffer("targets", targets) + + self.ptr = 0 + # self.accumulated_size = 0 + + @property + def _is_full(self) -> bool: + # return self.accumulated_size >= self.size + return self.targets[-1].item() != 0 # author's usage + + def get(self) -> Tuple[paddle.Tensor, paddle.Tensor]: + """return features and targets in memory bank + + Returns: + Tuple[paddle.Tensor, paddle.Tensor]: [features, targets] + """ + if self._is_full: + return self.feats, self.targets + else: + return self.feats[:self.ptr], self.targets[:self.ptr] + + def enqueue_dequeue(self, feats: paddle.Tensor, + targets: paddle.Tensor) -> None: + """put newest feats and targets into memory bank and pop oldest feats and targets from momory bank + + Args: + feats (paddle.Tensor): features to enque + targets (paddle.Tensor): targets to enque + """ + input_size = len(targets) + if self.ptr + input_size > self.size: + self.feats[-input_size:] = feats + self.targets[-input_size:] = targets + self.ptr = 0 + else: + self.feats[self.ptr:self.ptr + input_size] = feats + self.targets[self.ptr:self.ptr + input_size] = targets + self.ptr += input_size + # self.accumulated_size += input_size + + def forward(self, *kargs, **kwargs): + raise NotImplementedError( + "CrossBatchMemory module is for memory-bank, forward method is not needed" + ) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/__init__.py new file mode 100644 index 000000000..fc720b349 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/__init__.py @@ -0,0 +1,72 @@ +#copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
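The CrossBatchMemory above is a fixed-size ring buffer over (feature, label) pairs; note that its _is_full check treats a trailing label of 0 as "slot never written", so real labels are expected to be non-zero. A small walk-through under that assumption, with the import path taken from the diff header:

import paddle

from ppcls.loss.xbm import CrossBatchMemory  # assumed import path

xbm = CrossBatchMemory(size=8, embedding_size=4)

for step in range(3):
    feats = paddle.randn([3, 4])
    labels = paddle.randint(1, 10, [3])  # keep labels non-zero, see _is_full above
    xbm.enqueue_dequeue(feats.detach(), labels)

mem_feats, mem_labels = xbm.get()
# after two batches only 6 slots are filled and get() would return [6, 4];
# the third batch wraps (ptr + 3 > size), writes the last 3 slots and marks the
# bank full, so this prints [8, 4]
print(mem_feats.shape)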
+ +import copy +from collections import OrderedDict + +from .avg_metrics import AvgMetrics +from .metrics import TopkAcc, mAP, mINP, Recallk, Precisionk +from .metrics import DistillationTopkAcc +from .metrics import GoogLeNetTopkAcc +from .metrics import HammingDistance, AccuracyScore +from .metrics import ATTRMetric +from .metrics import TprAtFpr, MultilabelMeanAccuracy +from .metrics import MultiLabelMAP +from .face_metrics import FaceAccuracy, FaceAccOnFiveDatasets + + +class CombinedMetrics(AvgMetrics): + def __init__(self, config_list): + super().__init__() + self.metric_func_list = [] + assert isinstance(config_list, list), ( + 'operator config should be a list') + for config in config_list: + assert isinstance(config, + dict) and len(config) == 1, "yaml format error" + metric_name = list(config)[0] + metric_params = config[metric_name] + if metric_params is not None: + self.metric_func_list.append( + eval(metric_name)(**metric_params)) + else: + self.metric_func_list.append(eval(metric_name)()) + self.reset() + + def forward(self, *args, **kwargs): + metric_dict = OrderedDict() + for idx, metric_func in enumerate(self.metric_func_list): + metric_dict.update(metric_func(*args, **kwargs)) + return metric_dict + + @property + def avg_info(self): + return ", ".join([metric.avg_info for metric in self.metric_func_list]) + + @property + def avg(self): + return self.metric_func_list[0].avg + + def attr_res(self): + return self.metric_func_list[0].attrmeter.res() + + def reset(self): + for metric in self.metric_func_list: + if hasattr(metric, "reset"): + metric.reset() + + +def build_metrics(config): + metrics_list = CombinedMetrics(copy.deepcopy(config)) + return metrics_list diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/avg_metrics.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/avg_metrics.py new file mode 100644 index 000000000..6f4b62290 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/avg_metrics.py @@ -0,0 +1,20 @@ +from paddle import nn + + +class AvgMetrics(nn.Layer): + def __init__(self): + super().__init__() + self.avg_meters = {} + + def reset(self): + self.avg_meters = {} + + @property + def avg(self): + if self.avg_meters: + for metric_key in self.avg_meters: + return self.avg_meters[metric_key].avg + + @property + def avg_info(self): + return ", ".join([self.avg_meters[key].avg_info for key in self.avg_meters]) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/face_metrics.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/face_metrics.py new file mode 100644 index 000000000..cad9553c8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/face_metrics.py @@ -0,0 +1,201 @@ +# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
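build_metrics above instantiates every entry of its config list by name, so the metric section of a PaddleClas-style YAML maps onto it directly. A sketch of the expected structure; the concrete values are illustrative and the import path follows the diff header:

import paddle

from ppcls.metric import build_metrics  # assumed import path

metric_config = [
    {"TopkAcc": {"topk": [1, 5]}},  # the inner dict is unpacked as TopkAcc(topk=[1, 5])
    # {"mAP": None},                # None means "use the metric's defaults";
    #                               # retrieval metrics take similarity matrices, not logits
]
metrics = build_metrics(metric_config)

logits = paddle.randn([8, 10])
labels = paddle.randint(0, 10, [8, 1])
out = metrics(logits, labels)  # OrderedDict with "top1" and "top5"
print(metrics.avg_info)        # comma-joined running averages of every configured metric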
+ +from cmath import nan +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from sklearn.model_selection import KFold +from sklearn.decomposition import PCA +from sklearn.preprocessing import normalize + +from ppcls.utils import logger + + +class FaceAccuracy(nn.Layer): + """ + This code is modified from https://github.com/deepinsight/insightface/blob/master/recognition/arcface_torch/eval/verification.py + """ + def __init__(self): + super().__init__() + self.embedding_left_list = [] + self.embedding_right_list = [] + self.label_list = [] + self.best_acc = 0. + + def forward(self, embeddings_left, embeddings_right, labels, *args): + assert len(embeddings_left) == len(embeddings_right) == len(labels) + self.embedding_left_list.append(normalize(embeddings_left.numpy())) + self.embedding_right_list.append(normalize(embeddings_right.numpy())) + self.label_list.append(labels.numpy()) + + return {} + + def reset(self): + self.embedding_left_list = [] + self.embedding_right_list = [] + self.label_list = [] + self.best_acc = 0. + + @property + def avg(self): + return self.best_acc + + @property + def avg_info(self): + embeddings_left = np.concatenate(self.embedding_left_list) + embeddings_right = np.concatenate(self.embedding_right_list) + labels = np.concatenate(self.label_list) + num_samples = len(embeddings_left) + + thresholds = np.arange(0, 4, 0.01) + _, _, accuracy, best_thresholds = self.calculate_roc(thresholds, + embeddings_left, + embeddings_right, + labels) + self.best_acc = accuracy.mean() + return "best_threshold: {:.4f}, acc: {:.4f}, num_samples: {}".format( + best_thresholds.mean(), accuracy.mean(), num_samples) + + @staticmethod + def calculate_roc(thresholds, + embeddings1, + embeddings2, + actual_issame, + nrof_folds=10, + pca=0): + assert (embeddings1.shape[0] == embeddings2.shape[0]) + assert (embeddings1.shape[1] == embeddings2.shape[1]) + nrof_pairs = min(len(actual_issame), embeddings1.shape[0]) + nrof_thresholds = len(thresholds) + k_fold = KFold(n_splits=nrof_folds, shuffle=False) + + tprs = np.zeros((nrof_folds, nrof_thresholds)) + fprs = np.zeros((nrof_folds, nrof_thresholds)) + accuracy = np.zeros((nrof_folds)) + best_thresholds = np.zeros((nrof_folds)) + indices = np.arange(nrof_pairs) + # print('pca', pca) + dist = None + + if pca == 0: + diff = np.subtract(embeddings1, embeddings2) + dist = np.sum(np.square(diff), 1) + + for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)): + if pca > 0: + print('doing pca on', fold_idx) + embed1_train = embeddings1[train_set] + embed2_train = embeddings2[train_set] + _embed_train = np.concatenate((embed1_train, embed2_train), axis=0) + pca_model = PCA(n_components=pca) + pca_model.fit(_embed_train) + embed1 = pca_model.transform(embeddings1) + embed2 = pca_model.transform(embeddings2) + embed1 = normalize(embed1) + embed2 = normalize(embed2) + diff = np.subtract(embed1, embed2) + dist = np.sum(np.square(diff), 1) + + # Find the best threshold for the fold + acc_train = np.zeros((nrof_thresholds)) + for threshold_idx, threshold in enumerate(thresholds): + _, _, acc_train[threshold_idx] = FaceAccuracy.calculate_accuracy( + threshold, dist[train_set], actual_issame[train_set]) + best_threshold_index = np.argmax(acc_train) + best_thresholds[fold_idx] = thresholds[best_threshold_index] + for threshold_idx, threshold in enumerate(thresholds): + tprs[fold_idx, threshold_idx], fprs[ + fold_idx, threshold_idx], _ = FaceAccuracy.calculate_accuracy( + threshold, dist[test_set], 
actual_issame[test_set]) + _, _, accuracy[fold_idx] = FaceAccuracy.calculate_accuracy( + thresholds[best_threshold_index], dist[test_set], + actual_issame[test_set]) + + tpr = np.mean(tprs, 0) + fpr = np.mean(fprs, 0) + return tpr, fpr, accuracy, best_thresholds + + + @staticmethod + def calculate_accuracy(threshold, dist, actual_issame): + predict_issame = np.less(dist, threshold) + tp = np.sum(np.logical_and(predict_issame, actual_issame)) + fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame))) + tn = np.sum( + np.logical_and( + np.logical_not(predict_issame), np.logical_not(actual_issame))) + fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame)) + + tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn) + fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn) + acc = float(tp + tn) / dist.size + return tpr, fpr, acc + + +class FaceAccOnFiveDatasets(FaceAccuracy): + dataname_to_idx = { + "agedb_30": 0, + "cfp_fp": 1, + "lfw": 2, + "cplfw": 3, + "calfw": 4 + } + idx_to_dataname = {v: k for k, v in dataname_to_idx.items()} + + def __init__(self): + super().__init__() + self.dataname_idx_list = [] + + def forward(self, embeddings_left, embeddings_right, labels, + dataname_idxs, *args): + assert len(embeddings_left) == len(dataname_idxs) + dataname_idxs = dataname_idxs.astype('int64').numpy() + self.dataname_idx_list.append(dataname_idxs) + + return super().forward(embeddings_left, embeddings_right, labels) + + def reset(self): + super().reset() + self.dataname_idx_list = [] + + @property + def avg_info(self): + results = {} + all_embeddings_left = np.concatenate(self.embedding_left_list) + all_embeddings_right = np.concatenate(self.embedding_right_list) + all_labels = np.concatenate(self.label_list) + dataname_idxs = np.concatenate(self.dataname_idx_list) + + acc = [] + for dataname_idx in np.unique(dataname_idxs): + dataname = self.idx_to_dataname[dataname_idx] + mask = dataname_idxs == dataname_idx + embeddings_left = all_embeddings_left[mask] + embeddings_right = all_embeddings_right[mask] + labels = all_labels[mask] + + thresholds = np.arange(0, 4, 0.01) + _, _, accuracy, best_thresholds = self.calculate_roc( + thresholds, embeddings_left, embeddings_right, labels) + acc.append(accuracy.mean()) + results[f'{dataname}-best_threshold'] = f'{best_thresholds.mean():.4f}' + results[f'{dataname}-acc'] = f'{accuracy.mean():.4f}' + results[f'{dataname}-num_samples'] = f'{len(embeddings_left)}' + self.best_acc = np.mean(acc) + results['avg_acc'] = f'{self.best_acc:.4f}' + + info = ", ".join([f"{k}: {v}" for k, v in results.items()]) + return info diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/metrics.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/metrics.py new file mode 100644 index 000000000..e39aafbe1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/metric/metrics.py @@ -0,0 +1,661 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from cmath import nan +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from sklearn.metrics import hamming_loss +from sklearn.metrics import accuracy_score as accuracy_metric +from sklearn.metrics import multilabel_confusion_matrix +from sklearn.preprocessing import binarize + +from easydict import EasyDict + +from ppcls.metric.avg_metrics import AvgMetrics +from ppcls.utils.misc import AverageMeter, AttrMeter +from ppcls.utils import logger + + +class TopkAcc(AvgMetrics): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + self.reset() + self.warned = False + + def reset(self): + self.avg_meters = { + f"top{k}": AverageMeter(f"top{k}") + for k in self.topk + } + + def forward(self, x, label): + if isinstance(x, dict): + x = x["logits"] + + output_dims = x.shape[-1] + + metric_dict = dict() + for idx, k in enumerate(self.topk): + if output_dims < k: + if not self.warned: + msg = f"The output dims({output_dims}) is less than k({k}), so the Top-{k} metric is meaningless." + logger.warning(msg) + self.warned = True + metric_dict[f"top{k}"] = 1 + else: + metric_dict[f"top{k}"] = paddle.metric.accuracy(x, label, k=k) + self.avg_meters[f"top{k}"].update(metric_dict[f"top{k}"], + x.shape[0]) + return metric_dict + + +class mAP(nn.Layer): + def __init__(self, descending=True): + super().__init__() + self.descending = descending + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=self.descending) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + + num_rel = paddle.sum(equal_flag, axis=1) + num_rel = paddle.greater_than(num_rel, paddle.to_tensor(0.)) + num_rel_index = paddle.nonzero(num_rel.astype("int")) + num_rel_index = paddle.reshape(num_rel_index, [num_rel_index.shape[0]]) + + if paddle.numel(num_rel_index).item() == 0: + metric_dict["mAP"] = np.nan + return metric_dict + + equal_flag = paddle.index_select(equal_flag, num_rel_index, axis=0) + + acc_sum = paddle.cumsum(equal_flag, axis=1) + div = paddle.arange(acc_sum.shape[1]).astype("float32") + 1 + precision = paddle.divide(acc_sum, div) + + #calc map + precision_mask = paddle.multiply(equal_flag, precision) + ap = paddle.sum(precision_mask, axis=1) / paddle.sum(equal_flag, + axis=1) + metric_dict["mAP"] = float(paddle.mean(ap)) + return metric_dict + + +class mINP(nn.Layer): + def __init__(self, descending=True): + super().__init__() + self.descending = descending + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=self.descending) + gallery_labels_transpose = 
paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.indechmx_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + + num_rel = paddle.sum(equal_flag, axis=1) + num_rel = paddle.greater_than(num_rel, paddle.to_tensor(0.)) + num_rel_index = paddle.nonzero(num_rel.astype("int")) + num_rel_index = paddle.reshape(num_rel_index, [num_rel_index.shape[0]]) + equal_flag = paddle.index_select(equal_flag, num_rel_index, axis=0) + + #do accumulative sum + div = paddle.arange(equal_flag.shape[1]).astype("float32") + 2 + minus = paddle.divide(equal_flag, div) + auxilary = paddle.subtract(equal_flag, minus) + hard_index = paddle.argmax(auxilary, axis=1).astype("float32") + all_INP = paddle.divide(paddle.sum(equal_flag, axis=1), hard_index) + mINP = paddle.mean(all_INP) + metric_dict["mINP"] = float(mINP) + return metric_dict + + +class TprAtFpr(nn.Layer): + def __init__(self, max_fpr=1 / 1000.): + super().__init__() + self.gt_pos_score_list = [] + self.gt_neg_score_list = [] + self.softmax = nn.Softmax(axis=-1) + self.max_fpr = max_fpr + self.max_tpr = 0. + + def forward(self, x, label): + if isinstance(x, dict): + x = x["logits"] + x = self.softmax(x) + for i, label_i in enumerate(label): + if label_i[0] == 0: + self.gt_neg_score_list.append(x[i][1].numpy()) + else: + self.gt_pos_score_list.append(x[i][1].numpy()) + return {} + + def reset(self): + self.gt_pos_score_list = [] + self.gt_neg_score_list = [] + self.max_tpr = 0. + + @property + def avg(self): + return self.max_tpr + + @property + def avg_info(self): + max_tpr = 0. + result = "" + gt_pos_score_list = np.array(self.gt_pos_score_list) + gt_neg_score_list = np.array(self.gt_neg_score_list) + for i in range(0, 10000): + threshold = i / 10000. + if len(gt_pos_score_list) == 0: + continue + tpr = np.sum( + gt_pos_score_list > threshold) / len(gt_pos_score_list) + if len(gt_neg_score_list) == 0 and tpr > max_tpr: + max_tpr = tpr + result = "threshold: {}, fpr: 0.0, tpr: {:.5f}".format( + threshold, tpr) + msg = f"The number of negative samples is 0, please add negative samples." + logger.warning(msg) + fpr = np.sum( + gt_neg_score_list > threshold) / len(gt_neg_score_list) + if fpr <= self.max_fpr and tpr > max_tpr: + max_tpr = tpr + result = "threshold: {}, fpr: {}, tpr: {:.5f}".format( + threshold, fpr, tpr) + self.max_tpr = max_tpr + return result + + +class MultilabelMeanAccuracy(nn.Layer): + def __init__(self, + start_threshold=0.4, + num_iterations=10, + end_threshold=0.9): + super().__init__() + self.start_threshold = start_threshold + self.num_iterations = num_iterations + self.end_threshold = end_threshold + self.gt_all_score_list = [] + self.gt_label_score_list = [] + self.max_acc = 0. + + def forward(self, x, label): + if isinstance(x, dict): + x = x["logits"] + x = F.sigmoid(x) + label = label[:, 0, :] + for i in range(len(x)): + self.gt_all_score_list.append(x[i].numpy()) + self.gt_label_score_list.append(label[i].numpy()) + return {} + + def reset(self): + self.gt_all_score_list = [] + self.gt_label_score_list = [] + self.max_acc = 0. 
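The TprAtFpr metric above only buffers class-1 probabilities in forward and defers the threshold sweep to avg_info, so it reports nothing per batch. A minimal sketch of how it is driven, assuming binary logits and the import path from the diff header:

import paddle

from ppcls.metric.metrics import TprAtFpr  # assumed import path

metric = TprAtFpr(max_fpr=1 / 1000.)
logits = paddle.randn([16, 2])            # two-class output, e.g. spoof vs. live
labels = paddle.randint(0, 2, [16, 1])    # label[i][0] in {0, 1}
metric(logits, labels)                    # forward only accumulates scores, returns {}
print(metric.avg_info)                    # sweeps thresholds, reports best tpr with fpr <= max_fpr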
+ + @property + def avg(self): + return self.max_acc + + @property + def avg_info(self): + max_acc = 0. + result = "" + gt_all_score_list = np.array(self.gt_all_score_list) + gt_label_score_list = np.array(self.gt_label_score_list) + for i in range(self.num_iterations): + threshold = self.start_threshold + i * (self.end_threshold - + self.start_threshold + ) / self.num_iterations + pred_label = (gt_all_score_list > threshold).astype(int) + TP = np.sum( + (gt_label_score_list == 1) * (pred_label == 1)).astype(float) + TN = np.sum( + (gt_label_score_list == 0) * (pred_label == 0)).astype(float) + acc = (TP + TN) / len(gt_all_score_list) + if max_acc <= acc: + max_acc = acc + result = "threshold: {}, mean_acc: {}".format( + threshold, max_acc / len(gt_label_score_list[0])) + self.max_acc = max_acc / len(gt_label_score_list[0]) + return result + + +class Recallk(nn.Layer): + def __init__(self, topk=(1, 5), descending=True): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + self.descending = descending + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + # get cmc + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=self.descending) + gallery_labels_transpose = gallery_img_id.t() + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype("float32"), choosen_indices) + equal_flag = equal_flag & keep_mask.astype("bool") + equal_flag = paddle.cast(equal_flag, "float32") + real_query_num = paddle.sum(equal_flag, axis=1) + real_query_num = paddle.sum((real_query_num > 0.0).astype("float32")) + + acc_sum = paddle.cumsum(equal_flag, axis=1) + mask = (acc_sum > 0.0).astype("float32") + all_cmc = (paddle.sum(mask, axis=0) / real_query_num).numpy() + + for k in self.topk: + metric_dict["recall{}".format(k)] = all_cmc[k - 1] + return metric_dict + + +class Precisionk(nn.Layer): + def __init__(self, topk=(1, 5), descending=True): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + self.descending = descending + + def forward(self, similarities_matrix, query_img_id, gallery_img_id, + keep_mask): + metric_dict = dict() + + #get cmc + choosen_indices = paddle.argsort( + similarities_matrix, axis=1, descending=self.descending) + gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) + gallery_labels_transpose = paddle.broadcast_to( + gallery_labels_transpose, + shape=[ + choosen_indices.shape[0], gallery_labels_transpose.shape[1] + ]) + choosen_label = paddle.index_sample(gallery_labels_transpose, + choosen_indices) + equal_flag = paddle.equal(choosen_label, query_img_id) + if keep_mask is not None: + keep_mask = paddle.index_sample( + keep_mask.astype('float32'), choosen_indices) + equal_flag = paddle.logical_and(equal_flag, + keep_mask.astype('bool')) + equal_flag = paddle.cast(equal_flag, 'float32') + + Ns = paddle.arange(gallery_img_id.shape[0]) + 1 + equal_flag_cumsum = paddle.cumsum(equal_flag, axis=1) + Precision_at_k = (paddle.mean(equal_flag_cumsum, axis=0) / Ns).numpy() + + for k in self.topk: + metric_dict["precision@{}".format(k)] 
= Precision_at_k[k - 1] + + return metric_dict + + +class DistillationTopkAcc(TopkAcc): + def __init__(self, model_key, feature_key=None, topk=(1, 5)): + super().__init__(topk=topk) + self.model_key = model_key + self.feature_key = feature_key + + def forward(self, x, label): + if isinstance(x, dict): + x = x[self.model_key] + if self.feature_key is not None: + x = x[self.feature_key] + return super().forward(x, label) + + +class GoogLeNetTopkAcc(TopkAcc): + def __init__(self, topk=(1, 5)): + super().__init__() + assert isinstance(topk, (int, list, tuple)) + if isinstance(topk, int): + topk = [topk] + self.topk = topk + + def forward(self, x, label): + return super().forward(x[0], label) + + +class MultiLabelMetric(AvgMetrics): + def __init__(self, bi_threshold=0.5): + super().__init__() + self.bi_threshold = bi_threshold + + def _multi_hot_encode(self, output): + logits = F.sigmoid(output).numpy() + return binarize(logits, threshold=self.bi_threshold) + + +class HammingDistance(MultiLabelMetric): + """ + Soft metric based label for multilabel classification + Returns: + The smaller the return value is, the better model is. + """ + + def __init__(self): + super().__init__() + self.reset() + + def reset(self): + self.avg_meters = {"HammingDistance": AverageMeter("HammingDistance")} + + def forward(self, output, target): + preds = super()._multi_hot_encode(output) + metric_dict = dict() + metric_dict["HammingDistance"] = paddle.to_tensor( + hamming_loss(target, preds)) + self.avg_meters["HammingDistance"].update( + float(metric_dict["HammingDistance"]), output.shape[0]) + return metric_dict + + +class AccuracyScore(MultiLabelMetric): + """ + Hard metric for multilabel classification + Args: + base: ["sample", "label"], default="sample" + if "sample", return metric score based sample, + if "label", return metric score based label. 
+ Returns: + accuracy: + """ + + def __init__(self, base="label"): + super().__init__() + assert base in ["sample", "label" + ], 'must be one of ["sample", "label"]' + self.base = base + self.reset() + + def reset(self): + self.avg_meters = {"AccuracyScore": AverageMeter("AccuracyScore")} + + def forward(self, output, target): + preds = super()._multi_hot_encode(output) + metric_dict = dict() + if self.base == "sample": + accuracy = accuracy_metric(target, preds) + elif self.base == "label": + mcm = multilabel_confusion_matrix(target, preds) + tns = mcm[:, 0, 0] + fns = mcm[:, 1, 0] + tps = mcm[:, 1, 1] + fps = mcm[:, 0, 1] + accuracy = (sum(tps) + sum(tns)) / ( + sum(tps) + sum(tns) + sum(fns) + sum(fps)) + metric_dict["AccuracyScore"] = paddle.to_tensor(accuracy) + self.avg_meters["AccuracyScore"].update( + float(metric_dict["AccuracyScore"]), output.shape[0]) + return metric_dict + + +def get_attr_metrics(gt_label, preds_probs, threshold): + """ + index: evaluated label index + adapted from "https://github.com/valencebond/Rethinking_of_PAR/blob/master/metrics/pedestrian_metrics.py" + """ + pred_label = (preds_probs > threshold).astype(int) + + eps = 1e-20 + result = EasyDict() + + has_fuyi = gt_label == -1 + pred_label[has_fuyi] = -1 + + ############################### + # label metrics + # TP + FN + result.gt_pos = np.sum((gt_label == 1), axis=0).astype(float) + # TN + FP + result.gt_neg = np.sum((gt_label == 0), axis=0).astype(float) + # TP + result.true_pos = np.sum((gt_label == 1) * (pred_label == 1), + axis=0).astype(float) + # TN + result.true_neg = np.sum((gt_label == 0) * (pred_label == 0), + axis=0).astype(float) + # FP + result.false_pos = np.sum(((gt_label == 0) * (pred_label == 1)), + axis=0).astype(float) + # FN + result.false_neg = np.sum(((gt_label == 1) * (pred_label == 0)), + axis=0).astype(float) + + ################ + # instance metrics + result.gt_pos_ins = np.sum((gt_label == 1), axis=1).astype(float) + result.true_pos_ins = np.sum((pred_label == 1), axis=1).astype(float) + # true positive + result.intersect_pos = np.sum((gt_label == 1) * (pred_label == 1), + axis=1).astype(float) + # IOU + result.union_pos = np.sum(((gt_label == 1) + (pred_label == 1)), + axis=1).astype(float) + + return result + + +class ATTRMetric(nn.Layer): + def __init__(self, threshold=0.5): + super().__init__() + self.threshold = threshold + + def reset(self): + self.attrmeter = AttrMeter(threshold=0.5) + + def forward(self, output, target): + metric_dict = get_attr_metrics(target[:, 0, :].numpy(), + output.numpy(), self.threshold) + self.attrmeter.update(metric_dict) + return metric_dict + + +class MultiLabelMAP(nn.Layer): + """ + Calculate multi-label classification mean average precision. + Currently, support two types: 11point and integral + + The code base on: + https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/metrics/map_utils.py + + Args: + map_type (str): Calculation method of mean average. 
+ """ + + def __init__(self, map_type='integral'): + super().__init__() + assert map_type in ['11point', 'integral'], \ + "map_type currently only support '11point' and 'integral'" + self.map_type = map_type + + self.reset() + + def reset(self): + self.is_latest = True + self.class_score_poss = None + self.class_gt_counts = None + self.mAP = 0.0 + + def one_class_update(self, score, gt_label, class_idx): + topk_idx = np.argsort(score)[::-1] + topk_score = score[topk_idx] + topk_gt_label = gt_label[topk_idx] + for s, l in zip(topk_score, topk_gt_label): + if int(l) == 1: + self.class_score_poss[class_idx].append([s, 1.]) + self.class_gt_counts[class_idx] += 1 + else: + self.class_score_poss[class_idx].append([s, 0.]) + + @staticmethod + def get_tp_fp_accum(score_pos_list): + """ + Calculate accumulating true/false positive results from + [score, pos] records + """ + sorted_list = sorted(score_pos_list, key=lambda s: s[0], reverse=True) + + accum_tp = 0 + accum_fp = 0 + accum_tp_list = [] + accum_fp_list = [] + for (score, pos) in sorted_list: + accum_tp += int(pos) + accum_tp_list.append(accum_tp) + accum_fp += 1 - int(pos) + accum_fp_list.append(accum_fp) + + return accum_tp_list, accum_fp_list + + def compute_mAP(self): + if not self.is_latest: + mAP = 0. + valid_cnt = 0 + for score_pos, count in zip(self.class_score_poss, + self.class_gt_counts): + if count == 0: + continue + + if len(score_pos) == 0: + valid_cnt += 1 + continue + + accum_tp_list, accum_fp_list = \ + self.get_tp_fp_accum(score_pos) + precision = [] + recall = [] + for ac_tp, ac_fp in zip(accum_tp_list, accum_fp_list): + precision.append(float(ac_tp) / (ac_tp + ac_fp)) + recall.append(float(ac_tp) / count) + + one_class_ap = 0.0 + if self.map_type == '11point': + max_precisions = [0.] * 11 + start_idx = len(precision) - 1 + for j in range(10, -1, -1): + for i in range(start_idx, -1, -1): + if recall[i] < float(j) / 10.: + start_idx = i + if j > 0: + max_precisions[j - 1] = max_precisions[j] + break + else: + if max_precisions[j] < precision[i]: + max_precisions[j] = precision[i] + one_class_ap = sum(max_precisions) / 11. + mAP += one_class_ap + valid_cnt += 1 + elif self.map_type == 'integral': + import math + prev_recall = 0. 
+ for i in range(len(precision)): + recall_gap = math.fabs(recall[i] - prev_recall) + if recall_gap > 1e-6: + one_class_ap += precision[i] * recall_gap + prev_recall = recall[i] + mAP += one_class_ap + valid_cnt += 1 + else: + raise NotImplementedError( + f"Unsupported mAP type {self.map_type}") + + self.mAP = mAP / float(valid_cnt) if valid_cnt > 0 else mAP + + self.is_latest = True + + def forward(self, output, target): + scores = F.sigmoid(output).numpy() + gt_labels = target.numpy() + + if self.class_score_poss is None: + self.class_score_poss = [[] for _ in range(scores.shape[-1])] + if self.class_gt_counts is None: + self.class_gt_counts = [0] * scores.shape[-1] + + for class_idx in range(scores.shape[-1]): + score = scores[:, class_idx] + gt_label = gt_labels[:, class_idx] + self.one_class_update(score, gt_label, class_idx) + + self.is_latest = False + + return {} + + @property + def avg_info(self): + self.compute_mAP() + return f"MultiLabelMAP({self.map_type}): {self.mAP:.3f}" + + @property + def avg(self): + self.compute_mAP() + return self.mAP diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/optimizer/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/optimizer/__init__.py new file mode 100644 index 000000000..992bc57a1 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/optimizer/__init__.py @@ -0,0 +1,137 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import re +import copy +import paddle +from typing import Dict, List + +from ppcls.engine.train.utils import type_name +from ppcls.utils import logger + +from . import optimizer + +__all__ = ['build_optimizer'] + + +def build_lr_scheduler(lr_config, epochs, step_each_epoch): + from . import learning_rate + lr_config.update({'epochs': epochs, 'step_each_epoch': step_each_epoch}) + if 'name' in lr_config: + lr_name = lr_config.pop('name') + lr = getattr(learning_rate, lr_name)(**lr_config) + if isinstance(lr, paddle.optimizer.lr.LRScheduler): + return lr + else: + return lr() + else: + lr = lr_config['learning_rate'] + return lr + + +# model_list is None in static graph +def build_optimizer(config, epochs, step_each_epoch, model_list=None): + optim_config = copy.deepcopy(config) + if isinstance(optim_config, dict): + # convert {'name': xxx, **optim_cfg} to [{name: {scope: xxx, **optim_cfg}}] + optim_name = optim_config.pop("name") + optim_config: List[Dict[str, Dict]] = [{ + optim_name: { + 'scope': "all", + ** + optim_config + } + }] + optim_list = [] + lr_list = [] + """NOTE: + Currently only support optim objets below. + 1. single optimizer config. + 2. next level uner Arch, such as Arch.backbone, Arch.neck, Arch.head. + 3. loss which has parameters, such as CenterLoss. 
+ """ + for optim_item in optim_config: + # optim_cfg = {optim_name: {scope: xxx, **optim_cfg}} + # step1 build lr + optim_name = list(optim_item.keys())[0] # get optim_name + optim_scope = optim_item[optim_name].pop('scope') # get optim_scope + optim_cfg = optim_item[optim_name] # get optim_cfg + + lr = build_lr_scheduler(optim_cfg.pop('lr'), epochs, step_each_epoch) + logger.debug("build lr ({}) for scope ({}) success..".format( + lr, optim_scope)) + # step2 build regularization + if 'regularizer' in optim_cfg and optim_cfg['regularizer'] is not None: + if 'weight_decay' in optim_cfg: + logger.warning( + "ConfigError: Only one of regularizer and weight_decay can be set in Optimizer Config. \"weight_decay\" has been ignored." + ) + reg_config = optim_cfg.pop('regularizer') + reg_name = reg_config.pop('name') + 'Decay' + reg = getattr(paddle.regularizer, reg_name)(**reg_config) + optim_cfg["weight_decay"] = reg + logger.debug("build regularizer ({}) for scope ({}) success..". + format(reg, optim_scope)) + # step3 build optimizer + if 'clip_norm' in optim_cfg: + clip_norm = optim_cfg.pop('clip_norm') + grad_clip = paddle.nn.ClipGradByNorm(clip_norm=clip_norm) + else: + grad_clip = None + optim_model = [] + + # for static graph + if model_list is None: + optim = getattr(optimizer, optim_name)( + learning_rate=lr, grad_clip=grad_clip, + **optim_cfg)(model_list=optim_model) + return optim, lr + + # for dynamic graph + for i in range(len(model_list)): + if len(model_list[i].parameters()) == 0: + continue + if optim_scope == "all": + # optimizer for all + optim_model.append(model_list[i]) + else: + if optim_scope.endswith("Loss"): + # optimizer for loss + for m in model_list[i].sublayers(True): + if type_name(m) == optim_scope: + optim_model.append(m) + else: + # opmizer for module in model, such as backbone, neck, head... + if optim_scope == type_name(model_list[i]): + optim_model.append(model_list[i]) + elif hasattr(model_list[i], optim_scope): + optim_model.append(getattr(model_list[i], optim_scope)) + else: + for name, layer in model_list[i].named_sublayers(): + if len(layer.parameters()) != 0 \ + and re.fullmatch(optim_scope, name): + optim_model.append(layer) + + optim = getattr(optimizer, optim_name)( + learning_rate=lr, grad_clip=grad_clip, + **optim_cfg)(model_list=optim_model) + logger.debug("build optimizer ({}) for scope ({}) success..".format( + optim, optim_scope)) + optim_list.append(optim) + lr_list.append(lr) + return optim_list, lr_list diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/optimizer/learning_rate.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/optimizer/learning_rate.py new file mode 100644 index 000000000..168bf5e39 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/optimizer/learning_rate.py @@ -0,0 +1,687 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import (absolute_import, division, print_function, + unicode_literals) +import math +import types +from abc import abstractmethod +from typing import Union +from paddle.optimizer import lr +from ppcls.utils import logger + + +class LRBase(object): + """Base class for custom learning rates + + Args: + epochs (int): total epoch(s) + step_each_epoch (int): number of iterations within an epoch + learning_rate (float): learning rate + warmup_epoch (int): number of warmup epoch(s) + warmup_start_lr (float): start learning rate within warmup + last_epoch (int): last epoch + by_epoch (bool): learning rate decays by epoch when by_epoch is True, else by iter + verbose (bool): If True, prints a message to stdout for each update. Defaults to False + """ + + def __init__(self, + epochs: int, + step_each_epoch: int, + learning_rate: float, + warmup_epoch: int, + warmup_start_lr: float, + last_epoch: int, + by_epoch: bool, + verbose: bool=False) -> None: + """Initialize and record the necessary parameters + """ + super(LRBase, self).__init__() + if warmup_epoch >= epochs: + msg = f"When using warm up, the value of \"Global.epochs\" must be greater than value of \"Optimizer.lr.warmup_epoch\". The value of \"Optimizer.lr.warmup_epoch\" has been set to {epochs}." + logger.warning(msg) + warmup_epoch = epochs + self.epochs = epochs + self.step_each_epoch = step_each_epoch + self.learning_rate = learning_rate + self.warmup_epoch = warmup_epoch + self.warmup_steps = self.warmup_epoch if by_epoch else round( + self.warmup_epoch * self.step_each_epoch) + self.warmup_start_lr = warmup_start_lr + self.last_epoch = last_epoch + self.by_epoch = by_epoch + self.verbose = verbose + + @abstractmethod + def __call__(self, *kargs, **kwargs) -> lr.LRScheduler: + """generate an learning rate scheduler + + Returns: + lr.LinearWarmup: learning rate scheduler + """ + pass + + def linear_warmup( + self, + learning_rate: Union[float, lr.LRScheduler]) -> lr.LinearWarmup: + """Add an Linear Warmup before learning_rate + + Args: + learning_rate (Union[float, lr.LRScheduler]): original learning rate without warmup + + Returns: + lr.LinearWarmup: learning rate scheduler with warmup + """ + warmup_lr = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.learning_rate, + last_epoch=self.last_epoch, + verbose=self.verbose) + return warmup_lr + + +class Constant(lr.LRScheduler): + """Constant learning rate Class implementation + + Args: + learning_rate (float): The initial learning rate + last_epoch (int, optional): The index of last epoch. Default: -1. 
+ """ + + def __init__(self, learning_rate, last_epoch=-1, **kwargs): + self.learning_rate = learning_rate + self.last_epoch = last_epoch + super(Constant, self).__init__() + + def get_lr(self) -> float: + """always return the same learning rate + """ + return self.learning_rate + + +class ConstLR(LRBase): + """Constant learning rate + + Args: + epochs (int): total epoch(s) + step_each_epoch (int): number of iterations within an epoch + learning_rate (float): learning rate + warmup_epoch (int): number of warmup epoch(s) + warmup_start_lr (float): start learning rate within warmup + last_epoch (int): last epoch + by_epoch (bool): learning rate decays by epoch when by_epoch is True, else by iter + """ + + def __init__(self, + epochs, + step_each_epoch, + learning_rate, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + by_epoch=False, + **kwargs): + super(ConstLR, self).__init__(epochs, step_each_epoch, learning_rate, + warmup_epoch, warmup_start_lr, + last_epoch, by_epoch) + + def __call__(self): + learning_rate = Constant( + learning_rate=self.learning_rate, last_epoch=self.last_epoch) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class Linear(LRBase): + """Linear learning rate decay + + Args: + epochs (int): total epoch(s) + step_each_epoch (int): number of iterations within an epoch + learning_rate (float): learning rate + end_lr (float, optional): The minimum final learning rate. Defaults to 0.0. + power (float, optional): Power of polynomial. Defaults to 1.0. + warmup_epoch (int): number of warmup epoch(s) + warmup_start_lr (float): start learning rate within warmup + last_epoch (int): last epoch + by_epoch (bool): learning rate decays by epoch when by_epoch is True, else by iter + """ + + def __init__(self, + epochs, + step_each_epoch, + learning_rate, + end_lr=0.0, + power=1.0, + cycle=False, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + by_epoch=False, + **kwargs): + super(Linear, self).__init__(epochs, step_each_epoch, learning_rate, + warmup_epoch, warmup_start_lr, last_epoch, + by_epoch) + self.decay_steps = (epochs - self.warmup_epoch) * step_each_epoch + self.end_lr = end_lr + self.power = power + self.cycle = cycle + self.warmup_steps = round(self.warmup_epoch * step_each_epoch) + if self.by_epoch: + self.decay_steps = self.epochs - self.warmup_epoch + + def __call__(self): + learning_rate = lr.PolynomialDecay( + learning_rate=self.learning_rate, + decay_steps=self.decay_steps, + end_lr=self.end_lr, + power=self.power, + cycle=self.cycle, + last_epoch=self.last_epoch) if self.decay_steps > 0 else Constant( + self.learning_rate) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class Cosine(LRBase): + """Cosine learning rate decay + + ``lr = 0.05 * (math.cos(epoch * (math.pi / epochs)) + 1)`` + + Args: + epochs (int): total epoch(s) + step_each_epoch (int): number of iterations within an epoch + learning_rate (float): learning rate + eta_min (float, optional): Minimum learning rate. Defaults to 0.0. + warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0. + warmup_start_lr (float, optional): start learning rate within warmup. Defaults to 0.0. + last_epoch (int, optional): last epoch. Defaults to -1. + by_epoch (bool, optional): learning rate decays by epoch when by_epoch is True, else by iter. 
Defaults to False. + """ + + def __init__(self, + epochs, + step_each_epoch, + learning_rate, + eta_min=0.0, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + by_epoch=False, + **kwargs): + super(Cosine, self).__init__(epochs, step_each_epoch, learning_rate, + warmup_epoch, warmup_start_lr, last_epoch, + by_epoch) + self.T_max = (self.epochs - self.warmup_epoch) * self.step_each_epoch + self.eta_min = eta_min + if self.by_epoch: + self.T_max = self.epochs - self.warmup_epoch + + def __call__(self): + learning_rate = lr.CosineAnnealingDecay( + learning_rate=self.learning_rate, + T_max=self.T_max, + eta_min=self.eta_min, + last_epoch=self.last_epoch) if self.T_max > 0 else Constant( + self.learning_rate) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class Cyclic(LRBase): + """Cyclic learning rate decay + + Args: + epochs (int): Total epoch(s). + step_each_epoch (int): Number of iterations within an epoch. + base_learning_rate (float): Initial learning rate, which is the lower boundary in the cycle. The paper recommends + that set the base_learning_rate to 1/3 or 1/4 of max_learning_rate. + max_learning_rate (float): Maximum learning rate in the cycle. It defines the cycle amplitude as above. + Since there is some scaling operation during process of learning rate adjustment, + max_learning_rate may not actually be reached. + warmup_epoch (int): Number of warmup epoch(s). + warmup_start_lr (float): Start learning rate within warmup. + step_size_up (int): Number of training steps, which is used to increase learning rate in a cycle. + The step size of one cycle will be defined by step_size_up + step_size_down. According to the paper, step + size should be set as at least 3 or 4 times steps in one epoch. + step_size_down (int, optional): Number of training steps, which is used to decrease learning rate in a cycle. + If not specified, it's value will initialize to `` step_size_up `` . Default: None. + mode (str, optional): One of 'triangular', 'triangular2' or 'exp_range'. + If scale_fn is specified, this argument will be ignored. Default: 'triangular'. + exp_gamma (float): Constant in 'exp_range' scaling function: exp_gamma**iterations. Used only when mode = 'exp_range'. Default: 1.0. + scale_fn (function, optional): A custom scaling function, which is used to replace three build-in methods. + It should only have one argument. For all x >= 0, 0 <= scale_fn(x) <= 1. + If specified, then 'mode' will be ignored. Default: None. + scale_mode (str, optional): One of 'cycle' or 'iterations'. Defines whether scale_fn is evaluated on cycle + number or cycle iterations (total iterations since start of training). Default: 'cycle'. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. + by_epoch (bool): Learning rate decays by epoch when by_epoch is True, else by iter. + verbose: (bool, optional): If True, prints a message to stdout for each update. Defaults to False. 
+ """ + + def __init__(self, + epochs, + step_each_epoch, + base_learning_rate, + max_learning_rate, + warmup_epoch, + warmup_start_lr, + step_size_up, + step_size_down=None, + mode='triangular', + exp_gamma=1.0, + scale_fn=None, + scale_mode='cycle', + by_epoch=False, + last_epoch=-1, + verbose=False): + super(Cyclic, self).__init__( + epochs, step_each_epoch, base_learning_rate, warmup_epoch, + warmup_start_lr, last_epoch, by_epoch, verbose) + self.base_learning_rate = base_learning_rate + self.max_learning_rate = max_learning_rate + self.step_size_up = step_size_up + self.step_size_down = step_size_down + self.mode = mode + self.exp_gamma = exp_gamma + self.scale_fn = scale_fn + self.scale_mode = scale_mode + + def __call__(self): + learning_rate = lr.CyclicLR( + base_learning_rate=self.base_learning_rate, + max_learning_rate=self.max_learning_rate, + step_size_up=self.step_size_up, + step_size_down=self.step_size_down, + mode=self.mode, + exp_gamma=self.exp_gamma, + scale_fn=self.scale_fn, + scale_mode=self.scale_mode, + last_epoch=self.last_epoch, + verbose=self.verbose) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class Step(LRBase): + """Step learning rate decay + + Args: + epochs (int): total epoch(s) + step_each_epoch (int): number of iterations within an epoch + learning_rate (float): learning rate + step_size (int|float): the interval to update. + gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma``. It should be less than 1.0. Default: 0.1. + warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0. + warmup_start_lr (float, optional): start learning rate within warmup. Defaults to 0.0. + last_epoch (int, optional): last epoch. Defaults to -1. + by_epoch (bool, optional): learning rate decays by epoch when by_epoch is True, else by iter. Defaults to False. + """ + + def __init__(self, + epochs, + step_each_epoch, + learning_rate, + step_size, + gamma, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + by_epoch=False, + **kwargs): + super(Step, self).__init__(epochs, step_each_epoch, learning_rate, + warmup_epoch, warmup_start_lr, last_epoch, + by_epoch) + self.step_size = int(step_size * step_each_epoch) + self.gamma = gamma + if self.by_epoch: + self.step_size = step_size + + def __call__(self): + learning_rate = lr.StepDecay( + learning_rate=self.learning_rate, + step_size=self.step_size, + gamma=self.gamma, + last_epoch=self.last_epoch) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class Piecewise(LRBase): + """Piecewise learning rate decay + + Args: + epochs (int): total epoch(s) + step_each_epoch (int): number of iterations within an epoch + decay_epochs (List[int]): A list of steps numbers. The type of element in the list is python int. + values (List[float]): A list of learning rate values that will be picked during different epoch boundaries. + warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0. + warmup_start_lr (float, optional): start learning rate within warmup. Defaults to 0.0. + last_epoch (int, optional): last epoch. Defaults to -1. + by_epoch (bool, optional): learning rate decays by epoch when by_epoch is True, else by iter. Defaults to False. 
+ """ + + def __init__(self, + epochs, + step_each_epoch, + decay_epochs, + values, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + by_epoch=False, + learning_rate=None, + **kwargs): + if learning_rate: + decay_epochs = list(range(0, epochs, 30)) + values = [ + learning_rate * (0.1**i) for i in range(len(decay_epochs)) + ] + # when total epochs < 30, decay_epochs and values should be + # [] and [lr] respectively, but paddle dont support. + if len(decay_epochs) == 1: + decay_epochs = [epochs] + values = [values[0], values[0]] + else: + decay_epochs = decay_epochs[1:] + logger.warning( + "When 'learning_rate' of Piecewise has beed set, " + "the learning rate scheduler would be set by the rule that lr decay 10 times every 30 epochs. " + f"So, the 'decay_epochs' and 'values' have been set to {decay_epochs} and {values} respectively." + ) + super(Piecewise, + self).__init__(epochs, step_each_epoch, values[0], warmup_epoch, + warmup_start_lr, last_epoch, by_epoch) + + self.values = values + self.boundaries_steps = [e * step_each_epoch for e in decay_epochs] + if self.by_epoch is True: + self.boundaries_steps = decay_epochs + + def __call__(self): + learning_rate = lr.PiecewiseDecay( + boundaries=self.boundaries_steps, + values=self.values, + last_epoch=self.last_epoch) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class MultiStepDecay(LRBase): + """MultiStepDecay learning rate decay + + Args: + epochs (int): total epoch(s) + step_each_epoch (int): number of iterations within an epoch + learning_rate (float): learning rate + milestones (List[int]): List of each boundaries. Must be increasing. + gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma``. It should be less than 1.0. Defaults to 0.1. + warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0. + warmup_start_lr (float, optional): start learning rate within warmup. Defaults to 0.0. + last_epoch (int, optional): last epoch. Defaults to -1. + by_epoch (bool, optional): learning rate decays by epoch when by_epoch is True, else by iter. Defaults to False. + """ + + def __init__(self, + epochs, + step_each_epoch, + learning_rate, + milestones, + gamma=0.1, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + by_epoch=False, + **kwargs): + super(MultiStepDecay, self).__init__( + epochs, step_each_epoch, learning_rate, warmup_epoch, + warmup_start_lr, last_epoch, by_epoch) + self.milestones = [x * step_each_epoch for x in milestones] + self.gamma = gamma + if self.by_epoch: + self.milestones = milestones + + def __call__(self): + learning_rate = lr.MultiStepDecay( + learning_rate=self.learning_rate, + milestones=self.milestones, + gamma=self.gamma, + last_epoch=self.last_epoch) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class ReduceOnPlateau(LRBase): + """ReduceOnPlateau learning rate decay + Args: + epochs (int): total epoch(s) + step_each_epoch (int): number of iterations within an epoch + learning_rate (float): learning rate + mode (str, optional): ``'min'`` or ``'max'`` can be selected. Normally, it is ``'min'`` , which means that the + learning rate will reduce when ``loss`` stops descending. Specially, if it's set to ``'max'``, the learning + rate will reduce when ``loss`` stops ascending. Defaults to ``'min'``. 
+ factor (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * factor`` . + It should be less than 1.0. Defaults to 0.1. + patience (int, optional): When ``loss`` doesn't improve for this number of epochs, learing rate will be reduced. + Defaults to 10. + threshold (float, optional): ``threshold`` and ``threshold_mode`` will determine the minimum change of ``loss`` . + This make tiny changes of ``loss`` will be ignored. Defaults to 1e-4. + threshold_mode (str, optional): ``'rel'`` or ``'abs'`` can be selected. In ``'rel'`` mode, the minimum change of ``loss`` + is ``last_loss * threshold`` , where ``last_loss`` is ``loss`` in last epoch. In ``'abs'`` mode, the minimum + change of ``loss`` is ``threshold`` . Defaults to ``'rel'`` . + cooldown (int, optional): The number of epochs to wait before resuming normal operation. Defaults to 0. + min_lr (float, optional): The lower bound of the learning rate after reduction. Defaults to 0. + epsilon (float, optional): Minimal decay applied to lr. If the difference between new and old lr is smaller than epsilon, + the update is ignored. Defaults to 1e-8. + warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0. + warmup_start_lr (float, optional): start learning rate within warmup. Defaults to 0.0. + last_epoch (int, optional): last epoch. Defaults to -1. + by_epoch (bool, optional): learning rate decays by epoch when by_epoch is True, else by iter. Defaults to False. + """ + + def __init__(self, + epochs, + step_each_epoch, + learning_rate, + mode='min', + factor=0.1, + patience=10, + threshold=1e-4, + threshold_mode='rel', + cooldown=0, + min_lr=0, + epsilon=1e-8, + warmup_epoch=0, + warmup_start_lr=0.0, + last_epoch=-1, + by_epoch=False, + **kwargs): + super(ReduceOnPlateau, self).__init__( + epochs, step_each_epoch, learning_rate, warmup_epoch, + warmup_start_lr, last_epoch, by_epoch) + self.mode = mode + self.factor = factor + self.patience = patience + self.threshold = threshold + self.threshold_mode = threshold_mode + self.cooldown = cooldown + self.min_lr = min_lr + self.epsilon = epsilon + + def __call__(self): + learning_rate = lr.ReduceOnPlateau( + learning_rate=self.learning_rate, + mode=self.mode, + factor=self.factor, + patience=self.patience, + threshold=self.threshold, + threshold_mode=self.threshold_mode, + cooldown=self.cooldown, + min_lr=self.min_lr, + epsilon=self.epsilon) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + # NOTE: Implement get_lr() method for class `ReduceOnPlateau`, + # which is called in `log_info` function + def get_lr(self): + return self.last_lr + + learning_rate.get_lr = types.MethodType(get_lr, learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class CosineFixmatch(LRBase): + """Cosine decay in FixMatch style + + Args: + epochs (int): total epoch(s) + step_each_epoch (int): number of iterations within an epoch + learning_rate (float): learning rate + num_warmup_steps (int): the number warmup steps. + warmunum_cycles (float, optional): the factor for cosine in FixMatch learning rate. Defaults to 7 / 16. + last_epoch (int, optional): last epoch. Defaults to -1. + by_epoch (bool, optional): learning rate decays by epoch when by_epoch is True, else by iter. Defaults to False. 
+ """ + + def __init__(self, + epochs, + step_each_epoch, + learning_rate, + num_warmup_steps, + num_cycles=7 / 16, + last_epoch=-1, + by_epoch=False): + self.epochs = epochs + self.step_each_epoch = step_each_epoch + self.learning_rate = learning_rate + self.num_warmup_steps = num_warmup_steps + self.num_cycles = num_cycles + self.last_epoch = last_epoch + self.by_epoch = by_epoch + + def __call__(self): + def _lr_lambda(current_step): + if current_step < self.num_warmup_steps: + return float(current_step) / float( + max(1, self.num_warmup_steps)) + no_progress = float(current_step - self.num_warmup_steps) / \ + float(max(1, self.epochs * self.step_each_epoch - self.num_warmup_steps)) + return max(0., math.cos(math.pi * self.num_cycles * no_progress)) + + learning_rate = lr.LambdaDecay( + learning_rate=self.learning_rate, + lr_lambda=_lr_lambda, + last_epoch=self.last_epoch) + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class OneCycleLR(LRBase): + """OneCycleLR learning rate decay + + Args: + epochs (int): Total epoch(s). + step_each_epoch (int): Number of iterations within an epoch. + max_learning_rate (float): Maximum learning rate in the cycle. It defines the cycle amplitude as above. + Since there is some scaling operation during process of learning rate adjustment, + max_learning_rate may not actually be reached. + warmup_epoch (int): Number of warmup epoch(s). + warmup_start_lr (float): Start learning rate within warmup. + divide_factor (float, optional): Initial learning rate will be determined by initial_learning_rate = max_learning_rate / divide_factor. Default: 25. + phase_pct (float): The percentage of total steps which used to increasing learning rate. Default: 0.3. + end_learning_rate (float, optional): The minimum learning rate during training, it should be much less than initial learning rate. + anneal_strategy (str, optional): Strategy of adjusting learning rate.'cos' for cosine annealing, 'linear' for linear annealing. Default: 'cos'. + three_phase (bool, optional): Whether to use three-phase. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. + by_epoch (bool): Learning rate decays by epoch when by_epoch is True, else by iter. + verbose: (bool, optional): If True, prints a message to stdout for each update. Defaults to False. 
+ """ + + def __init__(self, + epochs, + step_each_epoch, + max_learning_rate, + warmup_epoch=0, + warmup_start_lr=0.0, + divide_factor=25.0, + end_learning_rate=1e-10, + phase_pct=0.3, + anneal_strategy='cos', + three_phase=False, + by_epoch=False, + last_epoch=-1, + verbose=False): + super().__init__( + epochs, step_each_epoch, max_learning_rate, + warmup_epoch, warmup_start_lr, last_epoch, by_epoch, verbose) + self.max_learning_rate = max_learning_rate + self.total_steps = epochs * step_each_epoch + self.divide_factor = divide_factor + self.end_learning_rate = end_learning_rate + self.phase_pct = phase_pct + self.anneal_strategy = anneal_strategy + self.three_phase = three_phase + self.last_epoch = last_epoch + + def __call__(self): + learning_rate = lr.OneCycleLR( + max_learning_rate=self.max_learning_rate, + total_steps=self.total_steps, + end_learning_rate=self.end_learning_rate, + divide_factor=self.divide_factor, + phase_pct=self.phase_pct, + anneal_strategy=self.anneal_strategy, + three_phase=self.three_phase, + last_epoch=self.last_epoch, + verbose=self.verbose) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/optimizer/optimizer.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/optimizer/optimizer.py new file mode 100644 index 000000000..adc417c87 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/optimizer/optimizer.py @@ -0,0 +1,518 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import paddle +from paddle import optimizer as optim +from ppcls.utils import logger +from functools import partial + + +class SGD(object): + """ + Args: + learning_rate (float|Tensor|LearningRateDecay, optional): The learning rate used to update ``Parameter``. + It can be a float value, a ``Tensor`` with a float type or a LearningRateDecay. The default value is 0.001. + parameters (list|tuple, optional): List/Tuple of ``Tensor`` to update to minimize ``loss``. \ + This parameter is required in dygraph mode. \ + The default value is None in static mode, at this time all parameters will be updated. + weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ + It canbe a float value as coeff of L2 regularization or \ + :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`. + If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \ + the regularization setting here in optimizer will be ignored for this parameter. \ + Otherwise, the regularization setting here in optimizer will take effect. \ + Default None, meaning there is no regularization. + grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of + some derived class of ``GradientClipBase`` . 
There are three cliping strategies + ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , + :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + name (str, optional): The default value is None. Normally there is no need for user + to set this property. + """ + + def __init__(self, + learning_rate=0.001, + weight_decay=None, + grad_clip=None, + multi_precision=False, + name=None): + self.learning_rate = learning_rate + self.weight_decay = weight_decay + self.grad_clip = grad_clip + self.multi_precision = multi_precision + self.name = name + + def __call__(self, model_list): + # model_list is None in static graph + parameters = sum([m.parameters() for m in model_list], + []) if model_list else None + argspec = inspect.getfullargspec(optim.SGD.__init__).args + if 'multi_precision' in argspec: + opt = optim.SGD(learning_rate=self.learning_rate, + parameters=parameters, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + multi_precision=self.multi_precision, + name=self.name) + else: + opt = optim.SGD(learning_rate=self.learning_rate, + parameters=parameters, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + name=self.name) + return opt + + +class Momentum(object): + """ + Simple Momentum optimizer with velocity state. + Args: + learning_rate (float|Variable) - The learning rate used to update parameters. + Can be a float value or a Variable with one float value as data element. + momentum (float) - Momentum factor. + regularization (WeightDecayRegularizer, optional) - The strategy of regularization. + """ + + def __init__(self, + learning_rate, + momentum, + weight_decay=None, + grad_clip=None, + use_nesterov=False, + multi_precision=True, + no_weight_decay_name=None, + one_dim_param_no_weight_decay=False): + super().__init__() + self.learning_rate = learning_rate + self.momentum = momentum + self.weight_decay = weight_decay + self.grad_clip = grad_clip + self.multi_precision = multi_precision + self.use_nesterov = use_nesterov + self.no_weight_decay_name_list = no_weight_decay_name.split( + ) if no_weight_decay_name else [] + self.one_dim_param_no_weight_decay = one_dim_param_no_weight_decay + + def __call__(self, model_list): + # model_list is None in static graph + parameters = None + if model_list: + # TODO(gaotingquan): to avoid cause issues for unset no_weight_decay models + if len(self.no_weight_decay_name_list) > 0: + params_with_decay = [] + params_without_decay = [] + for m in model_list: + for n, p in m.named_parameters(): + if any(nd in n for nd in self.no_weight_decay_name_list) \ + or (self.one_dim_param_no_weight_decay and len(p.shape) == 1): + params_without_decay.append(p) + else: + params_with_decay.append(p) + parameters = [{ + "params": params_with_decay, + "weight_decay": self.weight_decay + }, { + "params": params_without_decay, + "weight_decay": 0.0 + }] + else: + parameters = sum([m.parameters() for m in model_list], []) + opt = optim.Momentum( + learning_rate=self.learning_rate, + momentum=self.momentum, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + multi_precision=self.multi_precision, + use_nesterov=self.use_nesterov, + parameters=parameters) + if hasattr(opt, '_use_multi_tensor'): + opt = optim.Momentum( + learning_rate=self.learning_rate, + momentum=self.momentum, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + multi_precision=self.multi_precision, + parameters=parameters, + use_nesterov=self.use_nesterov, + 
use_multi_tensor=True) + return opt + + +class Adam(object): + def __init__(self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + parameter_list=None, + weight_decay=None, + grad_clip=None, + name=None, + lazy_mode=False, + multi_precision=False): + self.learning_rate = learning_rate + self.beta1 = beta1 + self.beta2 = beta2 + self.epsilon = epsilon + self.parameter_list = parameter_list + self.learning_rate = learning_rate + self.weight_decay = weight_decay + self.grad_clip = grad_clip + self.name = name + self.lazy_mode = lazy_mode + self.multi_precision = multi_precision + + def __call__(self, model_list): + # model_list is None in static graph + parameters = sum([m.parameters() for m in model_list], + []) if model_list else None + opt = optim.Adam( + learning_rate=self.learning_rate, + beta1=self.beta1, + beta2=self.beta2, + epsilon=self.epsilon, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + name=self.name, + lazy_mode=self.lazy_mode, + multi_precision=self.multi_precision, + parameters=parameters) + return opt + + +class RMSProp(object): + """ + Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning rate method. + Args: + learning_rate (float|Variable) - The learning rate used to update parameters. + Can be a float value or a Variable with one float value as data element. + momentum (float) - Momentum factor. + rho (float) - rho value in equation. + epsilon (float) - avoid division by zero, default is 1e-6. + regularization (WeightDecayRegularizer, optional) - The strategy of regularization. + """ + + def __init__(self, + learning_rate, + momentum=0.0, + rho=0.95, + epsilon=1e-6, + weight_decay=None, + grad_clip=None, + multi_precision=False, + no_weight_decay_name=None, + one_dim_param_no_weight_decay=False): + super().__init__() + self.learning_rate = learning_rate + self.momentum = momentum + self.rho = rho + self.epsilon = epsilon + self.weight_decay = weight_decay + self.grad_clip = grad_clip + self.no_weight_decay_name_list = no_weight_decay_name.split( + ) if no_weight_decay_name else [] + self.one_dim_param_no_weight_decay = one_dim_param_no_weight_decay + + def __call__(self, model_list): + # model_list is None in static graph + parameters = None + if model_list: + params_with_decay = [] + params_without_decay = [] + for m in model_list: + for n, p in m.named_parameters(): + if any(nd in n for nd in self.no_weight_decay_name_list) \ + or (self.one_dim_param_no_weight_decay and len(p.shape) == 1): + params_without_decay.append(p) + else: + params_with_decay.append(p) + if params_without_decay: + parameters = [{ + "params": params_with_decay, + "weight_decay": self.weight_decay + }, { + "params": params_without_decay, + "weight_decay": 0.0 + }] + else: + parameters = params_with_decay + opt = optim.RMSProp( + learning_rate=self.learning_rate, + momentum=self.momentum, + rho=self.rho, + epsilon=self.epsilon, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + parameters=parameters) + return opt + + +class AdamW(object): + def __init__(self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + weight_decay=None, + multi_precision=False, + grad_clip=None, + no_weight_decay_name=None, + one_dim_param_no_weight_decay=False, + **args): + super().__init__() + self.learning_rate = learning_rate + self.beta1 = beta1 + self.beta2 = beta2 + self.epsilon = epsilon + self.grad_clip = grad_clip + self.weight_decay = weight_decay + self.multi_precision = multi_precision + self.no_weight_decay_name_list = 
no_weight_decay_name.split( + ) if no_weight_decay_name else [] + self.one_dim_param_no_weight_decay = one_dim_param_no_weight_decay + + def __call__(self, model_list): + # model_list is None in static graph + parameters = sum([m.parameters() for m in model_list], + []) if model_list else None + + # TODO(gaotingquan): model_list is None when in static graph, "no_weight_decay" not work. + if model_list is None: + if self.one_dim_param_no_weight_decay or len( + self.no_weight_decay_name_list) != 0: + msg = "\"AdamW\" does not support setting \"no_weight_decay\" in static graph. Please use dynamic graph." + logger.error(Exception(msg)) + raise Exception(msg) + + self.no_weight_decay_param_name_list = [ + p.name for model in model_list for n, p in model.named_parameters() + if any(nd in n for nd in self.no_weight_decay_name_list) + ] if model_list else [] + + if self.one_dim_param_no_weight_decay: + self.no_weight_decay_param_name_list += [ + p.name + for model in model_list for n, p in model.named_parameters() + if len(p.shape) == 1 + ] if model_list else [] + + opt = optim.AdamW( + learning_rate=self.learning_rate, + beta1=self.beta1, + beta2=self.beta2, + epsilon=self.epsilon, + parameters=parameters, + weight_decay=self.weight_decay, + multi_precision=self.multi_precision, + grad_clip=self.grad_clip, + apply_decay_param_fun=self._apply_decay_param_fun) + return opt + + def _apply_decay_param_fun(self, name): + return name not in self.no_weight_decay_param_name_list + + +class AdamWDL(object): + """ + The AdamWDL optimizer is implemented based on the AdamW Optimization with dynamic lr setting. + Generally it's used for transformer model. + """ + + def __init__(self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + weight_decay=None, + multi_precision=False, + grad_clip=None, + layerwise_decay=None, + filter_bias_and_bn=True, + **args): + self.learning_rate = learning_rate + self.beta1 = beta1 + self.beta2 = beta2 + self.epsilon = epsilon + self.grad_clip = grad_clip + self.weight_decay = weight_decay + self.multi_precision = multi_precision + self.layerwise_decay = layerwise_decay + self.filter_bias_and_bn = filter_bias_and_bn + + class AdamWDLImpl(optim.AdamW): + def __init__(self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + parameters=None, + weight_decay=0.01, + apply_decay_param_fun=None, + grad_clip=None, + lazy_mode=False, + multi_precision=False, + layerwise_decay=1.0, + n_layers=12, + name_dict=None, + name=None): + if not isinstance(layerwise_decay, float) and \ + not isinstance(layerwise_decay, paddle.static.Variable): + raise TypeError("coeff should be float or Tensor.") + self.layerwise_decay = layerwise_decay + self.name_dict = name_dict + self.n_layers = n_layers + self._coeff = weight_decay + self._lr_to_coeff = dict() + self.set_param_lr_func = partial( + self._layerwise_lr_decay, layerwise_decay, name_dict, n_layers) + super().__init__( + learning_rate=learning_rate, + parameters=parameters, + beta1=beta1, + beta2=beta2, + epsilon=epsilon, + grad_clip=grad_clip, + name=name, + apply_decay_param_fun=apply_decay_param_fun, + weight_decay=weight_decay, + lazy_mode=lazy_mode, + multi_precision=multi_precision,) + + # Layerwise decay + def _layerwise_lr_decay(self, decay_rate, name_dict, n_layers, param): + """ + Args: + decay_rate (float): + The layer-wise decay ratio. + name_dict (dict): + The keys of name_dict is dynamic name of model while the value + of name_dict is static name. + Use model.named_parameters() to get name_dict. 
+ n_layers (int): + Total number of layers in the transformer encoder. + """ + ratio = 1.0 + static_name = name_dict[param.name] + if "blocks" in static_name: + idx = static_name.find("blocks.") + layer = int(static_name[idx:].split(".")[1]) + ratio = decay_rate**(n_layers - layer) + elif any([ + key in static_name + for key in ["embed", "token", "conv1", "ln_pre"] + ]): + ratio = decay_rate**(n_layers + 1) + # param.optimize_attr["learning_rate"] *= ratio + return ratio + def _append_decoupled_weight_decay(self, block, param_and_grad): + """ + Add decoupled weight decay op. + parameter = parameter - parameter * coeff * lr + Args: + block: block in which variable is to be created + param_and_grad: (parameters, gradients) pairs, + the parameters need to decay. + Raises: + Exception: The type of coeff and parameter is not consistent. + """ + if isinstance(param_and_grad, dict): + param_and_grad = self._update_param_group(param_and_grad) + param, grad = param_and_grad + + if self._apply_decay_param_fun is not None and not self._apply_decay_param_fun(param.name): + return + + if isinstance(self._learning_rate, float): + learning_rate = self._learning_rate + else: + # NOTE. We add this function to the _append_optimize_op(), + # for we must make sure _create_param_lr() be called after + # optimizer._create_global_learning_rate(). + learning_rate = self._create_param_lr(param_and_grad) + + with block.program._optimized_guard([param, grad]), paddle.static.name_scope("weight decay"): + self._params_name.add(param.name) + + # If it has been calculated, the result will be reused. + # NOTE(wangxi): In dygraph mode, apply_gradient will be executed + # every step, so need clear _lr_to_coeff every step, + # we do this in _create_optimization_pass + decay_coeff = self._lr_to_coeff.get(learning_rate, None) + if decay_coeff is None: + # NOTE(wangxi): for pipeline to set device:all + with paddle.static.device_guard(None): + decay_coeff = 1.0 - learning_rate * self._coeff + self._lr_to_coeff[learning_rate] = decay_coeff + + find_master = self._multi_precision and param.dtype == paddle.float16 + if find_master: + master_weight = self._master_weights[param.name] + scaled_param = master_weight * decay_coeff + paddle.assign(scaled_param, output=master_weight) + else: + scaled_param = param * decay_coeff + paddle.assign(scaled_param, output=param) + + def _append_optimize_op(self, block, param_and_grad): + if self.set_param_lr_func is None: + return super()._append_optimize_op(block, param_and_grad) + + self._append_decoupled_weight_decay(block, param_and_grad) + prev_lr = param_and_grad[0].optimize_attr["learning_rate"] + ratio = self.set_param_lr_func(param_and_grad[0]) + param_and_grad[0].optimize_attr["learning_rate"] *= ratio + + # excute Adam op + res = super()._append_optimize_op(block, param_and_grad) + param_and_grad[0].optimize_attr["learning_rate"] = prev_lr + return res + + def __call__(self, model_list): + model = model_list[0] + if self.weight_decay and self.filter_bias_and_bn: + skip = {} + if hasattr(model, 'no_weight_decay'): + skip = model.no_weight_decay() + decay_dict = { + param.name: not (len(param.shape) == 1 or + name.endswith(".bias") or name in skip) + for name, param in model.named_parameters() + if not 'teacher' in name + } + parameters = [ + param for param in model.parameters() + if 'teacher' not in param.name + ] + weight_decay = 0. 
+        else:
+            parameters = model.parameters()
+            # keep decay_dict defined so the check below cannot raise NameError
+            # when weight-decay filtering is disabled
+            decay_dict = None
+        opt_args = dict(
+            learning_rate=self.learning_rate, weight_decay=self.weight_decay)
+        opt_args['parameters'] = parameters
+        if decay_dict is not None:
+            opt_args['apply_decay_param_fun'] = lambda n: decay_dict[n]
+        opt_args['epsilon'] = self.epsilon
+        opt_args['beta1'] = self.beta1
+        opt_args['beta2'] = self.beta2
+        if self.layerwise_decay and self.layerwise_decay < 1.0:
+            opt_args['layerwise_decay'] = self.layerwise_decay
+            name_dict = dict()
+            for n, p in model.named_parameters():
+                name_dict[p.name] = n
+            opt_args['name_dict'] = name_dict
+            opt_args['n_layers'] = model.get_num_layers()
+        optimizer = self.AdamWDLImpl(**opt_args)
+
+        return optimizer
\ No newline at end of file
diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/README.md b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/README.md
new file mode 100644
index 000000000..24093f4d0
--- /dev/null
+++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/README.md
@@ -0,0 +1,19 @@
+# Training in static graph mode
+
+The PaddlePaddle framework supports both dynamic graph and static graph modes. Dynamic graph training covers most scenarios, and after several releases of optimization its performance is now on par with static graph training. If a scenario does require static graph mode, we recommend the dynamic-to-static approach first: build the model with the easier-to-use dynamic graph API, then convert it to static graph mode for training. For compatibility with pure static graph training, PaddleClas also provides a static graph training entry.
+
+## 1. Dynamic-to-static training
+
+Simply set `Global.to_static` to `True` in the training config file and provide the input data shape via `Global.image_shape`, e.g. `[3, 224, 224]`.
+
+## 2. Static graph training
+
+In PaddleClas, static graph training is configured the same way as dynamic graph training, through a config file; the entry script is `ppcls/static/train.py`. Taking ResNet50 as an example, the launch command is:
+
+```bash
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+python3.7 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    ppcls/static/train.py \
+    -c ./ppcls/configs/ImageNet/ResNet/ResNet50_amp_O1.yaml
+```
diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/program.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/program.py
new file mode 100644
index 000000000..0e8d0990d
--- /dev/null
+++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/program.py
@@ -0,0 +1,445 @@
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
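As background for the static-graph utilities that follow, here is a minimal, self-contained illustration of the dynamic-to-static route recommended in the README above. `TinyNet` and the tensor shapes are hypothetical stand-ins; PaddleClas wires `paddle.jit.to_static` up from `Global.to_static` itself, so this only sketches the mechanism, not the project's exact code path.

```python
import paddle
from paddle.static import InputSpec


class TinyNet(paddle.nn.Layer):
    """Hypothetical stand-in for a classification backbone."""

    def __init__(self, class_num=1000):
        super().__init__()
        self.fc = paddle.nn.Linear(3 * 224 * 224, class_num)

    def forward(self, x):
        return self.fc(paddle.flatten(x, start_axis=1))


# Convert the dygraph Layer to a static program; the input_spec mirrors
# Global.image_shape = [3, 224, 224] from the README above.
net = paddle.jit.to_static(
    TinyNet(), input_spec=[InputSpec(shape=[None, 3, 224, 224], dtype="float32")])
logits = net(paddle.rand([1, 3, 224, 224]))  # traced on the first call
```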
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time +import numpy as np + +from collections import OrderedDict + +import paddle +import paddle.nn.functional as F + +from paddle.distributed import fleet +from paddle.distributed.fleet import DistributedStrategy + +# from ppcls.optimizer import OptimizerBuilder +# from ppcls.optimizer.learning_rate import LearningRateBuilder + +from ppcls.arch import build_model +from ppcls.loss import build_loss +from ppcls.metric import build_metrics +from ppcls.optimizer import build_optimizer +from ppcls.optimizer import build_lr_scheduler + +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger, profiler + + +def create_feeds(image_shape, use_mix=False, class_num=None, dtype="float32"): + """ + Create feeds as model input + + Args: + image_shape(list[int]): model input shape, such as [3, 224, 224] + use_mix(bool): whether to use mix(include mixup, cutmix, fmix) + class_num(int): the class number of network, required if use_mix + + Returns: + feeds(dict): dict of model input variables + """ + feeds = OrderedDict() + feeds['data'] = paddle.static.data( + name="data", shape=[None] + image_shape, dtype=dtype) + + if use_mix: + if class_num is None: + msg = "When use MixUp, CutMix and so on, you must set class_num." + logger.error(msg) + raise Exception(msg) + feeds['target'] = paddle.static.data( + name="target", shape=[None, class_num], dtype="float32") + else: + feeds['label'] = paddle.static.data( + name="label", shape=[None, 1], dtype="int64") + + return feeds + + +def create_fetchs(out, + feeds, + architecture, + topk=5, + epsilon=None, + class_num=None, + use_mix=False, + config=None, + mode="Train"): + """ + Create fetchs as model outputs(included loss and measures), + will call create_loss and create_metric(if use_mix). + Args: + out(variable): model output variable + feeds(dict): dict of model input variables. + If use mix_up, it will not include label. + architecture(dict): architecture information, + name(such as ResNet50) is needed + topk(int): usually top5 + epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0 + class_num(int): the class number of network, required if use_mix + use_mix(bool): whether to use mix(include mixup, cutmix, fmix) + config(dict): model config + + Returns: + fetchs(dict): dict of model outputs(included loss and measures) + """ + fetchs = OrderedDict() + # build loss + if use_mix: + if class_num is None: + msg = "When use MixUp, CutMix and so on, you must set class_num." 
+ logger.error(msg) + raise Exception(msg) + target = paddle.reshape(feeds['target'], [-1, class_num]) + else: + target = paddle.reshape(feeds['label'], [-1, 1]) + + loss_func = build_loss(config["Loss"][mode]) + loss_dict = loss_func(out, target) + + loss_out = loss_dict["loss"] + fetchs['loss'] = (loss_out, AverageMeter('loss', '7.4f', need_avg=True)) + + # build metric + if not use_mix: + metric_func = build_metrics(config["Metric"][mode]) + + metric_dict = metric_func(out, target) + + for key in metric_dict: + if mode != "Train" and paddle.distributed.get_world_size() > 1: + paddle.distributed.all_reduce( + metric_dict[key], op=paddle.distributed.ReduceOp.SUM) + metric_dict[key] = metric_dict[ + key] / paddle.distributed.get_world_size() + + fetchs[key] = (metric_dict[key], AverageMeter( + key, '7.4f', need_avg=True)) + + return fetchs + + +def create_optimizer(config, step_each_epoch): + # create learning_rate instance + optimizer, lr_sch = build_optimizer( + config["Optimizer"], config["Global"]["epochs"], step_each_epoch) + return optimizer, lr_sch + + +def create_strategy(config): + """ + Create build strategy and exec strategy. + + Args: + config(dict): config + + Returns: + build_strategy: build strategy + exec_strategy: exec strategy + """ + build_strategy = paddle.static.BuildStrategy() + + fuse_op = True if 'AMP' in config and config['AMP'].get('use_amp', + True) else False + + fuse_bn_act_ops = config.get('fuse_bn_act_ops', fuse_op) + fuse_elewise_add_act_ops = config.get('fuse_elewise_add_act_ops', fuse_op) + fuse_bn_add_act_ops = config.get('fuse_bn_add_act_ops', fuse_op) + enable_addto = config.get('enable_addto', fuse_op) + + build_strategy.fuse_bn_act_ops = fuse_bn_act_ops + build_strategy.fuse_elewise_add_act_ops = fuse_elewise_add_act_ops + build_strategy.fuse_bn_add_act_ops = fuse_bn_add_act_ops + build_strategy.enable_addto = enable_addto + + return build_strategy + + +def dist_optimizer(config, optimizer): + """ + Create a distributed optimizer based on a normal optimizer + + Args: + config(dict): + optimizer(): a normal optimizer + + Returns: + optimizer: a distributed optimizer + """ + build_strategy = create_strategy(config) + + dist_strategy = DistributedStrategy() + dist_strategy.build_strategy = build_strategy + + dist_strategy.nccl_comm_num = 1 + dist_strategy.fuse_all_reduce_ops = True + dist_strategy.fuse_grad_size_in_MB = 16 + optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy) + + return optimizer + + +def mixed_precision_optimizer(config, optimizer): + if 'AMP' in config and config['AMP'].get('use_amp', True): + amp_cfg = config.AMP if config.AMP else dict() + scale_loss = amp_cfg.get('scale_loss', 1.0) + use_dynamic_loss_scaling = amp_cfg.get('use_dynamic_loss_scaling', + False) + use_pure_fp16 = amp_cfg.get("level", "O1") == "O2" + optimizer = paddle.static.amp.decorate( + optimizer, + init_loss_scaling=scale_loss, + use_dynamic_loss_scaling=use_dynamic_loss_scaling, + use_pure_fp16=use_pure_fp16, + use_fp16_guard=True) + + return optimizer + + +def build(config, + main_prog, + startup_prog, + class_num=None, + step_each_epoch=100, + is_train=True, + is_distributed=True): + """ + Build a program using a model and an optimizer + 1. create feeds + 2. create a dataloader + 3. create a model + 4. create fetchs + 5. 
create an optimizer + + Args: + config(dict): config + main_prog(): main program + startup_prog(): startup program + class_num(int): the class number of network, required if use_mix + is_train(bool): train or eval + is_distributed(bool): whether to use distributed training method + + Returns: + dataloader(): a bridge between the model and the data + fetchs(dict): dict of model outputs(included loss and measures) + """ + with paddle.static.program_guard(main_prog, startup_prog): + with paddle.utils.unique_name.guard(): + mode = "Train" if is_train else "Eval" + use_mix = "batch_transform_ops" in config["DataLoader"][mode][ + "dataset"] + data_dtype = "float32" + if 'AMP' in config and config['AMP'].get( + 'use_amp', True) and config["AMP"]["level"] == 'O2': + data_dtype = "float16" + feeds = create_feeds( + config["Global"]["image_shape"], + use_mix, + class_num=class_num, + dtype=data_dtype) + + # build model + # data_format should be assigned in arch-dict + input_image_channel = config["Global"]["image_shape"][ + 0] # default as [3, 224, 224] + model = build_model(config) + out = model(feeds["data"]) + # end of build model + + fetchs = create_fetchs( + out, + feeds, + config["Arch"], + epsilon=config.get('ls_epsilon'), + class_num=class_num, + use_mix=use_mix, + config=config, + mode=mode) + lr_scheduler = None + optimizer = None + if is_train: + optimizer, lr_scheduler = build_optimizer( + config["Optimizer"], config["Global"]["epochs"], + step_each_epoch) + optimizer = mixed_precision_optimizer(config, optimizer) + if is_distributed: + optimizer = dist_optimizer(config, optimizer) + optimizer.minimize(fetchs['loss'][0]) + return fetchs, lr_scheduler, feeds, optimizer + + +def compile(config, program, loss_name=None, share_prog=None): + """ + Compile the program + + Args: + config(dict): config + program(): the program which is wrapped by + loss_name(str): loss name + share_prog(): the shared program, used for evaluation during training + + Returns: + compiled_program(): a compiled program + """ + build_strategy = create_strategy(config) + + compiled_program = paddle.static.CompiledProgram( + program, build_strategy=build_strategy) + + return compiled_program + + +total_step = 0 + + +def run(dataloader, + exe, + program, + feeds, + fetchs, + epoch=0, + mode='train', + config=None, + vdl_writer=None, + lr_scheduler=None, + profiler_options=None): + """ + Feed data to the model and fetch the measures and loss + + Args: + dataloader(paddle io dataloader): + exe(): + program(): + fetchs(dict): dict of measures and the loss + epoch(int): epoch of training or evaluation + model(str): log only + + Returns: + """ + fetch_list = [f[0] for f in fetchs.values()] + metric_dict = OrderedDict([("lr", AverageMeter( + 'lr', 'f', postfix=",", need_avg=False))]) + + for k in fetchs: + metric_dict[k] = fetchs[k][1] + + metric_dict["batch_time"] = AverageMeter( + 'batch_cost', '.5f', postfix=" s,") + metric_dict["reader_time"] = AverageMeter( + 'reader_cost', '.5f', postfix=" s,") + + for m in metric_dict.values(): + m.reset() + + use_dali = config["Global"].get('use_dali', False) + tic = time.time() + + if not use_dali: + dataloader = dataloader() + + idx = 0 + batch_size = None + while True: + # The DALI maybe raise RuntimeError for some particular images, such as ImageNet1k/n04418357_26036.JPEG + try: + batch = next(dataloader) + except StopIteration: + break + except RuntimeError: + logger.warning( + "Except RuntimeError when reading data from dataloader, try to read once again..." 
+ ) + continue + except IndexError: + logger.warning( + "Except IndexError when reading data from dataloader, try to read once again..." + ) + continue + idx += 1 + # ignore the warmup iters + if idx == 5: + metric_dict["batch_time"].reset() + metric_dict["reader_time"].reset() + + metric_dict['reader_time'].update(time.time() - tic) + + profiler.add_profiler_step(profiler_options) + + batch_size = batch[0].shape()[0] + feed_dict = { + key.name: batch[idx] + for idx, key in enumerate(feeds.values()) + } + + metrics = exe.run(program=program, + feed=feed_dict, + fetch_list=fetch_list) + + for name, m in zip(fetchs.keys(), metrics): + metric_dict[name].update(np.mean(m), batch_size) + metric_dict["batch_time"].update(time.time() - tic) + if mode == "train": + metric_dict['lr'].update(lr_scheduler.get_lr()) + + fetchs_str = ' '.join([ + str(metric_dict[key].mean) + if "time" in key else str(metric_dict[key].value) + for key in metric_dict + ]) + ips_info = " ips: {:.5f} samples/sec.".format( + batch_size / metric_dict["batch_time"].avg) + fetchs_str += ips_info + + if lr_scheduler is not None: + lr_scheduler.step() + + if vdl_writer: + global total_step + logger.scaler('loss', metrics[0][0], total_step, vdl_writer) + total_step += 1 + if mode == 'eval': + if idx % config.get('print_interval', 10) == 0: + logger.info("{:s} step:{:<4d} {:s}".format(mode, idx, + fetchs_str)) + else: + epoch_str = "epoch:{:<3d}".format(epoch) + step_str = "{:s} step:{:<4d}".format(mode, idx) + max_mem_reserved_str = f"max_mem_reserved: {paddle.device.cuda.max_memory_reserved()}" + max_mem_allocated_str = f"max_mem_allocated: {paddle.device.cuda.max_memory_allocated()}" + if idx % config.get('print_interval', 10) == 0: + logger.info( + f"{epoch_str} {step_str} {fetchs_str} {max_mem_reserved_str} {max_mem_allocated_str}" + ) + + tic = time.time() + + end_str = ' '.join([str(m.mean) for m in metric_dict.values()] + + [metric_dict["batch_time"].total]) + ips_info = "ips: {:.5f} samples/sec.".format(batch_size / + metric_dict["batch_time"].avg) + if mode == 'eval': + logger.info("END {:s} {:s} {:s}".format(mode, end_str, ips_info)) + else: + end_epoch_str = "END epoch:{:<3d}".format(epoch) + logger.info("{:s} {:s} {:s}".format(end_epoch_str, mode, end_str)) + if use_dali: + dataloader.reset() + + # return top1_acc in order to save the best model + if mode == 'eval': + return fetchs["top1"][1].avg diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/run_dali.sh b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/run_dali.sh new file mode 100644 index 000000000..5bf0ef4ca --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/run_dali.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +python3.7 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + ppcls/static/train.py \ + -c ./ppcls/configs/ImageNet/ResNet/ResNet50_amp_O1.yaml diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/save_load.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/save_load.py new file mode 100644 index 000000000..5d124fcf7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/save_load.py @@ -0,0 +1,139 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import errno +import os +import re +import shutil +import tempfile + +import paddle + +from ppcls.utils import logger + +__all__ = ['init_model', 'save_model'] + + +def _mkdir_if_not_exist(path): + """ + mkdir if not exists, ignore the exception when multiprocess mkdir together + """ + if not os.path.exists(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno == errno.EEXIST and os.path.isdir(path): + logger.warning( + 'be happy if some process has already created {}'.format( + path)) + else: + raise OSError('Failed to mkdir {}'.format(path)) + + +def _load_state(path): + if os.path.exists(path + '.pdopt'): + # XXX another hack to ignore the optimizer state + tmp = tempfile.mkdtemp() + dst = os.path.join(tmp, os.path.basename(os.path.normpath(path))) + shutil.copy(path + '.pdparams', dst + '.pdparams') + state = paddle.static.load_program_state(dst) + shutil.rmtree(tmp) + else: + state = paddle.static.load_program_state(path) + return state + + +def load_params(exe, prog, path, ignore_params=None): + """ + Load model from the given path. + Args: + exe (paddle.static.Executor): The paddle.static.Executor object. + prog (paddle.static.Program): load weight to which Program object. + path (string): URL string or loca model path. + ignore_params (list): ignore variable to load when finetuning. + It can be specified by finetune_exclude_pretrained_params + and the usage can refer to the document + docs/advanced_tutorials/TRANSFER_LEARNING.md + """ + if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')): + raise ValueError("Model pretrain path {} does not " + "exists.".format(path)) + + logger.info("Loading parameters from {}...".format(path)) + + ignore_set = set() + state = _load_state(path) + + # ignore the parameter which mismatch the shape + # between the model and pretrain weight. 
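+    # The shapes are compared name-by-name below: any parameter whose shape in
+    # `prog` differs from the pretrained `state` is collected into `ignore_set`
+    # and removed from `state`, so set_program_state() only receives tensors
+    # that fit the current network.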
+ all_var_shape = {} + for block in prog.blocks: + for param in block.all_parameters(): + all_var_shape[param.name] = param.shape + ignore_set.update([ + name for name, shape in all_var_shape.items() + if name in state and shape != state[name].shape + ]) + + if ignore_params: + all_var_names = [var.name for var in prog.list_vars()] + ignore_list = filter( + lambda var: any([re.match(name, var) for name in ignore_params]), + all_var_names) + ignore_set.update(list(ignore_list)) + + if len(ignore_set) > 0: + for k in ignore_set: + if k in state: + logger.warning( + 'variable {} is already excluded automatically'.format(k)) + del state[k] + + paddle.static.set_program_state(prog, state) + + +def init_model(config, program, exe): + """ + load model from checkpoint or pretrained_model + """ + checkpoints = config.get('checkpoints') + if checkpoints: + paddle.static.load(program, checkpoints, exe) + logger.info("Finish initing model from {}".format(checkpoints)) + return + + pretrained_model = config.get('pretrained_model') + if pretrained_model: + if not isinstance(pretrained_model, list): + pretrained_model = [pretrained_model] + for pretrain in pretrained_model: + load_params(exe, program, pretrain) + logger.info("Finish initing model from {}".format(pretrained_model)) + + +def save_model(program, model_path, epoch_id, prefix='ppcls'): + """ + save model to the target path + """ + if paddle.distributed.get_rank() != 0: + return + model_path = os.path.join(model_path, str(epoch_id)) + _mkdir_if_not_exist(model_path) + model_prefix = os.path.join(model_path, prefix) + paddle.static.save(program, model_prefix) + logger.info("Already save model in {}".format(model_path)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/train.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/train.py new file mode 100644 index 000000000..4f85fe16f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/static/train.py @@ -0,0 +1,227 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
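+
+# Static-graph training entry point. The overall flow implemented below is:
+#   1. parse the YAML config (values can be overridden on the command line,
+#      e.g. `-o Global.epochs=1`, handled by get_config/override_config);
+#   2. build the train (and optional eval) programs with program.build();
+#   3. compile the train program and run startup_prog for parameter init;
+#   4. loop over epochs calling program.run() for training and evaluation,
+#      keeping the best top-1 checkpoint and periodic snapshots via save_model().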
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(__dir__) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../../'))) + +import numpy as np +import random + +import paddle +from paddle.distributed import fleet +from visualdl import LogWriter + +from ppcls.data import build_dataloader +from ppcls.utils.config import get_config, print_config +from ppcls.utils import logger +from ppcls.utils.logger import init_logger +from ppcls.static.save_load import init_model, save_model +from ppcls.static import program + + +def parse_args(): + parser = argparse.ArgumentParser("PaddleClas train script") + parser.add_argument( + '-c', + '--config', + type=str, + default='configs/ResNet/ResNet50.yaml', + help='config file path') + parser.add_argument( + '-p', + '--profiler_options', + type=str, + default=None, + help='The option of profiler, which should be in format \"key1=value1;key2=value2;key3=value3\".' + ) + parser.add_argument( + '-o', + '--override', + action='append', + default=[], + help='config options to be overridden') + args = parser.parse_args() + return args + + +def main(args): + """ + all the config of training paradigm should be in config["Global"] + """ + config = get_config(args.config, overrides=args.override, show=False) + + # set seed + seed = config["Global"].get("seed", False) + if seed or seed == 0: + assert isinstance(seed, int), "The 'seed' must be a integer!" + paddle.seed(seed) + np.random.seed(seed) + random.seed(seed) + + global_config = config["Global"] + + mode = "train" + + log_file = os.path.join(global_config['output_dir'], + config["Arch"]["name"], f"{mode}.log") + log_ranks = config["Global"].get("log_ranks", "0") + init_logger(log_file=log_file, log_ranks=log_ranks) + print_config(config) + + if global_config.get("is_distributed", True): + fleet.init(is_collective=True) + + # assign the device + assert global_config["device"] in [ + "cpu", "gpu", "xpu", "npu", "mlu", "ascend", "intel_gpu", "mps" + ] + device = paddle.set_device(global_config["device"]) + + # amp related config + amp_config = config.get("AMP", None) + use_amp = True if amp_config and amp_config.get("use_amp", True) else False + if use_amp: + AMP_RELATED_FLAGS_SETTING = { + 'FLAGS_cudnn_exhaustive_search': 1, + 'FLAGS_conv_workspace_size_limit': 1500, + 'FLAGS_cudnn_batchnorm_spatial_persistent': 1, + 'FLAGS_max_inplace_grad_add': 8, + } + os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1' + paddle.set_flags(AMP_RELATED_FLAGS_SETTING) + + # visualDL + vdl_writer = None + if global_config["use_visualdl"]: + vdl_dir = global_config["output_dir"] + vdl_writer = LogWriter(vdl_dir) + + # build dataloader + eval_dataloader = None + use_dali = global_config.get('use_dali', False) + + class_num = config["Arch"].get("class_num", None) + config["DataLoader"].update({"class_num": class_num}) + train_dataloader = build_dataloader( + config["DataLoader"], "Train", device=device, use_dali=use_dali) + if global_config["eval_during_train"]: + eval_dataloader = build_dataloader( + config["DataLoader"], "Eval", device=device, use_dali=use_dali) + + step_each_epoch = len(train_dataloader) + + # startup_prog is used to do some parameter init work, + # and train prog is used to hold the network + startup_prog = paddle.static.Program() + train_prog = paddle.static.Program() + + best_top1_acc = 0.0 # best top1 acc record + + 
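+    # program.build() returns the fetch dict (loss and metrics), the LR
+    # scheduler, the feed variables and the optimizer for the training
+    # program; the same helper is reused below (with is_train=False) to
+    # assemble the evaluation program.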
train_fetchs, lr_scheduler, train_feeds, optimizer = program.build( + config, + train_prog, + startup_prog, + class_num, + step_each_epoch=step_each_epoch, + is_train=True, + is_distributed=global_config.get("is_distributed", True)) + + if global_config["eval_during_train"]: + eval_prog = paddle.static.Program() + eval_fetchs, _, eval_feeds, _ = program.build( + config, + eval_prog, + startup_prog, + is_train=False, + is_distributed=global_config.get("is_distributed", True)) + # clone to prune some content which is irrelevant in eval_prog + eval_prog = eval_prog.clone(for_test=True) + + # create the "Executor" with the statement of which device + exe = paddle.static.Executor(device) + # Parameter initialization + exe.run(startup_prog) + # load pretrained models or checkpoints + init_model(global_config, train_prog, exe) + + if use_amp: + # for AMP O2 + if config["AMP"].get("level", "O1").upper() == "O2": + use_fp16_test = True + msg = "Only support FP16 evaluation when AMP O2 is enabled." + logger.warning(msg) + # for AMP O1 + else: + use_fp16_test = config["AMP"].get("use_fp16_test", False) + + optimizer.amp_init( + device, + scope=paddle.static.global_scope(), + test_program=eval_prog + if global_config["eval_during_train"] else None, + use_fp16_test=use_fp16_test) + + if not global_config.get("is_distributed", True): + compiled_train_prog = program.compile( + config, train_prog, loss_name=train_fetchs["loss"][0].name) + else: + compiled_train_prog = train_prog + + if eval_dataloader is not None: + if not global_config.get("is_distributed", True): + compiled_eval_prog = program.compile(config, eval_prog) + else: + compiled_eval_prog = eval_prog + + for epoch_id in range(global_config["epochs"]): + # 1. train with train dataset + program.run(train_dataloader, exe, compiled_train_prog, train_feeds, + train_fetchs, epoch_id, 'train', config, vdl_writer, + lr_scheduler, args.profiler_options) + # 2. evaluate with eval dataset + if global_config["eval_during_train"] and epoch_id % global_config[ + "eval_interval"] == 0: + top1_acc = program.run(eval_dataloader, exe, compiled_eval_prog, + eval_feeds, eval_fetchs, epoch_id, "eval", + config) + if top1_acc > best_top1_acc: + best_top1_acc = top1_acc + message = "The best top1 acc {:.5f}, in epoch: {:d}".format( + best_top1_acc, epoch_id) + logger.info(message) + if epoch_id % global_config["save_interval"] == 0: + + model_path = os.path.join(global_config["output_dir"], + config["Arch"]["name"]) + save_model(train_prog, model_path, "best_model") + + # 3. 
save the persistable model + if epoch_id % global_config["save_interval"] == 0: + model_path = os.path.join(global_config["output_dir"], + config["Arch"]["name"]) + save_model(train_prog, model_path, epoch_id) + + +if __name__ == '__main__': + paddle.enable_static() + args = parse_args() + main(args) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/COCO2017_label_list.txt b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/COCO2017_label_list.txt new file mode 100644 index 000000000..51dba82b4 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/COCO2017_label_list.txt @@ -0,0 +1,80 @@ +0 person +1 bicycle +2 car +3 motorcycle +4 airplane +5 bus +6 train +7 truck +8 boat +9 traffic light +10 fire hydrant +11 stop sign +12 parking meter +13 bench +14 bird +15 cat +16 dog +17 horse +18 sheep +19 cow +20 elephant +21 bear +22 zebra +23 giraffe +24 backpack +25 umbrella +26 handbag +27 tie +28 suitcase +29 frisbee +30 skis +31 snowboard +32 sports ball +33 kite +34 baseball bat +35 baseball glove +36 skateboard +37 surfboard +38 tennis racket +39 bottle +40 wine glass +41 cup +42 fork +43 knife +44 spoon +45 bowl +46 banana +47 apple +48 sandwich +49 orange +50 broccoli +51 carrot +52 hot dog +53 pizza +54 donut +55 cake +56 chair +57 couch +58 potted plant +59 bed +60 dining table +61 toilet +62 tv +63 laptop +64 mouse +65 remote +66 keyboard +67 cell phone +68 microwave +69 oven +70 toaster +71 sink +72 refrigerator +73 book +74 clock +75 vase +76 scissors +77 teddy bear +78 hair drier +79 toothbrush \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/NUS-WIDE-SCENE_label_list.txt b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/NUS-WIDE-SCENE_label_list.txt new file mode 100644 index 000000000..f4ee66ba7 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/NUS-WIDE-SCENE_label_list.txt @@ -0,0 +1,33 @@ +0 airport +1 beach +2 bridge +3 buildings +4 castle +5 cityscape +6 clouds +7 frost +8 garden +9 glacier +10 grass +11 harbor +12 house +13 lake +14 moon +15 mountain +16 nighttime +17 ocean +18 plants +19 railroad +20 rainbow +21 reflection +22 road +23 sky +24 snow +25 street +26 sunset +27 temple +28 town +29 valley +30 water +31 waterfall +32 window diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/image_orientation_label_list.txt b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/image_orientation_label_list.txt new file mode 100644 index 000000000..f6c8ef322 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/image_orientation_label_list.txt @@ -0,0 +1,4 @@ +0 0° +1 90° +2 180° +3 270° \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/language_classification_label_list.txt b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/language_classification_label_list.txt new file mode 100644 index 000000000..8d9ee9dd8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/language_classification_label_list.txt @@ -0,0 +1,10 @@ +0 arabic +1 chinese_cht +2 cyrillic +3 devanagari +4 japan +5 ka +6 korean +7 ta +8 te +9 latin \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/text_image_orientation_label_list.txt 
b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/text_image_orientation_label_list.txt new file mode 100644 index 000000000..051944a92 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/text_image_orientation_label_list.txt @@ -0,0 +1,4 @@ +0 0 +1 90 +2 180 +3 270 diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/textline_orientation_label_list.txt b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/textline_orientation_label_list.txt new file mode 100644 index 000000000..207b70c6b --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/textline_orientation_label_list.txt @@ -0,0 +1,2 @@ +0 0_degree +1 180_degree diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/traffic_sign_label_list.txt b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/traffic_sign_label_list.txt new file mode 100644 index 000000000..c1e41d539 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/PULC_label_list/traffic_sign_label_list.txt @@ -0,0 +1,232 @@ +0 pl80 +1 w9 +2 p6 +3 ph4.2 +4 i8 +5 w14 +6 w33 +7 pa13 +8 im +9 w58 +10 pl90 +11 il70 +12 p5 +13 pm55 +14 pl60 +15 ip +16 p11 +17 pdd +18 wc +19 i2r +20 w30 +21 pmr +22 p23 +23 pl15 +24 pm10 +25 pss +26 w1 +27 p4 +28 w38 +29 w50 +30 w34 +31 pw3.5 +32 iz +33 w39 +34 w11 +35 p1n +36 pr70 +37 pd +38 pnl +39 pg +40 ph5.3 +41 w66 +42 il80 +43 pb +44 pbm +45 pm5 +46 w24 +47 w67 +48 w49 +49 pm40 +50 ph4 +51 w45 +52 i4 +53 w37 +54 ph2.6 +55 pl70 +56 ph5.5 +57 i14 +58 i11 +59 p7 +60 p29 +61 pne +62 pr60 +63 pm13 +64 ph4.5 +65 p12 +66 p3 +67 w40 +68 pl5 +69 w13 +70 pr10 +71 p14 +72 i4l +73 pr30 +74 pw4.2 +75 w16 +76 p17 +77 ph3 +78 i9 +79 w15 +80 w35 +81 pa8 +82 pt +83 pr45 +84 w17 +85 pl30 +86 pcs +87 pctl +88 pr50 +89 ph4.4 +90 pm46 +91 pm35 +92 i15 +93 pa12 +94 pclr +95 i1 +96 pcd +97 pbp +98 pcr +99 w28 +100 ps +101 pm8 +102 w18 +103 w2 +104 w52 +105 ph2.9 +106 ph1.8 +107 pe +108 p20 +109 w36 +110 p10 +111 pn +112 pa14 +113 w54 +114 ph3.2 +115 p2 +116 ph2.5 +117 w62 +118 w55 +119 pw3 +120 pw4.5 +121 i12 +122 ph4.3 +123 phclr +124 i10 +125 pr5 +126 i13 +127 w10 +128 p26 +129 w26 +130 p8 +131 w5 +132 w42 +133 il50 +134 p13 +135 pr40 +136 p25 +137 w41 +138 pl20 +139 ph4.8 +140 pnlc +141 ph3.3 +142 w29 +143 ph2.1 +144 w53 +145 pm30 +146 p24 +147 p21 +148 pl40 +149 w27 +150 pmb +151 pc +152 i6 +153 pr20 +154 p18 +155 ph3.8 +156 pm50 +157 pm25 +158 i2 +159 w22 +160 w47 +161 w56 +162 pl120 +163 ph2.8 +164 i7 +165 w12 +166 pm1.5 +167 pm2.5 +168 w32 +169 pm15 +170 ph5 +171 w19 +172 pw3.2 +173 pw2.5 +174 pl10 +175 il60 +176 w57 +177 w48 +178 w60 +179 pl100 +180 pr80 +181 p16 +182 pl110 +183 w59 +184 w64 +185 w20 +186 ph2 +187 p9 +188 il100 +189 w31 +190 w65 +191 ph2.4 +192 pr100 +193 p19 +194 ph3.5 +195 pa10 +196 pcl +197 pl35 +198 p15 +199 w7 +200 pa6 +201 phcs +202 w43 +203 p28 +204 w6 +205 w3 +206 w25 +207 pl25 +208 il110 +209 p1 +210 w46 +211 pn-2 +212 w51 +213 w44 +214 w63 +215 w23 +216 pm20 +217 w8 +218 pmblr +219 w4 +220 i5 +221 il90 +222 w21 +223 p27 +224 pl50 +225 pl65 +226 w61 +227 ph2.2 +228 pm2 +229 i3 +230 pa18 +231 pw4 diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/__init__.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/__init__.py new file mode 100644 index 000000000..294fbb125 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/__init__.py @@ -0,0 +1,29 @@ +# Copyright 
(c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import logger +from . import metrics +from . import misc +from . import model_zoo + +from .config import get_config, convert_to_dict +from .dist_utils import all_gather +from .metrics import accuracy_score +from .metrics import hamming_distance +from .metrics import mean_average_precision +from .metrics import multi_hot_encode +from .metrics import precision_recall_fscore +from .misc import AverageMeter +from .save_load import init_model, save_model +from .save_result import save_predict_result diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/amp.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/amp.py new file mode 100644 index 000000000..7ada7a404 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/amp.py @@ -0,0 +1,61 @@ +from functools import partial +import contextlib +import paddle + + +class AutoCast: + def __init__(self, + use_amp=False, + amp_level="O1", + use_promote=False, + amp_eval=False): + self.use_amp = use_amp + self.amp_eval = amp_eval + + if self.use_amp: + # compatible with paddle 2.5 and older version + paddle_version = paddle.__version__[:3] + # paddle version >= 2.5.0 or develop + if paddle_version in ["2.5", "0.0"]: + self.cast_context = partial( + paddle.amp.auto_cast, + level=amp_level, + use_promote=use_promote) + # paddle version <= 2.4.x and not develop + else: + self.cast_context = partial( + paddle.amp.auto_cast, level=amp_level) + + def __call__(self, is_eval=False): + if self.use_amp: + # not is_eval: cast for all training + # is_eval and self.amp_eval: cast for evaluation only when amp_eval is True + if not is_eval or (is_eval and self.amp_eval): + return self.cast_context() + + return contextlib.nullcontext() + + +def build_scaler(use_amp=False, scale_loss=1.0, + use_dynamic_loss_scaling=False): + class Foo: + def __init__(self): + pass + + def scale(self, loss): + return loss + + def step(self, optimizer): + optimizer.step() + + def update(self): + return + + def minimize(self, optimizer, loss): + optimizer.step() + + if use_amp: + return paddle.amp.GradScaler( + init_loss_scaling=scale_loss, + use_dynamic_loss_scaling=use_dynamic_loss_scaling) + return Foo() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/check.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/check.py new file mode 100644 index 000000000..8b743335e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/check.py @@ -0,0 +1,149 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys + +import paddle +from paddle import is_compiled_with_cuda + +from ..arch.utils import get_architectures, similar_architectures, get_blacklist_model_in_static_mode +from . import logger + + +def check_version(): + """ + Log error and exit when the installed version of paddlepaddle is + not satisfied. + """ + err = "PaddlePaddle version 1.8.0 or higher is required, " \ + "or a suitable develop version is satisfied as well. \n" \ + "Please make sure the version is good with your code." + try: + pass + # paddle.utils.require_version('0.0.0') + except Exception: + logger.error(err) + sys.exit(1) + + +def check_gpu(): + """ + Log error and exit when using paddlepaddle cpu version. + """ + err = "You are using paddlepaddle cpu version! Please try to " \ + "install paddlepaddle-gpu to run model on GPU." + + try: + assert is_compiled_with_cuda() + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_architecture(architecture): + """ + check architecture and recommend similar architectures + """ + assert isinstance(architecture, dict), \ + ("the type of architecture({}) should be dict". format(architecture)) + assert "name" in architecture, \ + ("name must be in the architecture keys, just contains: {}". format( + architecture.keys())) + + similar_names = similar_architectures(architecture["name"], + get_architectures()) + model_list = ', '.join(similar_names) + err = "Architecture [{}] is not exist! Maybe you want: [{}]" \ + "".format(architecture["name"], model_list) + try: + assert architecture["name"] in similar_names + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_model_with_running_mode(architecture): + """ + check whether the model is consistent with the operating mode + """ + # some model are not supported in the static mode + blacklist = get_blacklist_model_in_static_mode() + if not paddle.in_dynamic_mode() and architecture["name"] in blacklist: + logger.error("Model: {} is not supported in the staic mode.".format( + architecture["name"])) + sys.exit(1) + return + + +def check_mix(architecture, use_mix=False): + """ + check mix parameter + """ + err = "Cannot use mix processing in GoogLeNet, " \ + "please set use_mix = False." 
+ try: + if architecture["name"] == "GoogLeNet": + assert use_mix is not True + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_classes_num(classes_num): + """ + check classes_num + """ + err = "classes_num({}) should be a positive integer" \ + "and larger than 1".format(classes_num) + try: + assert isinstance(classes_num, int) + assert classes_num > 1 + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_data_dir(path): + """ + check cata_dir + """ + err = "Data path is not exist, please given a right path" \ + "".format(path) + try: + assert os.isdir(path) + except AssertionError: + logger.error(err) + sys.exit(1) + + +def check_function_params(config, key): + """ + check specify config + """ + k_config = config.get(key) + assert k_config is not None, \ + ('{} is required in config'.format(key)) + + assert k_config.get('function'), \ + ('function is required {} config'.format(key)) + params = k_config.get('params') + assert params is not None, \ + ('params is required in {} config'.format(key)) + assert isinstance(params, dict), \ + ('the params in {} config should be a dict'.format(key)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/config.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/config.py new file mode 100644 index 000000000..35fe7d428 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/config.py @@ -0,0 +1,326 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import copy +import argparse +import yaml +from . import logger +from . import check +from collections import OrderedDict + +__all__ = ['get_config', 'convert_to_dict'] + + +def convert_to_dict(obj): + if isinstance(obj, dict): + return {k: convert_to_dict(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [convert_to_dict(i) for i in obj] + else: + return obj + + +class AttrDict(dict): + def __getattr__(self, key): + return self[key] + + def __setattr__(self, key, value): + if key in self.__dict__: + self.__dict__[key] = value + else: + self[key] = value + + def __deepcopy__(self, content): + return AttrDict(copy.deepcopy(dict(self))) + + +def create_attr_dict(yaml_config): + from ast import literal_eval + for key, value in yaml_config.items(): + if type(value) is dict: + yaml_config[key] = value = AttrDict(value) + if isinstance(value, str): + try: + value = literal_eval(value) + except BaseException: + pass + if isinstance(value, AttrDict): + create_attr_dict(yaml_config[key]) + else: + yaml_config[key] = value + + +def parse_config(cfg_file): + """Load a config file into AttrDict""" + with open(cfg_file, 'r') as fopen: + yaml_config = AttrDict(yaml.load(fopen, Loader=yaml.SafeLoader)) + create_attr_dict(yaml_config) + return yaml_config + + +def print_dict(d, delimiter=0): + """ + Recursively visualize a dict and + indenting acrrording by the relationship of keys. 
+ """ + placeholder = "-" * 60 + for k, v in d.items(): + if isinstance(v, dict): + logger.info("{}{} : ".format(delimiter * " ", k)) + print_dict(v, delimiter + 4) + elif isinstance(v, list) and len(v) >= 1 and isinstance(v[0], dict): + logger.info("{}{} : ".format(delimiter * " ", k)) + for value in v: + print_dict(value, delimiter + 4) + else: + logger.info("{}{} : {}".format(delimiter * " ", k, v)) + + if k[0].isupper() and delimiter == 0: + logger.info(placeholder) + + +def print_config(config): + """ + visualize configs + Arguments: + config: configs + """ + logger.advertise() + print_dict(config) + + +def check_config(config): + """ + Check config + """ + check.check_version() + use_gpu = config.get('use_gpu', True) + if use_gpu: + check.check_gpu() + architecture = config.get('ARCHITECTURE') + #check.check_architecture(architecture) + use_mix = config.get('use_mix', False) + check.check_mix(architecture, use_mix) + classes_num = config.get('classes_num') + check.check_classes_num(classes_num) + mode = config.get('mode', 'train') + if mode.lower() == 'train': + check.check_function_params(config, 'LEARNING_RATE') + check.check_function_params(config, 'OPTIMIZER') + + +def override(dl, ks, v): + """ + Recursively replace dict of list + Args: + dl(dict or list): dict or list to be replaced + ks(list): list of keys + v(str): value to be replaced + """ + + def str2num(v): + try: + return eval(v) + except Exception: + return v + + assert isinstance(dl, (list, dict)), ("{} should be a list or a dict") + assert len(ks) > 0, ('lenght of keys should larger than 0') + if isinstance(dl, list): + k = str2num(ks[0]) + if len(ks) == 1: + assert k < len(dl), ('index({}) out of range({})'.format(k, dl)) + dl[k] = str2num(v) + else: + override(dl[k], ks[1:], v) + else: + if len(ks) == 1: + # assert ks[0] in dl, ('{} is not exist in {}'.format(ks[0], dl)) + if not ks[0] in dl: + print('A new field ({}) detected!'.format(ks[0], dl)) + dl[ks[0]] = str2num(v) + else: + if ks[0] not in dl.keys(): + dl[ks[0]] = {} + print("A new Series field ({}) detected!".format(ks[0], dl)) + override(dl[ks[0]], ks[1:], v) + + +def override_config(config, options=None): + """ + Recursively override the config + Args: + config(dict): dict to be replaced + options(list): list of pairs(key0.key1.idx.key2=value) + such as: [ + 'topk=2', + 'VALID.transforms.1.ResizeImage.resize_short=300' + ] + Returns: + config(dict): replaced config + """ + if options is not None: + for opt in options: + assert isinstance(opt, str), ( + "option({}) should be a str".format(opt)) + assert "=" in opt, ( + "option({}) should contain a =" + "to distinguish between key and value".format(opt)) + pair = opt.split('=') + assert len(pair) == 2, ("there can be only a = in the option") + key, value = pair + keys = key.split('.') + override(config, keys, value) + return config + + +def get_config(fname, overrides=None, show=False): + """ + Read config from file + """ + assert os.path.exists(fname), ('config file({}) is not exist'.format(fname)) + config = parse_config(fname) + override_config(config, overrides) + if show: + print_config(config) + # check_config(config) + return config + + +def parse_args(): + parser = argparse.ArgumentParser("generic-image-rec train script") + parser.add_argument( + '-c', + '--config', + type=str, + default='configs/config.yaml', + help='config file path') + parser.add_argument( + '-o', + '--override', + action='append', + default=[], + help='config options to be overridden') + parser.add_argument( + '-p', + 
'--profiler_options', + type=str, + default=None, + help='The option of profiler, which should be in format \"key1=value1;key2=value2;key3=value3\".' + ) + args = parser.parse_args() + return args + + +def represent_dictionary_order(self, dict_data): + return self.represent_mapping('tag:yaml.org,2002:map', dict_data.items()) + + +def setup_orderdict(): + yaml.add_representer(OrderedDict, represent_dictionary_order) + + +def dump_infer_config(inference_config, path): + setup_orderdict() + infer_cfg = OrderedDict() + config = copy.deepcopy(inference_config) + if config["Global"].get("pdx_model_name", None): + infer_cfg["Global"] = {"model_name": config["Global"]["pdx_model_name"]} + if config.get("Infer"): + transforms = config["Infer"]["transforms"] + elif config["DataLoader"]["Eval"].get("Query"): + transforms = config["DataLoader"]["Eval"]["Query"]["dataset"][ + "transform_ops"] + transforms.append({"ToCHWImage": None}) + elif config["DataLoader"]["Eval"].get("dataset"): + transforms = config["DataLoader"]["Eval"]["dataset"]["transform_ops"] + transforms.append({"ToCHWImage": None}) + else: + logger.error("This config does not support dump transform config!") + transform = next((item for item in transforms if 'CropImage' in item), None) + if transform: + dynamic_shapes = transform["CropImage"]["size"] + else: + transform = next((item for item in transforms + if 'ResizeImage' in item), None) + if transform: + if isinstance(transform["ResizeImage"]["size"], list): + dynamic_shapes = transform["ResizeImage"]["size"][0] + elif isinstance(transform["ResizeImage"]["size"], int): + dynamic_shapes = transform["ResizeImage"]["size"] + else: + raise ValueError( + "ResizeImage size must be either a list or an int.") + else: + raise ValueError("No valid transform found.") + # Configuration required config for high-performance inference. 
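+    # Sketch of the expected structure (an assumption based on the accesses
+    # below): the YAML referenced by Global.hpi_config_path provides
+    # Hpi.backend_config with optional `paddle_tensorrt` / `tensorrt` entries;
+    # their dynamic_shapes["x"] are filled with [1, 3, H, W] (min/opt/max)
+    # derived from the CropImage/ResizeImage size, and max_batch_size is set to 1.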
+ if config["Global"].get("hpi_config_path", None): + hpi_config = convert_to_dict( + parse_config(config["Global"]["hpi_config_path"])) + if hpi_config["Hpi"]["backend_config"].get("paddle_tensorrt", None): + hpi_config["Hpi"]["backend_config"]["paddle_tensorrt"][ + "dynamic_shapes"]["x"] = [[ + 1, 3, dynamic_shapes, dynamic_shapes + ] for i in range(3)] + hpi_config["Hpi"]["backend_config"]["paddle_tensorrt"][ + "max_batch_size"] = 1 + if hpi_config["Hpi"]["backend_config"].get("tensorrt", None): + hpi_config["Hpi"]["backend_config"]["tensorrt"]["dynamic_shapes"][ + "x"] = [[1, 3, dynamic_shapes, dynamic_shapes] + for i in range(3)] + hpi_config["Hpi"]["backend_config"]["tensorrt"][ + "max_batch_size"] = 1 + infer_cfg["Hpi"] = hpi_config["Hpi"] + for transform in transforms: + if "NormalizeImage" in transform: + transform["NormalizeImage"]["channel_num"] = 3 + scale_str = transform["NormalizeImage"]["scale"] + numerator, denominator = scale_str.split('/') + numerator, denominator = float(numerator), float(denominator) + transform["NormalizeImage"]["scale"] = float(numerator / + denominator) + infer_cfg["PreProcess"] = { + "transform_ops": [ + infer_preprocess for infer_preprocess in transforms + if "DecodeImage" not in infer_preprocess + ] + } + if config.get("Infer"): + postprocess_dict = config["Infer"]["PostProcess"] + + with open(postprocess_dict["class_id_map_file"], 'r') as f: + label_id_maps = f.readlines() + label_names = [] + for line in label_id_maps: + line = line.strip().split(' ', 1) + label_names.append(line[1:][0]) + + postprocess_name = postprocess_dict.get("name", None) + postprocess_dict.pop("class_id_map_file") + postprocess_dict.pop("name") + dic = OrderedDict() + for item in postprocess_dict.items(): + dic[item[0]] = item[1] + dic['label_list'] = label_names + + if postprocess_name: + infer_cfg["PostProcess"] = {postprocess_name: dic} + else: + raise ValueError("PostProcess name is not specified") + else: + infer_cfg["PostProcess"] = {"NormalizeFeatures": None} + with open(path, 'w') as f: + yaml.dump(infer_cfg, f) + logger.info("Export inference config file to {}".format(os.path.join(path))) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/create_cls_trainval_lists.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/create_cls_trainval_lists.py new file mode 100644 index 000000000..11aa32f10 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/create_cls_trainval_lists.py @@ -0,0 +1,111 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import os +import random +import string + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--dataset_path', type=str, default='./data') + parser.add_argument('--save_img_list_path', type=str, default='train.txt') + parser.add_argument( + '--train', action='store_true', help='Create train list.') + parser.add_argument('--val', action='store_true', help='Create val list.') + + args = parser.parse_args() + return args + + +def parse_class_id_map(class_id_map_file): + class_id_map = {} + with open(class_id_map_file, "r") as f: + lines = f.readlines() + for line in lines: + partition = line.split("\n")[0].partition(" ") + class_id_map[str(partition[-1])] = int(partition[0]) + return class_id_map + + +def main(args): + img_list = [] + label_list = [] + img_end = ['jpg', 'JPG', 'png', 'PNG', 'jpeg', 'JPEG', 'bmp'] + if args.dataset_path[-1] == "/": + args.dataset_path = args.dataset_path[:-1] + + if not os.path.exists(args.dataset_path): + raise Exception(f"The data path {args.dataset_path} not exists.") + else: + label_name_list = [ + label for label in os.listdir(args.dataset_path) + if os.path.isdir(os.path.join(args.dataset_path, label)) + ] + + if not os.path.exists( + os.path.join(os.path.dirname(args.dataset_path), + 'label.txt')) and args.val: + raise Exception( + 'The label file is not exist. Please set "--train" first.') + + for index, label_name in enumerate(label_name_list): + for root, dirs, files in os.walk( + os.path.join(args.dataset_path, label_name)): + for single_file in files: + if single_file.split('.')[-1] in img_end: + img_path = os.path.relpath( + os.path.join(root, single_file), + os.path.dirname(args.dataset_path)) + if args.val: + class_id_map = parse_class_id_map( + os.path.join( + os.path.dirname(args.dataset_path), + 'label.txt')) + img_list.append( + f'{img_path} {class_id_map[label_name]}') + else: + img_list.append(f'{img_path} {index}') + else: + print( + f'WARNING: File {os.path.join(root, single_file)} end with {single_file.split(".")[-1]} is not supported.' + ) + label_list.append(f'{index} {label_name}') + + if len(img_list) == 0: + raise Exception(f"Not found any images file in {args.dataset_path}.") + + with open( + os.path.join( + os.path.dirname(args.dataset_path), args.save_img_list_path), + 'w') as f: + f.write('\n'.join(img_list)) + print( + f'Already save {args.save_img_list_path} in {os.path.join(os.path.dirname(args.dataset_path), args.save_img_list_path)}.' + ) + + if not args.val: + with open( + os.path.join(os.path.dirname(args.dataset_path), 'label.txt'), + 'w') as f: + f.write('\n'.join(label_list)) + print( + f'Already save label.txt in {os.path.join(os.path.dirname(args.dataset_path), "label.txt")}.' + ) + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/create_coco_multilabel_lists.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/create_coco_multilabel_lists.py new file mode 100644 index 000000000..3082b4a6c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/create_coco_multilabel_lists.py @@ -0,0 +1,126 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os + +from ppcls.utils import logger +from ppcls.utils.logger import init_logger +from pycocotools.coco import COCO +from tqdm import tqdm + +init_logger() + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + '--dataset_dir', + required=True, + help='root directory for dataset') + parser.add_argument( + '--image_dir', + required=True, + help='directory for images') + parser.add_argument( + '--anno_path', + required=True, + help='coco annotation file path') + parser.add_argument( + '--save_name', + default=None, + help='will same as anno_path if got None') + parser.add_argument( + '--output_dir', + default=None, + help='output directory, and will same as ' + 'dataset_dir if got None') + parser.add_argument( + '--save_label_name', + action='store_true', + help='save label name file') + + args = parser.parse_args() + if args.output_dir is None: + args.output_dir = args.dataset_dir + else: + os.makedirs(args.dataset_dir, exist_ok=True) + if args.save_name is None: + args.save_name = os.path.splitext(os.path.basename(args.anno_path))[0] + + image_dir = os.path.join(args.dataset_dir, args.image_dir) + anno_path = os.path.join(args.dataset_dir, args.anno_path) + assert os.path.exists(image_dir) and os.path.exists(anno_path), \ + ValueError("The dataset is not Found or " + "the folder structure is non-conformance.") + coco = COCO(anno_path) + cat_id_map = { + old_cat_id: new_cat_id + for new_cat_id, old_cat_id in enumerate(coco.getCatIds()) + } + num_classes = len(list(cat_id_map.keys())) + + assert 'annotations' in coco.dataset, \ + 'Annotation file: {} does not contains ground truth!!!'.format(anno_path) + + save_path = os.path.join(args.dataset_dir, args.save_name + '.txt') + logger.info("Start converting {}:".format(anno_path)) + with open(save_path, 'w') as fp: + lines = [] + for img_id in tqdm(sorted(coco.getImgIds())): + img_info = coco.loadImgs([img_id])[0] + img_filename = img_info['file_name'] + img_w = img_info['width'] + img_h = img_info['height'] + + img_filepath = os.path.join(image_dir, img_filename) + if not os.path.exists(img_filepath): + logger.warning('Illegal image file: {}, ' + 'and it will be ignored'.format(img_filepath)) + continue + + if img_w < 0 or img_h < 0: + logger.warning( + 'Illegal width: {} or height: {} in annotation, ' + 'and im_id: {} will be ignored'.format(img_w, img_h, img_id)) + continue + + ins_anno_ids = coco.getAnnIds(imgIds=[img_id]) + instances = coco.loadAnns(ins_anno_ids) + + label = [0] * num_classes + for instance in instances: + label[cat_id_map[instance['category_id']]] = 1 + lines.append(img_filename + '\t' + ','.join(map(str, label))) + + fp.write('\n'.join(lines)) + fp.close() + logger.info("Conversion completed, save to {}:".format(save_path)) + + if args.save_label_name: + label_txt_save_name = os.path.basename( + os.path.abspath(args.dataset_dir)) + '_labels.txt' + label_txt_save_path = os.path.join(args.dataset_dir, label_txt_save_name) + with open(label_txt_save_path, 'w') as fp: + label_name_list = [] + for cat in 
coco.cats.values(): + label_name_list.append(cat['name']) + fp.write('\n'.join(label_name_list)) + fp.close() + logger.info("Save label names to {}.".format(label_txt_save_path)) + + +if __name__ == '__main__': + main() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/dist_utils.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/dist_utils.py new file mode 100644 index 000000000..6b7d889d6 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/dist_utils.py @@ -0,0 +1,36 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Union + +import paddle + + +def all_gather(tensor: paddle.Tensor, concat: bool=True, + axis: int=0) -> Union[paddle.Tensor, List[paddle.Tensor]]: + """Gather tensor from all devices, concatenate them along given axis if specified. + + Args: + tensor (paddle.Tensor): Tensor to be gathered from all GPUs. + concat (bool, optional): Whether to concatenate gathered Tensors. Defaults to True. + axis (int, optional): Axis which concatenated along. Defaults to 0. + + Returns: + Union[paddle.Tensor, List[paddle.Tensor]]: Gathered Tensors + """ + result = [] + paddle.distributed.all_gather(result, tensor) + if concat: + return paddle.concat(result, axis) + return result diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/download.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/download.py new file mode 100644 index 000000000..decad654a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/download.py @@ -0,0 +1,304 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys +import os.path as osp +import shutil +import requests +import hashlib +import tarfile +import zipfile +import time +from collections import OrderedDict +from tqdm import tqdm + +from . import logger + +__all__ = ['get_weights_path_from_url'] + +WEIGHTS_HOME = osp.expanduser("~/.paddleclas/weights") + +DOWNLOAD_RETRY_LIMIT = 3 + + +def is_url(path): + """ + Whether path is URL. + Args: + path (string): URL string or not. + """ + return path.startswith('http://') or path.startswith('https://') + + +def get_weights_path_from_url(url, md5sum=None): + """Get weights path from WEIGHT_HOME, if not exists, + download it from url. 
+ + Args: + url (str): download url + md5sum (str): md5 sum of download package + + Returns: + str: a local path to save downloaded weights. + + Examples: + .. code-block:: python + + from paddle.utils.download import get_weights_path_from_url + + resnet18_pretrained_weight_url = 'https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams' + local_weight_path = get_weights_path_from_url(resnet18_pretrained_weight_url) + + """ + path = get_path_from_url(url, WEIGHTS_HOME, md5sum) + return path + + +def _map_path(url, root_dir): + # parse path after download under root_dir + fname = osp.split(url)[-1] + fpath = fname + return osp.join(root_dir, fpath) + + +def get_path_from_url(url, + root_dir, + md5sum=None, + check_exist=True, + decompress=True): + """ Download from given url to root_dir. + if file or directory specified by url is exists under + root_dir, return the path directly, otherwise download + from url and decompress it, return the path. + + Args: + url (str): download url + root_dir (str): root dir for downloading, it should be + WEIGHTS_HOME or DATASET_HOME + md5sum (str): md5 sum of download package + + Returns: + str: a local path to save downloaded models & weights & datasets. + """ + + from paddle.distributed import ParallelEnv + + assert is_url(url), "downloading from {} not a url".format(url) + # parse path after download to decompress under root_dir + fullpath = _map_path(url, root_dir) + # Mainly used to solve the problem of downloading data from different + # machines in the case of multiple machines. Different nodes will download + # data, and the same node will only download data once. + rank_id_curr_node = int(os.environ.get("PADDLE_RANK_IN_NODE", 0)) + + if osp.exists(fullpath) and check_exist and _md5check(fullpath, md5sum): + logger.info("Found {}".format(fullpath)) + else: + if rank_id_curr_node == 0: + fullpath = _download(url, root_dir, md5sum) + else: + while not os.path.exists(fullpath): + time.sleep(1) + + if rank_id_curr_node == 0: + if decompress and (tarfile.is_tarfile(fullpath) or + zipfile.is_zipfile(fullpath)): + fullpath = _decompress(fullpath) + + return fullpath + + +def _download(url, path, md5sum=None): + """ + Download from url, save to path. + + url (str): download url + path (str): download to given path + """ + if not osp.exists(path): + os.makedirs(path) + + fname = osp.split(url)[-1] + fullname = osp.join(path, fname) + retry_cnt = 0 + + while not (osp.exists(fullname) and _md5check(fullname, md5sum)): + if retry_cnt < DOWNLOAD_RETRY_LIMIT: + retry_cnt += 1 + else: + raise RuntimeError("Download from {} failed. " + "Retry limit reached".format(url)) + + logger.info("Downloading {} from {}".format(fname, url)) + + try: + req = requests.get(url, stream=True) + except Exception as e: # requests.exceptions.ConnectionError + logger.info( + "Downloading {} from {} failed {} times with exception {}". 
+ format(fname, url, retry_cnt + 1, str(e))) + time.sleep(1) + continue + + if req.status_code != 200: + raise RuntimeError("Downloading from {} failed with code " + "{}!".format(url, req.status_code)) + + # For protecting download interupted, download to + # tmp_fullname firstly, move tmp_fullname to fullname + # after download finished + tmp_fullname = fullname + "_tmp" + total_size = req.headers.get('content-length') + with open(tmp_fullname, 'wb') as f: + if total_size: + with tqdm(total=(int(total_size) + 1023) // 1024) as pbar: + for chunk in req.iter_content(chunk_size=1024): + f.write(chunk) + pbar.update(1) + else: + for chunk in req.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + shutil.move(tmp_fullname, fullname) + + return fullname + + +def _md5check(fullname, md5sum=None): + if md5sum is None: + return True + + logger.info("File {} md5 checking...".format(fullname)) + md5 = hashlib.md5() + with open(fullname, 'rb') as f: + for chunk in iter(lambda: f.read(4096), b""): + md5.update(chunk) + calc_md5sum = md5.hexdigest() + + if calc_md5sum != md5sum: + logger.info("File {} md5 check failed, {}(calc) != " + "{}(base)".format(fullname, calc_md5sum, md5sum)) + return False + return True + + +def _decompress(fname): + """ + Decompress for zip and tar file + """ + logger.info("Decompressing {}...".format(fname)) + + # For protecting decompressing interupted, + # decompress to fpath_tmp directory firstly, if decompress + # successed, move decompress files to fpath and delete + # fpath_tmp and remove download compress file. + + if tarfile.is_tarfile(fname): + uncompressed_path = _uncompress_file_tar(fname) + elif zipfile.is_zipfile(fname): + uncompressed_path = _uncompress_file_zip(fname) + else: + raise TypeError("Unsupport compress file type {}".format(fname)) + + return uncompressed_path + + +def _uncompress_file_zip(filepath): + files = zipfile.ZipFile(filepath, 'r') + file_list = files.namelist() + + file_dir = os.path.dirname(filepath) + + if _is_a_single_file(file_list): + rootpath = file_list[0] + uncompressed_path = os.path.join(file_dir, rootpath) + + for item in file_list: + files.extract(item, file_dir) + + elif _is_a_single_dir(file_list): + rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + + for item in file_list: + files.extract(item, file_dir) + + else: + rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + if not os.path.exists(uncompressed_path): + os.makedirs(uncompressed_path) + for item in file_list: + files.extract(item, os.path.join(file_dir, rootpath)) + + files.close() + + return uncompressed_path + + +def _uncompress_file_tar(filepath, mode="r:*"): + files = tarfile.open(filepath, mode) + file_list = files.getnames() + + file_dir = os.path.dirname(filepath) + + if _is_a_single_file(file_list): + rootpath = file_list[0] + uncompressed_path = os.path.join(file_dir, rootpath) + for item in file_list: + files.extract(item, file_dir) + elif _is_a_single_dir(file_list): + rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + for item in file_list: + files.extract(item, file_dir) + else: + rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + if not os.path.exists(uncompressed_path): + os.makedirs(uncompressed_path) + + for item in file_list: + files.extract(item, os.path.join(file_dir, 
rootpath)) + + files.close() + + return uncompressed_path + + +def _is_a_single_file(file_list): + if len(file_list) == 1 and file_list[0].find(os.sep) < -1: + return True + return False + + +def _is_a_single_dir(file_list): + new_file_list = [] + for file_path in file_list: + if '/' in file_path: + file_path = file_path.replace('/', os.sep) + elif '\\' in file_path: + file_path = file_path.replace('\\', os.sep) + new_file_list.append(file_path) + + file_name = new_file_list[0].split(os.sep)[0] + for i in range(1, len(new_file_list)): + if file_name != new_file_list[i].split(os.sep)[0]: + return False + return True diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/ema.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/ema.py new file mode 100644 index 000000000..8cdb3dfab --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/ema.py @@ -0,0 +1,45 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from copy import deepcopy + +import paddle + + +class ExponentialMovingAverage(): + """ + Exponential Moving Average + Code was heavily based on https://github.com/rwightman/pytorch-image-models/blob/master/timm/utils/model_ema.py + """ + + def __init__(self, model, decay=0.9999): + super().__init__() + # make a copy of the model for accumulating moving average of weights + self.module = deepcopy(model) + self.module.eval() + self.decay = decay + + @paddle.no_grad() + def _update(self, model, update_fn): + for ema_v, model_v in zip(self.module.state_dict().values(), + model.state_dict().values()): + paddle.assign(update_fn(ema_v, model_v), ema_v) + + def update(self, model): + self._update( + model, + update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m) + + def set(self, model): + self._update(model, update_fn=lambda e, m: m) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/feature_maps_visualization/fm_vis.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/feature_maps_visualization/fm_vis.py new file mode 100644 index 000000000..a5368b10e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/feature_maps_visualization/fm_vis.py @@ -0,0 +1,97 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
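+
+# Example usage (illustrative paths only): visualize channel 5 of the feature
+# map produced by a pretrained ResNet50 for a single image:
+#   python3 fm_vis.py -i ./test.jpg -c 5 -p ./ResNet50_pretrained \
+#       --save_path ./fm.png --use_gpu True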
+import numpy as np +import cv2 +import utils +import argparse +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(__dir__) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../../..'))) + +import paddle +from paddle.distributed import ParallelEnv + +from resnet import ResNet50 +from ppcls.utils.save_load import load_dygraph_pretrain + + +def parse_args(): + def str2bool(v): + return v.lower() in ("true", "t", "1") + + parser = argparse.ArgumentParser() + parser.add_argument("-i", "--image_file", required=True, type=str) + parser.add_argument("-c", "--channel_num", type=int) + parser.add_argument("-p", "--pretrained_model", type=str) + parser.add_argument("--show", type=str2bool, default=False) + parser.add_argument("--interpolation", type=int, default=1) + parser.add_argument("--save_path", type=str, default=None) + parser.add_argument("--use_gpu", type=str2bool, default=True) + + return parser.parse_args() + + +def create_operators(interpolation=1): + size = 224 + img_mean = [0.485, 0.456, 0.406] + img_std = [0.229, 0.224, 0.225] + img_scale = 1.0 / 255.0 + + resize_op = utils.ResizeImage( + resize_short=256, interpolation=interpolation) + crop_op = utils.CropImage(size=(size, size)) + normalize_op = utils.NormalizeImage( + scale=img_scale, mean=img_mean, std=img_std) + totensor_op = utils.ToTensor() + + return [resize_op, crop_op, normalize_op, totensor_op] + + +def preprocess(data, ops): + for op in ops: + data = op(data) + return data + + +def main(): + args = parse_args() + operators = create_operators(args.interpolation) + # assign the place + place = 'gpu:{}'.format(ParallelEnv().dev_id) if args.use_gpu else 'cpu' + place = paddle.set_device(place) + + net = ResNet50() + load_dygraph_pretrain(net, args.pretrained_model) + + img = cv2.imread(args.image_file, cv2.IMREAD_COLOR) + data = preprocess(img, operators) + data = np.expand_dims(data, axis=0) + data = paddle.to_tensor(data) + net.eval() + _, fm = net(data) + assert args.channel_num >= 0 and args.channel_num <= fm.shape[ + 1], "the channel is out of the range, should be in {} but got {}".format( + [0, fm.shape[1]], args.channel_num) + + fm = (np.squeeze(fm[0][args.channel_num].numpy()) * 255).astype(np.uint8) + fm = cv2.resize(fm, (img.shape[1], img.shape[0])) + if args.save_path is not None: + print("the feature map is saved in path: {}".format(args.save_path)) + cv2.imwrite(args.save_path, fm) + + +if __name__ == "__main__": + main() diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/feature_maps_visualization/resnet.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/feature_maps_visualization/resnet.py new file mode 100644 index 000000000..0bea3401a --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/feature_maps_visualization/resnet.py @@ -0,0 +1,535 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
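+
+# Local copy of the ResNet definition used by fm_vis.py for feature-map
+# visualization; fm_vis.py expects the network's forward to return both the
+# logits and an intermediate feature map (`_, fm = net(data)`).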
+ +from __future__ import absolute_import, division, print_function + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +from paddle.nn import Conv2D, BatchNorm, Linear +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D +from paddle.nn.initializer import Uniform +import math + +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils.save_load import load_dygraph_pretrain + +MODEL_URLS = { + "ResNet18": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams", + "ResNet18_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_vd_pretrained.pdparams", + "ResNet34": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_pretrained.pdparams", + "ResNet34_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_vd_pretrained.pdparams", + "ResNet50": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams", + "ResNet50_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams", + "ResNet101": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_pretrained.pdparams", + "ResNet101_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_pretrained.pdparams", + "ResNet152": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_pretrained.pdparams", + "ResNet152_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_vd_pretrained.pdparams", + "ResNet200_vd": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet200_vd_pretrained.pdparams", +} + +__all__ = MODEL_URLS.keys() +''' +ResNet config: dict. + key: depth of ResNet. + values: config's dict of specific model. + keys: + block_type: Two different blocks in ResNet, BasicBlock and BottleneckBlock are optional. + block_depth: The number of blocks in different stages in ResNet. + num_channels: The number of channels to enter the next stage. 
+''' +NET_CONFIG = { + "18": { + "block_type": "BasicBlock", + "block_depth": [2, 2, 2, 2], + "num_channels": [64, 64, 128, 256] + }, + "34": { + "block_type": "BasicBlock", + "block_depth": [3, 4, 6, 3], + "num_channels": [64, 64, 128, 256] + }, + "50": { + "block_type": "BottleneckBlock", + "block_depth": [3, 4, 6, 3], + "num_channels": [64, 256, 512, 1024] + }, + "101": { + "block_type": "BottleneckBlock", + "block_depth": [3, 4, 23, 3], + "num_channels": [64, 256, 512, 1024] + }, + "152": { + "block_type": "BottleneckBlock", + "block_depth": [3, 8, 36, 3], + "num_channels": [64, 256, 512, 1024] + }, + "200": { + "block_type": "BottleneckBlock", + "block_depth": [3, 12, 48, 3], + "num_channels": [64, 256, 512, 1024] + }, +} + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + self.is_vd_mode = is_vd_mode + self.act = act + self.avg_pool = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=False, + data_format=data_format) + self.bn = BatchNorm( + num_filters, + param_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=ParamAttr(learning_rate=lr_mult), + data_layout=data_format) + self.relu = nn.ReLU() + + def forward(self, x): + if self.is_vd_mode: + x = self.avg_pool(x) + x = self.conv(x) + x = self.bn(x) + if self.act: + x = self.relu(x) + return x + + +class BottleneckBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + lr_mult=lr_mult, + data_format=data_format) + + if not shortcut: + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride if if_first else 1, + is_vd_mode=False if if_first else True, + lr_mult=lr_mult, + data_format=data_format) + self.relu = nn.ReLU() + self.shortcut = shortcut + + def forward(self, x): + identity = x + x = self.conv0(x) + x = self.conv1(x) + x = self.conv2(x) + + if self.shortcut: + short = identity + else: + short = self.short(identity) + x = paddle.add(x=x, y=short) + x = self.relu(x) + return x + + +class BasicBlock(TheseusLayer): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + if_first=False, + lr_mult=1.0, + data_format="NCHW"): + super().__init__() + + self.stride = stride + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=stride, + act="relu", + lr_mult=lr_mult, + data_format=data_format) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + act=None, + lr_mult=lr_mult, + data_format=data_format) + if not shortcut: + self.short = ConvBNLayer( + 
num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + stride=stride if if_first else 1, + is_vd_mode=False if if_first else True, + lr_mult=lr_mult, + data_format=data_format) + self.shortcut = shortcut + self.relu = nn.ReLU() + + def forward(self, x): + identity = x + x = self.conv0(x) + x = self.conv1(x) + if self.shortcut: + short = identity + else: + short = self.short(identity) + x = paddle.add(x=x, y=short) + x = self.relu(x) + return x + + +class ResNet(TheseusLayer): + """ + ResNet + Args: + config: dict. config of ResNet. + version: str="vb". Different version of ResNet, version vd can perform better. + class_num: int=1000. The number of classes. + lr_mult_list: list. Control the learning rate of different stages. + Returns: + model: nn.Layer. Specific ResNet model depends on args. + """ + + def __init__(self, + config, + version="vb", + class_num=1000, + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0], + data_format="NCHW", + input_image_channel=3, + return_patterns=None): + super().__init__() + + self.cfg = config + self.lr_mult_list = lr_mult_list + self.is_vd_mode = version == "vd" + self.class_num = class_num + self.num_filters = [64, 128, 256, 512] + self.block_depth = self.cfg["block_depth"] + self.block_type = self.cfg["block_type"] + self.num_channels = self.cfg["num_channels"] + self.channels_mult = 1 if self.num_channels[-1] == 256 else 4 + + assert isinstance(self.lr_mult_list, ( + list, tuple + )), "lr_mult_list should be in (list, tuple) but got {}".format( + type(self.lr_mult_list)) + assert len(self.lr_mult_list + ) == 5, "lr_mult_list length should be 5 but got {}".format( + len(self.lr_mult_list)) + + self.stem_cfg = { + #num_channels, num_filters, filter_size, stride + "vb": [[input_image_channel, 64, 7, 2]], + "vd": + [[input_image_channel, 32, 3, 2], [32, 32, 3, 1], [32, 64, 3, 1]] + } + + self.stem = nn.Sequential(* [ + ConvBNLayer( + num_channels=in_c, + num_filters=out_c, + filter_size=k, + stride=s, + act="relu", + lr_mult=self.lr_mult_list[0], + data_format=data_format) + for in_c, out_c, k, s in self.stem_cfg[version] + ]) + + self.max_pool = MaxPool2D( + kernel_size=3, stride=2, padding=1, data_format=data_format) + block_list = [] + for block_idx in range(len(self.block_depth)): + shortcut = False + for i in range(self.block_depth[block_idx]): + block_list.append(globals()[self.block_type]( + num_channels=self.num_channels[block_idx] if i == 0 else + self.num_filters[block_idx] * self.channels_mult, + num_filters=self.num_filters[block_idx], + stride=2 if i == 0 and block_idx != 0 else 1, + shortcut=shortcut, + if_first=block_idx == i == 0 if version == "vd" else True, + lr_mult=self.lr_mult_list[block_idx + 1], + data_format=data_format)) + shortcut = True + self.blocks = nn.Sequential(*block_list) + + self.avg_pool = AdaptiveAvgPool2D(1, data_format=data_format) + self.flatten = nn.Flatten() + self.avg_pool_channels = self.num_channels[-1] * 2 + stdv = 1.0 / math.sqrt(self.avg_pool_channels * 1.0) + self.fc = Linear( + self.avg_pool_channels, + self.class_num, + weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + + self.data_format = data_format + if return_patterns is not None: + self.update_res(return_patterns) + self.register_forward_post_hook(self._return_dict_hook) + + def forward(self, x): + with paddle.static.amp.fp16_guard(): + if self.data_format == "NHWC": + x = paddle.transpose(x, [0, 2, 3, 1]) + x.stop_gradient = True + x = self.stem(x) + fm = x + x = self.max_pool(x) + x = self.blocks(x) + x = self.avg_pool(x) + x = 
self.flatten(x) + x = self.fc(x) + return x, fm + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def ResNet18(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet18 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet18` model depends on args. + """ + model = ResNet(config=NET_CONFIG["18"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18"], use_ssld) + return model + + +def ResNet18_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet18_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet18_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["18"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet18_vd"], use_ssld) + return model + + +def ResNet34(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet34 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet34` model depends on args. + """ + model = ResNet(config=NET_CONFIG["34"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet34"], use_ssld) + return model + + +def ResNet34_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet34_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet34_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["34"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet34_vd"], use_ssld) + return model + + +def ResNet50(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet50 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet50` model depends on args. + """ + model = ResNet(config=NET_CONFIG["50"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50"], use_ssld) + return model + + +def ResNet50_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet50_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. 
Specific `ResNet50_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["50"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet50_vd"], use_ssld) + return model + + +def ResNet101(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet101 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet101` model depends on args. + """ + model = ResNet(config=NET_CONFIG["101"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet101"], use_ssld) + return model + + +def ResNet101_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet101_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet101_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["101"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet101_vd"], use_ssld) + return model + + +def ResNet152(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet152 + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet152` model depends on args. + """ + model = ResNet(config=NET_CONFIG["152"], version="vb", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet152"], use_ssld) + return model + + +def ResNet152_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet152_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet152_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["152"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet152_vd"], use_ssld) + return model + + +def ResNet200_vd(pretrained=False, use_ssld=False, **kwargs): + """ + ResNet200_vd + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `ResNet200_vd` model depends on args. + """ + model = ResNet(config=NET_CONFIG["200"], version="vd", **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["ResNet200_vd"], use_ssld) + return model diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/feature_maps_visualization/utils.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/feature_maps_visualization/utils.py new file mode 100644 index 000000000..7c7014932 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/feature_maps_visualization/utils.py @@ -0,0 +1,85 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import cv2 +import numpy as np + + +class DecodeImage(object): + def __init__(self, to_rgb=True): + self.to_rgb = to_rgb + + def __call__(self, img): + data = np.frombuffer(img, dtype='uint8') + img = cv2.imdecode(data, 1) + if self.to_rgb: + assert img.shape[2] == 3, 'invalid shape of image[%s]' % ( + img.shape) + img = img[:, :, ::-1] + + return img + + +class ResizeImage(object): + def __init__(self, resize_short=None, interpolation=1): + self.resize_short = resize_short + self.interpolation = interpolation + + def __call__(self, img): + img_h, img_w = img.shape[:2] + percent = float(self.resize_short) / min(img_w, img_h) + w = int(round(img_w * percent)) + h = int(round(img_h * percent)) + return cv2.resize(img, (w, h), interpolation=self.interpolation) + + +class CropImage(object): + def __init__(self, size): + if type(size) is int: + self.size = (size, size) + else: + self.size = size + + def __call__(self, img): + w, h = self.size + img_h, img_w = img.shape[:2] + w_start = (img_w - w) // 2 + h_start = (img_h - h) // 2 + + w_end = w_start + w + h_end = h_start + h + return img[h_start:h_end, w_start:w_end, :] + + +class NormalizeImage(object): + def __init__(self, scale=None, mean=None, std=None): + self.scale = np.float32(scale if scale is not None else 1.0 / 255.0) + mean = mean if mean is not None else [0.485, 0.456, 0.406] + std = std if std is not None else [0.229, 0.224, 0.225] + + shape = (1, 1, 3) + self.mean = np.array(mean).reshape(shape).astype('float32') + self.std = np.array(std).reshape(shape).astype('float32') + + def __call__(self, img): + return (img.astype('float32') * self.scale - self.mean) / self.std + + +class ToTensor(object): + def __init__(self): + pass + + def __call__(self, img): + img = img.transpose((2, 0, 1)) + return img diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/imagenet1k_label_list.txt b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/imagenet1k_label_list.txt new file mode 100644 index 000000000..376e18021 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/imagenet1k_label_list.txt @@ -0,0 +1,1000 @@ +0 tench, Tinca tinca +1 goldfish, Carassius auratus +2 great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias +3 tiger shark, Galeocerdo cuvieri +4 hammerhead, hammerhead shark +5 electric ray, crampfish, numbfish, torpedo +6 stingray +7 cock +8 hen +9 ostrich, Struthio camelus +10 brambling, Fringilla montifringilla +11 goldfinch, Carduelis carduelis +12 house finch, linnet, Carpodacus mexicanus +13 junco, snowbird +14 indigo bunting, indigo finch, indigo bird, Passerina cyanea +15 robin, American robin, Turdus migratorius +16 bulbul +17 jay +18 magpie +19 chickadee +20 water ouzel, dipper +21 kite +22 bald eagle, American eagle, Haliaeetus leucocephalus +23 vulture +24 great grey owl, great gray owl, Strix nebulosa +25 European fire salamander, Salamandra salamandra +26 common newt, Triturus vulgaris +27 eft +28 
spotted salamander, Ambystoma maculatum +29 axolotl, mud puppy, Ambystoma mexicanum +30 bullfrog, Rana catesbeiana +31 tree frog, tree-frog +32 tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui +33 loggerhead, loggerhead turtle, Caretta caretta +34 leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea +35 mud turtle +36 terrapin +37 box turtle, box tortoise +38 banded gecko +39 common iguana, iguana, Iguana iguana +40 American chameleon, anole, Anolis carolinensis +41 whiptail, whiptail lizard +42 agama +43 frilled lizard, Chlamydosaurus kingi +44 alligator lizard +45 Gila monster, Heloderma suspectum +46 green lizard, Lacerta viridis +47 African chameleon, Chamaeleo chamaeleon +48 Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis +49 African crocodile, Nile crocodile, Crocodylus niloticus +50 American alligator, Alligator mississipiensis +51 triceratops +52 thunder snake, worm snake, Carphophis amoenus +53 ringneck snake, ring-necked snake, ring snake +54 hognose snake, puff adder, sand viper +55 green snake, grass snake +56 king snake, kingsnake +57 garter snake, grass snake +58 water snake +59 vine snake +60 night snake, Hypsiglena torquata +61 boa constrictor, Constrictor constrictor +62 rock python, rock snake, Python sebae +63 Indian cobra, Naja naja +64 green mamba +65 sea snake +66 horned viper, cerastes, sand viper, horned asp, Cerastes cornutus +67 diamondback, diamondback rattlesnake, Crotalus adamanteus +68 sidewinder, horned rattlesnake, Crotalus cerastes +69 trilobite +70 harvestman, daddy longlegs, Phalangium opilio +71 scorpion +72 black and gold garden spider, Argiope aurantia +73 barn spider, Araneus cavaticus +74 garden spider, Aranea diademata +75 black widow, Latrodectus mactans +76 tarantula +77 wolf spider, hunting spider +78 tick +79 centipede +80 black grouse +81 ptarmigan +82 ruffed grouse, partridge, Bonasa umbellus +83 prairie chicken, prairie grouse, prairie fowl +84 peacock +85 quail +86 partridge +87 African grey, African gray, Psittacus erithacus +88 macaw +89 sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita +90 lorikeet +91 coucal +92 bee eater +93 hornbill +94 hummingbird +95 jacamar +96 toucan +97 drake +98 red-breasted merganser, Mergus serrator +99 goose +100 black swan, Cygnus atratus +101 tusker +102 echidna, spiny anteater, anteater +103 platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus +104 wallaby, brush kangaroo +105 koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus +106 wombat +107 jellyfish +108 sea anemone, anemone +109 brain coral +110 flatworm, platyhelminth +111 nematode, nematode worm, roundworm +112 conch +113 snail +114 slug +115 sea slug, nudibranch +116 chiton, coat-of-mail shell, sea cradle, polyplacophore +117 chambered nautilus, pearly nautilus, nautilus +118 Dungeness crab, Cancer magister +119 rock crab, Cancer irroratus +120 fiddler crab +121 king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica +122 American lobster, Northern lobster, Maine lobster, Homarus americanus +123 spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish +124 crayfish, crawfish, crawdad, crawdaddy +125 hermit crab +126 isopod +127 white stork, Ciconia ciconia +128 black stork, Ciconia nigra +129 spoonbill +130 flamingo +131 little blue heron, Egretta caerulea +132 American egret, great white heron, Egretta albus +133 bittern +134 crane +135 limpkin, Aramus pictus +136 European 
gallinule, Porphyrio porphyrio +137 American coot, marsh hen, mud hen, water hen, Fulica americana +138 bustard +139 ruddy turnstone, Arenaria interpres +140 red-backed sandpiper, dunlin, Erolia alpina +141 redshank, Tringa totanus +142 dowitcher +143 oystercatcher, oyster catcher +144 pelican +145 king penguin, Aptenodytes patagonica +146 albatross, mollymawk +147 grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus +148 killer whale, killer, orca, grampus, sea wolf, Orcinus orca +149 dugong, Dugong dugon +150 sea lion +151 Chihuahua +152 Japanese spaniel +153 Maltese dog, Maltese terrier, Maltese +154 Pekinese, Pekingese, Peke +155 Shih-Tzu +156 Blenheim spaniel +157 papillon +158 toy terrier +159 Rhodesian ridgeback +160 Afghan hound, Afghan +161 basset, basset hound +162 beagle +163 bloodhound, sleuthhound +164 bluetick +165 black-and-tan coonhound +166 Walker hound, Walker foxhound +167 English foxhound +168 redbone +169 borzoi, Russian wolfhound +170 Irish wolfhound +171 Italian greyhound +172 whippet +173 Ibizan hound, Ibizan Podenco +174 Norwegian elkhound, elkhound +175 otterhound, otter hound +176 Saluki, gazelle hound +177 Scottish deerhound, deerhound +178 Weimaraner +179 Staffordshire bullterrier, Staffordshire bull terrier +180 American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier +181 Bedlington terrier +182 Border terrier +183 Kerry blue terrier +184 Irish terrier +185 Norfolk terrier +186 Norwich terrier +187 Yorkshire terrier +188 wire-haired fox terrier +189 Lakeland terrier +190 Sealyham terrier, Sealyham +191 Airedale, Airedale terrier +192 cairn, cairn terrier +193 Australian terrier +194 Dandie Dinmont, Dandie Dinmont terrier +195 Boston bull, Boston terrier +196 miniature schnauzer +197 giant schnauzer +198 standard schnauzer +199 Scotch terrier, Scottish terrier, Scottie +200 Tibetan terrier, chrysanthemum dog +201 silky terrier, Sydney silky +202 soft-coated wheaten terrier +203 West Highland white terrier +204 Lhasa, Lhasa apso +205 flat-coated retriever +206 curly-coated retriever +207 golden retriever +208 Labrador retriever +209 Chesapeake Bay retriever +210 German short-haired pointer +211 vizsla, Hungarian pointer +212 English setter +213 Irish setter, red setter +214 Gordon setter +215 Brittany spaniel +216 clumber, clumber spaniel +217 English springer, English springer spaniel +218 Welsh springer spaniel +219 cocker spaniel, English cocker spaniel, cocker +220 Sussex spaniel +221 Irish water spaniel +222 kuvasz +223 schipperke +224 groenendael +225 malinois +226 briard +227 kelpie +228 komondor +229 Old English sheepdog, bobtail +230 Shetland sheepdog, Shetland sheep dog, Shetland +231 collie +232 Border collie +233 Bouvier des Flandres, Bouviers des Flandres +234 Rottweiler +235 German shepherd, German shepherd dog, German police dog, alsatian +236 Doberman, Doberman pinscher +237 miniature pinscher +238 Greater Swiss Mountain dog +239 Bernese mountain dog +240 Appenzeller +241 EntleBucher +242 boxer +243 bull mastiff +244 Tibetan mastiff +245 French bulldog +246 Great Dane +247 Saint Bernard, St Bernard +248 Eskimo dog, husky +249 malamute, malemute, Alaskan malamute +250 Siberian husky +251 dalmatian, coach dog, carriage dog +252 affenpinscher, monkey pinscher, monkey dog +253 basenji +254 pug, pug-dog +255 Leonberg +256 Newfoundland, Newfoundland dog +257 Great Pyrenees +258 Samoyed, Samoyede +259 Pomeranian +260 chow, chow chow +261 keeshond +262 Brabancon griffon +263 Pembroke, 
Pembroke Welsh corgi +264 Cardigan, Cardigan Welsh corgi +265 toy poodle +266 miniature poodle +267 standard poodle +268 Mexican hairless +269 timber wolf, grey wolf, gray wolf, Canis lupus +270 white wolf, Arctic wolf, Canis lupus tundrarum +271 red wolf, maned wolf, Canis rufus, Canis niger +272 coyote, prairie wolf, brush wolf, Canis latrans +273 dingo, warrigal, warragal, Canis dingo +274 dhole, Cuon alpinus +275 African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus +276 hyena, hyaena +277 red fox, Vulpes vulpes +278 kit fox, Vulpes macrotis +279 Arctic fox, white fox, Alopex lagopus +280 grey fox, gray fox, Urocyon cinereoargenteus +281 tabby, tabby cat +282 tiger cat +283 Persian cat +284 Siamese cat, Siamese +285 Egyptian cat +286 cougar, puma, catamount, mountain lion, painter, panther, Felis concolor +287 lynx, catamount +288 leopard, Panthera pardus +289 snow leopard, ounce, Panthera uncia +290 jaguar, panther, Panthera onca, Felis onca +291 lion, king of beasts, Panthera leo +292 tiger, Panthera tigris +293 cheetah, chetah, Acinonyx jubatus +294 brown bear, bruin, Ursus arctos +295 American black bear, black bear, Ursus americanus, Euarctos americanus +296 ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus +297 sloth bear, Melursus ursinus, Ursus ursinus +298 mongoose +299 meerkat, mierkat +300 tiger beetle +301 ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle +302 ground beetle, carabid beetle +303 long-horned beetle, longicorn, longicorn beetle +304 leaf beetle, chrysomelid +305 dung beetle +306 rhinoceros beetle +307 weevil +308 fly +309 bee +310 ant, emmet, pismire +311 grasshopper, hopper +312 cricket +313 walking stick, walkingstick, stick insect +314 cockroach, roach +315 mantis, mantid +316 cicada, cicala +317 leafhopper +318 lacewing, lacewing fly +319 dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk +320 damselfly +321 admiral +322 ringlet, ringlet butterfly +323 monarch, monarch butterfly, milkweed butterfly, Danaus plexippus +324 cabbage butterfly +325 sulphur butterfly, sulfur butterfly +326 lycaenid, lycaenid butterfly +327 starfish, sea star +328 sea urchin +329 sea cucumber, holothurian +330 wood rabbit, cottontail, cottontail rabbit +331 hare +332 Angora, Angora rabbit +333 hamster +334 porcupine, hedgehog +335 fox squirrel, eastern fox squirrel, Sciurus niger +336 marmot +337 beaver +338 guinea pig, Cavia cobaya +339 sorrel +340 zebra +341 hog, pig, grunter, squealer, Sus scrofa +342 wild boar, boar, Sus scrofa +343 warthog +344 hippopotamus, hippo, river horse, Hippopotamus amphibius +345 ox +346 water buffalo, water ox, Asiatic buffalo, Bubalus bubalis +347 bison +348 ram, tup +349 bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis +350 ibex, Capra ibex +351 hartebeest +352 impala, Aepyceros melampus +353 gazelle +354 Arabian camel, dromedary, Camelus dromedarius +355 llama +356 weasel +357 mink +358 polecat, fitch, foulmart, foumart, Mustela putorius +359 black-footed ferret, ferret, Mustela nigripes +360 otter +361 skunk, polecat, wood pussy +362 badger +363 armadillo +364 three-toed sloth, ai, Bradypus tridactylus +365 orangutan, orang, orangutang, Pongo pygmaeus +366 gorilla, Gorilla gorilla +367 chimpanzee, chimp, Pan troglodytes +368 gibbon, Hylobates lar +369 siamang, Hylobates syndactylus, Symphalangus syndactylus +370 guenon, guenon monkey +371 patas, hussar monkey, Erythrocebus patas +372 baboon +373 
macaque +374 langur +375 colobus, colobus monkey +376 proboscis monkey, Nasalis larvatus +377 marmoset +378 capuchin, ringtail, Cebus capucinus +379 howler monkey, howler +380 titi, titi monkey +381 spider monkey, Ateles geoffroyi +382 squirrel monkey, Saimiri sciureus +383 Madagascar cat, ring-tailed lemur, Lemur catta +384 indri, indris, Indri indri, Indri brevicaudatus +385 Indian elephant, Elephas maximus +386 African elephant, Loxodonta africana +387 lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens +388 giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca +389 barracouta, snoek +390 eel +391 coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch +392 rock beauty, Holocanthus tricolor +393 anemone fish +394 sturgeon +395 gar, garfish, garpike, billfish, Lepisosteus osseus +396 lionfish +397 puffer, pufferfish, blowfish, globefish +398 abacus +399 abaya +400 academic gown, academic robe, judge's robe +401 accordion, piano accordion, squeeze box +402 acoustic guitar +403 aircraft carrier, carrier, flattop, attack aircraft carrier +404 airliner +405 airship, dirigible +406 altar +407 ambulance +408 amphibian, amphibious vehicle +409 analog clock +410 apiary, bee house +411 apron +412 ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin +413 assault rifle, assault gun +414 backpack, back pack, knapsack, packsack, rucksack, haversack +415 bakery, bakeshop, bakehouse +416 balance beam, beam +417 balloon +418 ballpoint, ballpoint pen, ballpen, Biro +419 Band Aid +420 banjo +421 bannister, banister, balustrade, balusters, handrail +422 barbell +423 barber chair +424 barbershop +425 barn +426 barometer +427 barrel, cask +428 barrow, garden cart, lawn cart, wheelbarrow +429 baseball +430 basketball +431 bassinet +432 bassoon +433 bathing cap, swimming cap +434 bath towel +435 bathtub, bathing tub, bath, tub +436 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon +437 beacon, lighthouse, beacon light, pharos +438 beaker +439 bearskin, busby, shako +440 beer bottle +441 beer glass +442 bell cote, bell cot +443 bib +444 bicycle-built-for-two, tandem bicycle, tandem +445 bikini, two-piece +446 binder, ring-binder +447 binoculars, field glasses, opera glasses +448 birdhouse +449 boathouse +450 bobsled, bobsleigh, bob +451 bolo tie, bolo, bola tie, bola +452 bonnet, poke bonnet +453 bookcase +454 bookshop, bookstore, bookstall +455 bottlecap +456 bow +457 bow tie, bow-tie, bowtie +458 brass, memorial tablet, plaque +459 brassiere, bra, bandeau +460 breakwater, groin, groyne, mole, bulwark, seawall, jetty +461 breastplate, aegis, egis +462 broom +463 bucket, pail +464 buckle +465 bulletproof vest +466 bullet train, bullet +467 butcher shop, meat market +468 cab, hack, taxi, taxicab +469 caldron, cauldron +470 candle, taper, wax light +471 cannon +472 canoe +473 can opener, tin opener +474 cardigan +475 car mirror +476 carousel, carrousel, merry-go-round, roundabout, whirligig +477 carpenter's kit, tool kit +478 carton +479 car wheel +480 cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM +481 cassette +482 cassette player +483 castle +484 catamaran +485 CD player +486 cello, violoncello +487 cellular telephone, cellular phone, cellphone, cell, mobile phone +488 chain +489 chainlink fence +490 chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour +491 chain saw, chainsaw +492 
chest +493 chiffonier, commode +494 chime, bell, gong +495 china cabinet, china closet +496 Christmas stocking +497 church, church building +498 cinema, movie theater, movie theatre, movie house, picture palace +499 cleaver, meat cleaver, chopper +500 cliff dwelling +501 cloak +502 clog, geta, patten, sabot +503 cocktail shaker +504 coffee mug +505 coffeepot +506 coil, spiral, volute, whorl, helix +507 combination lock +508 computer keyboard, keypad +509 confectionery, confectionary, candy store +510 container ship, containership, container vessel +511 convertible +512 corkscrew, bottle screw +513 cornet, horn, trumpet, trump +514 cowboy boot +515 cowboy hat, ten-gallon hat +516 cradle +517 crane +518 crash helmet +519 crate +520 crib, cot +521 Crock Pot +522 croquet ball +523 crutch +524 cuirass +525 dam, dike, dyke +526 desk +527 desktop computer +528 dial telephone, dial phone +529 diaper, nappy, napkin +530 digital clock +531 digital watch +532 dining table, board +533 dishrag, dishcloth +534 dishwasher, dish washer, dishwashing machine +535 disk brake, disc brake +536 dock, dockage, docking facility +537 dogsled, dog sled, dog sleigh +538 dome +539 doormat, welcome mat +540 drilling platform, offshore rig +541 drum, membranophone, tympan +542 drumstick +543 dumbbell +544 Dutch oven +545 electric fan, blower +546 electric guitar +547 electric locomotive +548 entertainment center +549 envelope +550 espresso maker +551 face powder +552 feather boa, boa +553 file, file cabinet, filing cabinet +554 fireboat +555 fire engine, fire truck +556 fire screen, fireguard +557 flagpole, flagstaff +558 flute, transverse flute +559 folding chair +560 football helmet +561 forklift +562 fountain +563 fountain pen +564 four-poster +565 freight car +566 French horn, horn +567 frying pan, frypan, skillet +568 fur coat +569 garbage truck, dustcart +570 gasmask, respirator, gas helmet +571 gas pump, gasoline pump, petrol pump, island dispenser +572 goblet +573 go-kart +574 golf ball +575 golfcart, golf cart +576 gondola +577 gong, tam-tam +578 gown +579 grand piano, grand +580 greenhouse, nursery, glasshouse +581 grille, radiator grille +582 grocery store, grocery, food market, market +583 guillotine +584 hair slide +585 hair spray +586 half track +587 hammer +588 hamper +589 hand blower, blow dryer, blow drier, hair dryer, hair drier +590 hand-held computer, hand-held microcomputer +591 handkerchief, hankie, hanky, hankey +592 hard disc, hard disk, fixed disk +593 harmonica, mouth organ, harp, mouth harp +594 harp +595 harvester, reaper +596 hatchet +597 holster +598 home theater, home theatre +599 honeycomb +600 hook, claw +601 hoopskirt, crinoline +602 horizontal bar, high bar +603 horse cart, horse-cart +604 hourglass +605 iPod +606 iron, smoothing iron +607 jack-o'-lantern +608 jean, blue jean, denim +609 jeep, landrover +610 jersey, T-shirt, tee shirt +611 jigsaw puzzle +612 jinrikisha, ricksha, rickshaw +613 joystick +614 kimono +615 knee pad +616 knot +617 lab coat, laboratory coat +618 ladle +619 lampshade, lamp shade +620 laptop, laptop computer +621 lawn mower, mower +622 lens cap, lens cover +623 letter opener, paper knife, paperknife +624 library +625 lifeboat +626 lighter, light, igniter, ignitor +627 limousine, limo +628 liner, ocean liner +629 lipstick, lip rouge +630 Loafer +631 lotion +632 loudspeaker, speaker, speaker unit, loudspeaker system, speaker system +633 loupe, jeweler's loupe +634 lumbermill, sawmill +635 magnetic compass +636 mailbag, postbag +637 mailbox, letter box +638 
maillot +639 maillot, tank suit +640 manhole cover +641 maraca +642 marimba, xylophone +643 mask +644 matchstick +645 maypole +646 maze, labyrinth +647 measuring cup +648 medicine chest, medicine cabinet +649 megalith, megalithic structure +650 microphone, mike +651 microwave, microwave oven +652 military uniform +653 milk can +654 minibus +655 miniskirt, mini +656 minivan +657 missile +658 mitten +659 mixing bowl +660 mobile home, manufactured home +661 Model T +662 modem +663 monastery +664 monitor +665 moped +666 mortar +667 mortarboard +668 mosque +669 mosquito net +670 motor scooter, scooter +671 mountain bike, all-terrain bike, off-roader +672 mountain tent +673 mouse, computer mouse +674 mousetrap +675 moving van +676 muzzle +677 nail +678 neck brace +679 necklace +680 nipple +681 notebook, notebook computer +682 obelisk +683 oboe, hautboy, hautbois +684 ocarina, sweet potato +685 odometer, hodometer, mileometer, milometer +686 oil filter +687 organ, pipe organ +688 oscilloscope, scope, cathode-ray oscilloscope, CRO +689 overskirt +690 oxcart +691 oxygen mask +692 packet +693 paddle, boat paddle +694 paddlewheel, paddle wheel +695 padlock +696 paintbrush +697 pajama, pyjama, pj's, jammies +698 palace +699 panpipe, pandean pipe, syrinx +700 paper towel +701 parachute, chute +702 parallel bars, bars +703 park bench +704 parking meter +705 passenger car, coach, carriage +706 patio, terrace +707 pay-phone, pay-station +708 pedestal, plinth, footstall +709 pencil box, pencil case +710 pencil sharpener +711 perfume, essence +712 Petri dish +713 photocopier +714 pick, plectrum, plectron +715 pickelhaube +716 picket fence, paling +717 pickup, pickup truck +718 pier +719 piggy bank, penny bank +720 pill bottle +721 pillow +722 ping-pong ball +723 pinwheel +724 pirate, pirate ship +725 pitcher, ewer +726 plane, carpenter's plane, woodworking plane +727 planetarium +728 plastic bag +729 plate rack +730 plow, plough +731 plunger, plumber's helper +732 Polaroid camera, Polaroid Land camera +733 pole +734 police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria +735 poncho +736 pool table, billiard table, snooker table +737 pop bottle, soda bottle +738 pot, flowerpot +739 potter's wheel +740 power drill +741 prayer rug, prayer mat +742 printer +743 prison, prison house +744 projectile, missile +745 projector +746 puck, hockey puck +747 punching bag, punch bag, punching ball, punchball +748 purse +749 quill, quill pen +750 quilt, comforter, comfort, puff +751 racer, race car, racing car +752 racket, racquet +753 radiator +754 radio, wireless +755 radio telescope, radio reflector +756 rain barrel +757 recreational vehicle, RV, R.V. 
+758 reel +759 reflex camera +760 refrigerator, icebox +761 remote control, remote +762 restaurant, eating house, eating place, eatery +763 revolver, six-gun, six-shooter +764 rifle +765 rocking chair, rocker +766 rotisserie +767 rubber eraser, rubber, pencil eraser +768 rugby ball +769 rule, ruler +770 running shoe +771 safe +772 safety pin +773 saltshaker, salt shaker +774 sandal +775 sarong +776 sax, saxophone +777 scabbard +778 scale, weighing machine +779 school bus +780 schooner +781 scoreboard +782 screen, CRT screen +783 screw +784 screwdriver +785 seat belt, seatbelt +786 sewing machine +787 shield, buckler +788 shoe shop, shoe-shop, shoe store +789 shoji +790 shopping basket +791 shopping cart +792 shovel +793 shower cap +794 shower curtain +795 ski +796 ski mask +797 sleeping bag +798 slide rule, slipstick +799 sliding door +800 slot, one-armed bandit +801 snorkel +802 snowmobile +803 snowplow, snowplough +804 soap dispenser +805 soccer ball +806 sock +807 solar dish, solar collector, solar furnace +808 sombrero +809 soup bowl +810 space bar +811 space heater +812 space shuttle +813 spatula +814 speedboat +815 spider web, spider's web +816 spindle +817 sports car, sport car +818 spotlight, spot +819 stage +820 steam locomotive +821 steel arch bridge +822 steel drum +823 stethoscope +824 stole +825 stone wall +826 stopwatch, stop watch +827 stove +828 strainer +829 streetcar, tram, tramcar, trolley, trolley car +830 stretcher +831 studio couch, day bed +832 stupa, tope +833 submarine, pigboat, sub, U-boat +834 suit, suit of clothes +835 sundial +836 sunglass +837 sunglasses, dark glasses, shades +838 sunscreen, sunblock, sun blocker +839 suspension bridge +840 swab, swob, mop +841 sweatshirt +842 swimming trunks, bathing trunks +843 swing +844 switch, electric switch, electrical switch +845 syringe +846 table lamp +847 tank, army tank, armored combat vehicle, armoured combat vehicle +848 tape player +849 teapot +850 teddy, teddy bear +851 television, television system +852 tennis ball +853 thatch, thatched roof +854 theater curtain, theatre curtain +855 thimble +856 thresher, thrasher, threshing machine +857 throne +858 tile roof +859 toaster +860 tobacco shop, tobacconist shop, tobacconist +861 toilet seat +862 torch +863 totem pole +864 tow truck, tow car, wrecker +865 toyshop +866 tractor +867 trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi +868 tray +869 trench coat +870 tricycle, trike, velocipede +871 trimaran +872 tripod +873 triumphal arch +874 trolleybus, trolley coach, trackless trolley +875 trombone +876 tub, vat +877 turnstile +878 typewriter keyboard +879 umbrella +880 unicycle, monocycle +881 upright, upright piano +882 vacuum, vacuum cleaner +883 vase +884 vault +885 velvet +886 vending machine +887 vestment +888 viaduct +889 violin, fiddle +890 volleyball +891 waffle iron +892 wall clock +893 wallet, billfold, notecase, pocketbook +894 wardrobe, closet, press +895 warplane, military plane +896 washbasin, handbasin, washbowl, lavabo, wash-hand basin +897 washer, automatic washer, washing machine +898 water bottle +899 water jug +900 water tower +901 whiskey jug +902 whistle +903 wig +904 window screen +905 window shade +906 Windsor tie +907 wine bottle +908 wing +909 wok +910 wooden spoon +911 wool, woolen, woollen +912 worm fence, snake fence, snake-rail fence, Virginia fence +913 wreck +914 yawl +915 yurt +916 web site, website, internet site, site +917 comic book +918 crossword puzzle, crossword +919 street sign +920 traffic light, 
traffic signal, stoplight +921 book jacket, dust cover, dust jacket, dust wrapper +922 menu +923 plate +924 guacamole +925 consomme +926 hot pot, hotpot +927 trifle +928 ice cream, icecream +929 ice lolly, lolly, lollipop, popsicle +930 French loaf +931 bagel, beigel +932 pretzel +933 cheeseburger +934 hotdog, hot dog, red hot +935 mashed potato +936 head cabbage +937 broccoli +938 cauliflower +939 zucchini, courgette +940 spaghetti squash +941 acorn squash +942 butternut squash +943 cucumber, cuke +944 artichoke, globe artichoke +945 bell pepper +946 cardoon +947 mushroom +948 Granny Smith +949 strawberry +950 orange +951 lemon +952 fig +953 pineapple, ananas +954 banana +955 jackfruit, jak, jack +956 custard apple +957 pomegranate +958 hay +959 carbonara +960 chocolate sauce, chocolate syrup +961 dough +962 meat loaf, meatloaf +963 pizza, pizza pie +964 potpie +965 burrito +966 red wine +967 espresso +968 cup +969 eggnog +970 alp +971 bubble +972 cliff, drop, drop-off +973 coral reef +974 geyser +975 lakeside, lakeshore +976 promontory, headland, head, foreland +977 sandbar, sand bar +978 seashore, coast, seacoast, sea-coast +979 valley, vale +980 volcano +981 ballplayer, baseball player +982 groom, bridegroom +983 scuba diver +984 rapeseed +985 daisy +986 yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum +987 corn +988 acorn +989 hip, rose hip, rosehip +990 buckeye, horse chestnut, conker +991 coral fungus +992 agaric +993 gyromitra +994 stinkhorn, carrion fungus +995 earthstar +996 hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa +997 bolete +998 ear, spike, capitulum +999 toilet tissue, toilet paper, bathroom tissue diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/initializer.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/initializer.py new file mode 100644 index 000000000..b044e8088 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/initializer.py @@ -0,0 +1,318 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This code is based on https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py +Ths copyright of pytorch/pytorch is a BSD-style license, as found in the LICENSE file. 
+""" + +import math +import numpy as np + +import paddle +import paddle.nn as nn + +__all__ = [ + 'uniform_', + 'normal_', + 'constant_', + 'ones_', + 'zeros_', + 'xavier_uniform_', + 'xavier_normal_', + 'kaiming_uniform_', + 'kaiming_normal_', + 'linear_init_', + 'conv_init_', + 'reset_initialized_parameter', +] + + +def _no_grad_uniform_(tensor, a, b): + with paddle.no_grad(): + tensor.set_value( + paddle.uniform( + shape=tensor.shape, dtype=tensor.dtype, min=a, max=b)) + return tensor + + +def _no_grad_normal_(tensor, mean=0., std=1.): + with paddle.no_grad(): + tensor.set_value(paddle.normal(mean=mean, std=std, shape=tensor.shape)) + return tensor + + +def _no_grad_fill_(tensor, value=0.): + with paddle.no_grad(): + tensor.set_value(paddle.full_like(tensor, value, dtype=tensor.dtype)) + return tensor + + +def uniform_(tensor, a, b): + """ + Modified tensor inspace using uniform_ + Args: + tensor (paddle.Tensor): paddle Tensor + a (float|int): min value. + b (float|int): max value. + Return: + tensor + """ + return _no_grad_uniform_(tensor, a, b) + + +def normal_(tensor, mean=0., std=1.): + """ + Modified tensor inspace using normal_ + Args: + tensor (paddle.Tensor): paddle Tensor + mean (float|int): mean value. + std (float|int): std value. + Return: + tensor + """ + return _no_grad_normal_(tensor, mean, std) + + +def constant_(tensor, value=0.): + """ + Modified tensor inspace using constant_ + Args: + tensor (paddle.Tensor): paddle Tensor + value (float|int): value to fill tensor. + Return: + tensor + """ + return _no_grad_fill_(tensor, value) + + +def ones_(tensor): + """ + Modified tensor inspace using ones_ + Args: + tensor (paddle.Tensor): paddle Tensor + Return: + tensor + """ + return _no_grad_fill_(tensor, 1) + + +def zeros_(tensor): + """ + Modified tensor inspace using zeros_ + Args: + tensor (paddle.Tensor): paddle Tensor + Return: + tensor + """ + return _no_grad_fill_(tensor, 0) + + +def _calculate_fan_in_and_fan_out(tensor, reverse=False): + """ + Calculate (fan_in, _fan_out) for tensor + + Args: + tensor (Tensor): paddle.Tensor + reverse (bool: False): tensor data format order, False by default as [fout, fin, ...]. e.g. : conv.weight [cout, cin, kh, kw] is False; linear.weight [cin, cout] is True + + Return: + Tuple[fan_in, fan_out] + """ + if tensor.ndim < 2: + raise ValueError( + "Fan in and fan out can not be computed for tensor with fewer than 2 dimensions" + ) + + if reverse: + num_input_fmaps, num_output_fmaps = tensor.shape[0], tensor.shape[1] + else: + num_input_fmaps, num_output_fmaps = tensor.shape[1], tensor.shape[0] + + receptive_field_size = 1 + if tensor.ndim > 2: + receptive_field_size = np.prod(tensor.shape[2:]) + + fan_in = num_input_fmaps * receptive_field_size + fan_out = num_output_fmaps * receptive_field_size + + return fan_in, fan_out + + +def xavier_uniform_(tensor, gain=1., reverse=False): + """ + Modified tensor inspace using xavier_uniform_ + Args: + tensor (paddle.Tensor): paddle Tensor + gain (float): super parameter, 1. default. + reverse (bool): reverse (bool: False): tensor data format order, False by default as [fout, fin, ...]. 
+ Return: + tensor + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + k = math.sqrt(3.0) * std + return _no_grad_uniform_(tensor, -k, k) + + +def xavier_normal_(tensor, gain=1., reverse=False): + """ + Modified tensor inspace using xavier_normal_ + Args: + tensor (paddle.Tensor): paddle Tensor + gain (float): super parameter, 1. default. + reverse (bool): reverse (bool: False): tensor data format order, False by default as [fout, fin, ...]. + Return: + tensor + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + return _no_grad_normal_(tensor, 0, std) + + +# reference: https://pytorch.org/docs/stable/_modules/torch/nn/init.html +def _calculate_correct_fan(tensor, mode, reverse=False): + mode = mode.lower() + valid_modes = ['fan_in', 'fan_out'] + if mode not in valid_modes: + raise ValueError("Mode {} not supported, please use one of {}".format( + mode, valid_modes)) + + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse) + + return fan_in if mode == 'fan_in' else fan_out + + +def _calculate_gain(nonlinearity, param=None): + linear_fns = [ + 'linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', + 'conv_transpose2d', 'conv_transpose3d' + ] + if nonlinearity in linear_fns or nonlinearity == 'sigmoid': + return 1 + elif nonlinearity == 'tanh': + return 5.0 / 3 + elif nonlinearity == 'relu': + return math.sqrt(2.0) + elif nonlinearity == 'leaky_relu': + if param is None: + negative_slope = 0.01 + elif not isinstance(param, bool) and isinstance( + param, int) or isinstance(param, float): + # True/False are instances of int, hence check above + negative_slope = param + else: + raise ValueError("negative_slope {} not a valid number".format( + param)) + return math.sqrt(2.0 / (1 + negative_slope**2)) + elif nonlinearity == 'selu': + return 3.0 / 4 + else: + raise ValueError("Unsupported nonlinearity {}".format(nonlinearity)) + + +def kaiming_uniform_(tensor, + a=0, + mode='fan_in', + nonlinearity='leaky_relu', + reverse=False): + """ + Modified tensor inspace using kaiming_uniform method + Args: + tensor (paddle.Tensor): paddle Tensor + mode (str): ['fan_in', 'fan_out'], 'fin_in' defalut + nonlinearity (str): nonlinearity method name + reverse (bool): reverse (bool: False): tensor data format order, False by default as [fout, fin, ...]. + Return: + tensor + """ + fan = _calculate_correct_fan(tensor, mode, reverse) + gain = _calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + k = math.sqrt(3.0) * std + return _no_grad_uniform_(tensor, -k, k) + + +def kaiming_normal_(tensor, + a=0, + mode='fan_in', + nonlinearity='leaky_relu', + reverse=False): + """ + Modified tensor inspace using kaiming_normal_ + Args: + tensor (paddle.Tensor): paddle Tensor + mode (str): ['fan_in', 'fan_out'], 'fin_in' defalut + nonlinearity (str): nonlinearity method name + reverse (bool): reverse (bool: False): tensor data format order, False by default as [fout, fin, ...]. 
+ Return: + tensor + """ + fan = _calculate_correct_fan(tensor, mode, reverse) + gain = _calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + return _no_grad_normal_(tensor, 0, std) + + +def linear_init_(module): + bound = 1 / math.sqrt(module.weight.shape[0]) + uniform_(module.weight, -bound, bound) + uniform_(module.bias, -bound, bound) + + +def conv_init_(module): + bound = 1 / np.sqrt(np.prod(module.weight.shape[1:])) + uniform_(module.weight, -bound, bound) + if module.bias is not None: + uniform_(module.bias, -bound, bound) + + +def bias_init_with_prob(prior_prob=0.01): + """initialize conv/fc bias value according to a given probability value.""" + bias_init = float(-np.log((1 - prior_prob) / prior_prob)) + return bias_init + + +@paddle.no_grad() +def reset_initialized_parameter(model, include_self=True): + """ + Reset initialized parameter using following method for [conv, linear, embedding, bn] + + Args: + model (paddle.Layer): paddle Layer + include_self (bool: False): include_self for Layer.named_sublayers method. Indicate whether including itself + Return: + None + """ + for _, m in model.named_sublayers(include_self=include_self): + if isinstance(m, nn.Conv2D): + k = float(m._groups) / (m._in_channels * m._kernel_size[0] * + m._kernel_size[1]) + k = math.sqrt(k) + _no_grad_uniform_(m.weight, -k, k) + if hasattr(m, 'bias') and getattr(m, 'bias') is not None: + _no_grad_uniform_(m.bias, -k, k) + + elif isinstance(m, nn.Linear): + k = math.sqrt(1. / m.weight.shape[0]) + _no_grad_uniform_(m.weight, -k, k) + if hasattr(m, 'bias') and getattr(m, 'bias') is not None: + _no_grad_uniform_(m.bias, -k, k) + + elif isinstance(m, nn.Embedding): + _no_grad_normal_(m.weight, mean=0., std=1.) + + elif isinstance(m, (nn.BatchNorm2D, nn.LayerNorm)): + _no_grad_fill_(m.weight, 1.) + if hasattr(m, 'bias') and getattr(m, 'bias') is not None: + _no_grad_fill_(m.bias, 0) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/logger.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/logger.py new file mode 100644 index 000000000..b8d1ebaf8 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/logger.py @@ -0,0 +1,173 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import logging +import os +import sys + +import paddle.distributed as dist + +_logger = None + + +class LoggerHook(object): + """ + logs will print multi-times when calling Fleet API. + Commonly, only need to display single log at rank0 and ignore the others. + """ + block = False + + def __init__(self, log): + self.log = log + + def __call__(self, *args, **kwargs): + if not self.block: + self.log(*args, **kwargs) + + +def init_logger(name='ppcls', + log_file=None, + log_level=logging.INFO, + log_ranks="0"): + """Initialize and get a logger by name. 
+ If the logger has not been initialized, this method will initialize the + logger by adding one or two handlers, otherwise the initialized logger will + be directly returned. During initialization, a StreamHandler will always be + added. If `log_file` is specified a FileHandler will also be added. + Args: + name (str): Logger name. + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the logger. + log_level (int): The logger level. Note that only the process of + rank 0 is affected, and other processes will set the level to + "Error" thus be silent most of the time. + log_ranks (str): The ids of gpu to log which are separated by "," when more than 1, "0" by default. + Returns: + logging.Logger: The expected logger. + """ + global _logger + + # solve mutiple init issue when using paddleclas.py and engin.engin + init_flag = False + if _logger is None: + _logger = logging.getLogger(name) + init_flag = True + + formatter = logging.Formatter( + '[%(asctime)s] %(name)s %(levelname)s: %(message)s', + datefmt="%Y/%m/%d %H:%M:%S") + + stream_handler = logging.StreamHandler(stream=sys.stdout) + stream_handler.setFormatter(formatter) + stream_handler._name = 'stream_handler' + + # add stream_handler when _logger dose not contain stream_handler + for i, h in enumerate(_logger.handlers): + if h.get_name() == stream_handler.get_name(): + break + if i == len(_logger.handlers) - 1: + _logger.addHandler(stream_handler) + if init_flag: + _logger.addHandler(stream_handler) + + if log_file is not None and dist.get_rank() == 0: + log_file_folder = os.path.split(log_file)[0] + os.makedirs(log_file_folder, exist_ok=True) + file_handler = logging.FileHandler(log_file, 'a') + file_handler.setFormatter(formatter) + file_handler._name = 'file_handler' + + # add file_handler when _logger dose not contain same file_handler + for i, h in enumerate(_logger.handlers): + if h.get_name() == file_handler.get_name() and \ + h.baseFilename == file_handler.baseFilename: + break + if i == len(_logger.handlers) - 1: + _logger.addHandler(file_handler) + + if isinstance(log_ranks, str): + log_ranks = [int(i) for i in log_ranks.split(',')] + elif isinstance(log_ranks, int): + log_ranks = [log_ranks] + if dist.get_rank() in log_ranks: + _logger.setLevel(log_level) + LoggerHook.block = False + else: + _logger.setLevel(logging.ERROR) + LoggerHook.block = True + _logger.propagate = False + + +@LoggerHook +def info(fmt, *args): + _logger.info(fmt, *args) + + +@LoggerHook +def debug(fmt, *args): + _logger.debug(fmt, *args) + + +@LoggerHook +def warning(fmt, *args): + _logger.warning(fmt, *args) + + +@LoggerHook +def error(fmt, *args): + _logger.error(fmt, *args) + + +def scaler(name, value, step, writer): + """ + This function will draw a scalar curve generated by the visualdl. + Usage: Install visualdl: pip3 install visualdl==2.0.0b4 + and then: + visualdl --logdir ./scalar --host 0.0.0.0 --port 8830 + to preview loss corve in real time. + """ + if writer is None: + return + writer.add_scalar(tag=name, step=step, value=value) + + +def advertise(): + """ + Show the advertising message like the following: + + =========================================================== + == PaddleClas is powered by PaddlePaddle ! == + =========================================================== + == == + == For more info please go to the following website. 
== + == == + == https://github.com/PaddlePaddle/PaddleClas == + =========================================================== + + """ + copyright = "PaddleClas is powered by PaddlePaddle !" + ad = "For more info please go to the following website." + website = "https://github.com/PaddlePaddle/PaddleClas" + AD_LEN = 6 + len(max([copyright, ad, website], key=len)) + + info("\n{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}\n".format( + "=" * (AD_LEN + 4), + "=={}==".format(copyright.center(AD_LEN)), + "=" * (AD_LEN + 4), + "=={}==".format(' ' * AD_LEN), + "=={}==".format(ad.center(AD_LEN)), + "=={}==".format(' ' * AD_LEN), + "=={}==".format(website.center(AD_LEN)), + "=" * (AD_LEN + 4), )) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/metrics.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/metrics.py new file mode 100644 index 000000000..b0db68a75 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/metrics.py @@ -0,0 +1,107 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from sklearn.metrics import hamming_loss +from sklearn.metrics import accuracy_score as accuracy_metric +from sklearn.metrics import multilabel_confusion_matrix +from sklearn.metrics import precision_recall_fscore_support +from sklearn.metrics import average_precision_score +from sklearn.preprocessing import binarize + +import numpy as np + +__all__ = ["multi_hot_encode", "hamming_distance", "accuracy_score", "precision_recall_fscore", "mean_average_precision"] + + +def multi_hot_encode(logits, threshold=0.5): + """ + Encode logits to multi-hot by elementwise for multilabel + """ + + return binarize(logits, threshold=threshold) + + +def hamming_distance(output, target): + """ + Soft metric based label for multilabel classification + Returns: + The smaller the return value is, the better model is. + """ + + return hamming_loss(target, output) + + +def accuracy_score(output, target, base="sample"): + """ + Hard metric for multilabel classification + Args: + output: + target: + base: ["sample", "label"], default="sample" + if "sample", return metric score based sample, + if "label", return metric score based label. 
+ Returns: + accuracy: + """ + + assert base in ["sample", "label"], 'must be one of ["sample", "label"]' + + if base == "sample": + accuracy = accuracy_metric(target, output) + elif base == "label": + mcm = multilabel_confusion_matrix(target, output) + tns = mcm[:, 0, 0] + fns = mcm[:, 1, 0] + tps = mcm[:, 1, 1] + fps = mcm[:, 0, 1] + + accuracy = (sum(tps) + sum(tns)) / (sum(tps) + sum(tns) + sum(fns) + sum(fps)) + + return accuracy + + +def precision_recall_fscore(output, target): + """ + Metric based label for multilabel classification + Returns: + precisions: + recalls: + fscores: + """ + + precisions, recalls, fscores, _ = precision_recall_fscore_support(target, output) + + return precisions, recalls, fscores + + +def mean_average_precision(logits, target): + """ + Calculate average precision + Args: + logits: probability from network before sigmoid or softmax + target: ground truth, 0 or 1 + """ + if not (isinstance(logits, np.ndarray) and isinstance(target, np.ndarray)): + raise TypeError("logits and target should be np.ndarray.") + + aps = [] + for i in range(target.shape[1]): + ap = average_precision_score(target[:, i], logits[:, i]) + aps.append(ap) + + return np.mean(aps) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/misc.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/misc.py new file mode 100644 index 000000000..b63da7c5f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/misc.py @@ -0,0 +1,155 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
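A minimal sketch of how the AverageMeter defined below in misc.py is typically driven from a training loop; the loss values and the batch size of 32 are illustrative placeholders, not taken from this patch:

    # Track a running loss across batches; `n` is the number of samples
    # contributing to each update, so `mean` is the sample-weighted average.
    loss_meter = AverageMeter('loss', fmt='.4f')
    for step_loss in (0.92, 0.85, 0.79):
        loss_meter.update(step_loss, n=32)
    print(loss_meter.mean)   # -> "loss: 0.8533"
    loss_meter.reset()       # start fresh for the next epoch
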
+ +import paddle + +__all__ = ['AverageMeter'] + + +class AverageMeter(object): + """ + Computes and stores the average and current value + Code was based on https://github.com/pytorch/examples/blob/master/imagenet/main.py + """ + + def __init__(self, name='', fmt='f', postfix="", need_avg=True): + self.name = name + self.fmt = fmt + self.postfix = postfix + self.need_avg = need_avg + self.reset() + + def reset(self): + """ reset """ + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + """ update """ + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + @property + def avg_info(self): + if isinstance(self.avg, paddle.Tensor): + self.avg = float(self.avg) + return "{}: {:.5f}".format(self.name, self.avg) + + @property + def total(self): + return '{self.name}_sum: {self.sum:{self.fmt}}{self.postfix}'.format( + self=self) + + @property + def total_minute(self): + return '{self.name} {s:{self.fmt}}{self.postfix} min'.format( + s=self.sum / 60, self=self) + + @property + def mean(self): + return '{self.name}: {self.avg:{self.fmt}}{self.postfix}'.format( + self=self) if self.need_avg else '' + + @property + def value(self): + return '{self.name}: {self.val:{self.fmt}}{self.postfix}'.format( + self=self) + + +class AttrMeter(object): + """ + Computes and stores the average and current value + Code was based on https://github.com/pytorch/examples/blob/master/imagenet/main.py + """ + + def __init__(self, threshold=0.5): + self.threshold = threshold + self.reset() + + def reset(self): + self.gt_pos = 0 + self.gt_neg = 0 + self.true_pos = 0 + self.true_neg = 0 + self.false_pos = 0 + self.false_neg = 0 + + self.gt_pos_ins = [] + self.true_pos_ins = [] + self.intersect_pos = [] + self.union_pos = [] + + def update(self, metric_dict): + self.gt_pos += metric_dict['gt_pos'] + self.gt_neg += metric_dict['gt_neg'] + self.true_pos += metric_dict['true_pos'] + self.true_neg += metric_dict['true_neg'] + self.false_pos += metric_dict['false_pos'] + self.false_neg += metric_dict['false_neg'] + + self.gt_pos_ins += metric_dict['gt_pos_ins'].tolist() + self.true_pos_ins += metric_dict['true_pos_ins'].tolist() + self.intersect_pos += metric_dict['intersect_pos'].tolist() + self.union_pos += metric_dict['union_pos'].tolist() + + def res(self): + import numpy as np + eps = 1e-20 + label_pos_recall = 1.0 * self.true_pos / ( + self.gt_pos + eps) # true positive + label_neg_recall = 1.0 * self.true_neg / ( + self.gt_neg + eps) # true negative + # mean accuracy + label_ma = (label_pos_recall + label_neg_recall) / 2 + + label_pos_recall = np.mean(label_pos_recall) + label_neg_recall = np.mean(label_neg_recall) + label_prec = (self.true_pos / (self.true_pos + self.false_pos + eps)) + label_acc = (self.true_pos / + (self.true_pos + self.false_pos + self.false_neg + eps)) + label_f1 = np.mean(2 * label_prec * label_pos_recall / + (label_prec + label_pos_recall + eps)) + + ma = (np.mean(label_ma)) + + self.gt_pos_ins = np.array(self.gt_pos_ins) + self.true_pos_ins = np.array(self.true_pos_ins) + self.intersect_pos = np.array(self.intersect_pos) + self.union_pos = np.array(self.union_pos) + instance_acc = self.intersect_pos / (self.union_pos + eps) + instance_prec = self.intersect_pos / (self.true_pos_ins + eps) + instance_recall = self.intersect_pos / (self.gt_pos_ins + eps) + instance_f1 = 2 * instance_prec * instance_recall / ( + instance_prec + instance_recall + eps) + + instance_acc = np.mean(instance_acc) + instance_prec = 
np.mean(instance_prec) + instance_recall = np.mean(instance_recall) + instance_f1 = 2 * instance_prec * instance_recall / ( + instance_prec + instance_recall + eps) + + instance_acc = np.mean(instance_acc) + instance_prec = np.mean(instance_prec) + instance_recall = np.mean(instance_recall) + instance_f1 = np.mean(instance_f1) + + res = [ + ma, label_f1, label_pos_recall, label_neg_recall, instance_f1, + instance_acc, instance_prec, instance_recall + ] + return res diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/model_zoo.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/model_zoo.py new file mode 100644 index 000000000..e9ab5992d --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/model_zoo.py @@ -0,0 +1,213 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import requests +import shutil +import tarfile +import tqdm +import zipfile + +from ..arch.utils import similar_architectures +from . import logger + +__all__ = ['get'] + +DOWNLOAD_RETRY_LIMIT = 3 + + +class UrlError(Exception): + """ UrlError + """ + + def __init__(self, url='', code=''): + message = "Downloading from {} failed with code {}!".format(url, code) + super(UrlError, self).__init__(message) + + +class ModelNameError(Exception): + """ ModelNameError + """ + + def __init__(self, message=''): + super(ModelNameError, self).__init__(message) + + +class RetryError(Exception): + """ RetryError + """ + + def __init__(self, url='', times=''): + message = "Download from {} failed. Retry({}) limit reached".format( + url, times) + super(RetryError, self).__init__(message) + + +def _get_url(architecture, postfix="pdparams"): + prefix = "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/" + fname = architecture + "_pretrained." + postfix + return prefix + fname + + +def _move_and_merge_tree(src, dst): + """ + Move src directory to dst, if dst is already exists, + merge src to dst + """ + if not os.path.exists(dst): + shutil.move(src, dst) + elif os.path.isfile(src): + shutil.move(src, dst) + else: + for fp in os.listdir(src): + src_fp = os.path.join(src, fp) + dst_fp = os.path.join(dst, fp) + if os.path.isdir(src_fp): + if os.path.isdir(dst_fp): + _move_and_merge_tree(src_fp, dst_fp) + else: + shutil.move(src_fp, dst_fp) + elif os.path.isfile(src_fp) and \ + not os.path.isfile(dst_fp): + shutil.move(src_fp, dst_fp) + + +def _download(url, path): + """ + Download from url, save to path. 
+ url (str): download url + path (str): download to given path + """ + if not os.path.exists(path): + os.makedirs(path) + + fname = os.path.split(url)[-1] + fullname = os.path.join(path, fname) + retry_cnt = 0 + + while not os.path.exists(fullname): + if retry_cnt < DOWNLOAD_RETRY_LIMIT: + retry_cnt += 1 + else: + raise RetryError(url, DOWNLOAD_RETRY_LIMIT) + + logger.info("Downloading {} from {}".format(fname, url)) + + req = requests.get(url, stream=True) + if req.status_code != 200: + raise UrlError(url, req.status_code) + + # For protecting download interupted, download to + # tmp_fullname firstly, move tmp_fullname to fullname + # after download finished + tmp_fullname = fullname + "_tmp" + total_size = req.headers.get('content-length') + with open(tmp_fullname, 'wb') as f: + if total_size: + for chunk in tqdm.tqdm( + req.iter_content(chunk_size=1024), + total=(int(total_size) + 1023) // 1024, + unit='KB'): + f.write(chunk) + else: + for chunk in req.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + shutil.move(tmp_fullname, fullname) + + return fullname + + +def _decompress(fname): + """ + Decompress for zip and tar file + """ + logger.info("Decompressing {}...".format(fname)) + + # For protecting decompressing interupted, + # decompress to fpath_tmp directory firstly, if decompress + # successed, move decompress files to fpath and delete + # fpath_tmp and remove download compress file. + fpath = os.path.split(fname)[0] + fpath_tmp = os.path.join(fpath, 'tmp') + if os.path.isdir(fpath_tmp): + shutil.rmtree(fpath_tmp) + os.makedirs(fpath_tmp) + + if fname.find('tar') >= 0: + with tarfile.open(fname) as tf: + tf.extractall(path=fpath_tmp) + elif fname.find('zip') >= 0: + with zipfile.ZipFile(fname) as zf: + zf.extractall(path=fpath_tmp) + else: + raise TypeError("Unsupport compress file type {}".format(fname)) + + fs = os.listdir(fpath_tmp) + assert len( + fs + ) == 1, "There should just be 1 pretrained path in an archive file but got {}.".format( + len(fs)) + + f = fs[0] + src_dir = os.path.join(fpath_tmp, f) + dst_dir = os.path.join(fpath, f) + _move_and_merge_tree(src_dir, dst_dir) + + shutil.rmtree(fpath_tmp) + os.remove(fname) + + return f + + +def _get_pretrained(): + with open('./ppcls/utils/pretrained.list') as flist: + pretrained = [line.strip() for line in flist] + return pretrained + + +def _check_pretrained_name(architecture): + assert isinstance(architecture, str), \ + ("the type of architecture({}) should be str". format(architecture)) + pretrained = _get_pretrained() + similar_names = similar_architectures(architecture, pretrained) + model_list = ', '.join(similar_names) + err = "{} is not exist! Maybe you want: [{}]" \ + "".format(architecture, model_list) + if architecture not in similar_names: + raise ModelNameError(err) + + +def list_models(): + pretrained = _get_pretrained() + msg = "All avialable pretrained models are as follows: {}".format( + pretrained) + logger.info(msg) + return + + +def get(architecture, path, decompress=False, postfix="pdparams"): + """ + Get the pretrained model. 
+ """ + _check_pretrained_name(architecture) + url = _get_url(architecture, postfix=postfix) + fname = _download(url, path) + if postfix == "tar" and decompress: + _decompress(fname) + logger.info("download {} finished ".format(fname)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/pedestrian_attribute_label_list.txt b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/pedestrian_attribute_label_list.txt new file mode 100644 index 000000000..af6e0df1c --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/pedestrian_attribute_label_list.txt @@ -0,0 +1,26 @@ +0 Hat(帽子) +1 Glasses(眼镜) +2 ShortSleeve(短袖) +3 LongSleeve(长袖) +4 UpperStride(上衣条纹) +5 UpperLogo(上衣有标志) +6 UpperPlaid(上衣格子) +7 UpperSplice(上衣拼接) +8 LowerStripe(裤子条纹) +9 LowerPattern(裤子图案) +10 LongCoat(长外套) +11 Trousers(长裤) +12 Shorts(短裤) +13 Skirt&Dress(裙子或连衣裙) +14 Boots(靴子) +15 HandBag(手提包) +16 ShoulderBag(单肩包) +17 Backpack(背包) +18 HoldObjectsInFront(手持物品在前) +19 AgeLess18(年龄小于18岁) +20 Age18-60(年龄在18-60岁之间) +21 AgeOver60(年龄大于60岁) +22 Female(女性) +23 Front(面朝前) +24 Side(侧面) +25 Back(背面) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/pretrained.list b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/pretrained.list new file mode 100644 index 000000000..36d70f5a2 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/pretrained.list @@ -0,0 +1,121 @@ +ResNet18 +ResNet34 +ResNet50 +ResNet101 +ResNet152 +ResNet50_vc +ResNet18_vd +ResNet34_vd +ResNet50_vd +ResNet50_vd_v2 +ResNet101_vd +ResNet152_vd +ResNet200_vd +ResNet50_vd_ssld +ResNet50_vd_ssld_v2 +Fix_ResNet50_vd_ssld_v2 +ResNet101_vd_ssld +MobileNetV3_large_x0_35 +MobileNetV3_large_x0_5 +MobileNetV3_large_x0_75 +MobileNetV3_large_x1_0 +MobileNetV3_large_x1_25 +MobileNetV3_small_x0_35 +MobileNetV3_small_x0_5 +MobileNetV3_small_x0_75 +MobileNetV3_small_x1_0 +MobileNetV3_small_x1_25 +MobileNetV3_large_x1_0_ssld +MobileNetV3_large_x1_0_ssld_int8 +MobileNetV3_small_x1_0_ssld +MobileNetV2_x0_25 +MobileNetV2_x0_5 +MobileNetV2_x0_75 +MobileNetV2 +MobileNetV2_x1_5 +MobileNetV2_x2_0 +MobileNetV2_ssld +MobileNetV1_x0_25 +MobileNetV1_x0_5 +MobileNetV1_x0_75 +MobileNetV1 +MobileNetV1_ssld +ShuffleNetV2_x0_25 +ShuffleNetV2_x0_33 +ShuffleNetV2_x0_5 +ShuffleNetV2 +ShuffleNetV2_x1_5 +ShuffleNetV2_x2_0 +ShuffleNetV2_swish +ResNeXt50_32x4d +ResNeXt50_64x4d +ResNeXt101_32x4d +ResNeXt101_64x4d +ResNeXt152_32x4d +ResNeXt152_64x4d +ResNeXt50_vd_32x4d +ResNeXt50_vd_64x4d +ResNeXt101_vd_32x4d +ResNeXt101_vd_64x4d +ResNeXt152_vd_32x4d +ResNeXt152_vd_64x4d +SE_ResNet18_vd +SE_ResNet34_vd +SE_ResNet50_vd +SE_ResNeXt50_32x4d +SE_ResNeXt101_32x4d +SE_ResNeXt50_vd_32x4d +SENet154_vd +Res2Net50_26w_4s +Res2Net50_vd_26w_4s +Res2Net50_14w_8s +Res2Net101_vd_26w_4s +Res2Net200_vd_26w_4s +GoogLeNet +InceptionV4 +Xception41 +Xception41_deeplab +Xception65 +Xception65_deeplab +Xception71 +HRNet_W18_C +HRNet_W30_C +HRNet_W32_C +HRNet_W40_C +HRNet_W44_C +HRNet_W48_C +HRNet_W64_C +DPN68 +DPN92 +DPN98 +DPN107 +DPN131 +DenseNet121 +DenseNet161 +DenseNet169 +DenseNet201 +DenseNet264 +EfficientNetB0_small +EfficientNetB0 +EfficientNetB1 +EfficientNetB2 +EfficientNetB3 +EfficientNetB4 +EfficientNetB5 +EfficientNetB6 +EfficientNetB7 +ResNeXt101_32x8d_wsl +ResNeXt101_32x16d_wsl +ResNeXt101_32x32d_wsl +ResNeXt101_32x48d_wsl +Fix_ResNeXt101_32x48d_wsl +AlexNet +SqueezeNet1_0 +SqueezeNet1_1 +VGG11 +VGG13 +VGG16 +VGG19 +DarkNet53_ImageNet1k +ResNet50_ACNet_deploy +CSPResNet50_leaky diff --git 
a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/profiler.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/profiler.py new file mode 100644 index 000000000..28ac46736 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/profiler.py @@ -0,0 +1,129 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import paddle +import paddle.profiler as profiler + +# A global variable to record the number of calling times for profiler +# functions. It is used to specify the tracing range of training steps. +_profiler_step_id = 0 + +# A global variable to avoid parsing from string every time. +_profiler_options = None +_prof = None + +class ProfilerOptions(object): + ''' + Use a string to initialize a ProfilerOptions. + The string should be in the format: "key1=value1;key2=value;key3=value3". + For example: + "profile_path=model.profile" + "batch_range=[50, 60]; profile_path=model.profile" + "batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile" + + ProfilerOptions supports following key-value pair: + batch_range - a integer list, e.g. [100, 110]. + state - a string, the optional values are 'CPU', 'GPU' or 'All'. + sorted_key - a string, the optional values are 'calls', 'total', + 'max', 'min' or 'ave. + tracer_option - a string, the optional values are 'Default', 'OpDetail', + 'AllOpDetail'. + profile_path - a string, the path to save the serialized profile data, + which can be used to generate a timeline. + exit_on_finished - a boolean. + ''' + + def __init__(self, options_str): + assert isinstance(options_str, str) + + self._options = { + 'batch_range': [10, 20], + 'state': 'All', + 'sorted_key': 'total', + 'tracer_option': 'Default', + 'profile_path': '/tmp/profile', + 'exit_on_finished': True, + 'timer_only': True + } + self._parse_from_string(options_str) + + def _parse_from_string(self, options_str): + for kv in options_str.replace(' ', '').split(';'): + key, value = kv.split('=') + if key == 'batch_range': + value_list = value.replace('[', '').replace(']', '').split(',') + value_list = list(map(int, value_list)) + if len(value_list) >= 2 and value_list[0] >= 0 and value_list[ + 1] > value_list[0]: + self._options[key] = value_list + elif key == 'exit_on_finished': + self._options[key] = value.lower() in ("yes", "true", "t", "1") + elif key in [ + 'state', 'sorted_key', 'tracer_option', 'profile_path' + ]: + self._options[key] = value + elif key == 'timer_only': + self._options[key] = value + + def __getitem__(self, name): + if self._options.get(name, None) is None: + raise ValueError( + "ProfilerOptions does not have an option named %s." % name) + return self._options[name] + + +def add_profiler_step(options_str=None): + ''' + Enable the operator-level timing using PaddlePaddle's profiler. + The profiler uses a independent variable to count the profiler steps. + One call of this function is treated as a profiler step. 
+ Args: + profiler_options - a string to initialize the ProfilerOptions. + Default is None, and the profiler is disabled. + ''' + if options_str is None: + return + + global _prof + global _profiler_step_id + global _profiler_options + + if _profiler_options is None: + _profiler_options = ProfilerOptions(options_str) + # profile : https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/performance_improving/profiling_model.html#chakanxingnengshujudetongjibiaodan + # timer_only = True only the model's throughput and time overhead are displayed + # timer_only = False calling summary can print a statistical form that presents performance data from different perspectives. + # timer_only = False the output Timeline information can be found in the profiler_log directory + if _prof is None: + _timer_only = str(_profiler_options['timer_only']) == str(True) + _prof = profiler.Profiler( + scheduler = (_profiler_options['batch_range'][0], _profiler_options['batch_range'][1]), + on_trace_ready = profiler.export_chrome_tracing('./profiler_log'), + timer_only = _timer_only) + _prof.start() + else: + _prof.step() + + if _profiler_step_id == _profiler_options['batch_range'][1]: + _prof.stop() + _prof.summary( + op_detail=True, + thread_sep=False, + time_unit='ms') + _prof = None + if _profiler_options['exit_on_finished']: + sys.exit(0) + + _profiler_step_id += 1 diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/save_load.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/save_load.py new file mode 100644 index 000000000..13ccab2ad --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/save_load.py @@ -0,0 +1,225 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import errno +import os +import json + +import paddle +from . 
import logger +from .download import get_weights_path_from_url + +__all__ = ['init_model', 'save_model', 'load_dygraph_pretrain'] + + +def _mkdir_if_not_exist(path): + """ + mkdir if not exists, ignore the exception when multiprocess mkdir together + """ + if not os.path.exists(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno == errno.EEXIST and os.path.isdir(path): + logger.warning( + 'be happy if some process has already created {}'.format( + path)) + else: + raise OSError('Failed to mkdir {}'.format(path)) + + +def _extract_student_weights(all_params, student_prefix="Student."): + s_params = { + key[len(student_prefix):]: all_params[key] + for key in all_params if student_prefix in key + } + return s_params + + +def _set_ssld_pretrained(pretrained_path, + use_ssld=False, + use_ssld_stage1_pretrained=False): + if use_ssld and "ssld" not in pretrained_path: + pretrained_path = pretrained_path.replace("_pretrained", + "_ssld_pretrained") + if use_ssld_stage1_pretrained and "ssld" in pretrained_path: + pretrained_path = pretrained_path.replace("ssld_pretrained", + "ssld_stage1_pretrained") + return pretrained_path + + +def load_dygraph_pretrain(model, + pretrained_path, + use_ssld=False, + use_ssld_stage1_pretrained=False, + use_imagenet22k_pretrained=False, + use_imagenet22kto1k_pretrained=False): + if pretrained_path.startswith(("http://", "https://")): + pretrained_path = _set_ssld_pretrained( + pretrained_path, + use_ssld=use_ssld, + use_ssld_stage1_pretrained=use_ssld_stage1_pretrained) + if use_imagenet22k_pretrained: + pretrained_path = pretrained_path.replace("_pretrained", + "_22k_pretrained") + if use_imagenet22kto1k_pretrained: + pretrained_path = pretrained_path.replace("_pretrained", + "_22kto1k_pretrained") + pretrained_path = get_weights_path_from_url(pretrained_path) + if not pretrained_path.endswith('.pdparams'): + pretrained_path = pretrained_path + '.pdparams' + if not os.path.exists(pretrained_path): + raise ValueError("Model pretrain path {} does not " + "exists.".format(pretrained_path)) + param_state_dict = paddle.load(pretrained_path) + if isinstance(model, list): + for m in model: + if hasattr(m, 'set_dict'): + m.set_dict(param_state_dict) + else: + model.set_dict(param_state_dict) + logger.info("Finish load pretrained model from {}".format(pretrained_path)) + return + + +def load_distillation_model(model, pretrained_model): + logger.info("In distillation mode, teacher model will be " + "loaded firstly before student model.") + + if not isinstance(pretrained_model, list): + pretrained_model = [pretrained_model] + + teacher = model.teacher if hasattr(model, + "teacher") else model._layers.teacher + student = model.student if hasattr(model, + "student") else model._layers.student + load_dygraph_pretrain(teacher, path=pretrained_model[0]) + logger.info("Finish initing teacher model from {}".format(pretrained_model)) + # load student model + if len(pretrained_model) >= 2: + load_dygraph_pretrain(student, path=pretrained_model[1]) + logger.info("Finish initing student model from {}".format( + pretrained_model)) + + +def init_model(config, + net, + optimizer=None, + loss: paddle.nn.Layer=None, + ema=None): + """ + load model from checkpoint or pretrained_model + """ + checkpoints = config.get('checkpoints') + if checkpoints and optimizer is not None: + assert os.path.exists(checkpoints + ".pdparams"), \ + "Given dir {}.pdparams not exist.".format(checkpoints) + assert os.path.exists(checkpoints + ".pdopt"), \ + "Given dir {}.pdopt not 
exist.".format(checkpoints) + # load state dict + opti_dict = paddle.load(checkpoints + ".pdopt") + metric_dict = paddle.load(checkpoints + ".pdstates") + if ema is not None: + assert os.path.exists(checkpoints + ".pdema"), \ + "Given dir {}.pdema not exist.".format(checkpoints) + para_dict = paddle.load(checkpoints + ".pdema") + para_ema_dict = paddle.load(checkpoints + ".pdparams") + ema.set_state_dict(para_ema_dict) + else: + para_dict = paddle.load(checkpoints + ".pdparams") + metric_dict["metric"] = 0.0 + # set state dict + net.set_state_dict(para_dict) + loss.set_state_dict(para_dict) + for i in range(len(optimizer)): + optimizer[i].set_state_dict(opti_dict[i] if isinstance( + opti_dict, list) else opti_dict) + logger.info("Finish load checkpoints from {}".format(checkpoints)) + return metric_dict + + pretrained_model = config.get('pretrained_model') + use_distillation = config.get('use_distillation', False) + if pretrained_model: + if use_distillation: + load_distillation_model(net, pretrained_model) + else: # common load + load_dygraph_pretrain(net, path=pretrained_model) + logger.info("Finish load pretrained model from {}".format( + pretrained_model)) + + +def save_model(net, + optimizer, + metric_info, + model_path, + ema=None, + model_name="", + prefix='ppcls', + loss: paddle.nn.Layer=None, + save_student_model=False): + """ + save model to the target path + """ + if paddle.distributed.get_rank() != 0: + return + + if prefix == 'best_model': + best_model_path = os.path.join(model_path, 'best_model') + _mkdir_if_not_exist(best_model_path) + + _mkdir_if_not_exist(model_path) + model_path = os.path.join(model_path, prefix) + + params_state_dict = net.state_dict() + if loss is not None: + loss_state_dict = loss.state_dict() + keys_inter = set(params_state_dict.keys()) & set(loss_state_dict.keys()) + assert len(keys_inter) == 0, \ + f"keys in model and loss state_dict must be unique, but got intersection {keys_inter}" + params_state_dict.update(loss_state_dict) + + if save_student_model: + s_params = _extract_student_weights(params_state_dict) + if len(s_params) > 0: + paddle.save(s_params, model_path + "_student.pdparams") + if ema is not None: + paddle.save(params_state_dict, model_path + ".pdema") + paddle.save(ema.state_dict(), model_path + ".pdparams") + else: + paddle.save(params_state_dict, model_path + ".pdparams") + + if prefix == 'best_model': + best_model_path = os.path.join(best_model_path, 'model') + paddle.save(params_state_dict, best_model_path + ".pdparams") + paddle.save([opt.state_dict() for opt in optimizer], model_path + ".pdopt") + paddle.save(metric_info, model_path + ".pdstates") + logger.info("Already save model in {}".format(model_path)) + + +def save_model_info(model_info, save_path, prefix): + """ + save model info to the target path + """ + if paddle.distributed.get_rank() != 0: + return + save_path = os.path.join(save_path, prefix) + if not os.path.exists(save_path): + os.makedirs(save_path) + with open(os.path.join(save_path, f'{prefix}.info.json'), 'w') as f: + json.dump(model_info, f) + logger.info("Already save model info in {}".format(save_path)) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/save_result.py b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/save_result.py new file mode 100644 index 000000000..f7613db36 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/save_result.py @@ -0,0 +1,102 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import json +import yaml +import paddle + +from . import logger + + +def save_predict_result(save_path, result): + if os.path.splitext(save_path)[-1] == '': + if save_path[-1] == "/": + save_path = save_path[:-1] + save_path = save_path + '.json' + elif os.path.splitext(save_path)[-1] == '.json': + save_path = save_path + else: + raise Exception( + f"{save_path} is invalid input path, only files in json format are supported." + ) + + if os.path.exists(save_path): + logger.warning(f"The file {save_path} will be overwritten.") + with open(save_path, 'w', encoding='utf-8') as f: + json.dump(result, f) + + +def update_train_results(config, + prefix, + metric_info, + done_flag=False, + last_num=5, + ema=False): + + if paddle.distributed.get_rank() != 0: + return + + assert last_num >= 1 + train_results_path = os.path.join(config["Global"]["output_dir"], + "train_results.json") + save_model_tag = ["pdparams", "pdopt", "pdstates"] + save_inference_tag = [ + "inference_config", "pdmodel", "pdiparams", "pdiparams.info" + ] + if ema: + save_model_tag.append("pdema") + if os.path.exists(train_results_path): + with open(train_results_path, "r") as fp: + train_results = json.load(fp) + else: + train_results = {} + train_results["model_name"] = config["Global"].get("pdx_model_name", + None) + if config.get("infer", None): + train_results["label_dict"] = config["Infer"]["PostProcess"].get( + "class_id_map_file", "") + else: + train_results["label_dict"] = "" + train_results["train_log"] = "train.log" + train_results["visualdl_log"] = "" + train_results["config"] = "config.yaml" + train_results["models"] = {} + for i in range(1, last_num + 1): + train_results["models"][f"last_{i}"] = {} + train_results["models"]["best"] = {} + train_results["done_flag"] = done_flag + if prefix == "best_model": + train_results["models"]["best"]["score"] = metric_info["metric"] + for tag in save_model_tag: + train_results["models"]["best"][tag] = os.path.join( + prefix, f"{prefix}.{tag}") + for tag in save_inference_tag: + train_results["models"]["best"][tag] = os.path.join( + prefix, "inference", f"inference.{tag}" + if tag != "inference_config" else "inference.yml") + else: + for i in range(last_num - 1, 0, -1): + train_results["models"][f"last_{i + 1}"] = train_results["models"][ + f"last_{i}"].copy() + train_results["models"][f"last_{1}"]["score"] = metric_info["metric"] + for tag in save_model_tag: + train_results["models"][f"last_{1}"][tag] = os.path.join( + prefix, f"{prefix}.{tag}") + for tag in save_inference_tag: + train_results["models"][f"last_{1}"][tag] = os.path.join( + prefix, "inference", f"inference.{tag}" + if tag != "inference_config" else "inference.yml") + + with open(train_results_path, "w") as fp: + json.dump(train_results, fp) diff --git a/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/vehicle_attribute_label_list.txt b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/vehicle_attribute_label_list.txt new file mode 100644 index 
000000000..03ad382fe --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/ppcls_2.6/utils/vehicle_attribute_label_list.txt @@ -0,0 +1,19 @@ +0 yellow(黄色) +1 orange(橙色) +2 green(绿色) +3 gray(灰色) +4 red(红色) +5 blue(蓝色) +6 white(白色) +7 golden(金色) +8 brown(棕色) +9 black(黑色) +10 sedan(轿车) +11 suv(SUV) +12 van(厢式车) +13 hatchback(掀背车) +14 mpv(多用途车) +15 pickup(皮卡) +16 bus(公共汽车) +17 truck(卡车) +18 estate(旅行车) diff --git a/cv/classification/resnet50/paddlepaddle/requirements.txt b/cv/classification/resnet50/paddlepaddle/requirements.txt new file mode 100644 index 000000000..9d896504f --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/requirements.txt @@ -0,0 +1,11 @@ +prettytable +ujson +opencv-python<=4.6.0.66 +pillow>=9.0.0 +tqdm +PyYAML>=5.1 +visualdl>=2.2.0 +scipy>=1.0.0 +scikit-learn>=0.21.0 +gast==0.3.3 +easydict \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/run_resnet50.sh b/cv/classification/resnet50/paddlepaddle/run_resnet50.sh new file mode 100644 index 000000000..d1edf8e97 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/run_resnet50.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +export PYTHONPATH=./:${PYTHONPATH} + +pip3 install -r requirements.txt + +python3 train.py -c ./ppcls/configs/quick_start/ResNet50_vd.yaml +exit $? \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/run_resnet50_dist.sh b/cv/classification/resnet50/paddlepaddle/run_resnet50_dist.sh new file mode 100644 index 000000000..8b5ad479e --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/run_resnet50_dist.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +export PYTHONPATH=./:${PYTHONPATH} + +pip3 install -r requirements.txt + +python3 -m paddle.distributed.launch -ips=127.0.0.1 train.py -c ./ppcls/configs/quick_start/ResNet50_vd.yaml +exit $? \ No newline at end of file diff --git a/cv/classification/resnet50/paddlepaddle/train.py b/cv/classification/resnet50/paddlepaddle/train.py new file mode 100644 index 000000000..3eab7fdd0 --- /dev/null +++ b/cv/classification/resnet50/paddlepaddle/train.py @@ -0,0 +1,40 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
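For reference, the new train.py below is driven by the run scripts added in this same patch; a sketch of the typical invocations (config path as used in run_resnet50.sh and run_resnet50_dist.sh, and BATCH_SIZE=64 as an arbitrary example value for the environment override handled in train.py):

    # single-device run (as in run_resnet50.sh)
    python3 train.py -c ./ppcls/configs/quick_start/ResNet50_vd.yaml

    # multi-device run via paddle.distributed.launch (as in run_resnet50_dist.sh)
    python3 -m paddle.distributed.launch -ips=127.0.0.1 train.py -c ./ppcls/configs/quick_start/ResNet50_vd.yaml

    # override the sampler batch size through the environment,
    # picked up by the BATCH_SIZE branch in train.py below
    BATCH_SIZE=64 python3 train.py -c ./ppcls/configs/quick_start/ResNet50_vd.yaml
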
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../'))) + +from ppcls.utils import config +from ppcls.engine.engine import Engine + +if __name__ == "__main__": + args = config.parse_args() + config = config.get_config( + args.config, overrides=args.override, show=False) + config.profiler_options = args.profiler_options + if "BATCH_SIZE" in os.environ: + config["DataLoader"]["sampler"]["batch_size"] = int(os.environ["BATCH_SIZE"]) + + try: + from dltest import show_training_arguments + show_training_arguments([args, config]) + except: + pass + engine = Engine(config, mode="train") + engine.train() diff --git a/tests/executables/resnet/init_paddle.sh b/tests/executables/resnet/init_paddle.sh index 39528b7a4..fdb475bd8 100644 --- a/tests/executables/resnet/init_paddle.sh +++ b/tests/executables/resnet/init_paddle.sh @@ -13,7 +13,7 @@ if [ ! -d "${DATASET_DIR}/flowers102" ]; then tar zxf ${DATASET_DIR}/flowers102.tgz -C ${DATASET_DIR} fi -RESNET_PADDLE_DIR=${PRJ_DIR}/official/cv/classification/resnet/paddle +RESNET_PADDLE_DIR=${PRJ_DIR}/cv/classification/resnet50/paddlepaddle cd ${RESNET_PADDLE_DIR} pip3 install -r requirements.txt diff --git a/tests/executables/resnet/train_resnet50_dist_paddle.sh b/tests/executables/resnet/train_resnet50_dist_paddle.sh index e3aaf503c..b4dccfde7 100644 --- a/tests/executables/resnet/train_resnet50_dist_paddle.sh +++ b/tests/executables/resnet/train_resnet50_dist_paddle.sh @@ -8,7 +8,7 @@ if [[ -d ${OUTPUT_DIR} ]]; then mkdir -p ${OUTPUT_DIR} fi -RESNET_PADDLE_DIR=${PROJECT_DIR}/official/cv/classification/resnet/paddle +RESNET_PADDLE_DIR=${PROJECT_DIR}/cv/classification/resnet50/paddlepaddle/ cd ${RESNET_PADDLE_DIR} ixdltest-check --nonstrict_mode_args="--epoch ${NONSTRICT_EPOCH}" -b 8 --run_script \ -- Gitee From 4d292e3a7126b984321e5f96415b31333703f94e Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 14:56:03 +0800 Subject: [PATCH 15/18] sync ssd pytorch all --- .../ssd/pytorch/{ci/prepare.sh => Dockerfile} | 52 +- cv/detection/ssd/pytorch/async_evaluator.py | 95 +++ cv/detection/ssd/pytorch/base/BaseDockerfile | 58 -- .../ssd/pytorch/base/config/__init__.py | 4 - cv/detection/ssd/pytorch/base/config/_base.py | 143 ----- .../ssd/pytorch/base/config/config_manager.py | 165 ------ .../ssd/pytorch/base/config/mutable_params.py | 23 - .../ssd/pytorch/base/dataloaders/__init__.py | 1 - .../pytorch/base/dataloaders/dataloader.py | 13 - .../base/dataloaders/native_pipeline.py | 119 ---- .../ssd/pytorch/base/model/__init__.py | 22 - .../ssd/pytorch/base/model/layers/__init__.py | 3 - .../ssd/pytorch/base/model/losses/__init__.py | 2 - .../ssd/pytorch/base/model/models/__init__.py | 0 .../ssd/pytorch/base/model/models/resnet.py | 291 --------- .../ssd/pytorch/base/model/models/ssd300.py | 141 ----- .../ssd/pytorch/base/optimizers/__init__.py | 1 - .../ssd/pytorch/base/optimizers/factory.py | 11 - cv/detection/ssd/pytorch/base/prepare.py | 316 ---------- cv/detection/ssd/pytorch/base/run_train.py | 123 ---- cv/detection/ssd/pytorch/base/run_training.sh | 43 -- .../ssd/pytorch/base/run_with_docker.sh | 224 ------- cv/detection/ssd/pytorch/base/setup.py | 102 ---- .../pytorch/base/test/dali_dataloader_test.py | 48 -- .../ssd/pytorch/base/test/dataloader_test.py | 47 -- .../ssd/pytorch/base/train/__init__.py | 0 .../ssd/pytorch/base/train/evaluator.py | 
116 ---- .../ssd/pytorch/base/train/event/__init__.py | 4 - .../ssd/pytorch/base/train/event/base.py | 64 -- .../pytorch/base/train/event/base_adapter.py | 30 - .../ssd/pytorch/base/train/event/compose.py | 109 ---- .../ssd/pytorch/base/train/event/log.py | 145 ----- .../ssd/pytorch/base/train/trainer.py | 229 -------- .../ssd/pytorch/base/train/training_state.py | 53 -- .../ssd/pytorch/base/utils/__init__.py | 17 - cv/detection/ssd/pytorch/base/utils/check.py | 72 --- cv/detection/ssd/pytorch/base/utils/dist.py | 165 ------ .../ssd/pytorch/base/utils/logging.py | 235 -------- cv/detection/ssd/pytorch/base/utils/paths.py | 18 - .../model/losses/loss.py => base_model.py} | 2 +- cv/detection/ssd/pytorch/bind.sh | 212 +++++++ .../ssd/pytorch/{base => }/bind_launch.py | 70 +-- .../ssd/pytorch/{base => }/box_coder.py | 119 ++-- cv/detection/ssd/pytorch/build_ssd.sh | 25 + cv/detection/ssd/pytorch/clean_ssd.sh | 10 + .../csrc => csrc_pt1}/box_encoder_cuda.cu | 6 +- .../{iluvatar/csrc => csrc_pt1}/interface.cpp | 0 .../csrc => csrc_pt1}/nhwc/Descriptors.cpp | 0 .../csrc => csrc_pt1}/nhwc/Descriptors.h | 1 + .../csrc => csrc_pt1}/nhwc/Exceptions.h | 0 .../csrc => csrc_pt1}/nhwc/ParamsHash.h | 0 .../csrc => csrc_pt1}/nhwc/batch_norm.cu | 24 +- .../{nvidia/csrc => csrc_pt1}/nhwc/conv.cpp | 24 +- .../csrc => csrc_pt1}/nhwc/max_pool.cu | 2 +- .../pytorch/{nvidia/csrc => csrc_pt1}/nms.cu | 5 +- .../csrc => csrc_pt1}/random_horiz_flip.cu | 6 +- .../csrc => csrc_pt2}/box_encoder_cuda.cu | 6 +- .../{nvidia/csrc => csrc_pt2}/interface.cpp | 0 .../csrc => csrc_pt2}/nhwc/Descriptors.cpp | 2 +- .../csrc => csrc_pt2}/nhwc/Descriptors.h | 3 + .../csrc => csrc_pt2}/nhwc/Exceptions.h | 0 .../csrc => csrc_pt2}/nhwc/ParamsHash.h | 0 .../csrc => csrc_pt2}/nhwc/batch_norm.cu | 24 +- .../{iluvatar/csrc => csrc_pt2}/nhwc/conv.cpp | 26 +- .../csrc => csrc_pt2}/nhwc/max_pool.cu | 2 +- .../{iluvatar/csrc => csrc_pt2}/nms.cu | 5 +- .../csrc => csrc_pt2}/random_horiz_flip.cu | 8 +- .../dataloaders => data}/build_pipeline.py | 37 +- .../dataloaders => data}/dali_iterator.py | 17 +- .../dataloaders => data}/dali_pipeline.py | 33 +- .../dataloaders => data}/input_iterators.py | 4 + .../ssd/pytorch/data/native_pipeline.py | 165 ++++++ .../{base/dataloaders => data}/prefetcher.py | 0 .../{base/dataloaders => data}/sampler.py | 0 .../pytorch/default/config/config_V100x1x1.py | 32 - .../pytorch/default/config/config_V100x1x8.py | 31 - .../default/config/config_nv_V100x1x8.py | 32 - .../pytorch/default/config/training_event.py | 79 --- cv/detection/ssd/pytorch/download_dataset.sh | 25 + cv/detection/ssd/pytorch/eval.py | 258 ++++++++ .../pytorch/{base => }/fused_color_jitter.py | 0 .../iluvatar/config/config_V100x1x1.py | 44 -- .../iluvatar/config/config_V100x1x8.py | 44 -- .../iluvatar/config/config_V100x1x8_2.1.0.py | 44 -- .../iluvatar/config/config_V100x1x8_wsl.py | 44 -- .../iluvatar/config/config_nodali_V100x1x8.py | 44 -- .../iluvatar/config/config_nv_V100x1x8.py | 44 -- .../ssd/pytorch/iluvatar/config/converter.py | 15 - .../iluvatar/config/environment_variables.sh | 10 - .../ssd/pytorch/iluvatar/config/nhwc/conv.py | 99 ---- .../iluvatar/config/nhwc/test_bn_cudnn.py | 93 --- .../ssd/pytorch/iluvatar/config/ssd300.py | 230 -------- .../pytorch/iluvatar/config/training_event.py | 103 ---- cv/detection/ssd/pytorch/iluvatar/reset.sh | 22 - cv/detection/ssd/pytorch/install_ssd.sh | 33 ++ .../nhwc/test_conv.py => master_params.py} | 64 +- cv/detection/ssd/pytorch/mlperf_log_utils.py | 34 ++ 
cv/detection/ssd/pytorch/mlperf_logger.py | 101 ++++ .../{iluvatar/config => }/nhwc/batch_norm.py | 0 cv/detection/ssd/pytorch/nhwc/cifar10_nhwc.py | 229 ++++++++ .../pytorch/{nvidia/config => }/nhwc/conv.py | 12 +- .../{iluvatar/config => }/nhwc/max_pool.py | 0 cv/detection/ssd/pytorch/nhwc/mnist_nhwc.py | 133 +++++ .../ssd/pytorch/nhwc/resnet_nhwc_cifar10.py | 135 +++++ .../{nvidia/config => }/nhwc/test_bn_cudnn.py | 8 +- .../{nvidia/config => }/nhwc/test_conv.py | 6 +- .../config => }/nhwc/test_max_pool.py | 7 +- .../ssd/pytorch/nvidia/config/Dockerfile | 14 - .../pytorch/nvidia/config/config_V100x1x1.py | 44 -- .../pytorch/nvidia/config/config_V100x1x8.py | 44 -- .../nvidia/config/config_nodali_V100x1x8.py | 44 -- .../ssd/pytorch/nvidia/config/converter.py | 15 - .../nvidia/config/environment_variables.sh | 10 - .../pytorch/nvidia/config/nhwc/batch_norm.py | 77 --- .../pytorch/nvidia/config/nhwc/max_pool.py | 60 -- .../nvidia/config/nhwc/test_max_pool.py | 69 --- .../ssd/pytorch/nvidia/config/resnet.py | 236 -------- .../pytorch/nvidia/config/training_event.py | 103 ---- cv/detection/ssd/pytorch/nvidia/reset.sh | 9 - cv/detection/ssd/pytorch/nvidia/setup.py | 105 ---- .../{base/model/losses => }/opt_loss.py | 14 + cv/detection/ssd/pytorch/parse_config.py | 172 ++++++ .../prepare_json.py => prepare-json.py} | 0 .../ssd/pytorch/{base => }/requirements.txt | 4 +- .../pytorch/{iluvatar/config => }/resnet.py | 39 +- cv/detection/ssd/pytorch/run.sub | 62 ++ cv/detection/ssd/pytorch/run_and_time.sh | 82 +++ cv/detection/ssd/pytorch/run_ddp_mm.sh | 126 ++++ cv/detection/ssd/pytorch/run_with_docker.sh | 68 +++ .../ssd/pytorch/{iluvatar => }/setup.py | 0 .../ssd/pytorch/{nvidia/config => }/ssd300.py | 9 +- cv/detection/ssd/pytorch/test.py | 204 +++++++ .../pytorch/{base => }/test/box_coder_test.py | 34 +- .../{base => }/test/cuda_encoder_test.py | 30 +- .../pytorch/{base => }/test/opt_loss_test.py | 30 +- cv/detection/ssd/pytorch/train.py | 554 ++++++++++++++++++ .../{base/dataloaders/util.py => utils.py} | 416 +++++++++---- cv/detection/ssd/pytorch/visualize.py | 174 ++++++ tests/executables/ssd/init_torch.sh | 2 +- tests/executables/ssd/train_ssd_amp_torch.sh | 2 +- 140 files changed, 3561 insertions(+), 5488 deletions(-) rename cv/detection/ssd/pytorch/{ci/prepare.sh => Dockerfile} (32%) create mode 100644 cv/detection/ssd/pytorch/async_evaluator.py delete mode 100644 cv/detection/ssd/pytorch/base/BaseDockerfile delete mode 100644 cv/detection/ssd/pytorch/base/config/__init__.py delete mode 100644 cv/detection/ssd/pytorch/base/config/_base.py delete mode 100644 cv/detection/ssd/pytorch/base/config/config_manager.py delete mode 100644 cv/detection/ssd/pytorch/base/config/mutable_params.py delete mode 100644 cv/detection/ssd/pytorch/base/dataloaders/__init__.py delete mode 100644 cv/detection/ssd/pytorch/base/dataloaders/dataloader.py delete mode 100644 cv/detection/ssd/pytorch/base/dataloaders/native_pipeline.py delete mode 100644 cv/detection/ssd/pytorch/base/model/__init__.py delete mode 100644 cv/detection/ssd/pytorch/base/model/layers/__init__.py delete mode 100644 cv/detection/ssd/pytorch/base/model/losses/__init__.py delete mode 100644 cv/detection/ssd/pytorch/base/model/models/__init__.py delete mode 100644 cv/detection/ssd/pytorch/base/model/models/resnet.py delete mode 100644 cv/detection/ssd/pytorch/base/model/models/ssd300.py delete mode 100644 cv/detection/ssd/pytorch/base/optimizers/__init__.py delete mode 100644 cv/detection/ssd/pytorch/base/optimizers/factory.py delete mode 
100644 cv/detection/ssd/pytorch/base/prepare.py delete mode 100644 cv/detection/ssd/pytorch/base/run_train.py delete mode 100644 cv/detection/ssd/pytorch/base/run_training.sh delete mode 100644 cv/detection/ssd/pytorch/base/run_with_docker.sh delete mode 100644 cv/detection/ssd/pytorch/base/setup.py delete mode 100644 cv/detection/ssd/pytorch/base/test/dali_dataloader_test.py delete mode 100644 cv/detection/ssd/pytorch/base/test/dataloader_test.py delete mode 100644 cv/detection/ssd/pytorch/base/train/__init__.py delete mode 100644 cv/detection/ssd/pytorch/base/train/evaluator.py delete mode 100644 cv/detection/ssd/pytorch/base/train/event/__init__.py delete mode 100644 cv/detection/ssd/pytorch/base/train/event/base.py delete mode 100644 cv/detection/ssd/pytorch/base/train/event/base_adapter.py delete mode 100644 cv/detection/ssd/pytorch/base/train/event/compose.py delete mode 100644 cv/detection/ssd/pytorch/base/train/event/log.py delete mode 100644 cv/detection/ssd/pytorch/base/train/trainer.py delete mode 100644 cv/detection/ssd/pytorch/base/train/training_state.py delete mode 100644 cv/detection/ssd/pytorch/base/utils/__init__.py delete mode 100644 cv/detection/ssd/pytorch/base/utils/check.py delete mode 100644 cv/detection/ssd/pytorch/base/utils/dist.py delete mode 100644 cv/detection/ssd/pytorch/base/utils/logging.py delete mode 100644 cv/detection/ssd/pytorch/base/utils/paths.py rename cv/detection/ssd/pytorch/{base/model/losses/loss.py => base_model.py} (100%) create mode 100644 cv/detection/ssd/pytorch/bind.sh rename cv/detection/ssd/pytorch/{base => }/bind_launch.py (75%) rename cv/detection/ssd/pytorch/{base => }/box_coder.py (76%) create mode 100644 cv/detection/ssd/pytorch/build_ssd.sh create mode 100644 cv/detection/ssd/pytorch/clean_ssd.sh rename cv/detection/ssd/pytorch/{iluvatar/csrc => csrc_pt1}/box_encoder_cuda.cu (99%) rename cv/detection/ssd/pytorch/{iluvatar/csrc => csrc_pt1}/interface.cpp (100%) rename cv/detection/ssd/pytorch/{nvidia/csrc => csrc_pt1}/nhwc/Descriptors.cpp (100%) rename cv/detection/ssd/pytorch/{iluvatar/csrc => csrc_pt1}/nhwc/Descriptors.h (99%) rename cv/detection/ssd/pytorch/{iluvatar/csrc => csrc_pt1}/nhwc/Exceptions.h (100%) rename cv/detection/ssd/pytorch/{iluvatar/csrc => csrc_pt1}/nhwc/ParamsHash.h (100%) rename cv/detection/ssd/pytorch/{nvidia/csrc => csrc_pt1}/nhwc/batch_norm.cu (96%) rename cv/detection/ssd/pytorch/{nvidia/csrc => csrc_pt1}/nhwc/conv.cpp (98%) rename cv/detection/ssd/pytorch/{nvidia/csrc => csrc_pt1}/nhwc/max_pool.cu (99%) rename cv/detection/ssd/pytorch/{nvidia/csrc => csrc_pt1}/nms.cu (99%) rename cv/detection/ssd/pytorch/{nvidia/csrc => csrc_pt1}/random_horiz_flip.cu (98%) rename cv/detection/ssd/pytorch/{nvidia/csrc => csrc_pt2}/box_encoder_cuda.cu (99%) rename cv/detection/ssd/pytorch/{nvidia/csrc => csrc_pt2}/interface.cpp (100%) rename cv/detection/ssd/pytorch/{iluvatar/csrc => csrc_pt2}/nhwc/Descriptors.cpp (99%) rename cv/detection/ssd/pytorch/{nvidia/csrc => csrc_pt2}/nhwc/Descriptors.h (99%) rename cv/detection/ssd/pytorch/{nvidia/csrc => csrc_pt2}/nhwc/Exceptions.h (100%) rename cv/detection/ssd/pytorch/{nvidia/csrc => csrc_pt2}/nhwc/ParamsHash.h (100%) rename cv/detection/ssd/pytorch/{iluvatar/csrc => csrc_pt2}/nhwc/batch_norm.cu (96%) rename cv/detection/ssd/pytorch/{iluvatar/csrc => csrc_pt2}/nhwc/conv.cpp (98%) rename cv/detection/ssd/pytorch/{iluvatar/csrc => csrc_pt2}/nhwc/max_pool.cu (99%) rename cv/detection/ssd/pytorch/{iluvatar/csrc => csrc_pt2}/nms.cu (99%) rename 
cv/detection/ssd/pytorch/{iluvatar/csrc => csrc_pt2}/random_horiz_flip.cu (97%) rename cv/detection/ssd/pytorch/{base/dataloaders => data}/build_pipeline.py (58%) rename cv/detection/ssd/pytorch/{base/dataloaders => data}/dali_iterator.py (94%) rename cv/detection/ssd/pytorch/{base/dataloaders => data}/dali_pipeline.py (42%) rename cv/detection/ssd/pytorch/{base/dataloaders => data}/input_iterators.py (99%) create mode 100644 cv/detection/ssd/pytorch/data/native_pipeline.py rename cv/detection/ssd/pytorch/{base/dataloaders => data}/prefetcher.py (100%) rename cv/detection/ssd/pytorch/{base/dataloaders => data}/sampler.py (100%) delete mode 100644 cv/detection/ssd/pytorch/default/config/config_V100x1x1.py delete mode 100644 cv/detection/ssd/pytorch/default/config/config_V100x1x8.py delete mode 100644 cv/detection/ssd/pytorch/default/config/config_nv_V100x1x8.py delete mode 100644 cv/detection/ssd/pytorch/default/config/training_event.py create mode 100644 cv/detection/ssd/pytorch/download_dataset.sh create mode 100644 cv/detection/ssd/pytorch/eval.py rename cv/detection/ssd/pytorch/{base => }/fused_color_jitter.py (100%) delete mode 100644 cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x1.py delete mode 100644 cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x8.py delete mode 100644 cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x8_2.1.0.py delete mode 100644 cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x8_wsl.py delete mode 100644 cv/detection/ssd/pytorch/iluvatar/config/config_nodali_V100x1x8.py delete mode 100644 cv/detection/ssd/pytorch/iluvatar/config/config_nv_V100x1x8.py delete mode 100644 cv/detection/ssd/pytorch/iluvatar/config/converter.py delete mode 100644 cv/detection/ssd/pytorch/iluvatar/config/environment_variables.sh delete mode 100644 cv/detection/ssd/pytorch/iluvatar/config/nhwc/conv.py delete mode 100644 cv/detection/ssd/pytorch/iluvatar/config/nhwc/test_bn_cudnn.py delete mode 100644 cv/detection/ssd/pytorch/iluvatar/config/ssd300.py delete mode 100644 cv/detection/ssd/pytorch/iluvatar/config/training_event.py delete mode 100644 cv/detection/ssd/pytorch/iluvatar/reset.sh create mode 100644 cv/detection/ssd/pytorch/install_ssd.sh rename cv/detection/ssd/pytorch/{iluvatar/config/nhwc/test_conv.py => master_params.py} (33%) create mode 100644 cv/detection/ssd/pytorch/mlperf_log_utils.py create mode 100644 cv/detection/ssd/pytorch/mlperf_logger.py rename cv/detection/ssd/pytorch/{iluvatar/config => }/nhwc/batch_norm.py (100%) create mode 100644 cv/detection/ssd/pytorch/nhwc/cifar10_nhwc.py rename cv/detection/ssd/pytorch/{nvidia/config => }/nhwc/conv.py (92%) rename cv/detection/ssd/pytorch/{iluvatar/config => }/nhwc/max_pool.py (100%) create mode 100644 cv/detection/ssd/pytorch/nhwc/mnist_nhwc.py create mode 100644 cv/detection/ssd/pytorch/nhwc/resnet_nhwc_cifar10.py rename cv/detection/ssd/pytorch/{nvidia/config => }/nhwc/test_bn_cudnn.py (97%) rename cv/detection/ssd/pytorch/{nvidia/config => }/nhwc/test_conv.py (96%) rename cv/detection/ssd/pytorch/{iluvatar/config => }/nhwc/test_max_pool.py (96%) delete mode 100644 cv/detection/ssd/pytorch/nvidia/config/Dockerfile delete mode 100644 cv/detection/ssd/pytorch/nvidia/config/config_V100x1x1.py delete mode 100644 cv/detection/ssd/pytorch/nvidia/config/config_V100x1x8.py delete mode 100644 cv/detection/ssd/pytorch/nvidia/config/config_nodali_V100x1x8.py delete mode 100644 cv/detection/ssd/pytorch/nvidia/config/converter.py delete mode 100644 
cv/detection/ssd/pytorch/nvidia/config/environment_variables.sh delete mode 100644 cv/detection/ssd/pytorch/nvidia/config/nhwc/batch_norm.py delete mode 100644 cv/detection/ssd/pytorch/nvidia/config/nhwc/max_pool.py delete mode 100644 cv/detection/ssd/pytorch/nvidia/config/nhwc/test_max_pool.py delete mode 100644 cv/detection/ssd/pytorch/nvidia/config/resnet.py delete mode 100644 cv/detection/ssd/pytorch/nvidia/config/training_event.py delete mode 100644 cv/detection/ssd/pytorch/nvidia/reset.sh delete mode 100644 cv/detection/ssd/pytorch/nvidia/setup.py rename cv/detection/ssd/pytorch/{base/model/losses => }/opt_loss.py (77%) create mode 100644 cv/detection/ssd/pytorch/parse_config.py rename cv/detection/ssd/pytorch/{base/data_preprocessing/prepare_json.py => prepare-json.py} (100%) rename cv/detection/ssd/pytorch/{base => }/requirements.txt (35%) rename cv/detection/ssd/pytorch/{iluvatar/config => }/resnet.py (88%) create mode 100644 cv/detection/ssd/pytorch/run.sub create mode 100644 cv/detection/ssd/pytorch/run_and_time.sh create mode 100644 cv/detection/ssd/pytorch/run_ddp_mm.sh create mode 100644 cv/detection/ssd/pytorch/run_with_docker.sh rename cv/detection/ssd/pytorch/{iluvatar => }/setup.py (100%) rename cv/detection/ssd/pytorch/{nvidia/config => }/ssd300.py (98%) create mode 100644 cv/detection/ssd/pytorch/test.py rename cv/detection/ssd/pytorch/{base => }/test/box_coder_test.py (53%) rename cv/detection/ssd/pytorch/{base => }/test/cuda_encoder_test.py (88%) rename cv/detection/ssd/pytorch/{base => }/test/opt_loss_test.py (64%) create mode 100644 cv/detection/ssd/pytorch/train.py rename cv/detection/ssd/pytorch/{base/dataloaders/util.py => utils.py} (48%) create mode 100644 cv/detection/ssd/pytorch/visualize.py diff --git a/cv/detection/ssd/pytorch/ci/prepare.sh b/cv/detection/ssd/pytorch/Dockerfile similarity index 32% rename from cv/detection/ssd/pytorch/ci/prepare.sh rename to cv/detection/ssd/pytorch/Dockerfile index 7a9d113e8..dab6ec428 100644 --- a/cv/detection/ssd/pytorch/ci/prepare.sh +++ b/cv/detection/ssd/pytorch/Dockerfile @@ -1,10 +1,8 @@ -#!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. -# All Rights Reserved. +# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # @@ -14,22 +12,30 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-set -x -## install libGL -ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') -if [[ ${ID} == "ubuntu" ]]; then - apt install -y libgl1-mesa-glx -elif [[ ${ID} == "centos" ]]; then - yum install -y mesa-libGL -else - echo "Not Support Os" -fi +ARG FROM_IMAGE_NAME=nvcr.io/nvidia/pytorch:20.06-py3 +FROM ${FROM_IMAGE_NAME} -mkdir -p /home/data/perf/ssd -ln -s /mnt/deepspark/data/datasets/coco /home/data/perf/ssd/ -cp /mnt/deepspark/data/checkpoints/resnet34-333f7ec4.pth /home/data/perf/ssd/ +# Install dependencies for system configuration logger +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + infiniband-diags \ + pciutils \ + && rm -rf /var/lib/apt/lists/* -cd base -source ../iluvatar/config/environment_variables.sh -python3 prepare.py --name iluvatar --data_dir /home/data/perf/ssd -timeout 1800 bash run_training.sh --name iluvatar --config V100x1x8 --data_dir /home/data/perf/ssd --backbone_path /home/data/perf/ssd/resnet34-333f7ec4.pth \ No newline at end of file +WORKDIR opt + +# Install Python dependencies +WORKDIR /workspace/single_stage_detector + +COPY requirements.txt . +RUN pip install --no-cache-dir https://github.com/mlperf/logging/archive/9ea0afa.zip \ + && pip install --no-cache-dir -r requirements.txt + +# Copy SSD code and build +COPY . . +RUN pip install . + +# Configure environment variables +ENV OMP_NUM_THREADS=1 +ENV OPENCV_FOR_THREADS_NUM=1 +ENV TORCH_HOME=/workspace/single_stage_detector/torch-model-cache diff --git a/cv/detection/ssd/pytorch/async_evaluator.py b/cv/detection/ssd/pytorch/async_evaluator.py new file mode 100644 index 000000000..8c8343fff --- /dev/null +++ b/cv/detection/ssd/pytorch/async_evaluator.py @@ -0,0 +1,95 @@ +import torch + +from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor + +class AsyncEvaluator(): + """ + Creates a threaded evaluator for a given device. 
+ If device == None then the current active device is used + """ + def __init__(self, num_threads=1, device=None): + self.num_threads = num_threads + # self.pool = ThreadPoolExecutor(num_threads) + self.pool = ProcessPoolExecutor(num_threads) + + self.events = {} + + def __del__(self): + for t, e in self.events.items(): + e.cancel() + + # submit given function and its arguments with an + # associated tag for bookkeeping + def submit_task(self, tag, fn, *args, **kwargs): + + # launch work + e = self.pool.submit(fn, *args, **kwargs) + + # record work + self.events[tag] = e + + # check if a task has completed + def task_done(self, tag): + return self.events[tag].done() + + # get the result of a task: + # Note: will block until completed + def task_result(self, tag): + return self.events[tag].result(timeout=None) + + # Get all currently finished tasks in a dict of + # { tag : result } + def finished_tasks(self): + ret = {} + to_remove = [] + # Check all existing tasks + for t in self.events.keys(): + done = self.events[t].done() + + if done: + ret[t] = self.task_result(t) + to_remove.append(t) + + # As soon as a task is finished we want to remove it + for t in to_remove: + self.task_remove(t) + + return ret + + # remove a task from the outstanding list + # Note: will cancel task if not completed + def task_remove(self, tag): + done = self.events[tag].done() + + # cancel task if necessary + if not done: + self.events[tag].cancel() + + # remove the entry + del self.events[tag] + + # return list of tags outstanding + def task_tags(self): + return self.events.keys() + + # wait for all outstanding tasks to finish and return their results as { tag : result } + def get_all_tasks(self): + ret = {} + to_remove = [] + # Check all existing tasks + for t in self.events.keys(): + ret[t] = self.task_result(t) + to_remove.append(t) + + # As soon as a task is finished we want to remove it + for t in to_remove: + self.task_remove(t) + return ret + + # wait for everything to finish + def wait_all_tasks(self): + for t in self.events.keys(): + y = self.task_result(t) + print('task {} finished'.format(t)) + + diff --git a/cv/detection/ssd/pytorch/base/BaseDockerfile b/cv/detection/ssd/pytorch/base/BaseDockerfile deleted file mode 100644 index 6f036a1c0..000000000 --- a/cv/detection/ssd/pytorch/base/BaseDockerfile +++ /dev/null @@ -1,58 +0,0 @@ -FROM ubuntu:18.04 - -ENV DEBIAN_FRONTEND=noninteractive -ENV PATH /root/miniconda/bin:$PATH - - -RUN apt-get update -y -RUN apt-get install -y \ - apt-utils \ - sudo \ - openssh-server \ - vim \ - git \ - curl \ - wget \ - tree \ - perl \ - kmod \ - make \ - pciutils \ - build-essential \ - python3.8-dev \ - python3-pip \ - libjpeg-dev \ - zlib1g-dev \ - unzip \ - cmake \ - bzip2 \ - cabextract \ - iputils-ping \ - pbzip2 \ - pv \ - numactl - -# Configure anaconda -RUN wget https://repo.anaconda.com/miniconda/Miniconda3-py38_4.10.3-Linux-x86_64.sh && \ - bash ./Miniconda3-py38_4.10.3-Linux-x86_64.sh -b -p /root/miniconda && \ -# eval "$(/root/miniconda/bin/conda shell.bash hook)" && \ - /root/miniconda/bin/conda clean -tipsy && \ - ln -s /root/miniconda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \ - echo ". 
/root/miniconda/etc/profile.d/conda.sh" >> ~/.bashrc && \ - echo "conda activate base" >> ~/.bashrc && \ - conda config --set always_yes yes --set changeps1 no - - -RUN /bin/bash -c "apt-get install -y linux-headers-`uname -r`" - -# TODO: Remove pip source -RUN /bin/bash -c "pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple" - -COPY requirements.txt requirements.txt -RUN /bin/bash -c "pip3 install -r requirements.txt" - - -WORKDIR /workspace/baai-perf - - - diff --git a/cv/detection/ssd/pytorch/base/config/__init__.py b/cv/detection/ssd/pytorch/base/config/__init__.py deleted file mode 100644 index 258fb17d7..000000000 --- a/cv/detection/ssd/pytorch/base/config/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from ._base import * - -from .config_manager import activate_config_env - diff --git a/cv/detection/ssd/pytorch/base/config/_base.py b/cv/detection/ssd/pytorch/base/config/_base.py deleted file mode 100644 index 4262ad1a7..000000000 --- a/cv/detection/ssd/pytorch/base/config/_base.py +++ /dev/null @@ -1,143 +0,0 @@ -from typing import ClassVar -from train.event.base import BaseTrainingEventInterface - -# 1.Basic Configurations -# The train dir. Should contain coco datasets. -data_dir: str = None - -# The backbone dir. The path to pretrained backbone weights file of resnet34-333f7ec4.pth, 'default is to get it from online torchvision repository'. -backbone_path: str = None - -# Whether to run training. -do_train: bool = False - -# Whether to read local rank from ENVVAR -use_env: bool = False - -# device -device: str = None - -# total gpu count -n_gpu: int = 1 - -distributed: bool = False - -# local_rank for distributed training on gpus -local_rank: int = -1 - -# Communication backend for distributed training on gpus -dist_backend: str = "nccl" - -# Stop training after reaching this Masked-LM accuracy -threshold: float = 0.23 - -# NMS threshold -nms_valid_thresh: float = 0.05 - -# Total number of training epochs to perform. -epochs: int = 80 - -# Start epoch, use for training from checkpoint -epoch: int = 0 - -# Start iteration, use for training from checkpoint -iteration: int = 0 - -# Sample to begin performing eval. -evaluation: list = [5, 10, 40, 50, 55, 60, 65, 70, 75, 80] - -# Whether to save checkpoints -save_checkpoint: bool = False - -# Where to save checkpoints -output: str = "./output" - -# path to model checkpoint file -checkpoint: str = None - -# random seed for initialization -seed: int = 42 - -# frequency of logging loss. If not positive, no logging is provided for training loss -log_freq: int = 20 - -# Number of updates steps to accumualte before performing a backward/update pass. -num_classes: int = 81 - -# Input images size -input_size: int = 300 - - -# 2.Model Training Configurations -gradient_accumulation_steps: int = 1 - -# Total batch size for training. -train_batch_size: int = 120 - -# Total batch size for training. -eval_batch_size: int = 160 - -# The initial learning rate. -learning_rate: float = 2.92e-3 - -# weight decay rate. -weight_decay_rate: float = 1.6e-4 - -# decay rate of learning rate. default is 0.1. -lr_decay_factor: float = 0.1 - -# epochs at which to decay the learning rate. -lr_decay_epochs: list = [40, 50] - -# How long the learning rate will be warmed up in fraction of epochs. -warmup: int = 650 - -# MLperf rule parameter for controlling warmup curve. -warmup_factor: int = 0 - -# Loss scaling, positive power of 2 values can improve fp16 convergence. -loss_scale: float = 0.0 - - -# 3. 
Optimizer Configurations -# A object to provide some core components in training -training_event: ClassVar[BaseTrainingEventInterface] = None - -# Dataloader workers -num_workers: int = 4 - -# Whether to use 16-bit float precision instead of 32-bit -fp16: bool = False - -# Control training mode(FP32 or FP16) by opt_level using apex -# ToDo enum 0,1,2 -opt_level: int = 0 - -delay_allreduce: bool = False - -# Group of processes to collaborate on BatchNorm ops -# ToDo enum 1,2,4,8 -bn_group: int = 1 - -fast_nms: bool = False - -# Use fused color jitter -fast_cj: bool = False - -# -use_coco_ext: bool = False - -# Whether use Dali -dali: bool = False - -# Run dali in synchronous mode instead of the (default) asynchronous -dali_sync: bool = False - -# cache size (in GB) for Dali's nvjpeg caching -dali_cache: int = 0 - -# The following 4 optimization configurations must using Dali -nhwc: bool = False -pad_input: bool = False -jit: bool = False -use_nvjpeg: bool = False diff --git a/cv/detection/ssd/pytorch/base/config/config_manager.py b/cv/detection/ssd/pytorch/base/config/config_manager.py deleted file mode 100644 index e8bd00a25..000000000 --- a/cv/detection/ssd/pytorch/base/config/config_manager.py +++ /dev/null @@ -1,165 +0,0 @@ -import copy -import importlib -import inspect -import os -import sys -from argparse import ArgumentParser -from typing import Iterable, Mapping - -import config as global_config -from . import _base as base_config -from .mutable_params import mutable_params - -mutable_params = copy.copy(mutable_params) -immutable_params = set(global_config.__dict__.keys()) - set(mutable_params) - - -def get_config(config_path: str): - if os.path.exists(config_path): - abs_path = config_path - sys.path.append(os.path.dirname(abs_path)) - config_path = os.path.basename(config_path).replace(".py", "") - try: - module = importlib.import_module(config_path) - except Exception as ex: - sys.path.pop(-1) - raise ex - sys.path.pop(-1) - else: - raise FileNotFoundError("Not found config:", config_path) - - return module - - -def get_annotations(other_modules: list=None): - annotations = dict() - - if "__annotations__" in base_config.__dict__: - annotations.update(base_config.__dict__["__annotations__"]) - - if other_modules is not None: - for mod in other_modules: - if isinstance(mod, str): - mod = get_config(mod) - if "__annotations__" in mod.__dict__: - annotations.update(mod.__dict__["__annotations__"]) - - return annotations - - -def is_property(name: str, value): - status = [ - not name.startswith('__'), - not callable(value), - not inspect.isclass(value), - not inspect.ismodule(value), - not inspect.ismethod(value), - not inspect.isfunction(value), - not inspect.isbuiltin(value), - ] - - return all(status) - - -def get_properties_from_config(config): - if not isinstance(config, Mapping): - config = config.__dict__ - properties = dict() - for name, value in config.items(): - if is_property(name, value): - properties[name] = value - - return properties - - -def add_to_argparser(config: dict, parser: ArgumentParser, other_modules: list=None): - annotations = get_annotations(other_modules) - - def get_property_type(name, value): - if value is not None: - return type(value) - if name in annotations: - return annotations[name] - return str - - def add_args(parser, name, value, prefix=''): - dtype = get_property_type(prefix + name, value) - - if dtype == str: - parser.add_argument('--' + prefix + name, type=str, default=None) - elif dtype == int: - parser.add_argument('--' + prefix + name, 
type=int, default=None) - elif dtype == float: - parser.add_argument('--' + prefix + name, type=float, default=None) - elif dtype == bool: - parser.add_argument('--' + prefix + name, action=f"store_{str(not value).lower()}", default=None) - elif isinstance(value, Mapping): - for k, v in value.items(): - add_args(parser, k, v, prefix=prefix + name + ".") - elif isinstance(value, Iterable) and not isinstance(value, Mapping): - parser.add_argument('--' + prefix + name, type=type(value[0]), nargs='+', default=None) - # else: - # print(f'WARN: Cannot parse key {prefix + name} of type {type(value)}.') - - for name, value in config.items(): - if not is_property(name, value): - continue - add_args(parser, name, value) - - -def _merge_dict_to_config(src: dict, dist: dict, ignore_none=True): - for arg, value in src.items(): - if ignore_none and value is None: - continue - dist[arg] = value - - -def parse_from_args(config: dict, parser=None, other_modules: list=None, with_config_env_name: bool=False): - if parser is None: - parser = ArgumentParser() - - add_to_argparser(config, parser, other_modules) - if with_config_env_name: - parser.add_argument("config", type=str, help="Config name") - - args = parser.parse_args() - return args - - -def activate_config_env(name=None, parser=None, parse_args=True, with_config_env_name: bool=False): - global_config_copy_ = copy.copy(global_config.__dict__) - - if parse_args: - args_dict = dict() - for mutable_param in mutable_params: - args_dict[mutable_param] = global_config_copy_[mutable_param] - args = parse_from_args(args_dict, parser, with_config_env_name=with_config_env_name) - del args_dict - if name is None and with_config_env_name: - name = args.config - - if name is None: - raise RuntimeError("Argument `name` must be given.") - - external_module_params = copy.copy(get_config(name).__dict__) - for immutable_param in immutable_params: - if immutable_param in external_module_params: - external_module_params.pop(immutable_param) - - _merge_dict_to_config(global_config_copy_, global_config.__dict__) - _merge_dict_to_config(external_module_params, global_config.__dict__) - if parse_args: - _merge_dict_to_config(args.__dict__, global_config.__dict__) - - -def print_config(config=None): - if config is None: - config = global_config - properties = get_properties_from_config(config) - config_fields = [] - for name, value in properties.items(): - config_fields.append(f"{name}={value}") - - config_fields = ", ".join(config_fields) - config_str = f"Config({config_fields})" - print(config_str) \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/base/config/mutable_params.py b/cv/detection/ssd/pytorch/base/config/mutable_params.py deleted file mode 100644 index b5bc1d41f..000000000 --- a/cv/detection/ssd/pytorch/base/config/mutable_params.py +++ /dev/null @@ -1,23 +0,0 @@ -mutable_params = [ - "dist_backend", "seed", - "train_batch_size", "eval_batch_size", "learning_rate", "weight_decay_rate", "lr_decay_factor", "lr_decay_epochs", - "warmup", "warmup_factor", "loss_scale", - "gradient_accumulation_steps", "fp16", "opt_level", "delay_allreduce", "fast_nms", "fast_cj", - "dali", "dali_cache", "nhwc", "pad_input", "jit", "use_nvjpeg", - "training_event", -] - -mutable_params += [ - "local_rank", - "do_train", - "data_dir", - "backbone_path", - "log_freq", -] - -# only use for debug and fine tune. 
-mutable_params += [ - "save_checkpoint", - "output", - "checkpoint" -] \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/base/dataloaders/__init__.py b/cv/detection/ssd/pytorch/base/dataloaders/__init__.py deleted file mode 100644 index 01ffe4e37..000000000 --- a/cv/detection/ssd/pytorch/base/dataloaders/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .dataloader import create_train_dataloader, create_eval_dataloader \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/base/dataloaders/dataloader.py b/cv/detection/ssd/pytorch/base/dataloaders/dataloader.py deleted file mode 100644 index a05c764b5..000000000 --- a/cv/detection/ssd/pytorch/base/dataloaders/dataloader.py +++ /dev/null @@ -1,13 +0,0 @@ -import torch - -from .build_pipeline import prebuild_pipeline, build_pipeline - -def create_train_dataloader(config): - train_pipe = prebuild_pipeline(config) - train_loader, epoch_size, train_sampler = build_pipeline(config, training=True, pipe=train_pipe) - return train_loader, epoch_size, train_sampler - - -def create_eval_dataloader(config): - val_loader, inv_map, cocoGt = build_pipeline(config, training=False) - return val_loader, inv_map, cocoGt diff --git a/cv/detection/ssd/pytorch/base/dataloaders/native_pipeline.py b/cv/detection/ssd/pytorch/base/dataloaders/native_pipeline.py deleted file mode 100644 index 27afce830..000000000 --- a/cv/detection/ssd/pytorch/base/dataloaders/native_pipeline.py +++ /dev/null @@ -1,119 +0,0 @@ -import torch -import os - -from functools import partial - -from torch.utils.data import DataLoader - -from box_coder import dboxes300_coco -from .util import COCODetection, SSDTransformer -from .sampler import GeneralDistributedSampler - -from pycocotools.coco import COCO -import numpy as np - - -def SSDCollator(batch, dali): - """ - :param batch: batch data, [image, image_id, (htot, wtot), bboxes, labels] - if Dali is False: - a batch is like: - [ - [torch.Size([3, 300, 300]), 152915, (262, 386), torch.Size([8732, 4]), torch.Size([8732])], - [torch.Size([3, 300, 300]), 260111, (480, 640), torch.Size([8732, 4]), torch.Size([8732])] - .... - ] - if Dali is True: - This function will not be called. - :param dali: whether use Dali - :return: - """ - images = [] - image_ids = [] - image_sizes = [] - bboxes = [] - bbox_offsets = [0] - labels = [] - for img, img_id, img_size, bbox, label in batch: - images.append(img.view(1, *img.shape)) - image_ids.append(img_id) - image_sizes.append(img_size) - bboxes.append(bbox) - labels.append(label) - bbox_offsets.append(bbox_offsets[-1] + bbox.shape[0]) - - images = torch.cat(images) - bboxes = torch.cat(bboxes) - labels = torch.cat(labels) - if dali: - bbox_offsets = np.array(bbox_offsets).astype(np.int32) - return [images, bboxes, labels, torch.tensor(bbox_offsets)] - else: - N = images.shape[0] - return [images, bboxes.view(N, -1, 4), labels.view(N, -1)] - - -def build_train_pipe(config): - input_size = config.input_size - train_coco_root = os.path.join(config.data_dir, "train2017") - if config.dali: - # Default model, this branch is not be executed, and the alternative branch is dataloaders.dali_pipeline.build_dali_pipeline. 
- train_annotate = os.path.join(config.data_dir, "annotations/bbox_only_instances_train2017.json") - train_trans = SSDTransformer((input_size, input_size), dali=True, - fast_nms=config.fast_nms, fast_cj=config.fast_cj, val=False) - else: - train_annotate = os.path.join(config.data_dir, "annotations/instances_train2017.json") - dboxes = dboxes300_coco() - train_trans = SSDTransformer((input_size, input_size), dboxes=dboxes, dali=False, - fast_nms=config.fast_nms, fast_cj=config.fast_cj, val=False) - - train_coco = COCODetection(train_coco_root, train_annotate, train_trans) - - if config.distributed: - train_sampler = GeneralDistributedSampler(train_coco, pad=False) - else: - train_sampler = None - - train_loader = DataLoader(train_coco, - batch_size=config.train_batch_size, - shuffle=(train_sampler is None), - sampler=train_sampler, - num_workers=config.num_workers, - collate_fn=partial(SSDCollator, dali=config.dali)) - return train_loader, len(train_loader), train_sampler - - -def build_eval_pipe(config): - # Paths - input_size = config.input_size - val_coco_root = os.path.join(config.data_dir, "val2017") - val_annotate = os.path.join(config.data_dir, "annotations/bbox_only_instances_val2017.json") - val_trans = SSDTransformer((input_size, input_size), dali=True, - fast_nms=config.fast_nms, fast_cj=config.fast_cj, val=True) - if config.use_coco_ext: - cocoGt = COCO(annotation_file=val_annotate, use_ext=True) - else: - cocoGt = COCO(annotation_file=val_annotate) - val_coco = COCODetection(val_coco_root, val_annotate, val_trans, cocoGt.dataset) - - if config.distributed: - val_sampler = GeneralDistributedSampler(val_coco, pad=False) - else: - val_sampler = None - - val_dataloader = DataLoader(val_coco, - batch_size=config.eval_batch_size, - shuffle=False, # Note: distributed sampler is shuffled :( - sampler=val_sampler, - num_workers=config.num_workers) - - inv_map = {v:k for k,v in val_coco.label_map.items()} - - return val_dataloader, inv_map, cocoGt - - -def build_native_pipeline(config, training=True, pipe=None): - if training: - return build_train_pipe(config) - else: - return build_eval_pipe(config) diff --git a/cv/detection/ssd/pytorch/base/model/__init__.py b/cv/detection/ssd/pytorch/base/model/__init__.py deleted file mode 100644 index 7f31eba74..000000000 --- a/cv/detection/ssd/pytorch/base/model/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch -from .models.ssd300 import SSD300 - - -def load_checkpoint(model, checkpoint): - print("loading model checkpoint", checkpoint) - od = torch.load(checkpoint) - - # remove proceeding 'module' from checkpoint - saved_model = od["model"] - for k in list(saved_model.keys()): - if k.startswith('module.'): - saved_model[k[7:]] = saved_model.pop(k) - model.load_state_dict(saved_model) - return od - - -def create_model(config): - model = SSD300(config) - model.train() - model.cuda() - return model \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/base/model/layers/__init__.py b/cv/detection/ssd/pytorch/base/model/layers/__init__.py deleted file mode 100644 index d5bdef535..000000000 --- a/cv/detection/ssd/pytorch/base/model/layers/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -import torch - - diff --git a/cv/detection/ssd/pytorch/base/model/losses/__init__.py b/cv/detection/ssd/pytorch/base/model/losses/__init__.py deleted file mode 100644 index 71d4a7969..000000000 --- a/cv/detection/ssd/pytorch/base/model/losses/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .loss import Loss -from .opt_loss import OptLoss \ No newline at end 
of file diff --git a/cv/detection/ssd/pytorch/base/model/models/__init__.py b/cv/detection/ssd/pytorch/base/model/models/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cv/detection/ssd/pytorch/base/model/models/resnet.py b/cv/detection/ssd/pytorch/base/model/models/resnet.py deleted file mode 100644 index afa119cd9..000000000 --- a/cv/detection/ssd/pytorch/base/model/models/resnet.py +++ /dev/null @@ -1,291 +0,0 @@ -from typing import Type, Any, Callable, Union, List, Optional - -#from torchvision.models.vgg import vgg16 -import torch.nn as nn -from torch import Tensor -import torch.utils.model_zoo as model_zoo -import torch.nn.functional as F -import torch -from torch.autograd import Variable -from collections import OrderedDict - -# from torchvision.models.resnet import resnet34 - - -model_urls = { - 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', -} - - -def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d: - """1x1 convolution""" - return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) - - -def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d: - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=dilation, groups=groups, bias=False, dilation=dilation) - - -def _ModifyConvStrideDilation(conv: nn.Conv2d, stride: tuple = (1, 1), padding=None): - conv.stride = stride - - if padding is not None: - conv.padding = padding - - -def _ModifyBlock(block, bottleneck=False, **kwargs): - for m in list(block.children()): - if bottleneck: - _ModifyConvStrideDilation(m.conv2, **kwargs) - else: - _ModifyConvStrideDilation(m.conv1, **kwargs) - - if m.downsample is not None: - # need to make sure no padding for the 1x1 residual connection - _ModifyConvStrideDilation(list(m.downsample.children())[0], **kwargs) - - -class BasicBlock(nn.Module): - expansion: int = 1 - - def __init__( - self, - inplanes: int, - planes: int, - stride: int = 1, - downsample: Optional[nn.Module] = None, - groups: int = 1, - base_width: int = 64, - dilation: int = 1, - norm_layer: Optional[Callable[..., nn.Module]] = None - ) -> None: - super(BasicBlock, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - if groups != 1 or base_width != 64: - raise ValueError('BasicBlock only supports groups=1 and base_width=64') - if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in BasicBlock") - # Both self.conv1 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = norm_layer(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = norm_layer(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x: Tensor) -> Tensor: - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) - # while original implementation places the stride at the first 1x1 convolution(self.conv1) - # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. 
- # This variant is also known as ResNet V1.5 and improves accuracy according to - # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. - - expansion: int = 4 - - def __init__( - self, - inplanes: int, - planes: int, - stride: int = 1, - downsample: Optional[nn.Module] = None, - groups: int = 1, - base_width: int = 64, - dilation: int = 1, - norm_layer: Optional[Callable[..., nn.Module]] = None - ) -> None: - super(Bottleneck, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - width = int(planes * (base_width / 64.)) * groups - # Both self.conv2 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv1x1(inplanes, width) - self.bn1 = norm_layer(width) - self.conv2 = conv3x3(width, width, stride, groups, dilation) - self.bn2 = norm_layer(width) - self.conv3 = conv1x1(width, planes * self.expansion) - self.bn3 = norm_layer(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x: Tensor) -> Tensor: - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - - def __init__( - self, - block: Type[Union[BasicBlock, Bottleneck]], - layers: List[int], - num_classes: int = 1000, - zero_init_residual: bool = False, - groups: int = 1, - width_per_group: int = 64, - replace_stride_with_dilation: Optional[List[bool]] = None, - norm_layer: Optional[Callable[..., nn.Module]] = None - ) -> None: - super(ResNet, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - self._norm_layer = norm_layer - - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - # each element in the tuple indicates if we should replace - # the 2x2 stride with a dilated convolution instead - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError("replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, - bias=False) - self.bn1 = norm_layer(self.inplanes) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2, - dilate=replace_stride_with_dilation[0]) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2, - dilate=replace_stride_with_dilation[1]) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2, - dilate=replace_stride_with_dilation[2]) - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.fc = nn.Linear(512 * block.expansion, num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - # Zero-initialize the last BN in each residual branch, - # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
- # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 - if zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type] - elif isinstance(m, BasicBlock): - nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type] - - def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int, - stride: int = 1, dilate: bool = False) -> nn.Sequential: - norm_layer = self._norm_layer - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - norm_layer(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample, self.groups, - self.base_width, previous_dilation, norm_layer)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append(block(self.inplanes, planes, groups=self.groups, - base_width=self.base_width, dilation=self.dilation, - norm_layer=norm_layer)) - - return nn.Sequential(*layers) - - def _forward_impl(self, x: Tensor) -> Tensor: - # See note [TorchScript super()] - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.avgpool(x) - x = torch.flatten(x, 1) - x = self.fc(x) - - return x - - def forward(self, x: Tensor) -> Tensor: - return self._forward_impl(x) - - -class ResNet34(nn.Module): - def __init__(self, model_path=None): - super().__init__() - # rn34 = resnet34(pretrained=(model_path is None)) - rn34 = ResNet(BasicBlock, [3, 4, 6, 3]) - if model_path is None: - state_dict = model_zoo.load_url(model_urls['resnet34'], - progress=True) - rn34.load_state_dict(state_dict) - else: - rn34.load_state_dict(torch.load(model_path)) - - # discard last Resnet block, avrpooling and classification FC - self.layer1 = nn.Sequential(*list(rn34.children())[:6]) - self.layer2 = nn.Sequential(*list(rn34.children())[6:7]) - # modify conv4 if necessary - # Always deal with stride in first block - modulelist = list(self.layer2.children()) - _ModifyBlock(modulelist[0], stride=(1,1)) - - def forward(self, data): - layer1_activation = self.layer1(data) - x = layer1_activation - layer2_activation = self.layer2(x) - - return [layer2_activation] - diff --git a/cv/detection/ssd/pytorch/base/model/models/ssd300.py b/cv/detection/ssd/pytorch/base/model/models/ssd300.py deleted file mode 100644 index 5bcb3f6a6..000000000 --- a/cv/detection/ssd/pytorch/base/model/models/ssd300.py +++ /dev/null @@ -1,141 +0,0 @@ -import torch -import torch.nn as nn -from .resnet import ResNet34 - -class SSD300(nn.Module): - """ - Build a SSD module to take 300x300 image input, - and output 8732 per class bounding boxes - - vggt: pretrained vgg16 (partial) model - label_num: number of classes (including background 0) - """ - def __init__(self, config): - - super(SSD300, self).__init__() - - self.label_num = config.num_classes - - if not config.checkpoint and config.backbone_path: - self.backbone = ResNet34(model_path=config.backbone_path) - else: - self.backbone = ResNet34() - out_channels = 256 - out_size = 38 - self.out_chan = [out_channels, 512, 512, 256, 256, 256] - - self._build_additional_features(out_size, self.out_chan) - - # after l2norm, conv7, conv8_2, conv9_2, conv10_2, conv11_2 - # 
classifer 1, 2, 3, 4, 5 ,6 - - self.num_defaults = [4, 6, 6, 6, 4, 4] - self.loc = [] - self.conf = [] - - for nd, oc in zip(self.num_defaults, self.out_chan): - self.loc.append(nn.Conv2d(oc, nd*4, kernel_size=3, padding=1)) - self.conf.append(nn.Conv2d(oc, nd*self.label_num, kernel_size=3, padding=1)) - - self.loc = nn.ModuleList(self.loc) - self.conf = nn.ModuleList(self.conf) - # intitalize all weights - self._init_weights() - - def _build_additional_features(self, input_size, input_channels): - idx = 0 - if input_size == 38: - idx = 0 - elif input_size == 19: - idx = 1 - elif input_size == 10: - idx = 2 - - self.additional_blocks = [] - - if input_size == 38: - self.additional_blocks.append(nn.Sequential( - nn.Conv2d(input_channels[idx], 256, kernel_size=1), - nn.ReLU(inplace=True), - nn.Conv2d(256, input_channels[idx+1], kernel_size=3, padding=1, stride=2), - nn.ReLU(inplace=True), - )) - idx += 1 - - self.additional_blocks.append(nn.Sequential( - nn.Conv2d(input_channels[idx], 256, kernel_size=1), - nn.ReLU(inplace=True), - nn.Conv2d(256, input_channels[idx+1], kernel_size=3, padding=1, stride=2), - nn.ReLU(inplace=True), - )) - idx += 1 - - # conv9_1, conv9_2 - self.additional_blocks.append(nn.Sequential( - nn.Conv2d(input_channels[idx], 128, kernel_size=1), - nn.ReLU(inplace=True), - nn.Conv2d(128, input_channels[idx+1], kernel_size=3, padding=1, stride=2), - nn.ReLU(inplace=True), - )) - idx += 1 - - # conv10_1, conv10_2 - self.additional_blocks.append(nn.Sequential( - nn.Conv2d(input_channels[idx], 128, kernel_size=1), - nn.ReLU(inplace=True), - nn.Conv2d(128, input_channels[idx+1], kernel_size=3), - nn.ReLU(inplace=True), - )) - idx += 1 - - # Only necessary in VGG for now - if input_size >= 19: - # conv11_1, conv11_2 - self.additional_blocks.append(nn.Sequential( - nn.Conv2d(input_channels[idx], 128, kernel_size=1), - nn.ReLU(inplace=True), - nn.Conv2d(128, input_channels[idx+1], kernel_size=3), - nn.ReLU(inplace=True), - )) - - self.additional_blocks = nn.ModuleList(self.additional_blocks) - - def _init_weights(self): - - layers = [ - *self.additional_blocks, - *self.loc, *self.conf] - - for layer in layers: - for param in layer.parameters(): - if param.dim() > 1: nn.init.xavier_uniform_(param) - - # Shape the classifier to the view of bboxes - def bbox_view(self, src, loc, conf): - ret = [] - for s, l, c in zip(src, loc, conf): - ret.append((l(s).view(s.size(0), 4, -1), c(s).view(s.size(0), self.label_num, -1))) - - locs, confs = list(zip(*ret)) - locs, confs = torch.cat(locs, 2).contiguous(), torch.cat(confs, 2).contiguous() - return locs, confs - - def forward(self, data): - - layers = self.backbone(data) - - # last result from network goes into additional blocks - x = layers[-1] - additional_results = [] - for i, l in enumerate(self.additional_blocks): - x = l(x) - additional_results.append(x) - - src = [*layers, *additional_results] - # Feature Map 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4 - - locs, confs = self.bbox_view(src, self.loc, self.conf) - - # For SSD 300, shall return nbatch x 8732 x {nlabels, nlocs} results - return locs, confs - diff --git a/cv/detection/ssd/pytorch/base/optimizers/__init__.py b/cv/detection/ssd/pytorch/base/optimizers/__init__.py deleted file mode 100644 index 8ff379afc..000000000 --- a/cv/detection/ssd/pytorch/base/optimizers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .factory import create_optimizer \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/base/optimizers/factory.py 
b/cv/detection/ssd/pytorch/base/optimizers/factory.py deleted file mode 100644 index 343b3a1da..000000000 --- a/cv/detection/ssd/pytorch/base/optimizers/factory.py +++ /dev/null @@ -1,11 +0,0 @@ -import torch - - -def create_optimizer(name: str, params, config): - name = name.lower() - - if name == "sgd": - return torch.optim.SGD(params, lr=config.learning_rate, - momentum=0.9, - weight_decay=config.weight_decay_rate) - raise RuntimeError(f"Not found optimier {name}.") diff --git a/cv/detection/ssd/pytorch/base/prepare.py b/cv/detection/ssd/pytorch/base/prepare.py deleted file mode 100644 index 4b181436e..000000000 --- a/cv/detection/ssd/pytorch/base/prepare.py +++ /dev/null @@ -1,316 +0,0 @@ -import os -import shutil -import subprocess -from typing import List, Optional, Union -import os.path as ospath -from argparse import ArgumentParser, REMAINDER -from functools import partial, wraps -from typing import NamedTuple -# import platform # python3.5已弃用 - - -# ========================================================= -# Define arguments -# ========================================================= - -def parse_args(): - parser = ArgumentParser("Prepare") - parser.add_argument("--name", type=str, default=None, help="The name of submitter") - parser.add_argument("--data_dir", type=str, default=None, help="Data directory") - # WARN: Don't delete this argument - parser.add_argument('other_args', nargs=REMAINDER) - args = parser.parse_args() - return args - - -# ========================================================= -# Constants -# ========================================================= -args = parse_args() - -APT = "apt" -YUM = "yum" -PIP_INSTALL = "pip3 install " -PYTHON = "python3" -DOWNLOAD = "wget" - -APT_PKGS = ["numactl"] -YUM_PKGS = ["numactl"] -SUPPORTED_WHEELS = ["torch", "apex"] - - -MODEL_DIR = ospath.abspath( - ospath.join( - __file__, - "../../../" - ) -) -PROJ_DIR = ospath.abspath( - ospath.join( - MODEL_DIR, - "pytorch" - ) -) -MODEL_IMPL_DIR = ospath.join(PROJ_DIR, "base") -CURRENT_MODEL_NAME = ospath.basename(MODEL_DIR) - -PACKAGE_DIR_NAME = "packages" -SOURCE_DIR_NAME = "csrc" -SDK_DIR_NAME = "sdk_installers" -PACKAGE_LIST_NAME = "files.txt" -SDK_LIST_NAME = "files.txt" - -SUBMITTER = args.name -DATA_DIR = args.data_dir -SUBMITTER_DIR = ospath.join(PROJ_DIR, SUBMITTER) - -EXTENSION_SOURCE_DIR_ENV = "EXTENSION_SOURCE_DIR" -SDK_ARGUMENTS_ENV = "SDK_ARGUMENTS" - - -# ========================================================= -# Helpers -# ========================================================= - -class ShellResult(NamedTuple): - - returncode: int - result: Union[subprocess.CompletedProcess, str] = None - - -def _exec_cmd(cmd: Union[str, List], *args, **kwargs): - args_str = " ".join(args) - args_str += " ".join([f"{name}={value}" for name, value in kwargs.items()]) - cmd_str = cmd - if isinstance(cmd, (tuple, list)): - cmd_str = " ".join(cmd) - print(f"Commands: {cmd_str}") - - result = subprocess.run(cmd, *args, **kwargs, stdout=None, stderr=subprocess.STDOUT) - - if result.returncode > 0: - msg = f"ERROR: {cmd} {args_str}" - return ShellResult(returncode=result.returncode, result=msg) - - return ShellResult(returncode=result.returncode, result=result) - - -def exec_shell_cmd(cmd: str, *args, **kwargs): - return _exec_cmd(cmd, shell=True, *args, **kwargs) - - -def exec_shell_cmds(cmds: List[str], *args, **kwargs): - cmds = "\n".join(cmds) - return exec_shell_cmd(cmds, *args, **kwargs) - - -def warning(*args, **kwargs): - print("WARN:", *args, **kwargs) - - -def 
find_file_by_match(dir, file_patterns): - if ospath.exists(dir): - dir_files = os.listdir(dir) - else: - return file_patterns - - for i, pattern in enumerate(file_patterns): - pattern = pattern.strip() - if len(pattern) <= 1 or not pattern.endswith("*"): - continue - - pattern = pattern[:-1] - - for dir_file in dir_files: - if dir_file.startswith(pattern): - file_patterns[i] = dir_file - break - return file_patterns - -# ========================================================= -# Pipelines -# ========================================================= - -def install_apt_packages(): - if len(APT_PKGS) == 0: - return - return exec_shell_cmd(f"{APT} install -y {' '.join(APT_PKGS)}") - - -def install_yum_packages(): - if len(YUM_PKGS) == 0: - return - return exec_shell_cmd(f"{YUM} install {' '.join(YUM_PKGS)}") - - -def prepare_data(): - checked_files = ["bbox_only_instances_train2017.json", "bbox_only_instances_val2017.json"] - path_join = ospath.join - exist_preprocessed_data = all([ospath.exists(path_join(DATA_DIR, "annotations", name)) for name in checked_files]) - - if exist_preprocessed_data: - return - # current_dir = os.path.join(MODEL_DIR, "pytorch") - cmds = [ - # f"cd {current_dir}", - f"python3 data_preprocessing/prepare_json.py --keep-keys {DATA_DIR}/annotations/instances_val2017.json {DATA_DIR}/annotations/bbox_only_instances_val2017.json", - f"python3 data_preprocessing/prepare_json.py {DATA_DIR}/annotations/instances_train2017.json {DATA_DIR}/annotations/bbox_only_instances_train2017.json" - ] - return exec_shell_cmds(cmds) - - -def install_sdk(): - def get_sdk_args(): - sdk_args = dict() - if SDK_ARGUMENTS_ENV in os.environ: - sdk_args_str = os.environ[SDK_ARGUMENTS_ENV] - - sdk_args_segments = sdk_args_str.split(';') - for sdk_arg in sdk_args_segments: - sdk, arg = sdk_arg.split('=', maxsplit=1) - sdk_args[sdk] = arg - return sdk_args - - sdk_args_dict = get_sdk_args() - print("SDK Arguments:", sdk_args_dict) - - sdk_installer_dir = ospath.join(SUBMITTER_DIR, SDK_DIR_NAME) - if not ospath.exists(sdk_installer_dir): - sdk_installer_dir = ospath.join(MODEL_IMPL_DIR, SUBMITTER, SDK_DIR_NAME) - if not ospath.exists(sdk_installer_dir): - warning("Not found sdk\'s dir, skip run installer") - return - - # Find sdk installers - sdk_list_file = ospath.join(sdk_installer_dir, SDK_LIST_NAME) - if ospath.exists(sdk_list_file): - with open(sdk_list_file) as f: - sdk_installers = f.readlines() - sdk_installers_pattern = [sdk.strip() for sdk in sdk_installers] - sdk_installers = find_file_by_match(sdk_installer_dir, sdk_installers_pattern) - else: - sdk_installers = os.listdir(sdk_installer_dir) - sdk_installers.sort() - - sdk_installers_cmds = [] - for sdk in sdk_installers: - if sdk.endswith(".run"): - sdk_arg = "" - for sdk_args_key in sdk_args_dict: - if sdk.startswith(sdk_args_key): - sdk_arg = sdk_args_dict[sdk_args_key] - sdk_installers_cmds.append("sh " + ospath.join(sdk_installer_dir, sdk) + f" {sdk_arg}") - - if len(sdk_installers_cmds) == 0: - warning("Not found installer in", sdk_installer_dir, ", skip run installer") - return - - return exec_shell_cmds(sdk_installers_cmds) - - -def install_requirements(): - return exec_shell_cmd( - f"{PIP_INSTALL} -r requirements.txt" - ) - - -def install_wheel_pkgs(filter_packages: bool=False): - wheel_dir = ospath.join(SUBMITTER_DIR, PACKAGE_DIR_NAME) - if not ospath.exists(wheel_dir): - warning("Not found package\'s dir, skip install wheel package") - return - - # Find packages - package_list_file = ospath.join(wheel_dir, PACKAGE_LIST_NAME) - 
if ospath.exists(package_list_file): - with open(package_list_file) as f: - packages = f.readlines() - packages_pattern = [pkg.strip() for pkg in packages] - packages = find_file_by_match(wheel_dir, packages_pattern) - else: - packages = os.listdir(wheel_dir) - packages.sort() - - def _filter_packages(name: str): - for support_pkg in SUPPORTED_WHEELS: - if name.startswith(support_pkg): - return True - return False - - if filter_packages: - packages = list(filter(_filter_packages, packages)) - - if len(packages) == 0: - warning("Not found wheel packages in", wheel_dir) - return - - install_packages_cmds = [f"{PIP_INSTALL} {ospath.join(wheel_dir, pkg)}" for pkg in packages] - return exec_shell_cmds(install_packages_cmds) - - -def install_extensions(): - source_dir = ospath.join(SUBMITTER_DIR, SOURCE_DIR_NAME) - if not ospath.exists(source_dir): - warning("Not found source dir:", source_dir) - return - - sandbox_dir = os.path.join(MODEL_DIR, "pytorch", 'sandbox', "extension") - if os.path.exists(sandbox_dir): - shutil.rmtree(sandbox_dir) - print("sandbox_dir: ", sandbox_dir) - cmds = [ - f"export {EXTENSION_SOURCE_DIR_ENV}={source_dir}", - f"mkdir -p {sandbox_dir}", - f"cd {sandbox_dir}", - f"{PYTHON} {SUBMITTER_DIR}/setup.py install", - f"rm -rf {sandbox_dir}", - ] - - return exec_shell_cmds(cmds) - -def get_linux_distro(): - try: - with open('/etc/os-release') as f: - for line in f: - if line.startswith('NAME='): - # 提取 NAME 字段的值(例如 "Ubuntu" 或 "CentOS") - name = line.split('=')[1].strip().strip('"') - if 'Ubuntu' in name: - return 'Ubuntu' - elif 'CentOS' in name: - return 'CentOS' - else: - return name # 返回其他发行版名称 - except FileNotFoundError: - return 'Unknown Linux distribution' - -def pipelines(): - plat = get_linux_distro().lower() - if "centos" == plat: - res = [install_yum_packages] - elif "ubuntu" == plat: - res = [install_apt_packages] - else: - raise Exception("Invalid Platform, only support Centos and Debian!") - res.extend([ - install_requirements, - install_sdk, - partial(install_wheel_pkgs, filter_packages=True), - install_extensions, - prepare_data, - ]) - return res - - -if __name__ == '__main__': - for pipeline in pipelines(): - result = pipeline() - if result is not None and result.returncode > 0: - print(result.result) - print("Fail:", pipeline) - exit(result.returncode) - - - - - diff --git a/cv/detection/ssd/pytorch/base/run_train.py b/cv/detection/ssd/pytorch/base/run_train.py deleted file mode 100644 index 5b72b9591..000000000 --- a/cv/detection/ssd/pytorch/base/run_train.py +++ /dev/null @@ -1,123 +0,0 @@ -"""SSD training""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -from copy import copy -import os -import random -import time -from concurrent.futures import ProcessPoolExecutor - -import numpy as np -import torch -from torch.cuda.amp import GradScaler - -import utils -from dataloaders.dataloader import create_train_dataloader -from train.evaluator import Evaluator -from train.trainer import Trainer -from train.training_state import TrainingState -from train.event import TrainingEventCompose, TrainingLogger - -logger = None - - -def main(): - import config - parser = argparse.ArgumentParser("SSD") - config.activate_config_env(parser=parser, with_config_env_name=True) - - if config.use_env and 'LOCAL_RANK' in os.environ: - config.local_rank = int(os.environ['LOCAL_RANK']) - - device, num_gpus = utils.init_dist_training_env(config) - config.device = device - config.n_gpu = num_gpus 
- - utils.check_config(config) - - torch.backends.cudnn.benchmark = True - interface = config.training_event(config) - events = [ - TrainingLogger(config, log_freq=config.log_freq) - ] - training_event = TrainingEventCompose(interface, events) - training_event.launch() - - global logger - logger = events[0].logger - - utils.barrier() - training_event.on_init_start() - init_start_time = logger.previous_log_time - utils.setup_seeds(config) - - evaluator = Evaluator(config) - grad_scaler = GradScaler(init_scale=float(os.getenv("INIT_LOSS_SCALE", 2 ** 20)), growth_interval=2000) - training_state = TrainingState() - trainer = Trainer(config, training_event, evaluator, training_state, grad_scaler, device=device) - training_state._trainer = trainer - - train_dataloader, epoch_size, train_sampler = create_train_dataloader(config) - utils.barrier() - trainer.init() - - utils.barrier() - init_evaluation_start = time.time() - eval_ap = evaluator.evaluate(trainer) - training_state.eval_ap = eval_ap - init_evaluation_end = time.time() - init_evaluation_info = dict( - eval_ap=eval_ap, - time=init_evaluation_end - init_evaluation_start - ) - training_event.on_init_evaluate(init_evaluation_info) - - if not config.do_train: - return config, training_state, init_evaluation_info["time"] - - training_event.on_init_end() - init_end_time = logger.previous_log_time - training_state.init_time = (init_end_time - init_start_time) / 1e+3 - - utils.barrier() - - training_event.on_train_begin() - raw_train_start_time = logger.previous_log_time - while training_state.epoch < config.epochs and not training_state.end_training: - training_state.epoch += 1 - trainer.train_one_epoch(train_dataloader) - if config.distributed and not config.dali: - train_sampler.set_epoch(training_state.epoch) - training_event.on_train_end() - raw_train_end_time = logger.previous_log_time - training_state.raw_train_time = (raw_train_end_time - raw_train_start_time) / 1e+3 - return config, training_state, epoch_size - - -if __name__ == "__main__": - now = time.time() - config, training_state, epoch_size = main() - - if not utils.is_main_process(): - print("process {} exit!".format(config.local_rank)) - exit() - - gpu_count = config.n_gpu - e2e_time = time.time() - now - training_perf = (epoch_size * training_state.epoch) / training_state.raw_train_time - if config.do_train: - finished_info = { - "e2e_time": e2e_time, - "training_samples_per_second": training_perf, - "converged": training_state.converged, - "final_ap": training_state.eval_ap, - "raw_train_time": training_state.raw_train_time, - "init_time": training_state.init_time, - } - else: - finished_info = {"e2e_time": e2e_time} - logger.log("FINISHED", finished_info, stacklevel=0) diff --git a/cv/detection/ssd/pytorch/base/run_training.sh b/cv/detection/ssd/pytorch/base/run_training.sh deleted file mode 100644 index 94ab4b9c4..000000000 --- a/cv/detection/ssd/pytorch/base/run_training.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -get_lscpu_value() { - awk -F: "(\$1 == \"${1}\"){gsub(/ /, \"\", \$2); print \$2; found=1} END{exit found!=1}" -} -lscpu_out=$(lscpu) - -n_sockets=$(get_lscpu_value 'Socket(s)' <<< "${lscpu_out}") -n_cores_per_socket=$(get_lscpu_value 'Core(s) per socket' <<< "${lscpu_out}") - -echo "Number of CPU sockets on a node: ${n_sockets}" -echo "Number of CPU cores per socket: ${n_cores_per_socket}" - -EXIT_STATUS=0 -check_status() -{ - if ((${PIPESTATUS[0]} != 0)); then - EXIT_STATUS=1 - fi -} - -export PYTHONPATH=../:$PYTHONPATH - -python3 -u -m bind_launch \ - --nsockets_per_node ${n_sockets} \ - --ncores_per_socket ${n_cores_per_socket} \ - --no_hyperthreads \ - --no_membind ./run_train.py --do_train "$@" ; check_status - -exit ${EXIT_STATUS} \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/base/run_with_docker.sh b/cv/detection/ssd/pytorch/base/run_with_docker.sh deleted file mode 100644 index 5ac6404b7..000000000 --- a/cv/detection/ssd/pytorch/base/run_with_docker.sh +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# ================================================= -# Constants -# ================================================= - -MODEL="ssd" -export MODEL -DOCKER_IMAGE="model_zoo:${MODEL}" -NEXP=1 - -# TODO: Add to Dockerfile -WORK_DIR="/workspace/model_zoo/ssd/pytorch" -MODEL_DIR="${WORK_DIR}/base" - -CURRENT_DIR=$(cd `dirname $0`; pwd) -PROJ_DIR="${CURRENT_DIR}/../" -BUILD_EXTENSION_DIR="${CURRENT_DIR}/build" -BUILD_EXTENSION_PACKAGE_NAME="ext_ops" - -BASE_DOCKERFILE_PATH="${CURRENT_DIR}/BaseDockerfile" -HOST_DOCKERFILE_PATH="${CURRENT_DIR}/Dockerfile" - -SOURCE_DATA_DIR="" -MAP_DATA_DIR="/mnt/dataset/model_zoo/${MODEL}" -MAP_BACKBONE_DIR="/mnt/dataset/model_zoo/${MODEL}/resnet34-333f7ec4.pth" -SUBMITTER="iluvatar" -CONFIG="" - -: "${CLEAR_CACHES:=1}" -SHM_SIZE="32g" - - -# ================================================= -# Parse arguments -# ================================================= - -i=2 -TRAINING_SCRIPT_ARGS="$@" -for arg in "$@" -do - if [[ $arg =~ "--data_dir" ]]; then - if [[ $arg =~ "=" ]]; then - kv=(${arg//=/ }) - SOURCE_DATA_DIR=${kv[1]} - TRAINING_SCRIPT_ARGS=${TRAINING_SCRIPT_ARGS/$arg/"--data_dir ${MAP_DATA_DIR} --backbone_path ${MAP_BACKBONE_DIR}"} - else - SOURCE_DATA_DIR=${!i} - TRAINING_SCRIPT_ARGS=${TRAINING_SCRIPT_ARGS/"--data_dir ${!i}"/"--data_dir ${MAP_DATA_DIR} --backbone_path ${MAP_BACKBONE_DIR}"} - fi - - elif [[ $arg =~ "--name" ]]; then - if [[ $arg =~ "=" ]]; then - kv=(${arg//=/ }) - SUBMITTER=${kv[1]} - else - SUBMITTER=${!i} - fi - - elif [[ $arg =~ "--config" ]]; then - if [[ $arg =~ "=" ]]; then - kv=(${arg//=/ }) - CONFIG=${kv[1]} - else - CONFIG=${!i} - fi - fi - - let i++ -done - - -# ================================================= -# Check arguments -# ================================================= - -if [[ "${SOURCE_DATA_DIR}" == "" ]]; then - echo "ERROR: data_dir is not given, please set --data_dir " - exit 1 -fi - -if [[ "${CONFIG}" == "" ]]; then - echo "ERROR: config is not given, please set --config " - exit 1 -fi - -CONTAINER_SUBMITTER_DIR="${WORK_DIR}/${SUBMITTER}" -HOST_SUBMITTER_DIR="${PROJ_DIR}/${SUBMITTER}" - -CONTAINER_ENVIRONMENT_VARIABLES_PATH=${CONTAINER_SUBMITTER_DIR}/${MODEL}/config/environment_variables.sh -HOST_ENVIRONMENT_VARIABLES_PATH="${HOST_SUBMITTER_DIR}/${MODEL}/config/environment_variables.sh" - -HOST_SUBMITTER_DOCKERFILE="${PROJ_DIR}/${SUBMITTER}/${MODEL}/config/Dockerfile" -CONTAINER_NAME="model_zoo-${MODEL}-${SUBMITTER}-container" - -if [ ! 
-f "${HOST_ENVIRONMENT_VARIABLES_PATH}" ]; then - touch "${HOST_ENVIRONMENT_VARIABLES_PATH}" -fi - -source ${HOST_ENVIRONMENT_VARIABLES_PATH} - -RESULTS_DIR="${PROJ_DIR}/${SUBMITTER}/${MODEL}/results" -LOG_FILE_BASE="${RESULTS_DIR}/config_${CONFIG}_experiment" - -echo "======================================" -echo "Arguments" -echo "---------" - -echo "CLEAR_CACHES = ${CLEAR_CACHES}" -echo "CLEAR_CONTAINER = ${CLEAR_CNT}" -echo "MODEL = ${MODEL}" -echo "CONTAINER_NAME = ${CONTAINER_NAME}" -echo "DOCKER_IMAGE = ${DOCKER_IMAGE}" -echo "MODEL_DIR = ${MODEL_DIR}" -echo "SUBMITTER = ${SUBMITTER}" -echo "CONTAINER_SUBMITTER_DIR = ${CONTAINER_SUBMITTER_DIR}" -echo "HOST_SUBMITTER_DOCKERFILE = ${HOST_SUBMITTER_DOCKERFILE}" -echo "CONFIG = ${CONFIG}" -echo "CONTAINER_MOUNTS = ${CONTAINER_MOUNTS}" -echo "TRAINING_SCRIPT_ARGS = ${TRAINING_SCRIPT_ARGS[*]}" -echo "CURRENT_DIR = ${CURRENT_DIR}" -echo "CONTAINER_ENVIRONMENT_VARIABLES_PATH = ${CONTAINER_ENVIRONMENT_VARIABLES_PATH}" -echo "RESULTS_DIR = ${RESULTS_DIR}" -echo "LOG_FILE_BASE = ${LOG_FILE_BASE}" -echo "SHM_SIZE = ${SHM_SIZE}" -echo "======================================" - - -# ================================================= -# Training -# ================================================= - -# Cleanup container -cleanup_docker() { - docker container rm -f "${CONTAINER_NAME}" || true -} -if [ "${CLEAR_CNT}" -eq 1 ]; then - cleanup_docker - trap 'set -eux; cleanup_docker' EXIT -fi - - -container_status=`docker ps | grep ${CONTAINER_NAME}` -if [[ ! ${container_status} =~ ${CONTAINER_NAME} ]]; then - # Clean built extension - if [ -d "${BUILD_EXTENSION_DIR}" ]; then - echo "WARN: Delete built extension" - rm -rf "${BUILD_EXTENSION_DIR}" - rm -rf ${CURRENT_DIR}/${BUILD_EXTENSION_PACKAGE_NAME}.*.so - echo "extension file: "${CURRENT_DIR}/${BUILD_EXTENSION_PACKAGE_NAME}.*.so"" - fi - - - # Build image - if [ -f "${HOST_DOCKERFILE_PATH}" ]; then - echo "WARN: Remove previous Dockerfile" - rm -f "${HOST_DOCKERFILE_PATH}" - fi - - echo "WARN: cp BaseDockerfile to Dockerfile" - cp "${BASE_DOCKERFILE_PATH}" "${HOST_DOCKERFILE_PATH}" - - if [ ${SUBMITTER} = "nvidia" ]; then - echo "Nvidia Dockerfile build from Nvidia NGC Images." 
- cat "${HOST_SUBMITTER_DOCKERFILE}" > "${HOST_DOCKERFILE_PATH}" - elif [ -f "${HOST_SUBMITTER_DOCKERFILE}" ]; then - echo "WARN: Found submitter's Dockerfile, merging submitter's Dockerfile to Dockerfile" - cat "${HOST_SUBMITTER_DOCKERFILE}" >> "${HOST_DOCKERFILE_PATH}" - fi - - docker build -t ${DOCKER_IMAGE} ./ - - # Setup container by Dockerfile - docker run --rm --init --detach \ - --net=host --uts=host --ipc=host --security-opt=seccomp=unconfined \ - --privileged=true \ - --ulimit=stack=67108864 --ulimit=memlock=-1 \ - -w ${MODEL_DIR} \ - --shm-size="${SHM_SIZE}" \ - --volume ${SOURCE_DATA_DIR}:${MAP_DATA_DIR} \ - --volume ${PROJ_DIR}:${WORK_DIR} \ - --name="${CONTAINER_NAME}" ${CONTAINER_MOUNTS} \ - "${DOCKER_IMAGE}" sleep infinity - - # make sure container has time to finish initialization - # TODO: Uncomment - sleep 5 - docker exec -it "${CONTAINER_NAME}" true - - mkdir -p ${RESULTS_DIR} - docker exec -it "${CONTAINER_NAME}" sh -c "chmod 777 run_training.sh" - - # TODO: Remove pip source - docker exec -it "${CONTAINER_NAME}" /bin/bash -c "pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple" - - docker exec -it "${CONTAINER_NAME}" /bin/bash -c "source ${CONTAINER_ENVIRONMENT_VARIABLES_PATH};python3 prepare.py --name ${SUBMITTER} --data_dir ${MAP_DATA_DIR}" -fi - -# Run experiments -for _experiment_index in $(seq 1 "${NEXP}"); do - ( - echo "Beginning trial ${_experiment_index} of ${NEXP}" - echo "source ${CONTAINER_ENVIRONMENT_VARIABLES_PATH};bash ./run_training.sh ${TRAINING_SCRIPT_ARGS[*]}" - - if [ "${CLEAR_CACHES}" -eq 1 ]; then - sync && sudo /sbin/sysctl vm.drop_caches=3 - fi - - # Run experiment - docker exec -it "${CONTAINER_NAME}" /bin/bash -c "source ${CONTAINER_ENVIRONMENT_VARIABLES_PATH};bash ./run_training.sh ${TRAINING_SCRIPT_ARGS[*]}" - ) |& tee "${LOG_FILE_BASE}_${_experiment_index}.log" -done \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/base/setup.py b/cv/detection/ssd/pytorch/base/setup.py deleted file mode 100644 index f006678c0..000000000 --- a/cv/detection/ssd/pytorch/base/setup.py +++ /dev/null @@ -1,102 +0,0 @@ -import glob -import os -import os.path as ospath - -from setuptools import setup, Extension -from torch.utils.cpp_extension import BuildExtension, CUDAExtension - -PACKAGE_NAME = "ext_ops" - -SOURCE_FILE_EXT = ["c", "cpp", "cu"] -HEADER_FILE_EXT = ["h", "hpp", "cuh"] - -SUPPORT_EXTENSIONS = SOURCE_FILE_EXT + HEADER_FILE_EXT -SOURCE_DIR_KEY = "extension_source_dir" -NVCC_ARGUMENTS_KEY = "NVCC_ARGUMENTS" - - -def get_value_from_environ(name: str, default=None): - if name in os.environ: - return os.environ[name] - if name.upper() in os.environ: - return os.environ[name.upper()] - - return default - - -def check_source_dir(): - source_dir = get_value_from_environ(SOURCE_DIR_KEY) - if source_dir in [None, ""]: - raise ValueError(f"Invaild `source_dir` argument: {source_dir}.") - - return source_dir - - -def find_source_files() -> dict: - source_dir = check_source_dir() - - if not ospath.exists(source_dir): - return dict() - - # Search source files - sources = dict() - for ext in SOURCE_FILE_EXT: - sources[ext] = glob.glob(ospath.join(source_dir, "**", f"*.{ext}"), recursive=True) - - return sources - - -def find_include_dirs() -> list: - source_dir = check_source_dir() - if not ospath.exists(source_dir): - return [] - return glob.glob(ospath.join(source_dir, "**", "include"), recursive=True) - - -def get_nvcc_arguments() -> list: - arguments = get_value_from_environ(NVCC_ARGUMENTS_KEY) - if arguments is None: - 
return [] - arguments = arguments.split(" ") - return arguments - - -source_files = find_source_files() -include_dirs = find_include_dirs() -c_sources = source_files.pop("c") -other_sources = [] -for _sources in source_files.values(): - other_sources.extend(_sources) - -nvcc_arguments = get_nvcc_arguments() - -ext_modules = [] - -if len(c_sources) != 0: - ext_modules.append(Extension( - name=PACKAGE_NAME, - sources=c_sources, - include_dirs=include_dirs, - extra_compile_args={ - 'c': ['-O3'] - } - )) - -if len(other_sources) != 0: - ext_modules.append(CUDAExtension( - name=PACKAGE_NAME, - sources=other_sources, - extra_compile_args={ - 'cxx': ['-O3', ], - 'nvcc': ['-O3'] + nvcc_arguments - } - )) - -setup( - name=PACKAGE_NAME, - version="0.1", - ext_modules=ext_modules, - cmdclass={ - 'build_ext': BuildExtension - } -) diff --git a/cv/detection/ssd/pytorch/base/test/dali_dataloader_test.py b/cv/detection/ssd/pytorch/base/test/dali_dataloader_test.py deleted file mode 100644 index 6b1d3b484..000000000 --- a/cv/detection/ssd/pytorch/base/test/dali_dataloader_test.py +++ /dev/null @@ -1,48 +0,0 @@ -import sys -sys.path.append("..") - -import torch -from dataloaders import create_train_dataloader, create_eval_dataloader - -if __name__ == "__main__": - class Config(object): - def __init__(self): - pass - - config = Config() - config.data_dir = "/home/data/perf/ssd" - config.input_size = 300 - config.n_gpu = 1 - config.distributed = False - config.local_rank = 0 - config.local_seed = 32 - config.num_workers = 4 - config.train_batch_size = 32 - config.eval_batch_size = 32 - config.fp16 = True - config.fast_nms = True - config.fast_cj = True - config.use_coco_ext = False - config.dali = True - config.dali_sync = False - config.dali_cache = -1 - config.nhwc = True - config.pad_input = True - config.jit = True - config.use_nvjpeg = False - - train_loader, epoch_size, train_sampler = create_train_dataloader(config) - for batch in train_loader: - print(len(batch)) - break - val_loader, inv_map, cocoGt = create_eval_dataloader(config) - from dataloaders.prefetcher import eval_prefetcher - val_loader = eval_prefetcher(iter(val_loader), - torch.cuda.current_device(), - config.pad_input, - config.nhwc, - config.fp16) - for batch in val_loader: - print(len(batch)) - break - print("finished!") \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/base/test/dataloader_test.py b/cv/detection/ssd/pytorch/base/test/dataloader_test.py deleted file mode 100644 index 570c7a256..000000000 --- a/cv/detection/ssd/pytorch/base/test/dataloader_test.py +++ /dev/null @@ -1,47 +0,0 @@ -import sys -sys.path.append("..") - -import torch -from dataloaders import create_train_dataloader, create_eval_dataloader - -if __name__ == "__main__": - class Config(object): - def __init__(self): - pass - - config = Config() - config.data_dir = "/home/data/perf/ssd" - config.input_size = 300 - config.n_gpu = 1 - config.distributed = False - config.local_rank = 0 - config.local_seed = 32 - config.num_workers = 4 - config.train_batch_size = 32 - config.eval_batch_size = 32 - config.fp16 = True - config.fast_nms = False - config.fast_cj = False - config.dali = False - config.dali_sync = False - config.dali_cache = 0 - config.nhwc = False - config.pad_input = False - config.jit = False - config.use_nvjpeg = False - - train_loader, epoch_size, train_sampler = create_train_dataloader(config) - for batch in train_loader: - print(len(batch)) - break - val_loader, inv_map, cocoGt = create_eval_dataloader(config) - from 
dataloaders.prefetcher import eval_prefetcher - val_loader = eval_prefetcher(iter(val_loader), - torch.cuda.current_device(), - config.pad_input, - config.nhwc, - config.fp16) - for batch in val_loader: - print(len(batch)) - break - print("finished!") \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/base/train/__init__.py b/cv/detection/ssd/pytorch/base/train/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cv/detection/ssd/pytorch/base/train/evaluator.py b/cv/detection/ssd/pytorch/base/train/evaluator.py deleted file mode 100644 index 1132827e7..000000000 --- a/cv/detection/ssd/pytorch/base/train/evaluator.py +++ /dev/null @@ -1,116 +0,0 @@ -from concurrent.futures import ProcessPoolExecutor -import sys - -import numpy as np -from pycocotools.cocoeval import COCOeval -import torch -import torch.distributed as dist - - -import utils -from dataloaders.dataloader import create_eval_dataloader -from dataloaders.prefetcher import eval_prefetcher - -import config -from box_coder import build_ssd300_coder - -class Evaluator: - - def __init__(self, config): - self.config = config - self.eval_count = 0 - - self._dataloader = None - self.fetch_dataloader() - - self.ret = [] - - self.overlap_threshold = 0.50 - self.nms_max_detections = 200 - self.encoder = build_ssd300_coder(config.fast_nms) - - def fetch_dataloader(self): - if self._dataloader is None: - self._dataloader, self.inv_map, self.cocoGt = create_eval_dataloader(config) - return self._dataloader - - def evaluate_coco(self, final_results, cocoGt): - if self.config.use_coco_ext: - cocoDt = cocoGt.loadRes(final_results, use_ext=True) - E = COCOeval(cocoGt, cocoDt, iouType='bbox', use_ext=True) - else: - cocoDt = cocoGt.loadRes(final_results) - E = COCOeval(cocoGt, cocoDt, iouType='bbox') - E.evaluate() - E.accumulate() - E.summarize() - print("Current AP: {:.5f} AP".format(E.stats[0])) - return E.stats[0] - - def evaluate(self, trainer): - self.eval_count += 1 - eval_dataloader = eval_prefetcher(iter(self._dataloader), - torch.cuda.current_device(), - config.pad_input, - config.nhwc, - config.fp16) - trainer.model.eval() - ret = [] - with torch.no_grad(): - for batch in eval_dataloader: - img, img_id, img_size = batch - _, ploc, plabel = trainer.inference(img) - - # torch.save({ - # "bbox": ploc, - # "scores": plabel, - # "criteria": self.overlap_threshold, - # "max_output": self.nms_max_detections, - # }, "decode_inputs_{}.pth".format(config.local_rank)) - # exit() - - for idx in range(ploc.shape[0]): - # ease-of-use for specific predictions - ploc_i = ploc[idx, :, :].unsqueeze(0) - plabel_i = plabel[idx, :, :].unsqueeze(0) - - result = self.encoder.decode_batch(ploc_i, plabel_i, self.overlap_threshold, self.nms_max_detections)[0] - - htot, wtot = img_size[0][idx].item(), img_size[1][idx].item() - loc, label, prob = [r.cpu().numpy() for r in result] - for loc_, label_, prob_ in zip(loc, label, prob): - ret.append([img_id[idx], loc_[0] * wtot, \ - loc_[1] * htot, - (loc_[2] - loc_[0]) * wtot, - (loc_[3] - loc_[1]) * htot, - prob_, - self.inv_map[label_]]) - - trainer.model.train() - ret = np.array(ret).astype(np.float32) - if self.config.distributed: - ret_copy = torch.tensor(ret).cuda() - ret_sizes = [torch.tensor(0).cuda() for _ in range(config.n_gpu)] - torch.distributed.all_gather(ret_sizes, torch.tensor(ret_copy.shape[0]).cuda()) - max_size = 0 - sizes = [] - for s in ret_sizes: - max_size = max(max_size, s.item()) - sizes.append(s.item()) - ret_pad = torch.cat([ret_copy, torch.zeros(max_size 
- ret_copy.shape[0], 7, dtype=torch.float32).cuda()]) - other_ret = [torch.zeros(max_size, 7, dtype=torch.float32).cuda() for i in range(config.n_gpu)] - torch.distributed.all_gather(other_ret, ret_pad) - cat_tensors = [] - for i in range(config.n_gpu): - cat_tensors.append(other_ret[i][:sizes[i]][:]) - - final_results = torch.cat(cat_tensors).cpu().numpy() - else: - final_results = ret - - if utils.is_main_process(): - eval_ap = self.evaluate_coco(final_results, self.cocoGt) - return eval_ap - else: - return 0 - diff --git a/cv/detection/ssd/pytorch/base/train/event/__init__.py b/cv/detection/ssd/pytorch/base/train/event/__init__.py deleted file mode 100644 index 2cd8c9e60..000000000 --- a/cv/detection/ssd/pytorch/base/train/event/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .base import BaseTrainingEventInterface -from .base_adapter import BaseTrainingEventAdapter -from .compose import TrainingEventCompose -from .log import TrainingLogger \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/base/train/event/base.py b/cv/detection/ssd/pytorch/base/train/event/base.py deleted file mode 100644 index d9d76fafa..000000000 --- a/cv/detection/ssd/pytorch/base/train/event/base.py +++ /dev/null @@ -1,64 +0,0 @@ -from typing import Tuple, List - -import torch.nn -from torch import Tensor -from torch.cuda.amp import GradScaler -from torch.optim import Optimizer - -from train.training_state import TrainingState - - -SSD_MODEL = torch.nn.Module -BatchType = Tuple[Tensor, Tensor, Tensor] - -class BaseTrainingEventInterface(object): - - def __init__(self, config): - self.config = config - - def save_checkpoint(self, path: str, training_state: TrainingState): - pass - - def load_checkpoint(self, checkpoint): - pass - - def convert_model(self, model: SSD_MODEL) -> SSD_MODEL: - return model - - def create_optimizer(self, model: SSD_MODEL) -> Optimizer: - raise NotImplementedError() - - def model_to_fp16(self, model: SSD_MODEL, optimizer: Optimizer) -> Tuple[SSD_MODEL, Optimizer]: - return model, optimizer - - def model_to_ddp(self, model: SSD_MODEL) -> SSD_MODEL: - return model - - def on_init_start(self): - pass - - def on_init_end(self): - pass - - def on_backward(self, step: int, loss: Tensor, optimizer: Optimizer, grad_scaler: GradScaler=None): - pass - - def on_train_begin(self): - pass - - def on_train_end(self): - pass - - def on_epoch_begin(self, epoch: int): - pass - - def on_epoch_end(self, epoch: int): - pass - - def on_step_begin(self, step: int): - pass - - def on_step_end(self, step: int): - pass - - diff --git a/cv/detection/ssd/pytorch/base/train/event/base_adapter.py b/cv/detection/ssd/pytorch/base/train/event/base_adapter.py deleted file mode 100644 index 5c6b9b1c0..000000000 --- a/cv/detection/ssd/pytorch/base/train/event/base_adapter.py +++ /dev/null @@ -1,30 +0,0 @@ -from torch.optim import Optimizer - -from .base import BaseTrainingEventInterface - - -class BaseTrainingEventMix: - - def launch(self): - pass - - def create_optimizer(self, optimizer: Optimizer): - pass - - def on_init_evaluate(self, result: dict): - pass - - def on_evaluate(self, result: dict): - pass - - def on_step_end(self, step: int, result: dict = None): - pass - - -class BaseTrainingEventAdapter(BaseTrainingEventMix, BaseTrainingEventInterface): - pass - - - - - diff --git a/cv/detection/ssd/pytorch/base/train/event/compose.py b/cv/detection/ssd/pytorch/base/train/event/compose.py deleted file mode 100644 index a112f32e1..000000000 --- a/cv/detection/ssd/pytorch/base/train/event/compose.py +++ 
/dev/null @@ -1,109 +0,0 @@ -from typing import List, Union, Callable, Tuple - -from torch import Tensor -from torch.cuda.amp import GradScaler -from torch.optim import Optimizer - -from .base import BaseTrainingEventInterface as TrainingEventInterface, SSD_MODEL -from .base_adapter import BaseTrainingEventMix, BaseTrainingEventAdapter -from train.training_state import TrainingState - - -class TrainingEventCompose(BaseTrainingEventAdapter): - - def __init__(self, interface: TrainingEventInterface, events: List[BaseTrainingEventAdapter]): - super(TrainingEventCompose, self).__init__(interface.config) - - self.interface = interface - self.events = events - - def launch(self): - self._call_events_func(self.launch, with_interface=False) - - def save_checkpoint(self, path: str, training_state: TrainingState): - self.interface.save_checkpoint(path, training_state) - self._call_events_func(self.save_checkpoint, with_interface=False, path=path, training_state=training_state) - - def load_checkpoint(self, checkpoint): - self.interface.load_checkpoint(checkpoint) - self._call_events_func(self.load_checkpoint, with_interface=False, checkpoint=checkpoint) - - def convert_model(self, model: SSD_MODEL) -> SSD_MODEL: - model = self.interface.convert_model(model) - self._call_events_func(self.convert_model, with_interface=False, model=model) - return model - - def create_optimizer(self, model: SSD_MODEL) -> Optimizer: - optimizer = self.interface.create_optimizer(model) - self._call_events_func(self.create_optimizer, with_interface=False, optimizer=optimizer) - return optimizer - - def model_to_fp16(self, model: SSD_MODEL, optimizer: Optimizer) -> Tuple[SSD_MODEL, Optimizer]: - model, optimizer = self.interface.model_to_fp16(model, optimizer) - self._call_events_func(self.model_to_fp16, with_interface=False, model=model, optimizer=optimizer) - return model, optimizer - - def model_to_ddp(self, model: SSD_MODEL) -> SSD_MODEL: - model = self.interface.model_to_ddp(model) - self._call_events_func(self.model_to_ddp, with_interface=False, model=model) - return model - - def on_init_evaluate(self, result: dict): - self._call_events_func(self.on_init_evaluate, with_interface=False, result=result) - - def on_evaluate(self, result: dict): - self._call_events_func(self.on_evaluate, with_interface=False, result=result) - - def on_init_start(self): - self._call_events_func(self.on_init_start, with_interface=True) - - def on_init_end(self): - self._call_events_func(self.on_init_end, with_interface=True) - - def on_backward(self, step: int, loss: Tensor, optimizer: Optimizer, grad_scaler: GradScaler = None): - return self.interface.on_backward(step, loss, optimizer, grad_scaler) - - def on_train_begin(self): - self._call_events_func(self.on_train_begin, with_interface=True) - - def on_train_end(self): - self._call_events_func(self.on_train_end, with_interface=True) - - def on_epoch_begin(self, epoch: int): - self._call_events_func(self.on_epoch_begin, with_interface=True, epoch=epoch) - - def on_epoch_end(self, epoch: int): - self._call_events_func(self.on_epoch_end, with_interface=True, epoch=epoch) - - def on_step_begin(self, step: int): - self._call_events_func(self.on_step_begin, with_interface=True, step=step) - - def on_step_end(self, step: int, result: dict = None): - self.interface.on_step_end(step) - self._call_events_func(self.on_step_end, with_interface=False, step=step, result=result) - - def _call_events_func(self, func: Union[str, Callable], with_interface=False, *args, **kwargs): - func_name = 
self._get_func_name(func) - events = self.events - if with_interface: - events = [self.interface] + events - - result = [] - for event in events: - ret = None - if hasattr(event, func_name): - ret = getattr(event, func_name)(*args, **kwargs) - result.append(ret) - return result - - def _get_func_name(self, func: Union[str, Callable]): - if isinstance(func, str): - return func - - if callable(func): - return func.__name__ - - return None - - - diff --git a/cv/detection/ssd/pytorch/base/train/event/log.py b/cv/detection/ssd/pytorch/base/train/event/log.py deleted file mode 100644 index 083ded561..000000000 --- a/cv/detection/ssd/pytorch/base/train/event/log.py +++ /dev/null @@ -1,145 +0,0 @@ -import copy -import inspect -import os -import os.path as ospath -from typing import Tuple, Union, Iterable - -from torch import Tensor -from torch.cuda.amp import GradScaler -from torch.optim import Optimizer - -from config.config_manager import get_properties_from_config -from utils.logging import PerfLogger, LogEvent, PerfLogLevel -from .base import SSD_MODEL -from .base_adapter import BaseTrainingEventAdapter - - -STACKLEVEL = 4 - - -class TrainingLogger(BaseTrainingEventAdapter): - - def __init__(self, config, logger: PerfLogger=None, log_freq: int = 0): - super(TrainingLogger, self).__init__(config) - self.config = config - self.log_freq = log_freq - level = PerfLogLevel.INFO if log_freq > 0 else PerfLogLevel.SUBMITTION - if logger is None: - logger = PerfLogger.get_default_logger(rank=config.local_rank, level=level) - self.logger = logger - - self.model = None - self.submitter = None - - def launch(self): - self.logger.log(LogEvent.launch_training, "Launch training", stacklevel=STACKLEVEL) - config_path: str = self.config.config - config_dict = get_properties_from_config(self.config) - for key, value in config_dict.items(): - if type(value) not in [int, float, str, bool] and not isinstance(value, Iterable): - config_dict[key] = str(value) - - # Extract definition of training event - try: - training_event_class = self.config.training_event - if not inspect.isclass(training_event_class): - training_event_class = training_event_class.__class__ - training_event_class_define = inspect.getabsfile(training_event_class) - training_event_class_define = training_event_class_define.rsplit(".py", maxsplit=1)[0] - training_event_class_define += ":" + training_event_class.__name__ - except: - training_event_class_define = str(self.config.training_event) - config_dict['training_event'] = training_event_class_define - - # Like /path/to/proj/submitter/model/config/config_xxx.py - if config_path.startswith("."): - config_path = ospath.abspath(config_path) - - config_path_nodes = config_path.rsplit(sep="/", maxsplit=4) - submitter = config_path_nodes[1] - model = config_path_nodes[2] - self.logger.init_logger(submitter=submitter, - model=model, - config_path=config_path, - config=config_dict, - stacklevel=STACKLEVEL) - - self.model = model - self.submitter = submitter - - def convert_model(self, model: SSD_MODEL): - model_class = type(model) - model_info = dict( - type = model_class.__name__, - module = model_class.__module__ if hasattr(model_class, "__module__") else "None" - ) - self._log_event(LogEvent.convert_model, model_info) - - def create_optimizer(self, optimizer: Optimizer): - optimizer_class = type(optimizer) - optimizer_info = dict( - type=optimizer_class.__name__, - module=optimizer_class.__module__ if hasattr(optimizer_class, "__module__") else "None" - ) - self._log_event(LogEvent.create_optimizer, 
optimizer_info) - - def model_to_fp16(self, model: SSD_MODEL, optimizer: Optimizer): - fp16_info = dict( - fp16 = self.config.fp16 if hasattr(self.config, "fp16") else False - ) - self._log_event(LogEvent.model_to_fp16, fp16_info) - - def model_to_ddp(self, model: SSD_MODEL): - model_class = type(model) - model_info = dict( - type=model_class.__name__, - module=model_class.__module__ if hasattr(model_class, "__module__") else None - ) - self._log_event(LogEvent.model_to_ddp, model_info) - - def on_init_evaluate(self, result: dict): - self._log_event(LogEvent.init_evaluation, result) - - def on_evaluate(self, result: dict): - self._log_event(LogEvent.evaluation, result) - - def on_init_start(self): - self._log_event(LogEvent.init_start) - - def on_init_end(self): - self._log_event(LogEvent.init_end, "Finish initialization") - - def on_backward(self, step: int, loss: Tensor, optimizer: Optimizer, grad_scaler: GradScaler=None): - pass - - def on_train_begin(self): - self._log_event(LogEvent.train_begin) - - def on_train_end(self): - self._log_event(LogEvent.train_end) - - def on_epoch_begin(self, epoch: int): - epoch_info = dict(epoch=epoch) - self._log_event(LogEvent.epoch_begin, epoch_info) - - def on_epoch_end(self, epoch: int): - epoch_info = dict(epoch=epoch) - self._log_event(LogEvent.epoch_end, epoch_info) - - def on_step_begin(self, step: int): - pass - - def on_step_end(self, step: int, result: dict=None): - if (self.log_freq <= 0 or step % self.log_freq != 0) and step != 1: - return - if result is None: - step_info = dict() - else: - step_info = copy.copy(result) - - step_info['step'] = step - self._log_event(LogEvent.step_end, step_info) - - def _log_event(self, event, *args, **kwargs): - self.logger.log(event, stacklevel=STACKLEVEL, *args, **kwargs) - diff --git a/cv/detection/ssd/pytorch/base/train/trainer.py b/cv/detection/ssd/pytorch/base/train/trainer.py deleted file mode 100644 index e4739b26e..000000000 --- a/cv/detection/ssd/pytorch/base/train/trainer.py +++ /dev/null @@ -1,229 +0,0 @@ -import time -import os -import sys -from bisect import bisect -from typing import Union - -import numpy as np -import torch -from torch.cuda.amp import GradScaler - -import utils -from model import create_model -from train.evaluator import Evaluator -from train.training_state import TrainingState -from train.event import TrainingEventCompose as TrainingEvent -from box_coder import dboxes300_coco - -from model.losses import OptLoss, Loss - - -Device = Union[torch.device, str, None] - - -def lr_warmup(optim, warmup_iter, iter_num, base_lr, args): - if iter_num < warmup_iter: - warmup_step = base_lr / (warmup_iter * (2 ** args.warmup_factor)) - new_lr = base_lr - (warmup_iter - iter_num) * warmup_step - for param_group in optim.param_groups: - param_group['lr'] = new_lr - return new_lr - else: - return base_lr - - -class Trainer(object): - - def __init__(self, config, training_event: TrainingEvent, - evaluator: Evaluator, - training_state: TrainingState, - grad_scaler: GradScaler, - device: Device): - super(Trainer, self).__init__() - self.config = config - self.training_event = training_event - self.training_state = training_state - self.grad_scaler = grad_scaler - - self.device = device - self.optimizer = None - self.model = None - self.evaluator = evaluator - self.success = torch.zeros(1).cuda() - dboxes = dboxes300_coco() - if self.config.dali: - self.loss_fun = OptLoss().cuda() - else: - self.loss_fun = Loss(dboxes).cuda() - - def init(self): - self.model = create_model(self.config) - 
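(For reference, a minimal sketch of the linear warm-up rule that the lr_warmup helper above applies to the optimizer's param groups; the function name linear_warmup_lr and the default warmup_factor=0 are assumptions for illustration only and are not part of the original sources.)

def linear_warmup_lr(iter_num, warmup_iter, base_lr, warmup_factor=0):
    # Same arithmetic as lr_warmup above, written as a pure function of the step index.
    if iter_num >= warmup_iter:
        return base_lr
    warmup_step = base_lr / (warmup_iter * (2 ** warmup_factor))
    return base_lr - (warmup_iter - iter_num) * warmup_step

# Example: with base_lr=0.1, warmup_iter=4, warmup_factor=0 the schedule is
# 0.0, 0.025, 0.05, 0.075 for iterations 0-3, then stays at 0.1 from iteration 4 on.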
self.model = self.training_event.convert_model(self.model) - self.optimizer = self.training_event.create_optimizer(self.model) - self.model, self.optimizer = self.training_event.model_to_fp16(self.model, self.optimizer) - self.model = self.training_event.model_to_ddp(self.model) - # self.training_state.base_lr = self.optimizer.param_groups[0]['lr'] - self.training_state.base_lr = self.optimizer.defaults['lr'] - if utils.is_main_process(): - print("==="*20) - print("config lr: {}, optimizer lr: {}".format(self.config.learning_rate, self.training_state.base_lr)) - print("==="*20) - - self._init_model() - self.model.train() - self._verify_model() - - def _init_model(self): - if self.config.checkpoint: - checkpoint = torch.load(self.config.checkpoint, map_location="cpu") - self.training_event.load_checkpoint(checkpoint) - self.training_state.iter_num = self.config.iteration - - def _verify_model(self): - input_c = 4 if self.config.pad_input else 3 - if self.config.nhwc: - example_shape = [self.config.train_batch_size, 300, 300, input_c] - else: - example_shape = [self.config.train_batch_size, input_c, 300, 300] - example_input = torch.randn(*example_shape).cuda() - if self.config.fp16: - example_input = example_input.half() - if self.config.jit: - # DDP has some Python-side control flow. If we JIT the entire DDP-wrapped module, - # the resulting ScriptModule will elide this control flow, resulting in allreduce - # hooks not being called. If we're running distributed, we need to extract and JIT - # the wrapped .module. - # Replacing a DDP-ed ssd300 with a script_module might also cause the AccumulateGrad hooks - # to go out of scope, and therefore silently disappear. - module_to_jit = self.model.module if self.config.distributed else self.model - if self.config.distributed: - self.model.module = torch.jit.trace(module_to_jit, example_input, check_trace=False) - else: - self.model = torch.jit.trace(module_to_jit, example_input, check_trace=False) - ploc, plabel = self.model(example_input) - loss = ploc[0, 0, 0] + plabel[0, 0, 0] - dloss = torch.randn_like(loss) - # Cause cudnnFind for dgrad, wgrad to run - loss.backward(dloss) - for p in self.model.parameters(): - p.grad = None - - def train_one_epoch(self, train_dataloader): - if self.training_state.epoch in self.config.lr_decay_epochs: - self.training_state.base_lr *= self.config.lr_decay_factor - print(self.config.local_rank, "base_lr decay step #" + str(bisect(self.config.lr_decay_epochs, self.training_state.epoch))) - for param_group in self.optimizer.param_groups: - param_group['lr'] = self.training_state.base_lr - if self.training_state.epoch <= self.config.epoch: - print("Start continue training from epoch: {}, iter: {}, skip epoch {}".format(self.config.epoch, self.config.iteration, self.training_state.epoch)) - return - - self.training_event.on_epoch_begin(self.training_state.epoch) - step_start_time = time.time() - for batch in train_dataloader: - # print([len(x) for x in batch]) - img, bbox, label = batch - if not self.config.dali: - img = img.cuda() - bbox = bbox.cuda() - label = label.cuda() - - self.training_state.lr = lr_warmup(self.optimizer, self.config.warmup, self.training_state.iter_num, self.training_state.base_lr, self.config) - if (img is None) or (bbox is None) or (label is None): - print("No labels in batch") - continue - self.training_event.on_step_begin(self.training_state.iter_num) - self.train_one_step(img, bbox, label) - - other_state = dict() - if self.training_state.iter_num % 
self.config.gradient_accumulation_steps == 0: - step_end_time = time.time() - step_total_time = step_end_time - step_start_time - fps = (utils.global_batch_size(self.config) * self.config.gradient_accumulation_steps) / step_total_time - other_state["avg_samples/s"] = fps - step_start_time = step_end_time - - step_info = self.training_state.to_dict(**other_state) - self.training_event.on_step_end(self.training_state.iter_num, result=step_info) - self.training_state.iter_num += 1 - - if self.config.dali: - train_dataloader.reset() - if self.training_state.epoch in self.config.evaluation: - if self.config.distributed: - world_size = float(utils.get_world_size()) - for bn_name, bn_buf in self.model.module.named_buffers(recurse=True): - if ('running_mean' in bn_name) or ('running_var' in bn_name): - torch.distributed.all_reduce(bn_buf, op=torch.distributed.ReduceOp.SUM) - bn_buf /= world_size - - eval_start = time.time() - self.training_state.eval_ap = self.evaluator.evaluate(self) - eval_end = time.time() - eval_result = dict(epoch=self.training_state.epoch, - eval_ap=self.training_state.eval_ap, - time=eval_end - eval_start) - self.training_event.on_evaluate(eval_result) - if utils.is_main_process(): - if self.config.save_checkpoint: - print("saving model...") - if not os.path.isdir(self.config.output): - os.mkdir(self.config.output) - self.training_event.save_checkpoint(self.config.output, self.training_state) - self.detect_training_status(self.training_state) - if self.training_state.converged: - self.success = torch.ones(1).cuda() - if self.config.distributed: - torch.distributed.broadcast(self.success, 0) - if self.success[0]: - print("Process {} train success!".format(self.config.local_rank)) - self.training_state.end_training = True - self.training_event.on_epoch_end(self.training_state.epoch) - - def train_one_step(self, img, bbox, label): - self.training_state.loss, _, _ = self.forward(img, bbox, label) - if self.training_state.epoch == self.config.epoch + 1 and self.training_state.iter_num == self.config.iteration: - self.training_state.avg_loss = self.training_state.loss.item() - else: - if np.isfinite(self.training_state.loss.item()): - self.training_state.avg_loss = 0.999 * self.training_state.avg_loss + 0.001 * self.training_state.loss.item() - else: - print("model exploded (corrupted by Inf or Nan)") - sys.exit() - self.training_event.on_backward(self.training_state.iter_num, self.training_state.loss, self.optimizer, self.grad_scaler) - - def forward(self, img, bbox, label, training=True): - # origin input shape is: (bs, 3, 300, 300) - # using dali and nhwc input shape is: (bs, 300, 300, 4) - ploc, plabel = self.model(img) - ploc, plabel = ploc.float(), plabel.float() - if training: - N = img.shape[0] - bbox.requires_grad = False - label.requires_grad = False - # reshape (N*8732X4 -> Nx8732x4) and transpose (Nx8732x4 -> Nx4x8732) - bbox = bbox.view(N, -1, 4).transpose(1, 2).contiguous() - # reshape (N*8732 -> Nx8732) and cast to Long - label = label.view(N, -1).long() - - # torch.save({ - # "ploc": ploc, - # "plabel": plabel, - # "gloc": bbox, - # "glabel": label, - # }, "loss.pth_{}".format(self.config.local_rank)) - # exit() - loss = self.loss_fun(ploc, plabel, bbox, label) - return loss, None, None - else: - return None, ploc, plabel - - def inference(self, img): - return self.forward(img, None, None, False) - - def detect_training_status(self, training_state: TrainingState): - if training_state.eval_ap >= self.config.threshold: - training_state.converged_success() - if 
training_state.converged or training_state.epoch >= self.config.epochs: - training_state.end_training = True - return training_state.end_training \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/base/train/training_state.py b/cv/detection/ssd/pytorch/base/train/training_state.py deleted file mode 100644 index a33cb19d0..000000000 --- a/cv/detection/ssd/pytorch/base/train/training_state.py +++ /dev/null @@ -1,53 +0,0 @@ - -import torch -import utils - - -class TrainingState: - _trainer = None - _status = 'aborted' # later set to 'success' if termination criteria met - - iter_num = 0 - - loss: float = 0.0 - avg_loss: float = 0.0 - base_lr: float = 0.0 - lr: float = 0.0 - - epoch: int = 0 - end_training: bool = False - converged: bool = False - - eval_ap = 0 - - init_time = 0 - raw_train_time = 0 - - def status(self): - if self.converged: - self._status = "success" - return self._status - - def converged_success(self): - self.end_training = True - self.converged = True - - def to_dict(self, **kwargs): - state_dict = dict() - - for var_name, value in self.__dict__.items(): - if not var_name.startswith("_") and utils.is_property(value): - state_dict[var_name] = value - - exclude = ["eval_ap", "converged", "init_time", "raw_train_time"] - for exkey in exclude: - if exkey in state_dict: - state_dict.pop(exkey) - - state_dict.update(kwargs) - - for k in state_dict.keys(): - if torch.is_tensor(state_dict[k]): - state_dict[k] = state_dict[k].item() - - return state_dict diff --git a/cv/detection/ssd/pytorch/base/utils/__init__.py b/cv/detection/ssd/pytorch/base/utils/__init__.py deleted file mode 100644 index 9c0ed2c68..000000000 --- a/cv/detection/ssd/pytorch/base/utils/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -import inspect - -from .check import check_config -from .dist import * - -def is_property(value): - status = [ - not callable(value), - not inspect.isclass(value), - not inspect.ismodule(value), - not inspect.ismethod(value), - not inspect.isfunction(value), - not inspect.isbuiltin(value), - "classmethod object" not in str(value) - ] - - return all(status) \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/base/utils/check.py b/cv/detection/ssd/pytorch/base/utils/check.py deleted file mode 100644 index 9b5f9ea19..000000000 --- a/cv/detection/ssd/pytorch/base/utils/check.py +++ /dev/null @@ -1,72 +0,0 @@ -import os - -import torch - - -def get_config_arg(config, name): - if hasattr(config, name): - value = getattr(config, name) - if value is not None: - return value - - if name in os.environ: - return os.environ[name] - - return None - - -def check_config(config): - print("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( - config.device, config.n_gpu, config.local_rank != -1, config.fp16)) - - if config.gradient_accumulation_steps < 1: - raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( - config.gradient_accumulation_steps)) - - # if config.fp16: - # assert config.opt_level == 2 - - # nhwc can only be used with fp16 - if config.nhwc: - assert config.fp16 - - # input padding can only be used with NHWC - if config.pad_input: - assert config.nhwc - - # no dali can only be used with NCHW and no padding - if not config.dali: - assert (not config.nhwc) - assert (not config.pad_input) - assert (not config.use_nvjpeg) - assert (not config.dali_cache) - - if config.dali_cache > 0: - assert config.use_nvjpeg - - if config.jit: - assert config.nhwc # jit can not be applied with apex::syncbn used for 
non-nhwc - - -# Check that the run is valid for specified group BN arg -def validate_group_bn(bn_groups): - if torch.distributed.is_initialized(): - world_size = torch.distributed.get_world_size() - else: - world_size = 1 - - # Can't have larger group than ranks - assert(bn_groups <= world_size) - - # must have only complete groups - assert(world_size % bn_groups == 0) - - - - - - - - - - diff --git a/cv/detection/ssd/pytorch/base/utils/dist.py b/cv/detection/ssd/pytorch/base/utils/dist.py deleted file mode 100644 index e3549c125..000000000 --- a/cv/detection/ssd/pytorch/base/utils/dist.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -import numpy as np -import torch -import torch.distributed as dist - -from contextlib import contextmanager -import random - -from .check import validate_group_bn - - -def generate_seeds(rng, size): - """ - Generate list of random seeds - - :param rng: random number generator - :param size: length of the returned list - """ - seeds = [rng.randint(0, 2**32 - 1) for _ in range(size)] - return seeds - - -def broadcast_seeds(seeds, device): - """ - Broadcasts random seeds to all distributed workers. - Returns list of random seeds (broadcasted from workers with rank 0). - - :param seeds: list of seeds (integers) - :param device: torch.device - """ - if torch.distributed.is_available() and torch.distributed.is_initialized(): - seeds_tensor = torch.LongTensor(seeds).to(device) - torch.distributed.broadcast(seeds_tensor, 0) - seeds = seeds_tensor.tolist() - return seeds - - -def setup_seeds(config): - torch.cuda.set_device(config.local_rank) - config.local_seed = (config.seed + get_rank()) % 2**32 - print(get_rank(), "Using seed = {}".format(config.local_seed)) - random.seed(config.local_seed) - torch.manual_seed(config.local_seed) - np.random.seed(seed=config.local_seed) - return - - -def barrier(): - """ - Works as a temporary distributed barrier, currently pytorch - doesn't implement barrier for NCCL backend. - Calls all_reduce on dummy tensor and synchronizes with GPU. - """ - if torch.distributed.is_available() and torch.distributed.is_initialized(): - torch.distributed.all_reduce(torch.cuda.FloatTensor(1)) - torch.cuda.synchronize() - - -def get_rank(default=0): - """ - Gets distributed rank or returns zero if distributed is not initialized. - """ - if torch.distributed.is_available() and torch.distributed.is_initialized(): - rank = torch.distributed.get_rank() - else: - rank = default - return rank - - -def get_world_size(): - """ - Gets total number of distributed workers or returns one if distributed is - not initialized. 
- """ - if torch.distributed.is_available() and torch.distributed.is_initialized(): - world_size = torch.distributed.get_world_size() - else: - world_size = 1 - return world_size - - -def main_proc_print(*args, **kwargs): - if is_main_process(): - print(*args, **kwargs) - - -def set_device(cuda, local_rank): - """ - Sets device based on local_rank and returns instance of torch.device. - - :param cuda: if True: use cuda - :param local_rank: local rank of the worker - """ - if cuda: - torch.cuda.set_device(local_rank) - device = torch.device('cuda') - else: - device = torch.device('cpu') - return device - - -def init_dist_training_env(config): - if config.distributed is False: - device = torch.device("cuda") - num_gpus = 1 - else: - torch.cuda.set_device(config.local_rank) - device = torch.device("cuda", config.local_rank) - host_addr_full = 'tcp://' + os.environ["MASTER_ADDR"] + ':' + os.environ["MASTER_PORT"] - rank = int(os.environ["RANK"]) - world_size = int(os.environ["WORLD_SIZE"]) - torch.distributed.init_process_group(backend=config.dist_backend, init_method=host_addr_full, rank=rank, world_size=world_size) - num_gpus = torch.distributed.get_world_size() - validate_group_bn(config.bn_group) - return device, num_gpus - - -def global_batch_size(config): - return config.train_batch_size * config.n_gpu - - -@contextmanager -def sync_workers(): - """ - Yields distributed rank and synchronizes all workers on exit. - """ - rank = get_rank() - yield rank - barrier() - - -def is_main_process(): - if dist.is_initialized(): - if "LOCAL_RANK" in os.environ: - return int(os.environ["LOCAL_RANK"]) == 0 - else: - return get_rank() == 0 - - return True - - -def format_step(step): - if isinstance(step, str): - return step - s = "" - if len(step) > 0: - s += "Training Epoch: {} ".format(step[0]) - if len(step) > 1: - s += "Training Iteration: {} ".format(step[1]) - if len(step) > 2: - s += "Validation Iteration: {} ".format(step[2]) - return s diff --git a/cv/detection/ssd/pytorch/base/utils/logging.py b/cv/detection/ssd/pytorch/base/utils/logging.py deleted file mode 100644 index 2f576619b..000000000 --- a/cv/detection/ssd/pytorch/base/utils/logging.py +++ /dev/null @@ -1,235 +0,0 @@ -import os -import sys -import time -import logging -import json -from logging import currentframe -from typing import NamedTuple, Union, Tuple, Optional -from collections import OrderedDict - -from enum import IntEnum - - -_srcfile = os.path.normcase(logging.addLevelName.__code__.co_filename) - - -class LogKeys: - default_logger_name = "PerfLogger" - - # Log format - log_header = "PerfLog" - log_template = "[{header}] {message}" - - # Submitted info - submmiter: str = "submmiter" - model: str = "model" - optimizer_type: str = "optimizer_type" - config: str = "config" - config_path: str = "config_path" - - # Event - event: str = "event" - value: str = "value" - - # Metadata - metadata: str = "metadata" - called_log_file = "file" - called_log_file_lineno = "lineno" - time_ms = "time_ms" - rank = "rank" - - # Other message - other_message: str = "other" - - -class PerfLogLevel(IntEnum): - - INFO = 100 - SUBMITTION = 101 - - @staticmethod - def from_string(level: str): - return PerfLogLevel.__dict__[level.upper()] - - @classmethod - def register_to_logging(cls, logging): - for level_name, level in PerfLogLevel.__dict__.items(): - if isinstance(level, cls): - logging.addLevelName(level.value, level_name) - - -PerfLogLevel.register_to_logging(logging) - - -class LogEventField(NamedTuple): - - name: str - rank: Union[int, 
list] = -1 - level: PerfLogLevel = PerfLogLevel.SUBMITTION - - -class LogEvent: - - submitted_info = LogEventField("SUBMITTED_INFO", rank=0) - launch_training = LogEventField("LAUNCH_TRAINING") - convert_model = LogEventField("CONVERT_MODEL", rank=0) - create_optimizer = LogEventField("CREATE_OPTIMIZER", rank=0) - model_to_fp16 = LogEventField("MODEL_TO_FP16", rank=0) - model_to_ddp = LogEventField("MODEL_TO_DDP", rank=0) - init_start = LogEventField("INIT_START", rank=0) - init_end = LogEventField("INIT_END", rank=0) - train_begin = LogEventField("TRAIN_BEGIN", rank=0) - train_end = LogEventField("TRAIN_END", rank=0) - epoch_begin = LogEventField("EPOCH_BEGIN", rank=0, level=PerfLogLevel.INFO) - epoch_end = LogEventField("EPOCH_END", rank=0, level=PerfLogLevel.INFO) - step_begin = LogEventField("STEP_BEGIN", rank=0, level=PerfLogLevel.INFO) - step_end = LogEventField("STEP_END", rank=0, level=PerfLogLevel.INFO) - init_evaluation = LogEventField("INIT_EVALUATION", rank=0) - evaluation = LogEventField("EVALUATION", rank=0) - finished = LogEventField("FINISHED", rank=0) - - @staticmethod - def from_string(key: str): - return LogEvent.__dict__[key.lower()] - - -class PerfLogger: - - _singleton = None - - def __init__(self, rank: int, - level: Union[str, PerfLogLevel]=PerfLogLevel.SUBMITTION, - logger: logging.Logger=None): - self.rank = rank - - if isinstance(level, str): - level = PerfLogLevel.from_string(level) - self.level = level - - if logger is None: - logger = logging.Logger(LogKeys.default_logger_name) - - self.logger = logger - - self.previous_log_time = None - - @property - def _current_time_ms(self): - current = int(time.time() * 1e3) - self.previous_log_time = current - return current - - def init_logger(self, submitter: str, model: str, config_path: str, config: dict, *args, **kwargs): - message = { - LogKeys.submmiter: submitter, - LogKeys.model: model, - LogKeys.config_path: config_path, - LogKeys.config: config - } - - self.log(LogEvent.submitted_info, message, *args, **kwargs) - - - def log(self, event: Union[str, LogEventField], message: Optional[Union[str, dict]]=None, *args, **kwargs): - if isinstance(event, str): - event = LogEvent.from_string(event) - - show_log = any([ - event.rank == 0 and self.rank == 0, - event.rank == -1, - ]) and any([ - event.level == PerfLogLevel.SUBMITTION, - event.level == self.level - ]) - - if not show_log: - return - - stacklevel = 1 - if "stacklevel" in kwargs: - stacklevel = kwargs.pop("stacklevel") - - call_info = self.get_caller(stacklevel=stacklevel) - - message = self._encode_message(event, message, call_info) - self.logger.log(self.level.value, message, *args, **kwargs) - - def _encode_message(self, event: LogEventField, - message: Union[str, dict], - call_info: Tuple[str, int]) -> str: - if isinstance(message, str): - message ={LogKeys.other_message: message} - message = OrderedDict({ - LogKeys.event: event.name, - LogKeys.value: message - }) - called_file, lineno = call_info - metadata = { - LogKeys.called_log_file: called_file, - LogKeys.called_log_file_lineno: lineno, - LogKeys.time_ms: self._current_time_ms, - LogKeys.rank: self.rank - } - - message[LogKeys.metadata] = metadata - message = json.dumps(message) - - return self._log_template(message) - - def _log_template(self, message: str): - return LogKeys.log_template.format(header=LogKeys.log_header, message=message) - - def get_caller(self, stacklevel=1) -> Tuple[str, int]: - f = currentframe() - - if stacklevel == 0: - default_file_name = f.f_code.co_filename - default_lineno 
= f.f_lineno - return (default_file_name, default_lineno) - - # On some versions of IronPython, currentframe() returns None if - # IronPython isn't run with -X:Frames. - if f is not None: - f = f.f_back - orig_f = f - while f and stacklevel > 1: - f = f.f_back - stacklevel -= 1 - if not f: - f = orig_f - rv = ("(unknown file)", -1) - - while hasattr(f, "f_code"): - co = f.f_code - filename = os.path.normcase(co.co_filename) - if filename == _srcfile: - f = f.f_back - continue - rv = (co.co_filename, f.f_lineno) - break - return rv - - - @classmethod - def get_default_logger(cls, rank: int=-1, - level: Union[str, PerfLogLevel]=PerfLogLevel.SUBMITTION, - logger: logging.Logger=None): - if cls._singleton is None: - cls._singleton = cls(rank=rank, level=level, logger=logger) - - return cls._singleton - - - - - - - - - - - - - - - diff --git a/cv/detection/ssd/pytorch/base/utils/paths.py b/cv/detection/ssd/pytorch/base/utils/paths.py deleted file mode 100644 index 37691d743..000000000 --- a/cv/detection/ssd/pytorch/base/utils/paths.py +++ /dev/null @@ -1,18 +0,0 @@ -import os.path as ospath - - -MODEL_DIR = ospath.abspath( - ospath.join( - __file__, - "../../../../" - ) -) - -CURRENT_MODEL_NAME = ospath.basename(MODEL_DIR) - -PROJ_DIR = ospath.abspath( - ospath.join( - MODEL_DIR, - "pytorch" - ) -) diff --git a/cv/detection/ssd/pytorch/base/model/losses/loss.py b/cv/detection/ssd/pytorch/base_model.py similarity index 100% rename from cv/detection/ssd/pytorch/base/model/losses/loss.py rename to cv/detection/ssd/pytorch/base_model.py index ae2db5d13..c177b5c0c 100644 --- a/cv/detection/ssd/pytorch/base/model/losses/loss.py +++ b/cv/detection/ssd/pytorch/base_model.py @@ -1,5 +1,5 @@ -import torch.nn as nn import torch +import torch.nn as nn class Loss(nn.Module): diff --git a/cv/detection/ssd/pytorch/bind.sh b/cv/detection/ssd/pytorch/bind.sh new file mode 100644 index 000000000..dd100a3de --- /dev/null +++ b/cv/detection/ssd/pytorch/bind.sh @@ -0,0 +1,212 @@ +#! /bin/bash +set -euo pipefail + +print_usage() { + cat << EOF +${0} [options] [--] COMMAND [ARG...] + +Control binding policy for each task. Assumes one rank will be launched for each GPU. + +Options: + --cpu=MODE + * exclusive -- bind each rank to an exclusive set of cores near its GPU + * exclusive,nosmt -- bind each rank to an exclusive set of cores near its GPU, without hyperthreading + * node -- bind each rank to all cores in the NUMA node nearest its GPU [default] + * *.sh -- bind each rank using the bash associative array bind_cpu_cores or bind_cpu_nodes from a file + * off -- don't bind + --mem=MODE + * node -- bind each rank to the nearest NUMA node [default] + * *.sh -- bind each rank using the bash associative array bind_mem from a file + * off -- don't bind + --ib=MODE + * single -- bind each rank to a single IB device near its GPU + * off -- don't bind [default] + --cluster=CLUSTER + Select which cluster is being used. May be required if system params cannot be detected. 
+EOF +} + +################################################################################ +# Argument parsing +################################################################################ + +cpu_mode='node' +mem_mode='node' +ib_mode='off' +cluster='' +while [ $# -gt 0 ]; do + case "$1" in + -h|--help) print_usage ; exit 0 ;; + --cpu=*) cpu_mode="${1/*=/}"; shift ;; + --cpu) cpu_mode="$2"; shift 2 ;; + --mem=*) mem_mode="${1/*=/}"; shift ;; + --mem) mem_mode="$2"; shift 2 ;; + --ib=*) ib_mode="${1/*=/}"; shift ;; + --ib) ib_mode="$2"; shift 2 ;; + --cluster=*) cluster="${1/*=/}"; shift ;; + --cluster) cluster="$2"; shift 2 ;; + --) shift; break ;; + *) break ;; + esac +done +if [ $# -lt 1 ]; then + echo 'ERROR: no command given' 2>&1 + print_usage + exit 1 +fi + +################################################################################ +# Get system params +################################################################################ + +# LOCAL_RANK is set with an enroot hook for Pytorch containers +# SLURM_LOCALID is set by Slurm +# OMPI_COMM_WORLD_LOCAL_RANK is set by mpirun +readonly local_rank="${LOCAL_RANK:=${SLURM_LOCALID:=${OMPI_COMM_WORLD_LOCAL_RANK:-}}}" +if [ -z "${local_rank}" ]; then + echo 'ERROR: cannot read LOCAL_RANK from env' >&2 + exit 1 +fi + +num_gpus=$(nvidia-smi -i 0 --query-gpu=count --format=csv,noheader,nounits) +if [ "${local_rank}" -ge "${num_gpus}" ]; then + echo "ERROR: local rank is ${local_rank}, but there are only ${num_gpus} gpus available" >&2 + exit 1 +fi + +get_lscpu_value() { + awk -F: "(\$1 == \"${1}\"){gsub(/ /, \"\", \$2); print \$2; found=1} END{exit found!=1}" +} +lscpu_out=$(lscpu) +num_sockets=$(get_lscpu_value 'Socket(s)' <<< "${lscpu_out}") +num_nodes=$(get_lscpu_value 'NUMA node(s)' <<< "${lscpu_out}") +cores_per_socket=$(get_lscpu_value 'Core(s) per socket' <<< "${lscpu_out}") + +echo "num_sockets = ${num_sockets} num_nodes=${num_nodes} cores_per_socket=${cores_per_socket}" + +readonly cores_per_node=$(( (num_sockets * cores_per_socket) / num_nodes )) +if [ ${num_gpus} -gt 1 ]; then + readonly gpus_per_node=$(( num_gpus / num_nodes )) +else + readonly gpus_per_node=1 +fi +readonly cores_per_gpu=$(( cores_per_node / gpus_per_node )) +readonly local_node=$(( local_rank / gpus_per_node )) + + +declare -a ibdevs=() +case "${cluster}" in + circe) + # Need to specialize for circe because IB detection is hard + ibdevs=(mlx5_1 mlx5_2 mlx5_3 mlx5_4 mlx5_7 mlx5_8 mlx5_9 mlx5_10) + ;; + selene) + # Need to specialize for selene because IB detection is hard + ibdevs=(mlx5_0 mlx5_1 mlx5_2 mlx5_3 mlx5_6 mlx5_7 mlx5_8 mlx5_9) + ;; + '') + if ibstat_out="$(ibstat -l 2>/dev/null | sort -V)" ; then + mapfile -t ibdevs <<< "${ibstat_out}" + fi + ;; + *) + echo "ERROR: Unknown cluster '${cluster}'" >&2 + exit 1 + ;; +esac +readonly num_ibdevs="${#ibdevs[@]}" + +################################################################################ +# Setup for exec +################################################################################ + +declare -a numactl_args=() + +case "${cpu_mode}" in + exclusive) + numactl_args+=( "$(printf -- "--physcpubind=%u-%u,%u-%u" \ + $(( local_rank * cores_per_gpu )) \ + $(( (local_rank + 1) * cores_per_gpu - 1 )) \ + $(( local_rank * cores_per_gpu + (cores_per_gpu * gpus_per_node * num_nodes) )) \ + $(( (local_rank + 1) * cores_per_gpu + (cores_per_gpu * gpus_per_node * num_nodes) - 1 )) \ + )" ) + ;; + exclusive,nosmt) + numactl_args+=( "$(printf -- "--physcpubind=%u-%u" \ + $(( local_rank * cores_per_gpu )) \ + 
$(( (local_rank + 1) * cores_per_gpu - 1 )) \ + )" ) + ;; + node) + numactl_args+=( "--cpunodebind=${local_node}" ) + ;; + *.sh) + source "${cpu_mode}" + if [ -n "${bind_cpu_cores:-}" ]; then + numactl_args+=( "--physcpubind=${bind_cpu_cores[${local_rank}]}" ) + elif [ -n "${bind_cpu_nodes:-}" ]; then + numactl_args+=( "--cpunodebind=${bind_cpu_nodes[${local_rank}]}" ) + else + echo "ERROR: invalid CPU affinity file ${cpu_mode}." >&2 + exit 1 + fi + ;; + off|'') + ;; + *) + echo "ERROR: invalid cpu mode '${cpu_mode}'" 2>&1 + print_usage + exit 1 + ;; +esac + +case "${mem_mode}" in + node) + numactl_args+=( "--membind=${local_node}" ) + ;; + *.sh) + source "${mem_mode}" + if [ -z "${bind_mem:-}" ]; then + echo "ERROR: invalid memory affinity file ${mem_mode}." >&2 + exit 1 + fi + numactl_args+=( "--membind=${bind_mem[${local_rank}]}" ) + ;; + off|'') + ;; + *) + echo "ERROR: invalid mem mode '${mem_mode}'" 2>&1 + print_usage + exit 1 + ;; +esac + +case "${ib_mode}" in + single) + if [ "${num_ibdevs}" -eq 0 ]; then + echo "WARNING: used '$0 --ib=single', but there are 0 IB devices available; skipping IB binding." 2>&1 + else + readonly ibdev="${ibdevs[$(( local_rank * num_ibdevs / num_gpus ))]}" + export OMPI_MCA_btl_openib_if_include="${OMPI_MCA_btl_openib_if_include-$ibdev}" + export UCX_NET_DEVICES="${UCX_NET_DEVICES-$ibdev:1}" + fi + ;; + off|'') + ;; + *) + echo "ERROR: invalid ib mode '${ib_mode}'" 2>&1 + print_usage + exit 1 + ;; +esac + +################################################################################ +# Exec +################################################################################ + +if [ "${#numactl_args[@]}" -gt 0 ] ; then + set -x + exec numactl "${numactl_args[@]}" -- "${@}" +else + exec "${@}" +fi diff --git a/cv/detection/ssd/pytorch/base/bind_launch.py b/cv/detection/ssd/pytorch/bind_launch.py similarity index 75% rename from cv/detection/ssd/pytorch/base/bind_launch.py rename to cv/detection/ssd/pytorch/bind_launch.py index f1fb7423e..0e985a8f2 100644 --- a/cv/detection/ssd/pytorch/base/bind_launch.py +++ b/cv/detection/ssd/pytorch/bind_launch.py @@ -1,32 +1,17 @@ import sys import subprocess import os -import os.path as ospath -from argparse import ArgumentParser +import socket +from argparse import ArgumentParser, REMAINDER - -MODEL_DIR = ospath.abspath( - ospath.join( - __file__, - "../../../" - ) -) - -MODEL = ospath.basename(MODEL_DIR) - -PROJ_DIR = ospath.abspath( - ospath.join( - MODEL_DIR, - "pytorch" - ) -) - - -def _parse_known_args(parser, *args, **kwargs): - return parser.parse_known_args(*args, **kwargs) +import torch def parse_args(): + """ + Helper function parsing the command line options + @retval ArgumentParser + """ parser = ArgumentParser(description="PyTorch distributed training launch " "helper utilty that will spawn up " "multiple distributed processes") @@ -70,35 +55,16 @@ def parse_args(): "followed by all the arguments for the " "training script") - parser.add_argument("--config", type=str, required=True) - parser.add_argument("--name", type=str, required=True) - - args, training_script_args = _parse_known_args(parser) - args.training_script_args = training_script_args - - return args - - -def get_cuda_visible_devices(gpus=1): - if "CUDA_VISIBLE_DEVICES" in os.environ: - return os.environ['CUDA_VISIBLE_DEVICES'] - return ','.join([str(gpu_id) for gpu_id in range(gpus)]) - + # rest from the training program + parser.add_argument('training_script_args', nargs=REMAINDER) + return parser.parse_args() def main(): args = 
parse_args() - config_full_name = f"config_{args.config}.py" - config_path = ospath.join(PROJ_DIR, args.name, "config", config_full_name) - - _, args.nnodes, args.nproc_per_node = args.config.split("x") - - args.nnodes = int(args.nnodes) - args.nproc_per_node = int(args.nproc_per_node) # variables for numactrl binding - NSOCKETS = args.nsockets_per_node - NGPUS_PER_SOCKET = (args.nproc_per_node // args.nsockets_per_node) + (1 if (args.nproc_per_node % args.nsockets_per_node) else 0) + NGPUS_PER_SOCKET = args.nproc_per_node // args.nsockets_per_node NCORES_PER_GPU = args.ncores_per_socket // NGPUS_PER_SOCKET NCORES_PER_GPU_REMAIN=args.ncores_per_socket - NGPUS_PER_SOCKET*NCORES_PER_GPU @@ -110,8 +76,6 @@ def main(): current_env["MASTER_ADDR"] = args.master_addr current_env["MASTER_PORT"] = str(args.master_port) current_env["WORLD_SIZE"] = str(dist_world_size) - current_env["NODE_RANK"] = str(args.node_rank) - current_env["CUDA_VISIBLE_DEVICES"] = get_cuda_visible_devices(args.nproc_per_node) processes = [] @@ -119,7 +83,6 @@ def main(): # each process's rank dist_rank = args.nproc_per_node * args.node_rank + local_rank current_env["RANK"] = str(dist_rank) - current_env["LOCAL_RANK"] = str(local_rank) # Instead of binding to a set of cores which this task has exclusive access to, # bind to all cores on the local NUMA node (may share them with other ranks) @@ -150,17 +113,18 @@ def main(): args.training_script, "--local_rank={}".format(local_rank) ] \ - + args.training_script_args + [f"{config_path}"] + + args.training_script_args - print("=" * 80) - print("= numactlargs_flag") - print(cmd) - print("=" * 80) process = subprocess.Popen(cmd, env=current_env) processes.append(process) + proc_status = [] + for process in processes: process.wait() + proc_status.append(process.returncode != 0) + + exit(all(proc_status)) if __name__ == "__main__": diff --git a/cv/detection/ssd/pytorch/base/box_coder.py b/cv/detection/ssd/pytorch/box_coder.py similarity index 76% rename from cv/detection/ssd/pytorch/base/box_coder.py rename to cv/detection/ssd/pytorch/box_coder.py index d4b7e6871..1f6722ad3 100644 --- a/cv/detection/ssd/pytorch/base/box_coder.py +++ b/cv/detection/ssd/pytorch/box_coder.py @@ -2,12 +2,38 @@ import random import torch import torch.nn.functional as F +from SSD import _C as C import numpy as np import itertools from math import sqrt +def generate_bbox_stacks(bboxes): + offsets = [0] + + for bbox in bboxes: + offsets.append(bbox.shape[0] + offsets[-1]) + + offsets = torch.tensor(np.array(offsets).astype(np.int32)).cuda() + + return len(bboxes), torch.cat(bboxes), offsets, bboxes + + +def load_bboxes(box_list, random_rows=True): + tensor_list = [] + + for b in box_list: + if random_rows: + n_rows = random.randint(1, b.shape[0]) + t = torch.tensor(np.array(b[0:n_rows, :]).astype(np.float32)).cuda() + else: + t = torch.tensor(np.array(b).astype(np.float32)).cuda() + tensor_list.append(t) + + return generate_bbox_stacks(tensor_list) + + def calc_iou_tensor(box1, box2): """ Calculation of IoU based on two boxes tensor, Reference to https://github.com/kuangliu/pytorch-ssd @@ -102,7 +128,6 @@ class DefaultBoxes(object): if order == "ltrb": return self.dboxes_ltrb if order == "xywh": return self.dboxes - # This class is from https://github.com/kuangliu/pytorch-ssd class Encoder(object): """ @@ -125,14 +150,13 @@ class Encoder(object): max_output : maximum number of output bboxes """ - def __init__(self, dboxes, fast_nms=False): + def __init__(self, dboxes): self.dboxes = dboxes(order="ltrb") 
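(As a reading aid for the IoU-based matching used in this file, below is a hedged, self-contained sketch of the pairwise IoU that calc_iou_tensor computes for boxes in ltrb order; pairwise_iou is a hypothetical name and this helper is not part of the original sources.)

import torch

def pairwise_iou(box1, box2):
    # box1: (N, 4) tensor, box2: (M, 4) tensor, both (left, top, right, bottom); returns (N, M).
    lt = torch.max(box1[:, None, :2], box2[None, :, :2])  # top-left corner of each intersection
    rb = torch.min(box1[:, None, 2:], box2[None, :, 2:])  # bottom-right corner of each intersection
    wh = (rb - lt).clamp(min=0)                           # zero out pairs with no overlap
    inter = wh[..., 0] * wh[..., 1]
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
    return inter / (area1[:, None] + area2[None, :] - inter)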
self.dboxes_xywh = dboxes(order="xywh").unsqueeze(dim=0) self.nboxes = self.dboxes.size(0) #print("# Bounding boxes: {}".format(self.nboxes)) self.scale_xy = dboxes.scale_xy self.scale_wh = dboxes.scale_wh - self.fast_nms = fast_nms # self.dboxes = self.dboxes.cuda() # self.dboxes_xywh = self.dboxes_xywh.cuda() @@ -140,6 +164,12 @@ class Encoder(object): def encode(self, bboxes_in, labels_in, criteria = 0.5): try: + # source boxes + # N, bboxes_cat, offsets, bboxes = load_bboxes([bboxes_in, self.dboxes]) + # # target boxes + # _, _, _, targets = load_bboxes([self.dboxes]) + # ious = C.calc_ious(N, bboxes_cat, offsets, *targets) + ious = calc_iou_tensor(bboxes_in, self.dboxes) best_dbox_ious, best_dbox_idx = ious.max(dim=0) best_bbox_ious, best_bbox_idx = ious.max(dim=1) @@ -155,6 +185,15 @@ class Encoder(object): labels_out = torch.zeros(self.nboxes, dtype=torch.long) #print(maxloc.shape, labels_in.shape, labels_out.shape) + #print("labels_out") + #print(labels_out.shape) + #print("masks") + #print(masks.shape) + #print("labels_in") + #print(labels_in.shape) + #print("best_dbox_idx") + #print(best_dbox_idx.shape) + labels_out[masks] = labels_in[best_dbox_idx[masks]] bboxes_out = self.dboxes.clone() bboxes_out[masks, :] = bboxes_in[best_dbox_idx[masks], :] @@ -205,7 +244,7 @@ class Encoder(object): return bboxes_in, F.softmax(scores_in, dim=-1) - def decode_batch(self, bboxes_in, scores_in, criteria=0.45, max_output=200, nms_valid_thresh=0.05): + def decode_batch(self, bboxes_in, scores_in, criteria = 0.45, max_output=200): bboxes, probs = self.scale_back_batch(bboxes_in, scores_in) output = [] @@ -213,77 +252,13 @@ class Encoder(object): for bbox, prob in zip(bboxes.split(1, 0), probs.split(1, 0)): bbox = bbox.squeeze(0) prob = prob.squeeze(0) - if self.fast_nms: - output.append(self.fast_decode_single(bbox, prob, criteria, max_output, - nms_valid_thresh=nms_valid_thresh)) - else: - try: - output.append(self.decode_single(bbox, prob, criteria, max_output, - nms_valid_thresh=nms_valid_thresh)) - except: - output.append([ - torch.Tensor([]).reshape(0, 4).to(bbox.device), \ - torch.tensor([], dtype=torch.long), \ - torch.Tensor([]).to(bbox.device) - ]) - return output + output.append(self.decode_single(bbox, prob, criteria, max_output)) #print(output[-1]) return output # perform non-maximum suppression - def decode_single(self, bboxes_in, scores_in, criteria, max_output, - max_num=200, nms_valid_thresh=0.05): - # Reference to https://github.com/amdegroot/ssd.pytorch - bboxes_out = [] - scores_out = [] - labels_out = [] - - for i, score in enumerate(scores_in.split(1, 1)): - # skip background - # print(score[score>0.90]) - if i == 0: continue - # print(i) - - score = score.squeeze(1) - mask = score > nms_valid_thresh - - bboxes, score = bboxes_in[mask, :], score[mask] - if score.size(0) == 0: continue - - score_sorted, score_idx_sorted = score.sort(dim=0) - - # select max_output indices - score_idx_sorted = score_idx_sorted[-max_num:] - candidates = [] - # maxdata, maxloc = scores_in.sort() - - while score_idx_sorted.numel() > 0: - idx = score_idx_sorted[-1].item() - bboxes_sorted = bboxes[score_idx_sorted, :] - bboxes_idx = bboxes[idx, :].unsqueeze(dim=0) - iou_sorted = calc_iou_tensor(bboxes_sorted, - bboxes_idx).squeeze() - # we only need iou < criteria - score_idx_sorted = score_idx_sorted[iou_sorted < criteria] - candidates.append(idx) - - bboxes_out.append(bboxes[candidates, :]) - scores_out.append(score[candidates]) - labels_out.extend([i] * len(candidates)) - - bboxes_out, labels_out, 
scores_out = torch.cat(bboxes_out, dim=0), \
- torch.tensor(labels_out,
- dtype=torch.long), \
- torch.cat(scores_out, dim=0)
-
- _, max_ids = scores_out.sort(dim=0)
- max_ids = max_ids[-max_output:]
- return bboxes_out[max_ids, :], labels_out[max_ids], scores_out[max_ids]
-
- # perform non-maximum suppression
- def fast_decode_single(self, bboxes_in, scores_in, criteria, max_output, max_num=200, nms_valid_thresh=0.05):
+ def decode_single(self, bboxes_in, scores_in, criteria, max_output, max_num=200):
 # Reference to https://github.com/amdegroot/ssd.pytorch
- from SSD import _C as C
 bboxes_out = []
 scores_out = []
@@ -336,5 +311,5 @@ def dboxes300_coco():
 dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
 return dboxes
-def build_ssd300_coder(fast_nms):
- return Encoder(dboxes300_coco(), fast_nms)
+def build_ssd300_coder():
+ return Encoder(dboxes300_coco())
diff --git a/cv/detection/ssd/pytorch/build_ssd.sh b/cv/detection/ssd/pytorch/build_ssd.sh
new file mode 100644
index 000000000..084de47ad
--- /dev/null
+++ b/cv/detection/ssd/pytorch/build_ssd.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+PYTHON_PATH=$(which python3)
+${PYTHON_PATH} -m pip list | grep "^torch .*+corex" || {
+ echo "ERROR: building SSD requires the corex torch to be installed."
+ exit 1
+}
+
+a=$(pip3 show torch|awk '/Version:/ {print $NF}'); b=(${a//+/ }); c=(${b//./ })
+if [[ ${c[0]} -eq 1 ]]; then
+ rm -rf csrc && ln -s csrc_pt1 csrc
+elif [[ ${c[0]} -eq 2 ]]; then
+ rm -rf csrc && ln -s csrc_pt2 csrc
+else
+ echo "ERROR: torch version ${a} is not as expected, please check."
+ exit 1
+fi
+
+
+${PYTHON_PATH} setup.py build 2>&1 | tee compile.log; [[ ${PIPESTATUS[0]} == 0 ]] || exit
+${PYTHON_PATH} setup.py bdist_wheel -d build_pip || exit
+rm -rf SSD.egg-info
+
+# Return 0 status if all finished
+exit 0
diff --git a/cv/detection/ssd/pytorch/clean_ssd.sh b/cv/detection/ssd/pytorch/clean_ssd.sh
new file mode 100644
index 000000000..569634adc
--- /dev/null
+++ b/cv/detection/ssd/pytorch/clean_ssd.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+PYTHON_PATH=$(which python3)
+
+rm -rf build
+${PYTHON_PATH} setup.py clean || true
+rm -rf build_pip
+
+# Return 0 status if all finished
+exit 0
diff --git a/cv/detection/ssd/pytorch/iluvatar/csrc/box_encoder_cuda.cu b/cv/detection/ssd/pytorch/csrc_pt1/box_encoder_cuda.cu
similarity index 99%
rename from cv/detection/ssd/pytorch/iluvatar/csrc/box_encoder_cuda.cu
rename to cv/detection/ssd/pytorch/csrc_pt1/box_encoder_cuda.cu
index e9b311fab..641af7ab3 100644
--- a/cv/detection/ssd/pytorch/iluvatar/csrc/box_encoder_cuda.cu
+++ b/cv/detection/ssd/pytorch/csrc_pt1/box_encoder_cuda.cu
@@ -19,14 +19,14 @@
 #include
 #include
-#include
-#include
+// #include
+// #include
 #include
 #include
 #include
-
+#define THCudaCheck(x) C10_CUDA_CHECK(x)
 //#define DEBUG
 // calculate the IoU of a single box against another box
diff --git a/cv/detection/ssd/pytorch/iluvatar/csrc/interface.cpp b/cv/detection/ssd/pytorch/csrc_pt1/interface.cpp
similarity index 100%
rename from cv/detection/ssd/pytorch/iluvatar/csrc/interface.cpp
rename to cv/detection/ssd/pytorch/csrc_pt1/interface.cpp
diff --git a/cv/detection/ssd/pytorch/nvidia/csrc/nhwc/Descriptors.cpp b/cv/detection/ssd/pytorch/csrc_pt1/nhwc/Descriptors.cpp
similarity index 100%
rename from cv/detection/ssd/pytorch/nvidia/csrc/nhwc/Descriptors.cpp
rename to cv/detection/ssd/pytorch/csrc_pt1/nhwc/Descriptors.cpp
diff --git a/cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/Descriptors.h b/cv/detection/ssd/pytorch/csrc_pt1/nhwc/Descriptors.h
similarity
index 99% rename from cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/Descriptors.h rename to cv/detection/ssd/pytorch/csrc_pt1/nhwc/Descriptors.h index f8c03619b..280d069a7 100644 --- a/cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/Descriptors.h +++ b/cv/detection/ssd/pytorch/csrc_pt1/nhwc/Descriptors.h @@ -21,6 +21,7 @@ #include "Exceptions.h" +#include #include #include #include diff --git a/cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/Exceptions.h b/cv/detection/ssd/pytorch/csrc_pt1/nhwc/Exceptions.h similarity index 100% rename from cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/Exceptions.h rename to cv/detection/ssd/pytorch/csrc_pt1/nhwc/Exceptions.h diff --git a/cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/ParamsHash.h b/cv/detection/ssd/pytorch/csrc_pt1/nhwc/ParamsHash.h similarity index 100% rename from cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/ParamsHash.h rename to cv/detection/ssd/pytorch/csrc_pt1/nhwc/ParamsHash.h diff --git a/cv/detection/ssd/pytorch/nvidia/csrc/nhwc/batch_norm.cu b/cv/detection/ssd/pytorch/csrc_pt1/nhwc/batch_norm.cu similarity index 96% rename from cv/detection/ssd/pytorch/nvidia/csrc/nhwc/batch_norm.cu rename to cv/detection/ssd/pytorch/csrc_pt1/nhwc/batch_norm.cu index b1e88a47d..8294132bc 100644 --- a/cv/detection/ssd/pytorch/nvidia/csrc/nhwc/batch_norm.cu +++ b/cv/detection/ssd/pytorch/csrc_pt1/nhwc/batch_norm.cu @@ -20,13 +20,13 @@ #include #include #include -#include +// #include #include #include -#include "THC/THC.h" - +// #include "THC/THC.h" +#include #include "Descriptors.h" #include @@ -41,19 +41,23 @@ const float BN_MIN_EPSILON = 1e-4; // tensor instead. struct Workspace { Workspace(size_t size) : size(size), data(NULL) { - data = THCudaMalloc(at::globalContext().lazyInitCUDA(), size); + // data = THCudaMalloc(at::globalContext().lazyInitCUDA(), size); + auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); + dataPtr = allocator.allocate(size); + data = dataPtr.get(); } Workspace(const Workspace&) = delete; Workspace(Workspace&&) = default; Workspace& operator=(Workspace&&) = default; - ~Workspace() { - if (data) { - THCudaFree(at::globalContext().lazyInitCUDA(), data); - } - } - + // ~Workspace() { + // if (data) { + // THCudaFree(at::globalContext().lazyInitCUDA(), data); + // } + // } + ~Workspace() = default; size_t size; void* data; + c10::DataPtr dataPtr; }; // Return {y. save_mean, save_var, reserve} diff --git a/cv/detection/ssd/pytorch/nvidia/csrc/nhwc/conv.cpp b/cv/detection/ssd/pytorch/csrc_pt1/nhwc/conv.cpp similarity index 98% rename from cv/detection/ssd/pytorch/nvidia/csrc/nhwc/conv.cpp rename to cv/detection/ssd/pytorch/csrc_pt1/nhwc/conv.cpp index d8c975570..3c9a29b17 100644 --- a/cv/detection/ssd/pytorch/nvidia/csrc/nhwc/conv.cpp +++ b/cv/detection/ssd/pytorch/csrc_pt1/nhwc/conv.cpp @@ -23,8 +23,8 @@ #include #include -#include "THC/THC.h" - +// #include "THC/THC.h" +#include #include #include "Descriptors.h" // #include @@ -41,7 +41,7 @@ #include #include #include - +#define THCudaCheck(x) C10_CUDA_CHECK(x) namespace at { namespace native { namespace nhwc { // TODO: Go through all the checking code again and make sure @@ -358,19 +358,23 @@ BenchmarkCache bwd_filter_algos; // tensor instead. 
struct Workspace { Workspace(size_t size) : size(size), data(NULL) { - data = THCudaMalloc(globalContext().lazyInitCUDA(), size); + // data = THCudaMalloc(globalContext().lazyInitCUDA(), size); + auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); + dataPtr = allocator.allocate(size); + data = dataPtr.get(); } Workspace(const Workspace&) = delete; Workspace(Workspace&&) = default; Workspace& operator=(Workspace&&) = default; - ~Workspace() { - if (data) { - THCudaFree(globalContext().lazyInitCUDA(), data); - } - } - + // ~Workspace() { + // if (data) { + // THCudaFree(globalContext().lazyInitCUDA(), data); + // } + // } + ~Workspace() = default; size_t size; void* data; + c10::DataPtr dataPtr; }; template diff --git a/cv/detection/ssd/pytorch/nvidia/csrc/nhwc/max_pool.cu b/cv/detection/ssd/pytorch/csrc_pt1/nhwc/max_pool.cu similarity index 99% rename from cv/detection/ssd/pytorch/nvidia/csrc/nhwc/max_pool.cu rename to cv/detection/ssd/pytorch/csrc_pt1/nhwc/max_pool.cu index 5bd12266b..a4b39f1c4 100644 --- a/cv/detection/ssd/pytorch/nvidia/csrc/nhwc/max_pool.cu +++ b/cv/detection/ssd/pytorch/csrc_pt1/nhwc/max_pool.cu @@ -20,7 +20,7 @@ #include #include #include -#include +// #include #include "Descriptors.h" diff --git a/cv/detection/ssd/pytorch/nvidia/csrc/nms.cu b/cv/detection/ssd/pytorch/csrc_pt1/nms.cu similarity index 99% rename from cv/detection/ssd/pytorch/nvidia/csrc/nms.cu rename to cv/detection/ssd/pytorch/csrc_pt1/nms.cu index 3cf8bac4b..930af4313 100644 --- a/cv/detection/ssd/pytorch/nvidia/csrc/nms.cu +++ b/cv/detection/ssd/pytorch/csrc_pt1/nms.cu @@ -19,13 +19,14 @@ #include #include -#include -#include +// #include +// #include #include #include #include +#define THCudaCheck(x) C10_CUDA_CHECK(x) namespace nms_internal { diff --git a/cv/detection/ssd/pytorch/nvidia/csrc/random_horiz_flip.cu b/cv/detection/ssd/pytorch/csrc_pt1/random_horiz_flip.cu similarity index 98% rename from cv/detection/ssd/pytorch/nvidia/csrc/random_horiz_flip.cu rename to cv/detection/ssd/pytorch/csrc_pt1/random_horiz_flip.cu index ff906e1f6..68bb33b0e 100644 --- a/cv/detection/ssd/pytorch/nvidia/csrc/random_horiz_flip.cu +++ b/cv/detection/ssd/pytorch/csrc_pt1/random_horiz_flip.cu @@ -19,9 +19,9 @@ #include #include -#include -#include - +// #include +// #include +#define THCudaCheck(x) C10_CUDA_CHECK(x) #include #include diff --git a/cv/detection/ssd/pytorch/nvidia/csrc/box_encoder_cuda.cu b/cv/detection/ssd/pytorch/csrc_pt2/box_encoder_cuda.cu similarity index 99% rename from cv/detection/ssd/pytorch/nvidia/csrc/box_encoder_cuda.cu rename to cv/detection/ssd/pytorch/csrc_pt2/box_encoder_cuda.cu index e9b311fab..641af7ab3 100644 --- a/cv/detection/ssd/pytorch/nvidia/csrc/box_encoder_cuda.cu +++ b/cv/detection/ssd/pytorch/csrc_pt2/box_encoder_cuda.cu @@ -19,14 +19,14 @@ #include #include -#include -#include +// #include +// #include #include #include #include - +#define THCudaCheck(x) C10_CUDA_CHECK(x) //#define DEBUG // calculate the IoU of a single box against another box diff --git a/cv/detection/ssd/pytorch/nvidia/csrc/interface.cpp b/cv/detection/ssd/pytorch/csrc_pt2/interface.cpp similarity index 100% rename from cv/detection/ssd/pytorch/nvidia/csrc/interface.cpp rename to cv/detection/ssd/pytorch/csrc_pt2/interface.cpp diff --git a/cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/Descriptors.cpp b/cv/detection/ssd/pytorch/csrc_pt2/nhwc/Descriptors.cpp similarity index 99% rename from cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/Descriptors.cpp rename to 
cv/detection/ssd/pytorch/csrc_pt2/nhwc/Descriptors.cpp index afe0466c9..f3143f923 100644 --- a/cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/Descriptors.cpp +++ b/cv/detection/ssd/pytorch/csrc_pt2/nhwc/Descriptors.cpp @@ -20,7 +20,7 @@ #include "Descriptors.h" #include -#include + #include #include #include diff --git a/cv/detection/ssd/pytorch/nvidia/csrc/nhwc/Descriptors.h b/cv/detection/ssd/pytorch/csrc_pt2/nhwc/Descriptors.h similarity index 99% rename from cv/detection/ssd/pytorch/nvidia/csrc/nhwc/Descriptors.h rename to cv/detection/ssd/pytorch/csrc_pt2/nhwc/Descriptors.h index c8556f293..280d069a7 100644 --- a/cv/detection/ssd/pytorch/nvidia/csrc/nhwc/Descriptors.h +++ b/cv/detection/ssd/pytorch/csrc_pt2/nhwc/Descriptors.h @@ -21,6 +21,7 @@ #include "Exceptions.h" +#include #include #include #include @@ -29,6 +30,8 @@ #if !defined(TORCH_CUDA_API) && defined(AT_CUDA_API) #define TORCH_CUDA_API AT_CUDA_API +#else +#define TORCH_CUDA_API #endif namespace at { namespace native { namespace nhwc { diff --git a/cv/detection/ssd/pytorch/nvidia/csrc/nhwc/Exceptions.h b/cv/detection/ssd/pytorch/csrc_pt2/nhwc/Exceptions.h similarity index 100% rename from cv/detection/ssd/pytorch/nvidia/csrc/nhwc/Exceptions.h rename to cv/detection/ssd/pytorch/csrc_pt2/nhwc/Exceptions.h diff --git a/cv/detection/ssd/pytorch/nvidia/csrc/nhwc/ParamsHash.h b/cv/detection/ssd/pytorch/csrc_pt2/nhwc/ParamsHash.h similarity index 100% rename from cv/detection/ssd/pytorch/nvidia/csrc/nhwc/ParamsHash.h rename to cv/detection/ssd/pytorch/csrc_pt2/nhwc/ParamsHash.h diff --git a/cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/batch_norm.cu b/cv/detection/ssd/pytorch/csrc_pt2/nhwc/batch_norm.cu similarity index 96% rename from cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/batch_norm.cu rename to cv/detection/ssd/pytorch/csrc_pt2/nhwc/batch_norm.cu index b1e88a47d..8294132bc 100644 --- a/cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/batch_norm.cu +++ b/cv/detection/ssd/pytorch/csrc_pt2/nhwc/batch_norm.cu @@ -20,13 +20,13 @@ #include #include #include -#include +// #include #include #include -#include "THC/THC.h" - +// #include "THC/THC.h" +#include #include "Descriptors.h" #include @@ -41,19 +41,23 @@ const float BN_MIN_EPSILON = 1e-4; // tensor instead. struct Workspace { Workspace(size_t size) : size(size), data(NULL) { - data = THCudaMalloc(at::globalContext().lazyInitCUDA(), size); + // data = THCudaMalloc(at::globalContext().lazyInitCUDA(), size); + auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); + dataPtr = allocator.allocate(size); + data = dataPtr.get(); } Workspace(const Workspace&) = delete; Workspace(Workspace&&) = default; Workspace& operator=(Workspace&&) = default; - ~Workspace() { - if (data) { - THCudaFree(at::globalContext().lazyInitCUDA(), data); - } - } - + // ~Workspace() { + // if (data) { + // THCudaFree(at::globalContext().lazyInitCUDA(), data); + // } + // } + ~Workspace() = default; size_t size; void* data; + c10::DataPtr dataPtr; }; // Return {y. 
save_mean, save_var, reserve} diff --git a/cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/conv.cpp b/cv/detection/ssd/pytorch/csrc_pt2/nhwc/conv.cpp similarity index 98% rename from cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/conv.cpp rename to cv/detection/ssd/pytorch/csrc_pt2/nhwc/conv.cpp index d8c975570..b3b010a63 100644 --- a/cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/conv.cpp +++ b/cv/detection/ssd/pytorch/csrc_pt2/nhwc/conv.cpp @@ -23,8 +23,8 @@ #include #include -#include "THC/THC.h" - +// #include "THC/THC.h" +#include #include #include "Descriptors.h" // #include @@ -41,7 +41,7 @@ #include #include #include - +#define THCudaCheck(x) C10_CUDA_CHECK(x) namespace at { namespace native { namespace nhwc { // TODO: Go through all the checking code again and make sure @@ -358,19 +358,23 @@ BenchmarkCache bwd_filter_algos; // tensor instead. struct Workspace { Workspace(size_t size) : size(size), data(NULL) { - data = THCudaMalloc(globalContext().lazyInitCUDA(), size); + // data = THCudaMalloc(globalContext().lazyInitCUDA(), size); + auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); + dataPtr = allocator.allocate(size); + data = dataPtr.get(); } Workspace(const Workspace&) = delete; Workspace(Workspace&&) = default; Workspace& operator=(Workspace&&) = default; - ~Workspace() { - if (data) { - THCudaFree(globalContext().lazyInitCUDA(), data); - } - } - + // ~Workspace() { + // if (data) { + // THCudaFree(globalContext().lazyInitCUDA(), data); + // } + // } + ~Workspace() = default; size_t size; void* data; + c10::DataPtr dataPtr; }; template @@ -429,7 +433,7 @@ size_t getMaxWorkspaceSize( int device; THCudaCheck(cudaGetDevice(&device)); - c10::cuda::CUDACachingAllocator::cacheInfo(device, &tmp_bytes, &max_block_size); + c10::cuda::CUDACachingAllocator::cacheInfo(device, &max_block_size); for (int i = 0; i < n_algo; i++) { cudnnStatus_t err; diff --git a/cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/max_pool.cu b/cv/detection/ssd/pytorch/csrc_pt2/nhwc/max_pool.cu similarity index 99% rename from cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/max_pool.cu rename to cv/detection/ssd/pytorch/csrc_pt2/nhwc/max_pool.cu index 5bd12266b..a4b39f1c4 100644 --- a/cv/detection/ssd/pytorch/iluvatar/csrc/nhwc/max_pool.cu +++ b/cv/detection/ssd/pytorch/csrc_pt2/nhwc/max_pool.cu @@ -20,7 +20,7 @@ #include #include #include -#include +// #include #include "Descriptors.h" diff --git a/cv/detection/ssd/pytorch/iluvatar/csrc/nms.cu b/cv/detection/ssd/pytorch/csrc_pt2/nms.cu similarity index 99% rename from cv/detection/ssd/pytorch/iluvatar/csrc/nms.cu rename to cv/detection/ssd/pytorch/csrc_pt2/nms.cu index 3cf8bac4b..930af4313 100644 --- a/cv/detection/ssd/pytorch/iluvatar/csrc/nms.cu +++ b/cv/detection/ssd/pytorch/csrc_pt2/nms.cu @@ -19,13 +19,14 @@ #include #include -#include -#include +// #include +// #include #include #include #include +#define THCudaCheck(x) C10_CUDA_CHECK(x) namespace nms_internal { diff --git a/cv/detection/ssd/pytorch/iluvatar/csrc/random_horiz_flip.cu b/cv/detection/ssd/pytorch/csrc_pt2/random_horiz_flip.cu similarity index 97% rename from cv/detection/ssd/pytorch/iluvatar/csrc/random_horiz_flip.cu rename to cv/detection/ssd/pytorch/csrc_pt2/random_horiz_flip.cu index ff906e1f6..cb43a49b4 100644 --- a/cv/detection/ssd/pytorch/iluvatar/csrc/random_horiz_flip.cu +++ b/cv/detection/ssd/pytorch/csrc_pt2/random_horiz_flip.cu @@ -19,9 +19,9 @@ #include #include -#include -#include - +// #include +// #include +#define THCudaCheck(x) C10_CUDA_CHECK(x) #include #include @@ -141,7 +141,7 
@@ std::vector random_horiz_flip( auto stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( - img.type(), + img.scalar_type(), "HorizFlipImagesAndBoxes", [&] { HorizFlipImagesAndBoxes<<>>( diff --git a/cv/detection/ssd/pytorch/base/dataloaders/build_pipeline.py b/cv/detection/ssd/pytorch/data/build_pipeline.py similarity index 58% rename from cv/detection/ssd/pytorch/base/dataloaders/build_pipeline.py rename to cv/detection/ssd/pytorch/data/build_pipeline.py index 17ae06e71..1f4e1b0ee 100644 --- a/cv/detection/ssd/pytorch/base/dataloaders/build_pipeline.py +++ b/cv/detection/ssd/pytorch/data/build_pipeline.py @@ -15,18 +15,20 @@ import torch from .native_pipeline import build_native_pipeline -from .input_iterators import ConvertDaliInputIterator +from .input_iterators import ConvertDaliInputIterator, RateMatcher, FakeInputIterator +from mlperf_logger import log_event +from mlperf_logging.mllog import constants """ Build a train pipe for training (without touching the data) returns train_pipe """ -def prebuild_pipeline(config): - if config.dali: +def prebuild_pipeline(args): + if args.dali: from .dali_pipeline import prebuild_dali_pipeline - return prebuild_dali_pipeline(config) + return prebuild_dali_pipeline(args) else: return None @@ -36,19 +38,28 @@ Build a data pipeline for either training or eval Training : returns loader, epoch_size Eval : returns loader, inv_class_map, cocoGt """ -def build_pipeline(config, training=True, pipe=None): +def build_pipeline(args, training=True, pipe=None): # Handle training / testing differently due to different # outputs. But still want to do this to abstract out the # use of EncodingInputIterator and RateMatcher if training: - if config.dali: + if args.dali: from .dali_pipeline import build_dali_pipeline - train_loader, epoch_size = build_dali_pipeline(config, training=True, pipe=pipe) - train_sampler = None - train_loader = ConvertDaliInputIterator(train_loader) + builder_fn = build_dali_pipeline else: - train_loader, epoch_size, train_sampler = build_native_pipeline(config, training=True, pipe=pipe) - return train_loader, epoch_size, train_sampler - else: - return build_native_pipeline(config, training=False) + builder_fn = build_native_pipeline + train_loader, epoch_size = builder_fn(args, training=True, pipe=pipe) + log_event(key=constants.TRAIN_SAMPLES, value=epoch_size) + + if args.dali: + train_loader = ConvertDaliInputIterator(train_loader) + if args.fake_input: + train_loader = FakeInputIterator(train_loader, epoch_size, args.N_gpu) + + if args.input_batch_multiplier > 1: + train_loader = RateMatcher(input_it=train_loader, output_size=args.batch_size) + + return train_loader, epoch_size + else: + return build_native_pipeline(args, training=False) diff --git a/cv/detection/ssd/pytorch/base/dataloaders/dali_iterator.py b/cv/detection/ssd/pytorch/data/dali_iterator.py similarity index 94% rename from cv/detection/ssd/pytorch/base/dataloaders/dali_iterator.py rename to cv/detection/ssd/pytorch/data/dali_iterator.py index 42ba88166..266ebda80 100644 --- a/cv/detection/ssd/pytorch/base/dataloaders/dali_iterator.py +++ b/cv/detection/ssd/pytorch/data/dali_iterator.py @@ -27,10 +27,12 @@ from nvidia.dali.pipeline import Pipeline import nvidia.dali.ops as ops import nvidia.dali.types as types +import time + # Defines the pipeline for a single GPU for _training_ class COCOPipeline(Pipeline): - def __init__(self, batch_size, device_id, file_root, annotations_file, num_gpus, - anchors_ltrb_list, meta_files_path=None, + def 
__init__(self, batch_size, device_id, file_root, meta_files_path, annotations_file, num_gpus, + anchors_ltrb_list, output_fp16=False, output_nhwc=False, pad_output=False, num_threads=1, seed=15, dali_cache=-1, dali_async=True, use_nvjpeg=False): @@ -40,10 +42,15 @@ class COCOPipeline(Pipeline): exec_async=dali_async) self.use_nvjpeg = use_nvjpeg - try: - shard_id = torch.distributed.get_rank() + #try: + #shard_id = torch.distributed.get_rank() # Note: <= 19.05 was a RuntimeError, 19.06 is now throwing AssertionError - except (RuntimeError, AssertionError): + #except (RuntimeError, AssertionError): + #shard_id = 0 + import torch.distributed as dist + if dist.is_available() and dist.is_initialized(): + shard_id = dist.get_rank() + else: shard_id = 0 if meta_files_path == None: diff --git a/cv/detection/ssd/pytorch/base/dataloaders/dali_pipeline.py b/cv/detection/ssd/pytorch/data/dali_pipeline.py similarity index 42% rename from cv/detection/ssd/pytorch/base/dataloaders/dali_pipeline.py rename to cv/detection/ssd/pytorch/data/dali_pipeline.py index 6d19450d8..b5e5cc26c 100644 --- a/cv/detection/ssd/pytorch/base/dataloaders/dali_pipeline.py +++ b/cv/detection/ssd/pytorch/data/dali_pipeline.py @@ -7,32 +7,27 @@ from box_coder import dboxes300_coco anchors_ltrb_list = dboxes300_coco()("ltrb").numpy().flatten().tolist() -def prebuild_dali_pipeline(config): - """ - Equivalent to SSD Transformer. - :param config: configuration - :return: Dali Pipeline - """ - train_annotate = os.path.join(config.data_dir, "annotations/bbox_only_instances_train2017.json") - train_coco_root = os.path.join(config.data_dir, "train2017") - pipe = COCOPipeline(config.train_batch_size, - config.local_rank, train_coco_root, - train_annotate, config.n_gpu, +def prebuild_dali_pipeline(args): + train_annotate = os.path.join(os.path.dirname(__file__), "../../../../../bbox_only_instances_train2017.json") + train_coco_root = os.path.join(args.data, "train2017") + pipe = COCOPipeline(args.batch_size * args.input_batch_multiplier, + args.local_rank, train_coco_root, + args.meta_files_path, train_annotate, args.N_gpu, anchors_ltrb_list, - num_threads=config.num_workers, - output_fp16=config.fp16, output_nhwc=config.nhwc, - pad_output=config.pad_input, seed=config.local_seed - 2**31, - use_nvjpeg=config.use_nvjpeg, - dali_cache=config.dali_cache, - dali_async=(not config.dali_sync)) + num_threads=args.num_workers, + output_fp16=args.use_fp16, output_nhwc=args.nhwc, + pad_output=args.pad_input, seed=args.local_seed - 2**31, + use_nvjpeg=args.use_nvjpeg, + dali_cache=args.dali_cache, + dali_async=(not args.dali_sync)) pipe.build() return pipe -def build_dali_pipeline(config, training=True, pipe=None): +def build_dali_pipeline(args, training=True, pipe=None): # pipe is prebuilt without touching the data from nvidia.dali.plugin.pytorch import DALIGenericIterator train_loader = DALIGenericIterator(pipelines=[pipe], output_map= ['image', 'bbox', 'label'], - size=pipe.epoch_size()['train_reader'] // config.n_gpu, + size=pipe.epoch_size()['train_reader'] // args.N_gpu, auto_reset=True) return train_loader, pipe.epoch_size()['train_reader'] diff --git a/cv/detection/ssd/pytorch/base/dataloaders/input_iterators.py b/cv/detection/ssd/pytorch/data/input_iterators.py similarity index 99% rename from cv/detection/ssd/pytorch/base/dataloaders/input_iterators.py rename to cv/detection/ssd/pytorch/data/input_iterators.py index fda527e2b..59d86fc81 100644 --- a/cv/detection/ssd/pytorch/base/dataloaders/input_iterators.py +++ 
b/cv/detection/ssd/pytorch/data/input_iterators.py @@ -12,6 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +import torch +from SSD import _C as C + + class ConvertDaliInputIterator(object): def __init__(self, dali_it): self._dali_it = dali_it diff --git a/cv/detection/ssd/pytorch/data/native_pipeline.py b/cv/detection/ssd/pytorch/data/native_pipeline.py new file mode 100644 index 000000000..ebb7b5a30 --- /dev/null +++ b/cv/detection/ssd/pytorch/data/native_pipeline.py @@ -0,0 +1,165 @@ +import torch +import os + +from functools import partial + +from torch.utils.data import DataLoader +from mlperf_logger import log_event +from mlperf_logging.mllog import constants + +from utils import COCODetection, SSDCropping, SSDTransformer, SSDTransformerNoDali +from box_coder import dboxes300_coco +from .sampler import GeneralDistributedSampler + +from pycocotools.coco import COCO +import numpy as np + + +def SSDCollator(batch, is_training=False): + # batch is: [image (300x300) Tensor, image_id, (htot, wtot), bboxes (8732, 4) Tensor, labels (8732) Tensor] + images = [] + image_ids = [] + image_sizes = [] + bboxes = [] + bbox_offsets = [0] + labels = [] + + for item in batch: + images.append(item[0].view(1, *item[0].shape)) + image_ids.append(item[1]) + image_sizes.append(item[2]) + bboxes.append(item[3]) + labels.append(item[4]) + + bbox_offsets.append(bbox_offsets[-1] + item[3].shape[0]) + + images = torch.cat(images) + bbox_offsets = np.array(bbox_offsets).astype(np.int32) + + if is_training: + return [images, torch.cat(bboxes), torch.cat(labels), torch.tensor(bbox_offsets)] + else: + return [images, torch.tensor(image_ids), image_sizes, torch.cat(bboxes), torch.cat(labels), torch.tensor(bbox_offsets)] + + +def SSDCollatorNoDali(batch, is_training=False): + # batch is: [image (300x300) Tensor, image_id, (htot, wtot), bboxes (8732, 4) Tensor, labels (8732) Tensor] + images = [] + image_ids = [] + image_sizes = [] + bboxes = [] + bbox_offsets = [0] + labels = [] + + for img, img_id, img_size, bbox, label in batch: + images.append(img.view(1, *img.shape)) + image_ids.append(img_id) + image_sizes.append(img_size) + bboxes.append(bbox) + labels.append(label) + bbox_offsets.append(bbox_offsets[-1] + bbox.shape[0]) + + images = torch.cat(images) + N = images.shape[0] + bboxes = torch.cat(bboxes).view(N, -1, 4) + labels = torch.cat(labels).view(N, -1) + if is_training: + res = [images, bboxes, labels] + else: + res = [images, torch.tensor(image_ids), image_sizes, torch.cat(bboxes), torch.cat(labels), torch.tensor(bbox_offsets)] + return res + + +def generate_mean_std(args): + mean_val = [0.485, 0.456, 0.406] + std_val = [0.229, 0.224, 0.225] + + if args.pad_input: + mean_val.append(0.) + std_val.append(1.) 
+ mean = torch.tensor(mean_val).cuda() + std = torch.tensor(std_val).cuda() + + if args.nhwc: + view = [1, 1, 1, len(mean_val)] + else: + view = [1, len(mean_val), 1, 1] + + mean = mean.view(*view) + std = std.view(*view) + + if args.use_fp16: + mean = mean.half() + std = std.half() + + return mean, std + +def build_train_pipe(args): + if args.dali: + train_annotate = os.path.join(os.path.dirname(__file__), "../../../../../bbox_only_instances_train2017.json") + else: + train_annotate = os.path.join(args.data, "annotations/instances_train2017.json") + train_coco_root = os.path.join(args.data, "train2017") + + input_size = args.input_size + if args.dali: + train_trans = SSDTransformer((input_size, input_size), val=False) + else: + dboxes = dboxes300_coco() + train_trans = SSDTransformerNoDali(dboxes, (input_size, input_size), val=False) + train_coco = COCODetection(train_coco_root, train_annotate, train_trans) + + if args.distributed: + train_sampler = GeneralDistributedSampler(train_coco, pad=args.pad_input) + else: + train_sampler = None + + if args.dali: + train_loader = DataLoader(train_coco, + batch_size=args.batch_size*args.input_batch_multiplier, + shuffle=(train_sampler is None), + sampler=train_sampler, + num_workers=args.num_workers, + collate_fn=partial(SSDCollator, is_training=True)) + else: + train_loader = DataLoader(train_coco, + batch_size=args.batch_size, + shuffle=(train_sampler is None), + sampler=train_sampler, + num_workers=args.num_workers, + collate_fn=partial(SSDCollatorNoDali, is_training=True) + ) + return train_loader, len(train_loader) + + +def build_eval_pipe(args): + # Paths + val_annotate = os.path.join(os.path.dirname(__file__), "../../../../../bbox_only_instances_val2017.json") + val_coco_root = os.path.join(args.data, "val2017") + + input_size = args.input_size + val_trans = SSDTransformer((input_size, input_size), val=True) + cocoGt = COCO(annotation_file=val_annotate) + val_coco = COCODetection(val_coco_root, val_annotate, val_trans, cocoGt.dataset) + log_event(key=constants.EVAL_SAMPLES, value=len(val_coco)) + + if args.distributed: + val_sampler = GeneralDistributedSampler(val_coco, pad=args.pad_input) + else: + val_sampler = None + + val_dataloader = DataLoader(val_coco, + batch_size=args.eval_batch_size, + shuffle=False, # Note: distributed sampler is shuffled :( + sampler=val_sampler, + num_workers=args.num_workers) + + inv_map = {v:k for k,v in val_coco.label_map.items()} + + return val_dataloader, inv_map, cocoGt + +def build_native_pipeline(args, training=True, pipe=None): + if training: + return build_train_pipe(args) + else: + return build_eval_pipe(args) diff --git a/cv/detection/ssd/pytorch/base/dataloaders/prefetcher.py b/cv/detection/ssd/pytorch/data/prefetcher.py similarity index 100% rename from cv/detection/ssd/pytorch/base/dataloaders/prefetcher.py rename to cv/detection/ssd/pytorch/data/prefetcher.py diff --git a/cv/detection/ssd/pytorch/base/dataloaders/sampler.py b/cv/detection/ssd/pytorch/data/sampler.py similarity index 100% rename from cv/detection/ssd/pytorch/base/dataloaders/sampler.py rename to cv/detection/ssd/pytorch/data/sampler.py diff --git a/cv/detection/ssd/pytorch/default/config/config_V100x1x1.py b/cv/detection/ssd/pytorch/default/config/config_V100x1x1.py deleted file mode 100644 index 11b12e1df..000000000 --- a/cv/detection/ssd/pytorch/default/config/config_V100x1x1.py +++ /dev/null @@ -1,32 +0,0 @@ -from training_event import DefaultTrainingEvent - -# 1.Basic Configurations -n_gpu = 1 -distributed = False 
-dist_backend = "nccl" - -save_checkpoint = False - -seed = 42 -log_freq = 20 - - -# 2.Model Training Configurations -gradient_accumulation_steps = 1 -train_batch_size = 120 -eval_batch_size = 160 -learning_rate = 2.92e-3 -weight_decay_rate = 1.6e-4 -lr_decay_factor = 0.1 -lr_decay_epochs = [40, 50] -warmup = 650 -warmup_factor = 0 -loss_scale = 0.0 - -# 3. Optimizer Configurations -num_workers = 4 -fp16 = False -delay_allreduce = False - - -training_event = DefaultTrainingEvent \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/default/config/config_V100x1x8.py b/cv/detection/ssd/pytorch/default/config/config_V100x1x8.py deleted file mode 100644 index 3932faacf..000000000 --- a/cv/detection/ssd/pytorch/default/config/config_V100x1x8.py +++ /dev/null @@ -1,31 +0,0 @@ -from training_event import DefaultTrainingEvent - -# 1.Basic Configurations -n_gpu = 1 -distributed = True -dist_backend = "nccl" - -save_checkpoint = False - -seed = 4230048668 -log_freq = 20 - - -# 2.Model Training Configurations -gradient_accumulation_steps = 1 -train_batch_size = 128 -eval_batch_size = 160 -learning_rate = 2.5e-3 -weight_decay_rate = 5e-4 -lr_decay_factor = 0.1 -lr_decay_epochs = [40, 50] -warmup = 300 -warmup_factor = 0 - -# 3. Optimizer Configurations -num_workers = 4 -fp16 = False -delay_allreduce = False - - -training_event = DefaultTrainingEvent \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/default/config/config_nv_V100x1x8.py b/cv/detection/ssd/pytorch/default/config/config_nv_V100x1x8.py deleted file mode 100644 index 80d62b686..000000000 --- a/cv/detection/ssd/pytorch/default/config/config_nv_V100x1x8.py +++ /dev/null @@ -1,32 +0,0 @@ -from training_event import DefaultTrainingEvent - -# 1.Basic Configurations -n_gpu = 1 -distributed = True -dist_backend = "nccl" - -save_checkpoint = False - -seed = 4230048668 -log_freq = 20 - - -# 2.Model Training Configurations -gradient_accumulation_steps = 1 -train_batch_size = 120 -eval_batch_size = 160 -learning_rate = 2.92e-3 -weight_decay_rate = 1.6e-4 -lr_decay_factor = 0.1 -lr_decay_epochs = [44, 55] -warmup = 650 -warmup_factor = 0 -loss_scale = 0.0 - -# 3. 
Optimizer Configurations -num_workers = 4 -fp16 = False -delay_allreduce = True - - -training_event = DefaultTrainingEvent \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/default/config/training_event.py b/cv/detection/ssd/pytorch/default/config/training_event.py deleted file mode 100644 index 0c14b73e0..000000000 --- a/cv/detection/ssd/pytorch/default/config/training_event.py +++ /dev/null @@ -1,79 +0,0 @@ -from typing import Tuple - -import torch -import torch.distributed as dist -from torch.cuda.amp import GradScaler -from torch.cuda.amp import autocast -from torch.nn.parallel import DistributedDataParallel as NativeDDP -from torch.optim import Optimizer - -from optimizers import create_optimizer -from train.event.base import BaseTrainingEventInterface, SSD_MODEL -from train.training_state import TrainingState - - -class DefaultTrainingEvent(BaseTrainingEventInterface): - - def __init__(self, config): - super(DefaultTrainingEvent, self).__init__(config) - self.model = None - self.optimizer = None - self.autocast_ctx = None - - def save_checkpoint(self, path: str, training_state: TrainingState): - torch.save({ - "model": self.model.state_dict(), - "optimizer": self.optimizer.state_dict(), - "epoch": training_state.epoch, - "iter_num": training_state.iter_num, - }, "{}/epoch{}_{}.pt".format(path, training_state.epoch, round(training_state.eval_ap, 5))) - - def load_checkpoint(self, checkpoint): - self.model.load_state_dict(checkpoint["model"], strict=True) - self.optimizer.load_state_dict(checkpoint["optimizer"]) - self.config.iteration = checkpoint["iter_num"] - self.config.epoch = checkpoint["epoch"] - - def create_optimizer(self, model: SSD_MODEL) -> Optimizer: - config = self.config - current_momentum = 0.9 - current_lr = config.learning_rate * (config.train_batch_size * config.n_gpu / 32) - self.optimizer = torch.optim.SGD(model.parameters(), lr=current_lr, - momentum=current_momentum, - weight_decay=config.weight_decay_rate) - return self.optimizer - - def model_to_fp16(self, model: SSD_MODEL, optimizer: Optimizer) -> Tuple[SSD_MODEL, Optimizer]: - self.model = model - self.optimizer = optimizer - return self.model, self.optimizer - - def model_to_ddp(self, model: SSD_MODEL) -> SSD_MODEL: - if self.config.distributed: - self.model = NativeDDP(model, - device_ids=[self.config.local_rank]) - else: - self.model = model - return self.model - - def on_step_begin(self, step: int): - self.autocast_ctx = autocast(self.config.fp16) - self.autocast_ctx.__enter__() - - def on_backward(self, step: int, loss: torch.Tensor, optimizer: Optimizer, grad_scaler: GradScaler=None): - self.autocast_ctx.__exit__(None, None, None) - - scaled_loss = grad_scaler.scale(loss) - scaled_loss.backward() - update_step = step % self.config.gradient_accumulation_steps == 0 - if update_step: - self.update_model_params(optimizer, grad_scaler) - - def update_model_params(self, optimizer: Optimizer, grad_scaler: GradScaler=None): - grad_scaler.step(optimizer) - grad_scaler.update() - - for param in self.model.parameters(): - param.grad = None - - diff --git a/cv/detection/ssd/pytorch/download_dataset.sh b/cv/detection/ssd/pytorch/download_dataset.sh new file mode 100644 index 000000000..b3f96d04f --- /dev/null +++ b/cv/detection/ssd/pytorch/download_dataset.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Get COCO 2017 data sets +dir=$(pwd) +mkdir /coco; cd /coco +curl -O http://images.cocodataset.org/zips/train2017.zip; unzip train2017.zip +curl -O http://images.cocodataset.org/zips/val2017.zip; unzip val2017.zip +curl -O http://images.cocodataset.org/annotations/annotations_trainval2017.zip; unzip annotations_trainval2017.zip +${dir}/prepare-json.py --keep-keys annotations/instances_val2017.json annotations/bbox_only_instances_val2017.json +${dir}/prepare-json.py annotations/instances_train2017.json annotations/bbox_only_instances_train2017.json +cd $dir diff --git a/cv/detection/ssd/pytorch/eval.py b/cv/detection/ssd/pytorch/eval.py new file mode 100644 index 000000000..a4c459163 --- /dev/null +++ b/cv/detection/ssd/pytorch/eval.py @@ -0,0 +1,258 @@ +# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import torch +import time +import numpy as np +import io + +from ssd300 import SSD300 +from box_coder import dboxes300_coco, build_ssd300_coder +from parse_config import parse_args, validate_arguments, validate_group_bn +from data.build_pipeline import build_pipeline +from data.prefetcher import eval_prefetcher +from async_evaluator import AsyncEvaluator + +import sys + +# necessary pytorch imports +import torch.utils.data.distributed +import torch.distributed as dist + +# Apex imports +try: + import apex_C + import apex + from apex.parallel import DistributedDataParallel as DDP + from apex.fp16_utils import * + from apex.multi_tensor_apply import multi_tensor_applier + import amp_C +except ImportError: + raise ImportError("Please install APEX from https://github.com/nvidia/apex") + +from SSD import _C as C + +def print_message(rank, *print_args): + if rank == 0: + print(*print_args) + +""" +Take results and produce mAP on COCO + +Intended to be used with an async evaluator, and run on a single +node -- calling code is responsible for that delegation +""" +def evaluate_coco(epoch, final_results, cocoGt, local_rank, threshold, lr): + from pycocotools.cocoeval import COCOeval + cocoDt = cocoGt.loadRes(final_results) + + E = COCOeval(cocoGt, cocoDt, iouType='bbox') + E.evaluate() + E.accumulate() + E.summarize() + print("Epoch: {}, Current AP: {:.5f} AP goal: {:.5f}, lr: {}".format(epoch, E.stats[0], threshold, lr)) + sys.stdout.flush() + + return E.stats[0] + +def coco_eval(args, model, coco, cocoGt, encoder, inv_map, epoch, iteration, lr, evaluator=None): + from pycocotools.cocoeval import COCOeval + model.eval() + threshold = args.threshold + batch_size = args.eval_batch_size + use_fp16 = args.use_fp16 + local_rank = args.local_rank + N_gpu = args.N_gpu + use_nhwc = args.nhwc + pad_input = args.pad_input + distributed = args.distributed + + ret = [] + overlap_threshold = 0.50 + nms_max_detections = 200 + start = time.time() + + # Wrap dataloader for prefetching + coco = eval_prefetcher(iter(coco), + torch.cuda.current_device(), + args.pad_input, + args.nhwc, + args.use_fp16) + + for nbatch, (img, img_id, img_size) in enumerate(coco): + with torch.no_grad(): + # Get predictions + ploc, plabel = model(img) + ploc, plabel = ploc.float(), plabel.float() + + # Handle the batch of predictions produced + # This is slow, but consistent with old implementation. 
+ for idx in range(ploc.shape[0]): + # ease-of-use for specific predictions + ploc_i = ploc[idx, :, :].unsqueeze(0) + plabel_i = plabel[idx, :, :].unsqueeze(0) + + result = encoder.decode_batch(ploc_i, plabel_i, overlap_threshold, nms_max_detections)[0] + + htot, wtot = img_size[0][idx].item(), img_size[1][idx].item() + loc, label, prob = [r.cpu().numpy() for r in result] + for loc_, label_, prob_ in zip(loc, label, prob): + ret.append([img_id[idx], loc_[0]*wtot, \ + loc_[1]*htot, + (loc_[2] - loc_[0])*wtot, + (loc_[3] - loc_[1])*htot, + prob_, + inv_map[label_]]) + + # Now we have all predictions from this rank, gather them all together + # if necessary + ret = np.array(ret).astype(np.float32) + + # Multi-GPU eval + if distributed: + # NCCL backend means we can only operate on GPU tensors + ret_copy = torch.tensor(ret).cuda() + + # Everyone exchanges the size of their results + ret_sizes = [torch.tensor(0).cuda() for _ in range(N_gpu)] + torch.distributed.all_gather(ret_sizes, torch.tensor(ret_copy.shape[0]).cuda()) + + # Get the maximum results size, as all tensors must be the same shape for + # the all_gather call we need to make + max_size = 0 + sizes = [] + for s in ret_sizes: + max_size = max(max_size, s.item()) + sizes.append(s.item()) + + # Need to pad my output to max_size in order to use in all_gather + ret_pad = torch.cat([ret_copy, torch.zeros(max_size-ret_copy.shape[0], 7, dtype=torch.float32).cuda()]) + + # allocate storage for results from all other processes + other_ret = [torch.zeros(max_size, 7, dtype=torch.float32).cuda() for i in range(N_gpu)] + # Everyone exchanges (padded) results + torch.distributed.all_gather(other_ret, ret_pad) + + # Now need to reconstruct the _actual_ results from the padded set using slices. + cat_tensors = [] + for i in range(N_gpu): + cat_tensors.append(other_ret[i][:sizes[i]][:]) + + final_results = torch.cat(cat_tensors).cpu().numpy() + else: + # Otherwise full results are just our results + final_results = ret + + print_message(args.rank, "Predicting Ended, total time: {:.2f} s".format(time.time()-start)) + + # All results are assembled -- if rank == 0 start async evaluation (if enabled) + if args.rank == 0 and (evaluator is not None): + evaluator.submit_task(epoch, evaluate_coco, epoch, final_results, cocoGt, local_rank, threshold, lr) + + model.train() + return + + +def load_checkpoint(model, checkpoint): + print("loading model checkpoint", checkpoint) + od = torch.load(checkpoint) + + # remove proceeding 'module' from checkpoint + saved_model = od["model"] + for k in list(saved_model.keys()): + if k.startswith('module.'): + saved_model[k[7:]] = saved_model.pop(k) + model.load_state_dict(saved_model) + +def setup_distributed(args): + # Setup multi-GPU if necessary + args.distributed = False + if 'WORLD_SIZE' in os.environ: + args.distributed = int(os.environ['WORLD_SIZE']) > 1 + + if args.distributed: + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group(backend='nccl', + init_method='env://') + args.local_seed = 0 # set_seeds(args) + # start timing here + if args.distributed: + args.N_gpu = torch.distributed.get_world_size() + args.rank = torch.distributed.get_rank() + else: + args.N_gpu = 1 + args.rank = 0 + + validate_group_bn(args.bn_group) + + return args + +# setup everything (model, etc) to run eval +def run_eval(args, lr): + args = setup_distributed(args) + + from pycocotools.coco import COCO + + local_seed = args.local_seed + + encoder = build_ssd300_coder() + + val_annotate = os.path.join(args.data, 
"annotations/instances_val2017.json") + val_coco_root = os.path.join(args.data, "val2017") + + cocoGt = COCO(annotation_file=val_annotate) + + val_loader, inv_map = build_pipeline(args, training=False) + + model_options = { + 'use_nhwc' : args.nhwc, + 'pad_input' : args.pad_input, + 'bn_group' : args.bn_group, + 'pretrained' : False, + } + + ssd300_eval = SSD300(args, args.num_classes, **model_options).cuda() + if args.use_fp16: + convert_network(ssd300_eval, torch.half) + ssd300_eval.eval() + + if args.checkpoint is not None: + load_checkpoint(ssd300_eval, args.checkpoint) + + evaluator = AsyncEvaluator(num_threads=1) + + coco_eval(args, + ssd300_eval, + val_loader, + cocoGt, + encoder, + inv_map, + 0, # epoch + 0, # iter_num + lr, + evaluator=evaluator) + + res = evaluator.task_result(0) + +if __name__ == "__main__": + args = parse_args() + validate_arguments(args) + + torch.backends.cudnn.benchmark = True + torch.set_num_threads(1) + lr = 0.001 + run_eval(args, lr) + + diff --git a/cv/detection/ssd/pytorch/base/fused_color_jitter.py b/cv/detection/ssd/pytorch/fused_color_jitter.py similarity index 100% rename from cv/detection/ssd/pytorch/base/fused_color_jitter.py rename to cv/detection/ssd/pytorch/fused_color_jitter.py diff --git a/cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x1.py b/cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x1.py deleted file mode 100644 index 714de8849..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x1.py +++ /dev/null @@ -1,44 +0,0 @@ -from training_event import ApexTrainingEvent - -# 1.Basic Configurations -n_gpu = 1 -distributed = False -dist_backend = "nccl" - -save_checkpoint = False - -seed = 1769250163 -log_freq = 20 - - -# 2.Model Training Configurations -gradient_accumulation_steps = 1 -train_batch_size = 160 -eval_batch_size = 160 -learning_rate = 5.2e-3 -weight_decay_rate = 1.6e-4 -lr_decay_factor = 0.2 -lr_decay_epochs = [34, 45] -warmup = 650 -warmup_factor = 0 -loss_scale = 0.0 - -# 3. Optimizer Configurations -num_workers = 4 -fp16 = True -opt_level = 2 -delay_allreduce = True -bn_group = 1 -fast_nms = True -fast_cj = True -use_coco_ext = False -dali = True -dali_sync = False -dali_cache = -1 -nhwc = True -pad_input = True -jit = True -use_nvjpeg = False - - -training_event = ApexTrainingEvent \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x8.py b/cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x8.py deleted file mode 100644 index 281fd970f..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x8.py +++ /dev/null @@ -1,44 +0,0 @@ -from training_event import ApexTrainingEvent - -# 1.Basic Configurations -n_gpu = 1 -distributed = True -dist_backend = "nccl" - -save_checkpoint = False - -seed = 4230048668 -log_freq = 20 - - -# 2.Model Training Configurations -gradient_accumulation_steps = 1 -train_batch_size = 120 -eval_batch_size = 160 -learning_rate = 2.92e-3 -weight_decay_rate = 1.6e-4 -lr_decay_factor = 0.1 -lr_decay_epochs = [44, 55] -warmup = 650 -warmup_factor = 0 -loss_scale = 0.0 - -# 3. 
Optimizer Configurations -num_workers = 4 -fp16 = True -opt_level = 2 -delay_allreduce = True -bn_group = 1 -fast_nms = True -fast_cj = True -use_coco_ext = False -dali = True -dali_sync = False -dali_cache = -1 -nhwc = True -pad_input = True -jit = True -use_nvjpeg = False - - -training_event = ApexTrainingEvent \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x8_2.1.0.py b/cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x8_2.1.0.py deleted file mode 100644 index b5dd65e1b..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x8_2.1.0.py +++ /dev/null @@ -1,44 +0,0 @@ -from training_event import ApexTrainingEvent - -# 1.Basic Configurations -n_gpu = 1 -distributed = True -dist_backend = "nccl" - -save_checkpoint = False - -seed = 1769250163 -log_freq = 20 - - -# 2.Model Training Configurations -gradient_accumulation_steps = 1 -train_batch_size = 160 -eval_batch_size = 160 -learning_rate = 5.2e-3 -weight_decay_rate = 1.6e-4 -lr_decay_factor = 0.2 -lr_decay_epochs = [34, 45] -warmup = 650 -warmup_factor = 0 -loss_scale = 0.0 - -# 3. Optimizer Configurations -num_workers = 4 -fp16 = True -opt_level = 2 -delay_allreduce = True -bn_group = 1 -fast_nms = True -fast_cj = True -use_coco_ext = False -dali = True -dali_sync = False -dali_cache = -1 -nhwc = True -pad_input = True -jit = True -use_nvjpeg = False - - -training_event = ApexTrainingEvent \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x8_wsl.py b/cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x8_wsl.py deleted file mode 100644 index fb1638a61..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/config/config_V100x1x8_wsl.py +++ /dev/null @@ -1,44 +0,0 @@ -from training_event import ApexTrainingEvent - -# 1.Basic Configurations -n_gpu = 1 -distributed = True -dist_backend = "nccl" - -save_checkpoint = False - -seed = 2651384829 -log_freq = 20 - - -# 2.Model Training Configurations -gradient_accumulation_steps = 1 -train_batch_size = 200 -eval_batch_size = 200 -learning_rate = 5.15e-3 -weight_decay_rate = 1.6e-4 -lr_decay_factor = 0.2 -lr_decay_epochs = [34, 45] -warmup = 650 -warmup_factor = 0 -loss_scale = 0.0 - -# 3. Optimizer Configurations -num_workers = 4 -fp16 = True -opt_level = 2 -delay_allreduce = True -bn_group = 1 -fast_nms = True -fast_cj = True -use_coco_ext = False -dali = True -dali_sync = False -dali_cache = -1 -nhwc = True -pad_input = True -jit = True -use_nvjpeg = False - - -training_event = ApexTrainingEvent \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/iluvatar/config/config_nodali_V100x1x8.py b/cv/detection/ssd/pytorch/iluvatar/config/config_nodali_V100x1x8.py deleted file mode 100644 index 279669912..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/config/config_nodali_V100x1x8.py +++ /dev/null @@ -1,44 +0,0 @@ -from training_event import ApexTrainingEvent - -# 1.Basic Configurations -n_gpu = 1 -distributed = True -dist_backend = "nccl" - -save_checkpoint = False - -seed = 4230048668 -log_freq = 20 - - -# 2.Model Training Configurations -gradient_accumulation_steps = 1 -train_batch_size = 120 -eval_batch_size = 160 -learning_rate = 2.92e-3 -weight_decay_rate = 1.6e-4 -lr_decay_factor = 0.1 -lr_decay_epochs = [44, 55] -warmup = 650 -warmup_factor = 0 -loss_scale = 0.0 - -# 3. 
Optimizer Configurations -num_workers = 4 -fp16 = True -opt_level = 2 -delay_allreduce = True -bn_group = 1 -fast_nms = True -fast_cj = True -use_coco_ext = False -dali = False -dali_sync = False -dali_cache = 0 -nhwc = False -pad_input = False -jit = False -use_nvjpeg = False - - -training_event = ApexTrainingEvent \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/iluvatar/config/config_nv_V100x1x8.py b/cv/detection/ssd/pytorch/iluvatar/config/config_nv_V100x1x8.py deleted file mode 100644 index 281fd970f..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/config/config_nv_V100x1x8.py +++ /dev/null @@ -1,44 +0,0 @@ -from training_event import ApexTrainingEvent - -# 1.Basic Configurations -n_gpu = 1 -distributed = True -dist_backend = "nccl" - -save_checkpoint = False - -seed = 4230048668 -log_freq = 20 - - -# 2.Model Training Configurations -gradient_accumulation_steps = 1 -train_batch_size = 120 -eval_batch_size = 160 -learning_rate = 2.92e-3 -weight_decay_rate = 1.6e-4 -lr_decay_factor = 0.1 -lr_decay_epochs = [44, 55] -warmup = 650 -warmup_factor = 0 -loss_scale = 0.0 - -# 3. Optimizer Configurations -num_workers = 4 -fp16 = True -opt_level = 2 -delay_allreduce = True -bn_group = 1 -fast_nms = True -fast_cj = True -use_coco_ext = False -dali = True -dali_sync = False -dali_cache = -1 -nhwc = True -pad_input = True -jit = True -use_nvjpeg = False - - -training_event = ApexTrainingEvent \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/iluvatar/config/converter.py b/cv/detection/ssd/pytorch/iluvatar/config/converter.py deleted file mode 100644 index e61209dcb..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/config/converter.py +++ /dev/null @@ -1,15 +0,0 @@ -import copy -import math -from torch.utils import checkpoint - -from ssd300 import SSD300 - - -def convert_model(model, config): - model_options = { - 'use_nhwc': config.nhwc, - 'pad_input': config.pad_input, - 'bn_group': config.bn_group, - } - model = SSD300(config, config.num_classes, **model_options).cuda() - return model diff --git a/cv/detection/ssd/pytorch/iluvatar/config/environment_variables.sh b/cv/detection/ssd/pytorch/iluvatar/config/environment_variables.sh deleted file mode 100644 index e28aba1a3..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/config/environment_variables.sh +++ /dev/null @@ -1,10 +0,0 @@ -# ================================================= -# Export variables -# ================================================= - -export CONTAINER_MOUNTS="--gpus all" -NVCC_ARGUMENTS="-U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ --expt-relaxed-constexpr -ftemplate-depth=1024 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_80,code=compute_80" -if [[ "$PYTORCH_BUILD_VERSION" == 1.8* ]]; then - NVCC_ARGUMENTS="${NVCC_ARGUMENTS} -D_PYTORCH18" -fi -export NVCC_ARGUMENTS \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/iluvatar/config/nhwc/conv.py b/cv/detection/ssd/pytorch/iluvatar/config/nhwc/conv.py deleted file mode 100644 index 1bdddc757..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/config/nhwc/conv.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import torch -from torch.nn.modules.conv import _ConvNd - -import SSD._C as C - -import collections -from itertools import repeat - - -def _ntuple(n): - def parse(x): - if isinstance(x, collections.Iterable): - return x - return tuple(repeat(x, n)) - return parse - -_single = _ntuple(1) -_pair = _ntuple(2) -_triple = _ntuple(3) -_quadruple = _ntuple(4) - -class conv2d_NHWC_impl(torch.autograd.Function): - - @staticmethod - def forward(ctx, x, w, bias=None, stride=(1,1), padding=(0,0), dilation=(1,1), groups=1): - # Save constants for bprop - ctx.stride = stride - ctx.padding = padding - ctx.dilation = dilation - ctx.groups = groups - ctx.need_bias_grad = bias is not None - ctx.save_for_backward(x, w) - - if bias is None: - return C.cudnn_convolution_nhwc(x, w, - padding, stride, dilation, - groups, - torch.backends.cudnn.benchmark, False) - else: - return C.cudnn_convolution_with_bias_nhwc(x, w, bias, - padding, stride, dilation, - groups, - torch.backends.cudnn.benchmark, False) - - @staticmethod - def backward(ctx, grad_y): - x, w = ctx.saved_variables - - if ctx.need_bias_grad: - dx, dw, db = C.cudnn_convolution_backward_with_bias_nhwc(x, grad_y, w, - ctx.padding, ctx.stride, ctx.dilation, ctx.groups, - torch.backends.cudnn.benchmark, False, - list(ctx.needs_input_grad[0:3])) - if ctx.needs_input_grad[0]: - return dx, dw, db, None, None, None, None - else: - return None, dw, db, None, None, None, None - else: - dx, dw = C.cudnn_convolution_backward_nhwc(x, grad_y, w, - ctx.padding, ctx.stride, ctx.dilation, ctx.groups, - torch.backends.cudnn.benchmark, False, - list(ctx.needs_input_grad[0:2])) - if ctx.needs_input_grad[0]: - return dx, dw, None, None, None, None, None - else: - return None, dw, None, None, None, None, None - -class Conv2d_NHWC(_ConvNd): - def __init__(self, in_channels, out_channels, kernel_size, stride=1, - padding=0, dilation=1, groups=1, bias=True): - kernel_size = _pair(kernel_size) - stride = _pair(stride) - padding = _pair(padding) - dilation = _pair(dilation) - super(Conv2d_NHWC, self).__init__( - in_channels, out_channels, kernel_size, stride, padding, dilation, - False, _pair(0), groups, bias=bias, padding_mode='zeros') - - # permute filters - self.weight = torch.nn.Parameter(self.weight.permute(0, 2, 3, 1).contiguous()) - - def forward(self, x): - return conv2d_NHWC_impl.apply(x, self.weight, self.bias, self.stride, - self.padding, self.dilation, self.groups) - diff --git a/cv/detection/ssd/pytorch/iluvatar/config/nhwc/test_bn_cudnn.py b/cv/detection/ssd/pytorch/iluvatar/config/nhwc/test_bn_cudnn.py deleted file mode 100644 index f74edfbe3..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/config/nhwc/test_bn_cudnn.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import torch - -try: - from .batch_norm import * -except: - from batch_norm import * - - -torch.backends.cudnn.benchmark=True - -N = 64 -C = 256 -H = 56 -W = 56 - -fuse_relu = True -fuse_add = True - -bn = BatchNorm2d_NHWC(C, fuse_relu=fuse_relu).cuda() - -# Make a NCHW copy of everything -bn_nchw = torch.nn.BatchNorm2d(C).cuda() -# Need to set this for consistency -bn_nchw.eps = 1e-4 - -# copy the parameters -bn_nchw.weight = torch.nn.Parameter(bn.weight.clone()) -bn_nchw.bias = torch.nn.Parameter(bn.bias.clone()) - -# generate random inputs -x = torch.randn(N, C, H, W).cuda().half() -z = torch.randn(N, C, H, W).cuda().half() - -# Copy input tensors -x_copy = x.clone() -z_copy = z.clone() -x_copy.requires_grad = True -z_copy.requires_grad = True - - -# Transpose -> NHWC -x = x.permute(0,2,3,1).contiguous() -z = z.permute(0,2,3,1).contiguous() -x.requires_grad = True -z.requires_grad = True - -# generate a random signal to backprop, copy -g0 = torch.randn(N, H, W, C).cuda().half() -g0_nchw = g0.clone().permute(0,3,1,2).contiguous() - -# Run NHWC fwd -out = bn(x, z if fuse_add else None) -# Run NHWC bwd -out.backward(g0) - -# Run NCHW fwd -out_nchw = bn_nchw(x_copy) -if fuse_add: - out_nchw += z_copy -if fuse_relu: - out_nchw = out_nchw.relu() - -# Run NCHW bwd -out_nchw.backward(g0_nchw) - -# Permute NHWC results -> NCHW for comparison -out_nhwc = out.permute(0,3,1,2) -x_grad_nhwc = x.grad.permute(0,3,1,2) -if fuse_add: - z_grad_nhwc = z.grad.permute(0,3,1,2) - -atol = 1e-5 -rtol = 1e-3 -print('X: ', torch.allclose(out_nhwc, out_nchw, atol=atol, rtol=rtol)) -print('dS: ', torch.allclose(bn.weight.grad, bn_nchw.weight.grad, atol=atol, rtol=rtol)) -print('dB: ', torch.allclose(bn.bias.grad, bn_nchw.bias.grad, atol=atol, rtol=rtol)) -print('dX: ', torch.allclose(x_grad_nhwc, x_copy.grad, atol=atol, rtol=rtol)) -if fuse_add: - print('dZ: ', torch.allclose(z_grad_nhwc, z_copy.grad, atol=atol, rtol=rtol)) diff --git a/cv/detection/ssd/pytorch/iluvatar/config/ssd300.py b/cv/detection/ssd/pytorch/iluvatar/config/ssd300.py deleted file mode 100644 index 7e550beb4..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/config/ssd300.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
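The deleted test_bn_cudnn.py above checks the custom NHWC kernels by running the same data through a stock NCHW reference, permuting layouts, and comparing forward and backward results with torch.allclose. A minimal sketch of that layout-parity pattern using only stock PyTorch (native channels_last memory format instead of the SSD._C extension); shapes are reduced for brevity, tolerances follow the deleted tests:

# Layout-parity sketch in stock PyTorch (no SSD._C): run the same conv + batch
# norm in NCHW and in channels_last memory format and compare the outputs,
# mirroring the pattern of the deleted test_conv.py / test_bn_cudnn.py.
import torch
import torch.nn as nn

torch.manual_seed(0)
N, C, K, H, W = 8, 32, 64, 14, 14

conv_bn = nn.Sequential(nn.Conv2d(C, K, 3, padding=1), nn.BatchNorm2d(K)).eval()
conv_bn_cl = nn.Sequential(nn.Conv2d(C, K, 3, padding=1), nn.BatchNorm2d(K)).eval()
conv_bn_cl.load_state_dict(conv_bn.state_dict())
conv_bn_cl = conv_bn_cl.to(memory_format=torch.channels_last)

x = torch.randn(N, C, H, W)
y_nchw = conv_bn(x)
y_cl = conv_bn_cl(x.to(memory_format=torch.channels_last))

# channels_last only changes the memory layout, so the values should agree.
print(torch.allclose(y_nchw, y_cl.contiguous(), atol=1e-5, rtol=1e-3))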
- -import torch -import torch.nn as nn -# from base_model import L2Norm, ResNet - -try: - from .resnet import ResNet, resnet34 - from .nhwc.conv import Conv2d_NHWC -except: - from resnet import ResNet, resnet34 - from nhwc.conv import Conv2d_NHWC - - -class SSD300(nn.Module): - """ - Build a SSD module to take 300x300 image input, - and output 8732 per class bounding boxes - - label_num: number of classes (including background 0) - """ - def __init__(self, args, label_num, use_nhwc=False, pad_input=False, bn_group=1, pretrained=True): - - super(SSD300, self).__init__() - - self.label_num = label_num - self.use_nhwc = use_nhwc - self.pad_input = pad_input - self.bn_group = bn_group - - # Explicitly RN34 all the time - out_channels = 256 - out_size = 38 - self.out_chan = [out_channels, 512, 512, 256, 256, 256] - - # self.model = ResNet(self.use_nhwc, self.pad_input, self.bn_group) - - rn_args = { - 'bn_group' : bn_group, - 'pad_input' : pad_input, - 'nhwc' : use_nhwc, - 'pretrained' : pretrained, - 'ssd_mods' : True, - } - - self.model = resnet34(**rn_args) - - self._build_additional_features() - - padding_channels_to = 8 - self._build_multibox_heads(use_nhwc, padding_channels_to) - - # after l2norm, conv7, conv8_2, conv9_2, conv10_2, conv11_2 - # classifer 1, 2, 3, 4, 5 ,6 - - # intitalize all weights - with torch.no_grad(): - self._init_weights() - - def _build_multibox_heads(self, use_nhwc, padding_channels_to=8): - self.num_defaults = [4, 6, 6, 6, 4, 4] - self.mbox = [] - self.padding_amounts = [] - - if self.use_nhwc: - conv_fn = Conv2d_NHWC - else: - conv_fn = nn.Conv2d - # Multiple to pad channels to - for nd, oc in zip(self.num_defaults, self.out_chan): - # Horizontally fuse loc and conf convolutions - my_num_channels = nd*(4+self.label_num) - if self.use_nhwc: - # Want to manually pad to get HMMA kernels in NHWC case - padding_amount = padding_channels_to - (my_num_channels % padding_channels_to) - else: - padding_amount = 0 - self.padding_amounts.append(padding_amount) - self.mbox.append(conv_fn(oc, my_num_channels + padding_amount, kernel_size=3, padding=1)) - - self.mbox = nn.ModuleList(self.mbox) - - - """ - Output size from RN34 is always 38x38 - """ - def _build_additional_features(self): - self.additional_blocks = [] - - if self.use_nhwc: - conv_fn = Conv2d_NHWC - else: - conv_fn = nn.Conv2d - - def build_block(input_channels, inter_channels, out_channels, stride=1, pad=0): - return nn.Sequential( - conv_fn(input_channels, inter_channels, kernel_size=1), - nn.ReLU(inplace=True), - conv_fn(inter_channels, out_channels, kernel_size=3, stride=stride, padding=pad), - nn.ReLU(inplace=True) - ) - - strides = [2, 2, 2, 1, 1] - intermediates = [256, 256, 128, 128, 128] - paddings = [1, 1, 1, 0, 0] - - for i, im, o, stride, pad in zip(self.out_chan[:-1], intermediates, self.out_chan[1:], strides, paddings): - self.additional_blocks.append(build_block(i, im, o, stride=stride, pad=pad)) - - self.additional_blocks = nn.ModuleList(self.additional_blocks) - - def _init_additional_weights(self): - addn_blocks = [*self.additional_blocks] - # Need to handle additional blocks differently in NHWC case due to xavier initialization - for layer in addn_blocks: - for param in layer.parameters(): - if param.dim() > 1: - if self.use_nhwc: - # xavier_uniform relies on fan-in/-out, so need to use NCHW here to get - # correct values (K, R) instead of the correct (K, C) - nchw_param_data = param.data.permute(0, 3, 1, 2).contiguous() - nn.init.xavier_uniform_(nchw_param_data) - # Now permute 
correctly-initialized param back to NHWC - param.data.copy_(nchw_param_data.permute(0, 2, 3, 1).contiguous()) - else: - nn.init.xavier_uniform_(param) - - def _init_multibox_weights(self): - layers = [ *self.mbox ] - - for layer, default, padding in zip(layers, self.num_defaults, self.padding_amounts): - for param in layer.parameters(): - if param.dim() > 1 and self.use_nhwc: - # Need to be careful - we're initialising [loc, conf, pad] with - # all 3 needing to be treated separately - conf_channels = default * self.label_num - loc_channels = default * 4 - pad_channels = padding - # Split the parameter into separate parts along K dimension - conf, loc, pad = param.data.split([conf_channels, loc_channels, pad_channels], dim=0) - - # Padding should be zero - pad_data = torch.zeros_like(pad.data) - - def init_loc_conf(p): - p_data = p.data.permute(0, 3, 1, 2).contiguous() - nn.init.xavier_uniform_(p_data) - p_data = p_data.permute(0, 2, 3, 1).contiguous() - return p_data - - # Location and confidence data - loc_data = init_loc_conf(loc) - conf_data = init_loc_conf(conf) - - # Put the full weight together again along K and copy - param.data.copy_(torch.cat([conf_data, loc_data, pad_data], dim=0)) - elif param.dim() > 1: - nn.init.xavier_uniform_(param) - - def _init_weights(self): - self._init_additional_weights() - self._init_multibox_weights() - - # Shape the classifier to the view of bboxes - def bbox_view(self, src, mbox): - locs = [] - confs = [] - for s, m, num_defaults, pad in zip(src, mbox, self.num_defaults, self.padding_amounts): - mm = m(s) - conf_channels = num_defaults * self.label_num - loc_channels = num_defaults * 4 - - if self.use_nhwc: - conf, loc, _ = mm.split([conf_channels, loc_channels, pad], dim=3) - conf, loc = conf.contiguous(), loc.contiguous() - # We now have unfused [N, H, W, C] - # Layout is a little awkward here. - # Take C = c * d, then we actually have: - # [N, H, W, c*d] - # flatten HW first: - # [N, H, W, c*d] -> [N, HW, c*d] - locs.append( - loc.view(s.size(0), -1, 4 * num_defaults).permute(0, 2, 1).contiguous().view(loc.size(0), 4, -1)) - confs.append( - conf.view(s.size(0), -1, self.label_num * num_defaults).permute(0, 2, 1).contiguous().view(conf.size(0), self.label_num, -1)) - else: - conf, loc = mm.split([conf_channels, loc_channels], dim=1) - conf, loc = conf.contiguous(), loc.contiguous() - # flatten the anchors for this layer - locs.append(loc.view(s.size(0), 4, -1)) - confs.append(conf.view(s.size(0), self.label_num, -1)) - - cat_dim = 2 - locs, confs = torch.cat(locs, cat_dim), torch.cat(confs, cat_dim) - - return locs, confs - - def forward(self, data): - - layers = self.model(data) - - # last result from network goes into additional blocks - x = layers - # If necessary, transpose back to NCHW - additional_results = [] - for i, l in enumerate(self.additional_blocks): - x = l(x) - additional_results.append(x) - - # do we need the l2norm on the first result? 
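In _build_multibox_heads above, each fused loc/conf head is padded up to a multiple of 8 output channels in the NHWC case so the convolution maps onto HMMA (tensor-core) kernels, and bbox_view later splits the result back into conf, loc, and padding channels. The channel arithmetic, reproduced as a short sketch; label_num = 81 (COCO classes including background) is an assumption here, since the patch reads the value from the config rather than hard-coding it:

# Channel padding used by the deleted _build_multibox_heads, as plain arithmetic.
# label_num = 81 is an assumption; the real value comes from config.num_classes.
num_defaults = [4, 6, 6, 6, 4, 4]
label_num = 81
pad_to = 8

for nd in num_defaults:
    channels = nd * (4 + label_num)           # fused loc (4) + conf (label_num) outputs
    pad = pad_to - (channels % pad_to)        # note: pads by a full 8 when already aligned
    print(nd, channels, pad, channels + pad)  # e.g. 4 -> 340 + 4 = 344, 6 -> 510 + 2 = 512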
- src = [layers, *additional_results] - # Feature Map 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4 - - locs, confs = self.bbox_view(src, self.mbox) - - # For SSD 300, shall return nbatch x 8732 x {nlabels, nlocs} results - return locs, confs - diff --git a/cv/detection/ssd/pytorch/iluvatar/config/training_event.py b/cv/detection/ssd/pytorch/iluvatar/config/training_event.py deleted file mode 100644 index 3fdb8fba4..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/config/training_event.py +++ /dev/null @@ -1,103 +0,0 @@ -import os -from typing import Tuple - -import torch -import apex -from torch.cuda.amp import GradScaler -from torch.optim import Optimizer - -from train.event.base import BaseTrainingEventInterface -from train.event.base import BatchType, SSD_MODEL -from train.training_state import TrainingState - -from converter import convert_model - - -class ApexTrainingEvent(BaseTrainingEventInterface): - - def __init__(self, config): - super(ApexTrainingEvent, self).__init__(config) - self.model = None - self.optimizer = None - self.overflow_buf = None - - def save_checkpoint(self, path: str, training_state: TrainingState): - torch.save({ - "model": self.model.state_dict(), - "optimizer": self.optimizer.state_dict(), - "amp": apex.amp.state_dict(), - "master params": list(apex.amp.master_params(self.optimizer)), - "epoch": training_state.epoch, - "iter_num": training_state.iter_num, - }, "{}/epoch{}_{}.pt".format(path, training_state.epoch, round(training_state.eval_ap, 5))) - - def load_checkpoint(self, checkpoint): - self.model.load_state_dict(checkpoint["model"], strict=True) - self.optimizer.load_state_dict(checkpoint["optimizer"]) - self.config.iteration = checkpoint["iter_num"] - self.config.epoch = checkpoint["epoch"] - if checkpoint.get("amp", None): - apex.amp.load_state_dict(checkpoint["amp"]) - if checkpoint.get("master params", None): - for param, saved_param in zip(apex.amp.master_params(self.optimizer), checkpoint["master params"]): - param.data.copy_(saved_param.data) - - def on_init_start(self): - pass - - def convert_model(self, model: SSD_MODEL) -> SSD_MODEL: - self.model = convert_model(model, self.config) - return self.model - - def create_optimizer(self, model: SSD_MODEL) -> Optimizer: - config = self.config - base_lr = 2.5e-3 - requested_lr_multiplier = config.learning_rate / base_lr - adjusted_multiplier = max(1, round(requested_lr_multiplier * config.train_batch_size * config.n_gpu / 32)) - - current_lr = base_lr * adjusted_multiplier - current_weight_decay = config.weight_decay_rate - - self.optimizer = apex.optimizers.FusedSGD(model.parameters(), - lr=current_lr, - momentum=0.9, - weight_decay=current_weight_decay) - return self.optimizer - - def model_to_fp16(self, model: SSD_MODEL, optimizer: Optimizer) -> Tuple[SSD_MODEL, Optimizer]: - self.model, self.optimizer = apex.amp.initialize(model, optimizer, opt_level="O{}".format(self.config.opt_level), loss_scale=128.) 
- return self.model, self.optimizer - - def model_to_ddp(self, model: SSD_MODEL) -> SSD_MODEL: - config = self.config - if config.distributed: - if config.delay_allreduce: - print(config.local_rank, "Delaying allreduces to the end of backward()") - self.model = apex.parallel.DistributedDataParallel(model, - gradient_predivide_factor=config.n_gpu / 8.0, - delay_allreduce=config.delay_allreduce, - retain_allreduce_buffers=config.fp16) - else: - self.model = model - return self.model - - def on_step_begin(self, step: int): - pass - - def on_step_end(self, step: int): - pass - - def on_backward(self, step: int, loss: torch.Tensor, optimizer: Optimizer, grad_scaler: GradScaler=None): - with apex.amp.scale_loss(loss, optimizer) as scaled_loss: - scaled_loss.backward() - update_step = step % self.config.gradient_accumulation_steps == 0 - if update_step: - self.update_model_params(optimizer, grad_scaler) - - def update_model_params(self, optimizer: Optimizer, grad_scaler: GradScaler=None): - optimizer.step() - for param in self.model.parameters(): - param.grad = None - - - diff --git a/cv/detection/ssd/pytorch/iluvatar/reset.sh b/cv/detection/ssd/pytorch/iluvatar/reset.sh deleted file mode 100644 index 0380b4762..000000000 --- a/cv/detection/ssd/pytorch/iluvatar/reset.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# clean cache for host memory -echo 3 > /proc/sys/vm/drop_caches - -# reset BI -ixsmi -r - - diff --git a/cv/detection/ssd/pytorch/install_ssd.sh b/cv/detection/ssd/pytorch/install_ssd.sh new file mode 100644 index 000000000..a65d5c927 --- /dev/null +++ b/cv/detection/ssd/pytorch/install_ssd.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +TARGET_DIR=${TARGET_DIR:-} + +PYTHON_PATH=$(which python3) +PYTHON_DIST_PATH=${TARGET_DIR}/lib/python3/dist-packages + +PKG_DIR="build_pip" +PKG_NAME="ssd" + +if [[ ! -d ${PKG_DIR} ]]; then + echo "ERROR: Package directory ${PKG_DIR} doesn't exist" + exit 1 +fi + +latest_pkg="$(ls -t ${PKG_DIR} | grep -i ${PKG_NAME} | head -1)" +if [[ "${latest_pkg}" == "" ]]; then + echo "ERROR: Cannot find latest ${PKG_NAME} package" + exit 1 +else + echo "INFO: Found latest package ${latest_pkg} in directory ${PKG_DIR}" +fi + +if [[ "${TARGET_DIR}" != "" ]]; then + ${PYTHON_PATH} -m pip install --upgrade --no-deps -t ${PYTHON_DIST_PATH} ${PKG_DIR}/${latest_pkg} || exit + echo "SSD-0.1 installed in ${PYTHON_DIST_PATH}; please add it to your PYTHONPATH." 
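The deleted training_event.py above concentrates the optimizer setup: create_optimizer rescales the configured learning rate to a multiple of a 2.5e-3 base according to per-GPU batch size and GPU count, and on_backward runs the apex amp scaled-loss backward with manual gradient accumulation. A hedged sketch of both pieces follows; it requires NVIDIA apex and a CUDA device, the tiny Linear model and batch values are only illustrative, and the apex calls mirror the deleted code rather than define it:

# Sketch of the deleted create_optimizer LR scaling and on_backward accumulation.
# Requires NVIDIA apex built with CUDA extensions; the model is a stand-in.
import torch
import apex

# --- LR scaling as in create_optimizer (values from config_nv_V100x1x8.py) ---
base_lr = 2.5e-3
learning_rate, train_batch_size, n_gpu = 2.92e-3, 120, 1
multiplier = max(1, round((learning_rate / base_lr) * train_batch_size * n_gpu / 32))
current_lr = base_lr * multiplier             # 2.5e-3 * 4 = 1e-2

# --- amp backward with manual gradient accumulation, as in on_backward ---
model = torch.nn.Linear(16, 4).cuda()
optimizer = apex.optimizers.FusedSGD(model.parameters(), lr=current_lr,
                                     momentum=0.9, weight_decay=1.6e-4)
model, optimizer = apex.amp.initialize(model, optimizer, opt_level="O2", loss_scale=128.)

grad_accum_steps = 2                          # config.gradient_accumulation_steps
for step in range(1, 5):
    loss = model(torch.randn(8, 16, device="cuda")).float().pow(2).mean()
    with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
    if step % grad_accum_steps == 0:          # update only on accumulation boundaries
        optimizer.step()
        for p in model.parameters():          # the deleted code clears grads by hand
            p.grad = None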
+else + ${PYTHON_PATH} -m pip uninstall ${PKG_NAME} -y + ${PYTHON_PATH} -m pip install --no-deps ${PKG_DIR}/${latest_pkg} || exit +fi + +# Return 0 status if all finished +exit 0 diff --git a/cv/detection/ssd/pytorch/iluvatar/config/nhwc/test_conv.py b/cv/detection/ssd/pytorch/master_params.py similarity index 33% rename from cv/detection/ssd/pytorch/iluvatar/config/nhwc/test_conv.py rename to cv/detection/ssd/pytorch/master_params.py index 2ccf96905..a7defbf39 100644 --- a/cv/detection/ssd/pytorch/iluvatar/config/nhwc/test_conv.py +++ b/cv/detection/ssd/pytorch/master_params.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,53 +13,27 @@ # limitations under the License. import torch +import apex_C -try: - from .conv import * -except: - from conv import * +def check_type_split(buckets): + for bucket in buckets: + this_type = bucket[0].type() + for param in bucket: + if param.type() != this_type: + raise ValueError("Each bucket must contain only params of the same type.") -torch.backends.cudnn.benchmark=True -N = 64 -C = 32 -K = 256 -H = 56 -W = 56 +def create_flat_master(model_buckets): + # Ideally, we'd like to flatten the model params as well, and reset the float params' .data + # attributes to point directly into the flattened master buffers. However, my version that does + # so is yielding CUDNN_STATUS_BAD_PARAM errors when running with distributed and nhwc. + # I ended up making the safe choice of not altering what the params' .data members point to. + check_type_split(model_buckets) -conv = Conv2d_NHWC(C, K, 1).cuda().half() -weight_orig = conv.weight.clone() -conv.weight = torch.nn.Parameter(conv.weight.permute(0,2,3,1).contiguous()) + flat_master_buckets = [apex_C.flatten([p.detach().clone().float() for p in model_bucket]) + for model_bucket in model_buckets] -# Make a NCHW copy of everything -conv_nchw = torch.nn.Conv2d(C, K, 1, bias=False).cuda().half() -conv_nchw.weight = torch.nn.Parameter(weight_orig) + for flat_master in flat_master_buckets: + flat_master.requires_grad_() -x = torch.randn(N, C, H, W).cuda().half() - -# Copy input tensor -x_copy = x.clone() -x_copy.requires_grad = True - -# Transpose -> NHWC -x = x.permute(0,2,3,1).contiguous() -x.requires_grad = True - -g0 = torch.randn(N, H, W, K).cuda().half() -g0_nchw = g0.clone().permute(0,3,1,2).contiguous() - -out = conv(x) -out = out.relu_() -out.backward(g0) - -out_nchw = conv_nchw(x_copy) -out_nchw = out_nchw.relu_() - -out_nchw.backward(g0_nchw) - -out_nhwc = out.permute(0,3,1,2) -#print(out_nhwc) -#print(out_nchw) - -print(torch.allclose(out_nhwc, out_nchw, atol=1e-5, rtol=1e-3)) -print(torch.allclose(conv.weight.grad.permute(0,3,1,2), conv_nchw.weight.grad, atol=1e-5, rtol=1e-3)) + return flat_master_buckets diff --git a/cv/detection/ssd/pytorch/mlperf_log_utils.py b/cv/detection/ssd/pytorch/mlperf_log_utils.py new file mode 100644 index 000000000..be94d97ce --- /dev/null +++ b/cv/detection/ssd/pytorch/mlperf_log_utils.py @@ -0,0 +1,34 @@ +import collections +import os +import subprocess + +from mlperf_logging.mllog import constants +from mlperf_logger import log_event, configure_logger + + +def mlperf_submission_log(benchmark): + + num_nodes = os.environ.get('SLURM_JOB_NUM_NODES', 1) + + configure_logger(benchmark) + + log_event( + key=constants.SUBMISSION_BENCHMARK, + 
value=benchmark, + ) + + log_event( + key=constants.SUBMISSION_ORG, + value='NVIDIA') + + log_event( + key=constants.SUBMISSION_DIVISION, + value='closed') + + log_event( + key=constants.SUBMISSION_STATUS, + value='onprem') + + log_event( + key=constants.SUBMISSION_PLATFORM, + value=f'{num_nodes}xSUBMISSION_PLATFORM_PLACEHOLDER') diff --git a/cv/detection/ssd/pytorch/mlperf_logger.py b/cv/detection/ssd/pytorch/mlperf_logger.py new file mode 100644 index 000000000..3a7f22d4f --- /dev/null +++ b/cv/detection/ssd/pytorch/mlperf_logger.py @@ -0,0 +1,101 @@ +# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import os +import subprocess +import torch +import numpy as np +from mlperf_logging import mllog +import random + +mllogger = mllog.get_mllogger() + +def log_start(*args, **kwargs): + _log_print(mllogger.start, *args, **kwargs) +def log_end(*args, **kwargs): + _log_print(mllogger.end, *args, **kwargs) +def log_event(*args, **kwargs): + _log_print(mllogger.event, *args, **kwargs) +def _log_print(logger, *args, **kwargs): + """ + Wrapper for MLPerf compliance logging calls. + All arguments but 'log_all_ranks' are passed to + mlperf_logging.mllog. + If 'log_all_ranks' is set to True then all distributed workers will print + logging message, if set to False then only worker with rank=0 will print + the message. + """ + if 'stack_offset' not in kwargs: + kwargs['stack_offset'] = 3 + if 'value' not in kwargs: + kwargs['value'] = None + + if kwargs.pop('log_all_ranks', False): + log = True + else: + log = (get_rank() == 0) + + if log: + logger(*args, **kwargs) + + +def configure_logger(benchmark): + mllog.config(filename=os.path.join(os.path.dirname(os.path.abspath(__file__)), f'{benchmark}.log')) + mllogger = mllog.get_mllogger() + mllogger.logger.propagate = False + + +def barrier(): + """ + Works as a temporary distributed barrier, currently pytorch + doesn't implement barrier for NCCL backend. + Calls all_reduce on dummy tensor and synchronizes with GPU. + """ + if torch.distributed.is_initialized(): + torch.distributed.all_reduce(torch.cuda.FloatTensor(1)) + torch.cuda.synchronize() + + +def get_rank(): + """ + Gets distributed rank or returns zero if distributed is not initialized. 
+ """ + if torch.distributed.is_initialized(): + rank = torch.distributed.get_rank() + else: + rank = 0 + return rank + +def broadcast_seeds(seed, device): + if torch.distributed.is_initialized(): + seeds_tensor = torch.LongTensor([seed]).to(device) + torch.distributed.broadcast(seeds_tensor, 0) + seed = seeds_tensor.item() + return seed + +def set_seeds(args): + torch.cuda.set_device(args.local_rank) + device = torch.device('cuda') + + # make sure that all workers has the same master seed + log_event(key=mllog.constants.SEED, value=args.seed) + args.seed = broadcast_seeds(args.seed, device) + + local_seed = (args.seed + get_rank()) % 2**32 + print(get_rank(), "Using seed = {}".format(local_seed)) + random.seed(local_seed) + torch.manual_seed(local_seed) + np.random.seed(seed=local_seed) + return local_seed diff --git a/cv/detection/ssd/pytorch/iluvatar/config/nhwc/batch_norm.py b/cv/detection/ssd/pytorch/nhwc/batch_norm.py similarity index 100% rename from cv/detection/ssd/pytorch/iluvatar/config/nhwc/batch_norm.py rename to cv/detection/ssd/pytorch/nhwc/batch_norm.py diff --git a/cv/detection/ssd/pytorch/nhwc/cifar10_nhwc.py b/cv/detection/ssd/pytorch/nhwc/cifar10_nhwc.py new file mode 100644 index 000000000..e3b24c68b --- /dev/null +++ b/cv/detection/ssd/pytorch/nhwc/cifar10_nhwc.py @@ -0,0 +1,229 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import torch +import torchvision +import torchvision.transforms as transforms +from tqdm import tqdm +import math +import torch.optim as optim +import torch.optim.lr_scheduler as lr_scheduler +import matplotlib.pyplot as plt +import pylab +import numpy as np +from torch.autograd import Variable + +from argparse import ArgumentParser +from apex.fp16_utils import * +from apex.parallel import DistributedDataParallel as DDP + +from vgg_nhwc import VggBN_NHWC +from resnet_nhwc_cifar10 import * + +def parse_args(): + parser = ArgumentParser(description="NHWC CIFAR10 test") + parser.add_argument('--local_rank', '--local-rank', default=0, type=int, + help='Used for multi-process training. 
Can either be manually set ' + + 'or automatically set by using \'python -m multiproc\'.') + parser.add_argument('--net', type=str, choices=['vgg', 'rn18', 'rn34'], default='vgg') + + return parser.parse_args() + +def train_and_test(args): + # Setup multi-GPU if necessary + args.distributed = False + args.world_size = 1 + if 'WORLD_SIZE' in os.environ: + args.distributed = int(os.environ['WORLD_SIZE']) > 1 + args.world_size = int(os.environ['WORLD_SIZE']) + + if args.distributed: + torch.cuda.set_device(args.local_rank) + + torch.distributed.init_process_group(backend='nccl', + init_method='env://') + + + torch.manual_seed(0) + torch.cuda.manual_seed(0) + + batch_size = 32 + learning_rate = 0.01 + learning_rate_decay = 0.0005 + momentum = 0.9 + epoch_step = 25 + max_epoch = 300 + + transform_train = transforms.Compose( + [transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + + transform_test = transforms.Compose( + [transforms.ToTensor(), + transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + + trainset = torchvision.datasets.CIFAR10(root='./data', train=True, + download=True, transform=transform_train) + testset = torchvision.datasets.CIFAR10(root='./data', train=False, + download=True, transform=transform_test) + if args.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler(trainset) + val_sampler = torch.utils.data.distributed.DistributedSampler(testset) + else: + train_sampler = None + val_sampler = None + + trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, + shuffle=train_sampler is None, + sampler=train_sampler, num_workers=4) + + testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, + shuffle=val_sampler is None, + sampler=val_sampler, num_workers=4) + + save_dir = "./save" + if not os.path.exists(save_dir) and args.local_rank == 0: + os.mkdir(save_dir) + + + def weights_init(m): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2./n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + if args.net == 'vgg': + net = VggBN_NHWC() + elif args.net == 'rn18': + net = ResNet18_NHWC() + elif args.net == 'rn34': + net = ResNet34_NHWC() + + net = network_to_half(net.cuda()) + net.apply(weights_init) + net.cuda() + + + criterion = nn.CrossEntropyLoss() + base_lr = learning_rate * args.world_size + optimizer = optim.SGD(net.parameters(), lr=base_lr, momentum=0.9, weight_decay=learning_rate_decay) + scheduler = lr_scheduler.StepLR(optimizer, step_size=epoch_step, gamma=0.5) + optimizer = FP16_Optimizer(optimizer, static_loss_scale=128.) + + if args.distributed: + net = DDP(net) + + test_accuracies = np.zeros(max_epoch) + for epoch in range(max_epoch): # loop over the dataset multiple times + #pbar = tqdm(trainloader) + #pbar.mininterval = 1 # update the processing bar at least 1 second + + """ + Initial Check + """ + """ + net.eval() + + if epoch == 0: + print('\033[0;31mInitial Check: \033[0m') + running_loss, correct, total = 0., 0., 0. + for i, data in enumerate(testloader, 0): + images, labels = data + images, labels = Variable(images.cuda()), Variable(labels.cuda()) + images = images.half().permute(0, 2, 3, 1).contiguous() + outputs = net(images) + loss = criterion(outputs, labels) + running_loss = running_loss * (i/(i+1.)) + loss.data[0] * (1./(i+1.) 
) + _, predicted = torch.max(outputs.data, 1) + total += labels.size(0) + correct += (predicted == labels.data).sum() + print('Loss on the test images: %f ...... should be 2.3' % running_loss) + print('Accuracy on the test images: %f %% ...... should be 10%%' % (100. * correct / total)) + """ + + """ + Training ... + """ + net.train() + + running_loss, correct, total = 0., 0., 0. + scheduler.step() + + for i, data in enumerate(trainloader, 0): + # get the inputs + inputs, labels = data + # wrap them in Variable + inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda()) + inputs = inputs.half().permute(0, 2, 3, 1).contiguous() + inputs.require_grad = False + labels.require_grad = False + # zero the parameter gradients + optimizer.zero_grad() + # forward + backward + optimize + outputs = net(inputs) + loss = criterion(outputs, labels) + optimizer.backward(loss) + optimizer.step() + + # update statistics + running_loss = running_loss * (i/(i+1.)) + loss.data[0] * (1./(i+1.) ) + _, predicted = torch.max(outputs.data, 1) + total += labels.size(0) + correct += (predicted == labels.data).sum() + print('\033[0;32m Statistics on epoch :%d learning rate: %f\033[0m' %(epoch, scheduler.get_lr()[0])) + print('Train Loss : %f Train Accuracy: %f %%' % (running_loss, 100. * correct / total)) + + """ + Testing ... + """ + net.eval() + + correct, total = 0., 0. + for data in testloader: + images, labels = data + images = Variable(images.cuda()).half().permute(0, 2, 3, 1).contiguous() + outputs = net(images) + _, predicted = torch.max(outputs.data, 1) + total += labels.size(0) + correct += (predicted == labels.cuda()).sum() + + print('[{}] {} / {} correct'.format(args.local_rank, correct, total)) + print('Test Accuracy: \033[1;33m%f %%\033[0m' % (100. * correct / total)) + test_accuracies[epoch] = 100. * correct / total + + """ + Saving model and accuracies, and ploting + """ + #np.save('./save/accuracies.npy', test_accuracies) + #torch.save(net.state_dict(), './save/model.%d.pkl' %epoch) + + #plt.figure() + #pylab.xlim(0, max_epoch + 1) + #pylab.ylim(0, 100) + #plt.plot(range(1, max_epoch +1), test_accuracies) + #plt.savefig('./save/accuracies.png') + #plt.close() + +if __name__ == "__main__": + torch.backends.cudnn.benchmark = True + + args = parse_args() + + train_and_test(args) + + diff --git a/cv/detection/ssd/pytorch/nvidia/config/nhwc/conv.py b/cv/detection/ssd/pytorch/nhwc/conv.py similarity index 92% rename from cv/detection/ssd/pytorch/nvidia/config/nhwc/conv.py rename to cv/detection/ssd/pytorch/nhwc/conv.py index 1bdddc757..94b81b255 100644 --- a/cv/detection/ssd/pytorch/nvidia/config/nhwc/conv.py +++ b/cv/detection/ssd/pytorch/nhwc/conv.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import sys import torch from torch.nn.modules.conv import _ConvNd @@ -23,9 +24,14 @@ from itertools import repeat def _ntuple(n): def parse(x): - if isinstance(x, collections.Iterable): - return x - return tuple(repeat(x, n)) + if sys.version_info < (3, 10, 0): + if isinstance(x, collections.Iterable): + return x + return tuple(repeat(x, n)) + else: + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) return parse _single = _ntuple(1) diff --git a/cv/detection/ssd/pytorch/iluvatar/config/nhwc/max_pool.py b/cv/detection/ssd/pytorch/nhwc/max_pool.py similarity index 100% rename from cv/detection/ssd/pytorch/iluvatar/config/nhwc/max_pool.py rename to cv/detection/ssd/pytorch/nhwc/max_pool.py diff --git a/cv/detection/ssd/pytorch/nhwc/mnist_nhwc.py b/cv/detection/ssd/pytorch/nhwc/mnist_nhwc.py new file mode 100644 index 000000000..f35f04159 --- /dev/null +++ b/cv/detection/ssd/pytorch/nhwc/mnist_nhwc.py @@ -0,0 +1,133 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function +import argparse +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torchvision import datasets, transforms + +from nhwc.conv import Conv2d_NHWC +from nhwc.max_pool import MaxPool2d_NHWC + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv1 = Conv2d_NHWC(1, 10, kernel_size=5) + self.pool1 = MaxPool2d_NHWC(2) + self.conv2 = Conv2d_NHWC(10, 20, kernel_size=5) + self.pool2 = MaxPool2d_NHWC(2) + self.conv2_drop = nn.Dropout2d() + self.fc1 = nn.Linear(320, 50) + self.fc2 = nn.Linear(50, 10) + + def forward(self, x): + x = F.relu(self.pool1(self.conv1(x))) + x = F.relu(self.pool2(self.conv2_drop(self.conv2(x)))) + x = x.permute(0, 3, 1, 2).contiguous() + x = x.view(-1, 320) + x = F.relu(self.fc1(x)) + x = F.dropout(x, training=self.training) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + +def train(args, model, device, train_loader, optimizer, epoch): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + data = data.permute(0, 2, 3, 1).contiguous().half() + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % args.log_interval == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. 
* batch_idx / len(train_loader), loss.item())) + +def test(args, model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + data = data.permute(0, 2, 3, 1).contiguous().half() + output = model(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss + pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability + correct += pred.eq(target.view_as(pred)).sum().item() + + test_loss /= len(test_loader.dataset) + print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( + test_loss, correct, len(test_loader.dataset), + 100. * correct / len(test_loader.dataset))) + +def main(): + # Training settings + parser = argparse.ArgumentParser(description='PyTorch MNIST Example') + parser.add_argument('--batch-size', type=int, default=64, metavar='N', + help='input batch size for training (default: 64)') + parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', + help='input batch size for testing (default: 1000)') + parser.add_argument('--epochs', type=int, default=10, metavar='N', + help='number of epochs to train (default: 10)') + parser.add_argument('--lr', type=float, default=0.01, metavar='LR', + help='learning rate (default: 0.01)') + parser.add_argument('--momentum', type=float, default=0.5, metavar='M', + help='SGD momentum (default: 0.5)') + parser.add_argument('--no-cuda', action='store_true', default=False, + help='disables CUDA training') + parser.add_argument('--seed', type=int, default=1, metavar='S', + help='random seed (default: 1)') + parser.add_argument('--log-interval', type=int, default=10, metavar='N', + help='how many batches to wait before logging training status') + args = parser.parse_args() + use_cuda = not args.no_cuda and torch.cuda.is_available() + + torch.manual_seed(args.seed) + + device = torch.device("cuda" if use_cuda else "cpu") + + kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {} + train_loader = torch.utils.data.DataLoader( + datasets.MNIST('../data', train=True, download=True, + transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=args.batch_size, shuffle=True, **kwargs) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST('../data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=args.test_batch_size, shuffle=True, **kwargs) + + + model = Net().to(device).half() + optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum) + + torch.backends.cudnn.benchmark = True + + for epoch in range(1, args.epochs + 1): + train(args, model, device, train_loader, optimizer, epoch) + test(args, model, device, test_loader) + + +if __name__ == '__main__': + main() diff --git a/cv/detection/ssd/pytorch/nhwc/resnet_nhwc_cifar10.py b/cv/detection/ssd/pytorch/nhwc/resnet_nhwc_cifar10.py new file mode 100644 index 000000000..f6f5c3b2d --- /dev/null +++ b/cv/detection/ssd/pytorch/nhwc/resnet_nhwc_cifar10.py @@ -0,0 +1,135 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +'''ResNet in PyTorch. + +For Pre-activation ResNet, see 'preact_resnet.py'. + +Reference: +[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Deep Residual Learning for Image Recognition. arXiv:1512.03385 +''' +import torch +import torch.nn as nn +import torch.nn.functional as F + +from conv import Conv2d_NHWC +from batch_norm import BatchNorm2d_NHWC +from max_pool import MaxPool2d_NHWC + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, in_planes, planes, stride=1): + super(BasicBlock, self).__init__() + self.conv1 = Conv2d_NHWC(in_planes, planes, kernel_size=3, stride=stride, padding=1) + self.bn1 = BatchNorm2d_NHWC(planes, fuse_relu=True) + self.conv2 = Conv2d_NHWC(planes, planes, kernel_size=3, stride=1, padding=1) + self.bn2 = BatchNorm2d_NHWC(planes, fuse_relu=False) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + Conv2d_NHWC(in_planes, self.expansion*planes, kernel_size=1, stride=stride), + BatchNorm2d_NHWC(self.expansion*planes, fuse_relu=False) + ) + + def forward(self, x): + out = self.bn1(self.conv1(x)) + out = self.bn2(self.conv2(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, in_planes, planes, stride=1): + super(Bottleneck, self).__init__() + self.conv1 = Conv2d_NHWC(in_planes, planes, kernel_size=1) + self.bn1 = BatchNorm2d_NHWC(planes, fuse_relu=True) + self.conv2 = Conv2d_NHWC(planes, planes, kernel_size=3, stride=stride, padding=1) + self.bn2 = BatchNorm2d_NHWC(planes, fuse_relu=True) + self.conv3 = Conv2d_NHWC(planes, self.expansion*planes, kernel_size=1) + self.bn3 = BatchNorm2d_NHWC(self.expansion*planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + Conv2d_NHWC(in_planes, self.expansion*planes, kernel_size=1, stride=stride), + BatchNorm2d_NHWC(self.expansion*planes) + ) + + def forward(self, x): + out = self.bn1(self.conv1(x)) + out = self.bn2(self.conv2(out)) + out = self.bn3(self.conv3(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class ResNet_NHWC(nn.Module): + def __init__(self, block, num_blocks, num_classes=10): + super(ResNet_NHWC, self).__init__() + self.in_planes = 64 + + self.conv1 = Conv2d_NHWC(3, 64, kernel_size=3, stride=1, padding=1) + self.bn1 = BatchNorm2d_NHWC(64, fuse_relu=True) + self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) + self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) + self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) + self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) + self.linear = nn.Linear(512*block.expansion, num_classes) + + def _make_layer(self, block, planes, num_blocks, stride): + strides = [stride] + [1]*(num_blocks-1) + layers = [] + for stride in strides: + layers.append(block(self.in_planes, planes, stride)) + self.in_planes = planes * block.expansion + return nn.Sequential(*layers) + + def forward(self, x): + out = self.bn1(self.conv1(x)) + out = 
self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + out = self.layer4(out) + + # Move back to NCHW for final parts + out = out.permute(0, 3, 1, 2).contiguous() + + out = F.avg_pool2d(out, 4) + out = out.view(out.size(0), -1) + out = self.linear(out) + return out + + +def ResNet18_NHWC(): + return ResNet_NHWC(BasicBlock, [2,2,2,2]) + +def ResNet34_NHWC(): + return ResNet_NHWC(BasicBlock, [3,4,6,3]) + +def ResNet50_NHWC(): + return ResNet_NHWC(Bottleneck, [3,4,6,3]) + +def ResNet101_NHWC(): + return ResNet_NHWC(Bottleneck, [3,4,23,3]) + +def ResNet152(): + return ResNet(Bottleneck, [3,8,36,3]) + diff --git a/cv/detection/ssd/pytorch/nvidia/config/nhwc/test_bn_cudnn.py b/cv/detection/ssd/pytorch/nhwc/test_bn_cudnn.py similarity index 97% rename from cv/detection/ssd/pytorch/nvidia/config/nhwc/test_bn_cudnn.py rename to cv/detection/ssd/pytorch/nhwc/test_bn_cudnn.py index f74edfbe3..a1350d2e8 100644 --- a/cv/detection/ssd/pytorch/nvidia/config/nhwc/test_bn_cudnn.py +++ b/cv/detection/ssd/pytorch/nhwc/test_bn_cudnn.py @@ -12,14 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +from conv import * +from batch_norm import * import torch -try: - from .batch_norm import * -except: - from batch_norm import * - - torch.backends.cudnn.benchmark=True N = 64 diff --git a/cv/detection/ssd/pytorch/nvidia/config/nhwc/test_conv.py b/cv/detection/ssd/pytorch/nhwc/test_conv.py similarity index 96% rename from cv/detection/ssd/pytorch/nvidia/config/nhwc/test_conv.py rename to cv/detection/ssd/pytorch/nhwc/test_conv.py index 2ccf96905..c080ff349 100644 --- a/cv/detection/ssd/pytorch/nvidia/config/nhwc/test_conv.py +++ b/cv/detection/ssd/pytorch/nhwc/test_conv.py @@ -12,13 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from nhwc.conv import * import torch -try: - from .conv import * -except: - from conv import * - torch.backends.cudnn.benchmark=True N = 64 diff --git a/cv/detection/ssd/pytorch/iluvatar/config/nhwc/test_max_pool.py b/cv/detection/ssd/pytorch/nhwc/test_max_pool.py similarity index 96% rename from cv/detection/ssd/pytorch/iluvatar/config/nhwc/test_max_pool.py rename to cv/detection/ssd/pytorch/nhwc/test_max_pool.py index 3212b76c1..77fbc6dcc 100644 --- a/cv/detection/ssd/pytorch/iluvatar/config/nhwc/test_max_pool.py +++ b/cv/detection/ssd/pytorch/nhwc/test_max_pool.py @@ -12,13 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
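Earlier in this patch, the nhwc/conv.py hunk guards the _ntuple helper with a sys.version_info branch because the collections.Iterable alias was removed in Python 3.10. Since collections.abc.Iterable has been available since Python 3.3, the abc form alone covers every supported interpreter; a simplified, behaviour-equivalent sketch:

# Equivalent _ntuple helper without the version branch: collections.abc.Iterable
# works on all Python 3 versions, while collections.Iterable was removed in 3.10.
import collections.abc
from itertools import repeat

def _ntuple(n):
    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return x
        return tuple(repeat(x, n))
    return parse

_pair = _ntuple(2)
print(_pair(3), _pair((1, 2)))  # (3, 3) (1, 2)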
+from nhwc.conv import * +from nhwc.max_pool import * import torch -try: - from .max_pool import * -except: - from max_pool import * - torch.backends.cudnn.benchmark=True N = 64 diff --git a/cv/detection/ssd/pytorch/nvidia/config/Dockerfile b/cv/detection/ssd/pytorch/nvidia/config/Dockerfile deleted file mode 100644 index 77de5e1b4..000000000 --- a/cv/detection/ssd/pytorch/nvidia/config/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -ARG FROM_IMAGE_NAME=nvcr.io/nvidia/pytorch:20.06-py3 -FROM ${FROM_IMAGE_NAME} - -# Install dependencies for system configuration logger -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - infiniband-diags \ - pciutils \ - && rm -rf /var/lib/apt/lists/* - -# Configure environment variables -ENV OMP_NUM_THREADS=1 -ENV OPENCV_FOR_THREADS_NUM=1 - diff --git a/cv/detection/ssd/pytorch/nvidia/config/config_V100x1x1.py b/cv/detection/ssd/pytorch/nvidia/config/config_V100x1x1.py deleted file mode 100644 index 584f5ac91..000000000 --- a/cv/detection/ssd/pytorch/nvidia/config/config_V100x1x1.py +++ /dev/null @@ -1,44 +0,0 @@ -from training_event import ApexTrainingEvent - -# 1.Basic Configurations -n_gpu = 1 -distributed = False -dist_backend = "nccl" - -save_checkpoint = False - -seed = 4043767865 -log_freq = 20 - - -# 2.Model Training Configurations -gradient_accumulation_steps = 1 -train_batch_size = 120 -eval_batch_size = 160 -learning_rate = 2.92e-3 -weight_decay_rate = 1.6e-4 -lr_decay_factor = 0.1 -lr_decay_epochs = [44, 55] -warmup = 650 -warmup_factor = 0 -loss_scale = 0.0 - -# 3. Optimizer Configurations -num_workers = 4 -fp16 = True -opt_level = 2 -delay_allreduce = True -bn_group = 1 -fast_nms = True -fast_cj = True -use_coco_ext = True -dali = True -dali_sync = False -dali_cache = -1 -nhwc = True -pad_input = True -jit = True -use_nvjpeg = True - - -training_event = ApexTrainingEvent \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/nvidia/config/config_V100x1x8.py b/cv/detection/ssd/pytorch/nvidia/config/config_V100x1x8.py deleted file mode 100644 index 07f345a48..000000000 --- a/cv/detection/ssd/pytorch/nvidia/config/config_V100x1x8.py +++ /dev/null @@ -1,44 +0,0 @@ -from training_event import ApexTrainingEvent - -# 1.Basic Configurations -n_gpu = 1 -distributed = True -dist_backend = "nccl" - -save_checkpoint = False - -seed = 4230048668 -log_freq = 20 - - -# 2.Model Training Configurations -gradient_accumulation_steps = 1 -train_batch_size = 120 -eval_batch_size = 160 -learning_rate = 2.92e-3 -weight_decay_rate = 1.6e-4 -lr_decay_factor = 0.1 -lr_decay_epochs = [44, 55] -warmup = 650 -warmup_factor = 0 -loss_scale = 0.0 - -# 3. 
Optimizer Configurations -num_workers = 4 -fp16 = True -opt_level = 2 -delay_allreduce = True -bn_group = 1 -fast_nms = True -fast_cj = True -use_coco_ext = True -dali = True -dali_sync = False -dali_cache = -1 -nhwc = True -pad_input = True -jit = True -use_nvjpeg = True - - -training_event = ApexTrainingEvent \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/nvidia/config/config_nodali_V100x1x8.py b/cv/detection/ssd/pytorch/nvidia/config/config_nodali_V100x1x8.py deleted file mode 100644 index 20d801472..000000000 --- a/cv/detection/ssd/pytorch/nvidia/config/config_nodali_V100x1x8.py +++ /dev/null @@ -1,44 +0,0 @@ -from training_event import ApexTrainingEvent - -# 1.Basic Configurations -n_gpu = 1 -distributed = True -dist_backend = "nccl" - -save_checkpoint = False - -seed = 4230048668 -log_freq = 20 - - -# 2.Model Training Configurations -gradient_accumulation_steps = 1 -train_batch_size = 120 -eval_batch_size = 160 -learning_rate = 2.92e-3 -weight_decay_rate = 1.6e-4 -lr_decay_factor = 0.1 -lr_decay_epochs = [44, 55] -warmup = 650 -warmup_factor = 0 -loss_scale = 0.0 - -# 3. Optimizer Configurations -num_workers = 4 -fp16 = True -opt_level = 2 -delay_allreduce = True -bn_group = 1 -fast_nms = True -fast_cj = True -use_coco_ext = True -dali = False -dali_sync = False -dali_cache = 0 -nhwc = False -pad_input = False -jit = False -use_nvjpeg = False - - -training_event = ApexTrainingEvent \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/nvidia/config/converter.py b/cv/detection/ssd/pytorch/nvidia/config/converter.py deleted file mode 100644 index e61209dcb..000000000 --- a/cv/detection/ssd/pytorch/nvidia/config/converter.py +++ /dev/null @@ -1,15 +0,0 @@ -import copy -import math -from torch.utils import checkpoint - -from ssd300 import SSD300 - - -def convert_model(model, config): - model_options = { - 'use_nhwc': config.nhwc, - 'pad_input': config.pad_input, - 'bn_group': config.bn_group, - } - model = SSD300(config, config.num_classes, **model_options).cuda() - return model diff --git a/cv/detection/ssd/pytorch/nvidia/config/environment_variables.sh b/cv/detection/ssd/pytorch/nvidia/config/environment_variables.sh deleted file mode 100644 index e28aba1a3..000000000 --- a/cv/detection/ssd/pytorch/nvidia/config/environment_variables.sh +++ /dev/null @@ -1,10 +0,0 @@ -# ================================================= -# Export variables -# ================================================= - -export CONTAINER_MOUNTS="--gpus all" -NVCC_ARGUMENTS="-U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ --expt-relaxed-constexpr -ftemplate-depth=1024 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_80,code=compute_80" -if [[ "$PYTORCH_BUILD_VERSION" == 1.8* ]]; then - NVCC_ARGUMENTS="${NVCC_ARGUMENTS} -D_PYTORCH18" -fi -export NVCC_ARGUMENTS \ No newline at end of file diff --git a/cv/detection/ssd/pytorch/nvidia/config/nhwc/batch_norm.py b/cv/detection/ssd/pytorch/nvidia/config/nhwc/batch_norm.py deleted file mode 100644 index b0e0402a9..000000000 --- a/cv/detection/ssd/pytorch/nvidia/config/nhwc/batch_norm.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import torch -from torch.nn.modules.batchnorm import _BatchNorm - -import SSD._C as C - -import collections -from itertools import repeat - -class bn_NHWC_impl(torch.autograd.Function): - @staticmethod - def forward(ctx, x, s, b, rm, riv, mom, epsilon, fuse_relu=False, is_train=True, z=None): - if is_train: - ctx.epsilon = epsilon - ctx.momentum = mom - ctx.fuse_relu = fuse_relu - - ctx.fuse_add = False if z is None else True - - if z is not None: - y, save_mean, save_var, reserve = C.bn_add_fwd_nhwc_cudnn(x, z, s, b, rm, riv, mom, epsilon, fuse_relu) - else: - y, save_mean, save_var, reserve = C.bn_fwd_nhwc_cudnn(x, s, b, rm, riv, mom, epsilon, fuse_relu) - - ctx.save_for_backward(x, y, s, b, rm, riv, save_mean, save_var, reserve) - - return y - else: - if z is not None: - return C.bn_add_fwd_eval_nhwc_cudnn(x, z, s, b, rm, riv, mom, epsilon, fuse_relu) - else: - return C.bn_fwd_eval_nhwc_cudnn(x, s, b, rm, riv, mom, epsilon, fuse_relu) - - @staticmethod - def backward(ctx, grad_y): - x, y, s, b, rm, riv, save_mean, save_var, reserve = ctx.saved_variables - epsilon = ctx.epsilon - mom = ctx.momentum - fuse_relu = ctx.fuse_relu - fuse_add = ctx.fuse_add - - if ctx.fuse_add: - dx, dz, dscale, dbias = C.bn_add_bwd_nhwc_cudnn(x, y, grad_y, s, b, rm, riv, save_mean, save_var, reserve, mom, epsilon, fuse_relu) - else: - dx, _, dscale, dbias = C.bn_bwd_nhwc_cudnn(x, y, grad_y, s, b, rm, riv, save_mean, save_var, reserve, mom, epsilon, fuse_relu) - dz = None - - return dx, dscale, dbias, None, None, None, None, None, None, dz - - - -class BatchNorm2d_NHWC(_BatchNorm): - def __init__(self, num_features, fuse_relu=False): - super(BatchNorm2d_NHWC, self).__init__(num_features) - - self.fuse_relu = fuse_relu - - def forward(self, x, z=None): - return bn_NHWC_impl.apply(x, - self.weight, self.bias, - self.running_mean, self.running_var, - self.momentum, - self.eps, self.fuse_relu, self.training, z) - diff --git a/cv/detection/ssd/pytorch/nvidia/config/nhwc/max_pool.py b/cv/detection/ssd/pytorch/nvidia/config/nhwc/max_pool.py deleted file mode 100644 index 90035222f..000000000 --- a/cv/detection/ssd/pytorch/nvidia/config/nhwc/max_pool.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
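The deleted batch_norm.py above wraps the cuDNN NHWC batch-norm kernels with an optional fused residual add and ReLU, so BatchNorm2d_NHWC(planes, fuse_relu=True) called as bn(x, z) performs normalization, add, and activation in one pass. In plain PyTorch the same computation is simply relu(bn(x) + z), which is also how the deleted test_bn_cudnn.py builds its NCHW reference; a minimal sketch under that reading:

# Reference for what the fused NHWC kernel computes, using stock modules:
# y = relu(batch_norm(x) + z). Shapes are illustrative only.
import torch
import torch.nn as nn

bn = nn.BatchNorm2d(32)
x = torch.randn(4, 32, 8, 8)
z = torch.randn(4, 32, 8, 8)    # residual branch, the optional fuse_add input

y = torch.relu(bn(x) + z)       # unfused equivalent of BatchNorm2d_NHWC(32, fuse_relu=True)(x_nhwc, z_nhwc)
print(y.shape)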
- -import torch -from torch.nn.modules.pooling import MaxPool2d - -import SSD._C as C - -class max_pool_NHWC_impl(torch.autograd.Function): - @staticmethod - def forward(ctx, x, kernel_size, stride, padding, dilation): - ctx.kernel_size = kernel_size - ctx.stride = stride if stride is not None else 0 - ctx.padding = padding - ctx.dilation = dilation - - y = C.max_pool_fwd_nhwc(x, kernel_size, stride, padding, dilation) - - # Need to save y as well - ctx.save_for_backward(x, y) - return y - - @staticmethod - def backward(ctx, y_grad): - x, y = ctx.saved_variables - - kernel = ctx.kernel_size - stride = ctx.stride - padding = ctx.padding - dilation = ctx.dilation - - return C.max_pool_bwd_nhwc(x, - y, - y_grad, - kernel, - stride, - padding, - dilation), None, None, None, None - -class MaxPool2d_NHWC(MaxPool2d): - def __init__(self, kernel_size, stride=None, padding=0): - super(MaxPool2d_NHWC, self).__init__(kernel_size, stride=stride, padding=padding) - - def forward(self, x): - return max_pool_NHWC_impl.apply(x, - self.kernel_size, - self.stride, - self.padding, - self.dilation) diff --git a/cv/detection/ssd/pytorch/nvidia/config/nhwc/test_max_pool.py b/cv/detection/ssd/pytorch/nvidia/config/nhwc/test_max_pool.py deleted file mode 100644 index 3212b76c1..000000000 --- a/cv/detection/ssd/pytorch/nvidia/config/nhwc/test_max_pool.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import torch - -try: - from .max_pool import * -except: - from max_pool import * - -torch.backends.cudnn.benchmark=True - -N = 64 -C = 256 -H = 56 -W = 56 - -kernel = 3 -stride = 1 -padding = 1 - -H_out = 1 + (H + 2*padding - kernel) // stride -W_out = 1 + (W + 2*padding - kernel) // stride - -pool = MaxPool2d_NHWC(kernel_size=kernel, stride=stride, padding=padding) - -# Make a NCHW copy of everything -pool_nchw = torch.nn.MaxPool2d(kernel_size=kernel, stride=stride, padding=padding).cuda() - -x = torch.randn(N, C, H, W).cuda().half() - -# Copy input tensor -x_copy = x.clone() -x_copy.requires_grad = True - -# Transpose -> NHWC -x = x.permute(0,2,3,1).contiguous() -x.requires_grad = True - -g0 = torch.randn(N, H_out, W_out, C).cuda().half() -g0_nchw = g0.clone().permute(0,3,1,2).contiguous() - -out = pool(x) -#out = out.relu_() -out.backward(g0) - -out_nchw = pool_nchw(x_copy) -#out_nchw = out_nchw.relu_() - -out_nchw.backward(g0_nchw) - -out_nhwc = out.permute(0,3,1,2) -#print(out_nhwc) -#print(out_nchw) - -print(torch.allclose(out_nhwc, out_nchw, atol=1e-5, rtol=1e-3)) -print(torch.allclose(x.grad.permute(0,3,1,2), x_copy.grad, atol=1e-5, rtol=1e-3)) - diff --git a/cv/detection/ssd/pytorch/nvidia/config/resnet.py b/cv/detection/ssd/pytorch/nvidia/config/resnet.py deleted file mode 100644 index f81f344c4..000000000 --- a/cv/detection/ssd/pytorch/nvidia/config/resnet.py +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import torch # for torch.cat and torch.zeros -import torch.nn as nn -import torch.utils.model_zoo as model_zoo - -# Group batch norm -from apex.parallel import SyncBatchNorm as gbn -# Persistent group BN for NHWC case -import apex.parallel -# from apex.contrib.groupbn.batch_norm import BatchNorm2d_NHWC as gbn_persistent - - -try: - from .nhwc.batch_norm import BatchNorm2d_NHWC as gbn_persistent - from .nhwc.conv import Conv2d_NHWC - from .nhwc.max_pool import MaxPool2d_NHWC -except: - from nhwc.batch_norm import BatchNorm2d_NHWC as gbn_persistent - from nhwc.conv import Conv2d_NHWC - from nhwc.max_pool import MaxPool2d_NHWC - - -__all__ = ['resnet'] - -model_urls = { - 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', -} - - -class Layers_NCHW: - Conv2d = nn.Conv2d - MaxPool = nn.MaxPool2d - BnAddRelu = None # will be assigned at construction - - def __init__(self, bn_group, **kwargs): - super(Layers_NCHW, self).__init__() - self.nhwc = False - self.bn_group = bn_group - - if (bn_group > 1): - bn_base = gbn - else: - bn_base = nn.BatchNorm2d - - class BnAddRelu_(bn_base): - def __init__(self, planes, fuse_relu=False, bn_group=1): - if (bn_group > 1): - super(BnAddRelu_, self).__init__( - planes, - process_group=apex.parallel.create_syncbn_process_group(bn_group)) - else: - super(BnAddRelu_, self).__init__(planes) - - self.fuse_relu_flag = fuse_relu - - def forward(self, x, z=None): - out = super().forward(x) - if z is not None: - out = out.add_(z) - if self.fuse_relu_flag: - out = out.relu_() - return out - - # this is still Layers_NCHW::__init__ - self.BnAddRelu = BnAddRelu_ - - def build_bn(self, planes, fuse_relu=False): - return self.BnAddRelu(planes, fuse_relu, self.bn_group) - - -class Layers_NHWC: - Conv2d = Conv2d_NHWC - MaxPool = MaxPool2d_NHWC - - class BnAddRelu(gbn_persistent): - def __init__(self, planes, fuse_relu=False, bn_group=1): - super(Layers_NHWC.BnAddRelu, self).__init__(planes, - fuse_relu) - - def __init__(self, bn_group, **kwargs): - super(Layers_NHWC, self).__init__() - self.nhwc = True - self.bn_group = bn_group - - def build_bn(self, planes, fuse_relu): - return self.BnAddRelu(planes, fuse_relu, self.bn_group) - - - -def conv1x1(layer_types, in_planes, out_planes, stride=1): - """1x1 convolution""" - return layer_types.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, - bias=False) - -def conv3x3(layer_types, in_planes, out_planes, stride=1): - """3x3 convolution with padding""" - return layer_types.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, layerImpls, inplanes, planes, stride=1, downsample=None): - super(BasicBlock, self).__init__() - self.conv1 = conv3x3(layerImpls, inplanes, planes, stride=stride) - self.bn1 = layerImpls.build_bn(planes, fuse_relu=True) - self.conv2 = conv3x3(layerImpls, planes, planes) - self.bn2 = layerImpls.build_bn(planes, fuse_relu=True) - 
self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - if self.downsample is not None: - residual = self.downsample(x) - - out = self.conv1(x) - out = self.bn1(out) - - out = self.conv2(out) - out = self.bn2(out, residual) - - return out - -class ResNet(nn.Module): - - def __init__(self, layerImpls, block, layers, num_classes=1000, - pad_input=False, ssd_mods=False, use_nhwc=False, - bn_group=1): - self.inplanes = 64 - super(ResNet, self).__init__() - if pad_input: - input_channels = 4 - else: - input_channels = 3 - self.conv1 = layerImpls.Conv2d(input_channels, 64, kernel_size=7, stride=2, - padding=3, bias=False) - self.bn1 = layerImpls.build_bn(64, fuse_relu=True) - self.maxpool = layerImpls.MaxPool(kernel_size=3, stride=2, padding=1) - - # Add conv{2,3,4} - self.layer1 = self._make_layer(layerImpls, block, 64, layers[0]) - self.layer2 = self._make_layer(layerImpls, block, 128, layers[1], stride=2) - self.layer3 = self._make_layer(layerImpls, block, 256, layers[2], stride=1) - - # FIXME! This (a) fails for nhwc, and (b) is irrelevant if the user is - # also loading pretrained data (which we don't know about here, but - # know about in the caller (the "resnet()" function below). - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - def _make_layer(self, layerImpls, block, planes, blocks, stride=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - layerImpls.Conv2d(self.inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - layerImpls.build_bn(planes * block.expansion, fuse_relu=False), - ) - - layers = [] - layers.append(block(layerImpls, self.inplanes, planes, stride, downsample)) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(layerImpls, self.inplanes, planes)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.classifier(x) - - return x - -def _transpose_state(state, pad_input=False): - for k in state.keys(): - if len(state[k].shape) == 4: - if pad_input and "conv1.weight" in k and not 'layer' in k: - s = state[k].shape - state[k] = torch.cat([state[k], torch.zeros([s[0], 1, s[2], s[3]])], dim=1) - state[k] = state[k].permute(0, 2, 3, 1).contiguous() - return state - -def resnet34(pretrained=False, nhwc=False, ssd_mods=False, **kwargs): - """Constructs a ResNet model. 
- - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - if nhwc: - layerImpls = Layers_NHWC(**kwargs) - else: - layerImpls = Layers_NCHW(**kwargs) - - block = BasicBlock - layer_list = [3, 4, 6, 3] - model = ResNet(layerImpls, block, layer_list, ssd_mods=ssd_mods, use_nhwc=nhwc, **kwargs) - - if pretrained: - orig_state_dict = model_zoo.load_url(model_urls['resnet34']) - - # Modify the state dict to remove conv5 / layer4 - state_dict = {k:orig_state_dict[k] for k in orig_state_dict if (not k.startswith('layer4') and not k.startswith('fc'))} - - pad_input = kwargs.get('pad_input', False) - if nhwc: - state_dict = _transpose_state(state_dict, pad_input) - - model.load_state_dict(state_dict) - return nn.Sequential(model.conv1, model.bn1, model.maxpool, model.layer1, model.layer2, model.layer3) diff --git a/cv/detection/ssd/pytorch/nvidia/config/training_event.py b/cv/detection/ssd/pytorch/nvidia/config/training_event.py deleted file mode 100644 index 3fdb8fba4..000000000 --- a/cv/detection/ssd/pytorch/nvidia/config/training_event.py +++ /dev/null @@ -1,103 +0,0 @@ -import os -from typing import Tuple - -import torch -import apex -from torch.cuda.amp import GradScaler -from torch.optim import Optimizer - -from train.event.base import BaseTrainingEventInterface -from train.event.base import BatchType, SSD_MODEL -from train.training_state import TrainingState - -from converter import convert_model - - -class ApexTrainingEvent(BaseTrainingEventInterface): - - def __init__(self, config): - super(ApexTrainingEvent, self).__init__(config) - self.model = None - self.optimizer = None - self.overflow_buf = None - - def save_checkpoint(self, path: str, training_state: TrainingState): - torch.save({ - "model": self.model.state_dict(), - "optimizer": self.optimizer.state_dict(), - "amp": apex.amp.state_dict(), - "master params": list(apex.amp.master_params(self.optimizer)), - "epoch": training_state.epoch, - "iter_num": training_state.iter_num, - }, "{}/epoch{}_{}.pt".format(path, training_state.epoch, round(training_state.eval_ap, 5))) - - def load_checkpoint(self, checkpoint): - self.model.load_state_dict(checkpoint["model"], strict=True) - self.optimizer.load_state_dict(checkpoint["optimizer"]) - self.config.iteration = checkpoint["iter_num"] - self.config.epoch = checkpoint["epoch"] - if checkpoint.get("amp", None): - apex.amp.load_state_dict(checkpoint["amp"]) - if checkpoint.get("master params", None): - for param, saved_param in zip(apex.amp.master_params(self.optimizer), checkpoint["master params"]): - param.data.copy_(saved_param.data) - - def on_init_start(self): - pass - - def convert_model(self, model: SSD_MODEL) -> SSD_MODEL: - self.model = convert_model(model, self.config) - return self.model - - def create_optimizer(self, model: SSD_MODEL) -> Optimizer: - config = self.config - base_lr = 2.5e-3 - requested_lr_multiplier = config.learning_rate / base_lr - adjusted_multiplier = max(1, round(requested_lr_multiplier * config.train_batch_size * config.n_gpu / 32)) - - current_lr = base_lr * adjusted_multiplier - current_weight_decay = config.weight_decay_rate - - self.optimizer = apex.optimizers.FusedSGD(model.parameters(), - lr=current_lr, - momentum=0.9, - weight_decay=current_weight_decay) - return self.optimizer - - def model_to_fp16(self, model: SSD_MODEL, optimizer: Optimizer) -> Tuple[SSD_MODEL, Optimizer]: - self.model, self.optimizer = apex.amp.initialize(model, optimizer, opt_level="O{}".format(self.config.opt_level), loss_scale=128.) 
- return self.model, self.optimizer - - def model_to_ddp(self, model: SSD_MODEL) -> SSD_MODEL: - config = self.config - if config.distributed: - if config.delay_allreduce: - print(config.local_rank, "Delaying allreduces to the end of backward()") - self.model = apex.parallel.DistributedDataParallel(model, - gradient_predivide_factor=config.n_gpu / 8.0, - delay_allreduce=config.delay_allreduce, - retain_allreduce_buffers=config.fp16) - else: - self.model = model - return self.model - - def on_step_begin(self, step: int): - pass - - def on_step_end(self, step: int): - pass - - def on_backward(self, step: int, loss: torch.Tensor, optimizer: Optimizer, grad_scaler: GradScaler=None): - with apex.amp.scale_loss(loss, optimizer) as scaled_loss: - scaled_loss.backward() - update_step = step % self.config.gradient_accumulation_steps == 0 - if update_step: - self.update_model_params(optimizer, grad_scaler) - - def update_model_params(self, optimizer: Optimizer, grad_scaler: GradScaler=None): - optimizer.step() - for param in self.model.parameters(): - param.grad = None - - - diff --git a/cv/detection/ssd/pytorch/nvidia/reset.sh b/cv/detection/ssd/pytorch/nvidia/reset.sh deleted file mode 100644 index a11f3253f..000000000 --- a/cv/detection/ssd/pytorch/nvidia/reset.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -# clean cache for host memory -echo 3 > /proc/sys/vm/drop_caches - -# reset GPU -nvidia-smi -r - - diff --git a/cv/detection/ssd/pytorch/nvidia/setup.py b/cv/detection/ssd/pytorch/nvidia/setup.py deleted file mode 100644 index a802a5560..000000000 --- a/cv/detection/ssd/pytorch/nvidia/setup.py +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
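The learning-rate scaling in ApexTrainingEvent.create_optimizer above rounds the requested LR to an integer multiple of a 2.5e-3 base, scaled by the global batch relative to a reference batch of 32. As standalone arithmetic (the per-GPU batch size and GPU count below are assumed example values):

base_lr = 2.5e-3
requested_lr = 2.68e-3             # the default --lr used elsewhere in this patch
train_batch_size, n_gpu = 32, 8    # assumed example values
multiplier = max(1, round((requested_lr / base_lr) * train_batch_size * n_gpu / 32))
print(base_lr * multiplier)        # 0.0225 -> the LR actually handed to FusedSGD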
- -import glob -import os - -import torch -from torch.utils.cpp_extension import CUDA_HOME -from torch.utils.cpp_extension import CppExtension -from torch.utils.cpp_extension import CUDAExtension - -from setuptools import find_packages -from setuptools import setup - -requirements = ["torch", "torchvision"] - - -def get_extensions(): - this_dir = os.path.dirname(os.path.abspath(__file__)) - extensions_dir = os.path.join(this_dir, "csrc") - - source_cpu = glob.glob(os.path.join(extensions_dir, "*.cpp")) - source_nhwc = glob.glob(os.path.join(extensions_dir, "nhwc", "*.cpp")) - source_nhwc += glob.glob(os.path.join(extensions_dir, "nhwc", "*.cu")) - source_cuda = glob.glob(os.path.join(extensions_dir, "*.cu")) - - print('c++: ', source_cpu) - print('NHWC: ', source_nhwc) - print('cuda: ', source_cuda) - sources = source_cpu + source_nhwc - extension = CppExtension - - define_macros = [] - - if CUDA_HOME is not None: - extension = CUDAExtension - sources += source_cuda - define_macros += [("WITH_CUDA", None)] - - sources = [os.path.join(extensions_dir, s) for s in sources] - - include_dirs = [extensions_dir] - extra_compile_flags= {'cxx' : []} - extra_compile_flags['nvcc'] = ['-DCUDA_HAS_FP16=1','-D__CUDA_NO_HALF_OPERATORS__','-D__CUDA_NO_HALF_CONVERSIONS__','-D__CUDA_NO_HALF2_OPERATORS__'] - - gencodes = [ - #'-gencode', 'arch=compute_50,code=sm_50', - #'-gencode', 'arch=compute_52,code=sm_52', - #'-gencode', 'arch=compute_60,code=sm_60', - #'-gencode', 'arch=compute_61,code=sm_61', - '-gencode', 'arch=compute_70,code=sm_70', - '-gencode', 'arch=compute_70,code=compute_70',] - - extra_compile_flags['nvcc'] += gencodes - - extra_compile_flags= {'cxx' : []} - extra_compile_flags['nvcc'] = ['-DCUDA_HAS_FP16=1','-D__CUDA_NO_HALF_OPERATORS__','-D__CUDA_NO_HALF_CONVERSIONS__','-D__CUDA_NO_HALF2_OPERATORS__'] - - gencodes = [ - #'-gencode', 'arch=compute_50,code=sm_50', - #'-gencode', 'arch=compute_52,code=sm_52', - #'-gencode', 'arch=compute_60,code=sm_60', - #'-gencode', 'arch=compute_61,code=sm_61', - '-gencode', 'arch=compute_70,code=sm_70', - '-gencode', 'arch=compute_70,code=compute_70',] - - extra_compile_flags['nvcc'] += gencodes - - ext_modules = [ - extension( - "SSD._C", - sources, - include_dirs=include_dirs, - define_macros=define_macros, - extra_compile_args=extra_compile_flags, - ) - ] - - return ext_modules - - -setup( - name="SSD", - version="0.1", - author="slayton", - url="", - description="SSD in pytorch", - packages=find_packages(exclude=("configs", "examples", "test",)), - # install_requires=requirements, - ext_modules=get_extensions(), - cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, -) diff --git a/cv/detection/ssd/pytorch/base/model/losses/opt_loss.py b/cv/detection/ssd/pytorch/opt_loss.py similarity index 77% rename from cv/detection/ssd/pytorch/base/model/losses/opt_loss.py rename to cv/detection/ssd/pytorch/opt_loss.py index 05eaeca33..b482811bc 100644 --- a/cv/detection/ssd/pytorch/base/model/losses/opt_loss.py +++ b/cv/detection/ssd/pytorch/opt_loss.py @@ -1,3 +1,17 @@ +# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import torch class OptLoss(torch.jit.ScriptModule): diff --git a/cv/detection/ssd/pytorch/parse_config.py b/cv/detection/ssd/pytorch/parse_config.py new file mode 100644 index 000000000..146888e5f --- /dev/null +++ b/cv/detection/ssd/pytorch/parse_config.py @@ -0,0 +1,172 @@ +import torch +import os # for getenv() + +from argparse import ArgumentParser + +import random + +# adds mutually exclusive "--name" and "--no-name" command line arguments, with +# the result stored in a variable named "name" (with any dashes in "name" +# replaced by underscores) +# inspired by https://stackoverflow.com/a/31347222/2209313 +def add_bool_arg(group, name, default=False, help=''): + subgroup = group.add_mutually_exclusive_group(required=False) + name_with_underscore = name.replace('-', '_').replace(' ', '_') + + truehelp = help + falsehelp = help + if help != '': + falsehelp = 'do not ' + falsehelp + if default is True: + if truehelp != '': + truehelp = truehelp + ' ' + truehelp = truehelp + '(default)' + else: + if falsehelp != '': + falsehelp = falsehelp + ' ' + falsehelp = falsehelp + '(default)' + + subgroup.add_argument('--' + name, dest=name_with_underscore, action='store_true', help=truehelp) + subgroup.add_argument('--no-' + name, dest=name_with_underscore, action='store_false', help=falsehelp) + group.set_defaults(**{name_with_underscore:default}) + +def parse_args(): + parser = ArgumentParser(description="Train Single Shot MultiBox Detector" + " on COCO") + + data_group = parser.add_argument_group('data', 'data-related options') + # Data-related + data_group.add_argument('--data', '-d', type=str, default='/coco', + help='path to test and training data files') + data_group.add_argument('--meta_files_path', type=str, default=None, + help='path to COCO meta files') + data_group.add_argument('--batch-size', '-b', type=int, default=32, + help='number of examples for each iteration') + data_group.add_argument('--eval-batch-size', type=int, default=32, + help='number of examples for each evaluation iteration') + # input pipeline stuff + add_bool_arg(data_group, 'dali', default=True) # --dali (default) and --no-dali + data_group.add_argument('--fake-input', action='store_true', + help='run input pipeline with fake data (avoid all i/o and work except on very first call)') + data_group.add_argument('--input-batch-multiplier', type=int, default=1, + help='run input pipeline at batch size times larger than that given in --batch-size') + data_group.add_argument('--dali-sync', action='store_true', + help='run dali in synchronous mode instead of the (default) asynchronous') + data_group.add_argument('--dali-cache', type=int, default=-1, + help="cache size (in GB) for Dali's nvjpeg caching") + data_group.add_argument('--use-nvjpeg', action='store_true') + data_group.add_argument('--use-roi-decode', action='store_true', + help="DEPRECATED: Dali input pipeline uses roi decode if and only if --dali-cache is not set" ) + + # model-related + model_group = parser.add_argument_group('model', 'Model-related options') + model_group.add_argument('--model-path', type=str, default='./vgg16n.pth') + 
model_group.add_argument('--backbone', type=str, choices=['vgg16', 'vgg16bn', 'resnet18', 'resnet34', 'resnet50'], default='resnet34') + model_group.add_argument('--num-workers', type=int, default=4) + model_group.add_argument('--use-fp16', action='store_true') + model_group.add_argument('--print-interval', type=int, default=20) + model_group.add_argument('--jit', action='store_true') + model_group.add_argument('--nhwc', action='store_true') + model_group.add_argument('--pad-input', action='store_true') + model_group.add_argument('--num-classes', type=int, default=81) + model_group.add_argument('--input-size', type=int, default=300) + model_group.add_argument('--verify-checkpoint', action='store_true') + + # Solver-related + solver_group = parser.add_argument_group('solver', 'Solver-related options') + solver_group.add_argument('--epochs', '-e', type=int, default=800, + help='number of epochs for training') + add_bool_arg(solver_group, 'allreduce-running-stats', default=True, + help='allreduce batch norm running stats before evaluation') + solver_group.add_argument('--seed', '-s', type=int, default=random.SystemRandom().randint(0, 2**32 - 1), + help='manually set random seed for torch') + solver_group.add_argument('--threshold', '-t', type=float, default=0.212, + help='stop training early at threshold') + solver_group.add_argument('--iteration', type=int, default=0, + help='iteration to start from') + solver_group.add_argument('--checkpoint', type=str, default=None, + help='path to model checkpoint file') + add_bool_arg(solver_group, 'save', default=True, + help='save model checkpoints') + solver_group.add_argument('--evaluation', nargs='*', type=int, + default=[5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80], + help='epochs at which to evaluate') + solver_group.add_argument('--warmup', type=int, default=None) + solver_group.add_argument('--warmup-factor', type=int, default=1, + help='mlperf rule parameter for controlling warmup curve') + solver_group.add_argument('--lr', type=float, default=2.68e-3) + solver_group.add_argument('--wd', type=float, default=5e-4) + solver_group.add_argument('--lr-decay-factor', type=float, default=0.1, + help='decay rate of learning rate. default is 0.1.') + parser.add_argument('--lr-decay-epochs', type=int, nargs='+', default=[44,55], + help='epochs at which learning rate decays. 
default is 44,55.') + solver_group.add_argument('--delay-allreduce', action='store_true') + solver_group.add_argument('--opt-loss', action='store_true', help='deprecated option, does nothing (loss is always optimized)') + solver_group.add_argument('--bn-group', type=int, default=1, choices=[1, 2, 4, 8], help='Group of processes to collaborate on BatchNorm ops') + + # Profiling + profiling_group = parser.add_argument_group('profiling', 'Profiling options') + profiling_group.add_argument('--profile', type=int, default=None, + help='iteration at which to early terminate') + profiling_group.add_argument('--profile-start', type=int, default=None, + help='iteration at which to turn on cuda and/or pytorch nvtx profiling') + profiling_group.add_argument('--profile-nvtx', action='store_true', + help='turn on pytorch nvtx annotations in addition to cuda profiling') + profiling_group.add_argument('--profile-gc-off', action='store_true', + help='call gc.disable() (useful for eliminating gc noise while profiling)') + profiling_group.add_argument('--profile-cudnn-get', action='store_true', + help='use cudnnGet() rather than cudnnFind() to eliminate a possible source of perf non-determinism') + profiling_group.add_argument('--profile-fake-optim', action='store_true', + help='turn off optimizer to get more accurate timing of the rest of the training pipe') + + # Distributed stuff + parser.add_argument('--local_rank', '--local-rank', default=os.getenv('LOCAL_RANK',0), type=int, + help='Used for multi-process training. Can either be manually set ' + + 'or automatically set by using \'python -m multiproc\'.') + parser.add_argument('--backend', default='nccl', type=str, help='choice the distributed backend (nccl or gloo)') + solver_group.add_argument('--opt-level', type=str, default="O0", choices=["O0", "O1", "O2"], help='control training mode(FP32 or FP16) by opt_level') + + return parser.parse_args() + +# make sure that arguments are all self-consistent +def validate_arguments(args): + # nhwc can only be used with fp16 + if args.nhwc: + assert(args.use_fp16) + + # input padding can only be used with NHWC + if args.pad_input: + assert(args.nhwc) + + # no dali can only be used with NCHW and no padding + if not args.dali: + assert(not args.nhwc) + assert(not args.pad_input) + assert(not args.use_nvjpeg) + assert(not args.dali_cache) + assert(not args.use_roi_decode) + + if args.use_roi_decode: + assert(args.dali_cache<=0) # roi decode also crops every epoch, so can't cache + + if args.dali_cache>0: + assert(args.use_nvjpeg) + + if args.jit: + assert(args.nhwc) #jit can not be applied with apex::syncbn used for non-nhwc + + return + +# Check that the run is valid for specified group BN arg +def validate_group_bn(bn_groups): + if torch.distributed.is_initialized(): + world_size = torch.distributed.get_world_size() + else: + world_size = 1 + + # Can't have larger group than ranks + assert(bn_groups <= world_size) + + # must have only complete groups + assert(world_size % bn_groups == 0) + diff --git a/cv/detection/ssd/pytorch/base/data_preprocessing/prepare_json.py b/cv/detection/ssd/pytorch/prepare-json.py similarity index 100% rename from cv/detection/ssd/pytorch/base/data_preprocessing/prepare_json.py rename to cv/detection/ssd/pytorch/prepare-json.py diff --git a/cv/detection/ssd/pytorch/base/requirements.txt b/cv/detection/ssd/pytorch/requirements.txt similarity index 35% rename from cv/detection/ssd/pytorch/base/requirements.txt rename to cv/detection/ssd/pytorch/requirements.txt index 37473d262..dd9e53d11 
100644 --- a/cv/detection/ssd/pytorch/base/requirements.txt +++ b/cv/detection/ssd/pytorch/requirements.txt @@ -1,4 +1,2 @@ -# progress bars in model download and training scripts -pycocotools==2.0.2 ujson==1.35 - +pycocotools==2.0.2 diff --git a/cv/detection/ssd/pytorch/iluvatar/config/resnet.py b/cv/detection/ssd/pytorch/resnet.py similarity index 88% rename from cv/detection/ssd/pytorch/iluvatar/config/resnet.py rename to cv/detection/ssd/pytorch/resnet.py index 1c77b8a0c..739b95759 100644 --- a/cv/detection/ssd/pytorch/iluvatar/config/resnet.py +++ b/cv/detection/ssd/pytorch/resnet.py @@ -16,28 +16,50 @@ import torch # for torch.cat and torch.zeros import torch.nn as nn import torch.utils.model_zoo as model_zoo +from nhwc.conv import Conv2d_NHWC +from nhwc.batch_norm import BatchNorm2d_NHWC as gbn_persistent +from nhwc.max_pool import MaxPool2d_NHWC + # Group batch norm from apex.parallel import SyncBatchNorm as gbn # Persistent group BN for NHWC case from apex.contrib.groupbn.batch_norm import BatchNorm2d_NHWC import apex.parallel -try: - from .nhwc.conv import Conv2d_NHWC - from .nhwc.batch_norm import BatchNorm2d_NHWC as gbn_persistent - from .nhwc.max_pool import MaxPool2d_NHWC -except: - from nhwc.conv import Conv2d_NHWC - from nhwc.batch_norm import BatchNorm2d_NHWC as gbn_persistent - from nhwc.max_pool import MaxPool2d_NHWC +import subprocess +import os __all__ = ['resnet'] model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } +def download_with_wget(url, save_path): + filename = os.path.basename(url) + print(f"downloading file: {filename}, save to: {save_path}") + if os.path.exists(save_path+filename): + print(f"file {save_path+filename} has existed") + return + + os.makedirs(save_path, exist_ok=True) + cmd = "" + cmd = [ + 'wget', + '-q', + '--show-progress', + '-O', save_path+filename, + url + ] + + # 执行命令并等待完成 + subprocess.check_call(cmd) + class Layers_NCHW: Conv2d = nn.Conv2d MaxPool = nn.MaxPool2d @@ -221,6 +243,7 @@ def resnet34(pretrained=False, nhwc=False, ssd_mods=False, **kwargs): model = ResNet(layerImpls, block, layer_list, ssd_mods=ssd_mods, use_nhwc=nhwc, **kwargs) if pretrained: + download_with_wget(model_urls['resnet34'], '/root/.cache/torch/hub/checkpoints/') orig_state_dict = model_zoo.load_url(model_urls['resnet34']) # Modify the state dict to remove conv5 / layer4 diff --git a/cv/detection/ssd/pytorch/run.sub b/cv/detection/ssd/pytorch/run.sub new file mode 100644 index 000000000..bd97fee5e --- /dev/null +++ b/cv/detection/ssd/pytorch/run.sub @@ -0,0 +1,62 @@ +#!/bin/bash +#SBATCH --job-name single_stage_detector +set -euxo pipefail + +# Vars without defaults +: "${DGXSYSTEM:?DGXSYSTEM not set}" +: "${CONT:?CONT not set}" + +# Vars with defaults +: "${NEXP:=5}" +: "${DATESTAMP:=$(date +'%y%m%d%H%M%S%N')}" +: "${CLEAR_CACHES:=1}" +: "${DATADIR:=/raid/datasets/coco/coco-2017}" +: "${LOGDIR:=./results}" + +# Other vars +readonly _logfile_base="${LOGDIR}/${DATESTAMP}" +readonly _cont_name=single_stage_detector +_cont_mounts="${DATADIR}:/data,${LOGDIR}:/results" + +# MLPerf vars +MLPERF_HOST_OS=$(srun -N1 -n1 bash < ${LOG_DIR}/ssd_${#ADDR_ARRAY[@]}_machine_8card_${FORMAT,,}_batch_${BATCH_SIZE}_${BACKEND,,}_fps.log 2>&1 + done + 
fi + done + +} + + +function exec_ssh_by_master() { +# only at master host, start all other non master hosts run +if [ "$HOST_IP" == "${ADDR_ARRAY[0]}" ] +then + for i in "${!ADDR_ARRAY[@]}" + do + if [ "$i" != "0" ] + then + scp ${CUR_SCR} ${ADDR_ARRAY[$i]}:${CUR_DIR} + init_docker_container_by_non_master ${ADDR_ARRAY[$i]} + ssh ${ADDR_ARRAY[$i]} "docker exec -i ${CONTAINER_NAME} bash -c \"cd ${CUR_DIR}; bash ${CUR_SCR} \"" & + fi + done +fi + +} + + +function init_docker_container_by_non_master +{ + ADDR=$1 + matched_containers=`ssh ${ADDR} "docker ps -a"|grep "${CONTAINER_NAME}$"|wc -l` + if [ ${matched_containers} -gt "0" ] + then + echo "Warning: Found container ${CONTAINER_NAME} exists! Will delete it." + ssh ${ADDR} "docker stop ${CONTAINER_NAME}; docker rm ${CONTAINER_NAME}" + fi + ssh ${ADDR} "docker run -itd --name ${CONTAINER_NAME} ${CONTAINER_INIT_OPT} ${IMAGE} /bin/bash" + if [ "$?" != "0" ] + then + echo "Error: Init container ${CONTAINER_NAME} at ${ADDR} failed!" + exit -1 + fi +} + + + +function run_multi_machine_8card_end2end() +{ + export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 + export GLOO_SOCKET_IFNAME="ib0" + FORMAT=$1 + BACKEND=$2 + BATCH_SIZE=$3 + num_epochs=90 + if [ "${FORMAT}" = "nchw" ]; then + args="" + else + args=${NHWC_PARAMS} + fi + + # do actual run when IP matched + for i in "${!ADDR_ARRAY[@]}" + do + if [ "$HOST_IP" == "${ADDR_ARRAY[$i]}" ] + then + ../../../../tools/reset.sh + echo "nodes: ${#ADDR_ARRAY[@]}, rank: $i, IP: $HOST_IP, MASTER_IP: ${ADDR_ARRAY[0]}" + python3 -u -m bind_launch --nnodes ${#ADDR_ARRAY[@]} --node_rank $i --nproc_per_node 8 --master_addr ${ADDR_ARRAY[0]} --nsockets_per_node 2 --ncores_per_socket ${CORES_PER_SOCKET} --no_membind \ + ./train.py --dali --data=${DATASET_DIR} --batch-size=${BATCH_SIZE} --warmup-factor=0 --warmup=650 --lr=2.68e-3 --threshold=0.23 --no-save --epochs ${num_epochs} --eval-batch-size=160 --wd=1.6e-4 --use-fp16 --delay-allreduce --lr-decay-factor=0.2 --lr-decay-epochs 34 45 --opt-level "O2" ${args} --backend ${BACKEND,,} > ${LOG_DIR}/ssd_${#ADDR_ARRAY[@]}_machine_8card_${FORMAT,,}_batch_${BATCH_SIZE}_${BACKEND,,}_fps.log 2>&1 + + fi + + done +} + +date +%m%d%H%M%S >> ${LOG_DIR}/time.log +exec_ssh_by_master +#run_multi_machine_8card_end2end nhwc gloo 128 +run_multi_machine_8card_end2end nhwc nccl 112 +#run_multi_machine_8card_end2end nhwc gloo 96 +#run_multi_machine_8card_end2end nhwc gloo 80 +#run_multi_machine_8card_end2end nhwc gloo 64 +#run_multi_machine_8card_end2end nhwc gloo 56 +date +%m%d%H%M%S >> ${LOG_DIR}/time.log diff --git a/cv/detection/ssd/pytorch/run_with_docker.sh b/cv/detection/ssd/pytorch/run_with_docker.sh new file mode 100644 index 000000000..c30e5051f --- /dev/null +++ b/cv/detection/ssd/pytorch/run_with_docker.sh @@ -0,0 +1,68 @@ +#!/bin/bash +set -euxo pipefail + +# Vars without defaults +: "${DGXSYSTEM:?DGXSYSTEM not set}" +: "${CONT:?CONT not set}" + +# Vars with defaults +: "${NEXP:=5}" +: "${DATESTAMP:=$(date +'%y%m%d%H%M%S%N')}" +: "${CLEAR_CACHES:=1}" +: "${DATADIR:=/raid/datasets/coco/coco-2017}" +: "${LOGDIR:=$(pwd)/results}" + +# Other vars +readonly _config_file="./config_${DGXSYSTEM}.sh" +readonly _logfile_base="${LOGDIR}/${DATESTAMP}" +readonly _cont_name=single_stage_detector +_cont_mounts=("--volume=${DATADIR}:/data" "--volume=${LOGDIR}:/results") + +# Setup directories +mkdir -p "${LOGDIR}" + +# Get list of envvars to pass to docker +source "${_config_file}" +mapfile -t _config_env < <(env -i bash -c ". 
${_config_file} && compgen -e" | grep -E -v '^(PWD|SHLVL)') +_config_env+=(MLPERF_HOST_OS) +mapfile -t _config_env < <(for v in "${_config_env[@]}"; do echo "--env=$v"; done) + +# Cleanup container +cleanup_docker() { + docker container rm -f "${_cont_name}" || true +} +cleanup_docker +trap 'set -eux; cleanup_docker' EXIT + +# Setup container +nvidia-docker run --rm --init --detach \ + --net=host --uts=host --ipc=host --security-opt=seccomp=unconfined \ + --ulimit=stack=67108864 --ulimit=memlock=-1 \ + --name="${_cont_name}" "${_cont_mounts[@]}" \ + "${CONT}" sleep infinity +docker exec -it "${_cont_name}" true + +# Run experiments +for _experiment_index in $(seq 1 "${NEXP}"); do + ( + echo "Beginning trial ${_experiment_index} of ${NEXP}" + + # Print system info + docker exec -it "${_cont_name}" python -c " +import mlperf_log_utils +from mlperf_logging.mllog import constants +mlperf_log_utils.mlperf_submission_log(constants.SSD)" + + # Clear caches + if [ "${CLEAR_CACHES}" -eq 1 ]; then + sync && sudo /sbin/sysctl vm.drop_caches=3 + docker exec -it "${_cont_name}" python -c " +from mlperf_logging.mllog import constants +from mlperf_logger import log_event +log_event(key=constants.CACHE_CLEAR, value=True)" + fi + + # Run experiment + docker exec -it "${_config_env[@]}" "${_cont_name}" ./run_and_time.sh + ) |& tee "${_logfile_base}_${_experiment_index}.log" +done diff --git a/cv/detection/ssd/pytorch/iluvatar/setup.py b/cv/detection/ssd/pytorch/setup.py similarity index 100% rename from cv/detection/ssd/pytorch/iluvatar/setup.py rename to cv/detection/ssd/pytorch/setup.py diff --git a/cv/detection/ssd/pytorch/nvidia/config/ssd300.py b/cv/detection/ssd/pytorch/ssd300.py similarity index 98% rename from cv/detection/ssd/pytorch/nvidia/config/ssd300.py rename to cv/detection/ssd/pytorch/ssd300.py index 6ad66af9f..a97647c8d 100644 --- a/cv/detection/ssd/pytorch/nvidia/config/ssd300.py +++ b/cv/detection/ssd/pytorch/ssd300.py @@ -15,14 +15,9 @@ import torch import torch.nn as nn # from base_model import L2Norm, ResNet +from resnet import ResNet, resnet34 -try: - from .resnet import resnet34 - from .nhwc.conv import Conv2d_NHWC -except: - from resnet import resnet34 - from nhwc.conv import Conv2d_NHWC - +from nhwc.conv import Conv2d_NHWC class SSD300(nn.Module): """ diff --git a/cv/detection/ssd/pytorch/test.py b/cv/detection/ssd/pytorch/test.py new file mode 100644 index 000000000..183c43dfe --- /dev/null +++ b/cv/detection/ssd/pytorch/test.py @@ -0,0 +1,204 @@ +# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
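test.py below rebuilds the standard SSD300 prior boxes in dboxes300_coco (feature maps [38, 19, 10, 5, 3, 1], with 4 or 6 anchors per cell depending on the aspect-ratio list). The 8732 figure that later shapes the loss tensors follows directly from that configuration; a quick check, assuming the usual SSD per-cell rule of two square boxes plus a pair per listed aspect ratio:

feat_size = [38, 19, 10, 5, 3, 1]
aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
per_cell = [2 + 2 * len(ar) for ar in aspect_ratios]        # [4, 6, 6, 6, 4, 4]
print(sum(s * s * n for s, n in zip(feat_size, per_cell)))  # 8732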
+ +import os +from argparse import ArgumentParser +from utils import DefaultBoxes, Encoder, COCODetection +from base_model import Loss +from utils import SSDTransformer +from ssd300 import SSD300 +import torch +from torch.utils.data import DataLoader +import time +import numpy as np +import os + +# necessary pytorch imports +import torch.utils.data.distributed +import torch.distributed as dist +from torch.autograd import Variable + +# Apex imports +try: + from apex.parallel import DistributedDataParallel as DDP + from apex.fp16_utils import * +except ImportError: + raise ImportError("Please install APEX from https://github.com/nvidia/apex") + +# DALI import +from coco_pipeline import COCOPipeline, DALICOCOIterator + +from SSD import _C as C + + +def parse_args(): + parser = ArgumentParser(description="Train Single Shot MultiBox Detector" + " on COCO") + parser.add_argument('--data', '-d', type=str, default='/coco/coco2017', + help='path to test and training data files') + parser.add_argument('--batch-size', '-b', type=int, default=128, + help='number of examples for each iteration') + #parser.add_argument('--checkpoint', type=str, default=None, + # help='path to model checkpoint file', required=True) + parser.add_argument('--backbone', type=str, choices=['vgg16', 'vgg16bn', + 'resnet18', 'resnet34', 'resnet50'], default='resnet34') + parser.add_argument('--num-workers', type=int, default=3) + parser.add_argument('--fbu', type=int, default=1) + parser.add_argument('--use-fp16', action='store_true') + parser.add_argument('--use-train-dataset', action='store_true') + + # Distributed stuff + parser.add_argument('--local_rank', '--local-rank', default=0, type=int, + help='Used for multi-process training. Can either be manually set ' + + 'or automatically set by using \'python -m multiproc\'.') + + return parser.parse_args() + +def dboxes300_coco(): + figsize = 300 + feat_size = [38, 19, 10, 5, 3, 1] + steps = [8, 16, 32, 64, 100, 300] + # use the scales here: https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py + scales = [21, 45, 99, 153, 207, 261, 315] + aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]] + dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios) + return dboxes + +def test_coco(args): + # For testing purposes we have to use CUDA + use_cuda = True + + # Setup multi-GPU if necessary + args.distributed = False + if 'WORLD_SIZE' in os.environ: + args.distributed = int(os.environ['WORLD_SIZE']) > 1 + + if args.distributed: + torch.cuda.set_device(args.local_rank) + + torch.distributed.init_process_group(backend='nccl', + init_method='env://') + + if args.distributed: + N_gpu = torch.distributed.get_world_size() + else: + N_gpu = 1 + + # Setup data, defaults + dboxes = dboxes300_coco() + encoder = Encoder(dboxes) + + if args.use_train_dataset: + annotate = os.path.join(args.data, "annotations/instances_train2017.json") + coco_root = os.path.join(args.data, "train2017") + img_number = 118287 + else: + annotate = os.path.join(args.data, "annotations/instances_val2017.json") + coco_root = os.path.join(args.data, "val2017") + img_number = 5000 + + pipe = COCOPipeline(args.batch_size, args.local_rank, coco_root, + annotate, N_gpu, num_threads=args.num_workers) + pipe.build() + test_run = pipe.run() + dataloader = DALICOCOIterator(pipe, img_number / N_gpu) + + # Build the model + ssd300 = SSD300(81, backbone=args.backbone, model_path='', dilation=False) + + """ + # Note: args.checkpoint is required, so this can never be false + if args.checkpoint is not 
None: + print("loading model checkpoint", args.checkpoint) + od = torch.load(args.checkpoint) + + # remove proceeding 'module' from checkpoint + model = od["model"] + for k in list(model.keys()): + if k.startswith('module.'): + model[k[7:]] = model.pop(k) + ssd300.load_state_dict(model) + """ + + + ssd300.cuda() + ssd300.eval() + loss_func = Loss(dboxes) + loss_func.cuda() + + # parallelize + if args.distributed: + ssd300 = DDP(ssd300) + + if args.use_fp16: + ssd300 = network_to_half(ssd300) + + if args.use_train_dataset and args.local_rank == 0: + print('Image 000000320612.jpg is in fact PNG and it will cause fail if ' + + 'used with nvJPEGDecoder in coco_pipeline') + + for epoch in range(2): + if epoch == 1 and args.local_rank == 0: + print("Performance computation starts") + s = time.time() + for i, data in enumerate(dataloader): + + with torch.no_grad(): + # Get data from pipeline + img = data[0][0][0] + bbox = data[0][1][0] + label = data[0][2][0] + label = label.type(torch.cuda.LongTensor) + bbox_offsets = data[0][3][0] + bbox_offsets = bbox_offsets.cuda() + + # Encode labels + N = img.shape[0] + if bbox_offsets[-1].item() == 0: + print("No labels in batch") + continue + bbox, label = C.box_encoder(N, bbox, bbox_offsets, label, + encoder.dboxes.cuda(), 0.5) + + # Prepare tensors for computing loss + M = bbox.shape[0] // N + bbox = bbox.view(N, M, 4) + label = label.view(N, M) + trans_bbox = bbox.transpose(1,2).contiguous() + gloc, glabel = Variable(trans_bbox, requires_grad=False), \ + Variable(label, requires_grad=False) + + if args.use_fp16: + img = img.half() + + for _ in range(args.fbu): + ploc, plabel = ssd300(img) + ploc, plabel = ploc.float(), plabel.float() + loss = loss_func(ploc, plabel, gloc, glabel) + + if epoch == 1 and args.local_rank == 0: + e = time.time() + print("Performance achieved: {:.2f} img/sec".format(img_number / (e - s))) + + dataloader.reset() + +def main(): + args = parse_args() + + torch.backends.cudnn.benchmark = True + + test_coco(args) + +if __name__ == "__main__": + main() diff --git a/cv/detection/ssd/pytorch/base/test/box_coder_test.py b/cv/detection/ssd/pytorch/test/box_coder_test.py similarity index 53% rename from cv/detection/ssd/pytorch/base/test/box_coder_test.py rename to cv/detection/ssd/pytorch/test/box_coder_test.py index 1516dbe83..4c3613739 100644 --- a/cv/detection/ssd/pytorch/base/test/box_coder_test.py +++ b/cv/detection/ssd/pytorch/test/box_coder_test.py @@ -1,9 +1,24 @@ -import sys -sys.path.append("..") - -from box_coder import DefaultBoxes, Encoder +import os +from argparse import ArgumentParser +from utils import DefaultBoxes, Encoder, COCODetection, SSDCropping +from PIL import Image +from base_model import Loss +from utils import SSDTransformer +from ssd300 import SSD300 +from sampler import GeneralDistributedSampler +from master_params import create_flat_master import torch +from torch.autograd import Variable +from torch.utils.data import DataLoader +import time +import numpy as np +import io +import random +import torchvision.transforms as transforms + +import sys +from SSD import _C as C def dboxes300_coco(): figsize = 300 @@ -18,8 +33,7 @@ def dboxes300_coco(): if __name__ == "__main__": dboxes = dboxes300_coco() - encoder = Encoder(dboxes, fast_nms=False) - encoder_fast = Encoder(dboxes, fast_nms=True) + encoder = Encoder(dboxes) saved_inputs = torch.load('inputs.pth') @@ -31,9 +45,5 @@ if __name__ == "__main__": print('bboxes: {}, scores: {}'.format(bboxes.shape, scores.shape)) for i in range(bboxes.shape[0]): - box1, 
label1, score1 = encoder.decode_batch(bboxes[i, :, :].unsqueeze(0), scores[i, :, :].unsqueeze(0), criteria, max_num)[0] - - box2, label2, score2 = \ - encoder_fast.decode_batch(bboxes[i, :, :].unsqueeze(0), scores[i, :, :].unsqueeze(0), criteria, max_num)[0] - - print('label: {}, fast label: {}'.format(label1, label2)) + box, label, score = encoder.decode_batch(bboxes[i, :, :].unsqueeze(0), scores[i, :, :].unsqueeze(0), criteria, max_num)[0] + print('r: {}'.format(label)) diff --git a/cv/detection/ssd/pytorch/base/test/cuda_encoder_test.py b/cv/detection/ssd/pytorch/test/cuda_encoder_test.py similarity index 88% rename from cv/detection/ssd/pytorch/base/test/cuda_encoder_test.py rename to cv/detection/ssd/pytorch/test/cuda_encoder_test.py index f4a9d0e24..ba10edf2f 100644 --- a/cv/detection/ssd/pytorch/base/test/cuda_encoder_test.py +++ b/cv/detection/ssd/pytorch/test/cuda_encoder_test.py @@ -1,11 +1,23 @@ -import sys -sys.path.append("..") - +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import torch +from torch.autograd import Variable from SSD import _C as C -from box_coder import dboxes300_coco, Encoder +from train import dboxes300_coco +from utils import Encoder import numpy as np @@ -150,7 +162,7 @@ def test_box_encoder(): N, bboxes_cat, offsets, bboxes = load_bboxes(box_list, True) # N, bboxes_cat, offsets, bboxes = load_bboxes([b1[:2,:], b1[:2,:]]) - # print(N, bboxes_cat, offsets) + print(N, bboxes_cat, offsets) label_numpy = np.random.randn(offsets[-1])*10 labels = torch.tensor(label_numpy.astype(np.int64)).cuda() @@ -163,8 +175,6 @@ def test_box_encoder(): start = time.time() bbox_out, label_out = C.box_encoder(N, bboxes_cat, offsets, labels, dboxes.cuda(), 0.5) - bbox_out = bbox_out.reshape(bbox_out.shape[0] * bbox_out.shape[1], -1) - label_out = label_out.reshape(label_out.shape[0] * label_out.shape[1], -1) torch.cuda.synchronize() end = time.time() @@ -178,7 +188,7 @@ def test_box_encoder(): # reference dboxes = dboxes300_coco() - encoder = Encoder(dboxes, fast_nms=True) + encoder = Encoder(dboxes) labels_ref = torch.tensor(label_numpy.astype(np.int64)) start = time.time() @@ -206,7 +216,7 @@ def test_box_encoder(): for i, res in enumerate(r): if not res.any(): num_fail += 1 - # print(i, res, ref_boxes[i,:], bbox_out[i, :]) + print(i, res, ref_boxes[i,:], bbox_out[i, :]) print('{} bboxes failed'.format(num_fail)) @@ -218,7 +228,7 @@ def test_box_encoder(): for i, res in enumerate(r2): if not res: num_fail += 1 - # print('label: ', i, res, label_out[i], ref_labels.numpy()[i]) + print('label: ', i, res, label_out[i], ref_labels.numpy()[i]) print('{} labels failed'.format(num_fail)) diff --git a/cv/detection/ssd/pytorch/base/test/opt_loss_test.py b/cv/detection/ssd/pytorch/test/opt_loss_test.py similarity index 64% rename from cv/detection/ssd/pytorch/base/test/opt_loss_test.py rename to cv/detection/ssd/pytorch/test/opt_loss_test.py index a422ee358..7411eafee 100644 --- 
a/cv/detection/ssd/pytorch/base/test/opt_loss_test.py +++ b/cv/detection/ssd/pytorch/test/opt_loss_test.py @@ -1,11 +1,8 @@ -import sys -sys.path.append("..") - import torch -from box_coder import dboxes300_coco -from model.losses.opt_loss import OptLoss -# from model.losses.loss import Loss +from base_model import Loss +from train import dboxes300_coco +from opt_loss import OptLoss # In: # ploc : N x 8732 x 4 @@ -21,9 +18,9 @@ glabel = data['glabel'].cuda() dboxes = dboxes300_coco() # loss = Loss(dboxes).cuda() -opt_loss = OptLoss().cuda() +loss = OptLoss(dboxes).cuda() -opt_loss = torch.jit.trace(opt_loss, (ploc, plabel, gloc, glabel)) +loss = torch.jit.trace(loss, (ploc, plabel, gloc, glabel)) # print(traced_loss.graph) # timing @@ -33,22 +30,20 @@ import time # Dry run to eliminate JIT compile overhead dl = torch.tensor([1.], device="cuda") -l1 = opt_loss(ploc, plabel, gloc, glabel) -print("opt loss: {}".format(l1)) -l1.backward() - +l = loss(ploc, plabel, gloc, glabel) +l.backward(dl) # fprop torch.cuda.synchronize() start = time.time() with torch.no_grad(): for _ in range(timing_iterations): - l1 = opt_loss(ploc, plabel, gloc, glabel) + l = loss(ploc, plabel, gloc, glabel) +print('loss: {}'.format(l)) torch.cuda.synchronize() end = time.time() -print('opt loss: {}'.format(l1)) time_per_fprop = (end - start) / timing_iterations print('took {} seconds per iteration (fprop)'.format(time_per_fprop)) @@ -57,15 +52,14 @@ print('took {} seconds per iteration (fprop)'.format(time_per_fprop)) torch.cuda.synchronize() start = time.time() for _ in range(timing_iterations): - l1 = opt_loss(ploc, plabel, gloc, glabel) - l1.backward() + l = loss(ploc, plabel, gloc, glabel) + l.backward(dl) torch.cuda.synchronize() end = time.time() -print('opt loss: {}'.format(l1)) time_per_fprop_bprop = (end - start) / timing_iterations print('took {} seconds per iteration (fprop + bprop)'.format(time_per_fprop_bprop)) -# print(loss.graph_for(ploc, plabel, gloc, glabel)) +print(loss.graph_for(ploc, plabel, gloc, glabel)) diff --git a/cv/detection/ssd/pytorch/train.py b/cv/detection/ssd/pytorch/train.py new file mode 100644 index 000000000..cfb05e108 --- /dev/null +++ b/cv/detection/ssd/pytorch/train.py @@ -0,0 +1,554 @@ +# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
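opt_loss_test.py above follows the usual trace-then-time recipe: jit-trace the loss once, run it once to absorb compile overhead, then average wall-clock time over many iterations. A stand-in version of that recipe (torch.nn.MSELoss replaces OptLoss here, since the saved SSD tensors are not assumed available):

import time
import torch

loss_fn = torch.jit.trace(torch.nn.MSELoss(), (torch.randn(8), torch.randn(8)))
a, b = torch.randn(8), torch.randn(8)
loss_fn(a, b)                        # dry run: eliminate JIT compile overhead
iters = 100
start = time.time()
for _ in range(iters):
    loss_fn(a, b)
print('seconds per call:', (time.time() - start) / iters)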
+ +import nvidia.dali +import os +from base_model import Loss +from opt_loss import OptLoss +from mlperf_logger import configure_logger, log_start, log_end, log_event, set_seeds, get_rank, barrier +from mlperf_logging.mllog import constants +import torch +from torch.autograd import Variable +import time +import numpy as np +import io +from bisect import bisect # for lr_scheduler + +from ssd300 import SSD300 +from master_params import create_flat_master +from parse_config import parse_args, validate_arguments, validate_group_bn +from data.build_pipeline import prebuild_pipeline, build_pipeline +from box_coder import dboxes300_coco, build_ssd300_coder +from async_evaluator import AsyncEvaluator +from eval import coco_eval + +import sys +import gc + +# necessary pytorch imports +import torch.utils.data.distributed +import torch.distributed as dist + +# Apex imports +try: + import apex_C + import apex + from apex.parallel.LARC import LARC + from apex.parallel import DistributedDataParallel as DDP + from apex.fp16_utils import * + from apex.multi_tensor_apply import multi_tensor_applier + import amp_C +except ImportError: + raise ImportError("Please install APEX from https://github.com/nvidia/apex") + +from contextlib import redirect_stdout + +from SSD import _C as C + +def print_message(rank, *print_args): + if rank == 0: + print(*print_args) + +def load_checkpoint(model, checkpoint, rank_id): + print("loading model checkpoint", checkpoint) + od = torch.load(checkpoint, map_location=torch.device('cuda:'+str(rank_id))) + # od = torch.load(checkpoint) + + # remove proceeding 'module' from checkpoint + saved_model = od["model"] + for k in list(saved_model.keys()): + if k.startswith('module.'): + saved_model[k[7:]] = saved_model.pop(k) + model.load_state_dict(saved_model) + return od + +def check_async_evals(args, evaluator, threshold): + finished = 0 + # Note: only one rank does COCOEval, so we need to check there if we've + # finished -- we'll broadcast that to a "finished" tensor to determine + # if we should stop + # Note2: ssd_print contains a barrier() call, implemented with all_reduce + # If we conditional on rank 0, then an ssd_print all_reduce matches with + # the finished all_reduce and all hell breaks loose. 
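check_async_evals below implements the pattern its comment describes: only rank 0 inspects the finished COCOEval tasks, and the verdict is shared by broadcasting a one-element tensor. Stripped of the MLPerf logging, the core of that pattern looks roughly like this (a sketch that assumes torch.distributed is already initialised and a CUDA device is set, as in the surrounding code):

import torch
import torch.distributed as dist

def everyone_should_stop(rank_zero_decision: bool) -> bool:
    # rank 0 fills in the flag; broadcast overwrites it on every other rank
    flag = torch.tensor([1 if rank_zero_decision else 0],
                        dtype=torch.int32, device='cuda')
    dist.broadcast(flag, src=0)
    return flag.item() >= 1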
+ if args.rank == 0: + for epoch, current_accuracy in evaluator.finished_tasks().items(): + # Note: Move to per-iter check + # EVAL_START should be prior to the accuracy/score evaluation but adding the missing EVAL_START here for now + log_start(key=constants.EVAL_START, metadata={'epoch_num' : epoch}) + log_event(key=constants.EVAL_ACCURACY, + value=current_accuracy, + metadata={'epoch_num' : epoch}) + log_end(key=constants.EVAL_STOP, metadata={'epoch_num' : epoch}) + if current_accuracy >= threshold: + finished = 1 + + # handle the non-distributed case -- don't need to bcast, just take local result + if not args.distributed: + return finished == 1 + + # Now we know from all ranks if they're done - reduce result + # Note: Already caught the non-distributed case above, can assume broadcast is available + with torch.no_grad(): + finish_tensor = torch.tensor([finished], dtype=torch.int32, device=torch.device('cuda')) + # torch.distributed.all_reduce(finish_tensor) + torch.distributed.broadcast(finish_tensor, src=0) + + # >= 1 ranks has seen final accuracy + if finish_tensor.item() >= 1: + return True + + # Default case: No results, or no accuracte enough results + return False + +def check_async_evals_block(args, evaluator, threshold): + finished = 0 + # Note: only one rank does COCOEval, so we need to check there if we've + # finished -- we'll broadcast that to a "finished" tensor to determine + # if we should stop + # Note2: ssd_print contains a barrier() call, implemented with all_reduce + # If we conditional on rank 0, then an ssd_print all_reduce matches with + # the finished all_reduce and all hell breaks loose. + if args.rank == 0: + for epoch, current_accuracy in evaluator.get_all_tasks().items(): + # Note: Move to per-iter check + # EVAL_START should be prior to the accuracy/score evaluation but adding the missing EVAL_START here for now + log_start(key=constants.EVAL_START, metadata={'epoch_num' : epoch}) + log_event(key=constants.EVAL_ACCURACY, + value=current_accuracy, + metadata={'epoch_num' : epoch}) + log_end(key=constants.EVAL_STOP, metadata={'epoch_num' : epoch}) + if current_accuracy >= threshold: + finished = 1 + + # handle the non-distributed case -- don't need to bcast, just take local result + if not args.distributed: + return finished == 1 + + # Now we know from all ranks if they're done - reduce result + # Note: Already caught the non-distributed case above, can assume broadcast is available + with torch.no_grad(): + finish_tensor = torch.tensor([finished], dtype=torch.int32, device=torch.device('cuda')) + # torch.distributed.all_reduce(finish_tensor) + torch.distributed.broadcast(finish_tensor, src=0) + + # >= 1 ranks has seen final accuracy + if finish_tensor.item() >= 1: + return True + + # Default case: No results, or no accuracte enough results + return False + +def lr_warmup(optim, warmup_iter, iter_num, epoch, base_lr, args): + if iter_num < warmup_iter: + # new_lr = 1. 
* base_lr / warmup_iter * iter_num + + # mlperf warmup rule + warmup_step = base_lr / (warmup_iter * (2 ** args.warmup_factor)) + new_lr = base_lr - (warmup_iter - iter_num) * warmup_step + + for param_group in optim.param_groups: + param_group['lr'] = new_lr + return new_lr + else: + return base_lr + +def setup_distributed(args): + # Setup multi-GPU if necessary + args.distributed = False + if 'WORLD_SIZE' in os.environ: + args.distributed = int(os.environ['WORLD_SIZE']) > 1 + + if args.distributed: + torch.cuda.set_device(args.local_rank) + if "MASTER_ADDR" in os.environ: + host_addr_full = 'tcp://' + os.environ["MASTER_ADDR"] + ':' + os.environ["MASTER_PORT"] + rank = int(os.environ["RANK"]) + world_size = int(os.environ["WORLD_SIZE"]) + torch.distributed.init_process_group(backend=args.backend, init_method=host_addr_full, rank=rank, + world_size=world_size) + else: + torch.distributed.init_process_group(backend=args.backend, init_method='env://') + + args.local_seed = set_seeds(args) + # start timing here + if args.distributed: + args.N_gpu = torch.distributed.get_world_size() + args.rank = args.local_rank #torch.distributed.get_rank() + else: + args.N_gpu = 1 + args.rank = 0 + + validate_group_bn(args.bn_group) + + return args + +def train300_mlperf_coco(args): + + args = setup_distributed(args) + + # Build the model + model_options = { + 'use_nhwc' : args.nhwc, + 'pad_input' : args.pad_input, + 'bn_group' : args.bn_group, + } + + ssd300 = SSD300(args, args.num_classes, **model_options) + if args.checkpoint is not None: + od = load_checkpoint(ssd300, args.checkpoint, args.rank) + else: + od = None + + ssd300.train() + ssd300.cuda() + dboxes = dboxes300_coco() + # Note: No reason not to use optimised loss + + if args.dali: + loss_func = OptLoss() + else: + loss_func = Loss(dboxes) + loss_func.cuda() + + # Create optimizer. This must also be done after network_to_half. + global_batch_size = (args.N_gpu * args.batch_size) + log_event(key=constants.MODEL_BN_SPAN, value=args.bn_group*args.batch_size) + log_event(key=constants.GLOBAL_BATCH_SIZE, value=global_batch_size) + + # mlperf only allows base_lr scaled by an integer + base_lr = 2.5e-3 + requested_lr_multiplier = args.lr / base_lr + adjusted_multiplier = max(1, round(requested_lr_multiplier * global_batch_size / 32)) + + current_lr = base_lr * adjusted_multiplier + current_momentum = 0.9 + current_weight_decay = args.wd + static_loss_scale = 128. + + optim = apex.optimizers.FusedSGD(ssd300.parameters(), + lr=current_lr, + momentum=current_momentum, + weight_decay=current_weight_decay) + + ssd300, optim = apex.amp.initialize(ssd300, optim, opt_level=args.opt_level, loss_scale=static_loss_scale) + if od is not None and od.get("optimizer", None): + optim.load_state_dict(od["optimizer"]) + for param, saved_param in zip(apex.amp.master_params(optim), od["master params"]): + param.data.copy_(saved_param.data) + + # Parallelize. Need to do this after network_to_half. 
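The lr_warmup rule above ramps the learning rate linearly up to its target over the warmup window, with warmup_factor flattening the ramp by shrinking the per-iteration step. Evaluated on its own, using --warmup=650 and --warmup-factor=0 as in the 8-card run script (the target LR of 0.0225 is an assumed example value):

def warmed_lr(iter_num, warmup_iter, target_lr, warmup_factor):
    # mlperf warmup rule, as in lr_warmup() above
    if iter_num >= warmup_iter:
        return target_lr
    warmup_step = target_lr / (warmup_iter * (2 ** warmup_factor))
    return target_lr - (warmup_iter - iter_num) * warmup_step

for it in (0, 325, 650):
    print(it, warmed_lr(it, 650, 0.0225, 0))   # 0.0, 0.01125, 0.0225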
+ if args.distributed: + if args.delay_allreduce: + print_message(args.local_rank, "Delaying allreduces to the end of backward()") + ssd300 = DDP(ssd300, + gradient_predivide_factor=args.N_gpu/8.0, + delay_allreduce=args.delay_allreduce, + retain_allreduce_buffers=args.use_fp16) + + log_event(key=constants.OPT_BASE_LR, value=current_lr) + log_event(key=constants.OPT_LR_DECAY_BOUNDARY_EPOCHS, value=args.lr_decay_epochs) + log_event(key=constants.OPT_LR_DECAY_STEPS, value=args.lr_decay_epochs) + log_event(key=constants.OPT_WEIGHT_DECAY, value=current_weight_decay) + if args.warmup is not None: + log_event(key=constants.OPT_LR_WARMUP_STEPS, value=args.warmup) + log_event(key=constants.OPT_LR_WARMUP_FACTOR, value=args.warmup_factor) + + print_message(args.local_rank, "epoch", "nbatch", "loss") + + if od and od.get("iter_num", None): + args.iteration = od["iter_num"] + iter_num = args.iteration + avg_loss = 0.0 + + start_elapsed_time = time.time() + last_printed_iter = args.iteration + num_elapsed_samples = 0 + + input_c = 4 if args.pad_input else 3 + example_shape = [args.batch_size, 300, 300, input_c] if args.nhwc else [args.batch_size, input_c, 300, 300] + example_input = torch.randn(*example_shape).cuda() + + if args.use_fp16: + example_input = example_input.half() + if args.jit: + # DDP has some Python-side control flow. If we JIT the entire DDP-wrapped module, + # the resulting ScriptModule will elide this control flow, resulting in allreduce + # hooks not being called. If we're running distributed, we need to extract and JIT + # the wrapped .module. + # Replacing a DDP-ed ssd300 with a script_module might also cause the AccumulateGrad hooks + # to go out of scope, and therefore silently disappear. + module_to_jit = ssd300.module if args.distributed else ssd300 + if args.distributed: + ssd300.module = torch.jit.trace(module_to_jit, example_input, check_trace=False) + else: + ssd300 = torch.jit.trace(module_to_jit, example_input, check_trace=False) + + # do a dummy fprop & bprop to make sure cudnnFind etc. 
are timed here + ploc, plabel = ssd300(example_input) + + # produce a single dummy "loss" to make things easier + loss = ploc[0,0,0] + plabel[0,0,0] + dloss = torch.randn_like(loss) + # Cause cudnnFind for dgrad, wgrad to run + loss.backward(dloss) + + # Necessary import in init + from pycocotools.coco import COCO + + encoder = build_ssd300_coder() + + evaluator = AsyncEvaluator(num_threads=1) + + log_end(key=constants.INIT_STOP) + + ##### END INIT + + # This is the first place we touch anything related to data + ##### START DATA TOUCHING + barrier() + log_start(key=constants.RUN_START) + barrier() + + train_pipe = prebuild_pipeline(args) + + train_loader, epoch_size = build_pipeline(args, training=True, pipe=train_pipe) + if args.rank == 0: + print("epoch size is: ", epoch_size, " images") + + val_loader, inv_map, cocoGt = build_pipeline(args, training=False) + if args.profile_gc_off: + gc.disable() + gc.collect() + + ##### END DATA TOUCHING + i_eval = 0 + block_start_epoch = 0 + log_start(key=constants.BLOCK_START, + metadata={'first_epoch_num': block_start_epoch, + 'epoch_count': args.evaluation[i_eval]}) + + if args.verify_checkpoint: + print("verify initial checkpoint....") + # Note: No longer returns, evaluation is abstracted away inside evaluator + if od and od.get("epoch", None): + epoch = od["epoch"] + else: + epoch = 0 + coco_eval(args, + ssd300, + val_loader, + cocoGt, + encoder, + inv_map, + epoch, + iter_num, current_lr, + evaluator=evaluator) + finished = check_async_evals_block(args, evaluator, args.threshold) + if finished: + return True + + torch.distributed.barrier() + for epoch in range(1, args.epochs + 1): + if epoch in args.lr_decay_epochs: + current_lr *= args.lr_decay_factor + print_message(args.rank, "lr decay step #" + str(bisect(args.lr_decay_epochs, epoch))) + for param_group in optim.param_groups: + param_group['lr'] = current_lr + if od and od.get("epoch", None): + if epoch <= od["epoch"]: + if epoch in args.evaluation: + if epoch != max(args.evaluation): + i_eval += 1 + block_start_epoch = epoch + print("Start continue training from epoch: {}, iter: {}, skip epoch {}".format(od["epoch"], iter_num, epoch)) + continue + for p in ssd300.parameters(): + p.grad = None + + if epoch in args.evaluation: + # Get the existant state from the train model + # * if we use distributed, then we want .module + train_model = ssd300.module if args.distributed else ssd300 + + if args.distributed and args.allreduce_running_stats: + if args.rank == 0: print("averaging bn running means and vars") + # make sure every node has the same running bn stats before + # using them to evaluate, or saving the model for inference + world_size = float(torch.distributed.get_world_size()) + for bn_name, bn_buf in train_model.named_buffers(recurse=True): + if ('running_mean' in bn_name) or ('running_var' in bn_name): + torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM) + bn_buf /= world_size + + # Note: No longer returns, evaluation is abstracted away inside evaluator + coco_eval(args, + ssd300, + val_loader, + cocoGt, + encoder, + inv_map, + epoch, + iter_num, current_lr, + evaluator=evaluator) + log_end(key=constants.BLOCK_STOP, metadata={'first_epoch_num': block_start_epoch}) + if epoch != max(args.evaluation): + i_eval += 1 + block_start_epoch = epoch + log_start(key=constants.BLOCK_START, + metadata={'first_epoch_num': block_start_epoch, + 'epoch_count': (args.evaluation[i_eval] - + args.evaluation[i_eval - 1])}) + + if args.rank == 0: + if args.save: + print("saving model...") + if 
not os.path.isdir('./models'): + os.mkdir('./models') + torch.save({ + "model": ssd300.state_dict(), + "optimizer": optim.state_dict(), + "master params": list(apex.amp.master_params(optim)), + "epoch": epoch, + "iter_num": iter_num, + }, "./models/iter_{}.pt".format(iter_num)) + + finished = check_async_evals_block(args, evaluator, args.threshold) + if finished: + return True + if args.distributed: + torch.distributed.barrier() + + log_start(key=constants.EPOCH_START, + metadata={'epoch_num': epoch, + 'current_iter_num': iter_num}) + for i, (img, bbox, label) in enumerate(train_loader): + if not args.dali: + img = img.cuda() + bbox = bbox.cuda() + label = label.cuda() + + if args.profile_start is not None and iter_num == args.profile_start: + torch.cuda.profiler.start() + torch.cuda.synchronize() + if args.profile_nvtx: + torch.autograd._enable_profiler(torch.autograd.ProfilerState.NVTX) + + if args.profile is not None and iter_num == args.profile: + if args.profile_start is not None and iter_num >=args.profile_start: + # we turned cuda and nvtx profiling on, better turn it off too + if args.profile_nvtx: + torch.autograd._disable_profiler() + torch.cuda.profiler.stop() + return + + new_lr = current_lr + if args.warmup is not None: + new_lr = lr_warmup(optim, args.warmup, iter_num, epoch, current_lr, args) + + if (img is None) or (bbox is None) or (label is None): + print("No labels in batch") + continue + + ploc, plabel = ssd300(img) + ploc, plabel = ploc.float(), plabel.float() + + N = img.shape[0] + bbox.requires_grad = False + label.requires_grad = False + # reshape (N*8732X4 -> Nx8732x4) and transpose (Nx8732x4 -> Nx4x8732) + bbox = bbox.view(N, -1, 4).transpose(1,2).contiguous() + # reshape (N*8732 -> Nx8732) and cast to Long + label = label.view(N, -1).long() + loss = loss_func(ploc, plabel, bbox, label) + + if i == 0 and epoch == 0: + avg_loss = loss.item() + else: + if np.isfinite(loss.item()): + avg_loss = 0.999*avg_loss + 0.001*loss.item() + else: + print("model exploded (corrupted by Inf or Nan)") + sys.exit(1) + + num_elapsed_samples += N + if args.rank == 0 and iter_num % args.print_interval == 0: + end_elapsed_time = time.time() + elapsed_time = end_elapsed_time - start_elapsed_time + + avg_samples_per_sec = num_elapsed_samples * args.N_gpu / elapsed_time + + print("Iteration: {:6d}, Loss function: {:5.3f}, Average Loss: {:.3f}, avg. samples / sec: {:.2f} lr: {:.6f}"\ + .format(iter_num, loss.item(), avg_loss, avg_samples_per_sec, new_lr), end="\n") + + last_printed_iter = iter_num + start_elapsed_time = time.time() + num_elapsed_samples = 0 + + with apex.amp.scale_loss(loss, optim) as scaled_loss: + scaled_loss.backward() + + if not args.profile_fake_optim: + optim.step() + + # Likely a decent skew here, let's take this opportunity to set the + # gradients to None. After DALI integration, playing with the + # placement of this is worth trying. 
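As an illustrative aside (not from the patch): the two view/transpose calls in the inner loop above convert the loader's flattened targets into the layout the SSD loss consumes — boxes go from (N*8732, 4) to (N, 4, 8732) and labels become (N, 8732) Long. A minimal shape check, assuming the standard 8732 default boxes and a made-up batch of 2:

import torch

N, num_dboxes = 2, 8732
bbox = torch.rand(N * num_dboxes, 4)                      # flattened ltrb targets
label = torch.randint(0, 81, (N * num_dboxes,)).float()   # flattened class targets

bbox = bbox.view(N, -1, 4).transpose(1, 2).contiguous()   # (N*8732, 4) -> (N, 8732, 4) -> (N, 4, 8732)
label = label.view(N, -1).long()                          # (N*8732,) -> (N, 8732), cast to Long

assert bbox.shape == (N, 4, num_dboxes)
assert label.shape == (N, num_dboxes)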
+ for p in ssd300.parameters(): + p.grad = None + + # Don't check every iteration due to cost of broadcast + if iter_num % 20 == 0: + finished = check_async_evals(args, evaluator, args.threshold) + + if finished: + return True + + iter_num += 1 + + if args.dali: + train_loader.reset() + log_end(key=constants.EPOCH_STOP, metadata={'epoch_num': epoch}) + + finished = check_async_evals_block(args, evaluator, args.threshold) + if finished: + return True + return False + +def main(): + + configure_logger(constants.SSD) + log_start(key=constants.INIT_START, log_all_ranks=True) + args = parse_args() + try: + from dltest import show_training_arguments + show_training_arguments(args) + except: + pass + # make sure the epoch lists are in sorted order + args.evaluation.sort() + args.lr_decay_epochs.sort() + + validate_arguments(args) + + torch.set_num_threads(1) + torch.backends.cudnn.benchmark = not args.profile_cudnn_get + + success = train300_mlperf_coco(args) + status = 'success' if success else 'aborted' + + # end timing here + log_end(key=constants.RUN_STOP, metadata={'status': status}) + +if __name__ == "__main__": + main() diff --git a/cv/detection/ssd/pytorch/base/dataloaders/util.py b/cv/detection/ssd/pytorch/utils.py similarity index 48% rename from cv/detection/ssd/pytorch/base/dataloaders/util.py rename to cv/detection/ssd/pytorch/utils.py index f65f5f981..6862a0687 100644 --- a/cv/detection/ssd/pytorch/base/dataloaders/util.py +++ b/cv/detection/ssd/pytorch/utils.py @@ -20,6 +20,45 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # +# ------------------------------------------------------------------------------ +# +# MIT License +# +# Copyright (c) 2017 Max deGroot, Ellis Brown +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# +# ------------------------------------------------------------------------------ +# +# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
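A small illustration, outside the patch, of why main() above sorts args.evaluation and args.lr_decay_epochs before training: the epoch loop both tests membership in args.lr_decay_epochs and reports the decay step via bisect(), which is only meaningful on a sorted list. The boundary epochs below are hypothetical.

from bisect import bisect

lr_decay_epochs = sorted([54, 44])   # hypothetical decay boundaries
lr, decay_factor = 2.5e-3, 0.1
for epoch in range(1, 61):
    if epoch in lr_decay_epochs:
        lr *= decay_factor
        # bisect() counts how many boundaries have been passed, i.e. the decay step number
        print("epoch {}: lr decay step #{}, lr = {:g}".format(epoch, bisect(lr_decay_epochs, epoch), lr))
# -> epoch 44: lr decay step #1, lr = 0.00025
#    epoch 54: lr decay step #2, lr = 2.5e-05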
import torch import torchvision @@ -40,73 +79,159 @@ except ImportError: import json import gc import time -# import bz2 -# import pickle -from math import sqrt - -from box_coder import calc_iou_tensor, Encoder - - -class DefaultBoxes(object): - def __init__(self, fig_size, feat_size, steps, scales, aspect_ratios, \ - scale_xy=0.1, scale_wh=0.2): - - self.feat_size = feat_size - self.fig_size = fig_size - - self.scale_xy_ = scale_xy - self.scale_wh_ = scale_wh - - # According to https://github.com/weiliu89/caffe - # Calculation method slightly different from paper - self.steps = steps - self.scales = scales - - fk = fig_size / np.array(steps) - self.aspect_ratios = aspect_ratios - - self.default_boxes = [] - # size of feature and number of feature - for idx, sfeat in enumerate(self.feat_size): - - sk1 = scales[idx] / fig_size - sk2 = scales[idx + 1] / fig_size - sk3 = sqrt(sk1 * sk2) - all_sizes = [(sk1, sk1), (sk3, sk3)] - - for alpha in aspect_ratios[idx]: - w, h = sk1 * sqrt(alpha), sk1 / sqrt(alpha) - all_sizes.append((w, h)) - all_sizes.append((h, w)) - for w, h in all_sizes: - for i, j in itertools.product(range(sfeat), repeat=2): - cx, cy = (j + 0.5) / fk[idx], (i + 0.5) / fk[idx] - self.default_boxes.append((cx, cy, w, h)) - - self.dboxes = torch.tensor(self.default_boxes, dtype=torch.float) - self.dboxes.clamp_(min=0, max=1) - # For IoU calculation - self.dboxes_ltrb = self.dboxes.clone() - self.dboxes_ltrb[:, 0] = self.dboxes[:, 0] - 0.5 * self.dboxes[:, 2] - self.dboxes_ltrb[:, 1] = self.dboxes[:, 1] - 0.5 * self.dboxes[:, 3] - self.dboxes_ltrb[:, 2] = self.dboxes[:, 0] + 0.5 * self.dboxes[:, 2] - self.dboxes_ltrb[:, 3] = self.dboxes[:, 1] + 0.5 * self.dboxes[:, 3] +#import bz2 +import pickle +from math import sqrt, ceil, cos, sin, pi +from mlperf_logging.mllog import constants +from mlperf_logger import log_event + +from SSD import _C as C + +from fused_color_jitter import FusedColorJitter +from box_coder import Encoder + +# This function is from https://github.com/kuangliu/pytorch-ssd +def calc_iou_tensor(box1, box2): + """ Calculation of IoU based on two boxes tensor, + Reference to https://github.com/kuangliu/pytorch-ssd + input: + box1 (N, 4) + box2 (M, 4) + output: + IoU (N, M) + """ + N = box1.size(0) + M = box2.size(0) + + be1 = box1.unsqueeze(1).expand(-1, M, -1) + be2 = box2.unsqueeze(0).expand(N, -1, -1) + + # Left Top & Right Bottom + lt = torch.max(be1[:,:,:2], be2[:,:,:2]) + #mask1 = (be1[:,:, 0] < be2[:,:, 0]) ^ (be1[:,:, 1] < be2[:,:, 1]) + #mask1 = ~mask1 + rb = torch.min(be1[:,:,2:], be2[:,:,2:]) + #mask2 = (be1[:,:, 2] < be2[:,:, 2]) ^ (be1[:,:, 3] < be2[:,:, 3]) + #mask2 = ~mask2 + + delta = rb - lt + delta[delta < 0] = 0 + intersect = delta[:,:,0]*delta[:,:,1] + #*mask1.float()*mask2.float() + + delta1 = be1[:,:,2:] - be1[:,:,:2] + area1 = delta1[:,:,0]*delta1[:,:,1] + delta2 = be2[:,:,2:] - be2[:,:,:2] + area2 = delta2[:,:,0]*delta2[:,:,1] + + iou = intersect/(area1 + area2 - intersect) + return iou + +# This class is from https://github.com/chauhan-utk/ssd.DomainAdaptation +class SSDCropping(object): + """ Cropping for SSD, according to original paper + Choose between following 3 conditions: + 1. Preserve the original image + 2. Random crop minimum IoU is among 0.1, 0.3, 0.5, 0.7, 0.9 + 3. 
Random crop + Reference to https://github.com/chauhan-utk/ssd.DomainAdaptation + """ + def __init__(self): - @property - def scale_xy(self): - return self.scale_xy_ + self.sample_options = ( + # Do nothing + None, + # min IoU, max IoU + (0.1, None), + (0.3, None), + (0.5, None), + (0.7, None), + (0.9, None), + # no IoU requirements + (None, None), + ) + # Implementation uses 1 iteration to find a possible candidate, this + # was shown to produce the same mAP as using more iterations. + self.num_cropping_iterations = 1 + log_event(key=constants.MAX_SAMPLES, + value=self.num_cropping_iterations) - @property - def scale_wh(self): - return self.scale_wh_ + def __call__(self, img, img_size, bboxes, labels): - def __call__(self, order="ltrb"): - if order == "ltrb": return self.dboxes_ltrb - if order == "xywh": return self.dboxes + # Ensure always return cropped image + while True: + mode = random.choice(self.sample_options) + if mode is None: + return img, img_size, bboxes, labels -# This function is from https://github.com/chauhan-utk/ssd.DomainAdaptation. -class SSDCropping(object): + htot, wtot = img_size + + min_iou, max_iou = mode + min_iou = float("-inf") if min_iou is None else min_iou + max_iou = float("+inf") if max_iou is None else max_iou + + # Implementation use 50 iteration to find possible candidate + for _ in range(self.num_cropping_iterations): + # suze of each sampled path in [0.1, 1] 0.3*0.3 approx. 0.1 + w = random.uniform(0.3 , 1.0) + h = random.uniform(0.3 , 1.0) + + if w/h < 0.5 or w/h > 2: + continue + + # left 0 ~ wtot - w, top 0 ~ htot - h + left = random.uniform(0, 1.0 - w) + top = random.uniform(0, 1.0 - h) + + right = left + w + bottom = top + h + + ious = calc_iou_tensor(bboxes, torch.tensor([[left, top, right, bottom]])) + + # tailor all the bboxes and return + if not ((ious > min_iou) & (ious < max_iou)).all(): + continue + + # discard any bboxes whose center not in the cropped image + xc = 0.5*(bboxes[:, 0] + bboxes[:, 2]) + yc = 0.5*(bboxes[:, 1] + bboxes[:, 3]) + + masks = (xc > left) & (xc < right) & (yc > top) & (yc < bottom) + + # if no such boxes, continue searching again + if not masks.any(): + continue + + bboxes[bboxes[:, 0] < left, 0] = left + bboxes[bboxes[:, 1] < top, 1] = top + bboxes[bboxes[:, 2] > right, 2] = right + bboxes[bboxes[:, 3] > bottom, 3] = bottom + + #print(left, top, right, bottom) + #print(labels, bboxes, masks) + bboxes = bboxes[masks, :] + labels = labels[masks] + + left_idx = int(left*wtot) + top_idx = int(top*htot) + right_idx = int(right*wtot) + bottom_idx = int(bottom*htot) + #print(left_idx,top_idx,right_idx,bottom_idx) + #img = img[:, top_idx:bottom_idx, left_idx:right_idx] + img = img.crop((left_idx, top_idx, right_idx, bottom_idx)) + + bboxes[:, 0] = (bboxes[:, 0] - left)/w + bboxes[:, 1] = (bboxes[:, 1] - top)/h + bboxes[:, 2] = (bboxes[:, 2] - left)/w + bboxes[:, 3] = (bboxes[:, 3] - top)/h + + htot = bottom_idx - top_idx + wtot = right_idx - left_idx + return img, (htot, wtot), bboxes, labels + + +class SSDCroppingNoDali(object): """ Cropping for SSD, according to original paper Choose between following 3 conditions: 1. 
Preserve the original image @@ -207,7 +332,7 @@ class SSDCropping(object): wtot = right_idx - left_idx return img, (htot, wtot), bboxes, labels - +# Don't need to cast to float, already there (from FusedColorJitter) class ToTensor(object): def __init__(self): pass @@ -215,10 +340,9 @@ class ToTensor(object): def __call__(self, img): img = torch.Tensor(np.array(img)) # Transform from HWC to CHW - img = img.permute(2, 0, 1).div(255) + img = img.permute(2, 0 ,1).div(255) return img - class RandomHorizontalFlip(object): def __init__(self, p=0.5): self.p = p @@ -229,7 +353,6 @@ class RandomHorizontalFlip(object): return image.transpose(Image.FLIP_LEFT_RIGHT), bboxes return image, bboxes - # Do data augumentation class SSDTransformer(object): """ SSD Data Augumentation, according to original paper @@ -239,39 +362,27 @@ class SSDTransformer(object): Flipping Jittering """ - def __init__(self, size=(300, 300), dboxes=None, dali=False, fast_nms=False, fast_cj=False, val=False, num_cropping_iterations=1): + def __init__(self, size = (300, 300), val=False): # define vgg16 mean self.size = size self.val = val - self.dali = dali - self.crop = SSDCropping(num_cropping_iterations=num_cropping_iterations) - if self.dali: - self.dboxes_ = None - self.encoder = None - else: - self.dboxes_ = dboxes # DefaultBoxes300() - self.encoder = Encoder(self.dboxes_, fast_nms) - - if fast_cj: - from fused_color_jitter import FusedColorJitter - self.img_trans = transforms.Compose([ - transforms.Resize(self.size), - FusedColorJitter(), - ToTensor(), - ]) - else: - self.img_trans = transforms.Compose([ - transforms.Resize(self.size), - transforms.ColorJitter(brightness=0.125, contrast=0.5, - saturation=0.5, hue=0.05 - ), - transforms.ToTensor() - ]) + + self.crop = SSDCropping() + self.img_trans = transforms.Compose([ + transforms.Resize(self.size), + #transforms.ColorJitter(brightness=0.125, contrast=0.5, + # saturation=0.5, hue=0.05 + #), + #transforms.ToTensor(), + FusedColorJitter(), + ToTensor(), + ]) self.hflip = RandomHorizontalFlip() # All Pytorch Tensor will be normalized # https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683 + normalization_mean = [0.485, 0.456, 0.406] normalization_std = [0.229, 0.224, 0.225] self.normalize = transforms.Normalize(mean=normalization_mean, @@ -280,7 +391,75 @@ class SSDTransformer(object): self.trans_val = transforms.Compose([ transforms.Resize(self.size), transforms.ToTensor(), - self.normalize]) + self.normalize,]) + + def __call__(self, img, img_size, bbox=None, label=None, max_num=200): + #img = torch.tensor(img) + if self.val: + bbox_out = torch.zeros(max_num, 4) + label_out = torch.zeros(max_num, dtype=torch.long) + bbox_out[:bbox.size(0), :] = bbox + label_out[:label.size(0)] = label + return self.trans_val(img), img_size, bbox_out, label_out + + # random crop + img, img_size, bbox, label = self.crop(img, img_size, bbox, label) + + # random horiz. 
flip + img, bbox = self.hflip(img, bbox) + + # [Resize, ColorJitter, ToTensor] + img = self.img_trans(img).contiguous() + + img = self.normalize(img) + + return img, img_size, bbox, label + + +class SSDTransformerNoDali(object): + """ SSD Data Augumentation, according to original paper + Composed by several steps: + Cropping + Resize + Flipping + Jittering + """ + + def __init__(self, dboxes, size=(300, 300), val=False, num_cropping_iterations=1): + # define vgg16 mean + self.size = size + self.val = val + + self.dboxes_ = dboxes # DefaultBoxes300() + self.encoder = Encoder(self.dboxes_) + + self.crop = SSDCroppingNoDali(num_cropping_iterations=num_cropping_iterations) + self.img_trans = transforms.Compose([ + transforms.Resize(self.size), + # transforms.Resize((300, 300)), + # transforms.RandomHorizontalFlip(), + transforms.ColorJitter(brightness=0.125, contrast=0.5, + saturation=0.5, hue=0.05 + ), + transforms.ToTensor() + # LightingNoice(), + ]) + self.hflip = RandomHorizontalFlip() + + # All Pytorch Tensor will be normalized + # https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683 + normalization_mean = [0.485, 0.456, 0.406] + normalization_std = [0.229, 0.224, 0.225] + self.normalize = transforms.Normalize(mean=normalization_mean, + std=normalization_std) + # self.normalize = transforms.Normalize(mean = [104.0, 117.0, 123.0], + # std = [1.0, 1.0, 1.0]) + + self.trans_val = transforms.Compose([ + transforms.Resize(self.size), + transforms.ToTensor(), + # ToTensor(), + self.normalize, ]) @property def dboxes(self): @@ -295,23 +474,19 @@ class SSDTransformer(object): label_out[:label.size(0)] = label return self.trans_val(img), img_size, bbox_out, label_out - # random crop + # print("before", img.size, bbox) img, img_size, bbox, label = self.crop(img, img_size, bbox, label) - - # random horiz. 
flip + # print("after", img.size, bbox) img, bbox = self.hflip(img, bbox) - # [Resize, ColorJitter, ToTensor] img = self.img_trans(img).contiguous() - + # img = img.contiguous().div(255) img = self.normalize(img) - if not self.dali: - bbox, label = self.encoder.encode(bbox, label) + bbox, label = self.encoder.encode(bbox, label) return img, img_size, bbox, label - # Implement a datareader for COCO dataset class COCODetection(data.Dataset): def __init__(self, img_folder, annotate_file, transform=None, data=None): @@ -330,14 +505,13 @@ class COCODetection(data.Dataset): self.data = json.load(fin) - if gc_old: - gc.enable() + if gc_old: gc.enable() self.images = {} self.label_map = {} self.label_info = {} - # print("Parsing COCO data...") + #print("Parsing COCO data...") start_time = time.time() # 0 stand for the background cnt = 0 @@ -351,8 +525,8 @@ class COCODetection(data.Dataset): for img in self.data["images"]: img_id = img["id"] img_name = img["file_name"] - img_size = (img["height"], img["width"]) - # print(img_name) + img_size = (img["height"],img["width"]) + #print(img_name) if img_id in self.images: raise Exception("dulpicated image record") self.images[img_id] = (img_name, img_size, []) @@ -366,28 +540,29 @@ class COCODetection(data.Dataset): for k, v in list(self.images.items()): if len(v[2]) == 0: - # print("empty image: {}".format(k)) + #print("empty image: {}".format(k)) self.images.pop(k) self.img_keys = list(self.images.keys()) self.transform = transform - # print("End parsing COCO data, total time {}".format(time.time()-start_time)) + #print("End parsing COCO data, total time {}".format(time.time()-start_time)) @property def labelnum(self): return len(self.label_info) - # @staticmethod - # def load(pklfile): - # # print("Loading from {}".format(pklfile)) - # with bz2.open(pklfile, "rb") as fin: - # ret = pickle.load(fin) - # return ret + @staticmethod + def load(pklfile): + #print("Loading from {}".format(pklfile)) + with bz2.open(pklfile, "rb") as fin: + ret = pickle.load(fin) + return ret + + def save(self, pklfile): + #print("Saving to {}".format(pklfile)) + with bz2.open(pklfile, "wb") as fout: + pickle.dump(self, fout) - # def save(self, pklfile): - # # print("Saving to {}".format(pklfile)) - # with bz2.open(pklfile, "wb") as fout: - # pickle.dump(self, fout) def __len__(self): return len(self.images) @@ -406,8 +581,8 @@ class COCODetection(data.Dataset): bbox_sizes = [] bbox_labels = [] - # for (xc, yc, w, h), bbox_label in img_data[2]: - for (l, t, w, h), bbox_label in img_data[2]: + #for (xc, yc, w, h), bbox_label in img_data[2]: + for (l,t,w,h), bbox_label in img_data[2]: r = l + w b = t + h #l, t, r, b = xc - 0.5*w, yc - 0.5*h, xc + 0.5*w, yc + 0.5*h @@ -428,4 +603,5 @@ class COCODetection(data.Dataset): else: pass # img = transforms.ToTensor()(img) - return img, img_id, (htot, wtot), bbox_sizes, bbox_labels \ No newline at end of file + return img, img_id, (htot, wtot), bbox_sizes, bbox_labels + diff --git a/cv/detection/ssd/pytorch/visualize.py b/cv/detection/ssd/pytorch/visualize.py new file mode 100644 index 000000000..29c40ec5d --- /dev/null +++ b/cv/detection/ssd/pytorch/visualize.py @@ -0,0 +1,174 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
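An aside on the box convention used throughout this file: COCODetection builds corner boxes from COCO's (left, top, width, height) annotations via r = l + w, b = t + h, and the rest of the pipeline (e.g. SSDCropping, which samples crop windows in [0, 1]) works on corners expressed as fractions of the image size. The exact normalization line falls outside the visible hunk, so the helper below is a sketch of that convention rather than a copy of the patch.

def coco_box_to_ltrb_frac(l, t, w, h, img_w, img_h):
    # (left, top, width, height) in pixels -> (left, top, right, bottom) as fractions of the image
    r, b = l + w, t + h
    return l / img_w, t / img_h, r / img_w, b / img_h

# a 40x30 box at (100, 50) in a 400x300 image (made-up numbers)
print(coco_box_to_ltrb_frac(100, 50, 40, 30, img_w=400, img_h=300))
# -> (0.25, 0.1666..., 0.35, 0.2666...)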
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from nvidia.dali.pipeline import Pipeline +import nvidia.dali.ops as ops +import nvidia.dali.types as types + +import numpy as np +from time import time +import os +import random +import time +import io +import json + +import torch +from PIL import Image +from torchvision import transforms +from torch.utils.data import DataLoader + +import matplotlib.pyplot as plt +import matplotlib.patches as patches + +from argparse import ArgumentParser +from utils import DefaultBoxes, Encoder, COCODetection +from utils import SSDTransformer +from ssd300 import SSD300 +from train import load_checkpoint, dboxes300_coco + +def parse_args(): + parser = ArgumentParser(description="Visualize models predictions on image") + parser.add_argument('--images', '-i', nargs='*', type=str, + help='path to jpg image') + parser.add_argument('--model', '-m', type=str, default='iter_240000.pt', + help='path to trained model') + parser.add_argument('--threshold', '-t', type=float, default=0.10, + help='threshold for predictions probabilities') + parser.add_argument('--annotations', '-a', type=str, + default='/coco/annotations/instances_val2017.json', + help='path to json with annotations') + return parser.parse_args() + +def print_image(image, model, encoder, inv_map, name_map, category_id_to_color, threshold): + # Open image for printing + im = Image.open(image) + W, H = im.size + + # Prepare tensor input for model + tmp = im.copy() + tmp = tmp.resize((300, 300)) + img = transforms.ToTensor()(tmp) + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + img = normalize(img).unsqueeze(dim = 0) + + # Find predictions + with torch.no_grad(): + ploc, plabel = model(img) + ploc, plabel = ploc.float(), plabel.float() + + ret = [] + for idx in range(ploc.shape[0]): + # ease-of-use for specific predictions + ploc_i = ploc[idx, :, :].unsqueeze(0) + plabel_i = plabel[idx, :, :].unsqueeze(0) + + try: + result = encoder.decode_batch(ploc_i, plabel_i, 0.50, 200)[0] + except: + print("No object detected in image {}".format(image)) + continue + + htot, wtot = (H, W) + loc, label, prob = [r.cpu().numpy() for r in result] + for loc_, label_, prob_ in zip(loc, label, prob): + ret.append([0, loc_[0]*wtot, \ + loc_[1]*htot, + (loc_[2] - loc_[0])*wtot, + (loc_[3] - loc_[1])*htot, + prob_, + inv_map[label_]]) + + ret = np.array(ret).astype(np.float32) + + # Choose bounding boxes for printing + bboxes = [] + for re in ret: + if re[5] > threshold: + bboxes.append(re) + + print("Bounding boxes detected in image {}:".format(image)) + print(bboxes) + + # Prepare image for plotting + img = transforms.ToTensor()(im) + img = img.permute(1, 2, 0) + H = img.shape[0] + W = img.shape[1] + fig,ax = plt.subplots(1) + ax.imshow(img) + + # Add bboxes with labels + used = set() + for bbox in bboxes: + if (bbox[6] in used): + rect = patches.Rectangle((bbox[1], bbox[2]), bbox[3], bbox[4], + edgecolor=category_id_to_color[bbox[6]], + linewidth=2, facecolor='none') + else: + rect = patches.Rectangle((bbox[1], bbox[2]), bbox[3], bbox[4], + label = name_map[bbox[6]], + edgecolor=category_id_to_color[bbox[6]], + 
linewidth=2, facecolor='none') + used.add(bbox[6]) + ax.add_patch(rect) + + # Show image + plt.legend(ncol=1, bbox_to_anchor=(1.04,1), loc="upper left") + plt.show() + +def main(): + # Parse arguments + args = parse_args() + + # Get categories names + with open(args.annotations,'r') as anno: + js = json.loads(anno.read()) + coco_names = js['categories'] + + # Prepare map of COCO labels to COCO names + name_map = {} + for name in coco_names: + name_map[name['id']] = name['name'] + + # Prepare map of SSD to COCO labels + deleted = [12, 26, 29, 30, 45, 66, 68, 69, 71, 83] + inv_map = {} + cnt = 0 + for i in range(1, 81): + while i + cnt in deleted: + cnt += 1 + inv_map[i] = i + cnt + + # Prepare colors for categories + category_id_to_color = dict([(cat_id, [random.uniform(0, 1) ,random.uniform(0, 1), random.uniform(0, 1)]) for cat_id in range(1, 91)]) + + # Set math plot lib size + plt.rcParams["figure.figsize"] = (12, 8) + + # Build and load SSD model + ssd300 = SSD300(81, backbone="resnet34", model_path=None, dilation=None) + load_checkpoint(ssd300, args.model) + ssd300.eval() + + # Prepare encoder + dboxes = dboxes300_coco() + encoder = Encoder(dboxes) + + # Print images + for image in args.images: + print_image(image, ssd300, encoder, inv_map, name_map, category_id_to_color, args.threshold) + +if __name__ == "__main__": + main() diff --git a/tests/executables/ssd/init_torch.sh b/tests/executables/ssd/init_torch.sh index 5065ae4b7..19211d91a 100644 --- a/tests/executables/ssd/init_torch.sh +++ b/tests/executables/ssd/init_torch.sh @@ -51,7 +51,7 @@ if [[ "$(uname -m)" == "aarch64" ]]; then source /opt/rh/gcc-toolset-11/enable fi -cd ../../research/cv/detection/ssd && bash ./clean_ssd.sh && bash ./build_ssd.sh && bash ./install_ssd.sh "$@"; check_status +cd ../../cv/detection/ssd/pytorch/ && bash ./clean_ssd.sh && bash ./build_ssd.sh && bash ./install_ssd.sh "$@"; check_status DATA_PATH_BBOX=../../../.. 
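Back in visualize.py above, the inv_map loop maps the model's 80 contiguous labels to COCO's sparse category ids by skipping the ten ids that carry no category, and print_image() then reports inv_map[label_] for each detection. Running the loop stand-alone (an illustrative check, not part of the patch) makes the mapping concrete:

deleted = [12, 26, 29, 30, 45, 66, 68, 69, 71, 83]   # ids with no category in the 80-class set
inv_map, cnt = {}, 0
for i in range(1, 81):
    while i + cnt in deleted:
        cnt += 1
    inv_map[i] = i + cnt

print(inv_map[1], inv_map[11], inv_map[12], inv_map[80])   # 1 11 13 90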
python3 prepare-json.py --keep-keys ${DATA_PATH}/annotations/instances_val2017.json ${DATA_PATH_BBOX}/bbox_only_instances_val2017.json "$@"; check_status diff --git a/tests/executables/ssd/train_ssd_amp_torch.sh b/tests/executables/ssd/train_ssd_amp_torch.sh index 6d348a988..719664e12 100644 --- a/tests/executables/ssd/train_ssd_amp_torch.sh +++ b/tests/executables/ssd/train_ssd_amp_torch.sh @@ -11,7 +11,7 @@ check_status() fi } -cd ../../research/cv/detection/ssd +cd ../../cv/detection/ssd/pytorch/ echo "python3 train.py --no-dali --dali-cache 0 --data=${COCO_PATH} \ --batch-size=${BATCH_SIZE} --warmup-factor=0 --warmup=650 --lr=2.92e-3 --threshold=0.08 --epochs 5 --eval-batch-size=160 \ -- Gitee From 66387b946415c0e914c5886f2f298fcc5f75b3c0 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Fri, 26 Sep 2025 15:17:05 +0800 Subject: [PATCH 16/18] sync bert pytorch all --- .../bert_sample/pytorch/base/BaseDockerfile | 58 + .../bert_sample/pytorch/base/bind_pyt.py | 201 + .../pytorch/base/config/__init__.py | 4 + .../bert_sample/pytorch/base/config/_base.py | 189 + .../pytorch/base/config/config_manager.py | 176 + .../pytorch/base/config/mutable_params.py | 24 + .../pytorch/base/create_container.sh | 184 + .../pytorch/base/create_contaner.sh | 183 + .../pytorch/base/create_contaner_bi.sh | 184 + .../2048_shards_varlength.chk | 2048 ++++ .../4320_shards_varlength.chk | 4320 +++++++ .../data_preprocessing/chop_hdf5_files.py | 150 + .../chop_hdf5_files_to_varlength.py | 154 + .../pytorch/base/data_preprocessing/clean.sh | 86 + .../base/data_preprocessing/cleanup_file.py | 83 + .../convert_fixed2variable.py | 71 + .../convert_tf_checkpoint.py | 93 + .../create_pretraining_data.py | 455 + .../create_pretraining_data_wrapper.sh | 30 + .../base/data_preprocessing/do_gather.py | 95 + .../do_sentence_segmentation.py | 74 + .../pytorch/base/data_preprocessing/eval.md5 | 10000 ++++++++++++++++ .../data_preprocessing/eval_varlength.chk | 1 + .../base/data_preprocessing/hdf5_md5.py | 29 + .../parallel_create_hdf5.sh | 75 + .../data_preprocessing/pick_eval_samples.py | 83 + .../pick_eval_samples_varlength.py | 76 + .../base/data_preprocessing/prepare_data.sh | 167 + .../base/data_preprocessing/process_wiki.sh | 34 + .../data_preprocessing/seperate_test_set.py | 120 + .../base/data_preprocessing/tokenization.py | 413 + .../pytorch/base/dataloaders/__init__.py | 3 + .../pytorch/base/dataloaders/dataloader.py | 235 + .../pytorch/base/dataloaders/dataset.py | 159 + .../pytorch/base/model/__init__.py | 19 + .../pytorch/base/model/layers/__init__.py | 6 + .../pytorch/base/model/layers/activations.py | 82 + .../pytorch/base/model/layers/embeddings.py | 59 + .../pytorch/base/model/layers/layernorm.py | 36 + .../pytorch/base/model/layers/padding.py | 125 + .../pytorch/base/model/losses/__init__.py | 0 .../pytorch/base/model/models/__init__.py | 0 .../pytorch/base/model/models/modeling.py | 1394 +++ .../pytorch/base/optimizers/__init__.py | 1 + .../pytorch/base/optimizers/factory.py | 33 + .../pytorch/base/optimizers/lamb.py | 102 + .../bert_sample/pytorch/base/prepare.py | 288 + .../bert_sample/pytorch/base/requirements.txt | 3 + .../pytorch/base/run_pretraining.py | 173 + .../bert_sample/pytorch/base/run_training.sh | 39 + .../pytorch/base/run_with_docker.sh | 208 + .../pytorch/base/schedulers/__init__.py | 1 + .../pytorch/base/schedulers/base.py | 49 + .../pytorch/base/schedulers/factory.py | 33 + .../linear_warmup_poly_scheduler.py | 57 + .../schedulers/linear_warmup_scheduler.py | 33 + 
.../bert_sample/pytorch/base/setup.py | 98 + .../pytorch/base/train/__init__.py | 0 .../pytorch/base/train/evaluator.py | 99 + .../pytorch/base/train/event/__init__.py | 4 + .../pytorch/base/train/event/base.py | 65 + .../pytorch/base/train/event/base_adapter.py | 40 + .../pytorch/base/train/event/compose.py | 110 + .../pytorch/base/train/event/log.py | 155 + .../bert_sample/pytorch/base/train/trainer.py | 222 + .../pytorch/base/train/training_state.py | 73 + .../pytorch/base/utils/__init__.py | 17 + .../bert_sample/pytorch/base/utils/check.py | 94 + .../pytorch/base/utils/checkpoint.py | 77 + .../bert_sample/pytorch/base/utils/dist.py | 202 + .../bert_sample/pytorch/base/utils/logging.py | 245 + .../bert_sample/pytorch/base/utils/paths.py | 18 + .../pytorch/base/utils/tokenization.py | 428 + tests/executables/bert/init_torch.sh | 72 + .../train_bert_default_amp_dist_1x8_torch.sh | 41 + ...ain_bert_pretraining_amp_dist_1x8_torch.sh | 31 + 76 files changed, 25089 insertions(+) create mode 100644 nlp/language_model/bert_sample/pytorch/base/BaseDockerfile create mode 100644 nlp/language_model/bert_sample/pytorch/base/bind_pyt.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/config/__init__.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/config/_base.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/config/config_manager.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/config/mutable_params.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/create_container.sh create mode 100644 nlp/language_model/bert_sample/pytorch/base/create_contaner.sh create mode 100644 nlp/language_model/bert_sample/pytorch/base/create_contaner_bi.sh create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/2048_shards_varlength.chk create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/4320_shards_varlength.chk create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/chop_hdf5_files.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/chop_hdf5_files_to_varlength.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/clean.sh create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/cleanup_file.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/convert_fixed2variable.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/convert_tf_checkpoint.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/create_pretraining_data.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/create_pretraining_data_wrapper.sh create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/do_gather.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/do_sentence_segmentation.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/eval.md5 create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/eval_varlength.chk create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/hdf5_md5.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/parallel_create_hdf5.sh create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/pick_eval_samples.py create mode 100644 
nlp/language_model/bert_sample/pytorch/base/data_preprocessing/pick_eval_samples_varlength.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/prepare_data.sh create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/process_wiki.sh create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/seperate_test_set.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/data_preprocessing/tokenization.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/dataloaders/__init__.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/dataloaders/dataloader.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/dataloaders/dataset.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/model/__init__.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/model/layers/__init__.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/model/layers/activations.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/model/layers/embeddings.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/model/layers/layernorm.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/model/layers/padding.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/model/losses/__init__.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/model/models/__init__.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/model/models/modeling.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/optimizers/__init__.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/optimizers/factory.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/optimizers/lamb.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/prepare.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/requirements.txt create mode 100644 nlp/language_model/bert_sample/pytorch/base/run_pretraining.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/run_training.sh create mode 100644 nlp/language_model/bert_sample/pytorch/base/run_with_docker.sh create mode 100644 nlp/language_model/bert_sample/pytorch/base/schedulers/__init__.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/schedulers/base.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/schedulers/factory.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/schedulers/linear_warmup_poly_scheduler.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/schedulers/linear_warmup_scheduler.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/setup.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/train/__init__.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/train/evaluator.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/train/event/__init__.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/train/event/base.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/train/event/base_adapter.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/train/event/compose.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/train/event/log.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/train/trainer.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/train/training_state.py create mode 100644 
nlp/language_model/bert_sample/pytorch/base/utils/__init__.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/utils/check.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/utils/checkpoint.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/utils/dist.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/utils/logging.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/utils/paths.py create mode 100644 nlp/language_model/bert_sample/pytorch/base/utils/tokenization.py create mode 100644 tests/executables/bert/init_torch.sh create mode 100644 tests/executables/bert/train_bert_default_amp_dist_1x8_torch.sh create mode 100644 tests/executables/bert/train_bert_pretraining_amp_dist_1x8_torch.sh diff --git a/nlp/language_model/bert_sample/pytorch/base/BaseDockerfile b/nlp/language_model/bert_sample/pytorch/base/BaseDockerfile new file mode 100644 index 000000000..6f036a1c0 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/BaseDockerfile @@ -0,0 +1,58 @@ +FROM ubuntu:18.04 + +ENV DEBIAN_FRONTEND=noninteractive +ENV PATH /root/miniconda/bin:$PATH + + +RUN apt-get update -y +RUN apt-get install -y \ + apt-utils \ + sudo \ + openssh-server \ + vim \ + git \ + curl \ + wget \ + tree \ + perl \ + kmod \ + make \ + pciutils \ + build-essential \ + python3.8-dev \ + python3-pip \ + libjpeg-dev \ + zlib1g-dev \ + unzip \ + cmake \ + bzip2 \ + cabextract \ + iputils-ping \ + pbzip2 \ + pv \ + numactl + +# Configure anaconda +RUN wget https://repo.anaconda.com/miniconda/Miniconda3-py38_4.10.3-Linux-x86_64.sh && \ + bash ./Miniconda3-py38_4.10.3-Linux-x86_64.sh -b -p /root/miniconda && \ +# eval "$(/root/miniconda/bin/conda shell.bash hook)" && \ + /root/miniconda/bin/conda clean -tipsy && \ + ln -s /root/miniconda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \ + echo ". /root/miniconda/etc/profile.d/conda.sh" >> ~/.bashrc && \ + echo "conda activate base" >> ~/.bashrc && \ + conda config --set always_yes yes --set changeps1 no + + +RUN /bin/bash -c "apt-get install -y linux-headers-`uname -r`" + +# TODO: Remove pip source +RUN /bin/bash -c "pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple" + +COPY requirements.txt requirements.txt +RUN /bin/bash -c "pip3 install -r requirements.txt" + + +WORKDIR /workspace/baai-perf + + + diff --git a/nlp/language_model/bert_sample/pytorch/base/bind_pyt.py b/nlp/language_model/bert_sample/pytorch/base/bind_pyt.py new file mode 100644 index 000000000..827787c6c --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/bind_pyt.py @@ -0,0 +1,201 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import subprocess +import os +import os.path as ospath +import socket +from argparse import ArgumentParser, REMAINDER +import random +import psutil + +MODEL_DIR = ospath.abspath( + ospath.join( + __file__, + "../../" + ) +) + +PROJ_DIR = MODEL_DIR + +MODEL = ospath.basename(MODEL_DIR) + + +def _parse_known_args(parser, *args, **kwargs): + return parser.parse_known_args(*args, **kwargs) + + +def parse_args(): + parser = ArgumentParser(description="PyTorch distributed training launch " + "helper utilty that will spawn up " + "multiple distributed processes") + + parser.add_argument("--node_rank", type=int, default=0, + help="The rank of the node for multi-node distributed " + "training") + parser.add_argument("--master_addr", default="127.0.0.1", type=str, + help="Master node (rank 0)'s address, should be either " + "the IP address or the hostname of node 0, for " + "single node multi-proc training, the " + "--master_addr can simply be 127.0.0.1") + parser.add_argument("--master_port", default=random.randint(10000,65534), type=int, + help="Master node (rank 0)'s free port that needs to " + "be used for communciation during distributed " + "training") + parser.add_argument('--no_hyperthreads', action='store_true', + help='Flag to disable binding to hyperthreads') + parser.add_argument('--no_membind', action='store_true', + help='Flag to disable memory binding') + + # non-optional arguments for binding + parser.add_argument("--nsockets_per_node", type=int, required=True, + help="Number of CPU sockets on a node") + parser.add_argument("--ncores_per_socket", type=int, required=True, + help="Number of CPU cores per socket") + + parser.add_argument("--training_script", type=str, required=True, + help="The full path to the single GPU training " + "program/script to be launched in parallel, " + "followed by all the arguments for the " + "training script") + + parser.add_argument("--config", type=str, required=True) + parser.add_argument("--name", type=str, required=True) + + args, training_script_args = _parse_known_args(parser) + args.training_script_args = training_script_args + + return args + + +def get_cuda_visible_devices(gpus=1): + if "CUDA_VISIBLE_DEVICES" in os.environ: + return os.environ['CUDA_VISIBLE_DEVICES'] + return ','.join([str(gpu_id) for gpu_id in range(gpus)]) + +def is_port_in_use(port): + for conn in psutil.net_connections(): + if conn.laddr.port == port: + return True + + return False + +def get_process_info_by_port(port): + for conn in psutil.net_connections(kind='inet'): + if conn.laddr.port == port: + process = psutil.Process(conn.pid) + return { + "pid": conn.pid, + "name": process.name(), + "cmdline": process.cmdline(), + } + return None + +def main(): + args = parse_args() + config_full_name = f"config_{args.config}.py" + config_path = ospath.join(PROJ_DIR, args.name, "config", config_full_name) + + _, args.nnodes, args.nproc_per_node = args.config.split("x") + + args.nnodes = int(args.nnodes) + args.nproc_per_node = int(args.nproc_per_node) + + # variables for numactrl binding + + NSOCKETS = args.nsockets_per_node + NGPUS_PER_SOCKET = (args.nproc_per_node // args.nsockets_per_node) + (1 if (args.nproc_per_node % args.nsockets_per_node) else 0) + NCORES_PER_GPU = args.ncores_per_socket // NGPUS_PER_SOCKET + + # world size in terms of number of processes + dist_world_size = args.nproc_per_node * args.nnodes + + if is_port_in_use(args.master_port): + process_info = get_process_info_by_port(args.master_port) + if process_info: + print(f"端口:{args.master_port} 
被进程 {process_info['name']} 占用") + print(f"进程ID: {process_info['pid']}") + print(f"进程命令行: {' '.join(process_info['cmdline'])}") + else: + print(f"端口:{args.master_port} 被占用,请先关闭占用此端口进程 !") + return + else: + print(f"master port: {args.master_port} is ok") + + # set PyTorch distributed related environmental variables + current_env = os.environ.copy() + current_env["MASTER_ADDR"] = args.master_addr + current_env["MASTER_PORT"] = str(args.master_port) + current_env["WORLD_SIZE"] = str(dist_world_size) + current_env["NODE_RANK"] = str(args.node_rank) + current_env["CUDA_VISIBLE_DEVICES"] = get_cuda_visible_devices(args.nproc_per_node) + + print(args.master_addr) + print(args.master_port) + + processes = [] + + for local_rank in range(0, args.nproc_per_node): + # each process's rank + dist_rank = args.nproc_per_node * args.node_rank + local_rank + current_env["RANK"] = str(dist_rank) + current_env["LOCAL_RANK"] = str(local_rank) + + # form numactrl binding command + cpu_ranges = [local_rank * NCORES_PER_GPU, + (local_rank + 1) * NCORES_PER_GPU - 1, + local_rank * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS), + (local_rank + 1) * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS) - 1] + + numactlargs = [] + if args.no_hyperthreads: + numactlargs += [ "--physcpubind={}-{}".format(*cpu_ranges[0:2]) ] + else: + numactlargs += [ "--physcpubind={}-{},{}-{}".format(*cpu_ranges) ] + + if not args.no_membind: + memnode = local_rank // NGPUS_PER_SOCKET + numactlargs += [ "--membind={}".format(memnode) ] + + # spawn the processes + cmd = [ "/usr/bin/numactl" ] \ + + numactlargs \ + + [ sys.executable, + "-u", + args.training_script, + "--local_rank={}".format(local_rank) + ] \ + + args.training_script_args + [f"{config_path}"] + + print("=" * 80) + print("= numactlargs_flag") + print(cmd) + print("=" * 80) + process = subprocess.Popen(cmd, env=current_env) + processes.append(process) + + proc_status = [] + + for process in processes: + process.wait() + proc_status.append(process.returncode != 0) + + exit(all(proc_status)) + + +if __name__ == "__main__": + main() + + diff --git a/nlp/language_model/bert_sample/pytorch/base/config/__init__.py b/nlp/language_model/bert_sample/pytorch/base/config/__init__.py new file mode 100644 index 000000000..258fb17d7 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/config/__init__.py @@ -0,0 +1,4 @@ +from ._base import * + +from .config_manager import activate_config_env + diff --git a/nlp/language_model/bert_sample/pytorch/base/config/_base.py b/nlp/language_model/bert_sample/pytorch/base/config/_base.py new file mode 100644 index 000000000..cecb8d69e --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/config/_base.py @@ -0,0 +1,189 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +from typing import ClassVar +from train.event.base import BaseTrainingEventInterface + +# The train dir. 
Should contain train_dir, eval_dir, init_checkpoint, bert_config_path for the task. +data_dir: str = None + +# The train dir. Should contain .hdf5 files for the task. +train_dir: str = None + +# Bert pre-trained model selected in the list: +# bert-base-uncased, bert-large-uncased, bert-base-cased, +# bert-base-multilingual, bert-base-chinese. +bert_model: str = "bert-large-uncased" + +# The output directory where the model checkpoints will be written. +output_dir: str = None + +# The eval data dir. Should contain .hdf5 files for the task. +eval_dir: str = None + +# Sample to begin performing eval. +eval_iter_start_samples: int = 150000 + +# If set to -1, disable eval, else evaluate every eval_iter_samples during training +eval_iter_samples: int = 150000 + +# number of eval examples to run eval on +num_eval_examples: int = 10000 + +# whether to cache evaluation data on GPU +cache_eval_data: bool = False + +# The initial checkpoint to start training from. +init_checkpoint: str = None + +# The initial TF checkpoint to start training from. +init_tf_checkpoint: str = None + +# Whether to verify init checkpoint. +verify_checkpoint: bool = True + +# The maximum total input sequence length after WordPiece tokenization. +# Sequences longer than this will be truncated, and sequences shorter +# than this will be padded. +max_seq_length: int = 512 + +# The maximum total of masked tokens in input sequence +max_predictions_per_seq: int = 76 + +# Total batch size for training. +train_batch_size: int = 18 + +# Total batch size for training. +eval_batch_size: int = 128 + +# The initial learning rate for LAMB. +learning_rate: float = 4e-05 + +# weight decay rate for LAMB. +weight_decay_rate: float = 0.01 + +# LAMB beta1. +opt_lamb_beta_1: float = 0.9 + +# LAMB beta2. +opt_lamb_beta_2: float = 0.999 + +# Total number of training steps to perform. +max_steps: int = 1536 + +# Total number of training samples to run. +max_samples_termination: float = 14000000 + +# Proportion of optimizer update steps to perform linear learning rate warmup for. +# Typically 1/8th of steps for Phase2 +warmup_proportion: float = 0.01 + +# Number of optimizer update steps to perform linear learning rate warmup for. +# Typically 1/8th of steps for Phase2 +warmup_steps: int = 0 + +# Starting step for warmup. +start_warmup_step: int = 0 + +# local_rank for distributed training on gpus +local_rank: int = -1 + +# Communication backend for distributed training on gpus +dist_backend: str = "nccl" + +# random seed for initialization +seed: int = 42 + +# Number of updates steps to accumualte before performing a backward/update pass. +gradient_accumulation_steps: int = 1 + +# Whether to use 16-bit float precision instead of 32-bit +fp16: bool = False + +# Loss scaling, positive power of 2 values can improve fp16 convergence. +loss_scale: float = 0.0 + +# frequency of logging loss. If not positive, no logging is provided for training loss +log_freq: int = 1 + +# Whether to use gradient checkpointing +checkpoint_activations: bool = False + +# Whether to resume training from checkpoint. +# If set, precedes init_checkpoint/init_tf_checkpoint +resume_from_checkpoint: bool = False + +# The initial checkpoint to start continue training from. +resume_init_checkpoint: str = None + +# Number of checkpoints to keep (rolling basis). +keep_n_most_recent_checkpoints: int = 20 + +# Number of update steps until a model checkpoint is saved to disk. 
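Going back to bind_pyt.py above: the numactl core-binding arithmetic is the part worth sanity-checking on a new machine. The sketch below re-derives it (it is not imported from the patch) for a hypothetical 2-socket, 32-cores-per-socket box running 8 local processes; the second range in --physcpubind is the hyperthread siblings, offset by the total physical core count, and --membind pins memory to the socket that owns those cores.

def numa_binding(local_rank, nproc_per_node, nsockets_per_node, ncores_per_socket):
    # Re-derivation of the binding math in bind_pyt.py (illustrative only).
    ngpus_per_socket = nproc_per_node // nsockets_per_node + (1 if nproc_per_node % nsockets_per_node else 0)
    ncores_per_gpu = ncores_per_socket // ngpus_per_socket
    total_physical = ncores_per_gpu * ngpus_per_socket * nsockets_per_node
    lo = local_rank * ncores_per_gpu
    hi = (local_rank + 1) * ncores_per_gpu - 1
    physcpubind = "{}-{},{}-{}".format(lo, hi, lo + total_physical, hi + total_physical)
    membind = local_rank // ngpus_per_socket
    return physcpubind, membind

for rank in (0, 5):
    print(rank, numa_binding(rank, nproc_per_node=8, nsockets_per_node=2, ncores_per_socket=32))
# -> 0 ('0-7,64-71', 0)
#    5 ('40-47,104-111', 1)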
+num_samples_per_checkpoint: int = 500000 + +# Number of update steps until model checkpoints start saving to disk. +min_samples_to_start_checkpoints: int = 3000000 + +# Whether to save checkpoints +save_checkpoint: bool = False + +# Whether to run training. +do_train: bool = False + +# Whether to run with unpadding. +exchange_padding: bool = False + +# Whether to disable fusion of attention mask to softmax and dropout. +enable_fuse_dropout: bool = False + +# Whether to disable fusion of the attention mask to softmax. +disable_fuse_mask: bool = False + +# Whether to run with optimizations. +fused_gelu_bias: bool = False + +# Whether to run with optimizations. +fused_dropout_add: bool = False + +# Whether to run with optimizations. +dense_seq_output: bool = False + +# Whether to read local rank from ENVVAR +use_env: bool = False + +# Path bert_config.json is located in +bert_config_path: str = None + +# Stop training after reaching this Masked-LM accuracy +target_mlm_accuracy: float = 0.720 + +# Average accuracy over this amount of batches before performing a stopping criterion test +train_mlm_accuracy_window_size: int = 0 + +# Number of epochs to plan seeds for. Same set across all workers. +# num_epochs_to_generate_seeds_for: int = 2 +num_epochs_to_generate_seeds_for: int = 3 + +# Enable DDP. +use_ddp: bool = False + +# Turn ON gradient_as_bucket_view optimization in native DDP. +use_gradient_as_bucket_view: bool = False + +# A object to provide some core components in training +training_event: ClassVar[BaseTrainingEventInterface] = None + +# device +device: str = None +n_gpu: int = 1 + +eval_interval_samples: int = 0 +eval_steps = 1000 diff --git a/nlp/language_model/bert_sample/pytorch/base/config/config_manager.py b/nlp/language_model/bert_sample/pytorch/base/config/config_manager.py new file mode 100644 index 000000000..69520ad43 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/config/config_manager.py @@ -0,0 +1,176 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + + +import copy +import importlib +import inspect +import os +import sys +from argparse import ArgumentParser +from typing import Iterable, Mapping + +import config as global_config +from . 
import _base as base_config +from .mutable_params import mutable_params + +mutable_params = copy.copy(mutable_params) +immutable_params = set(global_config.__dict__.keys()) - set(mutable_params) + + +def get_config(config_path: str): + if os.path.exists(config_path): + abs_path = config_path + sys.path.append(os.path.dirname(abs_path)) + config_path = os.path.basename(config_path).replace(".py", "") + try: + module = importlib.import_module(config_path) + except Exception as ex: + sys.path.pop(-1) + raise ex + sys.path.pop(-1) + else: + raise FileNotFoundError("Not found config:", config_path) + + return module + + +def get_annotations(other_modules: list=None): + annotations = dict() + + if "__annotations__" in base_config.__dict__: + annotations.update(base_config.__dict__["__annotations__"]) + + if other_modules is not None: + for mod in other_modules: + if isinstance(mod, str): + mod = get_config(mod) + if "__annotations__" in mod.__dict__: + annotations.update(mod.__dict__["__annotations__"]) + + return annotations + + +def is_property(name: str, value): + status = [ + not name.startswith('__'), + not callable(value), + not inspect.isclass(value), + not inspect.ismodule(value), + not inspect.ismethod(value), + not inspect.isfunction(value), + not inspect.isbuiltin(value), + ] + + return all(status) + + +def get_properties_from_config(config): + if not isinstance(config, Mapping): + config = config.__dict__ + properties = dict() + for name, value in config.items(): + if is_property(name, value): + properties[name] = value + + return properties + + +def add_to_argparser(config: dict, parser: ArgumentParser, other_modules: list=None): + annotations = get_annotations(other_modules) + + def get_property_type(name, value): + if value is not None: + return type(value) + if name in annotations: + return annotations[name] + return str + + def add_args(parser, name, value, prefix=''): + dtype = get_property_type(prefix + name, value) + + if dtype == str: + parser.add_argument('--' + prefix + name, type=str, default=None) + elif dtype == int: + parser.add_argument('--' + prefix + name, type=int, default=None) + elif dtype == float: + parser.add_argument('--' + prefix + name, type=float, default=None) + elif dtype == bool: + parser.add_argument('--' + prefix + name, action=f"store_{str(not value).lower()}", default=None) + elif isinstance(value, Mapping): + for k, v in value.items(): + add_args(parser, k, v, prefix=prefix + name + ".") + elif isinstance(value, Iterable) and not isinstance(value, Mapping): + parser.add_argument('--' + prefix + name, type=type(value[0]), nargs='+', default=None) + # else: + # print(f'WARN: Cannot parse key {prefix + name} of type {type(value)}.') + + for name, value in config.items(): + if not is_property(name, value): + continue + add_args(parser, name, value) + + +def _merge_dict_to_config(src: dict, dist: dict, ignore_none=True): + for arg, value in src.items(): + if ignore_none and value is None: + continue + dist[arg] = value + + +def parse_from_args(config: dict, parser=None, other_modules: list=None, with_config_env_name: bool=False): + if parser is None: + parser = ArgumentParser() + + add_to_argparser(config, parser, other_modules) + if with_config_env_name: + parser.add_argument("config", type=str, help="Config name") + + args = parser.parse_args() + return args + + +def activate_config_env(name=None, parser=None, parse_args=True, with_config_env_name: bool=False): + global_config_copy_ = copy.copy(global_config.__dict__) + + if parse_args: + args_dict = 
dict() + for mutable_param in mutable_params: + args_dict[mutable_param] = global_config_copy_[mutable_param] + args = parse_from_args(args_dict, parser, with_config_env_name=with_config_env_name) + del args_dict + if name is None and with_config_env_name: + name = args.config + + if name is None: + raise RuntimeError("Argument `name` must be given.") + + external_module_params = copy.copy(get_config(name).__dict__) + for immutable_param in immutable_params: + if immutable_param in external_module_params: + external_module_params.pop(immutable_param) + + _merge_dict_to_config(global_config_copy_, global_config.__dict__) + _merge_dict_to_config(external_module_params, global_config.__dict__) + if parse_args: + _merge_dict_to_config(args.__dict__, global_config.__dict__) + + +def print_config(config=None): + if config is None: + config = global_config + properties = get_properties_from_config(config) + config_fields = [] + for name, value in properties.items(): + config_fields.append(f"{name}={value}") + + config_fields = ", ".join(config_fields) + config_str = f"Config({config_fields})" + print(config_str) \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/config/mutable_params.py b/nlp/language_model/bert_sample/pytorch/base/config/mutable_params.py new file mode 100644 index 000000000..517668dea --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/config/mutable_params.py @@ -0,0 +1,24 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
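+# Illustrative sketch (not part of the original files): only the names listed in
+# `mutable_params` below can be overridden by an external config module or by the
+# command-line flags that config_manager.add_to_argparser() generates; every other
+# attribute of the `config` package is collected into `immutable_params` and
+# stripped from the external module before merging. Assuming the `base/` directory
+# is on sys.path and a hypothetical override file `configs/my_config.py` exists,
+# a minimal usage sketch of the helpers defined in config_manager.py would be:
+#
+#     from config.config_manager import activate_config_env, print_config
+#
+#     activate_config_env(name="configs/my_config.py", parse_args=False)
+#     print_config()  # prints Config(train_batch_size=..., learning_rate=..., ...)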
+ + +mutable_params = [ + "train_batch_size", "eval_batch_size", "learning_rate", "weight_decay_rate", "opt_lamb_beta_1", + "opt_lamb_beta_2", "max_steps", "max_samples_termination", "warmup_proportion", "warmup_steps", + "start_warmup_step", "dist_backend", "seed", "gradient_accumulation_steps", "fp16", + "loss_scale", "exchange_padding", "enable_fuse_dropout", "disable_fuse_mask", "fused_gelu_bias", + "fused_dropout_add", "dense_seq_output", "cache_eval_data", "training_event","output_dir","save_checkpoint","eval_steps","init_checkpoint","target_mlm_accuracy" +] + +mutable_params += [ + "local_rank", + "do_train", + "data_dir", + "log_freq" +] \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/create_container.sh b/nlp/language_model/bert_sample/pytorch/base/create_container.sh new file mode 100644 index 000000000..23e41bd2c --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/create_container.sh @@ -0,0 +1,184 @@ +# ================================================= +# Constants +# ================================================= + +MODEL="bert" +export MODEL +DOCKER_IMAGE="perf:${MODEL}" +NEXP=1 + +# TODO: Add to Dockerfile +WORK_DIR="/workspace/baai-perf" +MODEL_DIR="${WORK_DIR}/benchmarks/${MODEL}/pytorch" + +CURRENT_DIR=$(cd `dirname $0`; pwd) +PROJ_DIR="${CURRENT_DIR}/../../../" +BUILD_EXTENSION_DIR="${CURRENT_DIR}/build" +BUILD_EXTENSION_PACKAGE_NAME="ext_ops" + +BASE_DOCKERFILE_PATH="${CURRENT_DIR}/BaseDockerfile" +HOST_DOCKERFILE_PATH="${CURRENT_DIR}/Dockerfile" + +SOURCE_DATA_DIR="" +MAP_DATA_DIR="/mnt/dataset/perf/${MODEL}" +SUBMITTER="default" +CONFIG="" + +: "${CLEAR_CACHES:=1}" +SHM_SIZE="32g" + + +# ================================================= +# Parse arguments +# ================================================= + +i=2 +TRAINING_SCRIPT_ARGS="$@" +for arg in "$@" +do + if [[ $arg =~ "--data_dir" ]]; then + if [[ $arg =~ "=" ]]; then + kv=(${arg//=/ }) + SOURCE_DATA_DIR=${kv[1]} + TRAINING_SCRIPT_ARGS=${TRAINING_SCRIPT_ARGS/$arg/"--data_dir ${MAP_DATA_DIR}"} + else + SOURCE_DATA_DIR=${!i} + TRAINING_SCRIPT_ARGS=${TRAINING_SCRIPT_ARGS/"--data_dir ${!i}"/"--data_dir ${MAP_DATA_DIR}"} + fi + + elif [[ $arg =~ "--name" ]]; then + if [[ $arg =~ "=" ]]; then + kv=(${arg//=/ }) + SUBMITTER=${kv[1]} + else + SUBMITTER=${!i} + fi + + elif [[ $arg =~ "--config" ]]; then + if [[ $arg =~ "=" ]]; then + kv=(${arg//=/ }) + CONFIG=${kv[1]} + else + CONFIG=${!i} + fi + fi + + let i++ +done + + +# ================================================= +# Check arguments +# ================================================= + +if [[ "${SOURCE_DATA_DIR}" == "" ]]; then + echo "ERROR: data_dir is not given, please set --data_dir " + exit 1 +fi + +if [[ "${CONFIG}" == "" ]]; then + echo "ERROR: config is not given, please set --config " + exit 1 +fi + +CONTAINER_SUBMITTER_DIR="${WORK_DIR}/${SUBMITTER}" +HOST_SUBMITTER_DIR="${PROJ_DIR}/${SUBMITTER}" + +CONTAINER_ENVIRONMENT_VARIABLES_PATH=${CONTAINER_SUBMITTER_DIR}/${MODEL}/config/environment_variables.sh +HOST_ENVIRONMENT_VARIABLES_PATH="${HOST_SUBMITTER_DIR}/${MODEL}/config/environment_variables.sh" + +HOST_SUBMITTER_DOCKERFILE="${PROJ_DIR}/${SUBMITTER}/${MODEL}/config/Dockerfile" +CONTAINER_NAME="bert_ckpt" + +if [ ! 
-f "${HOST_ENVIRONMENT_VARIABLES_PATH}" ]; then + touch "${HOST_ENVIRONMENT_VARIABLES_PATH}" +fi + +source ${HOST_ENVIRONMENT_VARIABLES_PATH} + +RESULTS_DIR="${PROJ_DIR}/${SUBMITTER}/${MODEL}/results" +LOG_FILE_BASE="${RESULTS_DIR}/config_${CONFIG}_experiment" + +echo "======================================" +echo "Arguments" +echo "---------" + +echo "MODEL = ${MODEL}" +echo "CONTAINER_NAME = ${CONTAINER_NAME}" +echo "DOCKER_IMAGE = ${DOCKER_IMAGE}" +echo "MODEL_DIR = ${MODEL_DIR}" +echo "SUBMITTER = ${SUBMITTER}" +echo "CONTAINER_SUBMITTER_DIR = ${CONTAINER_SUBMITTER_DIR}" +echo "HOST_SUBMITTER_DOCKERFILE = ${HOST_SUBMITTER_DOCKERFILE}" +echo "CONFIG = ${CONFIG}" +echo "CONTAINER_MOUNTS = ${CONTAINER_MOUNTS}" +echo "TRAINING_SCRIPT_ARGS = ${TRAINING_SCRIPT_ARGS[*]}" +echo "CURRENT_DIR = ${CURRENT_DIR}" +echo "CONTAINER_ENVIRONMENT_VARIABLES_PATH = ${CONTAINER_ENVIRONMENT_VARIABLES_PATH}" +echo "RESULTS_DIR = ${RESULTS_DIR}" +echo "LOG_FILE_BASE = ${LOG_FILE_BASE}" +echo "SHM_SIZE = ${SHM_SIZE}" +echo "======================================" + + +# ================================================= +# Training +# ================================================= + +# Cleanup container +# cleanup_docker() { +# docker container rm -f "${CONTAINER_NAME}" || true +# } +# cleanup_docker +# trap 'set -eux; cleanup_docker' EXIT + +# Clean built extension +if [ -d "${BUILD_EXTENSION_DIR}" ]; then + echo "WARN: Delete built extension" + rm -rf "${BUILD_EXTENSION_DIR}" + rm -rf ${CURRENT_DIR}/${BUILD_EXTENSION_PACKAGE_NAME}.*.so + echo "extension file: "${CURRENT_DIR}/${BUILD_EXTENSION_PACKAGE_NAME}.*.so"" +fi + + +# Build image +if [ -f "${HOST_DOCKERFILE_PATH}" ]; then + echo "WARN: Remove previous Dockerfile" + rm -f "${HOST_DOCKERFILE_PATH}" +fi + +echo "WARN: cp BaseDockerfile to Dockerfile" +cp "${BASE_DOCKERFILE_PATH}" "${HOST_DOCKERFILE_PATH}" + +if [ -f "${HOST_SUBMITTER_DOCKERFILE}" ]; then + echo "WARN: Found submitter's Dockerfile, merging submitter's Dockerfile to Dockerfile" + cat "${HOST_SUBMITTER_DOCKERFILE}" >> "${HOST_DOCKERFILE_PATH}" +fi + +docker build -t ${DOCKER_IMAGE} ./ + +# Setup container by Dockerfile +docker run --init --detach \ + --net=host --uts=host --ipc=host --security-opt=seccomp=unconfined \ + --privileged=true \ + --ulimit=stack=67108864 --ulimit=memlock=-1 \ + -w ${MODEL_DIR} \ + --shm-size="${SHM_SIZE}" \ + --volume ${SOURCE_DATA_DIR}:${MAP_DATA_DIR} \ + --volume ${PROJ_DIR}:${WORK_DIR} \ + --name="${CONTAINER_NAME}" ${CONTAINER_MOUNTS} \ + "${DOCKER_IMAGE}" sleep infinity + +# make sure container has time to finish initialization +# TODO: Uncomment +#sleep 30 +docker exec -it "${CONTAINER_NAME}" true + +mkdir -p ${RESULTS_DIR} +docker exec -it "${CONTAINER_NAME}" sh -c "chmod 777 run_training.sh" + +# TODO: Remove pip source +docker exec -it "${CONTAINER_NAME}" /bin/bash -c "pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple" + +docker exec -it "${CONTAINER_NAME}" /bin/bash -c "source ${CONTAINER_ENVIRONMENT_VARIABLES_PATH};python3 prepare.py --name ${SUBMITTER} --data_dir ${MAP_DATA_DIR}" + diff --git a/nlp/language_model/bert_sample/pytorch/base/create_contaner.sh b/nlp/language_model/bert_sample/pytorch/base/create_contaner.sh new file mode 100644 index 000000000..b6b97a037 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/create_contaner.sh @@ -0,0 +1,183 @@ +# ================================================= +# Constants +# ================================================= + +MODEL="bert" +export MODEL 
+DOCKER_IMAGE="perf:${MODEL}" +NEXP=5 + +# TODO: Add to Dockerfile +WORK_DIR="/workspace/baai-perf" +MODEL_DIR="${WORK_DIR}/benchmarks/${MODEL}/pytorch" + +CURRENT_DIR=$(cd `dirname $0`; pwd) +PROJ_DIR="${CURRENT_DIR}/../../../" +BUILD_EXTENSION_DIR="${CURRENT_DIR}/build" +BUILD_EXTENSION_PACKAGE_NAME="ext_ops" + +BASE_DOCKERFILE_PATH="${CURRENT_DIR}/BaseDockerfile" +HOST_DOCKERFILE_PATH="${CURRENT_DIR}/Dockerfile" + +SOURCE_DATA_DIR="" +MAP_DATA_DIR="/mnt/dataset/perf/${MODEL}" +SUBMITTER="iluvatar" +CONFIG="" + +: "${CLEAR_CACHES:=1}" +SHM_SIZE="32g" + + +# ================================================= +# Parse arguments +# ================================================= + +i=2 +TRAINING_SCRIPT_ARGS="$@" +for arg in "$@" +do + if [[ $arg =~ "--data_dir" ]]; then + if [[ $arg =~ "=" ]]; then + kv=(${arg//=/ }) + SOURCE_DATA_DIR=${kv[1]} + TRAINING_SCRIPT_ARGS=${TRAINING_SCRIPT_ARGS/$arg/"--data_dir ${MAP_DATA_DIR}"} + else + SOURCE_DATA_DIR=${!i} + TRAINING_SCRIPT_ARGS=${TRAINING_SCRIPT_ARGS/"--data_dir ${!i}"/"--data_dir ${MAP_DATA_DIR}"} + fi + + elif [[ $arg =~ "--name" ]]; then + if [[ $arg =~ "=" ]]; then + kv=(${arg//=/ }) + SUBMITTER=${kv[1]} + else + SUBMITTER=${!i} + fi + + elif [[ $arg =~ "--config" ]]; then + if [[ $arg =~ "=" ]]; then + kv=(${arg//=/ }) + CONFIG=${kv[1]} + else + CONFIG=${!i} + fi + fi + + let i++ +done + + +# ================================================= +# Check arguments +# ================================================= + +if [[ "${SOURCE_DATA_DIR}" == "" ]]; then + echo "ERROR: data_dir is not given, please set --data_dir " + exit 1 +fi + +if [[ "${CONFIG}" == "" ]]; then + echo "ERROR: config is not given, please set --config " + exit 1 +fi + +CONTAINER_SUBMITTER_DIR="${WORK_DIR}/${SUBMITTER}" +HOST_SUBMITTER_DIR="${PROJ_DIR}/${SUBMITTER}" + +CONTAINER_ENVIRONMENT_VARIABLES_PATH=${CONTAINER_SUBMITTER_DIR}/${MODEL}/config/environment_variables.sh +HOST_ENVIRONMENT_VARIABLES_PATH="${HOST_SUBMITTER_DIR}/${MODEL}/config/environment_variables.sh" + +HOST_SUBMITTER_DOCKERFILE="${PROJ_DIR}/${SUBMITTER}/${MODEL}/config/Dockerfile" +CONTAINER_NAME="bert_test" + +if [ ! 
-f "${HOST_ENVIRONMENT_VARIABLES_PATH}" ]; then + touch "${HOST_ENVIRONMENT_VARIABLES_PATH}" +fi + +source ${HOST_ENVIRONMENT_VARIABLES_PATH} + +RESULTS_DIR="${PROJ_DIR}/${SUBMITTER}/${MODEL}/results" +LOG_FILE_BASE="${RESULTS_DIR}/config_${CONFIG}_experiment" + +echo "======================================" +echo "Arguments" +echo "---------" + +echo "MODEL = ${MODEL}" +echo "CONTAINER_NAME = ${CONTAINER_NAME}" +echo "DOCKER_IMAGE = ${DOCKER_IMAGE}" +echo "MODEL_DIR = ${MODEL_DIR}" +echo "SUBMITTER = ${SUBMITTER}" +echo "CONTAINER_SUBMITTER_DIR = ${CONTAINER_SUBMITTER_DIR}" +echo "HOST_SUBMITTER_DOCKERFILE = ${HOST_SUBMITTER_DOCKERFILE}" +echo "CONFIG = ${CONFIG}" +echo "CONTAINER_MOUNTS = ${CONTAINER_MOUNTS}" +echo "TRAINING_SCRIPT_ARGS = ${TRAINING_SCRIPT_ARGS[*]}" +echo "CURRENT_DIR = ${CURRENT_DIR}" +echo "CONTAINER_ENVIRONMENT_VARIABLES_PATH = ${CONTAINER_ENVIRONMENT_VARIABLES_PATH}" +echo "RESULTS_DIR = ${RESULTS_DIR}" +echo "LOG_FILE_BASE = ${LOG_FILE_BASE}" +echo "SHM_SIZE = ${SHM_SIZE}" +echo "======================================" + + +# ================================================= +# Training +# ================================================= + +# Cleanup container +# cleanup_docker() { +# docker container rm -f "${CONTAINER_NAME}" || true +# } +# cleanup_docker +# trap 'set -eux; cleanup_docker' EXIT + +# Clean built extension +if [ -d "${BUILD_EXTENSION_DIR}" ]; then + echo "WARN: Delete built extension" + rm -rf "${BUILD_EXTENSION_DIR}" + rm -rf ${CURRENT_DIR}/${BUILD_EXTENSION_PACKAGE_NAME}.*.so + echo "extension file: "${CURRENT_DIR}/${BUILD_EXTENSION_PACKAGE_NAME}.*.so"" +fi + + +# Build image +if [ -f "${HOST_DOCKERFILE_PATH}" ]; then + echo "WARN: Remove previous Dockerfile" + rm -f "${HOST_DOCKERFILE_PATH}" +fi + +echo "WARN: cp BaseDockerfile to Dockerfile" +cp "${BASE_DOCKERFILE_PATH}" "${HOST_DOCKERFILE_PATH}" + +if [ -f "${HOST_SUBMITTER_DOCKERFILE}" ]; then + echo "WARN: Found submitter's Dockerfile, merging submitter's Dockerfile to Dockerfile" + cat "${HOST_SUBMITTER_DOCKERFILE}" >> "${HOST_DOCKERFILE_PATH}" +fi + +docker build -t ${DOCKER_IMAGE} ./ + +# Setup container by Dockerfile +docker run -itd \ +--gpus all \ + --net=host --uts=host --ipc=host \ + --privileged \ + -w ${MODEL_DIR} \ + --shm-size="${SHM_SIZE}" \ + --volume ${SOURCE_DATA_DIR}:${MAP_DATA_DIR} \ + --volume ${PROJ_DIR}:${WORK_DIR} \ + --name="${CONTAINER_NAME}" ${CONTAINER_MOUNTS} \ + "${DOCKER_IMAGE}" + +# make sure container has time to finish initialization +# TODO: Uncomment +#sleep 30 +docker exec -it "${CONTAINER_NAME}" true + +mkdir -p ${RESULTS_DIR} +docker exec -it "${CONTAINER_NAME}" sh -c "chmod 777 run_training.sh" + +# TODO: Remove pip source +docker exec -it "${CONTAINER_NAME}" /bin/bash -c "pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple" + +docker exec -it "${CONTAINER_NAME}" /bin/bash -c "source ${CONTAINER_ENVIRONMENT_VARIABLES_PATH};python3 prepare.py --name ${SUBMITTER} --data_dir ${MAP_DATA_DIR}" \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/create_contaner_bi.sh b/nlp/language_model/bert_sample/pytorch/base/create_contaner_bi.sh new file mode 100644 index 000000000..0c736a36e --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/create_contaner_bi.sh @@ -0,0 +1,184 @@ +# ================================================= +# Constants +# ================================================= + +source ../../../iluvatar/bert/config/environment_variables.sh + +MODEL="bert" +export MODEL 
+DOCKER_IMAGE="perf:${MODEL}" +NEXP=5 + +# TODO: Add to Dockerfile +WORK_DIR="/workspace/baai-perf" +MODEL_DIR="${WORK_DIR}/benchmarks/${MODEL}/pytorch" + +CURRENT_DIR=$(cd `dirname $0`; pwd) +PROJ_DIR="${CURRENT_DIR}/../../../" +BUILD_EXTENSION_DIR="${CURRENT_DIR}/build" +BUILD_EXTENSION_PACKAGE_NAME="ext_ops" + +BASE_DOCKERFILE_PATH="${CURRENT_DIR}/BaseDockerfile" +HOST_DOCKERFILE_PATH="${CURRENT_DIR}/Dockerfile" + +SOURCE_DATA_DIR="" +MAP_DATA_DIR="/mnt/dataset/perf/${MODEL}" +SUBMITTER="iluvatar" +CONFIG="" + +: "${CLEAR_CACHES:=1}" +SHM_SIZE="32g" + + +# ================================================= +# Parse arguments +# ================================================= + +i=2 +TRAINING_SCRIPT_ARGS="$@" +for arg in "$@" +do + if [[ $arg =~ "--data_dir" ]]; then + if [[ $arg =~ "=" ]]; then + kv=(${arg//=/ }) + SOURCE_DATA_DIR=${kv[1]} + TRAINING_SCRIPT_ARGS=${TRAINING_SCRIPT_ARGS/$arg/"--data_dir ${MAP_DATA_DIR}"} + else + SOURCE_DATA_DIR=${!i} + TRAINING_SCRIPT_ARGS=${TRAINING_SCRIPT_ARGS/"--data_dir ${!i}"/"--data_dir ${MAP_DATA_DIR}"} + fi + + elif [[ $arg =~ "--name" ]]; then + if [[ $arg =~ "=" ]]; then + kv=(${arg//=/ }) + SUBMITTER=${kv[1]} + else + SUBMITTER=${!i} + fi + + elif [[ $arg =~ "--config" ]]; then + if [[ $arg =~ "=" ]]; then + kv=(${arg//=/ }) + CONFIG=${kv[1]} + else + CONFIG=${!i} + fi + fi + + let i++ +done + + +# ================================================= +# Check arguments +# ================================================= + +if [[ "${SOURCE_DATA_DIR}" == "" ]]; then + echo "ERROR: data_dir is not given, please set --data_dir " + exit 1 +fi + +if [[ "${CONFIG}" == "" ]]; then + echo "ERROR: config is not given, please set --config " + exit 1 +fi + +CONTAINER_SUBMITTER_DIR="${WORK_DIR}/${SUBMITTER}" +HOST_SUBMITTER_DIR="${PROJ_DIR}/${SUBMITTER}" + +CONTAINER_ENVIRONMENT_VARIABLES_PATH=${CONTAINER_SUBMITTER_DIR}/${MODEL}/config/environment_variables.sh +HOST_ENVIRONMENT_VARIABLES_PATH="${HOST_SUBMITTER_DIR}/${MODEL}/config/environment_variables.sh" + +HOST_SUBMITTER_DOCKERFILE="${PROJ_DIR}/${SUBMITTER}/${MODEL}/config/Dockerfile" +CONTAINER_NAME="bert_test" + +if [ ! 
-f "${HOST_ENVIRONMENT_VARIABLES_PATH}" ]; then + touch "${HOST_ENVIRONMENT_VARIABLES_PATH}" +fi + +source ${HOST_ENVIRONMENT_VARIABLES_PATH} + +RESULTS_DIR="${PROJ_DIR}/${SUBMITTER}/${MODEL}/results" +LOG_FILE_BASE="${RESULTS_DIR}/config_${CONFIG}_experiment" + +echo "======================================" +echo "Arguments" +echo "---------" + +echo "MODEL = ${MODEL}" +echo "CONTAINER_NAME = ${CONTAINER_NAME}" +echo "DOCKER_IMAGE = ${DOCKER_IMAGE}" +echo "MODEL_DIR = ${MODEL_DIR}" +echo "SUBMITTER = ${SUBMITTER}" +echo "CONTAINER_SUBMITTER_DIR = ${CONTAINER_SUBMITTER_DIR}" +echo "HOST_SUBMITTER_DOCKERFILE = ${HOST_SUBMITTER_DOCKERFILE}" +echo "CONFIG = ${CONFIG}" +echo "CONTAINER_MOUNTS = ${CONTAINER_MOUNTS}" +echo "TRAINING_SCRIPT_ARGS = ${TRAINING_SCRIPT_ARGS[*]}" +echo "CURRENT_DIR = ${CURRENT_DIR}" +echo "CONTAINER_ENVIRONMENT_VARIABLES_PATH = ${CONTAINER_ENVIRONMENT_VARIABLES_PATH}" +echo "RESULTS_DIR = ${RESULTS_DIR}" +echo "LOG_FILE_BASE = ${LOG_FILE_BASE}" +echo "SHM_SIZE = ${SHM_SIZE}" +echo "======================================" + + +# ================================================= +# Training +# ================================================= + +# Cleanup container +# cleanup_docker() { +# docker container rm -f "${CONTAINER_NAME}" || true +# } +# cleanup_docker +# trap 'set -eux; cleanup_docker' EXIT + +# Clean built extension +if [ -d "${BUILD_EXTENSION_DIR}" ]; then + echo "WARN: Delete built extension" + rm -rf "${BUILD_EXTENSION_DIR}" + rm -rf ${CURRENT_DIR}/${BUILD_EXTENSION_PACKAGE_NAME}.*.so + echo "extension file: "${CURRENT_DIR}/${BUILD_EXTENSION_PACKAGE_NAME}.*.so"" +fi + + +# Build image +if [ -f "${HOST_DOCKERFILE_PATH}" ]; then + echo "WARN: Remove previous Dockerfile" + rm -f "${HOST_DOCKERFILE_PATH}" +fi + +echo "WARN: cp BaseDockerfile to Dockerfile" +cp "${BASE_DOCKERFILE_PATH}" "${HOST_DOCKERFILE_PATH}" + +if [ -f "${HOST_SUBMITTER_DOCKERFILE}" ]; then + echo "WARN: Found submitter's Dockerfile, merging submitter's Dockerfile to Dockerfile" + cat "${HOST_SUBMITTER_DOCKERFILE}" >> "${HOST_DOCKERFILE_PATH}" +fi + +docker build -t ${DOCKER_IMAGE} ./ + +# Setup container by Dockerfile +docker run -itd \ + --net=host --uts=host --ipc=host \ + --privileged \ + -w ${MODEL_DIR} \ + --shm-size="${SHM_SIZE}" \ + --volume ${SOURCE_DATA_DIR}:${MAP_DATA_DIR} \ + --volume ${PROJ_DIR}:${WORK_DIR} \ + --name="${CONTAINER_NAME}" ${CONTAINER_MOUNTS} \ + "${DOCKER_IMAGE}" + +# make sure container has time to finish initialization +# TODO: Uncomment +#sleep 30 +docker exec -it "${CONTAINER_NAME}" true + +mkdir -p ${RESULTS_DIR} +docker exec -it "${CONTAINER_NAME}" sh -c "chmod 777 run_training.sh" + +# TODO: Remove pip source +docker exec -it "${CONTAINER_NAME}" /bin/bash -c "pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple" + +docker exec -it "${CONTAINER_NAME}" /bin/bash -c "source ${CONTAINER_ENVIRONMENT_VARIABLES_PATH};python3 prepare.py --name ${SUBMITTER} --data_dir ${MAP_DATA_DIR}" \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/2048_shards_varlength.chk b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/2048_shards_varlength.chk new file mode 100644 index 000000000..830fb5eb5 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/2048_shards_varlength.chk @@ -0,0 +1,2048 @@ +part_00000_of_02048.hdf5 ac517231f748d9ae0dad04cce21ab01d +part_00001_of_02048.hdf5 72eb0206ee55007fef7697887a9a9854 +part_00002_of_02048.hdf5 
3a32e5dbc7f778f103a46e63f829b2a7 +part_00003_of_02048.hdf5 b0d3b517a6fdf06cdba6eabc5353ac69 +part_00004_of_02048.hdf5 2bd928b45c8e3eb21263ed561c636101 +part_00005_of_02048.hdf5 feeb99a68cf1f7b46cec3798ad7039bd +part_00006_of_02048.hdf5 7b37c353db09c52eb097691ae5c31d8c +part_00007_of_02048.hdf5 3adef5c0b25c407ff705832c646eaa5b +part_00008_of_02048.hdf5 f843a3f740659ef85240887829e2cd0b +part_00009_of_02048.hdf5 33f02eb4e489a790b57cd61ea49cb247 +part_00010_of_02048.hdf5 2188a09df8316ead693d45db26c93289 +part_00011_of_02048.hdf5 d5ed18add059adb40167df7b6d9b78e2 +part_00012_of_02048.hdf5 c5848e8d70f104c3d838eb5944d3c891 +part_00013_of_02048.hdf5 c53bf0fe33509cc8183faf2b7a9b8929 +part_00014_of_02048.hdf5 7d0f79579baf22b3537d15b749865896 +part_00015_of_02048.hdf5 873189f098e7f86d07a493cc966fab4b +part_00016_of_02048.hdf5 c7017402069bc19dd5a8d7a7ce92f04c +part_00017_of_02048.hdf5 5a016fcc2dc8b59d7caaacac8525331e +part_00018_of_02048.hdf5 c3721c16dffec8a213efc382a8702692 +part_00019_of_02048.hdf5 5bc52bcba2465b174b19249a70b679bc +part_00020_of_02048.hdf5 0724c25f8eadab720b4d34433ba85a22 +part_00021_of_02048.hdf5 8522fd94f477609b3c90a99bd58a2d62 +part_00022_of_02048.hdf5 d974a0a6fdb86c8661b3c06cde955c9e +part_00023_of_02048.hdf5 dd54cdbc5539ea41a0f3a05e68218631 +part_00024_of_02048.hdf5 742cecbcf1c9fdd6960d7c237a63b602 +part_00025_of_02048.hdf5 7d684040ac11bff2f624684524437a46 +part_00026_of_02048.hdf5 e9b5626339242dc61e63370f8b7c7e52 +part_00027_of_02048.hdf5 adf4175da231082a5e9f2b97fccdcedf +part_00028_of_02048.hdf5 f92fefaa5288a1b331f50ab0b5d9b016 +part_00029_of_02048.hdf5 b25111930d986497d6e00cad19ec5c30 +part_00030_of_02048.hdf5 15ee4cd55ada1579f880e3a0288b014e +part_00031_of_02048.hdf5 90220b6fab5ffb1120a28e0bebb92139 +part_00032_of_02048.hdf5 c6f405d259ab900d0dec701ec3f33191 +part_00033_of_02048.hdf5 6f2c6e395d4925df8f88c001052f311e +part_00034_of_02048.hdf5 773eb1e8349ac73d038c9377acf742a5 +part_00035_of_02048.hdf5 400851d03afcbad124cc639f79dfd161 +part_00036_of_02048.hdf5 4bb8a508fb09a1b923fd852604ad4d57 +part_00037_of_02048.hdf5 f8639af21ec6c93638b84c5c9f46263d +part_00038_of_02048.hdf5 30e8d4f33732e25268c53d36dc51e394 +part_00039_of_02048.hdf5 0c60280a020ffb350851f1e6f100a564 +part_00040_of_02048.hdf5 3fb33476f560019ebc76095e3e2ef432 +part_00041_of_02048.hdf5 1deddaac3ac84a41bcbbd596cf1790c1 +part_00042_of_02048.hdf5 3c66716ab83b27c1048c371b584de08b +part_00043_of_02048.hdf5 0df7450ce4f356c418134c341228588c +part_00044_of_02048.hdf5 e08de48adce90607c074552ca231fb55 +part_00045_of_02048.hdf5 2d86c4ccd2436a19f062ecebfe7cb3bf +part_00046_of_02048.hdf5 d7a9f3978bef69ccb0a6b3b0c7ebf784 +part_00047_of_02048.hdf5 5f5861661687330df56e40e5ae8d10c8 +part_00048_of_02048.hdf5 480a1d72986b4976898f25b3cbeb8a37 +part_00049_of_02048.hdf5 312466a38e51d445d6c3578db65801db +part_00050_of_02048.hdf5 23616caa8bfbe61988bf5ac69751f45a +part_00051_of_02048.hdf5 1b847150e7ea88db2f6b6f2171b616a9 +part_00052_of_02048.hdf5 c02bb625bdc466a0a189d6847ab3e770 +part_00053_of_02048.hdf5 6501231222bbcac3ffbb5cebc3468299 +part_00054_of_02048.hdf5 f21ffafbd44504da49f5b995b9bd25ce +part_00055_of_02048.hdf5 b3135f5688dd46b5332ca61bf90cd26e +part_00056_of_02048.hdf5 0d6c66bfdbe7b3d575cbe5ba4a04014f +part_00057_of_02048.hdf5 706187a92b002d190a964d22d1c75779 +part_00058_of_02048.hdf5 c3a5a7c3883f1d674204e2b4327c489c +part_00059_of_02048.hdf5 a04c10a4e9c875c19bcf8d33abaf8d53 +part_00060_of_02048.hdf5 fd89fa6cc873ae5c9576304f0ca1180c +part_00061_of_02048.hdf5 063e958a56189a4c912e69103309c218 +part_00062_of_02048.hdf5 
844111d3923ea463a7c135ca2219ef41 +part_00063_of_02048.hdf5 348b058cc8465460070d1b1d78dcc87d +part_00064_of_02048.hdf5 12dbfbf88a48785d46cd69139bd1dc75 +part_00065_of_02048.hdf5 692f2e0f99bde94750498c5665158cdc +part_00066_of_02048.hdf5 041f140df3f3dad66c6b47b40a0296e3 +part_00067_of_02048.hdf5 eeb836f177be6f7afa572fca8cd36b52 +part_00068_of_02048.hdf5 dec3a6143f422a50ceed7fa9924d53f3 +part_00069_of_02048.hdf5 277a7885e36eaec7c7d425aa1baf9275 +part_00070_of_02048.hdf5 0b4e913fc88544a5cf8081e5ff4c6227 +part_00071_of_02048.hdf5 84870168c76045746b066d86f4048892 +part_00072_of_02048.hdf5 154934587a785e4fc5d0a6df9b4a3be2 +part_00073_of_02048.hdf5 1151aa05f7bd724bbdc889aee546e840 +part_00074_of_02048.hdf5 faa07c8e13b9ba89bc0f001653beeec9 +part_00075_of_02048.hdf5 699f68cab3474af86a91262e22a251c3 +part_00076_of_02048.hdf5 8545df227bc6114b539cd4cb647e10c0 +part_00077_of_02048.hdf5 b3f5dfe52489ab274aae8f8b148853d9 +part_00078_of_02048.hdf5 a58f058988e038d0cf77715069f85ef7 +part_00079_of_02048.hdf5 a1f77de1318e09fa82ae3e268ca20213 +part_00080_of_02048.hdf5 bb5c3cce7540964e484868e082e3d03d +part_00081_of_02048.hdf5 8ae1b874af3ec9ce591e3c35764b5bcc +part_00082_of_02048.hdf5 2fd796e3d021eeed98cb8a29cef5f1ea +part_00083_of_02048.hdf5 bafaef8e03b21f2d459164acac82b608 +part_00084_of_02048.hdf5 afc1a7ae7c0887641991c7ba4eea4f4b +part_00085_of_02048.hdf5 3b143f182fc5e603fb0961f2253f0e9a +part_00086_of_02048.hdf5 a6e771b920611cd93c7b46f15aecec1b +part_00087_of_02048.hdf5 478fb4b4d2a99deade384f01513a8d2d +part_00088_of_02048.hdf5 c97b4f4f17c72bebce9ee946ce06a5e0 +part_00089_of_02048.hdf5 dfa8282917495d4aa425e9eb435502e7 +part_00090_of_02048.hdf5 d800d623e77b225f3a9f4ae89e17636e +part_00091_of_02048.hdf5 0bed36e17602bbd02001333f64c49b69 +part_00092_of_02048.hdf5 b471888097d7af7e48e8c208bd46fa0f +part_00093_of_02048.hdf5 62d596aebd4510efeb8c7a366605e885 +part_00094_of_02048.hdf5 a21bc7aa3ddda68b657a4b7c5a2db3df +part_00095_of_02048.hdf5 18e609a838e80150031e27293e669ecd +part_00096_of_02048.hdf5 c64dcb84005149182394e0236513ed0e +part_00097_of_02048.hdf5 86e178ea04f6c951e04d3540429b5e9a +part_00098_of_02048.hdf5 885d8406335fa23a60d09e7e642817ce +part_00099_of_02048.hdf5 4e9172e3ee20481d492edc372023095e +part_00100_of_02048.hdf5 4a0d09d0e230bad0e98db011989cac3c +part_00101_of_02048.hdf5 dd8225ccc3a5829680bc4e69aac28185 +part_00102_of_02048.hdf5 244778a226da1cd8480c07f2c755eb6f +part_00103_of_02048.hdf5 d16a816ef9522c36e49fc54adcb19fe1 +part_00104_of_02048.hdf5 319a5bcc2cff604804b7986434f4b826 +part_00105_of_02048.hdf5 acdfcfbadf08b03b0b2bb2390564816b +part_00106_of_02048.hdf5 3f3db1affc6ca2c435d94df09fe48d5b +part_00107_of_02048.hdf5 0eac91209c742c5ab4e411d210136fe7 +part_00108_of_02048.hdf5 5aa9731c683e405fedf5e792a8afcbb3 +part_00109_of_02048.hdf5 9be685644423a9352eb86ef7a3ac1cd8 +part_00110_of_02048.hdf5 7e1bfd32a9958bc145305c2c8d431c92 +part_00111_of_02048.hdf5 d4fe42db38e011b98a26a9fa65ed3ac0 +part_00112_of_02048.hdf5 dbff9603ef52ad8a886f18842ce7acdd +part_00113_of_02048.hdf5 6e32afb3eb59cb8037977ce5c667a7a5 +part_00114_of_02048.hdf5 68b435e78759f1657f5ea2a193f32eb8 +part_00115_of_02048.hdf5 7267a99ea4f6de96ca9e15328070585c +part_00116_of_02048.hdf5 5757d8d2da49f568d8409aa29f59aa8a +part_00117_of_02048.hdf5 979131096be11ec1bbc51b81466487f9 +part_00118_of_02048.hdf5 5f4124110f29f7325d97175d10da674e +part_00119_of_02048.hdf5 6de0ffb0777854343266f20e01af4c50 +part_00120_of_02048.hdf5 55b3b9ecf01328c3d21adc6ea58473e8 +part_00121_of_02048.hdf5 a9d0012db09885f7a71f9a9e649979d4 +part_00122_of_02048.hdf5 
c858bc793766b37b4937693296b4140f +part_00123_of_02048.hdf5 0eff9fdf967716f7a887193b23d89ad1 +part_00124_of_02048.hdf5 1f6d88c8e76a40f273c5949a137ff029 +part_00125_of_02048.hdf5 44a82be59c8159c75646fdd7efdddd70 +part_00126_of_02048.hdf5 20f1e4b1bb9a3ced4bf4d9baeb75c97e +part_00127_of_02048.hdf5 247d7b975ecdcd78b8468f3934341e27 +part_00128_of_02048.hdf5 a22a18ee9b769481ffe269de8e9c8ffd +part_00129_of_02048.hdf5 76e982f7c9d3d56aa98d01ae1e5b4a7b +part_00130_of_02048.hdf5 5c1afaca79589a6a8edbe6d0d97d7e99 +part_00131_of_02048.hdf5 ec9075de97c393262983e408320be20a +part_00132_of_02048.hdf5 990e225a693461af1f31bc790441500b +part_00133_of_02048.hdf5 2e714eaf77ddf60ad1bb7f04041d7de4 +part_00134_of_02048.hdf5 5267fea23f6d837ca90fc385146ac998 +part_00135_of_02048.hdf5 0eb11d482153ce8b40c1a78304f9b0e5 +part_00136_of_02048.hdf5 27aafeaeaace540327954ddb770ca193 +part_00137_of_02048.hdf5 2341f737f6c5b9a5c9a127701172a56e +part_00138_of_02048.hdf5 448c2c6899001245087aa5d31e4ef6e7 +part_00139_of_02048.hdf5 6268386262d6f5f0caff3a4040aff1f3 +part_00140_of_02048.hdf5 794a91abc82a772ec36927eb833b76d8 +part_00141_of_02048.hdf5 3f44f428c02fda180c76fe6fc9465ab4 +part_00142_of_02048.hdf5 d0ab7052124a5a94ebe2da56437845e7 +part_00143_of_02048.hdf5 e93d48a4892da729c4ffbc7cebd39193 +part_00144_of_02048.hdf5 719fce7ab632f9e0c3909d7013c48475 +part_00145_of_02048.hdf5 6136b920b33b3419958bc4c2349c7cac +part_00146_of_02048.hdf5 34e812868aece378e0c4fba49e0741cc +part_00147_of_02048.hdf5 da1fae99e89ecab958629416bd89e716 +part_00148_of_02048.hdf5 80fb7d76c1223bb2f3aeccec0355faf9 +part_00149_of_02048.hdf5 b3c2833d36004ae7f6d78ad26e583dff +part_00150_of_02048.hdf5 45f74f224c598ee7ff6a86e0ddc8c091 +part_00151_of_02048.hdf5 1fc784799883101e76f6efffb691fc76 +part_00152_of_02048.hdf5 91714d2831018894887ccce53d2ba0ba +part_00153_of_02048.hdf5 264c576e8e8eb5065144ee7b7fcdc348 +part_00154_of_02048.hdf5 bb38ddd7f2458a08387c3a6646b254a4 +part_00155_of_02048.hdf5 8abd1f47fc9c26f5db684c957577dbe7 +part_00156_of_02048.hdf5 2cb5e296cccf685c111a50594777b88f +part_00157_of_02048.hdf5 e62b850549a34ac9d4ab9ed80748f2c2 +part_00158_of_02048.hdf5 1f3399d8b40a86fcaff8eb21835e5033 +part_00159_of_02048.hdf5 793642f3157e8b8449c7cbad3617bb04 +part_00160_of_02048.hdf5 a43c7b1a21d80237bffe045cf822d237 +part_00161_of_02048.hdf5 94f0403c2a1b9c56c99e4f29400dfafe +part_00162_of_02048.hdf5 ab1d9fb3c1561e6b5123b5a3c1909306 +part_00163_of_02048.hdf5 d7dde3559804675eb574af6fefb30e0f +part_00164_of_02048.hdf5 2840dd95f77addcf3cedaf5942d5560d +part_00165_of_02048.hdf5 766e2bb9819228910f510e3440ea15fa +part_00166_of_02048.hdf5 77d2ab00e8c70aa2c9d4bc72b53bb510 +part_00167_of_02048.hdf5 10038e1a397360ebc2bd3531382a61fc +part_00168_of_02048.hdf5 f7d76ed48216ad5b1d11d7aea3376120 +part_00169_of_02048.hdf5 f8775d0cd22c1a2948a241002e550b0a +part_00170_of_02048.hdf5 163454767bc7dcd1499c9ee34103b1ec +part_00171_of_02048.hdf5 0c6e853c572d5684ec8486d176cdb506 +part_00172_of_02048.hdf5 963b029aec6e9e65d8e28fc973575d31 +part_00173_of_02048.hdf5 a1f4881ca1939b34faf0104998b828f3 +part_00174_of_02048.hdf5 19b3a8a546ab8e25db5d2a339d060fe3 +part_00175_of_02048.hdf5 2ed4be6dd62472e2082552ab4036eca3 +part_00176_of_02048.hdf5 2b91afd5c9ff7b36e0d8858e78337c13 +part_00177_of_02048.hdf5 7a4e1b9dcfc3693b28a64aabea78b44b +part_00178_of_02048.hdf5 d5dbcc8187e09bd4ae61e360688d8b49 +part_00179_of_02048.hdf5 7f840bc906183f4a9d2e457ac8fb2ea8 +part_00180_of_02048.hdf5 e4e8202bfabdd2b2f78616a14c3aeab7 +part_00181_of_02048.hdf5 c005ba8800fb8bd5d09af21159ca3361 +part_00182_of_02048.hdf5 
a1095e071c86f907b312618be885413e +part_00183_of_02048.hdf5 c442863a70e331cea7cf0df951503925 +part_00184_of_02048.hdf5 ccd0456d81f3c6ef5adf42bacbde14b4 +part_00185_of_02048.hdf5 1a9806191c20097b01b1855fc94ca2d7 +part_00186_of_02048.hdf5 58caf9461dc9430e849e9b760c31d05d +part_00187_of_02048.hdf5 ef9e1cac4a1ab02ca3a727531fda4b03 +part_00188_of_02048.hdf5 7d83babd6e25d5f522ef6b3d77f19ed0 +part_00189_of_02048.hdf5 917a5e41d3285194cb9663eeba33e42d +part_00190_of_02048.hdf5 3bf24d7f655e543cfe99e3faa7085909 +part_00191_of_02048.hdf5 4815e50f9f1d0c4910df7a6c7a6c28ed +part_00192_of_02048.hdf5 e74cffb54aeffdbb43fdfa93d0bf23fa +part_00193_of_02048.hdf5 0e80c1f5fd09f6c8f349b3117cf5ad76 +part_00194_of_02048.hdf5 63f660ac7a1dbe7590602e3cceb85512 +part_00195_of_02048.hdf5 999fe1e2d2dba70b9ac72c34304ddca8 +part_00196_of_02048.hdf5 d9754b7e752bf9ccd7732ba5301c926c +part_00197_of_02048.hdf5 f02c51f4c036b947daf70ba82c781d0a +part_00198_of_02048.hdf5 7348b7cc4cf3f7331004a911843bd247 +part_00199_of_02048.hdf5 74586c4d4219fe507d3faa46d93fd82a +part_00200_of_02048.hdf5 bb9928a1665ddee4ba576ddb1723f1f4 +part_00201_of_02048.hdf5 ccc981db5c5bff00386cadc198d46a80 +part_00202_of_02048.hdf5 0690a40ccbdbce25070a0176cd624440 +part_00203_of_02048.hdf5 535524e286e1fb46a44d01c1cb5419a2 +part_00204_of_02048.hdf5 ca6ad434f8cf0c4268ca3c18b97809c9 +part_00205_of_02048.hdf5 a90788f16e0ae6021765253a91d00dcc +part_00206_of_02048.hdf5 ed1070da26aadb5f5ebf531263425782 +part_00207_of_02048.hdf5 2decd52b27f9c9a1d58e87554e39eb00 +part_00208_of_02048.hdf5 b7d7fdb2c2c18dee591e49c10fd751e9 +part_00209_of_02048.hdf5 15f423b5170222d20bd0392c32947408 +part_00210_of_02048.hdf5 88d8a64a1468852d5a6584bd060951ec +part_00211_of_02048.hdf5 4a5ed5896ce5d906f354f22b7fde4f14 +part_00212_of_02048.hdf5 89a2b7b572a2d4135c2a8d834737b7f5 +part_00213_of_02048.hdf5 c561f029a7f4b3970df2c0ec80ace10e +part_00214_of_02048.hdf5 216fa4abd7108f2ffaa8fef75db0928f +part_00215_of_02048.hdf5 b75da3c93e6dbb33beee78803051db6d +part_00216_of_02048.hdf5 a7f0f1261f0b2cfa4a37386b8513a6c4 +part_00217_of_02048.hdf5 a44324846631f44d7abc4f146fbd8cf3 +part_00218_of_02048.hdf5 c1cd59c8706f17b32e032788e68d509d +part_00219_of_02048.hdf5 2c993a0cb1cd7f4603385b2f8e1e52aa +part_00220_of_02048.hdf5 76bfd04bb75337b488b287a5eb2df4a7 +part_00221_of_02048.hdf5 96fd48b3c4160c9fc1770c987e2da3a9 +part_00222_of_02048.hdf5 f24891eaec4a9affe6b3ca521b2ea485 +part_00223_of_02048.hdf5 9a55330440de7d21b556e619929adffe +part_00224_of_02048.hdf5 33a7fd1f1c4fdf9bd3f8ba5bb9c8e39a +part_00225_of_02048.hdf5 83644a826f2897e468afcb418c3a0577 +part_00226_of_02048.hdf5 be8339607a9edefa50d69bce28343980 +part_00227_of_02048.hdf5 4bd2b3128661d72873ebf2b3579c686a +part_00228_of_02048.hdf5 d17904f1067a8f272964ab407ed9e62e +part_00229_of_02048.hdf5 317ccc5ead4ee6711a358216f331837a +part_00230_of_02048.hdf5 a61320fc706bf1db1e71e62a13d7bd58 +part_00231_of_02048.hdf5 e92ca138ea8a8f4a57bac2e26d8091e2 +part_00232_of_02048.hdf5 8536228bab1f27fe385cf2d9aeea7168 +part_00233_of_02048.hdf5 70ad197b1e226880ccbbd761c5f95223 +part_00234_of_02048.hdf5 51215462d19e7c7a7fcc25d87bba896b +part_00235_of_02048.hdf5 47f87fd6e8ed59a387965830def68690 +part_00236_of_02048.hdf5 f7eeae103d9466c2490d2c4690a9d202 +part_00237_of_02048.hdf5 207b0bc8683f98e91b89d4b70af3b04f +part_00238_of_02048.hdf5 62cc5eea13ffb957d61547c035d59f02 +part_00239_of_02048.hdf5 9143b3b5fb8ecd226aa3266134650300 +part_00240_of_02048.hdf5 28d7eccc2f0f9df06bfbd1ae46860039 +part_00241_of_02048.hdf5 ee3e982f1b0bf613e87c5c07fb4e61a6 +part_00242_of_02048.hdf5 
6a5d79c065502ae7fe82e3179737208d +part_00243_of_02048.hdf5 2f979f1d7dc8d30606695bdc129455db +part_00244_of_02048.hdf5 5ebd1ff73defe4d67afa596c6781fe3d +part_00245_of_02048.hdf5 151f7a7b642f393d46be7b5f99681a60 +part_00246_of_02048.hdf5 d9835809e8f4a08e1280c9d608403e67 +part_00247_of_02048.hdf5 31f021ce68aa092d697e91e394590a63 +part_00248_of_02048.hdf5 4dcb59d9aa4e8d5330af41e7d3a865b6 +part_00249_of_02048.hdf5 7fce4d9e07b75123b1dc8cc50b6c34f0 +part_00250_of_02048.hdf5 763fa0ffe95d91c67ac82d1703cd63f1 +part_00251_of_02048.hdf5 07e94719048190b00f6fafe8308aa780 +part_00252_of_02048.hdf5 fd23c2b8d4130e5a313f0f364e89c4f6 +part_00253_of_02048.hdf5 4ef0c018798ec77524cea123c62f1ea6 +part_00254_of_02048.hdf5 34671a86532cf41b370172a25ba22dd3 +part_00255_of_02048.hdf5 7c402275715bf45e50e05415d52d8e8f +part_00256_of_02048.hdf5 bb0a6a4eb8afd971e3ced22efc634497 +part_00257_of_02048.hdf5 81566ec1830119138e293761ca9776fc +part_00258_of_02048.hdf5 848a566d5b792253c9eba5f21d999501 +part_00259_of_02048.hdf5 83c4dacffd04cd2735f94c468107a8ba +part_00260_of_02048.hdf5 258cdd0acc54e8c5e20d17b4313a0c03 +part_00261_of_02048.hdf5 6e25ba2f1a18a83e23583465f51acc20 +part_00262_of_02048.hdf5 2229bdbc4e0dbdfba786be3afcbbf5a5 +part_00263_of_02048.hdf5 83c4747e5c23f568150fc84f4a75aabd +part_00264_of_02048.hdf5 e9f2bc5b003cd86922e83f64c26656ea +part_00265_of_02048.hdf5 fe75a45747ae38e3b558680fa45b3e61 +part_00266_of_02048.hdf5 c32c437ab98b37b7b416c2163a1553bd +part_00267_of_02048.hdf5 9c1c50fb78f7848d298fb0b50209b204 +part_00268_of_02048.hdf5 fb12cd3db64b9c1f04b5e07de30fb294 +part_00269_of_02048.hdf5 d11806c036ef4f6976eef4c1518ff85b +part_00270_of_02048.hdf5 4b320ba8d5eb59ea017435a3cf9ffe54 +part_00271_of_02048.hdf5 2310b56b29aa46254c9757c9ebc5be7f +part_00272_of_02048.hdf5 9f3ca96046f3ebce77b3b383e04ad07e +part_00273_of_02048.hdf5 31b87884496d31bd9508542ea2440554 +part_00274_of_02048.hdf5 a05e7009138f0d0802595e15078a9351 +part_00275_of_02048.hdf5 eeac7ebfa66f1c689bfb51ac7da966ce +part_00276_of_02048.hdf5 3aaf5d7e3cdd0df6958b6df815154142 +part_00277_of_02048.hdf5 8f80e00891fccf61c450db2225edd8fb +part_00278_of_02048.hdf5 a74569099ea181e97d0cc369d51c1ba7 +part_00279_of_02048.hdf5 53fcab8b56c15e5b4ca3b99cb703d91f +part_00280_of_02048.hdf5 69cc80597f6440d8321f643f3df91701 +part_00281_of_02048.hdf5 7d2101202e1d7cefbac6778feda75d88 +part_00282_of_02048.hdf5 d262b384d050666421a3c3fdbd7e7e62 +part_00283_of_02048.hdf5 8e0cbf5a267d88adaa751974d2a5d31d +part_00284_of_02048.hdf5 76f21e93e66615869c19f08283c74a5b +part_00285_of_02048.hdf5 445624b144da304fb3b3882ac5f22e87 +part_00286_of_02048.hdf5 90ee4f16b167cb79d7363478176cfecc +part_00287_of_02048.hdf5 804409ee87786377397e8fdf9dc69709 +part_00288_of_02048.hdf5 e03aafbc157cf02d767fd6b2f3f55e91 +part_00289_of_02048.hdf5 1131f6da9b720347f702b512abd680f7 +part_00290_of_02048.hdf5 7b5733ca657ce434cfa82ab59df47e46 +part_00291_of_02048.hdf5 27895101e9cb0ce362f124b5675818df +part_00292_of_02048.hdf5 3910dbc5a374947680906efe41b35fb4 +part_00293_of_02048.hdf5 83f44a5cf208a82cd71d46a6bd97532d +part_00294_of_02048.hdf5 09b6f1ff752ac288a5a08b64b7c9e9bf +part_00295_of_02048.hdf5 1b9de1bc26767e019d44b6062b80275b +part_00296_of_02048.hdf5 3df51c0ee39c3b57dea9ae949ddc4e63 +part_00297_of_02048.hdf5 c477b264b0adf829b3e4fe529a12ac8c +part_00298_of_02048.hdf5 d981bdadb5ef33965f5b681f95cacd73 +part_00299_of_02048.hdf5 1ecb87fc3edab4d34016bb1861f537c0 +part_00300_of_02048.hdf5 b03aedbdcec33caaffe656573791344e +part_00301_of_02048.hdf5 22cd7fb91ce62a8fec360c71d188082b +part_00302_of_02048.hdf5 
865d68725a89ffe6f6242ebb14cd462b +part_00303_of_02048.hdf5 5d39662cdead79ae7b545b23fd2704ef +part_00304_of_02048.hdf5 c75d2daa4b56e5dac90a55bcad1c00d3 +part_00305_of_02048.hdf5 982953831b61fc2c890e5b49694e2094 +part_00306_of_02048.hdf5 1b3fb6e2b4340c4240a1ff7b8914efff +part_00307_of_02048.hdf5 648560c3860935d1aaa6e3a05879b091 +part_00308_of_02048.hdf5 3d4ce928098d1c20887534fed3f9d9b4 +part_00309_of_02048.hdf5 ca7b48a1f655d2022dbb05485ad28ffb +part_00310_of_02048.hdf5 5f8cf62b9ac2647d91df82e267e70778 +part_00311_of_02048.hdf5 eb45b0497ac72ebf0443f2cde9b75b07 +part_00312_of_02048.hdf5 a9c7902925e152e9753711dd4a29e862 +part_00313_of_02048.hdf5 1aed5b2c68d0f58026d5006845912540 +part_00314_of_02048.hdf5 a67a4c25c4246c6ef4db47a43784be9a +part_00315_of_02048.hdf5 607109fbf48df9bedb64e615ad3b3b8f +part_00316_of_02048.hdf5 be54c95efb0c94af94527e6fa49b0118 +part_00317_of_02048.hdf5 41c73a777850fb0b3e0d571f576055cf +part_00318_of_02048.hdf5 46c686ec70280d375ebf55784176d0f1 +part_00319_of_02048.hdf5 8790601aca11bfe8c6553116f040c977 +part_00320_of_02048.hdf5 705655db72b045b7215fb0c5e0bf175f +part_00321_of_02048.hdf5 23ecfa80389bf22c7d401aaa4ee78077 +part_00322_of_02048.hdf5 e188e3b8ebce88975b1b1bfae768833b +part_00323_of_02048.hdf5 f03d67bad22275eb62071de93fde69ba +part_00324_of_02048.hdf5 56f7d4f116442e5001d9a53407b42afd +part_00325_of_02048.hdf5 1f3850ada69ae07b56e443ab00fdb86f +part_00326_of_02048.hdf5 f2230dbcc4df9c9eefcf9a062350bcc7 +part_00327_of_02048.hdf5 3772c580b0e6253e302100fd12004a3a +part_00328_of_02048.hdf5 17bf1a9bd4d931fe8817c4d6394c0573 +part_00329_of_02048.hdf5 f7b79262d006b56522da02a9440613ee +part_00330_of_02048.hdf5 a8bb2655cbb85b64f6a528f84c68fe42 +part_00331_of_02048.hdf5 62ccca89634d7419bea1aa13b7eee8cb +part_00332_of_02048.hdf5 587eeb366d246725210c0401355456f1 +part_00333_of_02048.hdf5 950cd096c7551a5b85a1ff81ac53b844 +part_00334_of_02048.hdf5 83915826604de84123ad64595a3c077a +part_00335_of_02048.hdf5 842e14b658c1727757fdd7da0b80905a +part_00336_of_02048.hdf5 58b782be61b007971ec1c32089aab3b5 +part_00337_of_02048.hdf5 030c04c1b7342f8e4ac954a248a50e2c +part_00338_of_02048.hdf5 fe3a7b20f78b847e08f6c6057aefd829 +part_00339_of_02048.hdf5 624f8d8b0959e8cb7426b7afa2eee7e1 +part_00340_of_02048.hdf5 ea07266aa6f9c20e6cd7c6643d51bd1d +part_00341_of_02048.hdf5 58c2db58aba3fcfd5d8b2c8e31b3560c +part_00342_of_02048.hdf5 639d0cbf932ac9adff5e8ee780bdd594 +part_00343_of_02048.hdf5 294ac9cc1e4a2c9b4834d3e593a5386e +part_00344_of_02048.hdf5 19f42bce8dc6f12ab77d9c60ef72edb4 +part_00345_of_02048.hdf5 31adc38a6025f1a95c6fbd9c6a7cd566 +part_00346_of_02048.hdf5 09198087b3a4476da4242fee4fbae990 +part_00347_of_02048.hdf5 c671b1323142bdc9eaca48892590108d +part_00348_of_02048.hdf5 1b0e7b3f252ee7f08306bd74db49d49b +part_00349_of_02048.hdf5 ecccd992727ec19d215c8fef594711f5 +part_00350_of_02048.hdf5 1d78049af90f8f9c826099fd3fd82bd5 +part_00351_of_02048.hdf5 f0a8658cfa7343038039595e670fe839 +part_00352_of_02048.hdf5 ff40c6e6ba5bfad1166025c89b80aa67 +part_00353_of_02048.hdf5 459f8aece676fdddd5913db4a5257864 +part_00354_of_02048.hdf5 141850c4a85d45d8ec074c15374d8666 +part_00355_of_02048.hdf5 b5be1b02b56866eb093f6aad952ac5d3 +part_00356_of_02048.hdf5 1d0e2d3f56667b11db71917aad30e503 +part_00357_of_02048.hdf5 210e1093e84c37095f7f50785d106554 +part_00358_of_02048.hdf5 72929728e78d9cc5cad338706344f5c4 +part_00359_of_02048.hdf5 3a1aaf0848fc1d3b91f42e4b2d3a7cd7 +part_00360_of_02048.hdf5 948aa83da5757d8a0c265f1241652b17 +part_00361_of_02048.hdf5 f437c3e0a02d7a8baf72068b6461736b +part_00362_of_02048.hdf5 
0093570384b5e8d0e4855e3d2572633d +part_00363_of_02048.hdf5 cc6d0312e1bb2c0caec0afa7e2609b1d +part_00364_of_02048.hdf5 df79005e3f39391b4b75cfb9f10454fe +part_00365_of_02048.hdf5 9be3f27eda89dd5c2b8193f4af9f6d68 +part_00366_of_02048.hdf5 5fbbd684fb0fddbebd641d866f7fa327 +part_00367_of_02048.hdf5 da30d1829ffc43da9afd22f7a3eee93d +part_00368_of_02048.hdf5 ac33336ec857b34830afb689a05b7ec9 +part_00369_of_02048.hdf5 7361c801e3ecba0dc77d995138eee9d9 +part_00370_of_02048.hdf5 7ff7340068075aeedbd3ee99e00a3747 +part_00371_of_02048.hdf5 64f1153eaacc0e9098cc52dc63cca2ba +part_00372_of_02048.hdf5 b40f2ae62c60cae33c80c05a73a90709 +part_00373_of_02048.hdf5 90a62a0273f1cbd41e6c178036031199 +part_00374_of_02048.hdf5 4024d7785b48a7def7688b7dda30b529 +part_00375_of_02048.hdf5 8b26c809be57179ec5815cc53f3a0e10 +part_00376_of_02048.hdf5 6ddb77ae024fe2d5b3c66de7e0496b38 +part_00377_of_02048.hdf5 5a939e53f15bba1734450d6112de58db +part_00378_of_02048.hdf5 b107b3de9758de60d0e97fe04e512dde +part_00379_of_02048.hdf5 b467ae689bc91203528ea72e0e056d5c +part_00380_of_02048.hdf5 4a234ef5c499297556e988df9721c153 +part_00381_of_02048.hdf5 38821f669aec0da13e9d0099801490a1 +part_00382_of_02048.hdf5 d5c73ea739246cafaf0050a8f2512157 +part_00383_of_02048.hdf5 baa04b0acf42aeb0d55f81bdaefd9851 +part_00384_of_02048.hdf5 eb235edadcf56f69485da6875ff87421 +part_00385_of_02048.hdf5 def1cd23ce87e88e77be042f8f57bd07 +part_00386_of_02048.hdf5 022016273bfed9ba6495b2415baa1005 +part_00387_of_02048.hdf5 cacb713e204358104584b613099e1b7f +part_00388_of_02048.hdf5 980fd410d6ec7caf53b010616a132a17 +part_00389_of_02048.hdf5 4c5db52db3987b26842a047506167123 +part_00390_of_02048.hdf5 b6c6aed8c390a4bafef4ebce13113a11 +part_00391_of_02048.hdf5 ce8d22d12fda59f5ba3a121444561321 +part_00392_of_02048.hdf5 d9b7c069d486606c30f2d8ed7376bebe +part_00393_of_02048.hdf5 333c3c63df584e7127084a33b7563301 +part_00394_of_02048.hdf5 aa55042bfad5254e735b45d902f2ed96 +part_00395_of_02048.hdf5 58a4b9f45f11da469208a3e7ca2bc3a9 +part_00396_of_02048.hdf5 1068978ad9d0377efd24ce2b063791fd +part_00397_of_02048.hdf5 8213d65b8fceb5e97abaf506ad8459dc +part_00398_of_02048.hdf5 2d1e56dd8e1f72f39aa72a3426a6fa8f +part_00399_of_02048.hdf5 070bb8bd3c3115bd191fc64887d939b0 +part_00400_of_02048.hdf5 ef52b224d2d461255117d4f91e107177 +part_00401_of_02048.hdf5 3041dc1cd6c12f3cca955417fa36e323 +part_00402_of_02048.hdf5 31ad6d9a4f6bfb2bf65f8cbd813a0952 +part_00403_of_02048.hdf5 e947f0a4e02f01bcc319229e384c5394 +part_00404_of_02048.hdf5 fb5537ddec91e3fe6e78095b3ae20c7f +part_00405_of_02048.hdf5 15c755a0c0cdfd38e4479e3fbfdc5c09 +part_00406_of_02048.hdf5 b2b57b0febb638910dbc547334728c7e +part_00407_of_02048.hdf5 95dbdc7724ba9612b135a1e91812014a +part_00408_of_02048.hdf5 7754c7e2d95122975a07158dff39154e +part_00409_of_02048.hdf5 4e08a4f1cb21d024a3225d60713d1ec5 +part_00410_of_02048.hdf5 ba14fcda44f5edbc0d60b1bcb1059fa5 +part_00411_of_02048.hdf5 3bd4fbaea97600a0458c761ee0015eba +part_00412_of_02048.hdf5 7ab4b4a19d3190740eb208feeb1579ff +part_00413_of_02048.hdf5 3712b47381d2fbc569e75294f8b022e6 +part_00414_of_02048.hdf5 04b53648a64780a3d986810c20ab2ef9 +part_00415_of_02048.hdf5 8e0e0cb7592d565105aa2c9c3ea843ed +part_00416_of_02048.hdf5 c5c25ecead484cb051833028503c3725 +part_00417_of_02048.hdf5 2f1465115ca872a34746679d494903ed +part_00418_of_02048.hdf5 7a86828d889888ac250dca75b1ce5ce1 +part_00419_of_02048.hdf5 a98c8d25da75295cbc88bd47d7cb88cc +part_00420_of_02048.hdf5 e18f2cc83726fde04be5f759f0f41db5 +part_00421_of_02048.hdf5 b97cf80563b09c7ba65b0f99c02bdecb +part_00422_of_02048.hdf5 
9aa124d54665bc62c747bd57b3520837 +part_00423_of_02048.hdf5 55f9c465456999230ea1d5941069673a +part_00424_of_02048.hdf5 eadd712c895ea1e0e8b4b7c9c3f76715 +part_00425_of_02048.hdf5 426357da0ff53bba97bbbca27a23ff30 +part_00426_of_02048.hdf5 f9bb81b75c4fdf84ea25d3690f86302b +part_00427_of_02048.hdf5 4f32a32914bbdcc702548ef7257e4828 +part_00428_of_02048.hdf5 88b51adc1a2e7ac65efa9e88a47fa247 +part_00429_of_02048.hdf5 9e4d13eac6102eb7223a183ea08781eb +part_00430_of_02048.hdf5 eed3ac3a795a6a88eef8b217a92816e0 +part_00431_of_02048.hdf5 1b8f4cc554adbd33b9ec9224e5588d4a +part_00432_of_02048.hdf5 00d2b917f5d2768ad9d2bb06e095f0b8 +part_00433_of_02048.hdf5 d5bca239bc2414d73cea33c3422b083e +part_00434_of_02048.hdf5 62f159fcdcd02d522cdb34d324e3fb03 +part_00435_of_02048.hdf5 4cad05ada881959b0ca7fcf0c2511099 +part_00436_of_02048.hdf5 516d01f6f46ca0ac3873371ea149b897 +part_00437_of_02048.hdf5 ba29ab2df65bcde9ac8800674bc75db5 +part_00438_of_02048.hdf5 8d2a4db392898025b972af00906e9903 +part_00439_of_02048.hdf5 958ce7ca401f6b646570ddecf32a6b77 +part_00440_of_02048.hdf5 7b70f374cecb9b4710ebf35b69e37187 +part_00441_of_02048.hdf5 5955845c000916ff95278bb53dda089c +part_00442_of_02048.hdf5 46d31a9015723fda2ed489c0ac9e67f7 +part_00443_of_02048.hdf5 899e30878fdc90861f11be8803d60427 +part_00444_of_02048.hdf5 e48aa7b47a24b9f3e1d035ad479b8750 +part_00445_of_02048.hdf5 a8c15571f31fe9ca0a1ad07a2ae8682d +part_00446_of_02048.hdf5 4bc92b3651384441eb93810688ecba4c +part_00447_of_02048.hdf5 246c5141dfc8fdbdad607c7824ce46d0 +part_00448_of_02048.hdf5 dc43820ddb293437a2ca08cf4db14fd7 +part_00449_of_02048.hdf5 c515707630e2cd01c7a08c2692ac1ec8 +part_00450_of_02048.hdf5 e0a1e9c121557281709361d9ae4c38cd +part_00451_of_02048.hdf5 0069822c064f5bfb5878b83389d8c968 +part_00452_of_02048.hdf5 197c4b5e9d76f8339034e19926af8d81 +part_00453_of_02048.hdf5 937efafc970b2478177ca6931ce1b704 +part_00454_of_02048.hdf5 c464cb464d7b045beb42a7b13a2a6cb8 +part_00455_of_02048.hdf5 8c9ebd6bd897a983ffdc651b75e6828e +part_00456_of_02048.hdf5 f7799a9f9ed5bf223682cfefd23bb2cb +part_00457_of_02048.hdf5 6e568ddcd95b996b2f68c9f95b49d0ad +part_00458_of_02048.hdf5 9f847bc8ce2e5c7894054a25395002e3 +part_00459_of_02048.hdf5 d7f7f1b646f220d20984243630cc05fb +part_00460_of_02048.hdf5 c21f764c2c31fd035227b5dfb3d75f75 +part_00461_of_02048.hdf5 bd634d68aea28deec2b4c723a5366892 +part_00462_of_02048.hdf5 cb8e1aaea054522e3f9b4013df328877 +part_00463_of_02048.hdf5 2875388f33f2e03b9eaeef573023f815 +part_00464_of_02048.hdf5 b280d7f00ee4630602cda945124a756b +part_00465_of_02048.hdf5 093b1917a75f85950c3555758c7431eb +part_00466_of_02048.hdf5 5738735abe98a3327220ef87af0b88c7 +part_00467_of_02048.hdf5 8414971dadac7539537877c9e9ea1fa6 +part_00468_of_02048.hdf5 9e0711ca8942677c2f70cb123ebb4149 +part_00469_of_02048.hdf5 9ba97e9d73093e72880f0c9f0b5c0a3e +part_00470_of_02048.hdf5 75431a66dbb3d3ecd9ced2948934c4a1 +part_00471_of_02048.hdf5 68bd7848e7522505a02ac43069dadf4c +part_00472_of_02048.hdf5 ebe3be50a83a0946553f2eec49604c26 +part_00473_of_02048.hdf5 bae8c72a836016a32bf21ead64540ef9 +part_00474_of_02048.hdf5 98fa64de4b0e4780f067f6c1fd7d4965 +part_00475_of_02048.hdf5 2bac579035b32b68c48b6bae2defde8b +part_00476_of_02048.hdf5 0a0a5257ad86c4bcafc3ca31a380246d +part_00477_of_02048.hdf5 f08708b3afde8b83ade92f7376a3d081 +part_00478_of_02048.hdf5 6cb386ba840e430a411869a0894f762a +part_00479_of_02048.hdf5 17b1013b6f51a8b86275751ec047602a +part_00480_of_02048.hdf5 fb362e0b7f81a00c16e64f84bd1582e4 +part_00481_of_02048.hdf5 b03f719e0fe60f3a8caa91782d354835 +part_00482_of_02048.hdf5 
b7845cae9cf4801c7e06edaa268c71b5 +part_00483_of_02048.hdf5 95e0f7bab75412f6fb6c4a322be96fb5 +part_00484_of_02048.hdf5 cde5cc326b76ed5157b12db29ed6b828 +part_00485_of_02048.hdf5 0fad2461fc1cfc130450a161b7bb5562 +part_00486_of_02048.hdf5 c60aeaababf91b3c9b053d5ca7e57584 +part_00487_of_02048.hdf5 02c5224bb1a21ac7a8c2adcc9fd3e9f6 +part_00488_of_02048.hdf5 cd1132e8acc676bdc1cdc9d7bf638f51 +part_00489_of_02048.hdf5 be87186dccfa2ccf5ef9bb4e8a58a969 +part_00490_of_02048.hdf5 4704c7f1e40ec8a84ecd1b47a03af2f8 +part_00491_of_02048.hdf5 f3f6f7c23f8bf320eea2f8eceb9d63de +part_00492_of_02048.hdf5 9859e7bf4d7a1526e89ad3608dd504e9 +part_00493_of_02048.hdf5 3f4f20501b29fea4ded3709f5bade5ae +part_00494_of_02048.hdf5 abfffb06cc43715091d8b5b7fd2b59eb +part_00495_of_02048.hdf5 c6886d60bd7bda4be2f2e04d8011162d +part_00496_of_02048.hdf5 edf6109244352cc84c28ffbf4bcd8f44 +part_00497_of_02048.hdf5 5653fb912b0a0f21da07602bbac7cfa0 +part_00498_of_02048.hdf5 84610b0f29809f3a24edc3e725376ffb +part_00499_of_02048.hdf5 5a607103eed3e5123150ee9c912e875c +part_00500_of_02048.hdf5 dc8340c804f9a25189d1085eb2d366f4 +part_00501_of_02048.hdf5 3a4b13f6b3f63cfeaa01cbfe418f6be0 +part_00502_of_02048.hdf5 85051be49c88014410f0beedfa5f2eda +part_00503_of_02048.hdf5 bce927972ac27c25835c1f15e8734ced +part_00504_of_02048.hdf5 14ae1e3d3868c137408d25548a8604cc +part_00505_of_02048.hdf5 cee5ff73dc8340a7956f7846e89817b6 +part_00506_of_02048.hdf5 8d1ac53e890de9b1a544d9f6e6374acb +part_00507_of_02048.hdf5 eac09de69589d67f70855dd3737f0cee +part_00508_of_02048.hdf5 ecfdebff5c69c69ac34d3a648f5a8c6f +part_00509_of_02048.hdf5 e97a644b9fac44e4d1adabcb64b7f0d2 +part_00510_of_02048.hdf5 8e38c66a7b4e1b16b265f70fb00485aa +part_00511_of_02048.hdf5 239dda3f4d71421674a4d7f9358cf1a7 +part_00512_of_02048.hdf5 f0746e1cae74d8d9285b6ea6cc9f1f80 +part_00513_of_02048.hdf5 c4c865560b084f30ab6a94d158d8db12 +part_00514_of_02048.hdf5 acb8f7e0c4fd7613d0b27142725164ba +part_00515_of_02048.hdf5 183ab3490377e07ff82b7047bf1f8dee +part_00516_of_02048.hdf5 8c5907d90ecdbb4038c974781a9805ee +part_00517_of_02048.hdf5 7d3a69bec3f15e56e9ff130d3e8f70c7 +part_00518_of_02048.hdf5 4302f4de86e4cc4129fbf5ff462ab340 +part_00519_of_02048.hdf5 6ac7f0987c8cb9bc9c1d0bcbf2a7e8b8 +part_00520_of_02048.hdf5 5bf90ce81b1049afd3da9269fd22d45f +part_00521_of_02048.hdf5 dc50a4945ed125b73b4f66725ff7b95d +part_00522_of_02048.hdf5 aec4269baeea27576cf1e27d6a154db4 +part_00523_of_02048.hdf5 8e62945438b0c93fff47adcf7171097d +part_00524_of_02048.hdf5 e1fb2b3ddab186db29f008477d066eff +part_00525_of_02048.hdf5 3f5c55e32271525e0bff95843f74cba7 +part_00526_of_02048.hdf5 e4e5d073c044b23ffd5d53df036a966d +part_00527_of_02048.hdf5 233d585d9fb8350cec072f2198ad6d74 +part_00528_of_02048.hdf5 3ed6c4bb5ad145d41b25d79e3ecefa09 +part_00529_of_02048.hdf5 8ff2bcdb970df82e215b333f7f72b758 +part_00530_of_02048.hdf5 b6dd7c4852ee7eaee7b4fa3a2fff72c0 +part_00531_of_02048.hdf5 1c2c79ab4d7ef08566e1efd4fb906cba +part_00532_of_02048.hdf5 e6c1487094b971c730f43009f8555a45 +part_00533_of_02048.hdf5 3f5e708ef3b8513d66d999962d12fbda +part_00534_of_02048.hdf5 cc713b68b1ea437177d39ac8ffe39119 +part_00535_of_02048.hdf5 cbaab26785d687f22fb0a5cca05ab73d +part_00536_of_02048.hdf5 f4c38bf4560a0f8bf6dd628eb8783498 +part_00537_of_02048.hdf5 79a4619463ba534ef94a88b0c0a76d31 +part_00538_of_02048.hdf5 b098640a952661016d383b7480173509 +part_00539_of_02048.hdf5 b106564ab876ef8fa593970050b855a9 +part_00540_of_02048.hdf5 d0a12ef253a3f4c1880d98bd3bc74446 +part_00541_of_02048.hdf5 94036717328b4119afdbb656eb6113e3 +part_00542_of_02048.hdf5 
570b471b3b30f20e5763be013046ccf5 +part_00543_of_02048.hdf5 1d3efa2d1502ab24a6b68f94cde90dc7 +part_00544_of_02048.hdf5 870eb0bc53994eb02fefac00b89e4fa0 +part_00545_of_02048.hdf5 629f1911d289bf93caa2fdd3c1497fb5 +part_00546_of_02048.hdf5 ab6ec159673611dc06c5554a3a7d112e +part_00547_of_02048.hdf5 90274377c93f6536e0afb857fd50e6a6 +part_00548_of_02048.hdf5 3eb09e140d84665efcad834f63630a2d +part_00549_of_02048.hdf5 07cf53cfe4e2bab58c9b3934601ea1b9 +part_00550_of_02048.hdf5 502ee2980eb055a590ad9b7f9c3add2b +part_00551_of_02048.hdf5 92b107c138871f1ab546116ead05c667 +part_00552_of_02048.hdf5 86296469c93c20a4122dc1eab7e40792 +part_00553_of_02048.hdf5 00f3f5143ad70642677ecdbf469be22c +part_00554_of_02048.hdf5 1484644e91944c8d50ab1a66ab78e36f +part_00555_of_02048.hdf5 74046a041568ecf9eaa12cb0e840e76b +part_00556_of_02048.hdf5 5b99d6e9a55b80745378a4ac54ace8a7 +part_00557_of_02048.hdf5 39732c273ee85828007657c490e2adc8 +part_00558_of_02048.hdf5 9a3acc8a4e0fc550ffc0d357f7f44052 +part_00559_of_02048.hdf5 93b97a37ef0f97524c1fc18b26412ab1 +part_00560_of_02048.hdf5 d41205cd411a0790f8d3f76353313550 +part_00561_of_02048.hdf5 366b698ca0e090891873cf9d025d0de3 +part_00562_of_02048.hdf5 15eae257577f4164c2401ef66547bd24 +part_00563_of_02048.hdf5 8dc497a872f930b977445e767435170a +part_00564_of_02048.hdf5 221cebaa9ba480e64db24b0443bb4ce3 +part_00565_of_02048.hdf5 7dc0849fd843cc0f24dc4ff455a2ee04 +part_00566_of_02048.hdf5 ce539113746dcfc3e47c7aff14ba2645 +part_00567_of_02048.hdf5 22ff246e01222b0fdde358120e8c3f13 +part_00568_of_02048.hdf5 4824f30fa800c8a5a1c09519ff75a65b +part_00569_of_02048.hdf5 fe02372fb6c29d8d9c3194f5c2f0f1fa +part_00570_of_02048.hdf5 cbefba5933b89966d585fb0c1978b093 +part_00571_of_02048.hdf5 6d08e8d3ab6de47fd1dd872221c02392 +part_00572_of_02048.hdf5 80dab5ddc5cf79394954ffd90544c387 +part_00573_of_02048.hdf5 8c8d3848a6285ec58d7843c25f141cff +part_00574_of_02048.hdf5 1d5a4a46af1a3efe64157e038c503892 +part_00575_of_02048.hdf5 e1a5f3785077908a31b6681c727c72a9 +part_00576_of_02048.hdf5 cd48300db8967bedbeec5a449900f01c +part_00577_of_02048.hdf5 a1c7017208d65fc13e49ccbd6a963445 +part_00578_of_02048.hdf5 55dfd402b572c33fd2d872ad039976c1 +part_00579_of_02048.hdf5 60a15bdb232176a2d0d18e5ccc2363b9 +part_00580_of_02048.hdf5 b624c99f4bdb013d9b769c7042599fe2 +part_00581_of_02048.hdf5 490f729f7636392cb211a274cfa2e3d0 +part_00582_of_02048.hdf5 26135a3d063e7b0f51b12b8277c038f2 +part_00583_of_02048.hdf5 fd18325887b37701caaf067173656093 +part_00584_of_02048.hdf5 b849038708b07ff002733a522477b9a3 +part_00585_of_02048.hdf5 48dc06918237981117c52615bdc9eca4 +part_00586_of_02048.hdf5 acafbddf9773424c6b565d25e4974924 +part_00587_of_02048.hdf5 9b3c4dc0c46c4a6f64deb3de091fd526 +part_00588_of_02048.hdf5 782ea624c2593badd744861aae1d4b0d +part_00589_of_02048.hdf5 14ca4b8c205af102a16fb6c9352e98f2 +part_00590_of_02048.hdf5 fc80ba4fd4d6557d15c29aa163921577 +part_00591_of_02048.hdf5 cc03b173a39540fd61c9bebbef442f7c +part_00592_of_02048.hdf5 3198e228e8d20a51db6cb3c1ed89d521 +part_00593_of_02048.hdf5 76e4e1441531f86338b3cca62ba545cf +part_00594_of_02048.hdf5 34adb8e3bfba951d05ac5eb1b57bf585 +part_00595_of_02048.hdf5 800ab43f857546d39fc0588f79024974 +part_00596_of_02048.hdf5 bff8943abcd2a0f7fddfc53c31b249d3 +part_00597_of_02048.hdf5 fe3d870165e099bc721a683c10cc9622 +part_00598_of_02048.hdf5 3cb26ac43c27af69ea08e11d2c801b24 +part_00599_of_02048.hdf5 1a587bdc1c02d604e4a2c45ccb076f86 +part_00600_of_02048.hdf5 646f1b8c9b222d5c1648b483e9f0e3d0 +part_00601_of_02048.hdf5 35372896cdf5086366e19138dd24d499 +part_00602_of_02048.hdf5 
ed6fc27de9c29abcfc998f132e5cb7ef +part_00603_of_02048.hdf5 71f3f2799f05978cc6caca3efc36780c +part_00604_of_02048.hdf5 738b779a0df990bce147d8d59660e988 +part_00605_of_02048.hdf5 33e71f378ef012178a94c959b7ee7d94 +part_00606_of_02048.hdf5 8c95211ddf9b85d4ab9049b28f1f9caf +part_00607_of_02048.hdf5 6cf4637a2698482d5a73a09d33a42da6 +part_00608_of_02048.hdf5 b1980b3064e550890cb70e826c90889a +part_00609_of_02048.hdf5 ef3247331a650132ab0ad8959e5ca97a +part_00610_of_02048.hdf5 477d5eedf7c5ec4acd1878398e8e1d54 +part_00611_of_02048.hdf5 406dd09e0c9efd503effded622d1803c +part_00612_of_02048.hdf5 49bf6658c245b341c81e62d6079336f3 +part_00613_of_02048.hdf5 baa4935479dd2eaef1a63e3e69ffff03 +part_00614_of_02048.hdf5 66b9aa60721aac7d01ea0b2b484fe068 +part_00615_of_02048.hdf5 ed8b12e8ae00e4c90bac250b240b2213 +part_00616_of_02048.hdf5 62796e0a3535a4d740128ccc200f3699 +part_00617_of_02048.hdf5 b98c39484725861760f014d6b4b645f7 +part_00618_of_02048.hdf5 5ee530e5aa103aacdb2cd6d77160c2a5 +part_00619_of_02048.hdf5 d4695de61dd2e844953f927bcd95c24a +part_00620_of_02048.hdf5 ab655f63396363a60def4d18f2341bf0 +part_00621_of_02048.hdf5 4dd8bb04782b340e6ffcc23636e5354f +part_00622_of_02048.hdf5 b10fb9d38c842628365b8e34bbdfb248 +part_00623_of_02048.hdf5 a0ad5bef600f69738e30cf43289ed72f +part_00624_of_02048.hdf5 7c508c55140d2b1f616555acd6318116 +part_00625_of_02048.hdf5 f7d9f8c4309bc9ff246abedf805420af +part_00626_of_02048.hdf5 9274b7492fef6e92209af51f53ca61cb +part_00627_of_02048.hdf5 f96a63e46fce441763e68bcfd0cd0733 +part_00628_of_02048.hdf5 f2d6e9ee0b15383c680ea91f9611705a +part_00629_of_02048.hdf5 0b9b524177783a086e0392252ae75aad +part_00630_of_02048.hdf5 326d16a7e87323d77326e4e33b81961a +part_00631_of_02048.hdf5 583535122e9336d0d298e52cdae57899 +part_00632_of_02048.hdf5 1550f1a786f3901d344676f4cc57b712 +part_00633_of_02048.hdf5 6463976389942a1de7a9e87beaf3a996 +part_00634_of_02048.hdf5 7f12641e374b34bd7567b65f02bee3c8 +part_00635_of_02048.hdf5 40f1c5b30cf95bf29db73812c9865b0f +part_00636_of_02048.hdf5 7198712ced642336d57f046703c6532a +part_00637_of_02048.hdf5 2716945b8f111c8ef370d2b3daa0bcd1 +part_00638_of_02048.hdf5 ac588d46625aae1235933699276a2b58 +part_00639_of_02048.hdf5 5edb7c2fcfdb51ef04d9223304831435 +part_00640_of_02048.hdf5 19c505967983405c9ab43385427ad9dd +part_00641_of_02048.hdf5 8511bd398f969a3d525f163e1dff833b +part_00642_of_02048.hdf5 be425b527f7e2c97a9a53bb448d025a0 +part_00643_of_02048.hdf5 e19abafebdc4a1b8504d32fd93bb9f5b +part_00644_of_02048.hdf5 1f3758e17bd7b6c3d94ee934eba624fa +part_00645_of_02048.hdf5 1d3eadeeb9d3d5510361816a8d156b00 +part_00646_of_02048.hdf5 40ad2a390e4eb9c94e938de46d99690a +part_00647_of_02048.hdf5 a2019735bda06a275f4c6134d4d21315 +part_00648_of_02048.hdf5 80d02ae1e234d0b9bae2ce51272c09c1 +part_00649_of_02048.hdf5 c1123b846a5af62bae7667b3a12ca709 +part_00650_of_02048.hdf5 103c3aeeff136246ced38581f83e88a5 +part_00651_of_02048.hdf5 a2a09cb9822088db5deb10392cdca11f +part_00652_of_02048.hdf5 9dd6db496a20f6997a8c1d929b8b184e +part_00653_of_02048.hdf5 a1f5d79249387b0e6551e9766abce3e9 +part_00654_of_02048.hdf5 58e0ca7c9e52bbc726557dec7b779685 +part_00655_of_02048.hdf5 d1fea551c4e6a154ad1e9df829efff44 +part_00656_of_02048.hdf5 84e53a58c28306971c295fb8d75dbd6b +part_00657_of_02048.hdf5 e47009bb0a4ca2dcd3572d950d151056 +part_00658_of_02048.hdf5 0157c81561e6eec7962fa7f86beb039b +part_00659_of_02048.hdf5 e09ffeff0bb9385e15047d56d5e5166a +part_00660_of_02048.hdf5 8a54248fc5ffe6e485e9c842aae5d1e2 +part_00661_of_02048.hdf5 62ab7bd437ef1f39b8acfb01a1aa3226 +part_00662_of_02048.hdf5 
9dc024464fa757cfc95d74df7f37743a +part_00663_of_02048.hdf5 3a2e5e969ed7d81015588f02f8818419 +part_00664_of_02048.hdf5 284be32f103f6788c6f1add8536711c7 +part_00665_of_02048.hdf5 5b0debfc6ca4a74c5fd3aa6ced5fd20d +part_00666_of_02048.hdf5 73d8d9d5938f733cc562f333db504806 +part_00667_of_02048.hdf5 b4f62edf72e7f47738f8c1e9e9ad5272 +part_00668_of_02048.hdf5 88a85f30a2d45d8e266ca362b5c826b8 +part_00669_of_02048.hdf5 8f271721478ea6e4a0db8fe8aa412bbd +part_00670_of_02048.hdf5 3b6ef1591a04961b802b3d75bb6e5990 +part_00671_of_02048.hdf5 8b76bf59697d3e5229d02e8038b10b9a +part_00672_of_02048.hdf5 f00d0a22cac4a7dfd5b2d3d24f854b34 +part_00673_of_02048.hdf5 9a4331db0b8173dcc065e5f56ef6851c +part_00674_of_02048.hdf5 c23e45044055b00df0a28d18c5d5adfb +part_00675_of_02048.hdf5 0a9146fd83512491b342a7af84ca7cf0 +part_00676_of_02048.hdf5 656bc7458db70a390d270859713e99ab +part_00677_of_02048.hdf5 82474dfbae4cd981116df869ed64d89c +part_00678_of_02048.hdf5 c0655705ade8e1061999236b642f90c2 +part_00679_of_02048.hdf5 6c3ecdae9057bf879a02b8a069895368 +part_00680_of_02048.hdf5 db77d18eea558d8b00ca7084f635fbfc +part_00681_of_02048.hdf5 af8279d915d052527cd75abc594640fe +part_00682_of_02048.hdf5 35401c68dab0a90788c799ec923e6fb4 +part_00683_of_02048.hdf5 4ca7e46198117df437d98580500e6ce0 +part_00684_of_02048.hdf5 0b38a80de4c1a04468bc27b6896ecfaf +part_00685_of_02048.hdf5 e6a73705720999996bd4ac1847e4bb15 +part_00686_of_02048.hdf5 a34f9afafef152c8ce50ec8398d57550 +part_00687_of_02048.hdf5 45ec8c2e1d2b17e2d5e658e615280dd6 +part_00688_of_02048.hdf5 5c373890c1c59ac1ba9b384c912c0f03 +part_00689_of_02048.hdf5 07dbe72e569965eea3d15f753ac0abcc +part_00690_of_02048.hdf5 a77815370a03bea6c1f9b850ab12edfa +part_00691_of_02048.hdf5 f68c42c92b187f748a7672f0a78530d8 +part_00692_of_02048.hdf5 53843a274af9dd1b879e4031bc09c813 +part_00693_of_02048.hdf5 0beded723f4243914938c2948c4644bc +part_00694_of_02048.hdf5 a3edb5655a370606c3a2d01c7b37cea9 +part_00695_of_02048.hdf5 be629759584b4f1c48e780e8242b19c1 +part_00696_of_02048.hdf5 1fbbe616ad583e5f24b32bd7e4dd5ad1 +part_00697_of_02048.hdf5 5b29d389648d3783ec8b040030e294c1 +part_00698_of_02048.hdf5 cf29c605d40c36733cf7ebd26a7f35d4 +part_00699_of_02048.hdf5 87253a6f912d2af137fd3bc2cbd883c3 +part_00700_of_02048.hdf5 c04515db149637914122507cd512317c +part_00701_of_02048.hdf5 e3c2e64fb20462eb73c71f686fb4cf0f +part_00702_of_02048.hdf5 e23ae2acf82670c86cea5d322cbb03e1 +part_00703_of_02048.hdf5 ba63c5d931b644dc751c4732553df1c1 +part_00704_of_02048.hdf5 32549aecdbe9a1194636690e8f5f7415 +part_00705_of_02048.hdf5 36720b374b95f07a0b349c5b8f763dc6 +part_00706_of_02048.hdf5 ba17385120831b41d74ae296b68c2bc0 +part_00707_of_02048.hdf5 7d8ee251495ce69b6dfdd07e78bb387f +part_00708_of_02048.hdf5 2bb51fa739d0c159f633faf1100bdcda +part_00709_of_02048.hdf5 2f4c2ad8f3cd943dee13134481568b2a +part_00710_of_02048.hdf5 6d023501c31347a16361ab2e7f94fa94 +part_00711_of_02048.hdf5 9e24fcce2a1649261861affb84207521 +part_00712_of_02048.hdf5 4f6e559501a0e5145c7e3656841a1f0e +part_00713_of_02048.hdf5 ac30f55d33e1fa3dce9e74f65df2a2b9 +part_00714_of_02048.hdf5 bbcb1369a1281636f626ec5318b42613 +part_00715_of_02048.hdf5 252269d88ebafe542d5cf6367e2398f7 +part_00716_of_02048.hdf5 a27846e66e0d65d04ac04f378c863be1 +part_00717_of_02048.hdf5 742b3d6e94f44d315df536f13c7c2cc5 +part_00718_of_02048.hdf5 61e9e12ee615cf853124fbf207490606 +part_00719_of_02048.hdf5 fac4e0efa519b8ea538f98a03a5bb2ff +part_00720_of_02048.hdf5 3e10e5fe6d5ef8f56593bf7e34d2d028 +part_00721_of_02048.hdf5 56cf9958c002abe0f2efa2c185ac015f +part_00722_of_02048.hdf5 
064ece5ff7c313430e376874e1aa13a1 +part_00723_of_02048.hdf5 83c4066d268603954fa131652ddfc78b +part_00724_of_02048.hdf5 61c7763245c8804f29a198c8083f401c +part_00725_of_02048.hdf5 e3fb199fb569ff0892ad65ac27ef10d3 +part_00726_of_02048.hdf5 036111ea19724f8b8914b1f0cf6c4a69 +part_00727_of_02048.hdf5 fc572523224b77670704c57cc4837f77 +part_00728_of_02048.hdf5 618ca924620852ad18cfcd9b4c05fab1 +part_00729_of_02048.hdf5 479433f05f7d87a2c27b14fd891b7381 +part_00730_of_02048.hdf5 693ed7f214b2fe223a4f77a8b25534b7 +part_00731_of_02048.hdf5 9e3bc49ab492393d4468a79c66585183 +part_00732_of_02048.hdf5 058f784043e884ca4ecf7cc9c5209c71 +part_00733_of_02048.hdf5 0d8c906647d96bf14548980466d64bf0 +part_00734_of_02048.hdf5 5a086bd65b93eb7eaa4e49dcc1289dcb +part_00735_of_02048.hdf5 7b3e8a616e4c3d4768441cf9934892df +part_00736_of_02048.hdf5 effc54a1c6780b119f244d51ba0fc66d +part_00737_of_02048.hdf5 a4fb0b258a5d5c136f2b373e8edd4bfd +part_00738_of_02048.hdf5 4eb8172f3c09c9a3f338747b987aecc7 +part_00739_of_02048.hdf5 9dd23a50c84e6c805816c9c5d86f6da9 +part_00740_of_02048.hdf5 245cd8624bf7f69fe72d590e1d07618e +part_00741_of_02048.hdf5 48f1d0df04252d2ca69d36c2648d9f91 +part_00742_of_02048.hdf5 bbe9e328b0a5786e089ca10ee04d7a89 +part_00743_of_02048.hdf5 8fbe5c5cd09f23884d23a00ba0d0fe2f +part_00744_of_02048.hdf5 693c83f68233b599f80ed2a384d952f6 +part_00745_of_02048.hdf5 7820ac7d95fc366d9797fa9c6c4f3e2e +part_00746_of_02048.hdf5 0ab11c62e469bcae57914d5c401a4a76 +part_00747_of_02048.hdf5 ada31fe9022b23fcc4b9a48aeeb59961 +part_00748_of_02048.hdf5 9ace4eb724cb89fc9aff11f71f6a344e +part_00749_of_02048.hdf5 1c48566fb945a397fb1ffad186e63134 +part_00750_of_02048.hdf5 adaf4e17fab84b9e22c1ffae10735a74 +part_00751_of_02048.hdf5 39dd88ec599a865d85cd0a0a91a87071 +part_00752_of_02048.hdf5 64b331c56a79045562a96929ae1acbfa +part_00753_of_02048.hdf5 824fb2ea00e2dbb3f45be57793671d4c +part_00754_of_02048.hdf5 2f1b4d7f29fa9b94d29d0b7fa0bed472 +part_00755_of_02048.hdf5 e0a80b5df482ffd2dfc76e6ac4a70859 +part_00756_of_02048.hdf5 b442f2764ee0828d283ec4c6072e64a6 +part_00757_of_02048.hdf5 5626eb5cdd5ad1857e5e87b5eed97b1f +part_00758_of_02048.hdf5 9185285e91876c62af1083066b50568b +part_00759_of_02048.hdf5 f9cac134bbae62439d721bd667f9a9d0 +part_00760_of_02048.hdf5 6caa743f5f08cf4d04c6a5149e4b2611 +part_00761_of_02048.hdf5 74466883d64e7dfc037a95ac6b8f5b36 +part_00762_of_02048.hdf5 5e6671abc0743026121b9907bbbd15ea +part_00763_of_02048.hdf5 4eab7eee525ad19816148783bed29d09 +part_00764_of_02048.hdf5 0d440121520b2e813e7a22d57fdff435 +part_00765_of_02048.hdf5 93db5611806b75752f20adc9f31a3550 +part_00766_of_02048.hdf5 b3d81fc98f6c1866eacc3ba743136452 +part_00767_of_02048.hdf5 8f91c14280764476c7b1f59e85a3fbc0 +part_00768_of_02048.hdf5 0ff66e0d3087138eaffd9ae9562dc6cf +part_00769_of_02048.hdf5 88643887d90dda0240c89382c14ff8fb +part_00770_of_02048.hdf5 dfd4f82c34491a5189fb658aabce53e2 +part_00771_of_02048.hdf5 6b69387406da01e178f851f1a7dda4dc +part_00772_of_02048.hdf5 e6a25935cd724dbd63a4d6ed0de8cefe +part_00773_of_02048.hdf5 9b23b25cad33c7cab21608e6a99665b5 +part_00774_of_02048.hdf5 3692d92e7ecea54a3705afcdca2a3c1b +part_00775_of_02048.hdf5 5e34bef6e20da8a7a340b20d759b1408 +part_00776_of_02048.hdf5 0e004dd5fe186928b56e39f78dffb572 +part_00777_of_02048.hdf5 ecf85e484103881bcffd5c80af7f00f0 +part_00778_of_02048.hdf5 b1100da72afdb461fe05b611c19823ad +part_00779_of_02048.hdf5 2719b12cb4100e63f15bbf455ce0493b +part_00780_of_02048.hdf5 e4d721f0015defeb9e182f2916ee7ec9 +part_00781_of_02048.hdf5 8c46b006542b1fd1aaeebda200a14ea1 +part_00782_of_02048.hdf5 
d07b494baee98d6a1631c49be50340a7 +part_00783_of_02048.hdf5 80ff97a0ab4bccfb46b1169252574cb5 +part_00784_of_02048.hdf5 23d99ab12ccd6bfeaa1432ab00e14a01 +part_00785_of_02048.hdf5 790d6031b03c5cd72a5fd53eb2ac74b6 +part_00786_of_02048.hdf5 096225ef571c5ab33795351db3bd0e56 +part_00787_of_02048.hdf5 c73d9f06292afa4936fe33a868687e13 +part_00788_of_02048.hdf5 70a3618533630eb56f13352f07faa8b3 +part_00789_of_02048.hdf5 1275a999da12306bc007706f93dd68fb +part_00790_of_02048.hdf5 ea862a26a327c6c7d3e79b60420a1f2b +part_00791_of_02048.hdf5 a007f2c20f2508bb7f3f21b42a7537bc +part_00792_of_02048.hdf5 1176ec357bdf18bacb1e8a15132a1144 +part_00793_of_02048.hdf5 ef89a7e09b97f416f6a4fb9bfd69d1f2 +part_00794_of_02048.hdf5 ae6353113603cdd57e86d8e46e211603 +part_00795_of_02048.hdf5 e6c50018a53d760b55eb6c94c9529192 +part_00796_of_02048.hdf5 f8d4764eaa584f656b3ceae19784eec9 +part_00797_of_02048.hdf5 9646a2632e191b76d86db20911fc6a1c +part_00798_of_02048.hdf5 d5701776b01d78e2c553e905f1658ada +part_00799_of_02048.hdf5 63cd32baaa9c4abd6a350f596e47d41c +part_00800_of_02048.hdf5 daae7de8751c2894f20a10074e4848fb +part_00801_of_02048.hdf5 6fb6bcd1d1eac9727bd10e101d6b636f +part_00802_of_02048.hdf5 fe9ff8c30e7bb33266e9cbf6d7063460 +part_00803_of_02048.hdf5 d94395dbed551e0ec969616b620480c0 +part_00804_of_02048.hdf5 7b8e44c4fc0160295337a3353cf209d9 +part_00805_of_02048.hdf5 30ae6c74264e017793056eb2796712c1 +part_00806_of_02048.hdf5 20ebaf5ecb3d6f741cec90bc404fef7f +part_00807_of_02048.hdf5 4d3fdb40da202023d29edaff466fcef5 +part_00808_of_02048.hdf5 4a4633f6359947297f1a556347c2139d +part_00809_of_02048.hdf5 92d89bd8352e356197cb0c2e3782edbf +part_00810_of_02048.hdf5 c2efa5059024689fb448c88045c5b881 +part_00811_of_02048.hdf5 a471a9752c48e60a57dde6bc4f014b5e +part_00812_of_02048.hdf5 dd2d40a6cc78a85dab71872aebd20929 +part_00813_of_02048.hdf5 3a5cdf49a496f8ad18c109f1e2466038 +part_00814_of_02048.hdf5 048bf6f5b1838ca2e098b1e4a5d129c4 +part_00815_of_02048.hdf5 b4bbc7ccb11ea49d296eee047083569e +part_00816_of_02048.hdf5 78703867f960c12acb56999355cdd85e +part_00817_of_02048.hdf5 47457f29ee54eb7886669e35a8c0f17e +part_00818_of_02048.hdf5 33c8e7e0c0756bbdc7927e9235289bbf +part_00819_of_02048.hdf5 644a254ab79463cce75e96e1da7c5bbc +part_00820_of_02048.hdf5 7b4fdbaa7b40768cf47878b606e22ae2 +part_00821_of_02048.hdf5 c8522edcbae149a627e39135346e8053 +part_00822_of_02048.hdf5 b7cad409a3d1f135ac5b856651bf5a55 +part_00823_of_02048.hdf5 7807b43c439cefe3026c56d4752d31c0 +part_00824_of_02048.hdf5 ea66304813d8cfc26b0c9cbed1eb7367 +part_00825_of_02048.hdf5 08701a0aea588f40bbd5bbaa837fadcb +part_00826_of_02048.hdf5 5a643c7f77376fc7fb77ecd3a2d42463 +part_00827_of_02048.hdf5 de3ff64c93eb6e39cc7eb66a54a8c540 +part_00828_of_02048.hdf5 9e7c639556ccf4a4ca383378521227e3 +part_00829_of_02048.hdf5 6f1d2f3c9757ff5a5bebf97d86dfd7e4 +part_00830_of_02048.hdf5 dc130956e60719e6098e310f28fd8f5d +part_00831_of_02048.hdf5 83edd8578f657f8bd8bd49ca7f867649 +part_00832_of_02048.hdf5 03a1cf8b5a5b1522ecafd9d63c509723 +part_00833_of_02048.hdf5 3e0ecc5972c70948f07bdacb880229d6 +part_00834_of_02048.hdf5 7f835ebe5edc3eaba7a2363d44d83ac1 +part_00835_of_02048.hdf5 159f7452593bce0f919727f4f5c1e123 +part_00836_of_02048.hdf5 078cf894ffcc16a13871eec2e3543ab0 +part_00837_of_02048.hdf5 0ee3f6c9bd2bfb31381d51b91c67f471 +part_00838_of_02048.hdf5 5de7977e74328b2b8730e3c92631d0af +part_00839_of_02048.hdf5 ae2ac832926d73d7b23dc22bd75223a2 +part_00840_of_02048.hdf5 8f24fd7c1e3741bc8525de851f155c47 +part_00841_of_02048.hdf5 4de1fd8ffce5d8f89a085750d4373c83 +part_00842_of_02048.hdf5 
2c20141375c86af122e70dbc223daaec +part_00843_of_02048.hdf5 983bccf9c704795431fe691b925adbb2 +part_00844_of_02048.hdf5 865db78726a68c3cbf4f9885ec9c40f3 +part_00845_of_02048.hdf5 d3d978f94eccddff69f84447e92e7b08 +part_00846_of_02048.hdf5 4e91b204a37a518e42986d16ae88a8e0 +part_00847_of_02048.hdf5 155fd9f8e91919d47414a9dca3496ed6 +part_00848_of_02048.hdf5 590c2cfa28cd6f7ec78cc6eeab866158 +part_00849_of_02048.hdf5 2ba072b967ed465da8f202e48d678a4a +part_00850_of_02048.hdf5 abb987a0fdd9bd74b6b1383f492efe7b +part_00851_of_02048.hdf5 c2949e4f6729c0924b57fb00a2cedfb4 +part_00852_of_02048.hdf5 f5c0b2ff5ae0c3f4823026fe4c280285 +part_00853_of_02048.hdf5 3445ed4df787645245ac6f387bc04321 +part_00854_of_02048.hdf5 2a44cedf52f403814b692762faba82ea +part_00855_of_02048.hdf5 c5a53e22e4f0a977134f1555b08b095f +part_00856_of_02048.hdf5 d3754722a557d0900e8bc9eeee35856c +part_00857_of_02048.hdf5 a3e4fa6186222695fd685e64df1963ff +part_00858_of_02048.hdf5 ddda746e2f2d1d0bbd1096b2bb524f85 +part_00859_of_02048.hdf5 33290a77a614cc6e3a2903577452733f +part_00860_of_02048.hdf5 acf0a2ec16cb92a9a621093ec4c3cf52 +part_00861_of_02048.hdf5 60b7817366c7f7f928adfdcced1b8b8b +part_00862_of_02048.hdf5 61b0fd7bac1f780d46f5370d5ca60482 +part_00863_of_02048.hdf5 fc0534a7930ad9f81e60389f0ab0561e +part_00864_of_02048.hdf5 38e238d4255ce64cdae0a63eeff42aa4 +part_00865_of_02048.hdf5 e7202f450720ff12b693a917f8d9a221 +part_00866_of_02048.hdf5 8db06132c23ea5bbc3e2b406ef030730 +part_00867_of_02048.hdf5 155ceb0e4b4011fc62c0ccf5ac3d82f3 +part_00868_of_02048.hdf5 e04f9308045b7d93bfdfae5d6c736c82 +part_00869_of_02048.hdf5 fe23b70ffac264ea8b085d29a4dad85a +part_00870_of_02048.hdf5 fab62362f91688ea043f66cd8bcce795 +part_00871_of_02048.hdf5 0d8646ad1096cb45d818bda8f6c727f1 +part_00872_of_02048.hdf5 80bae5b5e4207455d4320183e1b7560f +part_00873_of_02048.hdf5 33dd2356b48df84efc574aaa055279db +part_00874_of_02048.hdf5 3a390ae845499c13eaf12fb39546ff1e +part_00875_of_02048.hdf5 cc730a6b967a2bf6f49e3bc84ca95358 +part_00876_of_02048.hdf5 cad67ac2c9dee48213758c3a91a89604 +part_00877_of_02048.hdf5 d9d81c4f8aef8435cc9e1707422b6cf5 +part_00878_of_02048.hdf5 c5fc2264b3ca3d53fe533fb840cf687c +part_00879_of_02048.hdf5 76770c2ed8f2262825c1e866e1ddc6c4 +part_00880_of_02048.hdf5 77f3c60072968610cb1c01cdad3a4506 +part_00881_of_02048.hdf5 b44c4a96e1d9600b62c28f06a83a5da1 +part_00882_of_02048.hdf5 554792113f652e71be688b90858308b0 +part_00883_of_02048.hdf5 0db03c0f0a5f9d8d885473f0951afcc6 +part_00884_of_02048.hdf5 aa6c7496fb09bc25417099d81b1efc32 +part_00885_of_02048.hdf5 dc93bc2909df94db67f71e7bc5375d7a +part_00886_of_02048.hdf5 a5aacbb12d6977a8068366381a777c60 +part_00887_of_02048.hdf5 502a2e6113da00c4972fe09369851610 +part_00888_of_02048.hdf5 21f22af1a7b9b9f782a4b3d3bde7d898 +part_00889_of_02048.hdf5 2bec072f7bd2f78e895201366f826fdc +part_00890_of_02048.hdf5 ee6c9ad59f29651d2954b173a2e2f834 +part_00891_of_02048.hdf5 f4ae93bc7864d90b7c3e426400ba9070 +part_00892_of_02048.hdf5 524e28eb3e8d03809c0e681126e544b6 +part_00893_of_02048.hdf5 24e7d8e732c2069743ad5d37e0babfe7 +part_00894_of_02048.hdf5 f6b8f291e2709870a512b96f172ae17b +part_00895_of_02048.hdf5 c8a0fd04d284bdfdd55b8d2d03c12139 +part_00896_of_02048.hdf5 e21817887765b57091e73dac53d00e6f +part_00897_of_02048.hdf5 7b8812a7046b527fb725a2aaeb393e63 +part_00898_of_02048.hdf5 c2975cf01d3af28216e083905d607651 +part_00899_of_02048.hdf5 bfbf8921aa28a695af99dc629df8576c +part_00900_of_02048.hdf5 d37d6634f0728a7d81fe6ce794f8bdd3 +part_00901_of_02048.hdf5 d2dc5caf113ef56ca9a542970380bc82 +part_00902_of_02048.hdf5 
ea9fc4e611ba981b00ae48b8d4eceac0 +part_00903_of_02048.hdf5 bac947ee39561cd76514b0c6a4dadeb0 +part_00904_of_02048.hdf5 5c960276bbaa0491a05be488a62ecc05 +part_00905_of_02048.hdf5 945170f6f60989e4eae770997207147e +part_00906_of_02048.hdf5 aeb1455a1c083bd001c633f44e5c4a20 +part_00907_of_02048.hdf5 0049eba63bd76a4761751031ecd76ed3 +part_00908_of_02048.hdf5 46805535ba757b2d9d9aaac1cbb755a8 +part_00909_of_02048.hdf5 57c941df26c3648c2fc44dd5cd8554da +part_00910_of_02048.hdf5 f99cf21382e39aef0158720313e497df +part_00911_of_02048.hdf5 dd6f21e60dc7e3d790b129e21480ff11 +part_00912_of_02048.hdf5 a183c71fe4a6b40ed974a7ab4c0174ea +part_00913_of_02048.hdf5 833e4e28c78922aee54b842372d69c54 +part_00914_of_02048.hdf5 3ff14034fde07e7cbe176bf119f8eb13 +part_00915_of_02048.hdf5 e2244fbbb1adee92abbb40236595cae3 +part_00916_of_02048.hdf5 db557898a2a1d78aaa42764f5ad2119a +part_00917_of_02048.hdf5 afeeaf011e498b423d7a5a5af7f6cbf6 +part_00918_of_02048.hdf5 36be08521ac3dc5b422e889be42ba119 +part_00919_of_02048.hdf5 44afa89a845347527a8cb6087679361a +part_00920_of_02048.hdf5 cb5e410404ba0b85235796152421d7b8 +part_00921_of_02048.hdf5 91fd3f0635166d010a44dedbf870ed93 +part_00922_of_02048.hdf5 eafb2235649675df2b9ab8eebf956285 +part_00923_of_02048.hdf5 35b7d9358ad0d5fc8d80493209cb5018 +part_00924_of_02048.hdf5 d75fae395f0a828048b61f0cf5173f02 +part_00925_of_02048.hdf5 03078ae7a09724a0100ca573af3e279e +part_00926_of_02048.hdf5 b741d8cab05ad4ab9f5c270876224411 +part_00927_of_02048.hdf5 f56880a28ab47ce113604b01d1113667 +part_00928_of_02048.hdf5 08f0ff9074feb875230f657accc8ed58 +part_00929_of_02048.hdf5 960554fa344892fd9cf6da901fde73e3 +part_00930_of_02048.hdf5 9487c8ff641cc99d599678a165a81e72 +part_00931_of_02048.hdf5 32cb45c1c4e6e4fe80a683e2008f6a6f +part_00932_of_02048.hdf5 0c15788fcfecdffc3587de0cac75cc08 +part_00933_of_02048.hdf5 9378e0bf9c4b05dc0b3fabf66df955f1 +part_00934_of_02048.hdf5 85deaf030555d96aba8bdc0419c3e1f0 +part_00935_of_02048.hdf5 43c55597746df6dac26345f81504d19f +part_00936_of_02048.hdf5 acd41de29e848f4aa59fae4aaf391e77 +part_00937_of_02048.hdf5 596c8faf8222503be3ef5b9990056669 +part_00938_of_02048.hdf5 cb9caa093e91fc52668963d7ccd09472 +part_00939_of_02048.hdf5 cb9e5535069db80fb775e5b1251b2448 +part_00940_of_02048.hdf5 515339ea91d4ee3a0b0c066f1c65a5f4 +part_00941_of_02048.hdf5 c8ff19f1e837732cc4b3e79b3b8a8c97 +part_00942_of_02048.hdf5 e0e799e3dae46ec66d2e4c42c55cf0e5 +part_00943_of_02048.hdf5 f7473d316add42ed7799864f550e2557 +part_00944_of_02048.hdf5 f7d2ed2cde907cf29d85ae0fc7257afd +part_00945_of_02048.hdf5 81898861f59d0d9ae2082de17cf42199 +part_00946_of_02048.hdf5 198bb5f1a8226790ed433ec355544c8c +part_00947_of_02048.hdf5 276cc477e79e0f1b62387e193c18a8b8 +part_00948_of_02048.hdf5 41ab36a2ea30c92b70fb4db31b55544f +part_00949_of_02048.hdf5 59a578d46f6058f6ccd106096999f8f1 +part_00950_of_02048.hdf5 4dca7e9454539f072d5332e9d12adccc +part_00951_of_02048.hdf5 f28d5147d889a44e2a92bb464feffb55 +part_00952_of_02048.hdf5 fdcbebb24f3e7cfa710e8723e3dd1401 +part_00953_of_02048.hdf5 f9f71234e914e4eae73d392bbf62d224 +part_00954_of_02048.hdf5 e506314f575e064a7f777c2eeafadbbf +part_00955_of_02048.hdf5 bf6d600b916105498ca22ae07768ba27 +part_00956_of_02048.hdf5 4a2d4cf153a46c9eba95b5d95c5fd667 +part_00957_of_02048.hdf5 f0e180989460123985e15f8388494471 +part_00958_of_02048.hdf5 10a852a046acb6be6b94fbb75a35a67a +part_00959_of_02048.hdf5 fa1e817620fba16923b0dac690c632e5 +part_00960_of_02048.hdf5 31bf4d1eabb7f011755b4c5645f0e6b5 +part_00961_of_02048.hdf5 d5ca936aa058afd7c03b4fdd6d5f114b +part_00962_of_02048.hdf5 
e411ea5cdc7887b23a6250d2fb3aea32 +part_00963_of_02048.hdf5 297c9362661a97dff2e0dbf9d442841c +part_00964_of_02048.hdf5 a41121694f63280c7b319aac504d7457 +part_00965_of_02048.hdf5 755bd367f5c6d67f205d9e3866635824 +part_00966_of_02048.hdf5 78cb4d293d442cfb6f0be6022627a997 +part_00967_of_02048.hdf5 a6d43a0379ac5f4f40cb0c458e1815e9 +part_00968_of_02048.hdf5 bf77ab4e0fca12bc0554f55489400c06 +part_00969_of_02048.hdf5 a851bb972042f18f1e65a40961d5d322 +part_00970_of_02048.hdf5 3be29c96d1643253aa8585bbfdae80d0 +part_00971_of_02048.hdf5 4d7838599302821cb2dffc1b3a5d6c48 +part_00972_of_02048.hdf5 30623d62793f694c54a520fd99d3a068 +part_00973_of_02048.hdf5 8ad1b3ecc4df49c76b5bd2b912b6a3a9 +part_00974_of_02048.hdf5 6739d82cdbc7128ccb8239d3443a84cd +part_00975_of_02048.hdf5 ee90adf2f2a61f96428cd1b16cc16e84 +part_00976_of_02048.hdf5 67fbec91ceb5f72102e78fbe949c4b18 +part_00977_of_02048.hdf5 ea0c9abda530a702024877e71e470830 +part_00978_of_02048.hdf5 d6e71de3ccc1beabf2404ba8e63d0aba +part_00979_of_02048.hdf5 c291b816e4a9077da0807a3600bdbb88 +part_00980_of_02048.hdf5 6d886d788fda14e9869d67927bfe94e2 +part_00981_of_02048.hdf5 21f64a0df41d3dddd894f9cf780b4819 +part_00982_of_02048.hdf5 a0d5c73560c5e06ea666896d9801282c +part_00983_of_02048.hdf5 1c3b30e47dcca76adbd6ca7f9e768704 +part_00984_of_02048.hdf5 592bf985115dcbd3c020525cff19cb64 +part_00985_of_02048.hdf5 3bff0abde53100a1d4cff9f9373bc832 +part_00986_of_02048.hdf5 944bda7865f01f3b1f6dc4b38a1c2223 +part_00987_of_02048.hdf5 3ab6ff47ecd24d2e73b6dd0a653f31e7 +part_00988_of_02048.hdf5 a556cfcae43736b89004ba84c062b1cf +part_00989_of_02048.hdf5 122ae6e8cc7d843f96d3fd0eadf34397 +part_00990_of_02048.hdf5 679ad3495e52d86d7fe533ea153fa8ec +part_00991_of_02048.hdf5 87f2a71224a5e999a869c6a6aa0642b3 +part_00992_of_02048.hdf5 948545d9faf02afa1e95ce18c8e8f321 +part_00993_of_02048.hdf5 985a773d415983e4ad88d2d7ae24d8d1 +part_00994_of_02048.hdf5 e14b9616a5b890c1132a2d59eb95eca5 +part_00995_of_02048.hdf5 7d337d60f219950d6558ae896ae81547 +part_00996_of_02048.hdf5 77f36279ec56936a825b7bde0e9bddca +part_00997_of_02048.hdf5 282d9e490be62f27c2fd10b44857495d +part_00998_of_02048.hdf5 984408e3c6b0b21ad7e59e87c9deb0ba +part_00999_of_02048.hdf5 91f41182c6e3f91ff8db8fc717ed3bd1 +part_01000_of_02048.hdf5 eb4944d638ef35c6a2f7d6c612c29303 +part_01001_of_02048.hdf5 4ecb6f9177bd114be05658b717f3a278 +part_01002_of_02048.hdf5 8daec54aa51233c33d39f9cce79d2e20 +part_01003_of_02048.hdf5 7dfd20714bd1d563d29908b823db75cc +part_01004_of_02048.hdf5 bb8de8fb4fa6dde3ddcb8053f419209d +part_01005_of_02048.hdf5 66b15ded614961781128926d639263fe +part_01006_of_02048.hdf5 22d056ab268ef16732f8b198cdc15f57 +part_01007_of_02048.hdf5 c45306a6d1ac54719c4f7246739c6bbe +part_01008_of_02048.hdf5 2b5f317256c7c446020a6f6001ebed6a +part_01009_of_02048.hdf5 f74d8ddef7d2da531a5bf20981681991 +part_01010_of_02048.hdf5 a834ed755255062d95566c765344a778 +part_01011_of_02048.hdf5 b7e90c0c8d1ae6e4b35ad7cedf2f1a4e +part_01012_of_02048.hdf5 4bfc91946f47aa1aeae98496956e5576 +part_01013_of_02048.hdf5 6e9cfcf4b8247f8c716114863db74059 +part_01014_of_02048.hdf5 f7e1a95a48fb62ea5d02b01c2a3827eb +part_01015_of_02048.hdf5 ccbb7c859d30ea69afe81842e213fa4e +part_01016_of_02048.hdf5 b54bbb6729f931d468ee906225888a72 +part_01017_of_02048.hdf5 d11c18c3acd28b8fbc2430700364a2a8 +part_01018_of_02048.hdf5 b74b7617716fa6815a8f808b2fc4f080 +part_01019_of_02048.hdf5 d4e8990d9a04b1991424ce61161e754b +part_01020_of_02048.hdf5 6bdb85b832c7e03a76b35c7750922ef7 +part_01021_of_02048.hdf5 f2d6a17e7b801850135c65fedb964c2d +part_01022_of_02048.hdf5 
5affb73b3c8f45cef4cfb955c7152ef0 +part_01023_of_02048.hdf5 69ad1b753f956b0d3b22e3b2685a5b80 +part_01024_of_02048.hdf5 b141ece7d6dec7ef834d1ec6cfdc59fa +part_01025_of_02048.hdf5 10c1b930952f19565553e9a3a6e13bcd +part_01026_of_02048.hdf5 360ac7a68f321c12e5a4e2ffa7e20e68 +part_01027_of_02048.hdf5 9b44ad5f9ef8aaab3ec11130420d0895 +part_01028_of_02048.hdf5 2af5337f9e13db07a10ac6322f7a2591 +part_01029_of_02048.hdf5 83cc49b717be7c1daf02aa67b2a78116 +part_01030_of_02048.hdf5 236d63ae14242182b70c4695ecebee61 +part_01031_of_02048.hdf5 a4005be83ede8f4d5047a2a15e3ddebf +part_01032_of_02048.hdf5 37a296c584a20393303a2ab4ea325c7d +part_01033_of_02048.hdf5 784ccc5e273fbfcfdb9331ccaa8f99f4 +part_01034_of_02048.hdf5 3112646c93dcba64adfaa0a09038d255 +part_01035_of_02048.hdf5 d70e0d942102320307cb1e3b80ba6429 +part_01036_of_02048.hdf5 2003e22e9a8a7930742cef68ebff40b2 +part_01037_of_02048.hdf5 601e729ab123198b39ac44a85361d4f3 +part_01038_of_02048.hdf5 fd912992295549e2703fdb0105f81717 +part_01039_of_02048.hdf5 56b072590fa2a0f38756fbd01fca5366 +part_01040_of_02048.hdf5 49e35701edf866153b531fbef95ed625 +part_01041_of_02048.hdf5 3136ac6bf0442c9345863d2b17306a9f +part_01042_of_02048.hdf5 8ea65a9a624e1fd6e42f1543af214125 +part_01043_of_02048.hdf5 662999254b703c7d7ae476f58319f298 +part_01044_of_02048.hdf5 80f5f9ce0120ebacc80e775cb8c8eb01 +part_01045_of_02048.hdf5 d4b88a67165c6167680e8ab1f452db12 +part_01046_of_02048.hdf5 bdba4699fd6451266e883a0936109bf9 +part_01047_of_02048.hdf5 d4eaba83802bfc5d153c31103e98c479 +part_01048_of_02048.hdf5 3a2be5a79f624e3fa07a69cb8a223817 +part_01049_of_02048.hdf5 cdb3fd9d61083d569b52f0b120bdaad9 +part_01050_of_02048.hdf5 36d17db8c1e67577e4b8a63feeac876b +part_01051_of_02048.hdf5 8f1bd5ec1ebca7a69c456f63a588fea3 +part_01052_of_02048.hdf5 81bc408294f94c6791abbcf0e4971b53 +part_01053_of_02048.hdf5 04cec5a07a728439948a3bf9ea75070c +part_01054_of_02048.hdf5 78b6455c29a211e77bbbb4913c3f9779 +part_01055_of_02048.hdf5 a026bda2389f2026a34539ffb2fb68a3 +part_01056_of_02048.hdf5 5db8688e2f408fb547dace49be32c521 +part_01057_of_02048.hdf5 63904c403a786df23ac43d04d4263275 +part_01058_of_02048.hdf5 d2e7bdbf45c09fdccfca27199ef13329 +part_01059_of_02048.hdf5 067f1f3f8506c9d42fe6328d972a451e +part_01060_of_02048.hdf5 8299064690009e8942e1aee9bc441ceb +part_01061_of_02048.hdf5 75bf59d24b0e93bb4d6d6aee6e321dad +part_01062_of_02048.hdf5 74f00d6f0d887d2ede8c57eb3d9e0c8b +part_01063_of_02048.hdf5 8c7f381d9879e8651a111042d0c1b65a +part_01064_of_02048.hdf5 9fe0ae79e14cd17b4a6a2f808894fd1f +part_01065_of_02048.hdf5 349e49daca6154ce8f4b505039be947c +part_01066_of_02048.hdf5 2e95e2e46a4556c1c9a7e3b101c64408 +part_01067_of_02048.hdf5 c0b7de6cf99162b25fc83ba34df9b811 +part_01068_of_02048.hdf5 3fe0b0c1e7972113d1d37a8414265d8b +part_01069_of_02048.hdf5 88c0dadcd583ac05f570f47c5b93a309 +part_01070_of_02048.hdf5 8a3b5085992318516319d3c5516d6f36 +part_01071_of_02048.hdf5 fce4b8cba2702ad176ae7036c9e85ad4 +part_01072_of_02048.hdf5 b3eaa253c5ec8aac9ee3a2c8fda286f0 +part_01073_of_02048.hdf5 82edc7abd21a8725615ba5bf3e3120a3 +part_01074_of_02048.hdf5 3c89a25c15fa36400c3fea169d120b2f +part_01075_of_02048.hdf5 34b60782bdb1eb84c15550cd514fd7c1 +part_01076_of_02048.hdf5 d826520674f80d6b740c6229200fbd7e +part_01077_of_02048.hdf5 fe756bdbb07a25905b0d60f43f597297 +part_01078_of_02048.hdf5 eb3d127b128b81b432f7979a446a0970 +part_01079_of_02048.hdf5 549d083282192c69729147d3b16bb47a +part_01080_of_02048.hdf5 f78e1aedc0ad3fc6135514984694298f +part_01081_of_02048.hdf5 454482e58b755c7a4a66d2540519f378 +part_01082_of_02048.hdf5 
3fea0aaa9f0411670da830fc0a580a1d +part_01083_of_02048.hdf5 7eb4fd98fb4b14fb51d7f92f73db782b +part_01084_of_02048.hdf5 9a5120774ba2f5a3bf207b40a117def2 +part_01085_of_02048.hdf5 16f206aaebf13eaa1c6fdfcc1868c11c +part_01086_of_02048.hdf5 a2998bf87b2534bda620fdef15856e19 +part_01087_of_02048.hdf5 06725c4a210ac8ceda8158ba46f30834 +part_01088_of_02048.hdf5 da0f6162fdc685d248ec8312601e3ae9 +part_01089_of_02048.hdf5 85a3e8dac662184337534b22467b6160 +part_01090_of_02048.hdf5 311eb30dcd5ecdcc85fb149a2aef8d14 +part_01091_of_02048.hdf5 c1ee41b4ed7fde41863d57fccc7ebf75 +part_01092_of_02048.hdf5 5e5a7d57f67d396b1de72ffdc58f474c +part_01093_of_02048.hdf5 2950d7620fe8f4edc58b4dbabc6de7e6 +part_01094_of_02048.hdf5 60d0507b9d08f387d3fbe5bbca8efd45 +part_01095_of_02048.hdf5 6c694df92e8e747ae6703f6d1885f05b +part_01096_of_02048.hdf5 22cdda2cab1824f7575270c84cca03b3 +part_01097_of_02048.hdf5 55160510b72d792cfc13943ed7a93b28 +part_01098_of_02048.hdf5 5fd0f9970e0802c129c518fd85e62fef +part_01099_of_02048.hdf5 a60ce740fa39fa4553d6e92746cf9a4f +part_01100_of_02048.hdf5 b6f83dab9e109d47f4657bcbb6883e3c +part_01101_of_02048.hdf5 745628c993fa6d6e3dc4883ae50982b3 +part_01102_of_02048.hdf5 5d4729f2232c2134e4e969d74da9a552 +part_01103_of_02048.hdf5 94446302ef1ca3c4b4aa5e9c5c76554f +part_01104_of_02048.hdf5 e55d41821f02db21c32509ec4f5c0aaa +part_01105_of_02048.hdf5 19174a5e8c161cc8d672f0d748238d51 +part_01106_of_02048.hdf5 b661752b018c8caeb6a1aa0aa31b4181 +part_01107_of_02048.hdf5 04d7bdd8c0b402c7a2a28571366cb7da +part_01108_of_02048.hdf5 ee8d02155fef1a6b6dce6b55725d6a3c +part_01109_of_02048.hdf5 883a3067b781faccccd81aa474d190ca +part_01110_of_02048.hdf5 19a3f2330e8b240ff110e34db6afecca +part_01111_of_02048.hdf5 fbc9f411ad7ffe96be34377513014903 +part_01112_of_02048.hdf5 74ea23bc051fa28e04d3c12fae62676b +part_01113_of_02048.hdf5 d5f53ada0f1c61c1b8ca782c3d441e73 +part_01114_of_02048.hdf5 eec96f163ddd0f93fe09d30b99d583d9 +part_01115_of_02048.hdf5 198d54047d5f43e9bb8538f2baca24b9 +part_01116_of_02048.hdf5 0f3253f432368d012d4050571f469583 +part_01117_of_02048.hdf5 d9c5cf3c6700362c5f30bab2e8e28d77 +part_01118_of_02048.hdf5 9d8f4c65605a07b7ddda69f4889a8506 +part_01119_of_02048.hdf5 7044805e7f9ccdee516a8a359b099700 +part_01120_of_02048.hdf5 108776606725d5e0f47a6e668203c80a +part_01121_of_02048.hdf5 37b7092a83a04de06196d6b52c68d393 +part_01122_of_02048.hdf5 8ce002d5dd777c013cb063736f3b9997 +part_01123_of_02048.hdf5 4b7c1caabbfea4fd056eb58a4e7a681f +part_01124_of_02048.hdf5 1452cf5923d2b2c1805e19a72c477047 +part_01125_of_02048.hdf5 3f8a4147a19d9cc8e3b0d3668d3121c2 +part_01126_of_02048.hdf5 457a75860b22c3189d8b7511cde5f77b +part_01127_of_02048.hdf5 170bcde395cff9fa2dacd2cd7410336a +part_01128_of_02048.hdf5 c49aea25531188e975c97807041a49b6 +part_01129_of_02048.hdf5 da5b8432daf01a56467a3f245f797e93 +part_01130_of_02048.hdf5 cb53c0f208f2da1fb81b731f525819fe +part_01131_of_02048.hdf5 faf20a6748baca0fc572d4a476ed658f +part_01132_of_02048.hdf5 7266d15d777ed7b6774b06363dcb5f09 +part_01133_of_02048.hdf5 5ad8dcdadbe8c3e26b7123f0ebc5a1d3 +part_01134_of_02048.hdf5 c8a75d3b67d0136a6ee8fb15ddc635fa +part_01135_of_02048.hdf5 9e82ef30d32d6a4e91ab392245844b39 +part_01136_of_02048.hdf5 bccaacb6584037e52b77d74aac6cc5eb +part_01137_of_02048.hdf5 93ddb2ef80d7d29665d3c05a0cfa799a +part_01138_of_02048.hdf5 b07d4e40fcb9154460034be7fd946e56 +part_01139_of_02048.hdf5 2c1db5d22f028729d81be33643c3bcc4 +part_01140_of_02048.hdf5 257f63f7d71422eb58fc1f513e271d16 +part_01141_of_02048.hdf5 e4afec7be02dbd06c5ec019f8165f158 +part_01142_of_02048.hdf5 
4ce4ef4f30f889a4c2b966622f8b8b68 +part_01143_of_02048.hdf5 bad075e13f75cc409d8c1f80e1f88a90 +part_01144_of_02048.hdf5 e132ed386a0ed493ae3f877ef4a350e4 +part_01145_of_02048.hdf5 f8bbd125337c2aedac48779521a2e691 +part_01146_of_02048.hdf5 5e6fa1c38c753976d83cdf2cde8d63da +part_01147_of_02048.hdf5 f660ad03f42932f3ce2d71dd6f1e1f1b +part_01148_of_02048.hdf5 285feb36c31b24611e71151cde8b7f49 +part_01149_of_02048.hdf5 ee38536e6a0b6b71ea3f228f58e3e228 +part_01150_of_02048.hdf5 f4021c38fbc1f8feace2306f1ec7ebbd +part_01151_of_02048.hdf5 ce74da5e3a6880e772905c4160d522e4 +part_01152_of_02048.hdf5 4615c0c1c2c639b789901b577304fc2a +part_01153_of_02048.hdf5 54b3e7f7ee15952d84b2fcb21e1cb0ab +part_01154_of_02048.hdf5 7c62c22f3135f61a62972dad8cf9ba6e +part_01155_of_02048.hdf5 47948f16d8359f297c6434278bec96e9 +part_01156_of_02048.hdf5 db1b3c11b70efb2c9fda14185861e30c +part_01157_of_02048.hdf5 f9848157a4ed3d783112971bbda45014 +part_01158_of_02048.hdf5 51a5678e9f2adada08ffeb8096b43b05 +part_01159_of_02048.hdf5 a037196ee04a00ec563b5a6f69576918 +part_01160_of_02048.hdf5 b06ffe0556ce1fa892f161d453cfcde7 +part_01161_of_02048.hdf5 40ea1e439a5b8f9a30ab04c857a50ab6 +part_01162_of_02048.hdf5 100144ddc0cd9e2668b5544244020609 +part_01163_of_02048.hdf5 83c76787fcc399242c0c3afadce3cbb7 +part_01164_of_02048.hdf5 1f80662b61ccaa4cab8ea428b8de8ad7 +part_01165_of_02048.hdf5 21cca365441698fafde522db0e88f474 +part_01166_of_02048.hdf5 be3a85a18bbcb8c692e9c936d3fae151 +part_01167_of_02048.hdf5 12e052fbb240c78b1b36fcce30bba558 +part_01168_of_02048.hdf5 6a4b787feba85528eb039274e995e150 +part_01169_of_02048.hdf5 a766268f658227494bdffd884455b75d +part_01170_of_02048.hdf5 f816a002f6ce1245b0b83f90878198d3 +part_01171_of_02048.hdf5 3105ff386aa31b2ec3c54743da1795ce +part_01172_of_02048.hdf5 0c488420a62a48e79bd170f9dccdaa61 +part_01173_of_02048.hdf5 6e12f4397ab0a19ceb78302b24f49016 +part_01174_of_02048.hdf5 48a0ed9340afd9cc866405d47a51b9b5 +part_01175_of_02048.hdf5 a52d889d81aa3664456fe6ce85b09f82 +part_01176_of_02048.hdf5 feb3e8f3a23ba9cff1224c91974147c0 +part_01177_of_02048.hdf5 8a0abc748a08445807d48b9b96219126 +part_01178_of_02048.hdf5 073417c986c5cd4de66c754e4487820e +part_01179_of_02048.hdf5 48c0e496a6e3c694ac9b9abe4b21a1e8 +part_01180_of_02048.hdf5 766138ea57024561bc405577057f7870 +part_01181_of_02048.hdf5 671632541f12d86f5962cf9c1f4bc59d +part_01182_of_02048.hdf5 c1e8baf9beba1319d1c3b40c1fb4ab28 +part_01183_of_02048.hdf5 9229fcba08f29d253e64d6ac11a187f0 +part_01184_of_02048.hdf5 6cd3ebd7a55b54d97041c61514da740c +part_01185_of_02048.hdf5 a50cc119ab1be86f8f76d3a28c31cc94 +part_01186_of_02048.hdf5 a2c39aa7b96ea08a04eb65dcbeef97cc +part_01187_of_02048.hdf5 38033ceea48882a7c4646828724e7869 +part_01188_of_02048.hdf5 c8d50e243029d84354620fbfedc34c68 +part_01189_of_02048.hdf5 b800c49dc8984f54baa03e491a1b9b9b +part_01190_of_02048.hdf5 75dd85039b0f1af77baa6d8826244b76 +part_01191_of_02048.hdf5 1a478d383731f7f23a1fc441d67044e3 +part_01192_of_02048.hdf5 375274955d96784892302bfc5d63595b +part_01193_of_02048.hdf5 1b0ba2d11efe69f61d03777de2a893ec +part_01194_of_02048.hdf5 dc40cd760e4be6b36aefd38e63731dba +part_01195_of_02048.hdf5 951c9f9a47db0bdef20a951b919a1a59 +part_01196_of_02048.hdf5 32181e84eadeba3ba3f05122f6e1edb7 +part_01197_of_02048.hdf5 76e60b2c568f5a54430df3eee2fe86c8 +part_01198_of_02048.hdf5 424f8e0be37929ab2307fdff3b7701a5 +part_01199_of_02048.hdf5 e96f8a4eb7f9eb747b986c8c1326b688 +part_01200_of_02048.hdf5 46c5ec35809fd9433f987b29a5b128d4 +part_01201_of_02048.hdf5 9961d326715619d6fc6d0657af09076c +part_01202_of_02048.hdf5 
dab5f328675e586959b43723d9798f9e +part_01203_of_02048.hdf5 eeb7e6f9d6753a6f6eb27efb2281e342 +part_01204_of_02048.hdf5 bfe9a3f166e5faa12aaee2c9b9306866 +part_01205_of_02048.hdf5 a579d123ad4eba9a3077d1e5c40a59a1 +part_01206_of_02048.hdf5 dd06c4571dfaa2bb69d37017b0ec6be5 +part_01207_of_02048.hdf5 6683e398b2f6314aede109753c252e72 +part_01208_of_02048.hdf5 83743c9ad94e185eeeed8b27b6e600a4 +part_01209_of_02048.hdf5 34ecc523bc238775d6cc7862766aded8 +part_01210_of_02048.hdf5 2ee095472b769ed1fbbe4b2c5e596c22 +part_01211_of_02048.hdf5 1ebfb46da8ada458c24637578afca65e +part_01212_of_02048.hdf5 9cf3941bd04cfc7ceab0ce1cbdbbc8df +part_01213_of_02048.hdf5 26a8022022107a499468cf9270846e27 +part_01214_of_02048.hdf5 027d4395d4ae8086e84501f7e3d9fcae +part_01215_of_02048.hdf5 2273d9c4d9cef8c7f024942bfb62a646 +part_01216_of_02048.hdf5 50d99a184e5f412f0242dc1066a18a5c +part_01217_of_02048.hdf5 d4cc54ac847e668c1e3c82a9b8681129 +part_01218_of_02048.hdf5 08b5d1846133d7974589f21e8b29f59c +part_01219_of_02048.hdf5 89d9679061df71b83f2f27f27fe2dce9 +part_01220_of_02048.hdf5 2677d4ad7ad3113cd632cab231106f14 +part_01221_of_02048.hdf5 f3736c8cfafdfebcee26b7bcc3400655 +part_01222_of_02048.hdf5 5941fc0d884fc669b5e0d72472ddf8ca +part_01223_of_02048.hdf5 4d1e200bdca802c5901fdb91410e6c00 +part_01224_of_02048.hdf5 0b2f84f7b743eb53bda7beecb1f5e262 +part_01225_of_02048.hdf5 8f9a43afae2246a30dcad6d011d192a4 +part_01226_of_02048.hdf5 e61389952ecac75f6c5e03cf4aeee5b9 +part_01227_of_02048.hdf5 7ade615414096876a9d34a5aaf7bdb8c +part_01228_of_02048.hdf5 7e2ec382541c94a6531dde1539e60629 +part_01229_of_02048.hdf5 97538b268d9c988607314e31c74b8105 +part_01230_of_02048.hdf5 aa8e099ba50641873a4ae53af9bd99a4 +part_01231_of_02048.hdf5 a7d16032614f724b996334fac62cd620 +part_01232_of_02048.hdf5 dd848e21025d63d00d8ccc8faf0fa585 +part_01233_of_02048.hdf5 c52d1e1724f67951fc88f90f0ac83cd8 +part_01234_of_02048.hdf5 9dd8f5a99f3059b51472e3196fb1a9ce +part_01235_of_02048.hdf5 26c86f41c120630b6a8af197a09ed7c9 +part_01236_of_02048.hdf5 2b1a554da2f48d137eec2f9d85842e97 +part_01237_of_02048.hdf5 d27b9a445b76ddc15aec1e1f13f4d75f +part_01238_of_02048.hdf5 643898daacc03f4e1024786c8b65f4d0 +part_01239_of_02048.hdf5 7784bbdf28d3d48505b939e176897367 +part_01240_of_02048.hdf5 da5b8354e2aa6ab3da1f50f5e64bdb3d +part_01241_of_02048.hdf5 36dd432640d6834f993264c1c95a8229 +part_01242_of_02048.hdf5 f689b7471d349d4f922a1849f1c57b81 +part_01243_of_02048.hdf5 8bca7e391fe5b93b3044fc7d1c30ceed +part_01244_of_02048.hdf5 3afaa5d6b83b2bd80a3dc37216eb4457 +part_01245_of_02048.hdf5 574c700f77a4dac713be6ac481a6af28 +part_01246_of_02048.hdf5 3c52baab58d13a0dfb22a247c0448b30 +part_01247_of_02048.hdf5 a8899f0e065d8e67ade803e57ccf49c5 +part_01248_of_02048.hdf5 56d85119f82967d93c5124b119a240f5 +part_01249_of_02048.hdf5 aa9d7eff72dacb1dfca2e776199d5fc5 +part_01250_of_02048.hdf5 9dddff5b9fca267f0931ea27cea48d32 +part_01251_of_02048.hdf5 e63908a10bd888d26163b4130295c638 +part_01252_of_02048.hdf5 103753e040dbc696609c60b91d21d223 +part_01253_of_02048.hdf5 8cb36a24486299e5541891041e787fa7 +part_01254_of_02048.hdf5 95d734e3089b39296ff0c4286dd3f950 +part_01255_of_02048.hdf5 d018b9b4ef8a475f9c4edff675df56da +part_01256_of_02048.hdf5 1a555bcdd7f0a862964ae375c1b22bf8 +part_01257_of_02048.hdf5 24a3cf9c33cc25bf3269e21b5658f24e +part_01258_of_02048.hdf5 45a472f598e43f6872dbbac6b71f0446 +part_01259_of_02048.hdf5 69da9df4669ef6b4a19b913cc9a31bd7 +part_01260_of_02048.hdf5 cc86b2f7e6481d11033c2a398e318a89 +part_01261_of_02048.hdf5 28b15f18b99ab747d26bacdde07d0da0 +part_01262_of_02048.hdf5 
96728fb85639096228807125a5c40506 +part_01263_of_02048.hdf5 1ca48f2a890a194ffd19d4339f0e3be5 +part_01264_of_02048.hdf5 bf11dca036e9935c3fe75565bec41d80 +part_01265_of_02048.hdf5 f44b6dae01b661c5e1f46cbb76844d5f +part_01266_of_02048.hdf5 6f6cbc6e6bf242daa7051a9d1f8bd72a +part_01267_of_02048.hdf5 20c0f0f903508a03436b67e636f8b33a +part_01268_of_02048.hdf5 f632b2d20f8376fc7a3f5ced805bd2df +part_01269_of_02048.hdf5 5a1f80e304557974ba10661b956ae6e7 +part_01270_of_02048.hdf5 1191e7454dca9fd45d077ceb45e3ce9c +part_01271_of_02048.hdf5 66fb3535de1ac37a759fcaed2d3edda1 +part_01272_of_02048.hdf5 47812fce936cd14f7e3550e782032452 +part_01273_of_02048.hdf5 6a1aaed35c4f274b1baee03b0d82d207 +part_01274_of_02048.hdf5 26800924b6618807f7abe93619f7cc59 +part_01275_of_02048.hdf5 a7c39164099fbc2c469c5ca4f740c824 +part_01276_of_02048.hdf5 f237fdec353fb8886119071de2efbe91 +part_01277_of_02048.hdf5 89bf56e49012f9c4be319540ed29aa74 +part_01278_of_02048.hdf5 621d07eb28432bac0ccdc3fe7037b670 +part_01279_of_02048.hdf5 eb77e9b60d55c1ac72dfed8e600a3de5 +part_01280_of_02048.hdf5 23d7cfb28a49748ea7c5fab0675f7283 +part_01281_of_02048.hdf5 fb2ce058ad89dfbb47b9772d19892af6 +part_01282_of_02048.hdf5 9d12c1bbed81973e093654e378122a96 +part_01283_of_02048.hdf5 405a3bc1bdd8aea8700969cbc2e3a91e +part_01284_of_02048.hdf5 c807432cb2cb5e5a5e3a01d97bdf8713 +part_01285_of_02048.hdf5 ad0a87efa8559f989e6dddb0ccbdda26 +part_01286_of_02048.hdf5 df914cb6caa99d817624f07d0e5cf580 +part_01287_of_02048.hdf5 ba6d2a73f75b36429ed6f61e8993ed54 +part_01288_of_02048.hdf5 48271ca2dc156f3b8bdc91a9f0633c63 +part_01289_of_02048.hdf5 049ad730dd701351422def6da4e3bd88 +part_01290_of_02048.hdf5 9e3e5b282d68721d17f9f2aa0bdbfec5 +part_01291_of_02048.hdf5 fb027eb6c2c30c2b69f912ad81b51034 +part_01292_of_02048.hdf5 67e0be12da4c765f4a8f5669f4593b19 +part_01293_of_02048.hdf5 8443276b192dfb1fe8d95501e536d382 +part_01294_of_02048.hdf5 c89342feade773196847601ba402db02 +part_01295_of_02048.hdf5 afb2b592a34704f1253505bf1148d062 +part_01296_of_02048.hdf5 59615ef73e383a3bfc2701e1982bed7d +part_01297_of_02048.hdf5 bd7ed53af748752e1a38dfa0c647d5a5 +part_01298_of_02048.hdf5 5b70e0ec0f2c68bb2c25f9bca73d4ae2 +part_01299_of_02048.hdf5 e67172adfa177abbc292f9c9ac931c50 +part_01300_of_02048.hdf5 5b968e66ea91573ce822717f176206ab +part_01301_of_02048.hdf5 612c305d47b22e574995621e478f1d66 +part_01302_of_02048.hdf5 6377d2b4505208b3d55080af301770a9 +part_01303_of_02048.hdf5 c57643e25d46d906a7c3f99ad144d1bc +part_01304_of_02048.hdf5 b9db7575eff138f39cfbde05bf19a018 +part_01305_of_02048.hdf5 b87b09f14df1ca80f9975abd58bcd30b +part_01306_of_02048.hdf5 c09b60ff5a1c9305339b96af9a2e5bda +part_01307_of_02048.hdf5 1586026fdd4a0399c7fd67b152a5b11d +part_01308_of_02048.hdf5 9aeaec318f6bc71ad613152e5b693bfc +part_01309_of_02048.hdf5 21feb217502cd9937421cc857d502b0f +part_01310_of_02048.hdf5 cc3c30dc20790ba3923a992c6496977a +part_01311_of_02048.hdf5 2c288a1e9786ff949a19601f30ba3836 +part_01312_of_02048.hdf5 7afef7ffa0f4bf2e97444fe23becf457 +part_01313_of_02048.hdf5 803355b6e79fea94fdb5bad3929e43b6 +part_01314_of_02048.hdf5 0853977bfc9cc7e57ab2ab8cdd543bab +part_01315_of_02048.hdf5 b7522ce8a58e205b54ac0db002abb60c +part_01316_of_02048.hdf5 03368ce7fa79717a32c5cb875f69fb6b +part_01317_of_02048.hdf5 a7c1e1b53b7ac851386e83c7b8ad8a78 +part_01318_of_02048.hdf5 4882836c54f0d98fe8d1978b3908b5b0 +part_01319_of_02048.hdf5 26360d926765ebe3ffa9e4bdf6911b7e +part_01320_of_02048.hdf5 c4b771a008b8fa698cde754c94d7663c +part_01321_of_02048.hdf5 eb71fecd2596087f2bfdd0363cfee24f +part_01322_of_02048.hdf5 
980f1c2af0ff85cb9d816cc0f6d7819f +part_01323_of_02048.hdf5 f03911986a0494634422af9e5b846e02 +part_01324_of_02048.hdf5 44fb704c585ca76602fcf5f71e575d6a +part_01325_of_02048.hdf5 bd5a358abe430ea37d9c9ba800307f25 +part_01326_of_02048.hdf5 cc69f851205b3760fbdf7724a5239488 +part_01327_of_02048.hdf5 1618733215fdd9922b52415a1a937469 +part_01328_of_02048.hdf5 9abfbfd4f3b2de2bf6c1dc54ad9fd625 +part_01329_of_02048.hdf5 53de752292b5aabb2cfce812f88ca11a +part_01330_of_02048.hdf5 892f4ad58f1684e20dae77b523a56ee4 +part_01331_of_02048.hdf5 94a2e23801dbd188f41ea6493796df1a +part_01332_of_02048.hdf5 2a4ef58e27e870650f04405793cf542c +part_01333_of_02048.hdf5 52ebf696901b1377273453ead9c38ad9 +part_01334_of_02048.hdf5 78f216b57cad3dcc7c653bb157df6b64 +part_01335_of_02048.hdf5 d6daae29be7b0d670694f1643845a316 +part_01336_of_02048.hdf5 9007c840b60e0f0d26a20184568ab3c1 +part_01337_of_02048.hdf5 7aa50cc234bc1bed3bc044d205484ca2 +part_01338_of_02048.hdf5 90544a22415b16ff276e06e8a5b4cc4e +part_01339_of_02048.hdf5 95a6ae14ea03943f8ea215d723149158 +part_01340_of_02048.hdf5 4e5a2e1190b649cc6d2a144006a29169 +part_01341_of_02048.hdf5 1cb635a3a54f7aca315c5fe54d1ff5a0 +part_01342_of_02048.hdf5 0984591bf6b474fa1b96f61625e4ea66 +part_01343_of_02048.hdf5 baeae783cc6748e273e2d8b2709938a1 +part_01344_of_02048.hdf5 9ae28ec85c85adc2b7a907f234d3e616 +part_01345_of_02048.hdf5 f249e8578dac1c1c3095f854cb8cb8d2 +part_01346_of_02048.hdf5 35b3402273dde425dc3cf447cacdb648 +part_01347_of_02048.hdf5 7271a7b0c9b52624439396e1f9466ec5 +part_01348_of_02048.hdf5 0bf36f8bc5566d16238d049a73d94d12 +part_01349_of_02048.hdf5 bd35a57dbdb3393dc14c9f1fa0a8b1e4 +part_01350_of_02048.hdf5 237466fb2a0974c3f7dff2ebf83d05fe +part_01351_of_02048.hdf5 4e5f8402bbffd8df2830ee60e6b4826e +part_01352_of_02048.hdf5 82f1d457b57de4cace571ef3b1a32077 +part_01353_of_02048.hdf5 37e719fc74696c274953fd05546bb92a +part_01354_of_02048.hdf5 846a07a09c62edf4883d11c5613f1900 +part_01355_of_02048.hdf5 06a63b7b89d7f0e230b03b93da033933 +part_01356_of_02048.hdf5 0369d875c795b88304d2e3805d266df8 +part_01357_of_02048.hdf5 7d9b157212383e1d47b5cdb3bb9870a7 +part_01358_of_02048.hdf5 7856d04702a94410e10080a9e26391f9 +part_01359_of_02048.hdf5 d1cb333acaa3b8dfb6bb997b7b028d2c +part_01360_of_02048.hdf5 ba699a3b6223a08d1785bddbd645a263 +part_01361_of_02048.hdf5 235c833c6309e882e8d59868b638d404 +part_01362_of_02048.hdf5 c5b4f912616fe3580ca576ceb1329bc8 +part_01363_of_02048.hdf5 f36d2694d210156444b1300b492cb4a0 +part_01364_of_02048.hdf5 8ffc08122dc47f188df87dfbce156606 +part_01365_of_02048.hdf5 992d1e5d3355c84031e3a0aedc04df27 +part_01366_of_02048.hdf5 a078c643a22c6622a77623d258ac1d54 +part_01367_of_02048.hdf5 19f2e3da76a1b100587c3e0d9293b980 +part_01368_of_02048.hdf5 29d6cbfe4af21360f2b9395c804ce79d +part_01369_of_02048.hdf5 75183122f98417a170b142815c144c8b +part_01370_of_02048.hdf5 8a60da88451917da6e83e8a5feecf006 +part_01371_of_02048.hdf5 3c056e3695334e9b555d8b71556c604f +part_01372_of_02048.hdf5 bbee3c3e15627fb75d69fe5921ab542d +part_01373_of_02048.hdf5 fd82b7d1b69a79577a6f6dfa392cf3cd +part_01374_of_02048.hdf5 48bbc78f12b12d46d45aaa1f14a8dc6a +part_01375_of_02048.hdf5 e17811f0d36aa999d74d42101c2f4ca6 +part_01376_of_02048.hdf5 307e9e0314d464b53fa5a747505e648f +part_01377_of_02048.hdf5 b8ea5d93cc67e2408d4e48bdf79c6b61 +part_01378_of_02048.hdf5 f25aefb0441e355b00e5685c640c8da6 +part_01379_of_02048.hdf5 d926cacd755b29ef4433ea0296677b6f +part_01380_of_02048.hdf5 fe7585fdfbb794d615ec99caa0ece54c +part_01381_of_02048.hdf5 0161f7e0c87131c03549db75a12641f2 +part_01382_of_02048.hdf5 
8632cadc67b576ab3e5bc83c20099478 +part_01383_of_02048.hdf5 b60deaf0a8f8f1b2c14dddb3c0b44220 +part_01384_of_02048.hdf5 4ef862ff3bb5a8eba9fc541b90049f39 +part_01385_of_02048.hdf5 d522093158df2ac834666a5b7652931e +part_01386_of_02048.hdf5 8cd1e45ae8fddbe73a1f95da2c7147d6 +part_01387_of_02048.hdf5 cb69cce7c38562545f2421972830318f +part_01388_of_02048.hdf5 696ac1cf32c6722f25d46ceff05b4325 +part_01389_of_02048.hdf5 2bd30de9e1be67680587327ff24110bc +part_01390_of_02048.hdf5 d21ad2cf13bfa063471d0211933ce198 +part_01391_of_02048.hdf5 221ceab04b352978ad728218e86c51eb +part_01392_of_02048.hdf5 0adb55f24d24976b1abd48c3fb94e5d1 +part_01393_of_02048.hdf5 e32c890dd5c2aa3b892ca0217708f3f9 +part_01394_of_02048.hdf5 0495098fe4025d1fb037a49a93d49346 +part_01395_of_02048.hdf5 e0f5a4aa05b03b432417e913c75343fa +part_01396_of_02048.hdf5 81d412aaac1cdf7d88f89ca4848c521a +part_01397_of_02048.hdf5 9b2c646658c7388b426dea0bdc80c9fb +part_01398_of_02048.hdf5 417d1a40930857f30a430342b194e446 +part_01399_of_02048.hdf5 3219353623fb14d4d9ddb2dd53a6138a +part_01400_of_02048.hdf5 0716bf9267446570740333b81e2ec95b +part_01401_of_02048.hdf5 471585e5b1573e7d9de166a5627d0cbc +part_01402_of_02048.hdf5 afbb1f808cab59d59df7c6d0f22a0f45 +part_01403_of_02048.hdf5 c68bb386ebd4fb7a3a24d90797bde6c5 +part_01404_of_02048.hdf5 e68662a5151a238b6d0a93ae1f78d8d6 +part_01405_of_02048.hdf5 84da5f53f9a33b2f3099885f6f90457c +part_01406_of_02048.hdf5 8116249e1ae357d2a92588e1959a1da2 +part_01407_of_02048.hdf5 a57c9cdaf06b1ed54bf4ac1bd35bb13c +part_01408_of_02048.hdf5 2c024a8168be2dad01deb3f298d3a4b5 +part_01409_of_02048.hdf5 ccde73a1f1a8fc634a4b312c42146f2f +part_01410_of_02048.hdf5 8d5154cab5e4a4a61f74fdd7f9d9fdc0 +part_01411_of_02048.hdf5 48f8977f655404969e7317818285320d +part_01412_of_02048.hdf5 c95d1048343111a31997194e320c1adb +part_01413_of_02048.hdf5 454734e3ca19ec744ea9f796d6bf35ac +part_01414_of_02048.hdf5 0aae7ece14ff81be595ff75ecfcdafb4 +part_01415_of_02048.hdf5 efbfcaf976e4d71551e5289f762bb600 +part_01416_of_02048.hdf5 9f9c8044793b40fe1874ea9ef6784c52 +part_01417_of_02048.hdf5 ab1f35bd52ba5aba76b89922cc23a3a8 +part_01418_of_02048.hdf5 b64c985b44780ba36c1933f11abf039f +part_01419_of_02048.hdf5 6cf9cd746acc9c9ceeb8ef3b10dfc557 +part_01420_of_02048.hdf5 4be09cacd92562743c8841d05ad46af7 +part_01421_of_02048.hdf5 cb24d2c80033ce82ca2fe25646787d62 +part_01422_of_02048.hdf5 066819c714d5f71b47c3012ff15d6e3c +part_01423_of_02048.hdf5 cb2d4beeff28e483a7b9e94a7e1a1f0f +part_01424_of_02048.hdf5 47d4b83c69da1442017fde5da7ddbb0e +part_01425_of_02048.hdf5 fe827db52961c0c8ef9ee65fe5fcd090 +part_01426_of_02048.hdf5 828caeb40d76becff20199f7c16c0e12 +part_01427_of_02048.hdf5 183f1f75ecdfabb9db90fb5af1287067 +part_01428_of_02048.hdf5 de726593c810606038e8a764950f3621 +part_01429_of_02048.hdf5 5ff1ab82d1b695fdb2ed3faa15e01e84 +part_01430_of_02048.hdf5 12bde6cb376d1254bddc53096b4c94a4 +part_01431_of_02048.hdf5 3702f799ffd5fecbbf9a5e91c1565e74 +part_01432_of_02048.hdf5 b9dd0ac5a8aa7cf9e9a773eace5e5a9f +part_01433_of_02048.hdf5 9a1612fc3045f1ba1a96b53e4c7d3db5 +part_01434_of_02048.hdf5 4a8a5ccedad8fe03cb4057fc29ce0618 +part_01435_of_02048.hdf5 bc3e9e2f6e2b9fc2957b12e47c7b836f +part_01436_of_02048.hdf5 2e6dc72824eeea1755cac28ef6deb90f +part_01437_of_02048.hdf5 e25eb94feee39a3c679f74ba52034b4f +part_01438_of_02048.hdf5 05765de5af1a27d96bd3a7519042a649 +part_01439_of_02048.hdf5 0e8d7c827d26bb4785fe335c07c8384b +part_01440_of_02048.hdf5 2793f5a45139ff99fccb835b888a057d +part_01441_of_02048.hdf5 77342feae55379e366980ac505f624b3 +part_01442_of_02048.hdf5 
a5cfdba01f21a2b34b62819fb070bdb2 +part_01443_of_02048.hdf5 3d91749e2f087e5fcecf72b923bc82cf +part_01444_of_02048.hdf5 a581662a0cd87c0976411d3bcacaa9c2 +part_01445_of_02048.hdf5 2ba21b2ad09fcedda120837137e230ce +part_01446_of_02048.hdf5 c53cc892436823be7976d26242e7425a +part_01447_of_02048.hdf5 564e08f6e41d50583de6cc1c9147a8c4 +part_01448_of_02048.hdf5 284e9a3a495fc1338fe572997202c74f +part_01449_of_02048.hdf5 7b2f313f47779109f137cedcf2bb6a6e +part_01450_of_02048.hdf5 f82bef0c32e6976db8d59bf714f6aaf0 +part_01451_of_02048.hdf5 dffc97d2451d90071330bcd953f3d3d1 +part_01452_of_02048.hdf5 047ec0afdbe85750175a32b4ad6965d1 +part_01453_of_02048.hdf5 6b2f9a931f51d77ad452f3e7d25090d6 +part_01454_of_02048.hdf5 531ff3b05b2b50ee9a4be344eac1b7b6 +part_01455_of_02048.hdf5 5dc586a1f1e4b9edac68871341d56911 +part_01456_of_02048.hdf5 c059afb15cc175138d7a8503050520a3 +part_01457_of_02048.hdf5 62eea019202b27b614f6fad436b5b840 +part_01458_of_02048.hdf5 715ac633cd4c6dd85cd9a1eb1a4a64b4 +part_01459_of_02048.hdf5 ae3fcaa4adbe71932b1ded9f36d4a351 +part_01460_of_02048.hdf5 243d7102fffbb1d3ff0966e6c754c340 +part_01461_of_02048.hdf5 1ce07c792f26bbda86d11b0e00071799 +part_01462_of_02048.hdf5 bc5909a34b5354ddc0bd8102e163c369 +part_01463_of_02048.hdf5 b0c3583ef57d0ec9c876c21f25fd09dc +part_01464_of_02048.hdf5 d3c8f1302319d502e8c6e52d88bb3cfe +part_01465_of_02048.hdf5 591ad09aa00ceaa1478d266043cb526c +part_01466_of_02048.hdf5 d166db9c4f1b82a022f65f5ab0712fc5 +part_01467_of_02048.hdf5 546ef2a3c5cd8e9c1c8f6ee222676691 +part_01468_of_02048.hdf5 8b2638288d87eea57586e726abf4a320 +part_01469_of_02048.hdf5 5338499fea901fed5c1005f774aa5b5b +part_01470_of_02048.hdf5 72c18e6c3d3bb34d710d5ca4c4f5562c +part_01471_of_02048.hdf5 179a4cf7a34205fdadb303359e038de2 +part_01472_of_02048.hdf5 cef2fbbe5b8a1344e4c36989a437175b +part_01473_of_02048.hdf5 c55356f14b7c65a2df193869748d61f7 +part_01474_of_02048.hdf5 fa18fadd52a8b5ed78acfe2bcfc130c9 +part_01475_of_02048.hdf5 434c9d51651ad8f2acae6c48c120921b +part_01476_of_02048.hdf5 ae6b4cc2d39dafc3f7b3dad42d20c700 +part_01477_of_02048.hdf5 46e138083734fe6da14a0fc0816e4836 +part_01478_of_02048.hdf5 44383ee726cfba0300c53f1cec1235c8 +part_01479_of_02048.hdf5 30bb347ffe0e50ca3c1a573655e7d5bf +part_01480_of_02048.hdf5 82d45171841128aa02def03a529c50b5 +part_01481_of_02048.hdf5 56bcc6f89141360b9ecd3e366fceffc7 +part_01482_of_02048.hdf5 ca1aa300444797276fa641d37029a6ed +part_01483_of_02048.hdf5 cf5e97ea6b5dbbd3932ae132aa2836f1 +part_01484_of_02048.hdf5 e4fcfb9c8921ba2a1096cc4591b94400 +part_01485_of_02048.hdf5 2523e9fba417211afde946bea99109a7 +part_01486_of_02048.hdf5 8eb71fd846962d25d701977acb603506 +part_01487_of_02048.hdf5 9a97da503fd7b115b0bed8db011ed88f +part_01488_of_02048.hdf5 6b5ff841c64beae796b2f82494264a41 +part_01489_of_02048.hdf5 70d56cef6673e41e0109cae2cf7a6e96 +part_01490_of_02048.hdf5 2d65618d0bd1d310a153eaeea8b937a4 +part_01491_of_02048.hdf5 ea0dbd6fb4d2dbb6dc683478cfa8bbf8 +part_01492_of_02048.hdf5 0b982f789fe2af48a18b7b6753f80775 +part_01493_of_02048.hdf5 39437c4ad90c3a028c0429eb9b65899e +part_01494_of_02048.hdf5 81cad023b65f5f8500bf49ed7f153a9a +part_01495_of_02048.hdf5 8b362987102129dd0edd9db01a856d10 +part_01496_of_02048.hdf5 b0c3d514ff77013d6ccb5d934fee1e7b +part_01497_of_02048.hdf5 b7b2a48c929ff8b5e2a227843cef2856 +part_01498_of_02048.hdf5 fb054ee92a996a57959f053529a567a1 +part_01499_of_02048.hdf5 dfbf136fb3d787b8a501f3163f4c1cc5 +part_01500_of_02048.hdf5 27fadfe35e49aa02b94758b9ab63e3f8 +part_01501_of_02048.hdf5 7cf985d817bf3bab3e316b396e0fb1d7 +part_01502_of_02048.hdf5 
22aab2810bc157a8ed7ef2edc42336c5 +part_01503_of_02048.hdf5 0b3bf9269cd243291d5e3e5d3053714c +part_01504_of_02048.hdf5 68447d19b8e5c6804dd3e87cf570dec9 +part_01505_of_02048.hdf5 ff91a4f546e3c420785df8690bd2ae55 +part_01506_of_02048.hdf5 e72370c4ae564dd5278e8b6781605bf9 +part_01507_of_02048.hdf5 cdfb56a371af545676ddbcc89c90b371 +part_01508_of_02048.hdf5 d19641267050bf7de7cba5d7a83b2c46 +part_01509_of_02048.hdf5 720c8df2a92d0b42b000456484e3c92c +part_01510_of_02048.hdf5 9dd6c1eb8241c311a487b2a4b209fa4c +part_01511_of_02048.hdf5 0987d90585ede88ed7c4dbdbb6bbe19c +part_01512_of_02048.hdf5 ecae97fcad4575ed433ff8a1371dc2c2 +part_01513_of_02048.hdf5 7cf56cbc0c61af4b9511259a8ef30d0c +part_01514_of_02048.hdf5 bbce9cb109f8572086c8122655a731b1 +part_01515_of_02048.hdf5 67bfa6c9ff3fc687b63d4e39815809e0 +part_01516_of_02048.hdf5 afff8164ada1a81e932846f1610cb1b7 +part_01517_of_02048.hdf5 ec582f637f8de7b9b080ff5d3e970620 +part_01518_of_02048.hdf5 59272c86752e2f2c02234d4005cb96b3 +part_01519_of_02048.hdf5 77f33f380fee21051cf3953c957c29fd +part_01520_of_02048.hdf5 cf04fb39b877693d84afdf3f13f535a5 +part_01521_of_02048.hdf5 202ba0be38a8f3bc0811807cb39192e6 +part_01522_of_02048.hdf5 1ba5fb023a526cea7cfb3ed452a81022 +part_01523_of_02048.hdf5 27a70d331990ce0d5e8197645d42d9e3 +part_01524_of_02048.hdf5 db6d4a47a82abfd2bfb12c164ca251bd +part_01525_of_02048.hdf5 5bd8cd0b2f3278c75361b8ea931209c7 +part_01526_of_02048.hdf5 bebe0b342634b6409f47cb279e94d495 +part_01527_of_02048.hdf5 9ac291e99c30ffb3b18079c2a0db411b +part_01528_of_02048.hdf5 534d1018628dd7e230037e6dd5c5956d +part_01529_of_02048.hdf5 c48db89c4f010b67bc3072db7981f4bd +part_01530_of_02048.hdf5 652f360d4ddeb690fa77395913dcb610 +part_01531_of_02048.hdf5 9b1f2326d94ccee0b4278d4307761d45 +part_01532_of_02048.hdf5 4acd9e1ce7a64fd35b4f8fd78035d43c +part_01533_of_02048.hdf5 c4065853da4547aa5150617c93bbe0f6 +part_01534_of_02048.hdf5 f80486eb8ac962c954967d37d646b45d +part_01535_of_02048.hdf5 2de7bb62a81afd8dc020445a73cac4e8 +part_01536_of_02048.hdf5 3356c174771f0bd168680d71d1f15c44 +part_01537_of_02048.hdf5 ab367b9573d7039c5b6615037377a900 +part_01538_of_02048.hdf5 3f2a34a77719b251e4cce8f88ffcd469 +part_01539_of_02048.hdf5 939803c6e045599a6ca9a71ef2801299 +part_01540_of_02048.hdf5 b006d710f736123608b2cdb10d28465f +part_01541_of_02048.hdf5 40c84c5b0501c774c8d7ac5b5b7cb1e7 +part_01542_of_02048.hdf5 2b1778830499511f3566d2052ad69219 +part_01543_of_02048.hdf5 041cd840b361a4b0bc33e5c055c65f78 +part_01544_of_02048.hdf5 a5ba9a69285924abbce4deaad584b975 +part_01545_of_02048.hdf5 fda454daf01bab49e49e39811269d65a +part_01546_of_02048.hdf5 52e12633144c723d2cae35a749b50c2f +part_01547_of_02048.hdf5 85bd5d90b887337b114cbab10c29b69b +part_01548_of_02048.hdf5 21434d90e10d474c7f33fa7452cc9eeb +part_01549_of_02048.hdf5 308e31c5f678e740f14e6cffa6b4b489 +part_01550_of_02048.hdf5 54e3363a2d08b41add6b44f44476d6ee +part_01551_of_02048.hdf5 8c68c23f6b8c4ee9aa250298b8657639 +part_01552_of_02048.hdf5 3a11c4068b67a3ab56da20062307f564 +part_01553_of_02048.hdf5 43e27401bdd1a781f3e52a62ef1c10b1 +part_01554_of_02048.hdf5 0894f85362521d6ab43295417e73b18c +part_01555_of_02048.hdf5 97d9bb0a61478e169f5888ba9f8df466 +part_01556_of_02048.hdf5 194ce59c86102d7ebcd875dc5b9cebc2 +part_01557_of_02048.hdf5 420b34f3769fb247a8f0b5f5a3e8fe4e +part_01558_of_02048.hdf5 36a6620aade4a417a93f79c902edc61c +part_01559_of_02048.hdf5 1b63c085bdf5c9a15b5aab31aca5619f +part_01560_of_02048.hdf5 bc56cdeb140e2615800037253c8679a4 +part_01561_of_02048.hdf5 fd4449fd9fde4302f6b54e8b9a586be0 +part_01562_of_02048.hdf5 
896222438e1068c94ad43a02d7bc9e5d +part_01563_of_02048.hdf5 28f31cf17b144a23098dac883a74a8c2 +part_01564_of_02048.hdf5 13efdd394c398e3946d25c20748264b2 +part_01565_of_02048.hdf5 d41471273f572c7d4da81a00626e3c2a +part_01566_of_02048.hdf5 9a4dd3b4afaecfa05da2222ace14f1df +part_01567_of_02048.hdf5 e6abc18f268e0678e0ef12eb30ace9c6 +part_01568_of_02048.hdf5 92691786a59195a91780e4187d9ea6f9 +part_01569_of_02048.hdf5 5307b284158a7ace58bb7963275ee12b +part_01570_of_02048.hdf5 7f3e42b1839244f52ee70570e6213721 +part_01571_of_02048.hdf5 b1969914860a82555d430b146fd863b0 +part_01572_of_02048.hdf5 a880736e68df94dcb9f0000f1140d57f +part_01573_of_02048.hdf5 755ed03676cc31f447c11cf3aabf5744 +part_01574_of_02048.hdf5 6761b92ca5d1aa3a008f95ef3f1f92c2 +part_01575_of_02048.hdf5 9adc3a35c1f68163c768df3d3205e4b1 +part_01576_of_02048.hdf5 b888c31889e696d246740f097bd113bf +part_01577_of_02048.hdf5 4fdd063d6ac7a2167ab1b738256901d9 +part_01578_of_02048.hdf5 5acaa98ecb44188c66f8d48622d9fba9 +part_01579_of_02048.hdf5 95802d0fbac6708a63da245f05e57035 +part_01580_of_02048.hdf5 fbfd99ba70a660a07feea18436173fbf +part_01581_of_02048.hdf5 bbec2f2948fbfc528b11465ab08c0947 +part_01582_of_02048.hdf5 2950faca288119bef8164cf220830b64 +part_01583_of_02048.hdf5 2133d932e616cae55833ced0f6eb5083 +part_01584_of_02048.hdf5 26e10367a6d405773499fb73359314eb +part_01585_of_02048.hdf5 182506596a039e6ceaade98c0e4fc5c6 +part_01586_of_02048.hdf5 c70ca9cbecb1e7037fa60546cf7fad12 +part_01587_of_02048.hdf5 8835960dee55ee55e6c347b6613174aa +part_01588_of_02048.hdf5 32764306648a33d71935b99563f136f8 +part_01589_of_02048.hdf5 e31adf86693c1a3c9e2546a572a8fe62 +part_01590_of_02048.hdf5 53a3a6acebebf2d62e848c3695cde159 +part_01591_of_02048.hdf5 8a5882d032ab91ce8e132670ff87e0ee +part_01592_of_02048.hdf5 3d64ba642c038f7ff0c155a3d0b553d6 +part_01593_of_02048.hdf5 3292448af3b85bd46efb864acee615f3 +part_01594_of_02048.hdf5 659c43bd6a825b23a5e6b5551187e396 +part_01595_of_02048.hdf5 30df24b985a9191002d21d1c7ae0c5d7 +part_01596_of_02048.hdf5 fb9bd1cda7efd2c3d6e504d75abe5711 +part_01597_of_02048.hdf5 c63aab62942d03c72ba48e1a6c558963 +part_01598_of_02048.hdf5 4f164e2404d594cba7efddba95b6b8c5 +part_01599_of_02048.hdf5 d8781f4ede7c290a9bb14b793ba81ab4 +part_01600_of_02048.hdf5 02a7407cd823de3099d1527cdea1674e +part_01601_of_02048.hdf5 7159b431198b07b119fe85bb3ff7f9a7 +part_01602_of_02048.hdf5 fdbd6a63410884b4cf20dfa78f5309e9 +part_01603_of_02048.hdf5 b7e2cdcbbaaa91cc4fc07534467973db +part_01604_of_02048.hdf5 2e184962ae5d95e8cacf8fb4a57d1104 +part_01605_of_02048.hdf5 8bf1ad88c12193f708b1312086819628 +part_01606_of_02048.hdf5 4a40da30bc811a989141a932fc2e00c9 +part_01607_of_02048.hdf5 55cd3031453d5f4f8aeec1d2bbd80a35 +part_01608_of_02048.hdf5 5a2c56fe7709414be8becb2160f9a90a +part_01609_of_02048.hdf5 86929a3c6ce61ef17d9a8318795c82f9 +part_01610_of_02048.hdf5 f7461c152939ab8add690ea679c5f338 +part_01611_of_02048.hdf5 35da52f1b035c593311542e04c3b5a3d +part_01612_of_02048.hdf5 e1305937fda5cd08b8b613c960082f65 +part_01613_of_02048.hdf5 078486eff9e4405e2a17a9a5eb95dcca +part_01614_of_02048.hdf5 d94341e111318b682254c4a71f7b864c +part_01615_of_02048.hdf5 ee89289f48919c36f97b91da5857141a +part_01616_of_02048.hdf5 15684af2368b73cf3b1cc11336b9244a +part_01617_of_02048.hdf5 b6e32b935d416610c9a822087c134496 +part_01618_of_02048.hdf5 ebcf1992ad00456cd522bfbfeb917be0 +part_01619_of_02048.hdf5 9402b94602168a6474f7eee9b95301fd +part_01620_of_02048.hdf5 7f0a98f968207161205df40128d5661f +part_01621_of_02048.hdf5 d33a0d24f333e7563d3df75ce529528a +part_01622_of_02048.hdf5 
cf92b792c7eedc95b4c8e5a8ec61446d +part_01623_of_02048.hdf5 a727e4b2726b6eede5992feaa64b2090 +part_01624_of_02048.hdf5 bf73ee941ad85f67c924efb09c065be5 +part_01625_of_02048.hdf5 f5e3ba5d0478e0cb12b57e7eab933a5d +part_01626_of_02048.hdf5 3a5a7b671ab4d66e937d358dba91776b +part_01627_of_02048.hdf5 c5701163aa75be840db6b238693eaeb9 +part_01628_of_02048.hdf5 b2620e2821a993c05925c31200a3803e +part_01629_of_02048.hdf5 2c6196500d05b5d25e75d1a0c9166a3e +part_01630_of_02048.hdf5 ae2a5f829728c68cf5e446ba3b6e852a +part_01631_of_02048.hdf5 e1c12974aec5dbe80039396700b376c8 +part_01632_of_02048.hdf5 44f352fa829bd25d82cc1dcfa14cb7f6 +part_01633_of_02048.hdf5 2761ce8042e28ee5e33bdfbdc2a48156 +part_01634_of_02048.hdf5 e77c23191519446ff190b52db5f5a95e +part_01635_of_02048.hdf5 16958ec6c247047db49bcafeff36e643 +part_01636_of_02048.hdf5 37006aba43bc5e6e5f540259978c4875 +part_01637_of_02048.hdf5 b9ddd5a82011be709a8d67e45e3a0e4f +part_01638_of_02048.hdf5 87f811b2b7d60a2bbacc2a5ac800e095 +part_01639_of_02048.hdf5 8852c14c1528efadf189294b07c347ad +part_01640_of_02048.hdf5 092a7cca9c7ae12b0cd7fb17fad1c5ac +part_01641_of_02048.hdf5 994fff14c28ce1c0adcf377c0d64a40d +part_01642_of_02048.hdf5 603231620bf1e205bae3404ba51f0f09 +part_01643_of_02048.hdf5 34751f5d99d2d707e647be0b6253425e +part_01644_of_02048.hdf5 299540554b386e923b10d41164d7e315 +part_01645_of_02048.hdf5 7abeb93e28c42171f28e5c9767feba80 +part_01646_of_02048.hdf5 da5ec42db945886a463c142af167499d +part_01647_of_02048.hdf5 bf95e345e37ce2726fa0a24151a013c2 +part_01648_of_02048.hdf5 f6753c41c2c8237d134986d3b8dee9f8 +part_01649_of_02048.hdf5 4face88f6b70dcdcf15b1a00dc8b62dd +part_01650_of_02048.hdf5 baaa4fa27e45c56530a1af42da26f386 +part_01651_of_02048.hdf5 ec37b66ca34e76e1715c8c305ec6bf7a +part_01652_of_02048.hdf5 f169be00fca6847ae4cd37f100af2067 +part_01653_of_02048.hdf5 13ee42bdecd5bc500eb2a6e3704f0b65 +part_01654_of_02048.hdf5 cf431d39eaf45c4f9e622092fb68bb85 +part_01655_of_02048.hdf5 b61dc1da9cac07bf7959d9667987c3e3 +part_01656_of_02048.hdf5 67f7d5a45d83d0cf319aef19ffdb22c6 +part_01657_of_02048.hdf5 60ce884ecc61e443b7b2c183bae68674 +part_01658_of_02048.hdf5 bcf1dd49bbd556e7f8f3fd0ad6d0d5c4 +part_01659_of_02048.hdf5 1b6e68510d25c24c86dbdd594a54b661 +part_01660_of_02048.hdf5 e3d778042906e5e20a1d93fd79079ae5 +part_01661_of_02048.hdf5 805a26887c792c6f2ef5ea0c18058723 +part_01662_of_02048.hdf5 1ede28cb8cba255465507444f45d4ec0 +part_01663_of_02048.hdf5 8a7995d26ba9272b1d0c804c5bd3ab9b +part_01664_of_02048.hdf5 47db4e266271d46d6792987ab9c25184 +part_01665_of_02048.hdf5 193f90708063bf1d2175a5e80e973435 +part_01666_of_02048.hdf5 d68b7983652e4c02b54cae1bdb48bfec +part_01667_of_02048.hdf5 482ed1c0c828dd0b627e1f433fef5e94 +part_01668_of_02048.hdf5 2e73cc7f61d09d5540158a9339ad3604 +part_01669_of_02048.hdf5 2f9254729131b435de9b0c9b07abf2b5 +part_01670_of_02048.hdf5 9a72ba3b91474eac6d03bf6195925864 +part_01671_of_02048.hdf5 b1b15560715eb4998a747c5b57d6f8ed +part_01672_of_02048.hdf5 afadcc50f633f662e14aa1db3529e88d +part_01673_of_02048.hdf5 7b5c16a7781ccf9117d345b4dd4254c3 +part_01674_of_02048.hdf5 bf26349938a260d6a377cd5288d30355 +part_01675_of_02048.hdf5 6767536b95cf002bdc7a593dc081d542 +part_01676_of_02048.hdf5 6d6ad20a961568a28d656d968e9f5d06 +part_01677_of_02048.hdf5 7e873ddd2bf184c4849b69376818360c +part_01678_of_02048.hdf5 3b8c1e889e769cc8f30be303246eec7f +part_01679_of_02048.hdf5 181273ba6da9f994b10b819bc486edb8 +part_01680_of_02048.hdf5 33e45b440887a6ea0f023c1ac09e8e31 +part_01681_of_02048.hdf5 d9ce37a4f2bf228aa630e00418c2b635 +part_01682_of_02048.hdf5 
1ad896f58c4f1e8e66d3dbbe3df01bc2 +part_01683_of_02048.hdf5 2fad3168787a3e663b3f38cc6df92a3d +part_01684_of_02048.hdf5 486285c6835285521393b1b1c284b555 +part_01685_of_02048.hdf5 596bd8caa719badef00b763a533ae4bb +part_01686_of_02048.hdf5 0ac8b3b6cac546c5356117f505828517 +part_01687_of_02048.hdf5 7af7112cb0008b66ceb3e998ce4baac0 +part_01688_of_02048.hdf5 24e45cbdf481b080c5477a3a3d0936e2 +part_01689_of_02048.hdf5 b98f70affa33d72af7426ba2c2d4386d +part_01690_of_02048.hdf5 ab4ee495cfdf94cdd59f3ea59122714b +part_01691_of_02048.hdf5 35354ab40ab17d2c3665caa395b55d5f +part_01692_of_02048.hdf5 027fe5ee51497feb7ba0bbe45944b70b +part_01693_of_02048.hdf5 698328a452d38d6e1822cefb350db505 +part_01694_of_02048.hdf5 882473e21e9a0a46b0aaecc85c8f8c2d +part_01695_of_02048.hdf5 9ce45ec4365dc13eca1a36b59f3978f5 +part_01696_of_02048.hdf5 9cfec80f6f8eeb641d94a432642927f9 +part_01697_of_02048.hdf5 315915f5933af0bf275643cb10381fc0 +part_01698_of_02048.hdf5 b65068f212f853a6fde829855908479e +part_01699_of_02048.hdf5 4bb925b371380f9997ab5b9244179309 +part_01700_of_02048.hdf5 7662b5c281dbf78562a3522b092b69c8 +part_01701_of_02048.hdf5 cc6093ecd78c3af14473354cd1918247 +part_01702_of_02048.hdf5 0c2cfb2fce54866687b5b23f2a58fe09 +part_01703_of_02048.hdf5 f115c367be8dfdebd635452c88e10572 +part_01704_of_02048.hdf5 1f05be8ede1dee9f11d6948e8a6f5a31 +part_01705_of_02048.hdf5 89543596dcc66bd6c798440a73a8fe7d +part_01706_of_02048.hdf5 61d10306c575793ec65c6a64283463a0 +part_01707_of_02048.hdf5 86a322be827cbb5603a96bf91f03fe9a +part_01708_of_02048.hdf5 39fff1d9c2b21cd5743525bd63aecfde +part_01709_of_02048.hdf5 3915ad99572319a75e34c2e2546dc315 +part_01710_of_02048.hdf5 86f8adce268db75c2fbfec2b8c259d3a +part_01711_of_02048.hdf5 f0023a86cea70c7e68dff1b91f23a4f9 +part_01712_of_02048.hdf5 0587d4091d30b138249a695f4e957b98 +part_01713_of_02048.hdf5 04d39661566ed7e0d066c6f32e5bd717 +part_01714_of_02048.hdf5 cce158e21e8f0def950dfd465f147dc9 +part_01715_of_02048.hdf5 bc9eb39ef4ac4802b4d49db9d6da35d8 +part_01716_of_02048.hdf5 aafb683e8b0c679857d8a67c0d2bcfd0 +part_01717_of_02048.hdf5 67cbae04baa9aa3243e195a109543c02 +part_01718_of_02048.hdf5 f7f5bac9b6911cded24ae0174a6b2756 +part_01719_of_02048.hdf5 3fb6e64b04c7a1eb94b898475e71e320 +part_01720_of_02048.hdf5 bfaf0232e28f73b32314787de0144b14 +part_01721_of_02048.hdf5 4215359a16760ba0501f03a4f9a2e1d2 +part_01722_of_02048.hdf5 0a7f88f815834a1b461db9e4f9b226b3 +part_01723_of_02048.hdf5 de31039a537a54e80b1926c6b6506029 +part_01724_of_02048.hdf5 3aafda8202435492a56c3707c95d7fb0 +part_01725_of_02048.hdf5 5d27ea506273a381b069a8f617066fb0 +part_01726_of_02048.hdf5 c30a9741b553c86f288ba63f068178a1 +part_01727_of_02048.hdf5 14c72e6b337b82ca8c50e4565b8662ee +part_01728_of_02048.hdf5 06b7064bf065217d859eeec696431c1d +part_01729_of_02048.hdf5 90ad7fe26fd54ee6df676948068b6335 +part_01730_of_02048.hdf5 0843f1f03cdb3bd7078f938f19c7453c +part_01731_of_02048.hdf5 22ff6390d56ea9da9373c59aa23dd0a3 +part_01732_of_02048.hdf5 9336d3a1606148f3905608b0da0c992a +part_01733_of_02048.hdf5 30a4fc0703bf41a42c411ffc590be626 +part_01734_of_02048.hdf5 103578b099994e91424d1d9b8c049c80 +part_01735_of_02048.hdf5 8945548037a9d727ec3bc1714d4b8feb +part_01736_of_02048.hdf5 07fce8adada57d80d692e901f9f39af4 +part_01737_of_02048.hdf5 ec646677bbc759732a80dc2cecbb03a3 +part_01738_of_02048.hdf5 ba9b6ffd7b00f51f80e1c909e3392fe1 +part_01739_of_02048.hdf5 6296832cecdbf5787c8c297327312d46 +part_01740_of_02048.hdf5 b24cf16eff14f6d54ecc5bc95a6c5e99 +part_01741_of_02048.hdf5 99b6297959408b8078551e2da18a8ffc +part_01742_of_02048.hdf5 
0afb493cede5f0b2dde498a6f178e4f1 +part_01743_of_02048.hdf5 ff17ac1c0421e6366a91ab49e4e1a8a4 +part_01744_of_02048.hdf5 7347d2b5fb2dac48531af78e38de1601 +part_01745_of_02048.hdf5 10ad6fb125532290484b057a58e1da4a +part_01746_of_02048.hdf5 7e805affd2abcf3dd0ebec0b00ceabec +part_01747_of_02048.hdf5 11d95a7e53f311a40b18c1ac0e57060c +part_01748_of_02048.hdf5 1eae6ffa046c932cefd32bc46be02231 +part_01749_of_02048.hdf5 8b2bc9a71d1bcb851c0e13ed2cf1f4a7 +part_01750_of_02048.hdf5 63e984a47f1e3eb69c7ff88d3701ce78 +part_01751_of_02048.hdf5 40d8d685e78c00e790db3ce47dad1629 +part_01752_of_02048.hdf5 7b56f5e86d02c3b28949daeaaebfae10 +part_01753_of_02048.hdf5 e25bc7aa8991b7bb9a88b5665fbbc6eb +part_01754_of_02048.hdf5 f64c6ce43511dcd10c511793e64d8189 +part_01755_of_02048.hdf5 85524234c15023f1f5d901f976cadbd7 +part_01756_of_02048.hdf5 8cc5d245b239ab1eff41aa2db0afa2e9 +part_01757_of_02048.hdf5 25e9c061854e7be4ff434dc70dce686f +part_01758_of_02048.hdf5 0e252557ad4c9c935a7c920ba589889b +part_01759_of_02048.hdf5 97f171d966c46c54ea34ee538af517ee +part_01760_of_02048.hdf5 31ff3d744a33f41efab39d5795496fb3 +part_01761_of_02048.hdf5 7f54aa9cf7b40335eddaa9f76a858dea +part_01762_of_02048.hdf5 63bfd9fe38f1806c49f796a85974ecae +part_01763_of_02048.hdf5 e0ece0f6f8a363c0cf3596525e215c53 +part_01764_of_02048.hdf5 aa2ecb72b69c5f6b9cbf341412c4eea9 +part_01765_of_02048.hdf5 a19461ab82f0f61626785127de846f3c +part_01766_of_02048.hdf5 b864faf89ef1a51747f4956c30aa7fda +part_01767_of_02048.hdf5 d5c6af00d2b0c40b53851d014acb97cd +part_01768_of_02048.hdf5 a5bb2b573bd13d68c620c6d79ae02c5f +part_01769_of_02048.hdf5 bb016244800c4e5daf5894b40d5c67e2 +part_01770_of_02048.hdf5 616fe25536d6c3e46e757b2a75671729 +part_01771_of_02048.hdf5 ef19308b03df9b5cb8ac61f0cac03c32 +part_01772_of_02048.hdf5 fd7927b64d13ae46088be67fb74af300 +part_01773_of_02048.hdf5 bef8c9b9846f4006494e1b01ec0589a1 +part_01774_of_02048.hdf5 86e8eca9ee38ce70fd37a8d064061d76 +part_01775_of_02048.hdf5 181fe7f52e5261f891c7be86e54bae11 +part_01776_of_02048.hdf5 afb39ae73fdea6608f41193856058f7d +part_01777_of_02048.hdf5 d9fb6d48b9ba11b027b59741f6010f1d +part_01778_of_02048.hdf5 a51039407ad52bc8463d7870bda0b22c +part_01779_of_02048.hdf5 c53bb93287d9b849613f973caa518841 +part_01780_of_02048.hdf5 03751b66c30b0a86920f31b1b767999f +part_01781_of_02048.hdf5 bcaa303a867e5b4b012b989a1996ad21 +part_01782_of_02048.hdf5 98abb55af177cf2f5a259ab9c16c89d4 +part_01783_of_02048.hdf5 8d2faf63098fb4928989ddabca0ee624 +part_01784_of_02048.hdf5 e091fd13c6ba72c8abf2bb145916d030 +part_01785_of_02048.hdf5 2dc54cd0a07f329c29139d52c30e2ca6 +part_01786_of_02048.hdf5 a3372c4ecb17131089cf8dbff8b28207 +part_01787_of_02048.hdf5 874aef4107b9e5f2e2efb3faeecc7f8c +part_01788_of_02048.hdf5 1c9c2e8ec0fb7be12c9df332a4f62bca +part_01789_of_02048.hdf5 fc1df51ec576f6e798b78650f3c73a6b +part_01790_of_02048.hdf5 f8c29c2055b976e1fa9d5f95cec8de99 +part_01791_of_02048.hdf5 bffe62f912a696c716875d25ac13385f +part_01792_of_02048.hdf5 fe0765b1e92728bdb62d21a2436742b8 +part_01793_of_02048.hdf5 9fc6b806e14fbfae7dce9266830c00a4 +part_01794_of_02048.hdf5 984ddea719ae9ea77e2e8e2ae7aa3f52 +part_01795_of_02048.hdf5 235a6a6b76b83b3907380391c11324dd +part_01796_of_02048.hdf5 1c6b6c470eefaedec9ae54ddbdc92164 +part_01797_of_02048.hdf5 561e8b625f7f277a2863582179343721 +part_01798_of_02048.hdf5 d213165cf8f20edeb086debe4fee059f +part_01799_of_02048.hdf5 f1b850b39236f8318ddace446a1d9c56 +part_01800_of_02048.hdf5 f6d385350f66e013b5efdd64843bfdc2 +part_01801_of_02048.hdf5 2235dbb9f27e123aedb471ba50a5294d +part_01802_of_02048.hdf5 
33b4c853e468431a22172d96b30f5336 +part_01803_of_02048.hdf5 20c9869c2178ea64d84082d0f62e9efb +part_01804_of_02048.hdf5 c9b1545b29b2f0966a349109b30c5606 +part_01805_of_02048.hdf5 627dd6f44d1e04ff77b0262c39d0d62d +part_01806_of_02048.hdf5 1bd3a016fbbb7e91078ce55400ce9bb9 +part_01807_of_02048.hdf5 a2d90a5da33649859f9cab08c76b5432 +part_01808_of_02048.hdf5 2cc7aaa8dfa4a43681520ff7b3a3edf4 +part_01809_of_02048.hdf5 1be842d8fe9ced0b2a60b8d47ef4e56a +part_01810_of_02048.hdf5 4bd64dcba90c79c3ef8a1a09d3cb0ebf +part_01811_of_02048.hdf5 b0f5015e0192ada1055f9365601b7cc1 +part_01812_of_02048.hdf5 1de07e82201d0ffcf10d2705d3fde97a +part_01813_of_02048.hdf5 87356d178575c588ccc611fc0619d0fd +part_01814_of_02048.hdf5 496c7108c6f59fa276e8b7fc8d059b4c +part_01815_of_02048.hdf5 588d9b3928c69165fcc7ec4cb903cf88 +part_01816_of_02048.hdf5 0eefb473446db7650ba37e8f115a8956 +part_01817_of_02048.hdf5 f180314df1978e8f860118c0c03a6f87 +part_01818_of_02048.hdf5 65424ad4c7efe62e2e69a8a8e95abb2b +part_01819_of_02048.hdf5 dca51a2fba8972f7e0431a3eb6037b21 +part_01820_of_02048.hdf5 e2735da1cd7596fcee50478494fca3d9 +part_01821_of_02048.hdf5 4e7f71385ecf26f66bff28df19c24a46 +part_01822_of_02048.hdf5 dee499406975b283ce39bc472c6d3a58 +part_01823_of_02048.hdf5 ed6ed68a366f0336af6c5e9e80bf83dc +part_01824_of_02048.hdf5 41bf1e22cc0bd71c8aae809bd63f6391 +part_01825_of_02048.hdf5 891b9a1cc6760a90c4ddfbcbb4a764ba +part_01826_of_02048.hdf5 1f2b2042dbaa3cc5fe7d55d8950ea8a7 +part_01827_of_02048.hdf5 7bdc6a59c5e84f31941241114b7e7fd2 +part_01828_of_02048.hdf5 ca24e1db14969a81513357b56aa838b9 +part_01829_of_02048.hdf5 470f38750551da20472465a834428b07 +part_01830_of_02048.hdf5 0dae98d02adbd6e8d4489a58213903df +part_01831_of_02048.hdf5 827e13795d01043b559f584f3d6c0da2 +part_01832_of_02048.hdf5 7de926bd9aab525922f1c576aa52b1f4 +part_01833_of_02048.hdf5 c7fae5ba17507087ec7ac1969bb67e8a +part_01834_of_02048.hdf5 453a5081c0c61fabb03b5ed585652fed +part_01835_of_02048.hdf5 2eade111a358eb8baa547117497ee8a8 +part_01836_of_02048.hdf5 9a3b45f45d923e6f7d03398c89a7e306 +part_01837_of_02048.hdf5 cc58d9f612852dfb202c92dbddc26182 +part_01838_of_02048.hdf5 6fea7302c15391ce122dbe646ba989cb +part_01839_of_02048.hdf5 dd8d66d0479075dbf596b8f34fc31f3a +part_01840_of_02048.hdf5 d717b6a1c60d4d1b74772b8736ea5381 +part_01841_of_02048.hdf5 3d3b3c9271c398adf7fa877cf831319e +part_01842_of_02048.hdf5 69563f3b169a2798b8c07f10362f8acb +part_01843_of_02048.hdf5 2d2d131422b3eb6f0891e056a8d0dbd6 +part_01844_of_02048.hdf5 813d408079617ccae9473807b4c0200f +part_01845_of_02048.hdf5 2cf993d271a2f6edefea275d17658535 +part_01846_of_02048.hdf5 900be99c68039e7ff37157e8efaf8e17 +part_01847_of_02048.hdf5 c6d0ff46d847e7e5becd550d420a5c81 +part_01848_of_02048.hdf5 edeec89079c47bbab5f936e3237c66fd +part_01849_of_02048.hdf5 014ead6c2cc6b399b99856876b4ba32c +part_01850_of_02048.hdf5 12c294293e46b83871c5e3e85bece25e +part_01851_of_02048.hdf5 9949ed925b3b76a979be69a1f84d9a99 +part_01852_of_02048.hdf5 aeb8d5b89b3676940a64c938e37455da +part_01853_of_02048.hdf5 f3cf1b3ccf849e5fbacf4d0a72f9e68c +part_01854_of_02048.hdf5 2e9333fe5c33050c6fc04882398b8921 +part_01855_of_02048.hdf5 d880bfc57a86be17596544d4032c3ab1 +part_01856_of_02048.hdf5 656e168c626d4548b8033e4b3cb47add +part_01857_of_02048.hdf5 fa72b103a7672f7f9193a476b94f4d6d +part_01858_of_02048.hdf5 cd09705657e5e98c7ec6d823d68175ef +part_01859_of_02048.hdf5 19d30aa11bf86245cdb89260ce89b920 +part_01860_of_02048.hdf5 8a1c2609b3949563fc4603e5c4e6b218 +part_01861_of_02048.hdf5 bc0795d6a7353569a2c620bbe6d11e77 +part_01862_of_02048.hdf5 
cc6b3467293761ca3a1f95adbcc8eb3a +part_01863_of_02048.hdf5 07ba8b7808b6a4f4fe466dd85ed00a0f +part_01864_of_02048.hdf5 306700b2a8f68c1170f09a561baa587c +part_01865_of_02048.hdf5 4946ff017c5de55c8befbdd823595314 +part_01866_of_02048.hdf5 f0e991820497d125eae56fb9546279d1 +part_01867_of_02048.hdf5 7945a064dc8f62df5b8d1ce8bb17dbc6 +part_01868_of_02048.hdf5 59d7f22f729e262ea4862624e7fcbaed +part_01869_of_02048.hdf5 51e6de124077707bb7621a77ce49aa9d +part_01870_of_02048.hdf5 bbea43427a89872663eb805e38b793a1 +part_01871_of_02048.hdf5 e0567bd6ba1890b0f3859fa76f67155f +part_01872_of_02048.hdf5 616d44662e4c2fdd19007ccfa8e28ff4 +part_01873_of_02048.hdf5 f0cc8c0d425d22bdb9db3b114f9728b0 +part_01874_of_02048.hdf5 8e1b2d947b4e79c99f704d6a3c78c675 +part_01875_of_02048.hdf5 080e84ceea86a0e307cb2a54f44edd5a +part_01876_of_02048.hdf5 a31a74a22ca68742b2bf0844541237ff +part_01877_of_02048.hdf5 fd0dcafad62bf35b43a0bac63b3be667 +part_01878_of_02048.hdf5 daa543d2f1ff274add19aeb9e08ac7ab +part_01879_of_02048.hdf5 b14bc72ceec636145580a99b01e6ae74 +part_01880_of_02048.hdf5 8a8255ddf0dff78bed67ba5204ad4ce2 +part_01881_of_02048.hdf5 aa5c674a0567680322e38d2a76b95190 +part_01882_of_02048.hdf5 6b69751ec34144db6e301fc1b08f202a +part_01883_of_02048.hdf5 49d64db91d16cf9224c6ed7a2fbc1a76 +part_01884_of_02048.hdf5 a5bc69d234b621fca01154a5ccaaccb4 +part_01885_of_02048.hdf5 9b207f25a243fa607845e6b45ab4db6e +part_01886_of_02048.hdf5 ad41735d50db1a6c164eaa76c7428447 +part_01887_of_02048.hdf5 1cc7c4f1e01cd5399db43c0494d29369 +part_01888_of_02048.hdf5 0a421220a5de79ad912a8fcd1a8b7acb +part_01889_of_02048.hdf5 bf006bb00a726cea01bd7b6ec0679076 +part_01890_of_02048.hdf5 0dccdce0bdfe14ead0ba1e6e45f875bb +part_01891_of_02048.hdf5 18b23e85d4452c218dbcae5499d4356f +part_01892_of_02048.hdf5 ccfba42fc56255afcbd6e597247d93f1 +part_01893_of_02048.hdf5 8d10f35fe173873fea8412df22b64d73 +part_01894_of_02048.hdf5 88c0e838ff4e9d284a9b2be6f848c98b +part_01895_of_02048.hdf5 bec64959a193c9e47afa1bc9931bb7b5 +part_01896_of_02048.hdf5 e07bdd276aeedf39b233279da2e20b7f +part_01897_of_02048.hdf5 cd86b180bfb0dcaac6345546a9afc1f6 +part_01898_of_02048.hdf5 49173746dd2ad982a6652574556f23c0 +part_01899_of_02048.hdf5 1590a2b8ffb1afcdf3bd5f47148325b7 +part_01900_of_02048.hdf5 9657f3666487d904c5006541eb279654 +part_01901_of_02048.hdf5 a1197be049efc774b02bcd3d899d684a +part_01902_of_02048.hdf5 8283643244d76f491bf72bdc682ce9e1 +part_01903_of_02048.hdf5 80bb60fa154f0b9d1b1c58938721803f +part_01904_of_02048.hdf5 38b6da267f066eba1dae774acb386e57 +part_01905_of_02048.hdf5 4ca868ec8edd8985aec03b0c88043043 +part_01906_of_02048.hdf5 898701acc4637c421cd7a3029c6a9c9d +part_01907_of_02048.hdf5 2d976333b412d84e99c73231a4753092 +part_01908_of_02048.hdf5 25f7d594118f17458c4b1ef9f5d0276b +part_01909_of_02048.hdf5 6239690a7d97a390d2aeb9570938752d +part_01910_of_02048.hdf5 e206816141941c8980dc0523d43f842a +part_01911_of_02048.hdf5 86a9345c40d783c9a62848774fb99a26 +part_01912_of_02048.hdf5 92b12512e6d161b8670b21617debf1a2 +part_01913_of_02048.hdf5 9407c68e489b7b8c8c65803932a0c1af +part_01914_of_02048.hdf5 ca6824f533d7664cb8919179b22be46e +part_01915_of_02048.hdf5 a1b5cd0dc1282b66539b5bbeff6c9bfc +part_01916_of_02048.hdf5 45b8a0e199ed16fd883d37cc685745fe +part_01917_of_02048.hdf5 09ce3226804bef98e6f09f632968beaa +part_01918_of_02048.hdf5 77262b84c87576c41cae8994f79a9894 +part_01919_of_02048.hdf5 2fb10cf0fd2482af6129d33c8d6aef65 +part_01920_of_02048.hdf5 f89a9277a757ea78634c8caaa54e47fc +part_01921_of_02048.hdf5 842861017dd84569c2ca41d388dba8a8 +part_01922_of_02048.hdf5 
7ceff2d4006bca27eaf36aaaf4fc5edc +part_01923_of_02048.hdf5 412f9d1f47e759f9f7ac6f059a254dad +part_01924_of_02048.hdf5 d6d5601c7ac43bf6c3c9b67fdc680699 +part_01925_of_02048.hdf5 0f8bfa2b5fb2e2a61341cc9d43177740 +part_01926_of_02048.hdf5 4bf2103164cbe8ba6563fec99643ebf8 +part_01927_of_02048.hdf5 22896e9c90c8873667cb9cfbc96d3548 +part_01928_of_02048.hdf5 3d10b164daa30c2c0513011d43971cc2 +part_01929_of_02048.hdf5 4b8cc61dc4d34e69ab44a05036e5f13d +part_01930_of_02048.hdf5 c642593eebcb0eae94729d30e10d0dd8 +part_01931_of_02048.hdf5 4b6b2cbc7f64178da52a51b10e64977a +part_01932_of_02048.hdf5 bda7e9b39a1a87c2dc4d01baf859bdbb +part_01933_of_02048.hdf5 bc0de7753f0b9d177d9c1db4ceaa5bb4 +part_01934_of_02048.hdf5 4e889b3459498dc46aeb1b30e5967434 +part_01935_of_02048.hdf5 37f48b973d17c9af15c8d604da9892ae +part_01936_of_02048.hdf5 a3c43bb62b00059213967737100e0161 +part_01937_of_02048.hdf5 12217812e0346db94a3c3c5044bd0865 +part_01938_of_02048.hdf5 cf4ed02058fd098074eda423b9cbbb53 +part_01939_of_02048.hdf5 25848d2fdef82408de246d01f0db38ab +part_01940_of_02048.hdf5 f71be4ade49b2deeed04f208265c0576 +part_01941_of_02048.hdf5 2d07de30f7f16574a5b9480a1b8b6aa6 +part_01942_of_02048.hdf5 5583c0c4a67484553b35fb42af5ef66c +part_01943_of_02048.hdf5 c5f34a3625c3a40a0a0a64b9bd8332c8 +part_01944_of_02048.hdf5 798f822d17d46b88e32a815e0ee78f97 +part_01945_of_02048.hdf5 30ee498c4955562c30fed8d39e6a9741 +part_01946_of_02048.hdf5 2a67f2df07d9dfb6566cc23fe23b721f +part_01947_of_02048.hdf5 48b3a307b81f16f582a76444219bfdd6 +part_01948_of_02048.hdf5 37255e9467bd5b14824a3f0bb51d312f +part_01949_of_02048.hdf5 e4f890437b18bd9023c74dd31b12e64e +part_01950_of_02048.hdf5 cf3fac2710bd4557a51c9a508751ca33 +part_01951_of_02048.hdf5 639019c0d695a18f5acff0f5268a12d6 +part_01952_of_02048.hdf5 b17f234c9f3b704020d22de803bf0447 +part_01953_of_02048.hdf5 b40252cdce57be628ffac3b12e6f9efc +part_01954_of_02048.hdf5 3ddabeaf8385608c0ff13dd5ec629c57 +part_01955_of_02048.hdf5 1941f1f756e909322e084836ad635935 +part_01956_of_02048.hdf5 190503679dd60665e35288ef550792ac +part_01957_of_02048.hdf5 70b48e27d21aca71ec67deb8895e7740 +part_01958_of_02048.hdf5 d11a3976f3b670214e7935f5a8f761b3 +part_01959_of_02048.hdf5 f2eff575e0e9ab0ee03fe66c277679c7 +part_01960_of_02048.hdf5 a3375c5916cf42e4d70154e837f13e0b +part_01961_of_02048.hdf5 5b9bb0a78facd9e71d45f2b99e62a4e7 +part_01962_of_02048.hdf5 ffe4bad33ad71aec3a18689b8550a136 +part_01963_of_02048.hdf5 253cecee537f081c1aee74d6473273f5 +part_01964_of_02048.hdf5 019f329114f8e1e8f5f88e933155322d +part_01965_of_02048.hdf5 c053e906999ec660ef10dcc64686cce2 +part_01966_of_02048.hdf5 f9285db0aafde3fb2e362b4fd83ea0fc +part_01967_of_02048.hdf5 d2ce5c5185e65da6b543c5c019dd754b +part_01968_of_02048.hdf5 737c320b5c89d0e7317057bda6d1a753 +part_01969_of_02048.hdf5 5e1480e58b6601ab6ccbb8403ee2801c +part_01970_of_02048.hdf5 9ca8725fa7d3ab383c830e80bcc29128 +part_01971_of_02048.hdf5 f38f96fd2c9ad4d6455dbe63de3198ee +part_01972_of_02048.hdf5 97b53bb6c1a5bdfcd90755ae21621c5c +part_01973_of_02048.hdf5 1ff5b1fa69cb2577a4e4c6c4a64b8872 +part_01974_of_02048.hdf5 1f6703b3efffcc7fb8e236d2f453cf15 +part_01975_of_02048.hdf5 b668ffb96ccf2ae51d7f47402751a4ca +part_01976_of_02048.hdf5 2ba4ab80b4405b589c884e38ac7986d8 +part_01977_of_02048.hdf5 029cb44af593f59fb941fccfca1c5959 +part_01978_of_02048.hdf5 65e994b4a89852775a9e3a58b7b1b379 +part_01979_of_02048.hdf5 c8033283c392307990eb39394e8fde03 +part_01980_of_02048.hdf5 003fd3daf8262d524fae9e2d6f6a2811 +part_01981_of_02048.hdf5 a16488d1a6e3ae9b2e5966791f552762 +part_01982_of_02048.hdf5 
7b0a9b876ac15b80414503fae8a13f0d +part_01983_of_02048.hdf5 9464f735b05a692b81d9aef518e16794 +part_01984_of_02048.hdf5 a16a01f72120346383ac993ba0ffa102 +part_01985_of_02048.hdf5 ac86b435f8424e7e5ab05a470481d316 +part_01986_of_02048.hdf5 b49dbdaa73e7d7be41525efa49658193 +part_01987_of_02048.hdf5 13f47dce7002a2b523a41a2191fcfb89 +part_01988_of_02048.hdf5 af8af8739b2b0345a132771bba78951d +part_01989_of_02048.hdf5 34d90359c9e4b5f990efff65cd52cb87 +part_01990_of_02048.hdf5 717194b1dcdb68e24173eba23b529f55 +part_01991_of_02048.hdf5 9f33c1bc074a7aafa25839f62ec3484b +part_01992_of_02048.hdf5 2f0795f20eca1519572af6a1a34ada89 +part_01993_of_02048.hdf5 10a0e2314b80d0801a1e23d68250f579 +part_01994_of_02048.hdf5 ba3b7ea24ac4833830e03fda10b80b44 +part_01995_of_02048.hdf5 ea1a7abb04f05691422845fcc02ad2f1 +part_01996_of_02048.hdf5 14f74ecb91b06cda2ab30fa907121c6f +part_01997_of_02048.hdf5 354893f700873c242ae6b20776f21b48 +part_01998_of_02048.hdf5 34007bf3c46d61e6d9f2683e4135227f +part_01999_of_02048.hdf5 008ac60cdb8a0f9818d44889a305de5b +part_02000_of_02048.hdf5 32d14831a20b37dd1948c62975c63e4d +part_02001_of_02048.hdf5 9f049a4f35543d44559ef3c3e525c5aa +part_02002_of_02048.hdf5 c0d9324bb67d6fb2fd22661d0dcb979c +part_02003_of_02048.hdf5 03faf5e57a148ff93a6f23c9bb1d6ed6 +part_02004_of_02048.hdf5 3a73a1a8b50928921fc369bdcc31d6ae +part_02005_of_02048.hdf5 468b1ef7f82a3cc5958d65f1c9b6b12b +part_02006_of_02048.hdf5 3e410e4d5dc33cf751206459184a109a +part_02007_of_02048.hdf5 3e1351399b96e27bae6d4c80668aa824 +part_02008_of_02048.hdf5 910e9185d02c87cf46016d6b33ed97fa +part_02009_of_02048.hdf5 e9c77a3de25484621b86937ce9d6d8ae +part_02010_of_02048.hdf5 a3858d63e008b22c1ae358c29ecbac53 +part_02011_of_02048.hdf5 55638099a566f58adf70a8c0f74052f6 +part_02012_of_02048.hdf5 d088e7abf0e7514df21bea02f76f0052 +part_02013_of_02048.hdf5 63418092f9dc3ceeb42d13ec68b3928f +part_02014_of_02048.hdf5 b91bd9ca0c0dc0d12b7bf8c520d3c007 +part_02015_of_02048.hdf5 8acb790295e7a86ba5093b1730f50d36 +part_02016_of_02048.hdf5 422b73bd62b92aad5980b312dba0ef96 +part_02017_of_02048.hdf5 07e851c31ca5360a2e6c18dd364d2084 +part_02018_of_02048.hdf5 a3ed98a0e5aaf297d008a6c96b656e99 +part_02019_of_02048.hdf5 043f581ebc10b36a8f8c37371ed68be4 +part_02020_of_02048.hdf5 ac6d4adf47ae457777c8e89edc6fee28 +part_02021_of_02048.hdf5 90f87b20c80d10f04f65e0b34dd1a3e5 +part_02022_of_02048.hdf5 6f877a0899e56c195da45904b3e3095d +part_02023_of_02048.hdf5 91d1bec3965f43827321f6578bd8eb30 +part_02024_of_02048.hdf5 072ba9ace12660ce3cd4a8c7ba828d35 +part_02025_of_02048.hdf5 0a1aa1d6350b57a120086a48a65f419e +part_02026_of_02048.hdf5 6ac5e2eededca1b4540352004a99f0b8 +part_02027_of_02048.hdf5 5c1d3e9fd478febb5303e93f98ba1322 +part_02028_of_02048.hdf5 ee81ad2f6e9e53f275718776846c1b63 +part_02029_of_02048.hdf5 fc58f42e556366a79d659bb8ff4cec16 +part_02030_of_02048.hdf5 4de7736ade66ed0960f05b95718d0714 +part_02031_of_02048.hdf5 2f5f9da3402e73a380c915cf9c5ae520 +part_02032_of_02048.hdf5 4776c75fb148c33ea27d69c8d98b143e +part_02033_of_02048.hdf5 e1d5050a46f118b1c3e2d17d3bf93dc3 +part_02034_of_02048.hdf5 04da804973b0ce05765f225ac7ad2647 +part_02035_of_02048.hdf5 7df04d61ba0a7827560ccc3ea7dd4b83 +part_02036_of_02048.hdf5 b113b455a554b19b4db9e20d9b9175c2 +part_02037_of_02048.hdf5 1688eae9276722f68dcf61ec5fc7bf74 +part_02038_of_02048.hdf5 8f067ab77026682b3182d5132a11f4ef +part_02039_of_02048.hdf5 9520103dd92a4289b64c05b1664e059f +part_02040_of_02048.hdf5 cfd6ec8684e6b9ebfa851c052ed40e60 +part_02041_of_02048.hdf5 fd71332d50d8a210244e1d14ede8982b +part_02042_of_02048.hdf5 
db45c3e4d0ff1a51e7ab7d2e0575f24d +part_02043_of_02048.hdf5 18c76d4ef7aa25917ff660f6796caac1 +part_02044_of_02048.hdf5 86394565e33b034101a6d7db3391459b +part_02045_of_02048.hdf5 0c49d26c3b04a3d5f90206d07eb79681 +part_02046_of_02048.hdf5 7e02b581f991cbf9404db65839b86b9f +part_02047_of_02048.hdf5 7460162aaa6952ee5192e6fb3c5d5805 diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/4320_shards_varlength.chk b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/4320_shards_varlength.chk new file mode 100644 index 000000000..b259aeeaa --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/4320_shards_varlength.chk @@ -0,0 +1,4320 @@ +part_00000_of_04320.hdf5 e25b1a34b8d14179b2774d1dbf3b7561 +part_00001_of_04320.hdf5 f9455f720575b25ec923a71caa657f84 +part_00002_of_04320.hdf5 f1869d6460efcfa5eaee73a08fde097c +part_00003_of_04320.hdf5 f1d7681e60083964b161303e0a256dd5 +part_00004_of_04320.hdf5 75aac8516a7a829cbaed67bad36cbc28 +part_00005_of_04320.hdf5 b2bd9b31850f99e64e6680cc8932148c +part_00006_of_04320.hdf5 fc58f25b2adf87536b1a08714c71f754 +part_00007_of_04320.hdf5 c9282fd31d177aa73d0f0a32180d6369 +part_00008_of_04320.hdf5 53bd94f5447b257d4922a9c11f5bd5c6 +part_00009_of_04320.hdf5 33b2242fc9480ae9cf249cf1db1c3867 +part_00010_of_04320.hdf5 3b9605ac0fc4a1a04bab12435277d560 +part_00011_of_04320.hdf5 1cd23247637a2bcd2063ec700a7f1356 +part_00012_of_04320.hdf5 06de420a3134e36395080fe46440b158 +part_00013_of_04320.hdf5 989e82ab2250e6d29dcac5b4f46fca87 +part_00014_of_04320.hdf5 48ed1af82b21b3b5082ff16b902fd6b3 +part_00015_of_04320.hdf5 92174da07d07ab2a2123ed73ff37f157 +part_00016_of_04320.hdf5 b0d8a4f8d93ff5f272fb99a6a2ebf8f0 +part_00017_of_04320.hdf5 2d3e8cd590abbd29aac4b3ff9059f933 +part_00018_of_04320.hdf5 97232601baf199d083e4e4f7a4889e65 +part_00019_of_04320.hdf5 6add965d541a2983afc497ede51907b0 +part_00020_of_04320.hdf5 3f24191bcbec6f3911698f30f75a71fb +part_00021_of_04320.hdf5 0c447ce3615f12528186f8e27aa930df +part_00022_of_04320.hdf5 afdea4b784d11101b3356f06abd45b57 +part_00023_of_04320.hdf5 4b1450b2f51c6e8f0053afb91852fce2 +part_00024_of_04320.hdf5 08acee7e13a65180acdb6867e6b5c886 +part_00025_of_04320.hdf5 fbf67820211c18a780f2f788773d617e +part_00026_of_04320.hdf5 1132dba5d6d262d3dfd07ae02176e1d8 +part_00027_of_04320.hdf5 0ccae432ec9d1d2c3a3a1fc9ab430f1b +part_00028_of_04320.hdf5 e3846c5d3690e9e18c88bd93af80b140 +part_00029_of_04320.hdf5 0e2982e29095d9c4c7ce42a2cbc927fb +part_00030_of_04320.hdf5 f531138dc9dda7f92c623b744717305a +part_00031_of_04320.hdf5 5646b9f3d2d0cbe4220fd9bb657ea797 +part_00032_of_04320.hdf5 9bf1b18a5fba9f554c0f4229b8dc6248 +part_00033_of_04320.hdf5 075256e562fb7d19d8755ccfcd91d25a +part_00034_of_04320.hdf5 f86167071c48fdf9555de3dd904a46e8 +part_00035_of_04320.hdf5 3ed3da292471a7fa365cbabfa6f5ffbb +part_00036_of_04320.hdf5 76d04c157f331f714aef108a50feea43 +part_00037_of_04320.hdf5 65b5978805538396f340144e56c07006 +part_00038_of_04320.hdf5 91e0b49dc321cb35a911d13ff0c5d47c +part_00039_of_04320.hdf5 5d151e5616f5214d6a4abbefe99e033c +part_00040_of_04320.hdf5 3b569a0a8761f59764ca6773c93d3a92 +part_00041_of_04320.hdf5 47eebb1738dd0c693b823588a2186045 +part_00042_of_04320.hdf5 243eeb9da6bb36c8ba6029d1d4bec890 +part_00043_of_04320.hdf5 02ab253e5b7888257ed3046ef076d10a +part_00044_of_04320.hdf5 de0751538fede407a0946789361af8d1 +part_00045_of_04320.hdf5 61b80c66a8fb3aeeb6d35e2576c67deb +part_00046_of_04320.hdf5 c6ae2efd9a0baf399f0a0b94cb27d1e6 +part_00047_of_04320.hdf5 b90743bd3b188b094445bd69768514f6 +part_00048_of_04320.hdf5 
c99bead2307d9ab13e7638bf4936d82e +part_00049_of_04320.hdf5 d9930b22acb3b33b13ddf530bf5bb555 +part_00050_of_04320.hdf5 401d616f075abf222dcb7c1dcd74d3cb +part_00051_of_04320.hdf5 ac5181741cc0fab2f1727f45b04110ee +part_00052_of_04320.hdf5 3310185e8aa42e06bd5b146294aef9c0 +part_00053_of_04320.hdf5 ff8fdc9bcdef4cd69c677b5afb865933 +part_00054_of_04320.hdf5 574c4715cd7291628394f48fe872f301 +part_00055_of_04320.hdf5 4523a702e987d813295c1111faafd87a +part_00056_of_04320.hdf5 86ca07acecb16aa5418046a190a911db +part_00057_of_04320.hdf5 c978b91ee84aece86421ea43e006aac8 +part_00058_of_04320.hdf5 4e1ce3db897d44cc6771353e68697d02 +part_00059_of_04320.hdf5 8ee42610d87b463d81a56439be868668 +part_00060_of_04320.hdf5 208b6b83c460e01b33b2fc6f0ac2f7ba +part_00061_of_04320.hdf5 6770fe918250632ae3b7f96ff1e74eb2 +part_00062_of_04320.hdf5 623a22d4e6dca4c35e3e62170fe8a3cf +part_00063_of_04320.hdf5 841e13b5e0996a4519cc62b7216fcc78 +part_00064_of_04320.hdf5 1fa39f65542df27aa85fe0c2a9f7976b +part_00065_of_04320.hdf5 40639d444689cbf22c8fa5cd79feeb9e +part_00066_of_04320.hdf5 f1f639e0a819ecc5d46990c0b337d5a9 +part_00067_of_04320.hdf5 2443f2f77a4dbdadd87e6c7c038dcf67 +part_00068_of_04320.hdf5 38a60d4cdb9806a169cce2b0eb24fdfd +part_00069_of_04320.hdf5 9b47cc8ad792e912260f1ca708c2a3b2 +part_00070_of_04320.hdf5 2407d9d2946818bfa713982151ef72c3 +part_00071_of_04320.hdf5 52123d586e608142be1bead1f8517c4d +part_00072_of_04320.hdf5 5da6cd9c8618d327299e0cbb9dba28ac +part_00073_of_04320.hdf5 4972499b37f6573d923f0d569dee24a0 +part_00074_of_04320.hdf5 d7d92a688059b726cea20f59a98e6495 +part_00075_of_04320.hdf5 3c2b1021296ec44535e3060c544fe94d +part_00076_of_04320.hdf5 ddd0a1b7bb8992fd0c1995d7e21a27c6 +part_00077_of_04320.hdf5 330fdd327901ab31f0d76163e6239552 +part_00078_of_04320.hdf5 0df7bbfecb3149a2a7a683f3836ac3bd +part_00079_of_04320.hdf5 a5024429b4d83ff30bbd8b9e43422b61 +part_00080_of_04320.hdf5 48616e8e1eab2c0ca071b2a2a26d69ba +part_00081_of_04320.hdf5 e7068c828447ba5f381897f6c5cce1e6 +part_00082_of_04320.hdf5 6cf6e9451992ec035dcf7d745f01531b +part_00083_of_04320.hdf5 8283396780202430c7a0e8c0c40610b1 +part_00084_of_04320.hdf5 9c5e34b47afb5d5ea4157712a2f7c2c0 +part_00085_of_04320.hdf5 b5c118945567d95aa060a46bee565d9b +part_00086_of_04320.hdf5 7f599edc96b7a391d4d75af2de8a5098 +part_00087_of_04320.hdf5 3affa53b9ff0363360f9e776bfbe985a +part_00088_of_04320.hdf5 81f2b73578fc90a075f224ef844ea70c +part_00089_of_04320.hdf5 cc4bead0ce2ef4be55fb7c2174ab2d08 +part_00090_of_04320.hdf5 d9f0bc7d4009c7e7b46552c064516e44 +part_00091_of_04320.hdf5 b7a9180add4ca8511e7a8465ee14d37e +part_00092_of_04320.hdf5 2e2f89bb852c692184d7ca5492e05840 +part_00093_of_04320.hdf5 42de1e45439475a6b5efd64320a7a5e7 +part_00094_of_04320.hdf5 d0bc2f8826cd006956bbb070d87ab138 +part_00095_of_04320.hdf5 aaeb51ee885848251c0e163f9d318d0d +part_00096_of_04320.hdf5 6446db369475510acdd2abf8069bc8a2 +part_00097_of_04320.hdf5 cfb2bb254e8dad8e0b03649a4a391b49 +part_00098_of_04320.hdf5 ea09e112722c822cfe5fca0686593ab6 +part_00099_of_04320.hdf5 e1d1d337131a6110a6f2807b01bf420f +part_00100_of_04320.hdf5 e967bea11271c854933c08b07e2bfa7a +part_00101_of_04320.hdf5 4a2864505fb1cde933f8c50ced2a2a89 +part_00102_of_04320.hdf5 22ac7a896f49ff5b168e7841f38ea81b +part_00103_of_04320.hdf5 9eb925afb8b3908e513d94147f7d3094 +part_00104_of_04320.hdf5 de2acf866f1b6b6a46b6515f7374efe0 +part_00105_of_04320.hdf5 468843434371b13204e27932175147ee +part_00106_of_04320.hdf5 32231875fe2a0052108e674f9dd7fd52 +part_00107_of_04320.hdf5 a468e13bb06210398411363807600449 +part_00108_of_04320.hdf5 
b941404f7ec965a01be46f256c1a36ed +part_00109_of_04320.hdf5 c81b803a20d98c96b998cf3570f5e496 +part_00110_of_04320.hdf5 7cd1301094d79e8ee68e04153150e1e6 +part_00111_of_04320.hdf5 9693b2392b7587ccbc5f14c7e6087700 +part_00112_of_04320.hdf5 b748889c5f85fe471cbf6bd2b64cbe80 +part_00113_of_04320.hdf5 9cd07e597f9dd75ba39d1796c67cd643 +part_00114_of_04320.hdf5 77d7263960a40fcca131da1f826fcbfb +part_00115_of_04320.hdf5 4a323456afb77118cf9ce0fb3e299023 +part_00116_of_04320.hdf5 0d16e21fcf71d89a8cf1b4b484100ff4 +part_00117_of_04320.hdf5 4d4662748380141895a158504d429c1a +part_00118_of_04320.hdf5 b72250c3f0a9f64758922e33280678f6 +part_00119_of_04320.hdf5 78bf668e39f88e27dea7fa52903e6ab2 +part_00120_of_04320.hdf5 d96aa3615c58a5140d45b4ef6d4bf24e +part_00121_of_04320.hdf5 72fbe981134c707ffb613b1f39bb8cd8 +part_00122_of_04320.hdf5 646bafcd4f732dd02ac95df0a21ef56c +part_00123_of_04320.hdf5 d827d736a4a65a74b1171bbb3839aa81 +part_00124_of_04320.hdf5 45dbd74b1b996657eff5344f2494ad37 +part_00125_of_04320.hdf5 264a76bdd91c1f5c28edb38227d445c1 +part_00126_of_04320.hdf5 0b4c43365cc4337caec44582cdb22966 +part_00127_of_04320.hdf5 5efc8293b1ecbedc4635e4868c7b05aa +part_00128_of_04320.hdf5 243c5b59ae4c887a81e9c7010cb3e66b +part_00129_of_04320.hdf5 ced647656b15fc1268c8292d561285f3 +part_00130_of_04320.hdf5 966613bd41ce7c33fca891e4c7b99b5a +part_00131_of_04320.hdf5 47f5a79f1cdd99592e505a39cf5c7097 +part_00132_of_04320.hdf5 2efe43e931759986fa50a70be9202194 +part_00133_of_04320.hdf5 afc01a74de0e9c1b8a73d30673f1afd4 +part_00134_of_04320.hdf5 1e915f8ac89962bb473bb09dc1793b42 +part_00135_of_04320.hdf5 e85cac2ca107c44e5374184f60ff7ebb +part_00136_of_04320.hdf5 ef22b4a6cf5b19699a185cebd213f96e +part_00137_of_04320.hdf5 26d9c409624c19afb8b45330bb9081fd +part_00138_of_04320.hdf5 abd1e8cf7526c164b4bef32b63a67be7 +part_00139_of_04320.hdf5 bdd4a120a76e801e7e09ad4b5be05d33 +part_00140_of_04320.hdf5 a3bf4caae27a8036275d42354a3b5019 +part_00141_of_04320.hdf5 e29e9d5ee3b3d4250c9dfb0c454dc0d7 +part_00142_of_04320.hdf5 708f07e17ea5cffbb45742474a98bb55 +part_00143_of_04320.hdf5 29e17700c4457ded2bbeb268c7aaebf1 +part_00144_of_04320.hdf5 70a9b19799d860703dcc62f0f425512c +part_00145_of_04320.hdf5 8ce6da8f19189ea41b5c43f3e0212adc +part_00146_of_04320.hdf5 991603f016e07361635fd8392ea12982 +part_00147_of_04320.hdf5 d81fe5d0bf228665d6d5df9010feca5b +part_00148_of_04320.hdf5 a87d99474da6b3f2c686df67d7e8b04d +part_00149_of_04320.hdf5 e6ed8fa6f29aedf658bded44a1b8593f +part_00150_of_04320.hdf5 255387b8e882ae64645cf133eacb0aca +part_00151_of_04320.hdf5 dd739642e8bc436c9aab0c6dae8a1159 +part_00152_of_04320.hdf5 265e94f0c34a2366bfb88283731bcc77 +part_00153_of_04320.hdf5 ac397b9964648ddfed19bde7cfb9033f +part_00154_of_04320.hdf5 2b8803417bd7fcbd6a6ab27fe8868f2c +part_00155_of_04320.hdf5 5754ce84ea90bdd0a72ce48c554ae42f +part_00156_of_04320.hdf5 e22de3c459acdfeb0e9d1b291f53e4f7 +part_00157_of_04320.hdf5 d5603cb68f15d00068b03c795a3232fe +part_00158_of_04320.hdf5 0a02ac2d97b2241473fe49bf2714bace +part_00159_of_04320.hdf5 432dbb49bae3d1f04ba7137c063ec46a +part_00160_of_04320.hdf5 1a5ad65c82b23380050ec10970f57ace +part_00161_of_04320.hdf5 6e1a311e9b471e6c6ff2d3afa586ea97 +part_00162_of_04320.hdf5 b2c5dfabe9685546e815a3632c10a835 +part_00163_of_04320.hdf5 7423f21dea856c7f6a30ed7564524355 +part_00164_of_04320.hdf5 77d75764269eb10c1b3052d30b01448e +part_00165_of_04320.hdf5 265de81dd66da39252f8676121ec6059 +part_00166_of_04320.hdf5 2e60dfee293c731c1d067473fc9fa954 +part_00167_of_04320.hdf5 0bcd772ce3feeb4bca9b10f4eaac732c +part_00168_of_04320.hdf5 
69fdf9abc4a99f0eae54cedc193b89ef +part_00169_of_04320.hdf5 0dc7576099588814da3d6b08c6936e25 +part_00170_of_04320.hdf5 214287504028f892d6e042bcf71e8ccd +part_00171_of_04320.hdf5 4824c586f5b179c125eaf3102f5c3295 +part_00172_of_04320.hdf5 340dd66e910d8fa31eb0a5b0f33517ce +part_00173_of_04320.hdf5 564e9775f556e921996f18df1e5091b2 +part_00174_of_04320.hdf5 45aeeb486fbd31c98677bbc8f5cfdb15 +part_00175_of_04320.hdf5 d114c76776d634774d30a91392f4adfd +part_00176_of_04320.hdf5 10a7c888aae098a35e0bd337ddcec32a +part_00177_of_04320.hdf5 e24a81410964d3f3999b25d3d3aaab06 +part_00178_of_04320.hdf5 f265ab82e078c1e86520818e4175fdd1 +part_00179_of_04320.hdf5 c7ad5f5bad555946673b1ee03565103c +part_00180_of_04320.hdf5 92fa5571a8cc0b1b62ee3245ceb2ef87 +part_00181_of_04320.hdf5 7916e37df259dba26ed8f88ad4cd5d2e +part_00182_of_04320.hdf5 6bf9c255b55021ff6766d7b34a74f4f1 +part_00183_of_04320.hdf5 4dbab19c3552408adc07eebb98bc8798 +part_00184_of_04320.hdf5 6bed6eb51dc2f8d145fd8388fcaa3a9c +part_00185_of_04320.hdf5 9fe81bcc23365b335dc2530334e3a1ba +part_00186_of_04320.hdf5 3436c8effad5f5115b0db1576f2a11bd +part_00187_of_04320.hdf5 f416e311b04c75848f975e84059ee8d4 +part_00188_of_04320.hdf5 7271eced0b5868ccfb97258f165bda1b +part_00189_of_04320.hdf5 59ca23598fcbe63696ff476e74848278 +part_00190_of_04320.hdf5 25e7f4ff9a729b83a97ae1ae2774ba31 +part_00191_of_04320.hdf5 42d3e24802b945f52009871bc421ba1f +part_00192_of_04320.hdf5 573040cdd6d51c54d07407864bb881da +part_00193_of_04320.hdf5 f8e26be2485553690e0ed0a4cf89394d +part_00194_of_04320.hdf5 bd32a7e9b68cb00b363edafbc4c13ca1 +part_00195_of_04320.hdf5 468a84cc6ba3ad403011972e5d357593 +part_00196_of_04320.hdf5 7fbf28f7b1f4eaed6024b12fc240e63f +part_00197_of_04320.hdf5 6a630193c61c40d864f85be58b24a414 +part_00198_of_04320.hdf5 d724f892fde9bc8979c0e4ed03e1967c +part_00199_of_04320.hdf5 0be5d99f7a75f45b65904aeabcd0d5e7 +part_00200_of_04320.hdf5 bad20bd59fe939f6ca4b9a5f77ff24ae +part_00201_of_04320.hdf5 073899c9d62f4d1121fb9d60c2ce0eb8 +part_00202_of_04320.hdf5 6048232411eda733df1b2722a009d328 +part_00203_of_04320.hdf5 317cadacc4904c448450d35225d641ac +part_00204_of_04320.hdf5 9cd9a64e622c89593f3126a112461c6c +part_00205_of_04320.hdf5 e027b0079f295dae997062443eced07c +part_00206_of_04320.hdf5 2813176adc6ab66b7a4e8bad62083efb +part_00207_of_04320.hdf5 3c5d2ea3b3e637a83d002128399a222a +part_00208_of_04320.hdf5 f150dc1557ed9c1ac528d461c7ac1e3b +part_00209_of_04320.hdf5 fc79a94c924a1768f6ea95335d6f9ef1 +part_00210_of_04320.hdf5 9c91727210350740c2e5228a0fee6fbe +part_00211_of_04320.hdf5 481e8be50d11b012af02305991317f34 +part_00212_of_04320.hdf5 ced679a978771dafaf7266683d094fb7 +part_00213_of_04320.hdf5 fe9daaf9461c5b5b3cd83ce8118de4d2 +part_00214_of_04320.hdf5 009e61478dde75a42a2a18cd02f48199 +part_00215_of_04320.hdf5 5475c065c81485723eb3e0e05824a71e +part_00216_of_04320.hdf5 0f7f3464b19ece93c37fdefac1bc4dbb +part_00217_of_04320.hdf5 58b5bfc9c50148ddac81cea1b9e1bf26 +part_00218_of_04320.hdf5 39709abe5d492de4a68f33046b78dc85 +part_00219_of_04320.hdf5 08bf06fb3e0075bd03a05afa71a9875c +part_00220_of_04320.hdf5 1c8e96e6facdc09ae1ae207d509cb735 +part_00221_of_04320.hdf5 b20f5faddcbd0df4541b4d8fec9b9c49 +part_00222_of_04320.hdf5 ede49671ac6221cfc0c35ba0d280d96d +part_00223_of_04320.hdf5 15b9e3bbd37902943512a267218f7538 +part_00224_of_04320.hdf5 d22308ac8a2674d31de95d80b77d2c1b +part_00225_of_04320.hdf5 1550e09f4764ad8184d257bd64558ef4 +part_00226_of_04320.hdf5 fcb00fc647fb7ddadd54e9c1121ab72a +part_00227_of_04320.hdf5 51c4b332d1a8d0cdb563886fa1b3490d +part_00228_of_04320.hdf5 
ead794d47aaca72cf3225fdd05e12cd4 +part_00229_of_04320.hdf5 5e65cd2cfc38e1d0264a99ff916626ca +part_00230_of_04320.hdf5 e96b9121e36d67d08bed6c9a8453a75e +part_00231_of_04320.hdf5 3d5f376d124af0110fccdba16abfb6f3 +part_00232_of_04320.hdf5 f203570ddd5dea5f208210baf7bee4e2 +part_00233_of_04320.hdf5 fda9cbeddbee35acc74534c664600d37 +part_00234_of_04320.hdf5 a3324194889aa2e95d48a4cf15954792 +part_00235_of_04320.hdf5 835cba720b7fa426c1f03a027fed75b6 +part_00236_of_04320.hdf5 c5f925b5946bc777d6da5b2c48f7a274 +part_00237_of_04320.hdf5 d9f4eb059bdf272399e6b4763e74ffb7 +part_00238_of_04320.hdf5 5302baecf31f27438b891413fecf5d0b +part_00239_of_04320.hdf5 79b9f12baca2f271e0c7a86b85034ada +part_00240_of_04320.hdf5 8bf4118433ffad5b492221c9c31ca3ac +part_00241_of_04320.hdf5 beede6a440000518bbb4061f2ac9aff5 +part_00242_of_04320.hdf5 a53d97e0f760c768e867768baf104a64 +part_00243_of_04320.hdf5 88f6364663fcb8d3d197f6d22c1441c2 +part_00244_of_04320.hdf5 8e3eba78696c2d80aaad25c52a158113 +part_00245_of_04320.hdf5 c84db5dd0d850c852f48715512ef42be +part_00246_of_04320.hdf5 e75d5e166118535b4a9b1e2cd4a1d362 +part_00247_of_04320.hdf5 0bef41606a06070b5c051134e378559c +part_00248_of_04320.hdf5 043ca3fae1adda11dca0db171ecbf345 +part_00249_of_04320.hdf5 5cf20b58660d8a9eaaaf1d97da2c5688 +part_00250_of_04320.hdf5 c7932c84027e1f722e6d7f51ac757094 +part_00251_of_04320.hdf5 c2dae497c7aa5e456160c84cb9ad4591 +part_00252_of_04320.hdf5 9c0950b26348747cc8235a425c5d003f +part_00253_of_04320.hdf5 5ac8b8c452859bfa06aff22431edda05 +part_00254_of_04320.hdf5 0117bc8f8e533fc886ba9ea50aa4e360 +part_00255_of_04320.hdf5 514ad57c9a86da4f27a3e327e71cf266 +part_00256_of_04320.hdf5 58808d08196f7a2db00e73e7d1467a44 +part_00257_of_04320.hdf5 6fd4289dfdcc445ea6164d23ecfe0e78 +part_00258_of_04320.hdf5 865e6482f7d4a92bd33c1f5096099f50 +part_00259_of_04320.hdf5 cd4a899dd40ff1b163be79358a6650c7 +part_00260_of_04320.hdf5 c284dbfa8c23bb2093ac5429af3c3d42 +part_00261_of_04320.hdf5 3b9e0228b5b05ab4f42b5b1fb68d9553 +part_00262_of_04320.hdf5 b27fa1c3278804fe4e0ab3b0bc2f23b2 +part_00263_of_04320.hdf5 31f1b45ac151f1b791095bf757afe08b +part_00264_of_04320.hdf5 11b46a2cfc902d3be75a56eefd40c302 +part_00265_of_04320.hdf5 2750f1f9ed37f930ae965f0ffaf2af9f +part_00266_of_04320.hdf5 ae08e62d208127c5e8bd7fc5f6e8c5ae +part_00267_of_04320.hdf5 aa261d1f4ee4b5d7eb02c87c8844cdcc +part_00268_of_04320.hdf5 f402d8e69f6c432611794eefdf4e162b +part_00269_of_04320.hdf5 7ec553c84f93abb8b12af9709afd8988 +part_00270_of_04320.hdf5 7c038b5952b8bc0440a6fd15dc98c8f2 +part_00271_of_04320.hdf5 b64737cdd3a32c1663932f678b747847 +part_00272_of_04320.hdf5 6d5f7b798b463ebe5a1d4d2547362653 +part_00273_of_04320.hdf5 998d7baf3392fac73ff029a5d139d97a +part_00274_of_04320.hdf5 6f7535881985025d37ecfecacc235637 +part_00275_of_04320.hdf5 70f7c506750096f289ef12d885bef338 +part_00276_of_04320.hdf5 ba3d21e94db7eb1807aa660b145cdc96 +part_00277_of_04320.hdf5 c1fe52fc99a31f57536378507cdc582b +part_00278_of_04320.hdf5 2462a85bd3e2d0bb86d72195715a6de0 +part_00279_of_04320.hdf5 cd97aa15172d275c025712ebed6d18ff +part_00280_of_04320.hdf5 08f8c182c2d67bd5176ee8a66b53ee84 +part_00281_of_04320.hdf5 2d5b3a7937e9c4810021ecb0363b475c +part_00282_of_04320.hdf5 d8508004987b6c599f62181f72c33831 +part_00283_of_04320.hdf5 b808f192152fdd64b03040fb6467d5b2 +part_00284_of_04320.hdf5 36a319ed6b2bb0bc39587584c3ee2c10 +part_00285_of_04320.hdf5 ad55122068f3c131ba6258ade37fd833 +part_00286_of_04320.hdf5 000bdc800c121dfb3d876892c3714648 +part_00287_of_04320.hdf5 0dba7615fb3a20b8bc4e31418cdbbbe4 +part_00288_of_04320.hdf5 
3ad3c0f68a0ddc2ebad865ae2bdfa07b +part_00289_of_04320.hdf5 42a79047196407ebcd76a720b4ff5c8d +part_00290_of_04320.hdf5 9cc39b2b357f18e6f2f75bd3ee1b1959 +part_00291_of_04320.hdf5 4737be949b2aad177f855f1ee8278318 +part_00292_of_04320.hdf5 d0d478e67bba5358b96175cebd2cae21 +part_00293_of_04320.hdf5 23de4c6ddd1d1eb04e0948b8278bb44c +part_00294_of_04320.hdf5 ce070a637a1ff764241021d9d0aa5d5f +part_00295_of_04320.hdf5 581ae7f1d7f0730177de5a2815f8ff24 +part_00296_of_04320.hdf5 1ca602871919540a1f795d81ba90e1ce +part_00297_of_04320.hdf5 9cf97a6e3b0c8761dfa6b36f023fd8c1 +part_00298_of_04320.hdf5 8d473529e5ec8cdf8d9344006715a1d9 +part_00299_of_04320.hdf5 9207ea3ed4cf3f95d6eca31e1a8c11af +part_00300_of_04320.hdf5 bfaae7ca3559def77194084bc46d5e7c +part_00301_of_04320.hdf5 ea3a979cc50f2cb715af35e65bdf6798 +part_00302_of_04320.hdf5 368d677ac9fa9ad2455524b51203577d +part_00303_of_04320.hdf5 c4fd1bb037a93f59652f2d2f64d8f2cc +part_00304_of_04320.hdf5 f84039684442a8992c4b760a3249c8fb +part_00305_of_04320.hdf5 33a49d5953016e3f346527f51a328784 +part_00306_of_04320.hdf5 5fdcf0f24a87841b7ff0a1d5f1acf095 +part_00307_of_04320.hdf5 48cf95ea6eb90043031042384501f7ac +part_00308_of_04320.hdf5 1f020beadac895e90598036bc879a256 +part_00309_of_04320.hdf5 c30c3dfd2e6d5f3d278af07634c78fab +part_00310_of_04320.hdf5 f81596a0abf95d89bc2d7563a5397ac6 +part_00311_of_04320.hdf5 ec5dbd0a5f8c749297d3d024797e4e7b +part_00312_of_04320.hdf5 9a2eba9799c599cfb26a9e06d3327e52 +part_00313_of_04320.hdf5 7211fa141c99f6ef7b8bb6c3e727c4c9 +part_00314_of_04320.hdf5 f03fa1a88c6d71adda5b4f418b041ecc +part_00315_of_04320.hdf5 d88a87bb1c594ead9afb386ab4194d80 +part_00316_of_04320.hdf5 75f125fd1a5fa342abfec89b81f6b952 +part_00317_of_04320.hdf5 3a94830a66b83dc7dae054f258c28082 +part_00318_of_04320.hdf5 e76308de9711468d806bc878da5ba70e +part_00319_of_04320.hdf5 e2c6679dadb148a4a3e77a5e4a6da45a +part_00320_of_04320.hdf5 e604a00e6a53fcdd8f950aa379b6e632 +part_00321_of_04320.hdf5 7b8bdbecad24636e26b862ce759abd4d +part_00322_of_04320.hdf5 c476476e1d53fae0372f8b84e6bfced0 +part_00323_of_04320.hdf5 2dbe64e71b310e5b9af14d5ae0216b7a +part_00324_of_04320.hdf5 f30750b4645c92caed12c8b1a4ccd1dd +part_00325_of_04320.hdf5 7b55ffafbb8ca2b8434bff6485d23781 +part_00326_of_04320.hdf5 0284e433a4c16c55a54ee53d52904c20 +part_00327_of_04320.hdf5 be1750ed61ca7674fc73156df9743f52 +part_00328_of_04320.hdf5 c9e3e23f9d348c20b7f7f697ff6f7779 +part_00329_of_04320.hdf5 043199f8e686c79c78bb5054aacdfa60 +part_00330_of_04320.hdf5 110f7e5b38ccbd2e2eaee2dbd93dd463 +part_00331_of_04320.hdf5 399892ac31ce5188a8aa5f18c2d3aa6f +part_00332_of_04320.hdf5 f86e384e53e292f80e7c6578cc864cab +part_00333_of_04320.hdf5 562bb2917463d77aae19fcd7f6a59f89 +part_00334_of_04320.hdf5 b502c0acef28730b956dd16cff48f03e +part_00335_of_04320.hdf5 b91a1a0593ba751004f499469c44afc0 +part_00336_of_04320.hdf5 59466a04c50b3dac333a03944e28397b +part_00337_of_04320.hdf5 9fb48d02b097f048eeb77887a54f730e +part_00338_of_04320.hdf5 2b1d99eb929c78fbbdc052730b0e91c7 +part_00339_of_04320.hdf5 4d93141d49fda0315ae744615c31970d +part_00340_of_04320.hdf5 f4c741e7886c3967b8a4d0aa917837cb +part_00341_of_04320.hdf5 84507eab8d10a3076e47d60bd721cf4d +part_00342_of_04320.hdf5 65e96de3a3880b846bc26c366849e349 +part_00343_of_04320.hdf5 070a0ddf9ae1b9dbee8020e992e3bd09 +part_00344_of_04320.hdf5 5b3e35b7c012e3fc6f11a0ac149ea709 +part_00345_of_04320.hdf5 55617c7c3732d8d6e6261934018e2c5d +part_00346_of_04320.hdf5 3f237ca0ec377b65f15821ef174bdee8 +part_00347_of_04320.hdf5 6a48c7637ef0b3867c4477328f43fab9 +part_00348_of_04320.hdf5 
0442df68fcaf2f840f980590456d702d +part_00349_of_04320.hdf5 2af842ce86f922899876ec7e0f4951d3 +part_00350_of_04320.hdf5 f4a1bb1090f961d721543c1267390e90 +part_00351_of_04320.hdf5 5bac6b26d8427376e6a1cb0fa97b69fb +part_00352_of_04320.hdf5 a64d2ec7e80846f2f10d7beb8b6e038d +part_00353_of_04320.hdf5 6e7a73440670a4181c3fe66dfcaecdd7 +part_00354_of_04320.hdf5 261069f5f55b247936585428767d3bf9 +part_00355_of_04320.hdf5 4225cc74f75372f6c273e9eeb4aa976b +part_00356_of_04320.hdf5 892001e46890bf0af16834a500b2652b +part_00357_of_04320.hdf5 eb41058b7b9040e46900c90029f6b95b +part_00358_of_04320.hdf5 2c698ed50caeeb346480b15fe772ef39 +part_00359_of_04320.hdf5 3a7c55a0d4b52da05d802970810ad145 +part_00360_of_04320.hdf5 eb30ec9bbfbd3b28e48718f17f71e496 +part_00361_of_04320.hdf5 2ea1e9b3eed66d99ada051ef8b138de6 +part_00362_of_04320.hdf5 82814d40b6d1580899310466275d28f8 +part_00363_of_04320.hdf5 b02e857fe3c8472f151cf4db8754ab06 +part_00364_of_04320.hdf5 1d173b4b5cc5103081e5ab09321b3f7a +part_00365_of_04320.hdf5 8ee6a3c1b66c08a2a0f6b55db2ebb6ea +part_00366_of_04320.hdf5 d8c2fac248ffd58d6fe8df529d3755c9 +part_00367_of_04320.hdf5 24e2e4c61d368d234b78717f07340244 +part_00368_of_04320.hdf5 8e55b936846f147f3cbf25163810bef5 +part_00369_of_04320.hdf5 d7e5b3c446abc028e6367d4b4a143f0a +part_00370_of_04320.hdf5 d0f4020f3529224cb9c3ce21b55eb96f +part_00371_of_04320.hdf5 18f7d6f5384fc97b83711c53fb877e5d +part_00372_of_04320.hdf5 aa50994a34506abb7e85af9edf0a73c6 +part_00373_of_04320.hdf5 4f004b9e17958de29995663d340dee46 +part_00374_of_04320.hdf5 f3d52f60f90278157b62da7c3d99681b +part_00375_of_04320.hdf5 18a254fdb20d7070acad394eb21aa742 +part_00376_of_04320.hdf5 6a1f9228a7a7732da63dffe42ecff612 +part_00377_of_04320.hdf5 40fef5eff5a3a4761f7c590edf82bcc9 +part_00378_of_04320.hdf5 dd49596b2b11d4bd52373dcd5cda6966 +part_00379_of_04320.hdf5 55ef262d5cd5688e0ffd2c870d0b4380 +part_00380_of_04320.hdf5 5347ca8485f9d6786700dd1112006f22 +part_00381_of_04320.hdf5 f7d792e26f27b4418a52c1699ca94416 +part_00382_of_04320.hdf5 9ed88907da41a1fe4734c474ea2d5057 +part_00383_of_04320.hdf5 5d6325dfb88df016740c8ac5c3842447 +part_00384_of_04320.hdf5 929767100e82231de95db2c07f033537 +part_00385_of_04320.hdf5 ae7a38f16f231f43a9195d13265f6c69 +part_00386_of_04320.hdf5 422dc65db63a99a3e0bd9c4e68745014 +part_00387_of_04320.hdf5 6d2c7394bb8986c4c751416b170db4d8 +part_00388_of_04320.hdf5 f6e9502410c26bca48064398c8fceba2 +part_00389_of_04320.hdf5 143c74bce6751367878daf9ad98a8ccd +part_00390_of_04320.hdf5 8ad4ba9b3049e8827f60c8e2e258b1f7 +part_00391_of_04320.hdf5 646452858499055424eee16714280d85 +part_00392_of_04320.hdf5 1c58b110b5483b875a1d835feb532bd6 +part_00393_of_04320.hdf5 3f96b30449248ca46acf61f5835b3cad +part_00394_of_04320.hdf5 2b65bea38b31e06700168391f8c1ead3 +part_00395_of_04320.hdf5 7df2e632d56094718304111fd7a235a9 +part_00396_of_04320.hdf5 63f84be70b423d4ec74f8ea10ed7dfa0 +part_00397_of_04320.hdf5 ef01bcff48b4602d4ab7714c9d018bcd +part_00398_of_04320.hdf5 917b7728877fef1bab9c3039473ed657 +part_00399_of_04320.hdf5 a15566defc56b56808c77f46a9e66151 +part_00400_of_04320.hdf5 291c7927314991f45e4de8087f91309c +part_00401_of_04320.hdf5 cb46867032c6788cc69c1e7e1d94dd0d +part_00402_of_04320.hdf5 be296c4ec316a0596bbb6da2c7780059 +part_00403_of_04320.hdf5 99fada3e5b07e340b90588fc0dc4e082 +part_00404_of_04320.hdf5 d34f04e858b7785230a3e2ebea74b99a +part_00405_of_04320.hdf5 656f5f547dce92d6c5c9b80cb485de6a +part_00406_of_04320.hdf5 523c04016d8259104bac5c186d4ad96c +part_00407_of_04320.hdf5 01df8f4cb29e3f2aa65dee86d694737c +part_00408_of_04320.hdf5 
95f464d7e1c4276986c946cfa0a75348 +part_00409_of_04320.hdf5 8d3cd83d3054635d57ceea4710011bc0 +part_00410_of_04320.hdf5 f4220cb41acf5ea93fb67a9554a9495e +part_00411_of_04320.hdf5 4e314ca1cb5ec7ac46c53de93dc3ebe4 +part_00412_of_04320.hdf5 a2521ba6554cc2047c6bd0e648970db5 +part_00413_of_04320.hdf5 e1d3cfc16216387c1cd32c7c90e4fbf6 +part_00414_of_04320.hdf5 8209a3f97737eb907724cfe0d5caff6e +part_00415_of_04320.hdf5 4efa1ca32d97a6e72b2aa124787a845f +part_00416_of_04320.hdf5 52898dd2811c48c427547745f2b49b80 +part_00417_of_04320.hdf5 699fa959d6ddbaa3754e9d5e2bab6928 +part_00418_of_04320.hdf5 e059a1151a035febd3c4748fac5fd9e7 +part_00419_of_04320.hdf5 6eacea8200da3869be9a1ad1f61c1b4d +part_00420_of_04320.hdf5 3bfe8798980c374d416d6aaf8499e535 +part_00421_of_04320.hdf5 8e7b4746301340de114b293d2bd18009 +part_00422_of_04320.hdf5 77e826d25243f44e6100c2452226bdc6 +part_00423_of_04320.hdf5 0593178e2a6033e618495b6d0cc9869d +part_00424_of_04320.hdf5 e779310eb494423e84a9cc5eed6fb090 +part_00425_of_04320.hdf5 c0c421634f466431f8c0c86f450a4ad1 +part_00426_of_04320.hdf5 e052afdfeb3628c6e84056157a1c75de +part_00427_of_04320.hdf5 9fb40669ef4ae65e60001ca6b58aeb2f +part_00428_of_04320.hdf5 7d6d9399cb135c65766f8d3068ee5684 +part_00429_of_04320.hdf5 ff966d85af1fb15bd32e0763d94ccce6 +part_00430_of_04320.hdf5 533c970e305becf1343c2dfb97bf4a89 +part_00431_of_04320.hdf5 cf2b601cd09cd1c1baafac31a04ebc8a +part_00432_of_04320.hdf5 81412d89dc5e2287bd8664e6ae8814b2 +part_00433_of_04320.hdf5 33f1b48aa0d348730ef1454441420e2f +part_00434_of_04320.hdf5 7c9dfaa6c83b5b98b06977b3cb2fbd04 +part_00435_of_04320.hdf5 3bac1017246e68941958e8205440bd53 +part_00436_of_04320.hdf5 a02ae931e1c5821041e02ac091e2493b +part_00437_of_04320.hdf5 01644549273ba15baca114e8ef263249 +part_00438_of_04320.hdf5 eaa51fba84874372ddf13e80d59fb8b0 +part_00439_of_04320.hdf5 64a3604f847b272b2554da9296400346 +part_00440_of_04320.hdf5 4925c08962d3e8eea6d47c6da42cc6a4 +part_00441_of_04320.hdf5 d38ddd5dce8df81d676bd91daca0b505 +part_00442_of_04320.hdf5 5418bd00d194aca1cd404d08fb6a596c +part_00443_of_04320.hdf5 3f86b611bd6ef5ec83a79e7134e475d6 +part_00444_of_04320.hdf5 20a15dad60b490f4480ec67e3c96a424 +part_00445_of_04320.hdf5 93b3b1d0f2e00063e3fbba7688ecb15d +part_00446_of_04320.hdf5 4605c3b6ec152396274fb2b3f546a595 +part_00447_of_04320.hdf5 f83ee1cac5029afe7263c710c894ee2a +part_00448_of_04320.hdf5 07a90fd533e36ef97a63f58cf56bcb47 +part_00449_of_04320.hdf5 b5bde1e2caa09202c760de516a509524 +part_00450_of_04320.hdf5 14eeccaf2983ed1249d638aa4a6f14a3 +part_00451_of_04320.hdf5 fc46802cb55bf8c1f2b2a8b8871db29b +part_00452_of_04320.hdf5 88cd589efbc9fdccd11ddafd3f6abb18 +part_00453_of_04320.hdf5 e54188f54ead90db09118eaed3a576f3 +part_00454_of_04320.hdf5 48769ef8944821edd5efed59f8d37830 +part_00455_of_04320.hdf5 2f035634ea11a52fa3b36c3da00a159f +part_00456_of_04320.hdf5 38817530f1f58c2d64d84139ae3f2b5f +part_00457_of_04320.hdf5 b2c81d2c593ac9d142979e49ca21d06b +part_00458_of_04320.hdf5 cbf448e16e6690f96d46b58fd334e2fd +part_00459_of_04320.hdf5 d3ea7635b7ec74a2701d85fd3077202b +part_00460_of_04320.hdf5 82d2e482be526144dd5fbb8f09488f20 +part_00461_of_04320.hdf5 c491fa230a516418b8fcf84804e25cd5 +part_00462_of_04320.hdf5 b19664f2f737cdd756b9614967d57f40 +part_00463_of_04320.hdf5 792af45a93393c18da5361480d75bb87 +part_00464_of_04320.hdf5 d535c405f5883ab1cc2aed1ac564303d +part_00465_of_04320.hdf5 4668069988f54b92d92a004c6b7c42c9 +part_00466_of_04320.hdf5 b64fa60751d3858d157c5332399090f3 +part_00467_of_04320.hdf5 9d534979c5653c8491e702be5ef7fa2d +part_00468_of_04320.hdf5 
38971a3bfd96a6bb5e55817cad24a757 +part_00469_of_04320.hdf5 6e6fbcae502710b1095adac3a8b138a3 +part_00470_of_04320.hdf5 896f6d35953ecec7fcfe43103e929e8b +part_00471_of_04320.hdf5 357d7ba738d31b39c4be35c1872a4003 +part_00472_of_04320.hdf5 b6a9926f54c6d80474dd7a9145dd26d4 +part_00473_of_04320.hdf5 c562fed765a6db7ebff4fb7c5b3c1c05 +part_00474_of_04320.hdf5 8a5c5e26dc6b2ededaab88cf6a638cd0 +part_00475_of_04320.hdf5 d899b3716657968cf398acaf74ae3997 +part_00476_of_04320.hdf5 7b193c7e06b462ae02870ec92dbdc153 +part_00477_of_04320.hdf5 29d2633c003d12ea29f688add4b8d278 +part_00478_of_04320.hdf5 70c14c538eea9341aaabcc8e5a2fca1c +part_00479_of_04320.hdf5 0d1ab203e79882d6254c092f5531922b +part_00480_of_04320.hdf5 25e6625de6872e4c919ec10c3ac27467 +part_00481_of_04320.hdf5 f5ffbe387b401355e008e1673a5e4466 +part_00482_of_04320.hdf5 de935495a1d384b3189bcaac1b164736 +part_00483_of_04320.hdf5 eedababda293ba74540e34e5136f01f0 +part_00484_of_04320.hdf5 7895583e6ec4a6337486bfe297287533 +part_00485_of_04320.hdf5 9bef72d393524c378a2d7970f268a7c1 +part_00486_of_04320.hdf5 484da7d2e2b810e9210c91619ce4fd00 +part_00487_of_04320.hdf5 30dd1ada4ab9a79415c1fc8e3b6e5e1e +part_00488_of_04320.hdf5 b12692b5141a25383cf8ae29cccd2f4c +part_00489_of_04320.hdf5 16a1ad181d5ee289f72d0f91eddcb543 +part_00490_of_04320.hdf5 18563d16de463be525358842290b1629 +part_00491_of_04320.hdf5 8a8ac5656b800e35d286689681bd5d4c +part_00492_of_04320.hdf5 7aad8d17635bf8f987ee50de26369f28 +part_00493_of_04320.hdf5 564e69701613ad981f3669d03afa4f48 +part_00494_of_04320.hdf5 1982942c6b128eecf3b1302172dca2da +part_00495_of_04320.hdf5 dc547c214d23c8893b169f81b3e199cb +part_00496_of_04320.hdf5 31ed3ae9799471e1de80c6f98dc5cd14 +part_00497_of_04320.hdf5 c938fa6ca7f793fc40633fb5e932928c +part_00498_of_04320.hdf5 f7020194c4ffcf8524047cdd4dd95727 +part_00499_of_04320.hdf5 55ae9e135577c282863aea51d236ed0a +part_00500_of_04320.hdf5 f9a5f048b4137d74652f8d9f65da13a8 +part_00501_of_04320.hdf5 618cfda42d66b7a3a3a3f2f50d70f869 +part_00502_of_04320.hdf5 e3b355c20c5606fbf3e7132aa43599c9 +part_00503_of_04320.hdf5 903cffc44a776ac10ff3ff09690e5966 +part_00504_of_04320.hdf5 1b7daacc8b4c5196bf2b20114c747cdf +part_00505_of_04320.hdf5 aab161a3f5e9e504f3433dc19fc3c689 +part_00506_of_04320.hdf5 01efb239875d9c93b52b006ea817c997 +part_00507_of_04320.hdf5 f28f506874031473978f515597524b9a +part_00508_of_04320.hdf5 1c8ea833813f7c0cb657ca15886451d6 +part_00509_of_04320.hdf5 f4cdf0079018317db3b72fe69160492d +part_00510_of_04320.hdf5 c5b312abf6466b30514ed4dc80e60ba7 +part_00511_of_04320.hdf5 9c4f3cb25813b553e61bde8b2a5ced54 +part_00512_of_04320.hdf5 31d59784ccf18c2105756cbde0326088 +part_00513_of_04320.hdf5 d9b3d88afa5358f4c3d58166ffecc873 +part_00514_of_04320.hdf5 26490bfd1e6a90341ddd9061c9ec0468 +part_00515_of_04320.hdf5 c3efe1a02e0eac579758711094bf05ec +part_00516_of_04320.hdf5 165b60507054db0b8e94ba9e9314644b +part_00517_of_04320.hdf5 76b74ce821de09ba7e3176ee3aea3738 +part_00518_of_04320.hdf5 c7a86ba292005d81674a271f7dcc32f4 +part_00519_of_04320.hdf5 82e274caf15ff2b515ac6beaa48313cd +part_00520_of_04320.hdf5 520bff78bced2b1d59519839ad672c0e +part_00521_of_04320.hdf5 bde06a59f0ebf27f3d7e5ccb0eb91134 +part_00522_of_04320.hdf5 46a4dbd6fec3fe1c4c4f779db0a4d8b2 +part_00523_of_04320.hdf5 e2be7fd85e1ac05df20ee2c7337dcf67 +part_00524_of_04320.hdf5 b12d83d61c51acce628723b2634ab7f9 +part_00525_of_04320.hdf5 925dd958425dbed4b9e24cee68ee47f8 +part_00526_of_04320.hdf5 3963cf18cd8805ee0f583a0f205f15a9 +part_00527_of_04320.hdf5 a51cb7fb07021824154918b01f97e2f8 +part_00528_of_04320.hdf5 
7a8fb4ae0cb3722c53c8bce63f7e116e +part_00529_of_04320.hdf5 2354917782e605ea18d03305116eaf9e +part_00530_of_04320.hdf5 632799588989b7c0b4519cc76210c48b +part_00531_of_04320.hdf5 f85f3086ed7ffec1c00d9c51be53527e +part_00532_of_04320.hdf5 a58b94fb379845eb97f476dbc99c1c41 +part_00533_of_04320.hdf5 211389b72b92d6462c7e53182227043f +part_00534_of_04320.hdf5 e5cff5057c036e940097b360087b8bd2 +part_00535_of_04320.hdf5 4f6d90151896e7598ccef66e91d8b4d5 +part_00536_of_04320.hdf5 eb85d8d1f84ef88bb8e3669e0e8c26cc +part_00537_of_04320.hdf5 b34a379672055e131f6203f6f650dbde +part_00538_of_04320.hdf5 2b1fae27592eff3da6cf531be956c5c4 +part_00539_of_04320.hdf5 1f97a309c998576a611861598c66ecc3 +part_00540_of_04320.hdf5 310905a93095b292b3b83deb7f835215 +part_00541_of_04320.hdf5 28dfc675dc3c42de0268f39fe8fbf2c1 +part_00542_of_04320.hdf5 5423dfcf4ec2c23a9944433cf400d62c +part_00543_of_04320.hdf5 8ae94450a06c8364f4fb1a8ee2e430a7 +part_00544_of_04320.hdf5 19658095b6d8585a81a311852d8a96c6 +part_00545_of_04320.hdf5 f90f4ce94f747564056752f3b9ed55a2 +part_00546_of_04320.hdf5 5413c53ce043cddea6775c423f111ab3 +part_00547_of_04320.hdf5 aeebbb77695d69257b2916955bbe86b8 +part_00548_of_04320.hdf5 4630d76e245ce6dbb8427ebad14c97dd +part_00549_of_04320.hdf5 b9f6e338949ed66b0bd586fb5eefd9b8 +part_00550_of_04320.hdf5 fb07bc91ccbffb1017c5d70384a32ad2 +part_00551_of_04320.hdf5 3c5f71d6b061453f935250cdbbed9f36 +part_00552_of_04320.hdf5 5f8785c78bf27a31d43788c7ee3b38ca +part_00553_of_04320.hdf5 ad167c28a73f88fb10e9a2bc5e869b27 +part_00554_of_04320.hdf5 0c69788f66906210d06a3d13e6949bf4 +part_00555_of_04320.hdf5 e411e44b15502bf1dca4811748391d1f +part_00556_of_04320.hdf5 43e608aa97bcef7eeba550e8b350e0d5 +part_00557_of_04320.hdf5 f2b03d91dc4ec2c566d72df44421c03e +part_00558_of_04320.hdf5 125533fec3ef05084248b70fca08828b +part_00559_of_04320.hdf5 148873d36a92d2f185daf5964b936f3b +part_00560_of_04320.hdf5 53744df927a679f053c6cf141ec616d8 +part_00561_of_04320.hdf5 84350d882304c2e34d67b2355e11798b +part_00562_of_04320.hdf5 5f328c70cd7cc35ff85da6f851928605 +part_00563_of_04320.hdf5 de9fcaaeaaa00c67f5df269e4ba302f0 +part_00564_of_04320.hdf5 e116073bf23b745526449a86ed542f2a +part_00565_of_04320.hdf5 4fa07d70a5e920b00caa2ccd0bee25ba +part_00566_of_04320.hdf5 ace370c93c22a4e3bd5c633d08af6d30 +part_00567_of_04320.hdf5 ce27f147963284337f969bd5fda99d39 +part_00568_of_04320.hdf5 82e7e452dec0928a688696facb910c9a +part_00569_of_04320.hdf5 df8cd7262af54b758ead7d06a701e1ec +part_00570_of_04320.hdf5 2614d41ecce328f3cd0ee6f4c1d1e06d +part_00571_of_04320.hdf5 fe543255c6077756ba243e3d1fb03b91 +part_00572_of_04320.hdf5 6a011834dd76c8b50c50cacfff1f2535 +part_00573_of_04320.hdf5 1826a5d25e3d2e3becaa6a059e2b988b +part_00574_of_04320.hdf5 416357c1e71a444b455c38b50a6c8fff +part_00575_of_04320.hdf5 6a3bf927f34cb768876455c1a3e62455 +part_00576_of_04320.hdf5 f4c3fc23588db434c3a772b182e04531 +part_00577_of_04320.hdf5 dce30500f85c7a21c7c9ddf56e1d6d1b +part_00578_of_04320.hdf5 fc77b4fe99ab2b55d61ba25ed0a80fa8 +part_00579_of_04320.hdf5 448ee2eedd68584e88fed32788ea80dc +part_00580_of_04320.hdf5 55ee484d42db8496e09d2a27307bf94b +part_00581_of_04320.hdf5 f80e93f7e19272137d590cb0c95a4b47 +part_00582_of_04320.hdf5 84a7960e62e620c4a6fe845be999a11e +part_00583_of_04320.hdf5 20f630e9b870cb4028921b2be980c2c3 +part_00584_of_04320.hdf5 46126bb5e4da323a3a17ea3836b98bc7 +part_00585_of_04320.hdf5 44a63b23164db8f2ed866727c9046f74 +part_00586_of_04320.hdf5 fb14a71c3045b66799c4949107932114 +part_00587_of_04320.hdf5 30d563f839a2264414c1e2eadf7e5cb8 +part_00588_of_04320.hdf5 
7fe4f6e1232f7f5ec3365511987e86a8 +part_00589_of_04320.hdf5 24da743ffb17fb35f9144231872ec3b7 +part_00590_of_04320.hdf5 5329ffdba7e6a3c0bf0f72dd2ea69938 +part_00591_of_04320.hdf5 49c0fd58da4a3dc42185aa8dfcd1531b +part_00592_of_04320.hdf5 2fb37cd3083326ec5b91bbbcad144326 +part_00593_of_04320.hdf5 af6f34c9c0d229b069bdcdff3951e095 +part_00594_of_04320.hdf5 5635e23cc86512717058609a2e5b9855 +part_00595_of_04320.hdf5 78a2addf19d3afdbcead567b7b467772 +part_00596_of_04320.hdf5 2ca23afa2e9b3f4faa6a6b8e9f7f4206 +part_00597_of_04320.hdf5 d19d002f459d8b9ae63c1172de3f523e +part_00598_of_04320.hdf5 2e7cac10e690094ea59305ec3d9cff28 +part_00599_of_04320.hdf5 1f13b22755eed37660e0e467523b3816 +part_00600_of_04320.hdf5 41dd79b357307ce88a30fb7dde9c3c1c +part_00601_of_04320.hdf5 aa467a0a923a875950f0fc8737c7e63d +part_00602_of_04320.hdf5 2f322e696a1f572cac10c9045c5353c1 +part_00603_of_04320.hdf5 e913f2e9088360220923e9b685f6a0c2 +part_00604_of_04320.hdf5 01409100f531994785925703e291a18b +part_00605_of_04320.hdf5 7c305c932cdabb1b4ca8f52ff7e0e71a +part_00606_of_04320.hdf5 243b6109db9a90ba8958d7a2c3240c60 +part_00607_of_04320.hdf5 171e08b9fe53fbe19333229111bc3758 +part_00608_of_04320.hdf5 9e66808579dd8c7a5cdf49b103968b30 +part_00609_of_04320.hdf5 99aaec057be4990c1a5d06f42203b246 +part_00610_of_04320.hdf5 e15686185f163429618e70d0c30be969 +part_00611_of_04320.hdf5 39e545852d852c1861f1908965e33b93 +part_00612_of_04320.hdf5 ab12581f10cb86a3dd999685bfaccbe7 +part_00613_of_04320.hdf5 d087510c329ccd47a14357db806c85b4 +part_00614_of_04320.hdf5 e4883031e34ca4cdb96a8df51c5105b1 +part_00615_of_04320.hdf5 a8ae12b486f8187167130c286b5f8ff3 +part_00616_of_04320.hdf5 65bd7c3e1457f890bcb1b24c62637f2f +part_00617_of_04320.hdf5 c7798d24ddd1afc45edeeac95b5c133f +part_00618_of_04320.hdf5 48551973d0ef8495035176b751a16cee +part_00619_of_04320.hdf5 4b273ab16fafb0599ca3b97c9684b203 +part_00620_of_04320.hdf5 a5f0485f6a340b10402759acdecc0005 +part_00621_of_04320.hdf5 e9ac6497a15e0774c6ffff3fb249ab68 +part_00622_of_04320.hdf5 f215cfca00fcdc9d7088a9b9066cf08c +part_00623_of_04320.hdf5 75824afd48a3fed694c84a8e5f721549 +part_00624_of_04320.hdf5 20e5aa63db38706312a60ec72c42a86d +part_00625_of_04320.hdf5 fb3734785f54dc8238b5786f4dc2c5c3 +part_00626_of_04320.hdf5 bcfb7cd12e2c4c0e7988d8539c8b8b25 +part_00627_of_04320.hdf5 655c386ba947e40abd8c68c31511c678 +part_00628_of_04320.hdf5 d0a5c5ef52c46009c3e3475765cfb50c +part_00629_of_04320.hdf5 d5b3dcce264e9579fb4d8d24bfa03d62 +part_00630_of_04320.hdf5 d19d2faadd7d4cd18a9be816d087dba0 +part_00631_of_04320.hdf5 335a069660f72489d62e3bdc06521bb0 +part_00632_of_04320.hdf5 2d054fa82ce48036e381bbab9e101840 +part_00633_of_04320.hdf5 2e06cdafb47b1a6afb93472b0c7fd35f +part_00634_of_04320.hdf5 c0d02be495050bc59e79d524e9f9e7d2 +part_00635_of_04320.hdf5 4b9ce29bf844039f96aa56a4820e43d8 +part_00636_of_04320.hdf5 4a0fe20dab2896f81c2daed00e675e97 +part_00637_of_04320.hdf5 44d05821ea70497ced08f602fdae92e1 +part_00638_of_04320.hdf5 a1dffdb2e51726aca321f43608c9088f +part_00639_of_04320.hdf5 71222cfaf0cb43719d351372e224f8a2 +part_00640_of_04320.hdf5 c1862d525ac08971b0a4c04f6b45dd0b +part_00641_of_04320.hdf5 03a2bf6f0047b7cdf656375c870b2799 +part_00642_of_04320.hdf5 95a4a839927a7c665585701f050e92a1 +part_00643_of_04320.hdf5 be0b35410d2aa20a7912b1a3d36fc1a2 +part_00644_of_04320.hdf5 c46eacd49d0b14001720150d5aa8d6ea +part_00645_of_04320.hdf5 c7ca0eb72b963c37444158fc21902ff0 +part_00646_of_04320.hdf5 85f2de67bd5789e571284380348b571c +part_00647_of_04320.hdf5 0aa5ff1b6ab6ac2f8b22630572702f8e +part_00648_of_04320.hdf5 
44bdc1b202cbce7e5e50907c0f749086 +part_00649_of_04320.hdf5 26cf25c35288bc47a095175c1476ce16 +part_00650_of_04320.hdf5 6de6a809093ad2eb5b943812e5c05d79 +part_00651_of_04320.hdf5 a86f1fe4b18b3485de6bf212de101cdc +part_00652_of_04320.hdf5 dd5682002cfeeb459ae3af071c0faef4 +part_00653_of_04320.hdf5 cba96c50212ba0f86453f8b4ad6d2330 +part_00654_of_04320.hdf5 d663e32275e017638c6dcefb3f7c18ce +part_00655_of_04320.hdf5 d01dae078d4ed5c2620f7ffb364ef47f +part_00656_of_04320.hdf5 27b47eaf71604beec118093a85ec35ff +part_00657_of_04320.hdf5 88a6fcc7cd6fa0a834d2e69b27f6ce58 +part_00658_of_04320.hdf5 eabdfa71f98dffe5ca6aa1fb4b051052 +part_00659_of_04320.hdf5 b05a8ee7fcde527257b42adb03d3f426 +part_00660_of_04320.hdf5 4d0fd4c77e4aec9cb874d1e019898638 +part_00661_of_04320.hdf5 5c70713cf555b8d1686b179d5ccd41a8 +part_00662_of_04320.hdf5 959ed9c4956752262200d2d4e0c206f0 +part_00663_of_04320.hdf5 a6fcce60ce313ceea5a751c26c0978be +part_00664_of_04320.hdf5 d29dc05a69ba54a750ca976a89f7af4e +part_00665_of_04320.hdf5 9eb02e76ca92f819354e7b935ac91654 +part_00666_of_04320.hdf5 2e2ffe1c13ff2a11dd772a5812894258 +part_00667_of_04320.hdf5 92d68d43c2d54e08ff7eafa0ce04c1e2 +part_00668_of_04320.hdf5 eb9b4f6379f8ed3cc4c33ff20b7e35ea +part_00669_of_04320.hdf5 04a62d4529c2e6f5b2da977bb1a3a4a8 +part_00670_of_04320.hdf5 4988b6595b6c966b2003ae10d493f28a +part_00671_of_04320.hdf5 b727a047061297981af826dff25e0e3d +part_00672_of_04320.hdf5 adebb29a8bdc474d7b6aaba812b59414 +part_00673_of_04320.hdf5 ba4a7a1e09d9f889d697096cf97aa5a0 +part_00674_of_04320.hdf5 23bc50d6686365c438f8f1c015206bae +part_00675_of_04320.hdf5 9baefe36e00bd238b57c702d6dfd98a7 +part_00676_of_04320.hdf5 d2ca06bcd4ad5ff72089e7eaf167ee6c +part_00677_of_04320.hdf5 bb3b6ea8006687cd3948d0fe141829d0 +part_00678_of_04320.hdf5 7f7d55e0f513b3984fbf556a1ba80a62 +part_00679_of_04320.hdf5 b72a0b0694ca5b93e2388f71813d0042 +part_00680_of_04320.hdf5 9607b72d26cdef2d4101699ef946ff2a +part_00681_of_04320.hdf5 6899870f76783a6a550f371d7586bdc8 +part_00682_of_04320.hdf5 2015aba041a67744dc01abec64099f6b +part_00683_of_04320.hdf5 af24eed7d545eb52dc6d87a7a8e44275 +part_00684_of_04320.hdf5 33705cb6474d694174b06d98a5ccc05b +part_00685_of_04320.hdf5 f53d882ce68fab2c526bbe2b6508d520 +part_00686_of_04320.hdf5 3565c370ee16357fd534897d2a7b4bf4 +part_00687_of_04320.hdf5 f88ac6cd7e534b5d386249c43bff873e +part_00688_of_04320.hdf5 659a58cd7f2d6fa880790ee2b116945e +part_00689_of_04320.hdf5 ff020db883b7525c370ee4c3ce706e74 +part_00690_of_04320.hdf5 6c5a3ec1f1c116f87e61bfa7d3b80c79 +part_00691_of_04320.hdf5 3109da1daede20a9bba509a70954b455 +part_00692_of_04320.hdf5 58d146f86bba9e1fd4978c45e9cdc0a7 +part_00693_of_04320.hdf5 2bf27172e3256a0846da207afe6a8a13 +part_00694_of_04320.hdf5 68999cfa1864c431caa7a2c83ce1a05f +part_00695_of_04320.hdf5 d31199adc005359730fe6b0303132756 +part_00696_of_04320.hdf5 11f3ca91b5ba8f35d0dce64e02f88787 +part_00697_of_04320.hdf5 89a564558550cdf2f6e6be0aa2c0bab7 +part_00698_of_04320.hdf5 914a31f51063c6ccfe5f0ee7c48e0242 +part_00699_of_04320.hdf5 ed0eddd98874db911dfd1e43e7515eaf +part_00700_of_04320.hdf5 db90b9c2bd07c55d3b8ade5edc216050 +part_00701_of_04320.hdf5 44621a76334746cb9073e1513dd898b5 +part_00702_of_04320.hdf5 0f15493fa330cbe5db838cd11faf5c51 +part_00703_of_04320.hdf5 d526b86c7c7989d75202e61afa6e5f76 +part_00704_of_04320.hdf5 1acc686cc0ed8fcaee19fc29b47b7e94 +part_00705_of_04320.hdf5 bf65ec19969d94ef04a1fd0c06e6e038 +part_00706_of_04320.hdf5 4b44a217263975accd19dce6e0370bdb +part_00707_of_04320.hdf5 589c981426d250ceefb25abe3479af1d +part_00708_of_04320.hdf5 
32983bac8d9be7bf9857b52e1c5d5072 +part_00709_of_04320.hdf5 4addde5d1aa454ecf71a9b6e7abb587e +part_00710_of_04320.hdf5 2594379570269c096652b87ee3244c54 +part_00711_of_04320.hdf5 1044a1091dadd41452a09c433615897b +part_00712_of_04320.hdf5 54934d8f39c22d87daa9b0e30d368169 +part_00713_of_04320.hdf5 95b28839dd9f53450392fcfdfef53cc5 +part_00714_of_04320.hdf5 de151dce0c83aca65e6cb74a6ba775f1 +part_00715_of_04320.hdf5 9551543dc53bf321267e6d7d4d354236 +part_00716_of_04320.hdf5 c4a2af844c5896ee43329fc9fdcceba6 +part_00717_of_04320.hdf5 f09a73eab148448560cd3f73519e4bd2 +part_00718_of_04320.hdf5 507613c21a07d5ca59f5ad940a8c0b56 +part_00719_of_04320.hdf5 803745c8542e4985465fe784f18bb859 +part_00720_of_04320.hdf5 f4aae55b18944bbd9bc44128fa1bdfbf +part_00721_of_04320.hdf5 8093771aaefce57f0fbb3dac428f4954 +part_00722_of_04320.hdf5 e42055fab5b8fda897b7c1f81c8c4a5a +part_00723_of_04320.hdf5 3a82ad22a2581b9ec85da7b1ed68bd67 +part_00724_of_04320.hdf5 3f62cfc91f1b0c60bff24a4783cff771 +part_00725_of_04320.hdf5 b3ed928083079dab9c1d03f919e09893 +part_00726_of_04320.hdf5 bbbdcf1199a4b95d808ee3be07c09b3f +part_00727_of_04320.hdf5 cd56f0d000b060ec84fcefe87b484617 +part_00728_of_04320.hdf5 95cee8847f041c267461adb067b91330 +part_00729_of_04320.hdf5 3d8c04ae067afe9b15f00f9f70aed2ee +part_00730_of_04320.hdf5 45a62b6f6cc6ccdff784614c7f9c70fb +part_00731_of_04320.hdf5 69dd89cf6b2bdcfb46eb56a92f41a82d +part_00732_of_04320.hdf5 27dfce6704e52ce250fd93dfe5e5d34e +part_00733_of_04320.hdf5 70a16d0c23d060893e85398e773bee62 +part_00734_of_04320.hdf5 ab04565b16d7a117643fcd936a803cdb +part_00735_of_04320.hdf5 247b0617278dc3d3929b284aed2c3033 +part_00736_of_04320.hdf5 c684bfa74b94eba79b3530fdbb298bf2 +part_00737_of_04320.hdf5 6c273c8b5a5e297151d3a86684d17aad +part_00738_of_04320.hdf5 fa979a1e4e518585f8cd68b7972217fc +part_00739_of_04320.hdf5 5510d1248ae0dd843f7ed4cd28775861 +part_00740_of_04320.hdf5 feaa381d99a2e86825a7af15a55efd02 +part_00741_of_04320.hdf5 dca85fc13bd4fdb7e64250f94c6aaa42 +part_00742_of_04320.hdf5 092b103dc1c83ee695f4d31df0b51860 +part_00743_of_04320.hdf5 9e6546de86ac23f56912e28e50a648b2 +part_00744_of_04320.hdf5 de7b9c23fd1eefdb380de11f42bd61b2 +part_00745_of_04320.hdf5 2c91e5c63026fb3806dc9419d10ce061 +part_00746_of_04320.hdf5 57f620f0d052de351143dc55cd180bee +part_00747_of_04320.hdf5 0ac0c5e6532823b074bc12469fda8c4a +part_00748_of_04320.hdf5 59ae525d21cdb4479fb640e4bb659000 +part_00749_of_04320.hdf5 40c9bcef6f56a0d5ec8a0f6576850106 +part_00750_of_04320.hdf5 335de89f2a6a6c9894d5111214cfc6bc +part_00751_of_04320.hdf5 1b3242cc6f1eac719fa1a270b2b17b84 +part_00752_of_04320.hdf5 e6229f4a02d4bbe4d2aa6f6754cf8b50 +part_00753_of_04320.hdf5 337f24ff427170daeca7bd1c55f8795f +part_00754_of_04320.hdf5 06bb4d581143cf5d01dfc2bc36b78850 +part_00755_of_04320.hdf5 b53877c206f82e66dc0a340512a6112b +part_00756_of_04320.hdf5 745c09ef9ef79e2a236860b2c1ca6ec9 +part_00757_of_04320.hdf5 805b703fcba02df8c072ad3ef3e594e8 +part_00758_of_04320.hdf5 47e001f9ac6a8d91eae07c09b630f798 +part_00759_of_04320.hdf5 6bcc35101d03e450c28e2581e7242b27 +part_00760_of_04320.hdf5 f08574aa64b0fc3e4e914da7ca3bbf9a +part_00761_of_04320.hdf5 41af4127bf1e836c0b2249b6bf0105a4 +part_00762_of_04320.hdf5 b33c462b1544bf08fd51c27341731971 +part_00763_of_04320.hdf5 70d12aa6691370fac9fe0804bdf06b98 +part_00764_of_04320.hdf5 116f599a970f58e5aa1a6657838bb01a +part_00765_of_04320.hdf5 0ed938a8a506d25a6f8fb2413b7ab5c6 +part_00766_of_04320.hdf5 64a9cfd2f0f8c1cad8ca4a15af69efe2 +part_00767_of_04320.hdf5 82f67da2c3cdfd43b5cb94c6dfc6bd6d +part_00768_of_04320.hdf5 
99aec697af090419f329eeba4307380e +part_00769_of_04320.hdf5 af6b2f9badda8ee86affd45388dfbaf1 +part_00770_of_04320.hdf5 41b082822196abdf19f476fbaee4ab7a +part_00771_of_04320.hdf5 cf66a82bc40e29e80984353dbd2bc6b6 +part_00772_of_04320.hdf5 7913c7a87739092302d486ed25e90535 +part_00773_of_04320.hdf5 9639dcb1df49692b9c380b24dbef2086 +part_00774_of_04320.hdf5 1199923111bcf137d4e0bbeb74268e45 +part_00775_of_04320.hdf5 185159436d8d2de9aa4923b80fc11495 +part_00776_of_04320.hdf5 a1732767425d09b5304cc6a2c1aee1dc +part_00777_of_04320.hdf5 80864f4d5b063c149bea273a7851cf61 +part_00778_of_04320.hdf5 b4a4fba532241467fbdab78fde015cef +part_00779_of_04320.hdf5 4d3c621971cc0c89b41c6a7099080c67 +part_00780_of_04320.hdf5 addf81f140bc2b549001601f4e0a26da +part_00781_of_04320.hdf5 dd78b8c513b5ef6b8236e2f2058b5360 +part_00782_of_04320.hdf5 fbf90ae2910b67ba27db40241c00fe4a +part_00783_of_04320.hdf5 532e9dfacb860dbc2effb11b5d91242e +part_00784_of_04320.hdf5 fad9419963996014b407e2f1a1e2c401 +part_00785_of_04320.hdf5 f94ace9d7304663c68135fca8437c064 +part_00786_of_04320.hdf5 4f030d79b67a4f4a0f8073c93fcca053 +part_00787_of_04320.hdf5 38cf3af4ef51166f8feb701b45fc78dd +part_00788_of_04320.hdf5 255d7521d337438e6ec0dfa8dc7aa355 +part_00789_of_04320.hdf5 754947f8d6a2167ec57003ba63f48122 +part_00790_of_04320.hdf5 a36f2c57cea4a957df4eaa36c722effa +part_00791_of_04320.hdf5 ff733e6c254c33cf8e1ff0a3e39f0924 +part_00792_of_04320.hdf5 c7789bc42f657f7b2d29471c6784e408 +part_00793_of_04320.hdf5 d72fc67a4a39b5fd807e2b05c8b7d3f3 +part_00794_of_04320.hdf5 a4a704d4c8236adf5d84da22487d996f +part_00795_of_04320.hdf5 fad632c19231cfcdfd8b2a044d3779bc +part_00796_of_04320.hdf5 2ab09f12fe3707113cb9cf9f804ec6e8 +part_00797_of_04320.hdf5 58fd8ec15263ec49065c685be0de4471 +part_00798_of_04320.hdf5 d32ffc5f5af034c3ac19494ab6df5c93 +part_00799_of_04320.hdf5 944c87e7512933cb1684206d685c6fbd +part_00800_of_04320.hdf5 697bc5c252a3561615a2680e81beb127 +part_00801_of_04320.hdf5 d5e5d87cc5794ea03e3a5010b26284f4 +part_00802_of_04320.hdf5 0cd417a685fdc0221e8579f3abd5cf30 +part_00803_of_04320.hdf5 43d163bd576f13065201e93fa2b24761 +part_00804_of_04320.hdf5 0eb42a7ca95bd81a3e9dc93994ab97d7 +part_00805_of_04320.hdf5 0f2eef231868cf75c2f26e8aa40630a4 +part_00806_of_04320.hdf5 0e23b79ee7be1d5c84c93595f339e4e8 +part_00807_of_04320.hdf5 f8d6f54fac02dd1258940e2a5c2f5b4a +part_00808_of_04320.hdf5 809399f8266b3762016015b7fc577f28 +part_00809_of_04320.hdf5 e37be363b6568e515ddae00fb984b610 +part_00810_of_04320.hdf5 eaba436c602a580b5667dbf739228223 +part_00811_of_04320.hdf5 55b8d586cc82371a2d56ea3779d4a7aa +part_00812_of_04320.hdf5 8ae3eb688b93d74f48fc0103505c7277 +part_00813_of_04320.hdf5 61a28da88c1192996ca273947ad005c2 +part_00814_of_04320.hdf5 1e7bee9132ec7740464ddf029de2348e +part_00815_of_04320.hdf5 8e0c758459956fe5612a42304cd158e0 +part_00816_of_04320.hdf5 8cb46b4499158faa94d1d5b2391d13f8 +part_00817_of_04320.hdf5 319d258757681472f60df5ead9896533 +part_00818_of_04320.hdf5 cc983a10727ba9bf9a4a96a507e17400 +part_00819_of_04320.hdf5 33aaa7a990f0bf7aefe5dcd4bddc71e0 +part_00820_of_04320.hdf5 5529c5e914a16ef7b45e86cb532a4eee +part_00821_of_04320.hdf5 cd34c5754950615eb27d6ded5b7f711e +part_00822_of_04320.hdf5 7d47e58339e2c0e395c473a21563b183 +part_00823_of_04320.hdf5 9b3ad986d3fc4572a7d6c959abee36ec +part_00824_of_04320.hdf5 e6e2ca244f035552298e178baf9e6ed3 +part_00825_of_04320.hdf5 f73d01d3da168831bd73cfe1322b1315 +part_00826_of_04320.hdf5 d468f3aa685a36d02d3397823e93bdfc +part_00827_of_04320.hdf5 88b2f643d93ab8417d35fc6d81c1193f +part_00828_of_04320.hdf5 
d07afc929e688e1d1be8bd27fa28fb6d +part_00829_of_04320.hdf5 40dd5aaeb08678f314abcd340693f030 +part_00830_of_04320.hdf5 4d6990547ad6814d36c6eff98ac32e01 +part_00831_of_04320.hdf5 c9d00b97d3b03d9d2c7547d5fd1613e1 +part_00832_of_04320.hdf5 830b547448070a7d10c9a98c0e883c94 +part_00833_of_04320.hdf5 933b603ce1f736c87859f7da284a5859 +part_00834_of_04320.hdf5 f86112eae423d4fdfdee5f17cccad122 +part_00835_of_04320.hdf5 5f93ac00320beb936085df3d33003af5 +part_00836_of_04320.hdf5 539a9f08e878ce9a07d2d26d99074ce0 +part_00837_of_04320.hdf5 b245aca62b7498527228288155ee2c21 +part_00838_of_04320.hdf5 8fb3b842fdcf3755a93227932609d023 +part_00839_of_04320.hdf5 ceb7c8dfd243426e367c2870a031e718 +part_00840_of_04320.hdf5 0d50b9bdfe056e164fa769fcdeae98e3 +part_00841_of_04320.hdf5 3ea3d110d418b66799aa60c004681581 +part_00842_of_04320.hdf5 3c942ea58276616b76f29bf955a71f35 +part_00843_of_04320.hdf5 2d587d87d62b1dc6d1d250b784eba52c +part_00844_of_04320.hdf5 89d98a8e489c54fb902a73a167d6908b +part_00845_of_04320.hdf5 9128230b69b774c8c2373eba88c15fdc +part_00846_of_04320.hdf5 decde8d2c9454c6bc4c3063b52939d0e +part_00847_of_04320.hdf5 19fbb682429da0b88b13ccb234d18d21 +part_00848_of_04320.hdf5 d572618c3d9b0e83e6ec815ed5a4378e +part_00849_of_04320.hdf5 cc7439013cd14863ee0d832e58b0db4e +part_00850_of_04320.hdf5 73efccd65bc897249efb0d2c44c211ea +part_00851_of_04320.hdf5 ca7aa8fb2885e4f78cc624b57c5ea393 +part_00852_of_04320.hdf5 123beac6769e7a054360d6403aa6c519 +part_00853_of_04320.hdf5 18c2f0b9aff8b12fd822a01428de1345 +part_00854_of_04320.hdf5 ca1d3e8b63e1171c72be171b8c06b228 +part_00855_of_04320.hdf5 7d7f64253c4c0b507c1df4a3a2059513 +part_00856_of_04320.hdf5 7ac2020676962e6aca14240586eda2a4 +part_00857_of_04320.hdf5 563d037bf332e24aef9c0df81417b693 +part_00858_of_04320.hdf5 7db41c721bb1d265a863a50020f945a2 +part_00859_of_04320.hdf5 1a3f31b8090f3d29582c9846f78b988c +part_00860_of_04320.hdf5 1e8f87465dac6725bf186af885329888 +part_00861_of_04320.hdf5 c35beac966ba6458055119e6fbc90daf +part_00862_of_04320.hdf5 d2b6a81bc8f310dfd2531f274d2c40f3 +part_00863_of_04320.hdf5 fb70aa3c4d8fd5d6fcd686a468611e67 +part_00864_of_04320.hdf5 ba5565a33f51f207eb360d9c52eaf17b +part_00865_of_04320.hdf5 2bfbaada52b976522986d0045c753ffc +part_00866_of_04320.hdf5 44aa8f11925428d90cf7329adf8d6395 +part_00867_of_04320.hdf5 fd616cc32a942e2fc4060bdcc3a4c339 +part_00868_of_04320.hdf5 e9b8146831a44a229c01a9ca89a700d9 +part_00869_of_04320.hdf5 bc29ee8b303447c61dccfdbad52c5b4c +part_00870_of_04320.hdf5 6f11281fbf38e87a3b200a807a1f8153 +part_00871_of_04320.hdf5 13691b04b5f60f6434e202156a286ef5 +part_00872_of_04320.hdf5 295fa49838d96f4ed93b6063d774cfa7 +part_00873_of_04320.hdf5 ff1413ca094ca25b542f8c388aa2b9ac +part_00874_of_04320.hdf5 639287d199b474c12db70eb12f2446b7 +part_00875_of_04320.hdf5 be4cdfc73f5f898f86eb9d0c0ed71271 +part_00876_of_04320.hdf5 6479f64e72ce5b2684e4baec12e9de6e +part_00877_of_04320.hdf5 9679db69ddc1c1120aee78a4e2efdca6 +part_00878_of_04320.hdf5 9816dfd158260d9dafe00370c0154813 +part_00879_of_04320.hdf5 4ee63960479cc95315f333f582d02a4b +part_00880_of_04320.hdf5 d85dde0a3ec79c3f56158ea0ffe44297 +part_00881_of_04320.hdf5 900ab876ced9ca235c40da1621159b87 +part_00882_of_04320.hdf5 88a0ad5528303e2465c232f4580dce5e +part_00883_of_04320.hdf5 19eb6859e7a3cc2a465696f7433479d2 +part_00884_of_04320.hdf5 559eafa6903ddaa231101b8c1e501733 +part_00885_of_04320.hdf5 83101080b206703883e1d00534addc8f +part_00886_of_04320.hdf5 349b4d173911eeba923a9ebcc8d61f05 +part_00887_of_04320.hdf5 d93d9b70b640818c77db618afe4b1825 +part_00888_of_04320.hdf5 
a0103bf2f2321d37302c6fa59f1b11d6 +part_00889_of_04320.hdf5 1cb035f78f2b68721595c62e5c79ebd7 +part_00890_of_04320.hdf5 df78f7dceff200ae0ea48adf9078327d +part_00891_of_04320.hdf5 5e6192da103fd3df4f66ad75c1859f1c +part_00892_of_04320.hdf5 e7e89df3e4b37d42a3f10cf3f3c8339b +part_00893_of_04320.hdf5 97b95b30885f1307b483b04cde3a7870 +part_00894_of_04320.hdf5 bb24dc8f97cc3581d13d69fc57881ed9 +part_00895_of_04320.hdf5 855b0ba8051727a5928058268c530e42 +part_00896_of_04320.hdf5 09e8dd2c8ea004a0b4e8954b4f0012fd +part_00897_of_04320.hdf5 554c0d6745f96cb5639581357bf57cd9 +part_00898_of_04320.hdf5 a2fa0e671aadda006324f168678e0295 +part_00899_of_04320.hdf5 d9b7c5f28dbfc20c3ef9cd8f8d75ff18 +part_00900_of_04320.hdf5 a3edb0152fe5aefe0bf3be202ee68d27 +part_00901_of_04320.hdf5 d74ec53e571d0a8e2b4cfad454c0bbc9 +part_00902_of_04320.hdf5 d1ea4c2d4bc9ae24088996b371cb7995 +part_00903_of_04320.hdf5 c118c73c97729fd78ee1c8c60f3b9535 +part_00904_of_04320.hdf5 d825ac943d3cb9f2d9bd17aec89b3f91 +part_00905_of_04320.hdf5 acf4fffda118b0fdc8fdff91cda8d7d6 +part_00906_of_04320.hdf5 147501f16d7e2f10d235daf28c223ebb +part_00907_of_04320.hdf5 8378c2a3d4d490500f1af7651db4b4bf +part_00908_of_04320.hdf5 eb0e6e10258d513733100d05172eb7ba +part_00909_of_04320.hdf5 0e79b33191826743b20e192d7516b66d +part_00910_of_04320.hdf5 2f1a664fdd3cdda3115a783a852793aa +part_00911_of_04320.hdf5 27c0a8ab213b6c8f1b6f7ae8fece0f09 +part_00912_of_04320.hdf5 7c7049f39814004167abe5042226fdf1 +part_00913_of_04320.hdf5 ae23b7ce4d3c7e3f61efaa1bd00d6310 +part_00914_of_04320.hdf5 6b57fd5469d49d3016ddc2538c8bbb30 +part_00915_of_04320.hdf5 88ae0ccb93eda7622631a3b470e46185 +part_00916_of_04320.hdf5 1ad6cd3e41c0f80cf808d027e4fde955 +part_00917_of_04320.hdf5 c6ab69cd3c2f8f0749ad88effb773fae +part_00918_of_04320.hdf5 418d65b422880041b629665e6dc8a9cd +part_00919_of_04320.hdf5 d5d43198e3f96438fb064bb2b3e03df0 +part_00920_of_04320.hdf5 f808803cf9917fc8474cf6836f292074 +part_00921_of_04320.hdf5 b0ecbb36f40f9ab173a077583109b605 +part_00922_of_04320.hdf5 46bd5e699249e9847bff045ea4376593 +part_00923_of_04320.hdf5 343e419ee6a8adbca077d00d40f4b36c +part_00924_of_04320.hdf5 926db74c3f999160546b79e2cc9deb83 +part_00925_of_04320.hdf5 f411a88990b497202a0902d6043847d4 +part_00926_of_04320.hdf5 6709c8f52183763733ca5307c0100b41 +part_00927_of_04320.hdf5 999b7704337cfbe1d00292057ab9ab8a +part_00928_of_04320.hdf5 7d7c68d7225d6767cc95190057f910be +part_00929_of_04320.hdf5 6a2dc1305ba0fc974554660ba47bcfe7 +part_00930_of_04320.hdf5 1739b7f094119fefc064d807d6071c03 +part_00931_of_04320.hdf5 92ff162afd464ae51f1d005f4283415c +part_00932_of_04320.hdf5 79a5a07cf2d00ed0d80575925bb9bf75 +part_00933_of_04320.hdf5 0a0e8a4569335f08f997a0738fced927 +part_00934_of_04320.hdf5 0e5883cbae53b3269a6523d6e939ac39 +part_00935_of_04320.hdf5 7b1524f1bc32f27ed4ad7f7fa79e1c5b +part_00936_of_04320.hdf5 8b38ca4734a518f82d3d81d3055b13bd +part_00937_of_04320.hdf5 7ed7ebe9bc1d681651a23d8ddf0bc9fd +part_00938_of_04320.hdf5 c9352ca11e89ef2c2e75e8294b7aa989 +part_00939_of_04320.hdf5 bb1ad5890e53b847d21ebb882532d166 +part_00940_of_04320.hdf5 c8db380c45c4c674edbe1509fb043f7c +part_00941_of_04320.hdf5 35f0c0056dc7c78253a8ca2f84b42c6a +part_00942_of_04320.hdf5 de5d33ede6919ff02cdc14eafbfc84e2 +part_00943_of_04320.hdf5 987c2c92636c43d81a4a52891add3127 +part_00944_of_04320.hdf5 7fcb30c373192b89580476076d8cde47 +part_00945_of_04320.hdf5 0ea360badeed59dce33a9f109e07ab17 +part_00946_of_04320.hdf5 e522785516da1302a84985ff078cf8c9 +part_00947_of_04320.hdf5 8d6bc17dd777bf550f30e10b05ce018a +part_00948_of_04320.hdf5 
dcfa28e5b201fcf3e62b30e834791791 +part_00949_of_04320.hdf5 ebdba0695a9a849a391a2c1e5af8ae1c +part_00950_of_04320.hdf5 6aa7029c67f23410e7879e5d0c84d1f0 +part_00951_of_04320.hdf5 55570c973b6e8670aaf58706930b01d4 +part_00952_of_04320.hdf5 2597951926fc1889978ed49fe5cd595c +part_00953_of_04320.hdf5 a6b03d6273e1e8cdd5979c006a9dbc93 +part_00954_of_04320.hdf5 562be67295c3bc44b92e4ff6903d63a6 +part_00955_of_04320.hdf5 0e3de11ddf158c51125d9553e8d7d337 +part_00956_of_04320.hdf5 8351cf69c8b7e82a1c5982aeebdfe30d +part_00957_of_04320.hdf5 067a8b2321a492bd8464c7759577e135 +part_00958_of_04320.hdf5 47967fedd91a4753e9e84140fef833b5 +part_00959_of_04320.hdf5 d1cf6d8948e38fc10e044a5f460bb151 +part_00960_of_04320.hdf5 36fbb2bd5de97adc90944914c19164a3 +part_00961_of_04320.hdf5 5f6c4a5ce9e2683c3ba8286d105e4748 +part_00962_of_04320.hdf5 1b094b59de5822766ced1fa2b2cbf9c2 +part_00963_of_04320.hdf5 e04c754abe585e988ecd58d2b0922808 +part_00964_of_04320.hdf5 11dd868f4581ce478d3756e475027fde +part_00965_of_04320.hdf5 a44bbbbb5ee1e9d6a3fcec9c5dcee6c9 +part_00966_of_04320.hdf5 f9750d9d75a5634e6becf1679beade53 +part_00967_of_04320.hdf5 85b7aa6792e1b7d4c549e1c0a145eedc +part_00968_of_04320.hdf5 1ec78741228f067ee61660d40851dcbf +part_00969_of_04320.hdf5 d565fdae68df964e6908e7fa5acfccc8 +part_00970_of_04320.hdf5 1e2d4a11e635adfe340be16cfba5813e +part_00971_of_04320.hdf5 ba35f4af7895a55dfb10c161f579bfba +part_00972_of_04320.hdf5 42984e5fdcaead16386b50eefbaacd0b +part_00973_of_04320.hdf5 73f536d56d7e63ab047410139e8d26a6 +part_00974_of_04320.hdf5 8288e606d03ad1b028d7b87d736218fb +part_00975_of_04320.hdf5 f0c2a0f7d216cb24e7bfd940d0dd904e +part_00976_of_04320.hdf5 233adcd6e887ac1ac21a8f7835c570e8 +part_00977_of_04320.hdf5 ff73c44a114545db6ce855571dbd8665 +part_00978_of_04320.hdf5 9bc892248a6ba5b0b8e8ab688b39027e +part_00979_of_04320.hdf5 e742b14ce2bbd9c7687551303bb22af1 +part_00980_of_04320.hdf5 af6659b5a4e6dc8e38f816b55f219c6c +part_00981_of_04320.hdf5 f5d8b67ae66d0dffdb4cc1cbb3e296fc +part_00982_of_04320.hdf5 6cbad0cd67088ea1ce54250a8a41cef4 +part_00983_of_04320.hdf5 f6800747b1619d4ecd91ded13f1c896f +part_00984_of_04320.hdf5 ebf95bab051bcb23d7c8d84ce2d43b8f +part_00985_of_04320.hdf5 cd00ac552dd48c67d4b16e36364dc429 +part_00986_of_04320.hdf5 c6fed79f63d7623e1defa8772ba605e1 +part_00987_of_04320.hdf5 c79d54d15860791329f63191e55da038 +part_00988_of_04320.hdf5 632def306f888f5d0207ae5a0ee053e0 +part_00989_of_04320.hdf5 8b481305f2ce47afc4198a1a582435c3 +part_00990_of_04320.hdf5 e88c9c8b0e1e6afa571abccdb7fb9d12 +part_00991_of_04320.hdf5 5ed8ba2b577fe0fa3b03d03e60b906b6 +part_00992_of_04320.hdf5 c43368a3efb658fba443833607e33829 +part_00993_of_04320.hdf5 ce2b2a5104f490390db3d7addd80c721 +part_00994_of_04320.hdf5 869fe89f038bb7c9b92063f1c331a361 +part_00995_of_04320.hdf5 cb49aca0b02f740f0473b2e2705e9e79 +part_00996_of_04320.hdf5 7f449fd8f460efc3b9aa56185f6703fb +part_00997_of_04320.hdf5 0f4f54f555f65c53784c1895c41693e6 +part_00998_of_04320.hdf5 4f4e9216b54cbb137e13e4dc08028114 +part_00999_of_04320.hdf5 07d34965687e0ffd7b602c2670e7b227 +part_01000_of_04320.hdf5 460827e5d17caa5882d966a3922fbe33 +part_01001_of_04320.hdf5 ea527245b662453e2d7c5df74f28741c +part_01002_of_04320.hdf5 cf8789c135108960ab334f76ea9a18db +part_01003_of_04320.hdf5 96bec2674f25fc99379cd1a692b8d5fd +part_01004_of_04320.hdf5 74ee9279d2a663207b0a439c5a210d45 +part_01005_of_04320.hdf5 d516a76a35a5a329c7d6d201f8d9ed5c +part_01006_of_04320.hdf5 276a716ca47ddb752e1b84e0f7a54099 +part_01007_of_04320.hdf5 e2efeec204685be601b586ec7a336906 +part_01008_of_04320.hdf5 
a48df2ae27a81444cb931a2bd6d1dc20 +part_01009_of_04320.hdf5 0bda09bb08687aeb05925ddfb2bc0745 +part_01010_of_04320.hdf5 4982571a2e3707d1ae47b4012d3aea1d +part_01011_of_04320.hdf5 7c386d1f9394dbd6139ab5cce5bfd800 +part_01012_of_04320.hdf5 7ee1467b3f41941a755aa214441bec14 +part_01013_of_04320.hdf5 f65bf5dcb89cd1d7395fe89db0aa5d53 +part_01014_of_04320.hdf5 fa35e52236ff10dd815dbfd0c7a9add6 +part_01015_of_04320.hdf5 8e5c2ac67c6653e86ac745ee3d078bce +part_01016_of_04320.hdf5 9f8b404fde96053654f44ed8382166f2 +part_01017_of_04320.hdf5 d9005bfd2abd687f8b75838c7571d173 +part_01018_of_04320.hdf5 b52906fd056295594d83500ef44a47f1 +part_01019_of_04320.hdf5 c2920c4db541cb7a35c4dda99afa9961 +part_01020_of_04320.hdf5 f704d7ad068b3590fb694a99bff4ab4a +part_01021_of_04320.hdf5 62e912ce6d9e1699040192207389f996 +part_01022_of_04320.hdf5 241221377669c0b687bed22f48bc575e +part_01023_of_04320.hdf5 7ef62f58c546ffd20144db1a38d75834 +part_01024_of_04320.hdf5 e6f4c50bb2c4d92376d4cf930f795883 +part_01025_of_04320.hdf5 3059cf351918dd07e6b6c4dd4c9033f8 +part_01026_of_04320.hdf5 41d49050f3a086f79734c83e021b21da +part_01027_of_04320.hdf5 ab39979ab8bd6d5b695630e9585bb5af +part_01028_of_04320.hdf5 e4bd51de168d480553e9664513ca7703 +part_01029_of_04320.hdf5 4f116af4bbc9c1553d411ede91746f70 +part_01030_of_04320.hdf5 daf7c581749f90870611ff1d9d2268e3 +part_01031_of_04320.hdf5 5cb5d1e1dab1a68b65bab4c7c544c37e +part_01032_of_04320.hdf5 82b0b2fde79dbcf2a0f71ecd34d8bafd +part_01033_of_04320.hdf5 2f444a3b6a552cd69c0ac65e10473439 +part_01034_of_04320.hdf5 83b9c09ebc89dc657e4d6919f3673b2c +part_01035_of_04320.hdf5 f7fced7f614b743b46501168c101a2a7 +part_01036_of_04320.hdf5 13942b6570e4a3559ad848009fcc7265 +part_01037_of_04320.hdf5 a1ba60cae91c6f28fbc62f1255921243 +part_01038_of_04320.hdf5 9f92190b67e3921a5cb44e5e83b5c338 +part_01039_of_04320.hdf5 961dbf75e61ed90050fd442a68aab3d8 +part_01040_of_04320.hdf5 fb37e4cbb7e2ab7a4d58f43c780253cd +part_01041_of_04320.hdf5 bca49086ba4dae83e763042f7339eb0b +part_01042_of_04320.hdf5 fb8553dca361c5a120c4e7c5bacc0e5e +part_01043_of_04320.hdf5 606fb08b1a86038593f334e0a8984e7b +part_01044_of_04320.hdf5 878bd5aae6597cd77c7297b524715d8e +part_01045_of_04320.hdf5 b51e4b57ce4bad51d444ead8af593c9a +part_01046_of_04320.hdf5 34db83eae28d0f0ceaeb0267c48f9a99 +part_01047_of_04320.hdf5 9d868c2f07c44f3b93041ab4cd023f56 +part_01048_of_04320.hdf5 3c109ee395d67c1180f36b91d2ca37db +part_01049_of_04320.hdf5 3be8ad1c8a115856c6af74a0625175cd +part_01050_of_04320.hdf5 b469277b29eb39b3df3aff496064558c +part_01051_of_04320.hdf5 a68229ee9f4ad522548060e9aed387bc +part_01052_of_04320.hdf5 9dfb44141c7a0798ecb3ba19d4e276b1 +part_01053_of_04320.hdf5 c3bfec974c2f0faf45521cb848549629 +part_01054_of_04320.hdf5 c6a3ee4218cea2d3f7f8fad39947740e +part_01055_of_04320.hdf5 17f4e7a7a83156cb7a4462492f445bb3 +part_01056_of_04320.hdf5 f94f0d72a00c8ca3b38c6c235a700d51 +part_01057_of_04320.hdf5 d09a647e0eed9b621aed0d58d7c4ce4a +part_01058_of_04320.hdf5 1413d7f9328a4e43a2ca2c7022d93947 +part_01059_of_04320.hdf5 1460ca4e5a9dd54f2a3095421fa38860 +part_01060_of_04320.hdf5 cf46f5f1a94b35d2504b75c5449e5699 +part_01061_of_04320.hdf5 e560750171a34d5488730812f64c85b3 +part_01062_of_04320.hdf5 b52034d5f90406bd035889b90172343f +part_01063_of_04320.hdf5 95a687321eaa41d5726e3e5d5b56cbf8 +part_01064_of_04320.hdf5 80a7b22750e0a3fa89f72079c6a4b828 +part_01065_of_04320.hdf5 023fb58e27e4a3b396d439ca46bb2706 +part_01066_of_04320.hdf5 fd5e7c80279e8c1200ab620855beceea +part_01067_of_04320.hdf5 7e993b14ae4a290d84b627a35e0f8b3e +part_01068_of_04320.hdf5 
64b62ff047d2d95c508c5745b69e5bdc +part_01069_of_04320.hdf5 8797ca8b60e2b4ebdf59d094504d1021 +part_01070_of_04320.hdf5 9752fb077f83106627471525b5ecb774 +part_01071_of_04320.hdf5 2a7a3245ae8703362de261e47f588e40 +part_01072_of_04320.hdf5 a135371373e22eb1581cd21b46afe141 +part_01073_of_04320.hdf5 40e20a2fc21c404f9a927ce9149be58e +part_01074_of_04320.hdf5 9edf70f827be22d23dcdcb85668b12fe +part_01075_of_04320.hdf5 4c8a57d917914e60ee7bde85c7b7deda +part_01076_of_04320.hdf5 9b506149c16961cef15207ddb522fccb +part_01077_of_04320.hdf5 6c28d0008c745a0277f4358f475c14d6 +part_01078_of_04320.hdf5 b44384aa983db24c9cf5315116d32f8d +part_01079_of_04320.hdf5 9d219cb12577b539e300bf85c97c39b0 +part_01080_of_04320.hdf5 6ce75a30a1d6d4939d5217c450874f2f +part_01081_of_04320.hdf5 79ae2814531282ade91e59afdefe511a +part_01082_of_04320.hdf5 e854798bb6c101e993e7e732798eff5e +part_01083_of_04320.hdf5 3bc211b17eb8b8509eb0ca33f770c219 +part_01084_of_04320.hdf5 2c83654bf1104a58c621872242f82246 +part_01085_of_04320.hdf5 f723c38be634ad6e5a2c4bd8a3ae7e22 +part_01086_of_04320.hdf5 6a2386759b187d39409d4a33b28f5791 +part_01087_of_04320.hdf5 1ea61306b2e1c86ec9513ca81731c21e +part_01088_of_04320.hdf5 871ca4ee8d420fe6dfe6b74051b305d0 +part_01089_of_04320.hdf5 161bf18832583e6a34e1d44b5d619c8b +part_01090_of_04320.hdf5 e1559f226beb321fc485ebeed9ddecf6 +part_01091_of_04320.hdf5 1e33e627e476e15c73e5d11406b8475b +part_01092_of_04320.hdf5 066e1a5a79562ce4d4a22b662e82c0e3 +part_01093_of_04320.hdf5 a2873e1eb648f1bd50edce8e8f709771 +part_01094_of_04320.hdf5 4bec36059613148107ac1b08036528a6 +part_01095_of_04320.hdf5 0129069064371886c19109fbf13ebcb0 +part_01096_of_04320.hdf5 16fc7880068a309b6f8cee06f8dce6a9 +part_01097_of_04320.hdf5 d2bc7cfad7331fc652665dd93f9b5fa9 +part_01098_of_04320.hdf5 70e51d573370bd242226e5bd19cce6a8 +part_01099_of_04320.hdf5 f8116c743284d7731929a8b04edad50b +part_01100_of_04320.hdf5 13e6a7b802eba443eda5f27787800eb0 +part_01101_of_04320.hdf5 64e14adcedacd022b8fdf146a669b5cf +part_01102_of_04320.hdf5 2bdec2ab70b11f8bf2a8cb62d6ffa957 +part_01103_of_04320.hdf5 bf7af7758ab9ec3ae97cc4df5b86f186 +part_01104_of_04320.hdf5 abf6ff015a522058381d3095503a1909 +part_01105_of_04320.hdf5 5c040b58a3f149be06209fad89d9e35d +part_01106_of_04320.hdf5 663ec7b3e8655587581aa16c4fd2c728 +part_01107_of_04320.hdf5 8706a58892ba8a17c7c5ae7a5faf30e8 +part_01108_of_04320.hdf5 af582bcb0231dbad58693fa4bc7403b3 +part_01109_of_04320.hdf5 00ba7dca8cc22f8e0414758735fbb879 +part_01110_of_04320.hdf5 7d3f719abcd310c8db7fa44b929ce919 +part_01111_of_04320.hdf5 3a5bc499fdec50e89f8c056b7cdfa4b1 +part_01112_of_04320.hdf5 40571dbc6b101cfd4437ff3903309dd9 +part_01113_of_04320.hdf5 8c62ee563c6d96426d453319f475e50a +part_01114_of_04320.hdf5 15cfd6ab7e66645f939950f058014e82 +part_01115_of_04320.hdf5 ff1efa672428f1ed1cacd9b835d3ad69 +part_01116_of_04320.hdf5 ce7c10f80ed1db8ca27bb0e0d998b7d3 +part_01117_of_04320.hdf5 52fef4fa8f16b792aa965fc01bb50621 +part_01118_of_04320.hdf5 6fc019ce8bac48ca649833d34852cc82 +part_01119_of_04320.hdf5 e3c8b026d383ce466f61bee12e13b9dd +part_01120_of_04320.hdf5 f0d67acdf8a9ffd80398975deb6e41f2 +part_01121_of_04320.hdf5 dbcb14aa4e787222cac9393d7360c169 +part_01122_of_04320.hdf5 280c31f0470c3ec17e207da4f0758a71 +part_01123_of_04320.hdf5 0170df810fff2a2fb3b1f7abc18ea3f3 +part_01124_of_04320.hdf5 8da572c1c15a7c6cccbae8b0b03d8904 +part_01125_of_04320.hdf5 e1b0810b91a0e6418381b3ae571147cd +part_01126_of_04320.hdf5 88608cdda66a2b51b4dafed8b3affec8 +part_01127_of_04320.hdf5 cfb79debe800970690ce861e6b4e7092 +part_01128_of_04320.hdf5 
345d2b1cfa1a3a392911f79f3a562059 +part_01129_of_04320.hdf5 3f315b7c27e0f053a71b090dc9b0e18e +part_01130_of_04320.hdf5 389c57946c45e2971af4b9896c6b553d +part_01131_of_04320.hdf5 619f6168c255c428acbf7e60e47d16a2 +part_01132_of_04320.hdf5 1a5d20a90bbe3389262bd1c3facf0bb4 +part_01133_of_04320.hdf5 48d012079e36a6b8534bfe95969c35d9 +part_01134_of_04320.hdf5 fab745f3666aeb65071c99f152bea1fc +part_01135_of_04320.hdf5 33038614ea35ef33ec4f7ca5dbb9f928 +part_01136_of_04320.hdf5 2a3a6d5396df3dab4569eef320ec2c10 +part_01137_of_04320.hdf5 b28744f603b872586a86541ac605103b +part_01138_of_04320.hdf5 49df8ae938d8d81d137df02dad5c870b +part_01139_of_04320.hdf5 e8514416eddac75d44c261bdf7c02443 +part_01140_of_04320.hdf5 52b2c814d6abd99f5890b3c35864dfd0 +part_01141_of_04320.hdf5 438701aea8f11828d7387093e101475b +part_01142_of_04320.hdf5 5881dcc168acfcf3b9bdfbacc2be26c9 +part_01143_of_04320.hdf5 02a2a42c4ee4e61cedb9c5add1e5cac7 +part_01144_of_04320.hdf5 37236907fcd1847933ccd64e414f3795 +part_01145_of_04320.hdf5 a192d0e885bc86a2887e8879c865aa73 +part_01146_of_04320.hdf5 ce6682fdd513b63c90e8eb5e88830fae +part_01147_of_04320.hdf5 8a8e9ee8faf0e9e906d8d96b9f41116e +part_01148_of_04320.hdf5 6ea92025be4e28ba64b5904d33410fab +part_01149_of_04320.hdf5 5173e0fb9fc3715e19a02f261b1de4d8 +part_01150_of_04320.hdf5 baaa919ae93da4185bd49fbab5d3ae5c +part_01151_of_04320.hdf5 a1be43556d251d276909e94708af5c74 +part_01152_of_04320.hdf5 c6fa74e647ccb2fa48f557394360432b +part_01153_of_04320.hdf5 6830d862cf9e67655ce82f053963a72c +part_01154_of_04320.hdf5 588bac2e29df1d917416ff99bf85fc3c +part_01155_of_04320.hdf5 2a88a10f5bf22e67006f3cd67d5094e1 +part_01156_of_04320.hdf5 5e59f00271b750b50e20e6926dabb208 +part_01157_of_04320.hdf5 a76644c1940f430d8d927355eec79dc2 +part_01158_of_04320.hdf5 a595e028b432483d33be7bc8e96a1f0d +part_01159_of_04320.hdf5 79089d6dce500cb290d9707bd45cf6ed +part_01160_of_04320.hdf5 29d13e55435f6a88ebe971a985a4e158 +part_01161_of_04320.hdf5 f30ee88ac525c95bb3c1fafbd9ae3157 +part_01162_of_04320.hdf5 44305750ca4df6714ac445dc84ec94f2 +part_01163_of_04320.hdf5 154d1ddfee9b19611626d89752e6bdd5 +part_01164_of_04320.hdf5 2479c182818084959faf843eacef0af5 +part_01165_of_04320.hdf5 bc0538df3f915764cb46b744b8562664 +part_01166_of_04320.hdf5 3e7870ff41028f7b5533869aac18de2c +part_01167_of_04320.hdf5 99f7729418679d36797cac8671223b5f +part_01168_of_04320.hdf5 4b789037323d6a986af59e7b7c013466 +part_01169_of_04320.hdf5 5e685dd4a22e2ed65d7929a671dfa2f3 +part_01170_of_04320.hdf5 2a328dadd93003539c87e832846f7cb9 +part_01171_of_04320.hdf5 0457b5c9e4efadda98b7a5306126dc60 +part_01172_of_04320.hdf5 f61ea594632319f3ae5b81ef685961c7 +part_01173_of_04320.hdf5 ee8da336b8aab0e001316e46381fd7da +part_01174_of_04320.hdf5 e07ee692a5837fb18e3ddefac057e228 +part_01175_of_04320.hdf5 c94245de5b3f8e5e379f0838cc688e1e +part_01176_of_04320.hdf5 4735243201fe0cb3dc78163788cca193 +part_01177_of_04320.hdf5 23c764b11ea2845011ee8a87bdf1855a +part_01178_of_04320.hdf5 358404d78db4cbc9b1f76e04bec5d473 +part_01179_of_04320.hdf5 fb88350de22c4d4a416284fab02743ae +part_01180_of_04320.hdf5 748e6c44d328f32098a2fe6320a199f2 +part_01181_of_04320.hdf5 25d2ec2ce05fa87a2563007db0bf93c3 +part_01182_of_04320.hdf5 0c5228e38e71083c468d40cf0f41c015 +part_01183_of_04320.hdf5 45a071f99b933b9efb2d7156b260711a +part_01184_of_04320.hdf5 242ca7816dde0474a383f23ffde78519 +part_01185_of_04320.hdf5 3ebb4d0c1a8bbf034248c71118cca1a9 +part_01186_of_04320.hdf5 231103ab764ebda00c2b784b39e6e00f +part_01187_of_04320.hdf5 afd95609d4ae7c8a7181f2d0ee7bdc57 +part_01188_of_04320.hdf5 
98b4ae2e0a641bd863bf3d140f092f1f +part_01189_of_04320.hdf5 5e632f7d102ad706af9b43ef4087d47e +part_01190_of_04320.hdf5 f4f5cf64a88394d38cab5e7d15f9a3d7 +part_01191_of_04320.hdf5 10bb8163b210bfbc9248f89717415ce6 +part_01192_of_04320.hdf5 346c08449b10c43d9f8658bbf3d8c7e2 +part_01193_of_04320.hdf5 a6a29727e3d5cd5fc7f99c3e77a5157c +part_01194_of_04320.hdf5 dc29b9828db085b38cf40ed513672932 +part_01195_of_04320.hdf5 8f7dda7a010a94a294cf93cf89016d6c +part_01196_of_04320.hdf5 09be3baa9b697878d71b839dbfb513fd +part_01197_of_04320.hdf5 8f40b3e2650a59eb5ba282ef543a543f +part_01198_of_04320.hdf5 71640bfb0e7292df0b3395cc2fc8dfbd +part_01199_of_04320.hdf5 4bff89c663c4bdd995ee5c8503371ac8 +part_01200_of_04320.hdf5 35978eedcefbe0e5603f0542cfebfc00 +part_01201_of_04320.hdf5 f1cbcde381008aea43f4b2cf5293af9e +part_01202_of_04320.hdf5 35263c14792f78614e1c7340a1859d9a +part_01203_of_04320.hdf5 7cebf2a64d9d241b03764e0a15be7436 +part_01204_of_04320.hdf5 d622c8ce07f8c0660d06a974fe78f60e +part_01205_of_04320.hdf5 6a9cc3f0f92b7f703bae6f6cb01cdec5 +part_01206_of_04320.hdf5 3f1de6eedbb8e9b179f37d6fc3fcd0f9 +part_01207_of_04320.hdf5 121b5cbcf10054b1b30f2902adf49ad8 +part_01208_of_04320.hdf5 cad3861a46be9bdbdc5ba5515648c3e8 +part_01209_of_04320.hdf5 09663f71df41ae7163c2be495644dfb5 +part_01210_of_04320.hdf5 61e769ee1f2efb25ff2da4e5afc2115a +part_01211_of_04320.hdf5 63c5838f32212024e58ffa7d9402537f +part_01212_of_04320.hdf5 da61b18771ac0ebc1eb3beeab6898339 +part_01213_of_04320.hdf5 29632db44eff98310c3c0ca8fb60a128 +part_01214_of_04320.hdf5 9ad4e4c3c77f16f0ec9020136b92b3da +part_01215_of_04320.hdf5 068265dbb6d8d659c9000ff137e7e05f +part_01216_of_04320.hdf5 6185148e262aa5476c3c43fa60a40fde +part_01217_of_04320.hdf5 cda88a0b516f228929b7454e154ef49c +part_01218_of_04320.hdf5 6bc7a9090fa33adc99408d5dff0f3ff0 +part_01219_of_04320.hdf5 523931623ffd8763b7d8d896de3ada7b +part_01220_of_04320.hdf5 3ca9b673db742d21bf113289db556bc4 +part_01221_of_04320.hdf5 b40778e32fda838483039fa394f9b8ec +part_01222_of_04320.hdf5 c154ca831b48a2c455aba2bfe071882e +part_01223_of_04320.hdf5 e9b3101e2654890ac536905af9eb3a10 +part_01224_of_04320.hdf5 7ac33959202522acd442266fc4113e5b +part_01225_of_04320.hdf5 744a59bf5a71b23ca0e8f43f4f1d6cac +part_01226_of_04320.hdf5 02c1f833379f8d2bf40127029c9547e6 +part_01227_of_04320.hdf5 1b83ab24694855461ecd1b5870adc082 +part_01228_of_04320.hdf5 09350f6fcb48e8cbc94a7658afea43f6 +part_01229_of_04320.hdf5 2b56d953e6effcb71f60be0f06397fb1 +part_01230_of_04320.hdf5 ff9ef793d4682d23fd1884c0b5949b2d +part_01231_of_04320.hdf5 f0432bba11a326c5a22e73823388cdef +part_01232_of_04320.hdf5 91662035df6250fd075934e9b8892b45 +part_01233_of_04320.hdf5 d6dd0adbb66dd8bee27de85f063103b9 +part_01234_of_04320.hdf5 bcee979ee780e895a4fba6864bae3ed8 +part_01235_of_04320.hdf5 664bbda5fc84ae026326bf543c141c55 +part_01236_of_04320.hdf5 396426bf4f22c8e27006dc2459119e16 +part_01237_of_04320.hdf5 42655d5e19c82703c8988bbc2289678b +part_01238_of_04320.hdf5 73d385297009e9db7d60bf37d1d7af28 +part_01239_of_04320.hdf5 a5c5bd47bc342ce624717600bb0fcde6 +part_01240_of_04320.hdf5 e4e5d0e30313c6e7df38569ed1116ab9 +part_01241_of_04320.hdf5 35c1890cc07bdaab34b6d7e7834532fc +part_01242_of_04320.hdf5 1c18eb6edf9a09be8f60983a4d930ed1 +part_01243_of_04320.hdf5 87fe90378c6d618a3e1d06d2baba2a82 +part_01244_of_04320.hdf5 390a8dd9a306fed96420bc6ca2ea2fa5 +part_01245_of_04320.hdf5 433bec421595e0586de4d79b57824112 +part_01246_of_04320.hdf5 575fa2a9d3462ab01e97c0fa2bb7158d +part_01247_of_04320.hdf5 3d7a8fc7ad8cc49e79a0b496c4790b20 +part_01248_of_04320.hdf5 
217c5efba8b609f9cec3dc5fea89dd2e +part_01249_of_04320.hdf5 83270caefcc8a2b258091d0d6aa76b5f +part_01250_of_04320.hdf5 9b7304e6997965ebfee284980d3aaca4 +part_01251_of_04320.hdf5 758241c3929b4b224cebbae8f808264d +part_01252_of_04320.hdf5 2206a67c561bc9f0b59924c72b857657 +part_01253_of_04320.hdf5 ada52cd36efcaec583d6146b0da8232e +part_01254_of_04320.hdf5 a64493aeee165c0a5f94ab69cde86f0d +part_01255_of_04320.hdf5 dc988d207fe8774d03107e920f2048a5 +part_01256_of_04320.hdf5 a6bdbe192aa24bf379dcbedea1c2213b +part_01257_of_04320.hdf5 d4d1011f704febdbb8a2b94f9c8802b4 +part_01258_of_04320.hdf5 c0cbcf047db3a4aee7ec7fe5b2abe643 +part_01259_of_04320.hdf5 7095044e2b6a1bb3c0a77e82b2ab367a +part_01260_of_04320.hdf5 929efc63b4eb115bab008128c92eb169 +part_01261_of_04320.hdf5 5325a3da29dfb306f4ce31e06316dd8d +part_01262_of_04320.hdf5 8a86194c814118d484bcb6b159c90b93 +part_01263_of_04320.hdf5 12bf203c35acd43cb5866fa1d45246e1 +part_01264_of_04320.hdf5 2a1f689ac4230901a8f4188b14e0d4bd +part_01265_of_04320.hdf5 6fe55757b6ba94e44518c22eb5804b88 +part_01266_of_04320.hdf5 5956d1bfa69a927e9e6d77a08352b076 +part_01267_of_04320.hdf5 a49aac7efdfb7c37cfb1540ababb996b +part_01268_of_04320.hdf5 c86fea7ad5790743f2de29c54ba27fde +part_01269_of_04320.hdf5 5e6352e1f8c88ea1730a30602533781b +part_01270_of_04320.hdf5 9ebcfab3bdaac5237a7e8db41a02e00e +part_01271_of_04320.hdf5 b2ce0df75fd10c2c31639aab3fe66ad0 +part_01272_of_04320.hdf5 073b9e7beab1403c5b1a076dadc6ad28 +part_01273_of_04320.hdf5 3de9af1e2e66b1905e6b1067b0c3b7e8 +part_01274_of_04320.hdf5 add64a7b49069b542b5cd9ca0588b7ee +part_01275_of_04320.hdf5 2fab2f1b0baa01a3a2105197c3a06ae0 +part_01276_of_04320.hdf5 55e7373f0627cf1e567c6181fa5b63ea +part_01277_of_04320.hdf5 2251b63f34956b685b13f72475c24948 +part_01278_of_04320.hdf5 383e90f690e2609934046819b7d15169 +part_01279_of_04320.hdf5 cb7f79b28ac785382e33cfd6eab00e47 +part_01280_of_04320.hdf5 b54675ba00e4cc89a151f310557fe81a +part_01281_of_04320.hdf5 a33765619f2e4f98736f459c9bc00e2d +part_01282_of_04320.hdf5 b8475422fa473b1c82c5b84b2ff87a59 +part_01283_of_04320.hdf5 422a55453879ca7c33f49078e89839b6 +part_01284_of_04320.hdf5 4ce451aa4e63046c85cde9281669052d +part_01285_of_04320.hdf5 49fbaa646e178a1526e96a49c9599b84 +part_01286_of_04320.hdf5 758a12b565dcc6b0c124e0d1a665df3f +part_01287_of_04320.hdf5 565edcc394710a327047e323901deb75 +part_01288_of_04320.hdf5 d111bb89f19cbaeb3016aaa70def8daf +part_01289_of_04320.hdf5 bc59e54513cce04cd5e98ef7d7701061 +part_01290_of_04320.hdf5 84f00a41cadf38afcd28f5bd48c24131 +part_01291_of_04320.hdf5 dfa33b6f59e0a3c9d3f02dabd4fa16bd +part_01292_of_04320.hdf5 5c5cbbb528deeb6b4f75d2f497cfd1f4 +part_01293_of_04320.hdf5 c530e311dee678725b749edb1b96c900 +part_01294_of_04320.hdf5 34e92223424d3e469fd33011d6c916a7 +part_01295_of_04320.hdf5 2a57dbe4f593fd8dbc6392c551169fde +part_01296_of_04320.hdf5 35d193f95044202fec90369ba68a74a9 +part_01297_of_04320.hdf5 3c7f5d5b3c093ecb9cf8a0c0566f7f1d +part_01298_of_04320.hdf5 8035eaa8d26ba629d49f760705ff85b5 +part_01299_of_04320.hdf5 f715ea6f67a89f3b4905d9323299a37c +part_01300_of_04320.hdf5 1b7fdf65445b9366056d1ebb3bce872c +part_01301_of_04320.hdf5 4d037f114992ed83e038128ed612fe48 +part_01302_of_04320.hdf5 f5674adfeb199a9b1bbf15f9794def7d +part_01303_of_04320.hdf5 c4c7706d700a1c93da22d76619de8d82 +part_01304_of_04320.hdf5 4cca651ebe9319081443e68033ca8a21 +part_01305_of_04320.hdf5 cd13acdf362a4f1f58f867323bc6ad58 +part_01306_of_04320.hdf5 864e54bcd7f30938c877c9f216976f82 +part_01307_of_04320.hdf5 38110bcf9cfbed231e1eef8c3c979b7a +part_01308_of_04320.hdf5 
9c882875e21506a8252be6c0fabd7747 +part_01309_of_04320.hdf5 0adce4a6fd7e78bb23842eb55bc0b525 +part_01310_of_04320.hdf5 3e2febf16676334fbc6ce1d8cb02a533 +part_01311_of_04320.hdf5 d5def9c26a9750429aa44a75f85e8421 +part_01312_of_04320.hdf5 3443a5f6bdc0d373ed6597d48cb1e008 +part_01313_of_04320.hdf5 4c44b2e2ab03de61e53bec2afab83924 +part_01314_of_04320.hdf5 1612565266a4e1493014a6769a67d2f9 +part_01315_of_04320.hdf5 ecd583015c3ac116d2c6c90d3db0f90a +part_01316_of_04320.hdf5 245b0024e767ac51a7799cb024240199 +part_01317_of_04320.hdf5 282043063c3053f6ac9efd63832c451b +part_01318_of_04320.hdf5 d0f2593ebcda2c3a0bd0559b1591648b +part_01319_of_04320.hdf5 e6ac2ab07db2a6321cef6797769bb70d +part_01320_of_04320.hdf5 55d176ac4a384a998577e3a841bbf8ed +part_01321_of_04320.hdf5 c1c3371aafdd7d5c495dc64d363ae863 +part_01322_of_04320.hdf5 9fa5a54c48b998f8a4a47fd36d5a42e8 +part_01323_of_04320.hdf5 df7c420f33365db9b76c0f5893038bbe +part_01324_of_04320.hdf5 3c82d01203e405db65f40ede30f08f4e +part_01325_of_04320.hdf5 a2f15b2a45e3d1e824d35668f6eeec64 +part_01326_of_04320.hdf5 50e477ffa73285710dcf0e2d60ed27e2 +part_01327_of_04320.hdf5 8654110d1522176832198e9c1500fa63 +part_01328_of_04320.hdf5 3b7371cf15580db653944b599e64df2d +part_01329_of_04320.hdf5 77573fcb7874f39d8c29b4cb3d53f274 +part_01330_of_04320.hdf5 95ac1d03d86b897a5fcefd2dd06cbcec +part_01331_of_04320.hdf5 517af1568426c2755779fdf905488e07 +part_01332_of_04320.hdf5 8c8bf4159142453838313ee13ddccf7a +part_01333_of_04320.hdf5 834823acb5e0108bf8ce607f6f247c65 +part_01334_of_04320.hdf5 0a981cb8583722428ef6fc803aca1d93 +part_01335_of_04320.hdf5 cbdac07c7b9f2b8880563d269eb943f3 +part_01336_of_04320.hdf5 c594c3ec4fe83b62167178d5f72b3c5e +part_01337_of_04320.hdf5 7d25a02af0600555b7e93dbd4055a8f7 +part_01338_of_04320.hdf5 aa8434a0d5927df8e11a04a4e51baed6 +part_01339_of_04320.hdf5 20dd9e781fe3daa65fce1af96f8b705e +part_01340_of_04320.hdf5 a140ffa39f49f32c3fce9fa3c3367a6e +part_01341_of_04320.hdf5 089153cc8aa2f2aa2519b837c7e00441 +part_01342_of_04320.hdf5 3f292cbe3e934c3436f71429ab01d815 +part_01343_of_04320.hdf5 a49031ec75936d3e61ea95d44ca50497 +part_01344_of_04320.hdf5 bfa88282557420a34dba0810d9ab8435 +part_01345_of_04320.hdf5 9a86e6ba4b62641412a65a7b5cb4c744 +part_01346_of_04320.hdf5 9471ce91df4afbedb56ff65da6c972ea +part_01347_of_04320.hdf5 5fce8c869a7f3806cab2e92e12b06c8e +part_01348_of_04320.hdf5 014dcab14b69521a3101236fd84e63ef +part_01349_of_04320.hdf5 ced6cb53b55e69de4a69fedbfd5d4877 +part_01350_of_04320.hdf5 d78da55daf38763342d8ad91a3cffa4e +part_01351_of_04320.hdf5 6e66bebcf7b93740de4c845447370aa5 +part_01352_of_04320.hdf5 113f512b057d1fd54bc9fcf8cbb1631f +part_01353_of_04320.hdf5 caa0520562511fa407518e097fafb143 +part_01354_of_04320.hdf5 c388a57581b200196bc568e0f02d34cb +part_01355_of_04320.hdf5 03b1f4ae61034cea96433e215bca5b66 +part_01356_of_04320.hdf5 1d280597b5bce49082270755df4ca943 +part_01357_of_04320.hdf5 23e16b4bef7b8cfbef812ba03261755e +part_01358_of_04320.hdf5 e8daeacd5bd696378078976dfa954899 +part_01359_of_04320.hdf5 e1d598ce4001dbcfdd3ca3939f1a82ae +part_01360_of_04320.hdf5 32a079b2b929837f6c6bf0edf085a31b +part_01361_of_04320.hdf5 b5aae61aab5a05a4afa8569108ec2031 +part_01362_of_04320.hdf5 08c8b75e942c8b9f95d8816a6050ee8f +part_01363_of_04320.hdf5 4cec2f9aa88479646cb3189036350251 +part_01364_of_04320.hdf5 620cb794c74766d490bd6b6d80eef490 +part_01365_of_04320.hdf5 3c96418ea0a6ba004949f0dd87c54ba6 +part_01366_of_04320.hdf5 51567252148fbb857b4df7edab883eb8 +part_01367_of_04320.hdf5 6621296242325c09bd0f3f2036b33b28 +part_01368_of_04320.hdf5 
968a692754ae86b32938805fdc7bba45 +part_01369_of_04320.hdf5 1d898cb9dc5a6df1ffbc5b4d00e93504 +part_01370_of_04320.hdf5 dc98470b889062411d4ff29cd62fddfe +part_01371_of_04320.hdf5 142a828ed5a1ce8a2ed2560ddd00669f +part_01372_of_04320.hdf5 148ca4c74cb0c446d8df9ce942ceb5ae +part_01373_of_04320.hdf5 1541ff6490cbb5c80506eab6e100e00a +part_01374_of_04320.hdf5 12ca09c47ac75f0393aaf522d4c65269 +part_01375_of_04320.hdf5 e1fd92e887f5bc5de2e8cd3ac7e41be8 +part_01376_of_04320.hdf5 68a822f328f948691da895d5bacf10e7 +part_01377_of_04320.hdf5 6071f24c3148df2a46162df48b98dc4d +part_01378_of_04320.hdf5 3050973776817c0cbaf3d8da944c94b8 +part_01379_of_04320.hdf5 1718299284a9e216f42797ff13436805 +part_01380_of_04320.hdf5 c971d23032be8039e12df692fd41c7e2 +part_01381_of_04320.hdf5 795cafa379fba1d234426693d58f0c08 +part_01382_of_04320.hdf5 1d4e0b72caa67e66203af1d70a5885b6 +part_01383_of_04320.hdf5 7a20ea561152b605d663527e09285a39 +part_01384_of_04320.hdf5 3746e731950fce82e5009a1670092cc7 +part_01385_of_04320.hdf5 d055e9fb8f4a45d0db3cb332aca7adf6 +part_01386_of_04320.hdf5 024fca157e14ed0486e2d766337686ed +part_01387_of_04320.hdf5 58192b81d602439042589c0a4da510ee +part_01388_of_04320.hdf5 dabde2c24b5e934255824c8bec2b70d3 +part_01389_of_04320.hdf5 c62aa6aa9ed848378b09afa19dc307fa +part_01390_of_04320.hdf5 6ca8970c999ca7774ba111a6ee2c195f +part_01391_of_04320.hdf5 4438fe3b092ec8cf0fc1373cd850f562 +part_01392_of_04320.hdf5 2697e93cc286102b2180808e15289159 +part_01393_of_04320.hdf5 0a5d4afe278b1f491286037dd9e532ed +part_01394_of_04320.hdf5 a510e2173a1c8361e6b3ccc19c2500ea +part_01395_of_04320.hdf5 0b20a39af26bbd70f2089c36647ae6e3 +part_01396_of_04320.hdf5 decd84e17a46e96254b912d822db0c7f +part_01397_of_04320.hdf5 3e5d91c4d0836c95357818b899a6bd4e +part_01398_of_04320.hdf5 55e4ef6b5ad8eff948358388003804cd +part_01399_of_04320.hdf5 d96e25173917df792cb00d508e119f37 +part_01400_of_04320.hdf5 8cd792e8b111b4fbd49520d38f7cf1a2 +part_01401_of_04320.hdf5 2753e26747dbd3bae61e8b2fb3f81a02 +part_01402_of_04320.hdf5 8c27dc106c6ba8bebd0da14cc6dbc371 +part_01403_of_04320.hdf5 7439494f1fe08a3f287083dde5ac296c +part_01404_of_04320.hdf5 59f493f0a68317d9a5fdff939345d88d +part_01405_of_04320.hdf5 7983c020a1d81c73f1cd03249958d64f +part_01406_of_04320.hdf5 92f4dfaa87984d684500ee601c300de6 +part_01407_of_04320.hdf5 4ae45d789a261d33dc0dccd54221c823 +part_01408_of_04320.hdf5 8b8c4455249671db2af14f1c46be7602 +part_01409_of_04320.hdf5 df2d977ade42ac0b7c55a1e481c47670 +part_01410_of_04320.hdf5 9f3f66bfff584f0e2d36ee482605c246 +part_01411_of_04320.hdf5 9eaaf96330d378397bb87ba4cdfe2ece +part_01412_of_04320.hdf5 8e64ff6174b360241addfeb8c8f9c625 +part_01413_of_04320.hdf5 6559aa6902a991fa6a2f06f8e4de6b73 +part_01414_of_04320.hdf5 0ec6d337ed5e33a2b4e59ee2f8f27af5 +part_01415_of_04320.hdf5 7058491e86159e0f9b2b11ceaed4350b +part_01416_of_04320.hdf5 5aac6d0d13fa4b8cee6b2b7b9bbf27bd +part_01417_of_04320.hdf5 99828cdaa171d30d72dbe8f5f3b02f85 +part_01418_of_04320.hdf5 dc7ca0b7764662fe7fdb14ef484a298e +part_01419_of_04320.hdf5 b433f355cfee1fc52b5ef0952c8b22ab +part_01420_of_04320.hdf5 247468d43babdd5dd227572b9f8803d3 +part_01421_of_04320.hdf5 20c3ebcc0b6d7032c574f75298e6ed52 +part_01422_of_04320.hdf5 3384f91a011976cc411faf34b01158d5 +part_01423_of_04320.hdf5 a7ea154575f33a20fb24000771a844de +part_01424_of_04320.hdf5 384247cf59321b1ca798153d4fc33c19 +part_01425_of_04320.hdf5 1534a93a65af746327a48323fb203fbf +part_01426_of_04320.hdf5 6a4f73d16af202a7f49afb77e99e9e8a +part_01427_of_04320.hdf5 1ab3ec518c586d114c84c78a8fd34da0 +part_01428_of_04320.hdf5 
19a2f6801f31a4cf1b5fed9f2ad10c93 +part_01429_of_04320.hdf5 e84fbe0e79e2f13761ad6060b35d6941 +part_01430_of_04320.hdf5 f574021b2a3df1bf3954849b2b1ca6ac +part_01431_of_04320.hdf5 550f6845e3cf4ed9b3eae8847bb44a47 +part_01432_of_04320.hdf5 1bbaff61d06027527c0f1bd51b47dc03 +part_01433_of_04320.hdf5 632570628579255ec88dd9f33ddf3ad7 +part_01434_of_04320.hdf5 ca121956d88204e07b8ad07b194c9a96 +part_01435_of_04320.hdf5 1f33da810e9a976fe5966514264ae4c8 +part_01436_of_04320.hdf5 ae29ed76e0935f7a96661c90766fe35c +part_01437_of_04320.hdf5 850270eaeac1889710413d79b73ed443 +part_01438_of_04320.hdf5 065499795b705822e7cec700b68f36fa +part_01439_of_04320.hdf5 2e0723ee5118773eaaf21146e71a474f +part_01440_of_04320.hdf5 6e827f72652612125f3372c49943b7d3 +part_01441_of_04320.hdf5 c91e17550d69b77c0012cb4c81c9793f +part_01442_of_04320.hdf5 9a1967096672167fa39118ed51c0f251 +part_01443_of_04320.hdf5 6bc8abe2e3cce09368114cf424e7f782 +part_01444_of_04320.hdf5 0d7feefa68bc93540c87cd94e619666a +part_01445_of_04320.hdf5 c2177f881e38d6eacc910ecb9da9cc9e +part_01446_of_04320.hdf5 8efb92c8eaca423ea6ce1ae915135c32 +part_01447_of_04320.hdf5 66ac50bdf847b63e9427daf8f7cc86a5 +part_01448_of_04320.hdf5 896efb08191238855d4139eb8708682f +part_01449_of_04320.hdf5 93b4d1fcfcdf714a2c2cd8c4fbfb0de0 +part_01450_of_04320.hdf5 e57b4c8704448e384385e8c3ec525644 +part_01451_of_04320.hdf5 b74596a966251465013903418bfc91ad +part_01452_of_04320.hdf5 8c1a440c6b484d36639179386b9e2379 +part_01453_of_04320.hdf5 f773bbe281a8b52cd277754fd313fcba +part_01454_of_04320.hdf5 c0bef7b5902b9f865e957dd2ab1101e3 +part_01455_of_04320.hdf5 2b206f66d639da781255d7f2e768f0e4 +part_01456_of_04320.hdf5 885f4b65267621ca4f967096ce4b7031 +part_01457_of_04320.hdf5 5ef6d7fa6bbeb17025282db75f720286 +part_01458_of_04320.hdf5 f8a89011eab5083d4c9613d8d7cf70ce +part_01459_of_04320.hdf5 44ea7a0b89a0326ba8a511f1a91a5bd5 +part_01460_of_04320.hdf5 f78f39c187f736a580b4d5316baf9590 +part_01461_of_04320.hdf5 98273240744aeaff449f2f9de535a008 +part_01462_of_04320.hdf5 1ddf2b89d3d4dc8e9d4c3ba3e760eda0 +part_01463_of_04320.hdf5 176b1f20e1de8ddb4ef7486e135ece36 +part_01464_of_04320.hdf5 0fb92b9efe00396f14efa2b0a42deefb +part_01465_of_04320.hdf5 76bb4ba296b7ae7ecfb01807ff1cdca7 +part_01466_of_04320.hdf5 3ad16f0fc4d9d4ef5c72fa2612ff5140 +part_01467_of_04320.hdf5 344cf6b884f715af97e77ae0c2a79723 +part_01468_of_04320.hdf5 48c3fa55f13349265b3e3270dfc79416 +part_01469_of_04320.hdf5 3d4023f6a27a0dc7fd8cd58b0a8d0996 +part_01470_of_04320.hdf5 1936ae2e883ccccc3fb9b05f2fb911fb +part_01471_of_04320.hdf5 66a39b2d75a94d283ef2301330848f30 +part_01472_of_04320.hdf5 ae40d2b5f541fbbccacebfeb29431de5 +part_01473_of_04320.hdf5 6330eb7005bcf6a826fad7056b6dbb75 +part_01474_of_04320.hdf5 69347b7101fdbf4d327c2b6697a63fe6 +part_01475_of_04320.hdf5 67dd3ccba2a3365738bf884ab7748369 +part_01476_of_04320.hdf5 338f66dbc279235c16e8a21c31badbc2 +part_01477_of_04320.hdf5 97458e818203d8139eb5d5c617d36f3d +part_01478_of_04320.hdf5 e607cc64fa6b57e76a9b33ba7b6bbae8 +part_01479_of_04320.hdf5 0b5702cad6b3e2c58951876f965ffa59 +part_01480_of_04320.hdf5 713449319b4925daa37832c193e9d3bd +part_01481_of_04320.hdf5 e53fda2d1dbf4d523ba4da282dfca20c +part_01482_of_04320.hdf5 30da27ac375750314009d41b9a8ea8e2 +part_01483_of_04320.hdf5 56ab61295435d9739730b7e8ac088a13 +part_01484_of_04320.hdf5 f06632152e2b8a7207b87cfe2927a824 +part_01485_of_04320.hdf5 fa85b4bf976ba558a8ed0d4b6181af96 +part_01486_of_04320.hdf5 736abcedbfca2ee69c3794c58a2c8cdf +part_01487_of_04320.hdf5 f962631e7dc0f0d6fb56942d2c7f7418 +part_01488_of_04320.hdf5 
31591e01d34dbce7a32df6724ab9a8ac +part_01489_of_04320.hdf5 21135548e7b2917048950ba91711a9c3 +part_01490_of_04320.hdf5 fa394dc79745998c64067f31b3fa1fd7 +part_01491_of_04320.hdf5 545ed3f7e7c648e73e0a015f652b52ec +part_01492_of_04320.hdf5 337ef10553f981a9cc998c0775172ffa +part_01493_of_04320.hdf5 f2275efddd2d1b423bd5793cb7c3f76c +part_01494_of_04320.hdf5 9f1db46eec241287e4ffa63f7ec11281 +part_01495_of_04320.hdf5 ce3d89b818f42640b91b234868471acc +part_01496_of_04320.hdf5 1f22536ddc8de843d8cae3b73dfe44c2 +part_01497_of_04320.hdf5 f6d3f9b13c6946870e8a7e56b235166e +part_01498_of_04320.hdf5 dfd41cfe889e976c57a6710be2bf7e46 +part_01499_of_04320.hdf5 1a896711af0d15beef0e76aa7928fb5e +part_01500_of_04320.hdf5 58c838b031bcc58a84b6b8004249ea34 +part_01501_of_04320.hdf5 d2f2875848df3b1664c490c188a2f5ab +part_01502_of_04320.hdf5 e1bb70d3d97c12389d7ed0faae83684f +part_01503_of_04320.hdf5 3207f23b8c4fa0922ba3240d5efee4da +part_01504_of_04320.hdf5 6f3780f6bc921df5b148d029c4a46c91 +part_01505_of_04320.hdf5 27718923d48d5725f29f360112d723a5 +part_01506_of_04320.hdf5 596d20349bb5457bec8c50e10ec18549 +part_01507_of_04320.hdf5 ac21c2caca7bab447140b6ee1db4235f +part_01508_of_04320.hdf5 ceebff01fdee8e1ebf2a4504945e0621 +part_01509_of_04320.hdf5 6d82ced780424bbef8472499b67016be +part_01510_of_04320.hdf5 f8a4c540023a997277187734d6e1632c +part_01511_of_04320.hdf5 a9da2f34a6ab1d7c2585e1fc2dbdc868 +part_01512_of_04320.hdf5 d2ef8632033dd390a714d9101dffae89 +part_01513_of_04320.hdf5 29efc14f0271e2c1d21bd1b1e025f03e +part_01514_of_04320.hdf5 65e52478aee7a88b6ba01217930b4d3a +part_01515_of_04320.hdf5 eec4d5c0df1766c357e370db0f5eaba2 +part_01516_of_04320.hdf5 bda6f75dc605516303b116e10c86051c +part_01517_of_04320.hdf5 6e26c635f58d206f5581cc3f0f757666 +part_01518_of_04320.hdf5 5b8af273e755ae6d4e7bf537b8878d2b +part_01519_of_04320.hdf5 4abc97b4c358ef6354ee4b83b52789e5 +part_01520_of_04320.hdf5 a6c11fae9261358bc0527d2d91d2e1aa +part_01521_of_04320.hdf5 df6a6640c2cd3cce4ea77d0d16260184 +part_01522_of_04320.hdf5 617176e19d223b67aa5fd53b7044969b +part_01523_of_04320.hdf5 d84586efbb4f4c7122b566b88106dab7 +part_01524_of_04320.hdf5 d5c3e210d49ffc6308b55efcd1eae460 +part_01525_of_04320.hdf5 44d6d1b4183e91c86d405adec67e0208 +part_01526_of_04320.hdf5 9f9ed19ae9aeee1f9b49ae25753acc81 +part_01527_of_04320.hdf5 7fdf6da413b3321948138ba831825ba5 +part_01528_of_04320.hdf5 87c6c33b4ef15f7c168929aa504313ef +part_01529_of_04320.hdf5 ab09343357642cd73b1d315774b0a281 +part_01530_of_04320.hdf5 e0dae68eb6eda8fc0e5d4b6c5b5781e7 +part_01531_of_04320.hdf5 1ab3c5a1f9e73ebccbbcebee00092458 +part_01532_of_04320.hdf5 0660dfa808edc7c453d81c3a656c6256 +part_01533_of_04320.hdf5 eb86fadc72e0adf43897f5da1406d05f +part_01534_of_04320.hdf5 b1517cda54b9127e812faa6028f9b0b0 +part_01535_of_04320.hdf5 cb9309e6b2700adc41408bd1280e1e91 +part_01536_of_04320.hdf5 96d5e91f3c5bc714d8153e04bd3df516 +part_01537_of_04320.hdf5 b64fa6b8da21b158c7a4f374b2e1e6ee +part_01538_of_04320.hdf5 fd6296cf26424b6a3e0ad814d0cc43d6 +part_01539_of_04320.hdf5 03b5e064784e23ad593469a54702a186 +part_01540_of_04320.hdf5 db55c8a71ac6149508cd55686427e388 +part_01541_of_04320.hdf5 5bdced51b57bfc4e41c1fd4a1a931ae5 +part_01542_of_04320.hdf5 ea08d1a9cb47447cc1342e90a9c7f2a7 +part_01543_of_04320.hdf5 5c3f90ac8a2dd83149348fda128cc922 +part_01544_of_04320.hdf5 5421bae39afab618dbd40678edd2bb36 +part_01545_of_04320.hdf5 ff411c9889e28ea4abbcde4266acf6ea +part_01546_of_04320.hdf5 7b7f146583ed024c6b2ddb4c891cf9e9 +part_01547_of_04320.hdf5 2768c1441d65e0b90298799b32c64b89 +part_01548_of_04320.hdf5 
b8ae4655bca83206b286a6aeee6ee320 +part_01549_of_04320.hdf5 b6d7b5a8e999c2ffd450bad11c1333f5 +part_01550_of_04320.hdf5 2b9913ade19e6e792ba3e51d86df847b +part_01551_of_04320.hdf5 b8fb9259c364c6ac8f4113e3ad240ce8 +part_01552_of_04320.hdf5 9e2c71653aec9878ae3cbbd02a806384 +part_01553_of_04320.hdf5 385bc0b9b3a811beec4538dde20dd4b2 +part_01554_of_04320.hdf5 171aa48f60b6eef346b9af1e7379e02a +part_01555_of_04320.hdf5 7fc876549d484c0fc38ef59fb7efca03 +part_01556_of_04320.hdf5 bb4ee3e890aada6a2698316a6c0ce060 +part_01557_of_04320.hdf5 893e6d7ba9a0253d90997123afb539e8 +part_01558_of_04320.hdf5 3781ac8fb599796c4f2e0d38acb06f22 +part_01559_of_04320.hdf5 566ce7a53072f39cc78061a0c7955641 +part_01560_of_04320.hdf5 758785acf9d9bb6bb5fc5b7267505e5f +part_01561_of_04320.hdf5 3c001bcd1b7aeb9729ed2af8259efd43 +part_01562_of_04320.hdf5 a00f6df9579e26a2d1a27e164c4af493 +part_01563_of_04320.hdf5 9693b4c0b6256c605a587bcf36db126a +part_01564_of_04320.hdf5 8ff74524271c15c9c0c6de3f6eead81a +part_01565_of_04320.hdf5 04036387ebdc13eb775fcbb6c8576418 +part_01566_of_04320.hdf5 1eb145b5cce8dff88e3fb258828b458d +part_01567_of_04320.hdf5 bfaa76d2f37b36195ab5a6c4472a5c69 +part_01568_of_04320.hdf5 f866115b82aa0bff357cb137caf4f780 +part_01569_of_04320.hdf5 401aeac6b21043bdc32464e3cd979541 +part_01570_of_04320.hdf5 7dadc48a6f2b160c1efb915253eaa830 +part_01571_of_04320.hdf5 50d07d40e69c41f2dc5f76d46cd0ae1d +part_01572_of_04320.hdf5 2325183fa57788670fcbdaa3c2741dad +part_01573_of_04320.hdf5 7cea9aa4d254c37e424f6e86eec116b4 +part_01574_of_04320.hdf5 e714540812134bca3f8ba9da36807c7f +part_01575_of_04320.hdf5 77e705e580f401adc25cdebf4d78eb64 +part_01576_of_04320.hdf5 d222a790f990d24bde95f1cb633a4b05 +part_01577_of_04320.hdf5 8371d9046af0880949eba752fe7bda78 +part_01578_of_04320.hdf5 d18ebd21d3bb2821185195214b6c70fb +part_01579_of_04320.hdf5 10eb11cc71c8b3be4d7e01ccf54b0706 +part_01580_of_04320.hdf5 860dca352b3b566d04688e43ab80413b +part_01581_of_04320.hdf5 20326cfc4b68d3bc8187a58e80a2c879 +part_01582_of_04320.hdf5 c2f2ece0dd961bee3ff9ea4342aa0a3b +part_01583_of_04320.hdf5 291ffd1ac57628ed094381eb4a2f20ad +part_01584_of_04320.hdf5 357b828f2f263023efd31744fc3ec3cc +part_01585_of_04320.hdf5 5d5f188ff681593a2270df995b3cbb72 +part_01586_of_04320.hdf5 6123850f706fb482edab4ce0d44d51d9 +part_01587_of_04320.hdf5 601f5d6b4ad4ac982780c0c611f2b091 +part_01588_of_04320.hdf5 c30ad0753eddea3e4e36509a2d6d3f0a +part_01589_of_04320.hdf5 1427cb876f7596e06bf693fc45ba94eb +part_01590_of_04320.hdf5 b911239b43cd087c5604e1586e20a2bf +part_01591_of_04320.hdf5 909feb06bdacd02033dbfe2b764ecf7b +part_01592_of_04320.hdf5 87265c14eb46f6121fe51f565239b77f +part_01593_of_04320.hdf5 d74ccc8607d87a70fd559a9109ee4b82 +part_01594_of_04320.hdf5 17aaa796aa6fe34723bba71c13033d9f +part_01595_of_04320.hdf5 027edea96c4add9faa7535f117d89c03 +part_01596_of_04320.hdf5 ed171f709eaec57f711b8c1171cbd9ab +part_01597_of_04320.hdf5 b99463d6df204af2377a685fc5afba14 +part_01598_of_04320.hdf5 377af9d6d90c35d001134a8ae687abd7 +part_01599_of_04320.hdf5 7b4d2fef765ae719e3dfa18d4b9181b7 +part_01600_of_04320.hdf5 163b3bb8ddc51a8003406d04088c521f +part_01601_of_04320.hdf5 7ee3d8d0911e54482c0eddba54bfecf1 +part_01602_of_04320.hdf5 e56a86d2abcc4f6d16334668121be8fe +part_01603_of_04320.hdf5 460a7d4d73d93a541088e88c63308a21 +part_01604_of_04320.hdf5 0f798464e056410e6bab3782a98dedb5 +part_01605_of_04320.hdf5 feb4c3bff75ea2cea1430d553064716c +part_01606_of_04320.hdf5 fd16570db7489892494fec66fe8b6604 +part_01607_of_04320.hdf5 9e1a33d66f468583f07e3e8b7290d1bc +part_01608_of_04320.hdf5 
60d331e2a87e8fe1788b23206d1b8a2c +part_01609_of_04320.hdf5 6a6bcf2fdb7595a51e8a5efb2a072f3d +part_01610_of_04320.hdf5 7b23f0b13db0811b4ed1e0c7783ad6f9 +part_01611_of_04320.hdf5 abb851988a53ae2326b0b7cf38866dc2 +part_01612_of_04320.hdf5 6fd4fefbf45dded407f994a8abebe36d +part_01613_of_04320.hdf5 907d017bea80e8e7ffcc6f608ec539d4 +part_01614_of_04320.hdf5 cf4fdedad0871c25e31c25ce756eb90f +part_01615_of_04320.hdf5 9ceea17c6ddd8884323639371ce33b8b +part_01616_of_04320.hdf5 8b04b869f76e1a4cf6a1f2a89e7aafd0 +part_01617_of_04320.hdf5 c8a4341f4001a6ea84d20bb3e87b5fa9 +part_01618_of_04320.hdf5 cb9d132a37df650eb2bb2deaf21bc27f +part_01619_of_04320.hdf5 338e4d1e15b704694418dcc8732beb75 +part_01620_of_04320.hdf5 af406b9ade77a866c98041914bfdf6b5 +part_01621_of_04320.hdf5 b5024fa50622fc2528ac4435bcf33cab +part_01622_of_04320.hdf5 c734481e2f934410927700a47f54871f +part_01623_of_04320.hdf5 9164fcd95cf44d7bfbbb022f0153bb5f +part_01624_of_04320.hdf5 388e8f793d72c399d4b6a29b4ac27e29 +part_01625_of_04320.hdf5 e190f26094074cefefc57b97adc1316a +part_01626_of_04320.hdf5 bb5c848d03f3e0b366ba0b4d8d983be5 +part_01627_of_04320.hdf5 823aa5aaf6e48f691e3bb5ad2512ce7c +part_01628_of_04320.hdf5 1b10017d865e0d969ab861bced461766 +part_01629_of_04320.hdf5 5c506d8cfb1f1a32e5eb4dc9df2734f4 +part_01630_of_04320.hdf5 acf64031762ecf421d0ef9e841dfacbc +part_01631_of_04320.hdf5 86d8f963433f188e0ee68aa54c3f7ebf +part_01632_of_04320.hdf5 e6553ed16d9d42a6aef98abd63b8548c +part_01633_of_04320.hdf5 a8500cc45995c9c7351ad23d581334ad +part_01634_of_04320.hdf5 e79003a51545460ac39b99d332b918dd +part_01635_of_04320.hdf5 5c9645d1f39b090a4d76cca6276c8de9 +part_01636_of_04320.hdf5 a0d8a080fceae10f4a428a4bf7b60d02 +part_01637_of_04320.hdf5 71a875fa393ab9c2537768bf42214821 +part_01638_of_04320.hdf5 483effeb1681841c5a74198bd11ef153 +part_01639_of_04320.hdf5 385e8637cbb00f713c07c6c4ce26938b +part_01640_of_04320.hdf5 4f41bb1cff38c7db9bea8e9bda663d4d +part_01641_of_04320.hdf5 d7e515f8120580a43e13176c5b309b74 +part_01642_of_04320.hdf5 9a52a4d3c980c82e4c8d546e63c88ef6 +part_01643_of_04320.hdf5 caa4c4b9ed006553b479cd67f039db0c +part_01644_of_04320.hdf5 30d3e5813e4488100f3e58c359b8bbfd +part_01645_of_04320.hdf5 c7c6e4135cf9505087cd8108f2f126da +part_01646_of_04320.hdf5 b2b8090cc215f8a580c5ae4cb33b05aa +part_01647_of_04320.hdf5 06493e376423b8bded1365165e96a949 +part_01648_of_04320.hdf5 4ef9f52ec8cbc0534f1047ef91f7e6ef +part_01649_of_04320.hdf5 cf88b47f19b0536fabc19b0422eab7c3 +part_01650_of_04320.hdf5 e4e6354f7aed08bc1bfb4e622cb9c26e +part_01651_of_04320.hdf5 c3b717bccd8fed83ce549c5eabc9342c +part_01652_of_04320.hdf5 19a195971685a20e52d9e21f2b05bec9 +part_01653_of_04320.hdf5 d6469afade1f6ce70233eb082bd80bca +part_01654_of_04320.hdf5 e46ce4efe35a96549cbf8fd24a517fa2 +part_01655_of_04320.hdf5 23e9d833d5f43bc59adb3717f630aff7 +part_01656_of_04320.hdf5 61e7acb68aaae4db1bab9a8766b0b0cb +part_01657_of_04320.hdf5 07f78f0914681fc602499a0f5d74589f +part_01658_of_04320.hdf5 c07573e92ce0c504d30956e998bc9226 +part_01659_of_04320.hdf5 ce9be6c3198659225cea2b0a951afc2f +part_01660_of_04320.hdf5 4f76d5d9c66572547a4744fc6610feba +part_01661_of_04320.hdf5 e1f816980b77262c333204308ce8b2cb +part_01662_of_04320.hdf5 a4e92f238796afec2cdc8c89721e069b +part_01663_of_04320.hdf5 a43b14830589b45d211406bb0a7570c0 +part_01664_of_04320.hdf5 6f2fc28c3bb55aa3726ef49f37197d1a +part_01665_of_04320.hdf5 318f0b41538399c05d741619ea5f97a5 +part_01666_of_04320.hdf5 e1fa46eff076b71aedb3e05c9750dad3 +part_01667_of_04320.hdf5 f9a4e64149c64e55c1d3fad025cb3b44 +part_01668_of_04320.hdf5 
098bce10edb3274253faa58441a4518e +part_01669_of_04320.hdf5 02e09736de6c4194679e33104f6f764b +part_01670_of_04320.hdf5 8d60fd67ebe7713ef3594ac0dcfed55d +part_01671_of_04320.hdf5 a413827ec4dd522654cf7f52b97cfc53 +part_01672_of_04320.hdf5 4b0ea6a63dca6f2cd3dc0c9571dcc2c5 +part_01673_of_04320.hdf5 37045138780cfa999a6a530520134538 +part_01674_of_04320.hdf5 08a4dcabfc1c98637f3d73d5b9fb0d1a +part_01675_of_04320.hdf5 5ece0bb7cde489cae4620a5678411ea7 +part_01676_of_04320.hdf5 37afd67f8ffaedf684458ff6a2e9a6bd +part_01677_of_04320.hdf5 d88e75a1fbd30b8800c55aab22909307 +part_01678_of_04320.hdf5 f06cee90b0b55ab21d7b5d91f69bc4bc +part_01679_of_04320.hdf5 d152678c5e61c3be36090b35ca693962 +part_01680_of_04320.hdf5 12cd90758b16d214a85dc0341df1c5ac +part_01681_of_04320.hdf5 41320862e9efb7913615de76f151ce25 +part_01682_of_04320.hdf5 821c9e198f6433cd687d099caaa85ecd +part_01683_of_04320.hdf5 0d71ceee816e5f89c4ce9c604d023efa +part_01684_of_04320.hdf5 cf6a83b55200963b96343355fd0c7673 +part_01685_of_04320.hdf5 b2f1ac3086f4f761f7ea8e13c92c15be +part_01686_of_04320.hdf5 30c0c61507f74c73577c7fb7d0145bfb +part_01687_of_04320.hdf5 1674d0884b2b9f52321257d78fc04a75 +part_01688_of_04320.hdf5 f82d132e65f034755f63fafa63fb1e22 +part_01689_of_04320.hdf5 41f11c8eb76c1f1afea38465068a2905 +part_01690_of_04320.hdf5 22bc8221024119dccc2ff51e045141bc +part_01691_of_04320.hdf5 22d945fe6f2389ac47badda4f644e85b +part_01692_of_04320.hdf5 43bd3d92770494a918c88898dabf2943 +part_01693_of_04320.hdf5 67b3ea30aa20f46f8a5502c43fee3239 +part_01694_of_04320.hdf5 777e2c3470f85d7216f6715c33938205 +part_01695_of_04320.hdf5 1cb44ecb12523a85f49918cfc9159885 +part_01696_of_04320.hdf5 123520fd2cf7729fa08b2be4f9c643bd +part_01697_of_04320.hdf5 ba8698b605b015e82bc3caabb3f08c51 +part_01698_of_04320.hdf5 5287bce4b7d2ae582e689406ea68e981 +part_01699_of_04320.hdf5 08420620932477d95a8af6db818f0caf +part_01700_of_04320.hdf5 ba5e94fc829866a9c2aca1821a0384a8 +part_01701_of_04320.hdf5 fd365803155c27dd4385d0a0fafabec3 +part_01702_of_04320.hdf5 17e1e11d4ea92db95ce998b5af2d58f8 +part_01703_of_04320.hdf5 3bf0e43505ed8ea5bcb8269faff1b06e +part_01704_of_04320.hdf5 b8270f96b74a681dd121dd8d10b4a1c1 +part_01705_of_04320.hdf5 ab8055b69340cd7101d50ef6cd445811 +part_01706_of_04320.hdf5 e3a0550f972a872e1e81e80c519a3506 +part_01707_of_04320.hdf5 6aadd47b17856374d29b6b888b142417 +part_01708_of_04320.hdf5 f12c2e838640010849cd5bac0f9276e5 +part_01709_of_04320.hdf5 747d2d8211be0b523a602f82c24d166e +part_01710_of_04320.hdf5 723e584f530c42e3600d073490286ac0 +part_01711_of_04320.hdf5 2fbc1f50aaeb447d5c752a6fd16b2fbe +part_01712_of_04320.hdf5 381cd182837164c83b41a407568f995a +part_01713_of_04320.hdf5 40dde5423eefdbe41ffedd75cbd727aa +part_01714_of_04320.hdf5 c9eadbcdf829ff357b41c9265b9f5e5a +part_01715_of_04320.hdf5 b50a536c72e205d0c5c4e07faaa19e3e +part_01716_of_04320.hdf5 bfe4d17855c17d8f49b787f33ac3aba0 +part_01717_of_04320.hdf5 551d1f7797ccf44d54f2a7742ecc118b +part_01718_of_04320.hdf5 ac9fbebaec92bc45e6ae3e5474799777 +part_01719_of_04320.hdf5 41fae03074b8d525d4b1c2325c3481dc +part_01720_of_04320.hdf5 b4752ac1536de8063fbc1e95c7e428cc +part_01721_of_04320.hdf5 cd9e7ebcffbbf5407ba83dce2eebcaf8 +part_01722_of_04320.hdf5 caf6b4c2fec3b580d63e7eeaec5c667b +part_01723_of_04320.hdf5 9d93acf0441ab1c660111851b0ff7e7c +part_01724_of_04320.hdf5 fb47f7b1eabd251b207a90d7eceb7ef3 +part_01725_of_04320.hdf5 5fc4b51a2f188d9388455eb32c7c67ec +part_01726_of_04320.hdf5 dc1c7cede7fddc7dd7a917191352185b +part_01727_of_04320.hdf5 d0fba5da107ce7f2904c59eded579564 +part_01728_of_04320.hdf5 
05d223394fa4a415079100ac91237240 +part_01729_of_04320.hdf5 8787878288ef40d92472c23214d862d2 +part_01730_of_04320.hdf5 8c16c08578047b4b80c2724ec25ccdb3 +part_01731_of_04320.hdf5 4fd110697d3abd198c9e0fdfe71102aa +part_01732_of_04320.hdf5 dc13d8935b13791811bb7aaef04373e0 +part_01733_of_04320.hdf5 1d1255e6b17f6b442353e2024ab56127 +part_01734_of_04320.hdf5 d5fe8552fd8e7feed5980821a50b483f +part_01735_of_04320.hdf5 0ac3301dd1c38c368be5a139e38f25c3 +part_01736_of_04320.hdf5 546d02536438cd13076b0becabd70482 +part_01737_of_04320.hdf5 c77097d8eb7a8361e2c4045b6fe41280 +part_01738_of_04320.hdf5 0b6c5c596fbe2ffbc9f12028e9c6711d +part_01739_of_04320.hdf5 3f9c56838e63a802e0111ca2840ac0ca +part_01740_of_04320.hdf5 384e7c9c5d5d827dbd54629d5b1fdce0 +part_01741_of_04320.hdf5 ce3db02e180a7071156891b6fb5f8754 +part_01742_of_04320.hdf5 6f7f2893f9eada6f31757db3e4f2c26f +part_01743_of_04320.hdf5 6f88a0b87881db356adaddc617769259 +part_01744_of_04320.hdf5 a46ca8d3933760b470431f2d364af9dd +part_01745_of_04320.hdf5 d0dfe57b5a457eb8e6a733e392279a91 +part_01746_of_04320.hdf5 d70cbfff2f2ab10326f32d5a18fe5c4b +part_01747_of_04320.hdf5 3fbda3877a80a8ca9cbac52b23b53cf5 +part_01748_of_04320.hdf5 46818e7961714edb50582e18a2eb6e55 +part_01749_of_04320.hdf5 28fcf862e37e44c60b8de9d41e217e96 +part_01750_of_04320.hdf5 b99c66ec38079311f26e6f0abb68d964 +part_01751_of_04320.hdf5 43096de4d276c867b6d55902fcdac67e +part_01752_of_04320.hdf5 e5ca94dee329876855f2e7c281379482 +part_01753_of_04320.hdf5 281c07519ba6045b7619e655a0101548 +part_01754_of_04320.hdf5 8c1dc86505ff060778eff33814032d43 +part_01755_of_04320.hdf5 eb4fbab2c44b8deb41634064d59f6fa2 +part_01756_of_04320.hdf5 1d646634d7ab0b3913175bedb50b88b9 +part_01757_of_04320.hdf5 85843d4d49e458cc4bbed0e640326f3b +part_01758_of_04320.hdf5 6ab6167c8996c562c7db049ba2ff71c5 +part_01759_of_04320.hdf5 1519a8c604ce7fee89bdc991e6f26159 +part_01760_of_04320.hdf5 bb5b17cc4809398c5cf35d07e1d3c468 +part_01761_of_04320.hdf5 ff36a512548037ead602cfdd8befabd4 +part_01762_of_04320.hdf5 f633f2944fd5b39a51c2ebacfe0147da +part_01763_of_04320.hdf5 8b77d5799dcd3ed9379ee71c5c6bee9d +part_01764_of_04320.hdf5 2e0b9afe68f04279ebe98b102a6791d0 +part_01765_of_04320.hdf5 75246ac17c39b49f0723e2ae51b77fac +part_01766_of_04320.hdf5 2c4c1a2928102d0d238bc95de42e3b30 +part_01767_of_04320.hdf5 b36ea7c7a263341a02268987f6f5168e +part_01768_of_04320.hdf5 5ad71280d925a254e4d6bfdb6baebb41 +part_01769_of_04320.hdf5 a8245cdbde8d5c81a333019db17f695b +part_01770_of_04320.hdf5 46e0214848309bab4129d76021ec3222 +part_01771_of_04320.hdf5 37a1091b017d96f4c79d7db67645d6ee +part_01772_of_04320.hdf5 b076b5150c2709f53b281359ac0d0693 +part_01773_of_04320.hdf5 8892a5dc588929f8cf68d8d862b9d599 +part_01774_of_04320.hdf5 618028b6f5aef1794a848b6c54aad3fa +part_01775_of_04320.hdf5 4eb3c7d52c884bb2febdac8e6254e628 +part_01776_of_04320.hdf5 61cfe8eb146e029d20d3b94c2e905ee4 +part_01777_of_04320.hdf5 3812a5fccf0ba1ba74415cf475c5f289 +part_01778_of_04320.hdf5 dbd482202f5a784a4ea98c62662c59c6 +part_01779_of_04320.hdf5 92d3dc7717510d1d2570ebc096cf5e00 +part_01780_of_04320.hdf5 da52c1270b0d5199388f4d26e913a627 +part_01781_of_04320.hdf5 0f87a752a309202eb6063b9bd0f47276 +part_01782_of_04320.hdf5 0f7d4fe0be65daf6b573487bfd5b59d9 +part_01783_of_04320.hdf5 2b781e921802dece0e9ad6bf70bd118f +part_01784_of_04320.hdf5 a66333a08a225265ed3495d7fc88640e +part_01785_of_04320.hdf5 46a0b3fbe7b1c609a46f3c1474bbb064 +part_01786_of_04320.hdf5 ce66cfe08f5d1cfdce5dc6b16fcc0f72 +part_01787_of_04320.hdf5 e8ea32b7c41abd1c7cbb1b09eb66e25e +part_01788_of_04320.hdf5 
eddda70b654146d55d8f1fb7fa4cd652 +part_01789_of_04320.hdf5 4df7e6067de690e67316836b68905344 +part_01790_of_04320.hdf5 485bb46b02d4da77622b5cb29fc5ff7e +part_01791_of_04320.hdf5 fa7e9d5261ad67d1a336b8000fa29a18 +part_01792_of_04320.hdf5 9282e2ae64b7f642bbc8e913f44945a9 +part_01793_of_04320.hdf5 08e906ef89b20516485f0859e8f47cd6 +part_01794_of_04320.hdf5 798e26e442fde73da7d2d94f16f00a4e +part_01795_of_04320.hdf5 30c5142c5b7cd7f466bdfd6cf102e1e4 +part_01796_of_04320.hdf5 85199f72f6f7507751528f0f6803cb94 +part_01797_of_04320.hdf5 69f089ab2b95e617703591cfd879144e +part_01798_of_04320.hdf5 1264d31bf2ceff42df1a7127c949eca7 +part_01799_of_04320.hdf5 2dedc4b848fc8d5e36eb57621ce12bb2 +part_01800_of_04320.hdf5 7f2b865b60514605025f4bcf2b6e1eca +part_01801_of_04320.hdf5 9876dd966e45026dcc99840176c7c44b +part_01802_of_04320.hdf5 3fc8056e511f1273d8fef4f3affb616a +part_01803_of_04320.hdf5 fbc7f81f1e25d0ac3acac22126be4678 +part_01804_of_04320.hdf5 bc70401e9381c11f5cd31609b8e53aae +part_01805_of_04320.hdf5 22aa53091bf24d1b17d0b97d9fb2a7db +part_01806_of_04320.hdf5 b9be848b54e260873ac09dba23c57e8f +part_01807_of_04320.hdf5 802b84c682caceefc284bb7b0408ffbb +part_01808_of_04320.hdf5 9bead2f03643112ab17e046e8b849d33 +part_01809_of_04320.hdf5 1812e04e6c764e73b2bb21489b047e09 +part_01810_of_04320.hdf5 9df2a7ee8e13451dc72be8c3d461ba00 +part_01811_of_04320.hdf5 8f6d863a0a136c30b80e2652dd489dba +part_01812_of_04320.hdf5 eba4ab388e24e6622825292d7a65cfef +part_01813_of_04320.hdf5 86378030271088cc0b11be1c41fa508c +part_01814_of_04320.hdf5 8bfa8ade9f22fa26e01ccbf72c901a48 +part_01815_of_04320.hdf5 3a55f70787444a7dae392b4a95cfbdbc +part_01816_of_04320.hdf5 709d4a7f39404587253b616333f9817d +part_01817_of_04320.hdf5 5564b140061e71438336d0b055c56a64 +part_01818_of_04320.hdf5 9c5099cf5e0367ebb7eb437c9cbea2c8 +part_01819_of_04320.hdf5 ce24d5e971a0ceb60ed734158739871b +part_01820_of_04320.hdf5 43d47049b67286bf96055ae04338b903 +part_01821_of_04320.hdf5 bc115cbe455e6c95ec34a58b8f0af223 +part_01822_of_04320.hdf5 8fdebf31ffd6d386dd26c3a36a2c787a +part_01823_of_04320.hdf5 974b5920aa958ff7ba1abc11c7129623 +part_01824_of_04320.hdf5 2bc3a6b7939df252a57e383c25a10e46 +part_01825_of_04320.hdf5 3295d64a1685aaff448e5c02a6137e6f +part_01826_of_04320.hdf5 99f42f03ebb2683262074f7c6c85446a +part_01827_of_04320.hdf5 bb5f93a1094ecd2141f130ed6e0abc07 +part_01828_of_04320.hdf5 8ea3a789d1d6137668457e1d9c00dbe2 +part_01829_of_04320.hdf5 0970cdb1d8d254e371a8e0e249ceb468 +part_01830_of_04320.hdf5 353d7e737d8d93099a8b2b6df9b14f0e +part_01831_of_04320.hdf5 bbcd4f0ae80acb7386c2537d15df2e8f +part_01832_of_04320.hdf5 d97dcdc22e337ed4fb68b7e0635abd66 +part_01833_of_04320.hdf5 98918098ec6d3d33b0bcf745b8f9f01e +part_01834_of_04320.hdf5 a5b391d0c58fbda63710f83d36ce3a81 +part_01835_of_04320.hdf5 9fafc0504033b85d99e9ae25c3817176 +part_01836_of_04320.hdf5 c15c6310222207e3aa5c028ee59ddd2d +part_01837_of_04320.hdf5 0ebdaac0bb8304007d8b320c31a4379b +part_01838_of_04320.hdf5 20298de05b9387a93f56c28f496aa316 +part_01839_of_04320.hdf5 ea87a920dc0dc97ff86bf87ffba7ec5f +part_01840_of_04320.hdf5 c037f52ecb8af032828111a5ad4f1ca6 +part_01841_of_04320.hdf5 9490744fa669c00d861898ed3793790f +part_01842_of_04320.hdf5 39495ba97e551f9eb1ccbbcb8370c678 +part_01843_of_04320.hdf5 c91e251fb8aa5d4a707365534429b448 +part_01844_of_04320.hdf5 0cfdd6c46f70de70e127f0e09ad122a3 +part_01845_of_04320.hdf5 f8634d9c54e1f809d46286ebcbdc761a +part_01846_of_04320.hdf5 2c9cc80c39b8e0ac3b839410dc6280fc +part_01847_of_04320.hdf5 1269709471cf8b75ddc3d925de0f02d4 +part_01848_of_04320.hdf5 
6f80d65e1b061765875c2bdd6daa2684 +part_01849_of_04320.hdf5 3479a1fe0a15763f83a64371c209133a +part_01850_of_04320.hdf5 9e531106def9dc7953144d611c8449f2 +part_01851_of_04320.hdf5 366b2c12cb2357f12e52fab643160610 +part_01852_of_04320.hdf5 3f796833bddfdc229fb5ee9ea94e2027 +part_01853_of_04320.hdf5 871ff313beb98c7fd94d2cb95f497ba3 +part_01854_of_04320.hdf5 1d25a5dc7003c12202f08a62409b31b5 +part_01855_of_04320.hdf5 81648a9fe165f1cea4cf4b61977dc925 +part_01856_of_04320.hdf5 622d21c4982858f9aa0d87031d6bd2b7 +part_01857_of_04320.hdf5 585646718419e3fa99e7299b2c70b1fd +part_01858_of_04320.hdf5 6dd691b4ef236d5da6946f4f214c8522 +part_01859_of_04320.hdf5 a9121855577c24a62a45a3f54dba063d +part_01860_of_04320.hdf5 46066380447beee717c52640d9935df3 +part_01861_of_04320.hdf5 b629e4fde7c216ffdb4d37858438e8fb +part_01862_of_04320.hdf5 7abf404e854e9dd1e5a4d46125c74d80 +part_01863_of_04320.hdf5 1c75d30b44eff64c2c4e7d358b1e43cb +part_01864_of_04320.hdf5 d47ba11b3cff0672dd6a786730fa8308 +part_01865_of_04320.hdf5 c12478b09309aad38b453aeaec0d333e +part_01866_of_04320.hdf5 e354cdb92802fe3a6fc2f4add2ee66bf +part_01867_of_04320.hdf5 59c9733651f434c1bdc6c67329739bd6 +part_01868_of_04320.hdf5 810bdd038e17680dfcb7997e4fb08ce1 +part_01869_of_04320.hdf5 8f41f247f69b5917d7a3ea0bc6aa4232 +part_01870_of_04320.hdf5 dfc222561ac7cc314e9d3c8329b088b9 +part_01871_of_04320.hdf5 1680ec952f4dda83167e8fc54694bf64 +part_01872_of_04320.hdf5 412f7b3df9e14e4d440e835f73f02687 +part_01873_of_04320.hdf5 8e3a2c32dc0f7de55a5eadab167fad9e +part_01874_of_04320.hdf5 4e1f56c36a55407b1d61f9b460a947aa +part_01875_of_04320.hdf5 5451eab9a5a7ef16a0d26250e21bb1c9 +part_01876_of_04320.hdf5 d85e2a520d0735def97e88c2c32093bc +part_01877_of_04320.hdf5 748161d584e0e9f0338bcd33bd76e092 +part_01878_of_04320.hdf5 24aab56379fc4285ce494ef25350b09c +part_01879_of_04320.hdf5 affa515b970a940e037b99820fb9f15c +part_01880_of_04320.hdf5 3268646399192aa48b9d049f3ddc4b3a +part_01881_of_04320.hdf5 3aaffb633ce8c55333a56271a0b5b6b3 +part_01882_of_04320.hdf5 1bb699aee2247f2607654be4d62326a0 +part_01883_of_04320.hdf5 f6363c37a959a97a124ec4d80c8c96ec +part_01884_of_04320.hdf5 a3c88b95e66f2afd78e587fe7687003a +part_01885_of_04320.hdf5 cacb49d63a0b61ccad8e2a9398f71d23 +part_01886_of_04320.hdf5 456a3f7b4604414e6db09e061211cd07 +part_01887_of_04320.hdf5 fa1e3a175f17e50fc945f34e6b9e0a4a +part_01888_of_04320.hdf5 1a23f9991ea736ebf15e802ef3a48b13 +part_01889_of_04320.hdf5 d9ce0d1b9a8e882b96d33731c5a7f95b +part_01890_of_04320.hdf5 beb6bd6fdb84c4f572543f8c70b4fcf5 +part_01891_of_04320.hdf5 bb4eacbd3a70a136231790a6124f6fbb +part_01892_of_04320.hdf5 5c82796167cce283c24112bab4a58984 +part_01893_of_04320.hdf5 1e7ef651337e01cf302940da53d2b86e +part_01894_of_04320.hdf5 a3e274261c3e133fbe4ba39456846da1 +part_01895_of_04320.hdf5 8aa4b29536c6d9f617621eb551755e17 +part_01896_of_04320.hdf5 1877afe37708e0e0ea4e937480bde304 +part_01897_of_04320.hdf5 194e286aa8745627b8653bdd3b8b13d7 +part_01898_of_04320.hdf5 51dfc212e59f3c836fa19e574e714ad3 +part_01899_of_04320.hdf5 6e630b9779d3eb41ce971d139cc00782 +part_01900_of_04320.hdf5 b6385131fd7052f08113ece773da33c3 +part_01901_of_04320.hdf5 808728e32b745df842b8eef13abb13b6 +part_01902_of_04320.hdf5 858ecb93f2e310524735197796a3af12 +part_01903_of_04320.hdf5 2bfaeaa6c2233591b859783987493ae2 +part_01904_of_04320.hdf5 c6314299f91e1fb046c7121497d1eabc +part_01905_of_04320.hdf5 7ce35858f2e91e27bacabed9d494ee8e +part_01906_of_04320.hdf5 f50786fc3c52969855176440c508c542 +part_01907_of_04320.hdf5 358359e0cf01425399e5ba5d66ba3c71 +part_01908_of_04320.hdf5 
5609dfcac5e274b329569dbcbb3ff01d +part_01909_of_04320.hdf5 e4efaddcfbcfddb47a133dff7fb25adc +part_01910_of_04320.hdf5 5d778d1fb027ce0da209bf46e701ddaa +part_01911_of_04320.hdf5 23575ff0c11cf37962b4665d2ba6495a +part_01912_of_04320.hdf5 e7a9258846e4e50449eddb79a6df5a8e +part_01913_of_04320.hdf5 82495912a1e80e45ee68cc054c612add +part_01914_of_04320.hdf5 4befa7897a33124f2d2b4e8f79e1599b +part_01915_of_04320.hdf5 a07641a0f0ac1f6d30294b534273aac8 +part_01916_of_04320.hdf5 b8489ca58a1e9738dea561717bcdd94f +part_01917_of_04320.hdf5 5881a792e4217f7195ad961b01c8afc9 +part_01918_of_04320.hdf5 67ef62d21ac95376c2fcb0ff3e968696 +part_01919_of_04320.hdf5 48522d4d9ce69ca3cf5209b9e4f26694 +part_01920_of_04320.hdf5 121af1f390c4e146742b80d8f40ab4da +part_01921_of_04320.hdf5 823b363c2fe66552d811d5ce301ba13f +part_01922_of_04320.hdf5 8dfcf95379f1afe2ed7e3d5f0f815748 +part_01923_of_04320.hdf5 b2b4043b19a4bfe20786930b1a97711e +part_01924_of_04320.hdf5 508d38b7aafa2864d9545a349b29da28 +part_01925_of_04320.hdf5 73de7cb5a08447d5441ba5292ad589e0 +part_01926_of_04320.hdf5 cc95ea08b58be6827a44f937fe4850d3 +part_01927_of_04320.hdf5 6cc6e7af7eb1eaaf9735c15931ceb703 +part_01928_of_04320.hdf5 0d89c715ede73515a91c8319b0b76a2a +part_01929_of_04320.hdf5 e981bf2e004870a2af813c94940070f5 +part_01930_of_04320.hdf5 712a86ac28d20d64d5908782ef97c579 +part_01931_of_04320.hdf5 0a64ac22453657fe9f824f9334d27f25 +part_01932_of_04320.hdf5 25008aadbff088be8cd04aaa3d7d08bf +part_01933_of_04320.hdf5 3915591c7ca1558000be2047c306bce3 +part_01934_of_04320.hdf5 3e5f2739b2e520489211b289807647de +part_01935_of_04320.hdf5 4a757ade5e350f39079fe0a03331a261 +part_01936_of_04320.hdf5 6239e641b9d9f3341b9ed202b8bd776d +part_01937_of_04320.hdf5 73f577d9568e02c1400b0b0c27fb13b9 +part_01938_of_04320.hdf5 7e3e26d6a9f1f9d48545428019bfc408 +part_01939_of_04320.hdf5 be856e039449c9e521f62edea0853238 +part_01940_of_04320.hdf5 19cb3324e8e9b0b010b0b9aede2edb38 +part_01941_of_04320.hdf5 9440b1cfbc1a970d60f843a86e3fcd21 +part_01942_of_04320.hdf5 c04ea8d0fc5d4f281c62852cb3a5b79b +part_01943_of_04320.hdf5 ccf39fabc8eb656d3a212cec9ec8d564 +part_01944_of_04320.hdf5 4aa730c4d5e6ecd0e034f6d754fc443e +part_01945_of_04320.hdf5 9c306c9185d3694cecf94de5dfe51450 +part_01946_of_04320.hdf5 f5d2b6577764e89019dc776d214f94e3 +part_01947_of_04320.hdf5 139b210d11d8188ad7e61c065043dadb +part_01948_of_04320.hdf5 c0303dce60b428141bac3c68b215b3c0 +part_01949_of_04320.hdf5 b4a06003640e9fd2cc8bbf5816b7c6cb +part_01950_of_04320.hdf5 47dd08dfd15ecdb802d3d213e9e1ff42 +part_01951_of_04320.hdf5 c19fc7ecd64f9dea26bbde9f4aac949e +part_01952_of_04320.hdf5 e60487788bdbae66482d3a563b2a8c29 +part_01953_of_04320.hdf5 f36322e4d98e997b518337ea5f50d783 +part_01954_of_04320.hdf5 6e0f8e9627f248e19e65b82fd1dd4260 +part_01955_of_04320.hdf5 623763ca8f980f200385b350c9ea79ad +part_01956_of_04320.hdf5 860430e85f5bd55520aa24fad6ce00fa +part_01957_of_04320.hdf5 40beb0f45294e67968382d8a9503c970 +part_01958_of_04320.hdf5 d97cb776fcd4ca801624e40bbc2ac865 +part_01959_of_04320.hdf5 cc09728379e4cc3169d3c47a1e9a84aa +part_01960_of_04320.hdf5 d348e9ad3d8435504e829246aac6e85f +part_01961_of_04320.hdf5 9f67519716f084367f7d576c6a3a7925 +part_01962_of_04320.hdf5 edcbde998a35cc0f5ff48428d5f914bf +part_01963_of_04320.hdf5 c45abdae3ecd129e34ec7c9199e5c922 +part_01964_of_04320.hdf5 3254286c7ec5d3911e6e469e1c5fbe70 +part_01965_of_04320.hdf5 563bea1149014e803126677a5adb81b8 +part_01966_of_04320.hdf5 1cd8ec35502e63242c078f0d773bd663 +part_01967_of_04320.hdf5 fb18ab42666679a07a18c74f6a3af4af +part_01968_of_04320.hdf5 
d08e79f6ea5bdfe2d0d7aedde2d49f3a +part_01969_of_04320.hdf5 9c88ff461f860172f8d74f397e35ccca +part_01970_of_04320.hdf5 978acc4e634caaffdf572ce9ca11faa6 +part_01971_of_04320.hdf5 712270fd90542c7400789c2bef49fb88 +part_01972_of_04320.hdf5 eeaa36b4a608219007af7977af69364e +part_01973_of_04320.hdf5 3325f4fbe7ff0c13119ec195bc245d1f +part_01974_of_04320.hdf5 a3f369bac6a651cffe487d3a43ebfbc8 +part_01975_of_04320.hdf5 722ea1b55092a2b9c0ccb513d7be4ca4 +part_01976_of_04320.hdf5 0a42be37dce26233131c97acd31b2bca +part_01977_of_04320.hdf5 c4a14a3e5d774378a57701e56712ef7d +part_01978_of_04320.hdf5 22c16c8702be86641d483c7c580e68c9 +part_01979_of_04320.hdf5 be1fb7e32063c93459447086724f5651 +part_01980_of_04320.hdf5 a94749307be5a75fd1eba377620d8188 +part_01981_of_04320.hdf5 bed861c1013e8f497fd47d92375e0f61 +part_01982_of_04320.hdf5 43f02cba5af795ce69f55f6b599d107e +part_01983_of_04320.hdf5 01679adfbb30c11a9dd874413241040c +part_01984_of_04320.hdf5 6aac1eab46bfb8b06f2ac09cc7551da5 +part_01985_of_04320.hdf5 0823c8b3b83b4cea018fbb20b2c72b95 +part_01986_of_04320.hdf5 a0b1cbd3a4dcac875222ee77714969ec +part_01987_of_04320.hdf5 a678227550487b8bca3f1757edd1427f +part_01988_of_04320.hdf5 94b499157f1fe6f8c98aa72c5e5da110 +part_01989_of_04320.hdf5 6def8970ef35851e1bf72373b2c17f40 +part_01990_of_04320.hdf5 a76fd748fd9b4c6cd5e669e43c25d3d5 +part_01991_of_04320.hdf5 07abdd76988e2c5617aa8694de82e412 +part_01992_of_04320.hdf5 9c979eb755ed90dc3a33af37aa30de36 +part_01993_of_04320.hdf5 5757d031305e0398b84f3c053e3ecdd7 +part_01994_of_04320.hdf5 6e67f3bfc4ce9142b5f052beb0b1812c +part_01995_of_04320.hdf5 9e9f1c6a183267783ccdede6ccb11e8e +part_01996_of_04320.hdf5 fcb34fd18132e57126f7bb8ec919dbcd +part_01997_of_04320.hdf5 0f5807e82f131c5e66f704873f5428db +part_01998_of_04320.hdf5 e272fd983614dfcfe1689a18b74296a8 +part_01999_of_04320.hdf5 de158890fa6e8c75be2ab0b73da198f8 +part_02000_of_04320.hdf5 76aac5d9d44e4c7737b13eaedd31b516 +part_02001_of_04320.hdf5 de101ffbb78e2cfdb08b2237fd01426d +part_02002_of_04320.hdf5 c9e0fcae090935af2bdcdab140751402 +part_02003_of_04320.hdf5 ff3ba8b9f66b88c89cd05d8c6863c138 +part_02004_of_04320.hdf5 9a042363b46ef5b841ce8e71b42dad69 +part_02005_of_04320.hdf5 444d238d19dfde63cf0cccc739154a1e +part_02006_of_04320.hdf5 0b4e9e69362aebd8c1983c8c35775154 +part_02007_of_04320.hdf5 7afc6ab0c0904bc650b4c4595b1bf07d +part_02008_of_04320.hdf5 d76d52045b406bd3ba0b655186c75892 +part_02009_of_04320.hdf5 04d7aa127c3ab20b12b1f2c3a02143e2 +part_02010_of_04320.hdf5 e9def7023d1d28aa5681f8f51e7a86ea +part_02011_of_04320.hdf5 853c393b593947f319af784e513c40ff +part_02012_of_04320.hdf5 e1e86ff5e5477cc3f4a7ea02d49d45fd +part_02013_of_04320.hdf5 ffc6499f02a6a229cb8c41819c8f8953 +part_02014_of_04320.hdf5 61b74dd0778162c6d40486a94fed2d86 +part_02015_of_04320.hdf5 3f48ab38d85d0cbc640b07325cf51460 +part_02016_of_04320.hdf5 04ba7783baddb2dc31cd97139b8ed5e2 +part_02017_of_04320.hdf5 a2deff15bdc39c8f1f18b909fd0fed6f +part_02018_of_04320.hdf5 73b8e6779c33055fe9eeb388b6b851fc +part_02019_of_04320.hdf5 9c4d34dfbbf5782bbee8852fdd6cbacd +part_02020_of_04320.hdf5 384e370371e7215281bf8ce4d84317c6 +part_02021_of_04320.hdf5 0b4bf220599440d632a033319fa5d7e6 +part_02022_of_04320.hdf5 99b09d9ebc3a65b7289a33458bfdccad +part_02023_of_04320.hdf5 98fa0dc5ebc02f10d42ae48c51ef58b6 +part_02024_of_04320.hdf5 afc269135538f4389bb4d66867d76324 +part_02025_of_04320.hdf5 c5b0501ca881f85f18810aa3a4197e89 +part_02026_of_04320.hdf5 a0a48bb0906c4110a2039c2b166e788a +part_02027_of_04320.hdf5 9c2438f9ae724d45432363a77978391b +part_02028_of_04320.hdf5 
7a86031b589202129c14a080306e2383 +part_02029_of_04320.hdf5 c7ee65c4050bbac20af4fd163584b7f8 +part_02030_of_04320.hdf5 df1086ac132c3a3bb9dda3b54c3d0540 +part_02031_of_04320.hdf5 251a27cbcf02b340ac1c4766eae19dec +part_02032_of_04320.hdf5 9015d17158364767a0a38481b3a71e70 +part_02033_of_04320.hdf5 3e433193f8c7b4c671e38afb70c3fd01 +part_02034_of_04320.hdf5 32f13902f3a10ededb925ee6a301763e +part_02035_of_04320.hdf5 483e90da158247aebcf0a4cfc06560b0 +part_02036_of_04320.hdf5 a74b7071954df41ba8b0559398cb5cfd +part_02037_of_04320.hdf5 601356f7da8ff9a064aa998db33c805a +part_02038_of_04320.hdf5 1438e3ce793e54fa4deb1e6ccf936aa8 +part_02039_of_04320.hdf5 796c42de238609c9f10a956913cf4bfd +part_02040_of_04320.hdf5 064e140d4704441c35c883766656c5ac +part_02041_of_04320.hdf5 323c64b1eaa3df42da3f8af5e3b67245 +part_02042_of_04320.hdf5 6c31b7d3415a5497b0d58cc3fdd84445 +part_02043_of_04320.hdf5 88b11d44e12eb0dbb6bfd7636dde702f +part_02044_of_04320.hdf5 c8a70a6b6b8fcf98919de94edf8d841d +part_02045_of_04320.hdf5 3a665edbb6d9c67ddbdcdf695f286530 +part_02046_of_04320.hdf5 167d9e885b62f4edb2a9f478dc1fa4b6 +part_02047_of_04320.hdf5 80274ca1f9a763217ee08d4b2cbc2c71 +part_02048_of_04320.hdf5 5b26e544e752b647653979bf7dbdcf51 +part_02049_of_04320.hdf5 ef078912598daaa43a3c0aef84becb4d +part_02050_of_04320.hdf5 244dccfdc641e1dce4a9f1e62c1a7fc0 +part_02051_of_04320.hdf5 bba973b80a4caf817d1571d2871b65f7 +part_02052_of_04320.hdf5 5cfb33c5a5454adbc8017c965f796edc +part_02053_of_04320.hdf5 1e7565d3fd28a577e8e8ce2b8f52b280 +part_02054_of_04320.hdf5 d882ea04aaee89ff714f33b0dce2aa6c +part_02055_of_04320.hdf5 e7c91dcf5933bc1aa51a3d38c539fdae +part_02056_of_04320.hdf5 0433fc8eac3a32c917762a727263a208 +part_02057_of_04320.hdf5 34e945631a03fdf308945a974c24241a +part_02058_of_04320.hdf5 026cb6331d8e00dbd9c2daeb10be3440 +part_02059_of_04320.hdf5 c67ae07d9a968e42a745f0a4ad8d2174 +part_02060_of_04320.hdf5 8caab2cd003d088fbd1d6b763c7efb66 +part_02061_of_04320.hdf5 c1b5b335d47a7831d8c15c05afd278ae +part_02062_of_04320.hdf5 6a7b1811de76ea6f926276f95895fd45 +part_02063_of_04320.hdf5 6f73c6779a35a759b4934dc86a7e8413 +part_02064_of_04320.hdf5 23a6504536ce98e32cc38a86cad0970d +part_02065_of_04320.hdf5 e17f4dfb1f031f38099a44572f0afe7c +part_02066_of_04320.hdf5 7296c9b1d8814960a2ccde2a1c23846f +part_02067_of_04320.hdf5 6994bcdf0a88a8516789fd55fbe9a043 +part_02068_of_04320.hdf5 2369b66028471344307558e9dc069914 +part_02069_of_04320.hdf5 1055aa25239328ec70c795ab332b2e8e +part_02070_of_04320.hdf5 bb7b7a796018e7ec2b91f495258f62ad +part_02071_of_04320.hdf5 721c6b0ccefa7b3cd7c15b78ed66c659 +part_02072_of_04320.hdf5 ee3788d736f2699df502e92b896d318c +part_02073_of_04320.hdf5 cb43d0f38164fe3baf211467290da225 +part_02074_of_04320.hdf5 01b31badfe40ed7a3733c35a11aa6034 +part_02075_of_04320.hdf5 e6350a1378b085ed699235fb851acb3c +part_02076_of_04320.hdf5 6088dcbaf33de3f0b0651d18600cdf51 +part_02077_of_04320.hdf5 a75c9a9ecaff412ea1590779311cdc95 +part_02078_of_04320.hdf5 e4a957ce2167a9040ecf74379fd06224 +part_02079_of_04320.hdf5 37f0608578435151fd9637683cb236b7 +part_02080_of_04320.hdf5 96c72b827b8701a058f8d7722998f132 +part_02081_of_04320.hdf5 ee1221aae7c552a69117a9314c1a770d +part_02082_of_04320.hdf5 60ed91247463886968163759118ce5fc +part_02083_of_04320.hdf5 d6a8ddebddea21e894a6437201564d15 +part_02084_of_04320.hdf5 7ab3f041df895fa463c5449b1eb840d4 +part_02085_of_04320.hdf5 b69987e7bc42c7c57487e03b87f78c4d +part_02086_of_04320.hdf5 32d7572e9d48a308f9990474a9a996b6 +part_02087_of_04320.hdf5 92ba51dc8cde2d180781d343cb1b2182 +part_02088_of_04320.hdf5 
a4140415be708b0df1dd5218bd1e021b +part_02089_of_04320.hdf5 375e7c4f7f58c6fd9200baa1c75a469a +part_02090_of_04320.hdf5 89c06db38f79328309bc4807c4217c4e +part_02091_of_04320.hdf5 0a181d42b618e7cc7e674d0bdd2d890d +part_02092_of_04320.hdf5 4c7d10464f4a68c20ccce2d577924560 +part_02093_of_04320.hdf5 0d7804219e12f8f11d417cda9ece7c9d +part_02094_of_04320.hdf5 709ae007d89d54386587dd2bfd045bc4 +part_02095_of_04320.hdf5 f091cbf1f0002697e4fd9c6efaba08be +part_02096_of_04320.hdf5 b5a3cb85b07fa4d438206291b8b30b77 +part_02097_of_04320.hdf5 c6057115cbf9b45afa21202f35548270 +part_02098_of_04320.hdf5 835ccd793239f96f25d8d05b9dd6cc7b +part_02099_of_04320.hdf5 77a3aab5bdcd159ba4ee0765341e7580 +part_02100_of_04320.hdf5 cc8785bc3f76f60dd16e0815a67bc604 +part_02101_of_04320.hdf5 8440e479cbc84341a0addbec7dbb1885 +part_02102_of_04320.hdf5 9593ebcee4a0abe553fd8e47bac7e663 +part_02103_of_04320.hdf5 7995be9bea0fe854e4c0265fd9f1b400 +part_02104_of_04320.hdf5 ab0fc2d0c745f907638412a389d489fd +part_02105_of_04320.hdf5 a988b23b69401db5e348387909c04318 +part_02106_of_04320.hdf5 ed5017f26b07e92b2c7aaf3bd132a727 +part_02107_of_04320.hdf5 3df8ca14013503cb9666dfa61b2da3aa +part_02108_of_04320.hdf5 a3f21c0fc5f5b60238e3307f4103f69c +part_02109_of_04320.hdf5 1141087f485bac0b1908f061b9464616 +part_02110_of_04320.hdf5 375e1f7de8df9768d71b9ef232289e09 +part_02111_of_04320.hdf5 8ac9827359aca6621f6d260dc20b4c15 +part_02112_of_04320.hdf5 1215bbfba267f2cf7da2e6e8519642b7 +part_02113_of_04320.hdf5 e518e818f543633441844077cae2540d +part_02114_of_04320.hdf5 e2a3d56bf8d5faa265d7e77ec38475b5 +part_02115_of_04320.hdf5 21c20fd696cdb5d3c30bd4a4fd31428a +part_02116_of_04320.hdf5 8abc6175c305d04fd92f5a4288e285ef +part_02117_of_04320.hdf5 100a145eecc03c369d60e6ce72112bef +part_02118_of_04320.hdf5 2c8e63987d62a725062d1f30b0d63777 +part_02119_of_04320.hdf5 1fd5e6fd0732bd1b063771b49a0aa1d6 +part_02120_of_04320.hdf5 faa2bef348cadde865ef9eadb996d885 +part_02121_of_04320.hdf5 a5cd85b3bff9fe04cd76eab727c1a750 +part_02122_of_04320.hdf5 adcc9e985e7a05fa4b9d0881d1b975c8 +part_02123_of_04320.hdf5 21886ea99ffdc86c6f7682fea5545eec +part_02124_of_04320.hdf5 d55b247e96cf6b7e491ee64cff267602 +part_02125_of_04320.hdf5 bcfd6e1df8b600559522450a493bac5c +part_02126_of_04320.hdf5 5dad146759a28f09d580ba52ed3de9b0 +part_02127_of_04320.hdf5 e63c637c83921f40e41de724918f0be9 +part_02128_of_04320.hdf5 b96e40f7c4330cfc492eb22a9ab9cbe9 +part_02129_of_04320.hdf5 74688e03353c81264cb3107770c45cf8 +part_02130_of_04320.hdf5 12f03016d5b0823623e175dab8a88e61 +part_02131_of_04320.hdf5 0c0c2bbbd378e20706ae7bb8b063a98d +part_02132_of_04320.hdf5 b9ac133d31634088b3a6ec3ede229390 +part_02133_of_04320.hdf5 499b46de044efd271ec88c857ba1d920 +part_02134_of_04320.hdf5 b5ca14e53e6c00bf64724d86bf0c1704 +part_02135_of_04320.hdf5 1cc85794d58e468fba94a601dd11eaef +part_02136_of_04320.hdf5 50d69f20e0e18efe1ed03667388564b4 +part_02137_of_04320.hdf5 e62885a68ecd7b4a57f7c0c7bffccc75 +part_02138_of_04320.hdf5 c1fa260362d12946d8a9f817a682051e +part_02139_of_04320.hdf5 b4c17385c221134f65177b7595ae901d +part_02140_of_04320.hdf5 bb8e24debc2a2f446f046d4ad7d87077 +part_02141_of_04320.hdf5 011c87f663b446be9a7ec9c868a0ae68 +part_02142_of_04320.hdf5 0e82ad99c548f014eac6691f3bf7d868 +part_02143_of_04320.hdf5 eb8366b6ae3fc5d7f85556c406efde53 +part_02144_of_04320.hdf5 804875df8f76383e2d7707741a56667b +part_02145_of_04320.hdf5 869a432eabed268f5ac2c48111406576 +part_02146_of_04320.hdf5 3cce59b91684dcec94fc017b50736d2d +part_02147_of_04320.hdf5 06c7dd8952ea3677cc0ee5eb5d0e8744 +part_02148_of_04320.hdf5 
d76aa6801a0c9716bd134670528257b8 +part_02149_of_04320.hdf5 108361278136fe06c2a623b323955c77 +part_02150_of_04320.hdf5 24aec13f051c9d886efea1c3db963769 +part_02151_of_04320.hdf5 21e1bc557cf2b9e9292d1ab3a7795714 +part_02152_of_04320.hdf5 34ee62cfb903fd1d97dcfc1d86fc8af0 +part_02153_of_04320.hdf5 787751028ad904e181654be2ee358bc8 +part_02154_of_04320.hdf5 be6001c9882707b84dd77b4effd8efb2 +part_02155_of_04320.hdf5 fe61b68ab6c8d49d0448be14fef0a3a3 +part_02156_of_04320.hdf5 708495db6560c6b5be322c952152ecca +part_02157_of_04320.hdf5 f7184c5769f85923c9e547832016c2c5 +part_02158_of_04320.hdf5 8d2bac1e9465651e68ab124af76a1bc8 +part_02159_of_04320.hdf5 f0746de9bece83f2ea84ff7e5f08182e +part_02160_of_04320.hdf5 36e9a9fc6c6ef84461b499ac36141f0c +part_02161_of_04320.hdf5 1dae4847ab866f74852063622f58dc50 +part_02162_of_04320.hdf5 fb21fb59b775d3d76b5ff9055ff75685 +part_02163_of_04320.hdf5 3a3e681f8291c32f2aa1483f282d6a3d +part_02164_of_04320.hdf5 daf875d07190c334727f4a2c24eca4cd +part_02165_of_04320.hdf5 28f720ee00bc1f632cd8edb27f7c1dfc +part_02166_of_04320.hdf5 5dd949fe6fa83e43e946287793f91e0e +part_02167_of_04320.hdf5 5ff87a63e17d4e8c8a1e324c9a2d31e6 +part_02168_of_04320.hdf5 38c5823f90db070ae97422cbcb013e5b +part_02169_of_04320.hdf5 3424fea77f91fbb8f3b3157fe56e840c +part_02170_of_04320.hdf5 4068717499156c602f8a61d7162f14f2 +part_02171_of_04320.hdf5 25ff168349d030766bb7ce3c0ec8ba84 +part_02172_of_04320.hdf5 4293b537fe08f137484f36b40117fa54 +part_02173_of_04320.hdf5 fcb9a201b9e898c9b72f10f44948a4a2 +part_02174_of_04320.hdf5 b143944cfe4ba58a438c75adae358189 +part_02175_of_04320.hdf5 a65aec2cdef241264aff104247c4dd2c +part_02176_of_04320.hdf5 338bcb78d335fe290913f22120edf66b +part_02177_of_04320.hdf5 c2a699a00ca22c5ecdaa2aa55f0de779 +part_02178_of_04320.hdf5 176a5919bd9b8fdc6f9113bd00102300 +part_02179_of_04320.hdf5 30d71e1e5221fb93b30670467e7e053b +part_02180_of_04320.hdf5 496f89c120751cb71535eda202c9c4b9 +part_02181_of_04320.hdf5 e9de0a21a72f129ffc51294ee88eac67 +part_02182_of_04320.hdf5 2d320ae7df0dca4988c40a854bf74bfd +part_02183_of_04320.hdf5 c823e861571b13fc2fa2cfcb8c6eb243 +part_02184_of_04320.hdf5 8b96a2ce61e87fd816500da9020f5c34 +part_02185_of_04320.hdf5 39ec93c0f9cf8ee7e82ef8e5880aa93c +part_02186_of_04320.hdf5 80d09c430751c44c557c3f8d3359503c +part_02187_of_04320.hdf5 0431f009fb69db4aff032746336430d9 +part_02188_of_04320.hdf5 e763208019dfb831336bce5a51db6999 +part_02189_of_04320.hdf5 db7163c4fb682ed1903021c9c79c6bd0 +part_02190_of_04320.hdf5 cc5e9f72ee880ef782532d9e961b5418 +part_02191_of_04320.hdf5 bacbd9a67baf1b2c37439a2c51be8e64 +part_02192_of_04320.hdf5 3882a5c4177b56116984e5fe8b0b34a3 +part_02193_of_04320.hdf5 690970131020799bc26dffc9b5ec65d6 +part_02194_of_04320.hdf5 271e87e06819534ca1f2550cd0d3f5da +part_02195_of_04320.hdf5 e1a33bd8ae7acad3bcce4ad6db365288 +part_02196_of_04320.hdf5 452562ed7fd7371b7bc96bb22f4f31a3 +part_02197_of_04320.hdf5 2e4259b76ef2b4c62ce481b4d38fc145 +part_02198_of_04320.hdf5 39e969f5873317b88978921f1eab419d +part_02199_of_04320.hdf5 22776fa618307292b7e79c61006db2e9 +part_02200_of_04320.hdf5 30ca3644f26c99710cb9dcf33c2b3638 +part_02201_of_04320.hdf5 fdb25b0fd198a041a63d1fcd4e3c1e12 +part_02202_of_04320.hdf5 eb3ba00936caa0d3d6b61af48d67ebdf +part_02203_of_04320.hdf5 c6d679c1baa3eac867a47c4f99835b8f +part_02204_of_04320.hdf5 55d4c2db2652a93a8bf814b0501ac00e +part_02205_of_04320.hdf5 3264d75eab8293a43d9a5e748885271b +part_02206_of_04320.hdf5 1b9ca0e81b442c9116ecc965471ca52a +part_02207_of_04320.hdf5 57b71d8988b8536c1655b6fafbe3e08a +part_02208_of_04320.hdf5 
79f70d1d633c631dacbaef6ae26bef3a +part_02209_of_04320.hdf5 867a726597229c3cf0a76d12c6462adf +part_02210_of_04320.hdf5 2b2890ae005b11792fe2c6236d3577f6 +part_02211_of_04320.hdf5 e506c00c3c45f49e7bcfaec79d6fd99c +part_02212_of_04320.hdf5 f4d85b769c691714f8bb699925054394 +part_02213_of_04320.hdf5 5d48d98925c350dc9d96f7f060110214 +part_02214_of_04320.hdf5 9a2220d5b5ab3e44afb58e97efda8487 +part_02215_of_04320.hdf5 175a73d126311ac906a18f9e9a3252e8 +part_02216_of_04320.hdf5 378c378874cadb9d44de7b9e6c1ca739 +part_02217_of_04320.hdf5 48340e7e636827e633a827b3a7346321 +part_02218_of_04320.hdf5 c8a02832bdfce8c78167188482f3bbaa +part_02219_of_04320.hdf5 68f14a0dd263c80c9f3274e92ea4e38d +part_02220_of_04320.hdf5 d7532d363d0fbc6169e61b895811bd90 +part_02221_of_04320.hdf5 9fd7866bac01905d9e8e955d21656a35 +part_02222_of_04320.hdf5 850d08918f35c83e9faa38854f526e15 +part_02223_of_04320.hdf5 2ca4d319e80712704ad06c6878b8ef97 +part_02224_of_04320.hdf5 7fc30b3d6cccf2c363b473d8b4d969bf +part_02225_of_04320.hdf5 c35a852aff21442402e4dedbbbf33e31 +part_02226_of_04320.hdf5 5aa626f0c09dbd71148c17b5fb96c787 +part_02227_of_04320.hdf5 d4acec0b1194d9406b1a74a7153de982 +part_02228_of_04320.hdf5 9477ffdc8e5bf8a7c4b8c702cb9fd60f +part_02229_of_04320.hdf5 8790cdda1084180d694fe5358da1300d +part_02230_of_04320.hdf5 75e0910f31dfc8c169dc68a7efdbecee +part_02231_of_04320.hdf5 a26e7a38a7472b7ebd180c1a2f824cf8 +part_02232_of_04320.hdf5 40fc0029860a00f92c641487006be5c1 +part_02233_of_04320.hdf5 67d9d759838cd0d89b25a08afe71feb5 +part_02234_of_04320.hdf5 d7403bfd037200dd2973f0184fc5c381 +part_02235_of_04320.hdf5 9eb20842ceca246a11b209e031d90d19 +part_02236_of_04320.hdf5 2d181dee048ef5496a1390659af02657 +part_02237_of_04320.hdf5 8ac589fbff3c085294a60c4aa5ceac00 +part_02238_of_04320.hdf5 0d01655692c6092723e2e6f4666f8f54 +part_02239_of_04320.hdf5 c0bf1d6fac0712dcefb1907720548409 +part_02240_of_04320.hdf5 a3cdbafdec0e885b73db0866bc0a2093 +part_02241_of_04320.hdf5 be22058570d43405f921e9398dd8d1ec +part_02242_of_04320.hdf5 f34e3d7514a47f2b68e343871135fc78 +part_02243_of_04320.hdf5 06c3fe2041bdfc21daab59cf8cdc097b +part_02244_of_04320.hdf5 7cecd3c0360255abfc126eedf70294d7 +part_02245_of_04320.hdf5 e5806b0ce8c87635f543cae3a4a7a141 +part_02246_of_04320.hdf5 2912dbe4cda02d93a76d68c16bfcf954 +part_02247_of_04320.hdf5 db0bc5719656d19ac7833f1e10fd8409 +part_02248_of_04320.hdf5 79fba5575f2fe5c30b88952289c16179 +part_02249_of_04320.hdf5 c1f7d72f4022bbca6b562b57c3453654 +part_02250_of_04320.hdf5 0399592a73e98f512f642d5dc7a909f4 +part_02251_of_04320.hdf5 792d2ad699dc97439eb7c1b6a4e1bacb +part_02252_of_04320.hdf5 df2e999ea5f59ab39c4fe733f21c5244 +part_02253_of_04320.hdf5 d0da07be0eef6c36adaf6c2c7d916b9c +part_02254_of_04320.hdf5 798b99c453a065d7762412e8d4acb894 +part_02255_of_04320.hdf5 db128c183869e0bb93338ac7918765aa +part_02256_of_04320.hdf5 10e5f2821a8c6472e52835a7bd2fd007 +part_02257_of_04320.hdf5 c9c79c9baaf4a3a8c273760ae44c84ab +part_02258_of_04320.hdf5 db404c91085508033b75ca9593f5da34 +part_02259_of_04320.hdf5 33f41879d82596b9367152016327bfdf +part_02260_of_04320.hdf5 9ddf2ce02bd6e53237c4a8891238af75 +part_02261_of_04320.hdf5 13073fd2a3870831e116b85d39890b76 +part_02262_of_04320.hdf5 0528c325bc0e338761c71ed21739e691 +part_02263_of_04320.hdf5 717dab4e7e6016a1a3bbbf7e018de44c +part_02264_of_04320.hdf5 ebf5fa80d48fdfca07c9bf70d9703b05 +part_02265_of_04320.hdf5 c68ee6749d341bee92505e4049958abf +part_02266_of_04320.hdf5 763afc85390bc9c74575a1c4d95da5e1 +part_02267_of_04320.hdf5 ca3b20687191b184a683f34d9a5da5be +part_02268_of_04320.hdf5 
91e6d5550103d3f160f4dfa81ae3e34f +part_02269_of_04320.hdf5 f39c09b8827748873ae2aa6e64606165 +part_02270_of_04320.hdf5 bbc4fa01090eced3dc0c16825afb1b7a +part_02271_of_04320.hdf5 ad8a81d7b86dcdf2a95f1691108c1e39 +part_02272_of_04320.hdf5 6ea1bc6d07d860690adc794cdb0d58b7 +part_02273_of_04320.hdf5 32dbda0982cf9bec31c51c8483a234a0 +part_02274_of_04320.hdf5 c3cbfbc3342b1e027528b0c2eb9c404b +part_02275_of_04320.hdf5 ba781768f0edc6b7c74f4f6b06c48c14 +part_02276_of_04320.hdf5 b07c4939dc66e03b2d1b584d2110b483 +part_02277_of_04320.hdf5 76bec46ed030c226196f1fd4bc0af2c0 +part_02278_of_04320.hdf5 0cc180a31e4f37f77461cecf8dc52831 +part_02279_of_04320.hdf5 d58d89b32fca98a7d9fc03c1226da670 +part_02280_of_04320.hdf5 aafac64764003808b5f8469b072e04e4 +part_02281_of_04320.hdf5 0909134df98cdd1c5b877c2317049310 +part_02282_of_04320.hdf5 2d56495b5aa09ae33725c284acf2da8c +part_02283_of_04320.hdf5 784b189f320c3ca29b8b97dd78102fff +part_02284_of_04320.hdf5 258fd4f149e7682b1e73919a76cd1766 +part_02285_of_04320.hdf5 fb8c0877ddbf16bb9c99a71ca64c0aed +part_02286_of_04320.hdf5 507b3fe9d3b67964709aa21c30871ec4 +part_02287_of_04320.hdf5 fbecf7f30bb673df05a21bd1b865de4b +part_02288_of_04320.hdf5 f1f93ad5933a32d37f35dec1273cea4e +part_02289_of_04320.hdf5 c86763e7b510c5f7beba53e965f6bf64 +part_02290_of_04320.hdf5 44ded8e43b4660e4e5cf8cccd2ca59cd +part_02291_of_04320.hdf5 1953ec29241b2dc7154f1ff0c538a4d1 +part_02292_of_04320.hdf5 e477fd92b3cd557b863953e39265fe09 +part_02293_of_04320.hdf5 a2c4e953c9db0192c9cbd6b3406d5bca +part_02294_of_04320.hdf5 6ae9fddc80c7990d7f83b7a95a27bb47 +part_02295_of_04320.hdf5 a147b71198876dcb60ab7a0eff69f436 +part_02296_of_04320.hdf5 2e686f8b2077673b62d48bc3426eb409 +part_02297_of_04320.hdf5 7e1dee09232b2387b2d3146b646720b3 +part_02298_of_04320.hdf5 7e84f1a3ecb3e508dcff53cb501c7e42 +part_02299_of_04320.hdf5 880b9f6a9b62fe7ea4d4a2408111f9e3 +part_02300_of_04320.hdf5 369d2fa0d3bd767cdaf59bcc0d3ea1a3 +part_02301_of_04320.hdf5 84678dc0f593630d0356ab76bfb5c505 +part_02302_of_04320.hdf5 25c1bfcbc432dd1c0fe9a00d62b88c0e +part_02303_of_04320.hdf5 478fe28e514262b78b9798d8c6a7c42e +part_02304_of_04320.hdf5 b23cbc3d526cbbce1d0451d8357f4953 +part_02305_of_04320.hdf5 8810d8683f8d325aed01bc2cb2cca90b +part_02306_of_04320.hdf5 066c67aff9c5f6d918e580070288d5c5 +part_02307_of_04320.hdf5 d66776103a8a41a9b5b1efed1efae49d +part_02308_of_04320.hdf5 e071c5b73a3476b904469d8dd1c3da1d +part_02309_of_04320.hdf5 48ebca04d6f59d71e69f2c51bc713b1f +part_02310_of_04320.hdf5 8d3b0d86d9f832337e24126d7e60d8aa +part_02311_of_04320.hdf5 cd8643b1319de76903cda24f3cb472c0 +part_02312_of_04320.hdf5 e0d5c0069db1d166e19685a58801a9cb +part_02313_of_04320.hdf5 2033c5b9cc021512b33fd3b24fb1f442 +part_02314_of_04320.hdf5 70a916f52eafca43269d012cb5bafbc8 +part_02315_of_04320.hdf5 e67ef839391d5f553ec68a929b82d28d +part_02316_of_04320.hdf5 b83af2b141f9e3acaac862df0102274e +part_02317_of_04320.hdf5 12465ef6cf1f81adbbae1145ae5ef0b3 +part_02318_of_04320.hdf5 d43a809d5c72fd4802ce87ddc166ba2b +part_02319_of_04320.hdf5 b505a1f77db00d3ee90a9286f3171672 +part_02320_of_04320.hdf5 94f5c890aba94fce1d78bc38e38b6ca7 +part_02321_of_04320.hdf5 dacb14807db150d614ed11cbe18902bc +part_02322_of_04320.hdf5 8bf876cdeff05dac7e601257b178f4d6 +part_02323_of_04320.hdf5 17eaea903ac089d04a231f22b8b3f086 +part_02324_of_04320.hdf5 d73fbb9e97447fec22aa8d3851baeeee +part_02325_of_04320.hdf5 49772fe37a9790e91afbffa93ec0a499 +part_02326_of_04320.hdf5 179c126cb8049efb1c143c20778d40b8 +part_02327_of_04320.hdf5 8a7608e83ebb2a1eeb15296b10dc5782 +part_02328_of_04320.hdf5 
2098a32c5be7ecb070aadf1cd900957f +part_02329_of_04320.hdf5 54b78e1b142045c487b6394c62f292a7 +part_02330_of_04320.hdf5 afacd1c544782d9e3c1c1256afac4e2e +part_02331_of_04320.hdf5 d063b481cd2f297d669bc93a78ab664a +part_02332_of_04320.hdf5 c0e0b78a068d9fba74e29178236c47b6 +part_02333_of_04320.hdf5 e231494243c9f333e30fbebb60b1ccd0 +part_02334_of_04320.hdf5 0e4b6874f0b33bd0b4508bced8560724 +part_02335_of_04320.hdf5 ee445e300bfbb26a0af85ff357535b7c +part_02336_of_04320.hdf5 3dfa81e7d60a176210123d97bc0c9216 +part_02337_of_04320.hdf5 622412ea5dd2687ac5bc6f1536734905 +part_02338_of_04320.hdf5 f3b36977c465c60849db89e95a5af8c2 +part_02339_of_04320.hdf5 39f55582bf674c76abe92d69eb0ca282 +part_02340_of_04320.hdf5 34949991667f94a2236629896d52cd35 +part_02341_of_04320.hdf5 c13cb2d3cd4f52ed5f98fbee5f5c96b8 +part_02342_of_04320.hdf5 63526d948cd9268162d5061c6180689f +part_02343_of_04320.hdf5 f175ca83f02342d152849e29e2482f78 +part_02344_of_04320.hdf5 23fbd280c52010a001510623389501ea +part_02345_of_04320.hdf5 9cab064f94c1319e6d551797a8189616 +part_02346_of_04320.hdf5 bd6530aec2b4d990eb9f8e241553cf7e +part_02347_of_04320.hdf5 ea6ebc2fdb6ccff43053c076a23756a0 +part_02348_of_04320.hdf5 eb4ae8d94511205edebbe252def66cf4 +part_02349_of_04320.hdf5 8b95ab227700d592715d537bf87e1896 +part_02350_of_04320.hdf5 712add3d4eb873da959dc4b2e579db47 +part_02351_of_04320.hdf5 73c054f8ea657694066248c27fb9ec62 +part_02352_of_04320.hdf5 eac586ea2a6268c58b16425072589d64 +part_02353_of_04320.hdf5 e0cf8e2e48a3aca9de36f272ddc895af +part_02354_of_04320.hdf5 f5e1c095691e84761f3a3d6fdb848e48 +part_02355_of_04320.hdf5 5dafd39914f0df02f3bf2be30f0352c8 +part_02356_of_04320.hdf5 e894f4f045c9ba5ad4ee967f23ed4d88 +part_02357_of_04320.hdf5 26fbadef3fbdc78f3c4744fed7a3255b +part_02358_of_04320.hdf5 d1a2109e8bcaf4ed471205b2f53c9f53 +part_02359_of_04320.hdf5 5c142d0aedf20ecd50591282d75dcd91 +part_02360_of_04320.hdf5 0a19d83160134505cf52763a7cbc1ac3 +part_02361_of_04320.hdf5 0e1d55c6088189931d33c43e975c62b2 +part_02362_of_04320.hdf5 af48d610ce188ca2a1093f85135fe318 +part_02363_of_04320.hdf5 637d3f253b23806fcd2467ec37cb5ca7 +part_02364_of_04320.hdf5 aac684dc8a95645bde7d08143b53178c +part_02365_of_04320.hdf5 b9c4276b17154374b655fa53c764708e +part_02366_of_04320.hdf5 845764797ed1b2f14cf3227645ad3f21 +part_02367_of_04320.hdf5 c03d1e310a9a21eb28ddfcb9c4b0bb81 +part_02368_of_04320.hdf5 6ae767243579076225dff4fc2d2d56da +part_02369_of_04320.hdf5 3c343a2a46a92d077d21efea0fa99a44 +part_02370_of_04320.hdf5 3d0c590495b3d7d39544c2fe82bdf850 +part_02371_of_04320.hdf5 763807706e0cb44c5489f43caed0bd7c +part_02372_of_04320.hdf5 db0b1b28594e3a481b459ec03df26e29 +part_02373_of_04320.hdf5 15979ce517dbcbe686e84cfed7d212b3 +part_02374_of_04320.hdf5 14329c3d3ee7209d1de51f1fa49db68c +part_02375_of_04320.hdf5 226c79d97aa85c330671ae1fb9ec2355 +part_02376_of_04320.hdf5 d21f0647a8c15c7da5bb9a4782b8b333 +part_02377_of_04320.hdf5 9123d40440eea3a582ad9d6acc173acb +part_02378_of_04320.hdf5 603bc50360f6e906ebd5bc46745fb6f9 +part_02379_of_04320.hdf5 639f4142bff2236af7a958f0fafe6eda +part_02380_of_04320.hdf5 2ef68f66efe6d6c070bcb863079adf12 +part_02381_of_04320.hdf5 d89ef01d42fbf0ded79fa0fc960eafa6 +part_02382_of_04320.hdf5 c633a2320c81a7d7c7c4c1973c97ee24 +part_02383_of_04320.hdf5 f6f2bdc29f24a42aa51c5efc11abcc86 +part_02384_of_04320.hdf5 dbb874a2ed93af8666e0fc87f4724869 +part_02385_of_04320.hdf5 8666aba075e87d01802206f804478204 +part_02386_of_04320.hdf5 3d88866a2cc4f8e20638644e6d6402c5 +part_02387_of_04320.hdf5 fe329edeadc5d363bb671d0d628175c0 +part_02388_of_04320.hdf5 
013780866758161419f92dec6df34720 +part_02389_of_04320.hdf5 82e7709f7e66180bc37c81bce5aea4f4 +part_02390_of_04320.hdf5 9c33f52b24ed4f139b40e579a2883b1e +part_02391_of_04320.hdf5 712eb79aa5e617ff735917554b093be0 +part_02392_of_04320.hdf5 101a338c432ad5ef31d1e1787e7cdefc +part_02393_of_04320.hdf5 8fd6d0d8013ace43022d8a6a5f3fd473 +part_02394_of_04320.hdf5 311a44aa32b5617c4b3d1c2cae47bd7a +part_02395_of_04320.hdf5 0497015e87cd40db318d51497d8a261f +part_02396_of_04320.hdf5 c8a46205a80bb8e94561846e5bda4a15 +part_02397_of_04320.hdf5 23698f9052defe1b8e1dd5d398702918 +part_02398_of_04320.hdf5 d68c6d2d9bd17719d5cbd2a4d77ee7ed +part_02399_of_04320.hdf5 29e92065ebdb0a5a61944cbc47c18932 +part_02400_of_04320.hdf5 bb2d31ba3d05186de74130dbaa5d6557 +part_02401_of_04320.hdf5 874b4a2b07993c3446688ca1042d2964 +part_02402_of_04320.hdf5 867d50ce2ca6adc80facec76e181c2f8 +part_02403_of_04320.hdf5 6176242b8761d119f0a0ff7b24a28614 +part_02404_of_04320.hdf5 99245dc9d5754e1be9e581857813568c +part_02405_of_04320.hdf5 9476966a597c25c8c410f5b60e2a9c2f +part_02406_of_04320.hdf5 f5e7b2f862d7781d48209ac143f33609 +part_02407_of_04320.hdf5 286622e9611a98abfc13c61460993048 +part_02408_of_04320.hdf5 3ff753b3142a56c12eca429ccd1c6878 +part_02409_of_04320.hdf5 71f5ae73517e5fac3580c33ce890076c +part_02410_of_04320.hdf5 046550bccb41d7ff94a4417036656247 +part_02411_of_04320.hdf5 909a7fa08bfac550170aea15e8f9c2c3 +part_02412_of_04320.hdf5 fbb57944ad8c52dc329d0e4dbadf8ba9 +part_02413_of_04320.hdf5 79865df7bfca3abff64cc39727beb257 +part_02414_of_04320.hdf5 fdcbc10b1102edebc9b696cefb4b8922 +part_02415_of_04320.hdf5 3e47a00c16fad5501b57c8013c5a5dd8 +part_02416_of_04320.hdf5 c6d895c5d2de333424a7dba52098f27d +part_02417_of_04320.hdf5 21851d63342d767b833174ec16d8d4f4 +part_02418_of_04320.hdf5 dcfcb565ffa2b894236e2a3c44e3321a +part_02419_of_04320.hdf5 01650a0007daa651556dbbf948caddf4 +part_02420_of_04320.hdf5 bc7f60844c7cfb99807d73d17577080f +part_02421_of_04320.hdf5 3d360399cf31dc92ab43aa4649ffea20 +part_02422_of_04320.hdf5 794d789d4a0b32e2372a69994782e3b7 +part_02423_of_04320.hdf5 921fb97e9e618d8bfb4c8e2d1400bdb1 +part_02424_of_04320.hdf5 cfef7da4a125dc8c8a23d197a55d6746 +part_02425_of_04320.hdf5 54c30f9140f6f16ab4f20ad1f019dc3a +part_02426_of_04320.hdf5 63586e22851e996a28eee4753a8980b2 +part_02427_of_04320.hdf5 9e4188cdabf9d6a6c11bc04df658de37 +part_02428_of_04320.hdf5 548badfa1b65232c5195dfde62201ae1 +part_02429_of_04320.hdf5 761a0653cf9d4585d1c20517a32e7f27 +part_02430_of_04320.hdf5 9d488cf9b05c0b26677d8063c3a7e02d +part_02431_of_04320.hdf5 c50fbe061826c63e9d10ca695c955148 +part_02432_of_04320.hdf5 6df9e085c95fe0a9545df3fd020fa57a +part_02433_of_04320.hdf5 38b7ac3c2f2c4e45d78ec5b87bb9b99e +part_02434_of_04320.hdf5 f398872e4df3fde3c1eadd14b0ffd031 +part_02435_of_04320.hdf5 d493d01708dd24f6242a0a1453a4129b +part_02436_of_04320.hdf5 1e95a3ca1d66f785063fb1d5833043ac +part_02437_of_04320.hdf5 5ed5d9e10162803489399e3d331fc8eb +part_02438_of_04320.hdf5 f5b927b5901bacbe27d0335599d9eae0 +part_02439_of_04320.hdf5 1a97c5861e19beb4e75b753abf9101e2 +part_02440_of_04320.hdf5 144141ae3b07a99327b686aae3e275b2 +part_02441_of_04320.hdf5 5dcad80891b599ac5d1980aade5a5b1e +part_02442_of_04320.hdf5 f9db640d45d3a9c0a23c0d65e2b7c6ca +part_02443_of_04320.hdf5 73ee3364c06a6c8369bc4aa9986c429c +part_02444_of_04320.hdf5 8b93bc07735fbfd447554f16e730fffd +part_02445_of_04320.hdf5 7ad9ef8f55ee4ca2a6a12b84cc777b63 +part_02446_of_04320.hdf5 166ec052a459316baee36fefa28537b4 +part_02447_of_04320.hdf5 6c86af2f22a980bd61a0edf18ed6ec66 +part_02448_of_04320.hdf5 
248ec8475949014bb926eff1425e7079 +part_02449_of_04320.hdf5 7607a08710f4544400e86a77d0e32fc9 +part_02450_of_04320.hdf5 2d003e66b0117c0de80d2344c3b8edb9 +part_02451_of_04320.hdf5 14b5bdac7d2f40277aca180649ac8f17 +part_02452_of_04320.hdf5 60335a1a8a81ffa8c345a7b8d6156a7b +part_02453_of_04320.hdf5 07c51863414494dafb14f2849fae9b1a +part_02454_of_04320.hdf5 33e8d0341dd281d0aeeb42a3db3d2c11 +part_02455_of_04320.hdf5 08882fc47cd4e2403b6712e98b24d29c +part_02456_of_04320.hdf5 652d8c0b37e2fda38671872426696726 +part_02457_of_04320.hdf5 ff9e8078ca38d4f8205615b7ab0ff0f5 +part_02458_of_04320.hdf5 46bf9590a8bc74169b224a3f356ab3c2 +part_02459_of_04320.hdf5 3c795022a9d3312b661cb8f28459b0cd +part_02460_of_04320.hdf5 5766a6249267ec82a4ddf126e386cf13 +part_02461_of_04320.hdf5 391d50dfb4adbae86e8180954ffe19a7 +part_02462_of_04320.hdf5 8a3d7bc90322f5306fad79c11d41f83e +part_02463_of_04320.hdf5 0db110f1c29a5f7a4c2395c4b034093d +part_02464_of_04320.hdf5 b950961ae53389f0b47ef73be30b1fd1 +part_02465_of_04320.hdf5 4c9aee8bd13e9e43005a7e09dac250ad +part_02466_of_04320.hdf5 be75484836d37278638ab9cc8ef44b93 +part_02467_of_04320.hdf5 1f292c31419b1a1a78cabdbe9eadaa05 +part_02468_of_04320.hdf5 5d3aca7784f9a345156c77e6a5fc20ba +part_02469_of_04320.hdf5 5afcd3a4b5a6f4d5fd80ccccccf68c45 +part_02470_of_04320.hdf5 572edf179687ed207e20edf1a102b1f8 +part_02471_of_04320.hdf5 e1127c3d149e83aa4b9ce35c6a4eaee8 +part_02472_of_04320.hdf5 fabc382ac01dbf5095f48af1a09bce66 +part_02473_of_04320.hdf5 b637c725158b37a44d6b9516c17b841c +part_02474_of_04320.hdf5 f3af7cc4ef2fbcad381a6f08cf1b3507 +part_02475_of_04320.hdf5 18e638e89e83587af9537af00dc63670 +part_02476_of_04320.hdf5 a5878ca49db864bc3f4f1a7b2531d79a +part_02477_of_04320.hdf5 0ffbc9396f474e7debff34b5dc1d7444 +part_02478_of_04320.hdf5 8f313cc9ba97acae45d34d95c27a7fe4 +part_02479_of_04320.hdf5 1fb19711fe5dbe78a75ef49b78cb8534 +part_02480_of_04320.hdf5 5eb11f43a3a6db5d9ed637159af23ca1 +part_02481_of_04320.hdf5 07be5f3afdd9742e4b0c9542fa9853fc +part_02482_of_04320.hdf5 03d5cb3383d43deaef8dcb7aa7bd41e1 +part_02483_of_04320.hdf5 32fdd0032b2485d1f8a7724e8722eff2 +part_02484_of_04320.hdf5 911406cb2e391d1ad44b9521a1fd5228 +part_02485_of_04320.hdf5 8f329fa432d236dd08e84f5d591c14c3 +part_02486_of_04320.hdf5 eec1a3b0390c7854bdba1e50cd2b2053 +part_02487_of_04320.hdf5 8aff820948d477b7d2ae3ac39e8538a6 +part_02488_of_04320.hdf5 f1072ed65516a556c817dfd6b9be9234 +part_02489_of_04320.hdf5 4034153be0dccca5a71a7b54ec2360d1 +part_02490_of_04320.hdf5 40c108540bc5fa5d73918abec9df48d3 +part_02491_of_04320.hdf5 d3f29be14dced188a286841e25bc9a3f +part_02492_of_04320.hdf5 9fe462ca80226ce89336b76147e472c2 +part_02493_of_04320.hdf5 263954ede01e15eb295682b5d90b5f4d +part_02494_of_04320.hdf5 cb80a0542e7eb50481db3783d9e46ccd +part_02495_of_04320.hdf5 79a652e3af9e3943e6888e22fdc10d26 +part_02496_of_04320.hdf5 e69d30023da52d517ac15a30717f3953 +part_02497_of_04320.hdf5 925762b402fbd4f68ae3ff0fc69fcb35 +part_02498_of_04320.hdf5 3a8d1df7a468b152793ecfaa20de877f +part_02499_of_04320.hdf5 9cb85dfb71c93db34453765ec2e3f15a +part_02500_of_04320.hdf5 4dc2e9abb1e0bcb096f9ad9663732db1 +part_02501_of_04320.hdf5 4e6c7e5e395be3399286440b301f2103 +part_02502_of_04320.hdf5 2f1cf47c08919d2540c41617b99a1367 +part_02503_of_04320.hdf5 545cbb12745d12cdd90c2e04bfc42e23 +part_02504_of_04320.hdf5 0ad5148af6368601904181328b0bd3b8 +part_02505_of_04320.hdf5 4d2583d528858ad4bf14828b803fd8d9 +part_02506_of_04320.hdf5 9df36064bfe938767ab1855b5867839b +part_02507_of_04320.hdf5 976ac7f7fba0b8038950d176d4bb598f +part_02508_of_04320.hdf5 
9be731c042a0463d8e2abb75c9257604 +part_02509_of_04320.hdf5 3711d5049dbcc3fd76c460265fbf1fcb +part_02510_of_04320.hdf5 4e08bb4e53d61c702c7fd7445db83202 +part_02511_of_04320.hdf5 92d18483bab24c98e7376ef53e1029ce +part_02512_of_04320.hdf5 f1a473cc078f00d35edda022347e4ce5 +part_02513_of_04320.hdf5 7a1e587372eaa3c152bb55bbf1f299a4 +part_02514_of_04320.hdf5 a298dd30758532a5234999e061c9116f +part_02515_of_04320.hdf5 69eaf7dc9fba803a8069c90542b018f1 +part_02516_of_04320.hdf5 94d84aead1e4b70df112a54382eaba89 +part_02517_of_04320.hdf5 3c32f085e839862bd9d624a0c302529e +part_02518_of_04320.hdf5 7617bee4f445918b4aac1d9c32db45ed +part_02519_of_04320.hdf5 9dd6e395474b50db5025a8af7cfeba9a +part_02520_of_04320.hdf5 d194d73809ccf11180fdc5021b6c50ab +part_02521_of_04320.hdf5 c29eeb7210503e74dfd805549ba79d21 +part_02522_of_04320.hdf5 f03aa9e376a02cd322a19aefa8f30eb9 +part_02523_of_04320.hdf5 ddd5b9ac4d23665f0db08d24ad8e41a0 +part_02524_of_04320.hdf5 d60c4516180ee394f69fa81e7369fad7 +part_02525_of_04320.hdf5 144db70db3dba67db59578a2f60fb5e4 +part_02526_of_04320.hdf5 013b5f68dfd54da45a1df134a0c871be +part_02527_of_04320.hdf5 1750d1f9332645a0720e8c4bda06093a +part_02528_of_04320.hdf5 4f533abea76debe2f06273212ef19c14 +part_02529_of_04320.hdf5 943c215aceb60db2f0f9242a7c2bc386 +part_02530_of_04320.hdf5 2172b0f4499aa91cc479c773e40973c5 +part_02531_of_04320.hdf5 b2762f81e3e482341ba6537fc8b23f55 +part_02532_of_04320.hdf5 4ca29bf6758f2bf2c5afdef47a7314ae +part_02533_of_04320.hdf5 719b9e70ddd8bae691ab8c43e3078e9c +part_02534_of_04320.hdf5 71068e69813c3838f20105e08c7433ce +part_02535_of_04320.hdf5 779c3bbe68e4c6df6de4d19008db7ef4 +part_02536_of_04320.hdf5 f514a7d89e7159a9b773c0047cd8e1a6 +part_02537_of_04320.hdf5 cfca96ff3e1552d104d1b32a98ba2dc0 +part_02538_of_04320.hdf5 7f88c0e79453befbb00f8c37c59b55f8 +part_02539_of_04320.hdf5 0ca1f7297e0f3e6f4dcffe0338b1b95e +part_02540_of_04320.hdf5 750b086c7102bdcc640215f507f0bbd0 +part_02541_of_04320.hdf5 003f8d549ebdc334ffb497e3a2aa91e6 +part_02542_of_04320.hdf5 16590df25f2f83cde4c5c5dba729d0ce +part_02543_of_04320.hdf5 4dc441051e2f34c959442d865ca956e1 +part_02544_of_04320.hdf5 76d6e92afe069d3e0f619c174d649e2e +part_02545_of_04320.hdf5 35c1dfa3f2d970f08ec40cac19a01194 +part_02546_of_04320.hdf5 c701fe17aada842402ae96589b3b9653 +part_02547_of_04320.hdf5 09e7f0439b57407511d959d0699359b6 +part_02548_of_04320.hdf5 f8e03f7d66f3bf0466cc09a9447ad79d +part_02549_of_04320.hdf5 5d85b55167ec3b2525fb300a6e269f49 +part_02550_of_04320.hdf5 1591bf196ae9bfdb1f6bf9916d1a6534 +part_02551_of_04320.hdf5 70b727884ae5a741d886f2b5bb48fe0a +part_02552_of_04320.hdf5 70dc4b4132c15b2f47a4c28f0b8301b2 +part_02553_of_04320.hdf5 1ddf10ef9fe2644c778278274de3e7ee +part_02554_of_04320.hdf5 f0445a9a3b6b3c709c8903a4a741463f +part_02555_of_04320.hdf5 5a03599d7343dae8b8190c89a79788bd +part_02556_of_04320.hdf5 82c9fa58f6b092f3bb8806d73f9deb1e +part_02557_of_04320.hdf5 0002576d7067dca1c54994aee8d16bcf +part_02558_of_04320.hdf5 7afd4d01eea21e792e2540aba20675ab +part_02559_of_04320.hdf5 da7dcdcacfaed69f6c0b79bb3482c760 +part_02560_of_04320.hdf5 239cd3868f8235b6cd0213b9244975cc +part_02561_of_04320.hdf5 451fcc6efc7cad3a7db3dff09d45b8e5 +part_02562_of_04320.hdf5 291f48f15ad6789bf7161666fc1ed8c3 +part_02563_of_04320.hdf5 dcab22a819370cc8605282d714743fe8 +part_02564_of_04320.hdf5 2c788578a354881b8b92b873659af013 +part_02565_of_04320.hdf5 3274c1ae1afd0888098e01bfa2868a03 +part_02566_of_04320.hdf5 3fc81d963b5255a232ab5c0d8a01441f +part_02567_of_04320.hdf5 d93d0b3139d1bc35e68a6e760df22a6f +part_02568_of_04320.hdf5 
a06c2a84f7e7923a49ee6606e4c25791 +part_02569_of_04320.hdf5 927b8dbd0fdb3fd0406fc2449746da37 +part_02570_of_04320.hdf5 02ba8a654cd890fcc449e4b1466a8d10 +part_02571_of_04320.hdf5 666b9917f06ffcfcba4c961f4dcc89f8 +part_02572_of_04320.hdf5 fa83c2f3f06f003868caed764c129424 +part_02573_of_04320.hdf5 c4fd1e7b291b4d5f83fcb090c673e6de +part_02574_of_04320.hdf5 99493842d6b811d7ed23277173cc9e4a +part_02575_of_04320.hdf5 6e1912521f20cf3a586c61aef50cfd53 +part_02576_of_04320.hdf5 6dae780aadb2079cf5ec697cb840c7f1 +part_02577_of_04320.hdf5 28c8798694ac1421dcff96d399b8c9b3 +part_02578_of_04320.hdf5 9138a722e8b9a09f57097dbb3807708d +part_02579_of_04320.hdf5 d4070e365321c902b3e4e23ee53095de +part_02580_of_04320.hdf5 9f5c7b305a7c08beb18ca84a6da5bccb +part_02581_of_04320.hdf5 d0deae47a54d070c80a94d545b6f0fd2 +part_02582_of_04320.hdf5 e21b2f048f2272bd454afa3f114865dc +part_02583_of_04320.hdf5 5fbbe26e92773de28bd2a890084ca8e4 +part_02584_of_04320.hdf5 2becb6ccd6a1b3e18d730141cf14979a +part_02585_of_04320.hdf5 3a79f8610f2e0e202ac1da3d98811975 +part_02586_of_04320.hdf5 2a36ab972e3f1e01cfb714374cb1a27a +part_02587_of_04320.hdf5 dd356bbccd7cea5d7197b6814c2414fc +part_02588_of_04320.hdf5 c793eba65efd3fc33c0fc22a68b1e1f4 +part_02589_of_04320.hdf5 60b24f9542960fe0ca102600e427c551 +part_02590_of_04320.hdf5 419839125fc355a207a47e1017e1cc66 +part_02591_of_04320.hdf5 6d55aa53cbdfabc4e9f71413bd43bb91 +part_02592_of_04320.hdf5 d8e4cfb97327f3b34493cd25f804e8da +part_02593_of_04320.hdf5 114557c4be8684b41eaa1b3c543432ab +part_02594_of_04320.hdf5 f261daeae4cea914592dcb4be2831b94 +part_02595_of_04320.hdf5 2f0e46aefdfe4722036478c496f3dbf9 +part_02596_of_04320.hdf5 1e3ba034cb07cfdc01eebea1e6676e56 +part_02597_of_04320.hdf5 ea2b4d2e21aee08a26e04ab25c398731 +part_02598_of_04320.hdf5 49c6839dd7827a88dfe043c985c6abcf +part_02599_of_04320.hdf5 b3e79d7fb7e5e41a907bdf44aa96f66c +part_02600_of_04320.hdf5 d6d92040a094506180b380729de0e2fe +part_02601_of_04320.hdf5 5ab500b94ecfb78d6b85697c81bf4f1c +part_02602_of_04320.hdf5 1aaf640d9d875edd83396ebe4a3915b2 +part_02603_of_04320.hdf5 3944848a711c19b92620e831b4d9001f +part_02604_of_04320.hdf5 abf974799c9d1c2047407fbb17ca35b8 +part_02605_of_04320.hdf5 c639b865ec294273fa49be718dbd324a +part_02606_of_04320.hdf5 01b750f4f0854c290d4b243e6d73d06c +part_02607_of_04320.hdf5 b01261549fef01a6916396aab440a635 +part_02608_of_04320.hdf5 db6f913b7fe2a8d7a85cf102098f004e +part_02609_of_04320.hdf5 b3dfaf16b6731055ee1476f7a827c55e +part_02610_of_04320.hdf5 81911cc8b7234f5b376d96e32da953db +part_02611_of_04320.hdf5 174b3d2d2bb33822aede75c17856e14b +part_02612_of_04320.hdf5 78780c238265e99b9d56b83e351458b5 +part_02613_of_04320.hdf5 d7f84ca99867e69e6440178f8812adbf +part_02614_of_04320.hdf5 c6a1eff6b8f2df56d43e062241a04fd0 +part_02615_of_04320.hdf5 a4d5b28ff898fb8bcd1181aec8cd6476 +part_02616_of_04320.hdf5 1af97d9565c998b0e6e798e72d77a997 +part_02617_of_04320.hdf5 4bdf356aad4fcc501ce2ae6f79fee3f7 +part_02618_of_04320.hdf5 e649334e48bf04771a73a5fd3c04d107 +part_02619_of_04320.hdf5 15f2b0ce3a6611c45895aea64090ce23 +part_02620_of_04320.hdf5 2527416b0f5635147f91b648a7cca710 +part_02621_of_04320.hdf5 220f9fa7cb9f0d1ef556f4fc1cb2b414 +part_02622_of_04320.hdf5 093c8fb21931a9343e59ccdbc702deff +part_02623_of_04320.hdf5 ed97fcec71e2c0a8320fabd7b0fec6c7 +part_02624_of_04320.hdf5 40da2a3e2346810d21c1157b24ea8117 +part_02625_of_04320.hdf5 90ba9db930c59dcd1e534bc777cd4ed3 +part_02626_of_04320.hdf5 dde385f4d7d5c06ba09dcfb6bb2c38df +part_02627_of_04320.hdf5 9ee9b95075a9377abcda6da474f4bd75 +part_02628_of_04320.hdf5 
3e308d2d743c193e123722563231f1d2 +part_02629_of_04320.hdf5 d590c5b9b419ccdd6b2ecd1625210233 +part_02630_of_04320.hdf5 557a1ea48efa7820c25fe3ea7c73261e +part_02631_of_04320.hdf5 2e37bf4d8f3f2329e357227082fe7a4b +part_02632_of_04320.hdf5 24d1677d549c1f1fe4cf3fea543076f4 +part_02633_of_04320.hdf5 8c759b96f250c78aa4cf5fa41d4935bd +part_02634_of_04320.hdf5 50d0ed6d3b83981f841fe6b67cd63c1c +part_02635_of_04320.hdf5 a48b1d24f402f473ca94d078a5b26b53 +part_02636_of_04320.hdf5 c732b17aa971e328280992d054838114 +part_02637_of_04320.hdf5 5431017e937bd83bb042ae409d54b9e0 +part_02638_of_04320.hdf5 09f85a68625be080da6de54ced63f32e +part_02639_of_04320.hdf5 4a62ca59114803671bd0a32276129aa9 +part_02640_of_04320.hdf5 54a8da9b9fa3c221b125559957d0db98 +part_02641_of_04320.hdf5 2a5714b613af73f0545cb7ba7e026ae1 +part_02642_of_04320.hdf5 1dfe18f120a1dfdff90bdda98d0b109f +part_02643_of_04320.hdf5 436bbd588e087ae8127bfeaf9f3bc631 +part_02644_of_04320.hdf5 7c2608e5b532b5a1d5bfa53f26e9ef48 +part_02645_of_04320.hdf5 da48c07f1474c4b1bff20be3e1ca00f5 +part_02646_of_04320.hdf5 8d4108382a6eff0394dc4b7c5282094c +part_02647_of_04320.hdf5 3ba06cb34dad1ba369d32e0606b8c734 +part_02648_of_04320.hdf5 2fe05322c0341e22cf07bf1c33f31a7a +part_02649_of_04320.hdf5 196ad9f6423f77fa93fe3358f1d8616b +part_02650_of_04320.hdf5 1bd7acf399ddffb69f2fe85a317a44e7 +part_02651_of_04320.hdf5 5320f56a6a273da32f410ca6e7e1caa0 +part_02652_of_04320.hdf5 b72540939c2a09a05d6ec02019779196 +part_02653_of_04320.hdf5 848a9a363be802fdc6eeefed5b36452d +part_02654_of_04320.hdf5 e4c872a5601c69dcd40f1113fb5b67e7 +part_02655_of_04320.hdf5 c8c3fec5816db529ff639b9b3da49377 +part_02656_of_04320.hdf5 1561ff6afe88f19d7ef08c0e9958d894 +part_02657_of_04320.hdf5 2ceadfe4bcdfcdeff2c6dc83e6264f6b +part_02658_of_04320.hdf5 afc691e32171db1ebb300914f2917844 +part_02659_of_04320.hdf5 1527e0bb2badb0e4a849c6ac1d7db6f7 +part_02660_of_04320.hdf5 5c29c07f83d7629d0e6359f260898a24 +part_02661_of_04320.hdf5 d4af7830b5c8da1e423041dd2dbaebcf +part_02662_of_04320.hdf5 8e7198cef896a390c99981f148dbdd85 +part_02663_of_04320.hdf5 17837c1905c4d61e4255acc1ff06ed8e +part_02664_of_04320.hdf5 eb7b5c1478794e9a0fa7117608d64a52 +part_02665_of_04320.hdf5 ad5404b126c118504dffa8193e9d752d +part_02666_of_04320.hdf5 c448179a5fd68b3e331f84d3c55f8c84 +part_02667_of_04320.hdf5 7a8a261fc1f2a54405ba426300dda499 +part_02668_of_04320.hdf5 184024161ac5708e3aef9c9958a173e9 +part_02669_of_04320.hdf5 3fe2c3f50d5094883903b3d62219755c +part_02670_of_04320.hdf5 e5ec4259aa1377419042c761e7bae5a1 +part_02671_of_04320.hdf5 6800f4900f34f63761f5cdc9c33e3c93 +part_02672_of_04320.hdf5 f381cc1459e106315e648b9dfe9e24cf +part_02673_of_04320.hdf5 c814669ead39e23b045b0969e0cd9c15 +part_02674_of_04320.hdf5 7d0775ba4921b4a4bf0d128622d7a630 +part_02675_of_04320.hdf5 982f3eda55bc30356a8c942976447e5c +part_02676_of_04320.hdf5 8274e9c9bc8a53b5df6ff8cde38c03c2 +part_02677_of_04320.hdf5 e16eacc350ae1944651abb89c3fe0460 +part_02678_of_04320.hdf5 8d81a784c613513b807a44bcfda53b4b +part_02679_of_04320.hdf5 ef8bd6359c092cdb83f4c502460b2655 +part_02680_of_04320.hdf5 277157358c2bfc47a532ec22e2f5537b +part_02681_of_04320.hdf5 03f3d4e92244dc7850b08844eb54fe0f +part_02682_of_04320.hdf5 3b67c209b78b96ff6489760aea5b5d60 +part_02683_of_04320.hdf5 94065a75eda7bc472aaee64b46ccc92c +part_02684_of_04320.hdf5 67fd6e8055452deb085499871e78a7b7 +part_02685_of_04320.hdf5 17e0eb189f2d11449d1e05cc9712c1c1 +part_02686_of_04320.hdf5 7ac4386666053c24e4d90d44210aa472 +part_02687_of_04320.hdf5 82848ccd5216916472993fa7c6f1f383 +part_02688_of_04320.hdf5 
1432c2338c8133762096e2b1403001ce +part_02689_of_04320.hdf5 0a9a43c888dc09dcf4f4a71010fd658b +part_02690_of_04320.hdf5 ae367b47bab09bf6753167eca70a4f7b +part_02691_of_04320.hdf5 0d04839b03513c046e6ca86d9b0c7fc5 +part_02692_of_04320.hdf5 19b8c6bd06f5dc59b35973cb58b1e890 +part_02693_of_04320.hdf5 839fc1fad65b616acd97426c6c332268 +part_02694_of_04320.hdf5 8a385358c4d298e92d9cdd2345aecc07 +part_02695_of_04320.hdf5 91ba9a9976adcbe80729577ba6ff4b01 +part_02696_of_04320.hdf5 bee49873a060df365614c866c2e7252a +part_02697_of_04320.hdf5 f5acf6c757d0805ae5c47a32b1087b98 +part_02698_of_04320.hdf5 480455b4b4c782a703fb89f953b37c27 +part_02699_of_04320.hdf5 98bf9cc4b97c494fc5336d838931b985 +part_02700_of_04320.hdf5 286c6069dd546c54ba9b34a7f3c06f0c +part_02701_of_04320.hdf5 6bc7bc6425561ba3424598d5642b444c +part_02702_of_04320.hdf5 08b7e4df9a8b6398656f57a0cc2ba999 +part_02703_of_04320.hdf5 74204cf7452116e76f80dabccaf3a2c4 +part_02704_of_04320.hdf5 05e76e7f403cb5ff88d62113d0fac166 +part_02705_of_04320.hdf5 227e8b3084bf3d0ec756b9e02f85c3b7 +part_02706_of_04320.hdf5 7a796f59c7fc79b9fbb6ec23972ce2d0 +part_02707_of_04320.hdf5 ef6c7d569c71e70e01ef05bb7f0c738b +part_02708_of_04320.hdf5 55a7587352a434e282090dad6cb88b83 +part_02709_of_04320.hdf5 f26c2f464ec5e7b6c09c8fd36c3ce767 +part_02710_of_04320.hdf5 929b3dcd5b4ce53edfb018aa778a0715 +part_02711_of_04320.hdf5 bd833bee11095ef45906a68e4d020a18 +part_02712_of_04320.hdf5 c6e0f3fc13dc354c234972edb1da719d +part_02713_of_04320.hdf5 9c27e79ed9c7ee40117a37cc76d6f6dd +part_02714_of_04320.hdf5 cb2364ede4f83ec1821d670605543a2c +part_02715_of_04320.hdf5 c01609e4a368f95bf10a1d281580271b +part_02716_of_04320.hdf5 1e9dfdfbc67eb5709dff3577137db053 +part_02717_of_04320.hdf5 e6abe09417f029863272a8d882d5725c +part_02718_of_04320.hdf5 8e5a9389606b7f7fb264447eea9999d0 +part_02719_of_04320.hdf5 eff46e1a1d22abddeec07e1f56fe6af3 +part_02720_of_04320.hdf5 425497da9c08c7f7083ab3a627d158d4 +part_02721_of_04320.hdf5 b553845b02bb3927d5debc3122dbf83b +part_02722_of_04320.hdf5 7099bd3aff5aa6667e58eb9a8b30ca79 +part_02723_of_04320.hdf5 64c160abb3f7336429ca74977cd93afe +part_02724_of_04320.hdf5 696cbee310dcdd4270d618b04f940862 +part_02725_of_04320.hdf5 af4ace72d1879a71535e5d56031fb37c +part_02726_of_04320.hdf5 d3f7b3b25b738fd1a22ae09dc7fed4f6 +part_02727_of_04320.hdf5 63fbd040984818dc5e1da719b5c76b48 +part_02728_of_04320.hdf5 aed6ca7c68754915495ab0d1f39c8213 +part_02729_of_04320.hdf5 4fe767fbf37a364b640503f956a6925c +part_02730_of_04320.hdf5 582811292ff8b321bd8326f4b9674610 +part_02731_of_04320.hdf5 6d3f7b07c9f7e93486144a0200e5e929 +part_02732_of_04320.hdf5 c9b068bf3f04505a855a4a5aa70fb52e +part_02733_of_04320.hdf5 d544548e015554670c1495a8e58fe453 +part_02734_of_04320.hdf5 6ebac97769c3482ff74c5fbfc7266de6 +part_02735_of_04320.hdf5 7997f5d9311bfac427e795bbda7dda72 +part_02736_of_04320.hdf5 f8f14ecbedd891d0fb1109f95082ea88 +part_02737_of_04320.hdf5 c86b4a76f5625e9ea6c5ef0bcea8be72 +part_02738_of_04320.hdf5 af00326908e0b15bfa0c039615ea92e2 +part_02739_of_04320.hdf5 4b70f0e32aeca5725f68b70f1f5055f8 +part_02740_of_04320.hdf5 4cbbdbba92287835d56ff36ca4859bf2 +part_02741_of_04320.hdf5 ecfbd0144e15e0c62e604085ee097dd9 +part_02742_of_04320.hdf5 5a9d4176dd63558e146848e2d6ffe045 +part_02743_of_04320.hdf5 7d4f4b405b0cbd80bd37ca5407da4856 +part_02744_of_04320.hdf5 ca2ae2453132d495f50367f620ffb6ae +part_02745_of_04320.hdf5 21ffff9efd2c9c529629f903f24b2595 +part_02746_of_04320.hdf5 6f82d7f3599df99e4a777a9fb89a1965 +part_02747_of_04320.hdf5 801164833d6470a595c76aae7d76a3ad +part_02748_of_04320.hdf5 
f4ebf719ea3da014f456b898b2c3063e +part_02749_of_04320.hdf5 bd4d0d0dc8dab6b7e4392c8f31dc298a +part_02750_of_04320.hdf5 6d7a9795a89b66f45e6aed0769f81698 +part_02751_of_04320.hdf5 832b64ee0053f04be43524fd3ba41adf +part_02752_of_04320.hdf5 e9f249439afe08aa40afbd700e380392 +part_02753_of_04320.hdf5 963824f6e614ade28978782d1075746f +part_02754_of_04320.hdf5 3ea59560fe1d78d6749006436fa155ba +part_02755_of_04320.hdf5 32bd314491807bad33a528f4409964ad +part_02756_of_04320.hdf5 e94684d82ce5339f464dd11a5dea73cd +part_02757_of_04320.hdf5 eca935cd007e1f3aa6bde2e9aced1681 +part_02758_of_04320.hdf5 345477785683cf6f5eea5230e1338709 +part_02759_of_04320.hdf5 ebfd113b80fc568dc868466fd4510ce9 +part_02760_of_04320.hdf5 cc38293819146a38c322ebf33c5e0c23 +part_02761_of_04320.hdf5 1b44b3bf224036d0241d1b6839851738 +part_02762_of_04320.hdf5 afde8ff0d87e5da919d377fd2e91bb2d +part_02763_of_04320.hdf5 c713d7bdaa3ea4e3ab3271ece0926e48 +part_02764_of_04320.hdf5 dd50ac6041910bf44a8b79846f66dbe3 +part_02765_of_04320.hdf5 9236fcda9d5a61699c6eb97104abe93b +part_02766_of_04320.hdf5 d9161bbb9560f3150136ad875c0aa188 +part_02767_of_04320.hdf5 2e58fa2887ed33ced6eb8ceeb6cd729e +part_02768_of_04320.hdf5 3a1bbce8efcda9198b3212611011faf7 +part_02769_of_04320.hdf5 3cf14fea22341029a6e3eab2e8578634 +part_02770_of_04320.hdf5 58bb452e31d5737fb174613f2e3c299e +part_02771_of_04320.hdf5 3928b658f88b17b972d1098180d5ac1e +part_02772_of_04320.hdf5 d829061c61259841fa0957939a97f928 +part_02773_of_04320.hdf5 60f0fb7e58161668b431e8018f1780a2 +part_02774_of_04320.hdf5 c82ff76ec41cbc5ae12a35fd726496af +part_02775_of_04320.hdf5 d35bb82cf4f647673df9882f7bd5a5a0 +part_02776_of_04320.hdf5 e3d489109cdbe01ad37167cb00e11cd6 +part_02777_of_04320.hdf5 7ae3dcb71e65f0edab109b1af1739789 +part_02778_of_04320.hdf5 a997de6747c46588dbad2280dfd9d1c7 +part_02779_of_04320.hdf5 5362c57d8d161c9bdf6fb71c104c91dd +part_02780_of_04320.hdf5 e9924fd57413adf1b959b692462d832d +part_02781_of_04320.hdf5 f3ffebef3f87da3cb1198dac39ef1ba4 +part_02782_of_04320.hdf5 b31b5ae23099af9cb1af60d3640d15dd +part_02783_of_04320.hdf5 6690cc98be2aeb077501663e2c8f75a7 +part_02784_of_04320.hdf5 126f079d11ce4c4d8c9eecedf9d6a0e8 +part_02785_of_04320.hdf5 8976f10ad4f4789dc710676bb198ca35 +part_02786_of_04320.hdf5 a924296e83e66bbaa8fb5188382a0725 +part_02787_of_04320.hdf5 ca8c74b65a89d45d34efa9785e0ecdbd +part_02788_of_04320.hdf5 a1804efaf1c7ab8c211ff9368f22685d +part_02789_of_04320.hdf5 a222f0460f581c90bda9317a8ee0f786 +part_02790_of_04320.hdf5 f8c1d937d058078041f5b59ba3af4704 +part_02791_of_04320.hdf5 3db8e255a62e35b6ca0fa8d6c72d5ceb +part_02792_of_04320.hdf5 f6b951e6465590ae0cbad0d068a3e525 +part_02793_of_04320.hdf5 a4704989b3a8808bcbdf97ce69752815 +part_02794_of_04320.hdf5 3d66340c5d60f4dee2a93f3a57bdff82 +part_02795_of_04320.hdf5 bc951a41f43b876b13ed301fdacf25d2 +part_02796_of_04320.hdf5 a4ccb045b57b68533e66a8f7d794e375 +part_02797_of_04320.hdf5 cc7098f7b3c47f30b7537060f143997d +part_02798_of_04320.hdf5 e07696cb1309a7f4c43c4f6e22fbdb53 +part_02799_of_04320.hdf5 3a6e7af0626c6918d987ea47cca83209 +part_02800_of_04320.hdf5 cb8f2a56b76fc46310e384b98f116030 +part_02801_of_04320.hdf5 51ae3c3b05c6569c7fce0aee1d1fa464 +part_02802_of_04320.hdf5 a6f99b56554d04ae94d5ed021839ca73 +part_02803_of_04320.hdf5 f2b8ba8073997cad4529afe89c7767e9 +part_02804_of_04320.hdf5 f664840ffe1679eba30878fb3941f01a +part_02805_of_04320.hdf5 71b3e264a3c66e5e024b643b53362fe2 +part_02806_of_04320.hdf5 4bbb517f81262f536a48528c21c6136f +part_02807_of_04320.hdf5 249457fcd2a4cac7ce9032a4215eca09 +part_02808_of_04320.hdf5 
d8352dd6054c7a0b3b7f375275cc845f +part_02809_of_04320.hdf5 ed154e11c99fcee0ebcb3b6e5a497d60 +part_02810_of_04320.hdf5 61f403716d62d282deb97a487f003dee +part_02811_of_04320.hdf5 8c6250ec42dd14576645f2234fd75795 +part_02812_of_04320.hdf5 d1a10ae958f7927c769d46622912fc02 +part_02813_of_04320.hdf5 723f8e0e77b641dae93554a5a5cf3583 +part_02814_of_04320.hdf5 bfb7f13ef5aeae4c13d6bf908c01ddee +part_02815_of_04320.hdf5 77fb4c1bff637237b9a53ec2ca82bf16 +part_02816_of_04320.hdf5 52d4df5c894d47922bda531016502c60 +part_02817_of_04320.hdf5 fd676b790dbcff9cc0c2245dcd8acaf4 +part_02818_of_04320.hdf5 6cff25034c9f5348a58ab0240cda256b +part_02819_of_04320.hdf5 252d7643658a8d55c4f9a1230748de71 +part_02820_of_04320.hdf5 e098d19d642b8f248c79682d67c73770 +part_02821_of_04320.hdf5 47772a8dc7b1de88634e28af1a14e5ac +part_02822_of_04320.hdf5 0bcb57ace2e441b8c5680ff65c448c3f +part_02823_of_04320.hdf5 30cbaf1381a25a926a99284d93d1771d +part_02824_of_04320.hdf5 0abd7325708e69391cbbe2f28125e758 +part_02825_of_04320.hdf5 742526ca8fc8125c1c57539d397c6cde +part_02826_of_04320.hdf5 eaa4f52ef3245d4aeba44c10e5f2d2a5 +part_02827_of_04320.hdf5 d674997ceec58914f11913c33f884aab +part_02828_of_04320.hdf5 27f9194ef77338ee81c5a4da9531ae6d +part_02829_of_04320.hdf5 ac43aa55f86825b4c60b7965ef8a98ed +part_02830_of_04320.hdf5 d510382872e8fed0dea8ede51b9f3268 +part_02831_of_04320.hdf5 536db7106553b08d6bf6091963ba5170 +part_02832_of_04320.hdf5 620027d81c130910d0e698f381a9a785 +part_02833_of_04320.hdf5 3dc02a05ab91c70d3ed50c4b2c550c6b +part_02834_of_04320.hdf5 b8200fe886da36a81d461dbf0bc618db +part_02835_of_04320.hdf5 849276fbda122c439a6627bf1166a095 +part_02836_of_04320.hdf5 5b13fd0b91b5e44210d11736a3c2ad57 +part_02837_of_04320.hdf5 417891c22c102938088e96307991478f +part_02838_of_04320.hdf5 f487b72936ee30904e28dd42bdfa9d6f +part_02839_of_04320.hdf5 a57b3a966bcbf10c144b1676f36c7eb0 +part_02840_of_04320.hdf5 1e4f8c04897ecb5d661994d183921b51 +part_02841_of_04320.hdf5 3d5915f0c5c20cc75ea53c30594de39a +part_02842_of_04320.hdf5 63ce350021cce6a2c896aa4fdab7377c +part_02843_of_04320.hdf5 cbdbf3010531e595cb650ca3fe74a7fe +part_02844_of_04320.hdf5 d26bf383cb5ffe7d2b8de21fe89a7ee2 +part_02845_of_04320.hdf5 ae359ce1262738e069c5b27e657e1289 +part_02846_of_04320.hdf5 3fad65aa779deda174b10ce3598f624d +part_02847_of_04320.hdf5 0a2bd6de3a6a843a336765dd578247e5 +part_02848_of_04320.hdf5 984180ed74ec30e67b5c6eb947ecfd99 +part_02849_of_04320.hdf5 2a558cd0bc640fe5678f3b323bfed066 +part_02850_of_04320.hdf5 290084be36ad334ebd8e33744b9005d4 +part_02851_of_04320.hdf5 7b9dc8ed3c11b97129d47522b75f4c29 +part_02852_of_04320.hdf5 14e59f0099f3fecc4edcf5abfb671c30 +part_02853_of_04320.hdf5 d7da88c511b4fa3f9aac0c62a2c715f0 +part_02854_of_04320.hdf5 d3ac2dde6f2708793752519a75203377 +part_02855_of_04320.hdf5 a06fafa3a3306c2ee692ae58d04e9b45 +part_02856_of_04320.hdf5 c0e342e1110dabe3bfc00b168f283fa3 +part_02857_of_04320.hdf5 d072ffeffef8d3b71eefb395abd0f399 +part_02858_of_04320.hdf5 d5f133d4c9e6b7bcc550ebbeea5f067b +part_02859_of_04320.hdf5 1760598bbb09b765f17c5d3ea1e2f319 +part_02860_of_04320.hdf5 c5f985fc7783ad1f8903b74242b80c89 +part_02861_of_04320.hdf5 01a6a9feea64f627a6ad78ae3c43e8b5 +part_02862_of_04320.hdf5 a539d4e6d2f7dfcfcf14074668e48323 +part_02863_of_04320.hdf5 91aff7d4c4186a7fec2e27cead8afae9 +part_02864_of_04320.hdf5 47673f5f4a2473e278cfb20a9e85ae5a +part_02865_of_04320.hdf5 a35a06cb33853c6446b3d48719d703be +part_02866_of_04320.hdf5 b6db803464e83c1e229ef055fb67d233 +part_02867_of_04320.hdf5 4aaadf8b0a8561a0503901831f24948e +part_02868_of_04320.hdf5 
f683f25764a03ef990b5734b8eb7c136 +part_02869_of_04320.hdf5 688061f3e81201ef15b9c2b1d68a0200 +part_02870_of_04320.hdf5 322ff8e40f1e1daba57ed7569d7918d0 +part_02871_of_04320.hdf5 4d265901a6338c27c78f8fc26214e47b +part_02872_of_04320.hdf5 d088f044ff2bddb2edbd472d36f32d9b +part_02873_of_04320.hdf5 48eb8527b6628e7a42b51bacdb412803 +part_02874_of_04320.hdf5 fa997eb475de9f7ed6cccdc36b6a010b +part_02875_of_04320.hdf5 df1eeef800a7f905ead5890846cd41d0 +part_02876_of_04320.hdf5 9e1868d79c7fee39e4cb9bf7482f3447 +part_02877_of_04320.hdf5 d60d53ca4ea6d7381aaa29112e595b4c +part_02878_of_04320.hdf5 92320a52e9b44e5dafc5179271af5eba +part_02879_of_04320.hdf5 dd1e310323e7044feff96d7dbf627844 +part_02880_of_04320.hdf5 9641535856ccbfd44811286bed3dd754 +part_02881_of_04320.hdf5 9eec07fc5c2e3379f9dc42aa0289a13e +part_02882_of_04320.hdf5 f538ded5a6efd3bf7fc23339b7ce4940 +part_02883_of_04320.hdf5 8d435ea2ed9fd03cfbba5c86438a2805 +part_02884_of_04320.hdf5 94a8f1fadb2160b6affd2d0b939f3486 +part_02885_of_04320.hdf5 1a65037c7477ad998cc141596224c801 +part_02886_of_04320.hdf5 8e098d6eefc17b0eb78ade5448cfc244 +part_02887_of_04320.hdf5 3b877bf4dec86c377af67a29f2b5190b +part_02888_of_04320.hdf5 5d7182de58ce6c85335510a5c08fca2f +part_02889_of_04320.hdf5 7685ae87bbc98602bc11d9b1bd4ab1b3 +part_02890_of_04320.hdf5 46e93fc52e91294120dfdf70c6350f56 +part_02891_of_04320.hdf5 8f195116a81e7136374f5f25780869e6 +part_02892_of_04320.hdf5 67efb2943a838abab36c267902a0f425 +part_02893_of_04320.hdf5 2893d8f82435f7639d83876f91820a36 +part_02894_of_04320.hdf5 469b2823dcdcf909195ee1d2a65c34c6 +part_02895_of_04320.hdf5 71535d5da6a654f7c80f485ee86331fd +part_02896_of_04320.hdf5 f8183cf69e134c615ffba4b142423694 +part_02897_of_04320.hdf5 15032fc4e92c8ba5407b61ea9938c0cf +part_02898_of_04320.hdf5 13a535bfccff287802ae87300819496a +part_02899_of_04320.hdf5 283317c2deaef83331273cbf85432394 +part_02900_of_04320.hdf5 c941a8dda65bbf16ea2c7fb069373301 +part_02901_of_04320.hdf5 c5b8830940a9d0ffef0e7927cd9230a0 +part_02902_of_04320.hdf5 19d8b563c1a734d78210d4f5d1330c32 +part_02903_of_04320.hdf5 2923777ff8e0681383583f55b61f8466 +part_02904_of_04320.hdf5 b403e4e22cf3244c065939f52ccd6420 +part_02905_of_04320.hdf5 2b9935269af85333f2df398fe645eef0 +part_02906_of_04320.hdf5 b507a348a0c6fb74a68a5e0f903892d7 +part_02907_of_04320.hdf5 a450e823aef0038d6d5a733d193a6d56 +part_02908_of_04320.hdf5 9be3a3b1488f9e0de8cb90410ae202b0 +part_02909_of_04320.hdf5 e5ed4da84b654e548ce0cff4d5e66b6d +part_02910_of_04320.hdf5 69a281e37b578674214ecae1d5ff87e2 +part_02911_of_04320.hdf5 92d6dff5c664fb0872efcb5b747c94e4 +part_02912_of_04320.hdf5 056286ceea4c7b079d60e2ecb3998290 +part_02913_of_04320.hdf5 85ae432cd5ba8d0e7f6fd92dbe1d4a6b +part_02914_of_04320.hdf5 3d80013df9e42522cb80fc4e687bff6e +part_02915_of_04320.hdf5 223d82b0461a504fda0299445857f9f3 +part_02916_of_04320.hdf5 c80b689d767d0bf89bad154e4916cd09 +part_02917_of_04320.hdf5 453dab1b191128cd18675b4933f6bf67 +part_02918_of_04320.hdf5 db1ea4c22e0d8abf2f6fad0338e46ef4 +part_02919_of_04320.hdf5 4565c248c2e22a0afb6dee5159cc8591 +part_02920_of_04320.hdf5 a877e59ca7d17fb8849532c0de558785 +part_02921_of_04320.hdf5 52af82766b185da29d0879f4940fdd41 +part_02922_of_04320.hdf5 eb444b8b70b8d0e7378242c8edfee76d +part_02923_of_04320.hdf5 8a439c7a83b6d4bd1bb314146380f832 +part_02924_of_04320.hdf5 d1b680f5182c5ae62f80a5622330dbc4 +part_02925_of_04320.hdf5 2b872178d73bffc6d64b760ceaf1147d +part_02926_of_04320.hdf5 542412a7b1fc16f9ef2b8dbf6bd11c02 +part_02927_of_04320.hdf5 b1a14a57acc05d3ad5d1cd356ac95182 +part_02928_of_04320.hdf5 
51f468c665dc784274bcaa4a73527146 +part_02929_of_04320.hdf5 4e6e7a6a2a40dead6e27d3899a8a41ce +part_02930_of_04320.hdf5 85cb78404cfd54645a6ea7bf1c651f64 +part_02931_of_04320.hdf5 5a61c2618948736af0175f28220d17c8 +part_02932_of_04320.hdf5 acdca99d099fa764a8520ce29cc6df54 +part_02933_of_04320.hdf5 32802318701d234642946f70f2b7d00b +part_02934_of_04320.hdf5 d25aa99ae406f77dac54a426173ca705 +part_02935_of_04320.hdf5 86d011ae58df68c882ade0e947df3ce7 +part_02936_of_04320.hdf5 3efddecb9a7a0e2e3bcc518c88d52388 +part_02937_of_04320.hdf5 c6e222a850f0ac7a2db5f3d5554c8fbb +part_02938_of_04320.hdf5 dcfb66f4477ad42dcc2c63fc32e35691 +part_02939_of_04320.hdf5 13b365a2e0f8dbf847de5d4c60792df1 +part_02940_of_04320.hdf5 c1242cf6c2d751e8dea25a43374f2e8a +part_02941_of_04320.hdf5 0e12cde704ad22764ce375e43c248f8e +part_02942_of_04320.hdf5 b89520e4ac3644a79fecd6f1da6b33cd +part_02943_of_04320.hdf5 d734b3780d676a2efabe870f197a3cb9 +part_02944_of_04320.hdf5 bdefc95762a1b6a86df49c8074c73340 +part_02945_of_04320.hdf5 b6216d24b6b3c76eae5edf58f94ad0f4 +part_02946_of_04320.hdf5 c97c3a460b5f5e8891c92caa95a16d93 +part_02947_of_04320.hdf5 f51bbb3a8388998b442ed620f5b17f1c +part_02948_of_04320.hdf5 4641e2a9adfe1cb05c8a1311ad932eb7 +part_02949_of_04320.hdf5 36ef5684123327d760d12cc791b8d486 +part_02950_of_04320.hdf5 bac15e48b926a54759adcb48fdb343da +part_02951_of_04320.hdf5 27695536e1d9479ae13a2b42c8077cfb +part_02952_of_04320.hdf5 607c9d7fc289763c18d4962f6942fb6d +part_02953_of_04320.hdf5 9c63d3fb17b0259d85b712bc2f2a5104 +part_02954_of_04320.hdf5 5d07d94d7d24eaf3936fb918aca5ec29 +part_02955_of_04320.hdf5 1ded95ddfdf55327841deaf23aa27541 +part_02956_of_04320.hdf5 8ef8edb3a6cbeb4a8d5b825d7acb66a2 +part_02957_of_04320.hdf5 814f446e3e80e4d96f71cfbe3c5caa0b +part_02958_of_04320.hdf5 0e518741d02aec61a9be917bc84e0a30 +part_02959_of_04320.hdf5 1afcc2aa5a6b26bd400616e44867f78d +part_02960_of_04320.hdf5 d81cfb24eccf80b9b14ad829d25317de +part_02961_of_04320.hdf5 eebd869101e0359adc4815db063a91ee +part_02962_of_04320.hdf5 4546ec61dc842ea7a66d40cf64f13364 +part_02963_of_04320.hdf5 9bd2dbd99ec176ef444452b78def861d +part_02964_of_04320.hdf5 afbd470d299a54400a0dcb10b9444f1f +part_02965_of_04320.hdf5 3a8471f58b6525d68f5fac1a06d43e99 +part_02966_of_04320.hdf5 82f9ca231c3d4469f21acd1d9fb03071 +part_02967_of_04320.hdf5 0b835d72787e2247e89080c225226a3e +part_02968_of_04320.hdf5 d263d6c3839b7ea10dd647988930e78a +part_02969_of_04320.hdf5 19f05e5f8b71f7236bdfeb5b931a1f74 +part_02970_of_04320.hdf5 a8d721797ea0c2490f90cb14a64f8b5e +part_02971_of_04320.hdf5 ac9cae714a28feea54fc181ab77acd94 +part_02972_of_04320.hdf5 6f723a033cb6cd95f54aa87ccaa44df2 +part_02973_of_04320.hdf5 c1e272cf441b65232be519f831966158 +part_02974_of_04320.hdf5 7cf2bcd279a725736a7aabceb4df9547 +part_02975_of_04320.hdf5 c8fadd6162807edd1f49d39a8b546b85 +part_02976_of_04320.hdf5 0636740b41539e4ef4e3d2e646d77359 +part_02977_of_04320.hdf5 b6b6b288e4864d316bf3b79d3ac49b95 +part_02978_of_04320.hdf5 05ab7c06e7f9abebb0bcd4579d190665 +part_02979_of_04320.hdf5 2494d792607b1c4ea62ed5b4b8e795bd +part_02980_of_04320.hdf5 00f881881be0d169b936c7c455956f63 +part_02981_of_04320.hdf5 4ba005bc7a948c2611c2e35fe7a995db +part_02982_of_04320.hdf5 015694d94fda31ea2b6fb672a34267d9 +part_02983_of_04320.hdf5 bf31ceb8bebc49c66ad0b03ccaa4c13a +part_02984_of_04320.hdf5 5cb4a3832ea0cd603af4cced69e3c5bd +part_02985_of_04320.hdf5 c2fb8a6ebce540a80180f1da55d5cc62 +part_02986_of_04320.hdf5 b928bc936b97bf4eff706104ae0626c2 +part_02987_of_04320.hdf5 add08d84fc08014bf67658ad7108af51 +part_02988_of_04320.hdf5 
18289770eaac0d0fd0f6b8855a3da6a6 +part_02989_of_04320.hdf5 2ab7d5b12bf030e84c7b729559fa76c4 +part_02990_of_04320.hdf5 a75bd4f8420cdf58d117b741dacd7edb +part_02991_of_04320.hdf5 5832eaba5385758f3a46f898ca1a73cb +part_02992_of_04320.hdf5 a6af4972defa59349bb13d4205ac20d6 +part_02993_of_04320.hdf5 d3222c433a4e3657480158b6cbd298cb +part_02994_of_04320.hdf5 e6cfe56d3ab6943649c2fc8f5ea2e32e +part_02995_of_04320.hdf5 65052a9055de4cb37d347016f761fd7c +part_02996_of_04320.hdf5 f913f56243d6dfbc3477be96850991d9 +part_02997_of_04320.hdf5 76d786717e01b3ac02b150ab7ae700b4 +part_02998_of_04320.hdf5 7757bf965be6270ef0654fc0fc7c259c +part_02999_of_04320.hdf5 b386b45ccb09d43831aa54cde3b44e18 +part_03000_of_04320.hdf5 e0892f2cd707b9448bbba513794def71 +part_03001_of_04320.hdf5 bb69460e54b92628d2f3fafa374f22a9 +part_03002_of_04320.hdf5 6287ce7cc44146ffb9eeae6e4586ea22 +part_03003_of_04320.hdf5 22177d8786d2ea0150c6501c34ce670d +part_03004_of_04320.hdf5 03d0fb9d313de0613fbb2b5d4bc11b66 +part_03005_of_04320.hdf5 9feeaf74accb0bddfb8394eb14adceff +part_03006_of_04320.hdf5 7f38bd9ca93b01a690143cadb390717c +part_03007_of_04320.hdf5 09c8e70bdd33d583da803021f77ff0bc +part_03008_of_04320.hdf5 ce4537ba444d8d01199c44ab7d31b581 +part_03009_of_04320.hdf5 9fa8a50a244c942a754a6f5ad87701f8 +part_03010_of_04320.hdf5 485f5ba22a8dd4824d2c20d84a9221b4 +part_03011_of_04320.hdf5 7d4936cb2de8f393e63219e44c1c474e +part_03012_of_04320.hdf5 ee8cd6de37dadf688a9ac6554ab1aff0 +part_03013_of_04320.hdf5 fd611321fdc541da3eba2564fff0d0c3 +part_03014_of_04320.hdf5 1b157810ac1e5805d0ebc2627fd76dba +part_03015_of_04320.hdf5 98ce2e2925244f142a627b0050eebf93 +part_03016_of_04320.hdf5 7262548cdfc5f08830d9d917f30975a5 +part_03017_of_04320.hdf5 57be611e936776db7f443a33011e135f +part_03018_of_04320.hdf5 cb5a9ead01ab6a37d050215171f67fbd +part_03019_of_04320.hdf5 61b264f854e7368d47f23252604b863b +part_03020_of_04320.hdf5 1c660aa72590efed5025df732050c97c +part_03021_of_04320.hdf5 3070ad648647d05b0371fe34bc06c619 +part_03022_of_04320.hdf5 d84ddbbf3d262b9c44a3d4f3074003a5 +part_03023_of_04320.hdf5 1bdd3edeab5b13069d63cafc7547bb27 +part_03024_of_04320.hdf5 7d037d7ed79345c9c21f23f9d0c29ba3 +part_03025_of_04320.hdf5 b3bfffc38a0ccbf82372d9e2eb1fad4a +part_03026_of_04320.hdf5 9ff1a92cdbb6737f3e569dfcf022f5ca +part_03027_of_04320.hdf5 35d2d66306d4cbaa01df168e6d9ae4cd +part_03028_of_04320.hdf5 983632670481907928ecb3cf9c22c5c6 +part_03029_of_04320.hdf5 22c09934e8608911b98a1688f2f57364 +part_03030_of_04320.hdf5 3e0e623edcef69bd186b3f18f95790b1 +part_03031_of_04320.hdf5 7f343e0a5143bf8fbba1a7a163adfeb4 +part_03032_of_04320.hdf5 4e77e76f38f5aa7576ce97d40d50dfb0 +part_03033_of_04320.hdf5 6c3cf5047699218c308f9b47eb3316fb +part_03034_of_04320.hdf5 7e1ed5ff1371159250c35aa8b8db4370 +part_03035_of_04320.hdf5 b37d59e531eeec4d4dd97bc71b9d3606 +part_03036_of_04320.hdf5 e5627d8c7eb3a6648c001b8351f62490 +part_03037_of_04320.hdf5 9e220025ea26f32e50ea2fbab5357820 +part_03038_of_04320.hdf5 feb64528bc1b9d9d77c3b91708c6c50d +part_03039_of_04320.hdf5 46695ee2a2c7b9a5b1285beba467dd97 +part_03040_of_04320.hdf5 3707af1a8f020c3cf9150ef7ea976eb4 +part_03041_of_04320.hdf5 b5bd494b61e61caecfe74116fd456a38 +part_03042_of_04320.hdf5 00fc4119df78c01d0992bfd330b099cb +part_03043_of_04320.hdf5 533b38d3ccc55e4ac132e94c5246e55e +part_03044_of_04320.hdf5 9a9b93c89bdd2eac89f9d7cab82bb797 +part_03045_of_04320.hdf5 d3022bb7e3a3c7967362027440d0f760 +part_03046_of_04320.hdf5 d308c134a1528faea8f56791ab2360f1 +part_03047_of_04320.hdf5 8d2c24aced8972f636eda8fc999f473b +part_03048_of_04320.hdf5 
b869f2654e4c0688f59ac256b8097506 +part_03049_of_04320.hdf5 da499a0991a4aa2c80eded0405fa79dd +part_03050_of_04320.hdf5 d020932962a0c48d8809399dd7ed13b2 +part_03051_of_04320.hdf5 74f35b45993a10ef7f16873dd4c4221d +part_03052_of_04320.hdf5 eb8494222701f6e25d334ddefc53d18e +part_03053_of_04320.hdf5 a7d419ea90d239e2dd661911c6e7eaf2 +part_03054_of_04320.hdf5 1f5dde25faeb3e4867f6fd6dffb2f2f4 +part_03055_of_04320.hdf5 8e72b730a7380611833f344050ef1908 +part_03056_of_04320.hdf5 bf043666a76e737261f774c5695d695e +part_03057_of_04320.hdf5 f70f2deeada1a60ec6b28e0e8a1fcabc +part_03058_of_04320.hdf5 6372334b6915aaeae2bf7d50d4a154e0 +part_03059_of_04320.hdf5 8838b4895617d66795c2cb4ab540fd7c +part_03060_of_04320.hdf5 72919ba0fc73c4799155533c778d0bee +part_03061_of_04320.hdf5 d80510896b5a4a71b118c6ca6856b864 +part_03062_of_04320.hdf5 d501c71c0f1df28799a902c104f58406 +part_03063_of_04320.hdf5 a22519632e9d32c4f631b20c30f9b601 +part_03064_of_04320.hdf5 cbdd2006390c547acc18f212e7b37717 +part_03065_of_04320.hdf5 7cd2a53a2be46e219d74f20e9951936d +part_03066_of_04320.hdf5 54a218a04a7e435c2c5f33a93e5905a1 +part_03067_of_04320.hdf5 d9319ef5c112b4399e6f81e53b613a66 +part_03068_of_04320.hdf5 10835297613804083a72852cbe88fa1e +part_03069_of_04320.hdf5 69cbda312a2db519131db7653ec0ed37 +part_03070_of_04320.hdf5 fe7d354bb5c6038e0c1438050a72209f +part_03071_of_04320.hdf5 9b286f2b0f5e7b1ef9fdac37b0874d2d +part_03072_of_04320.hdf5 1c7efe14060600e17098a4af7b5f844f +part_03073_of_04320.hdf5 7c8d2ac965185ba6ff955fc28069ce6e +part_03074_of_04320.hdf5 76129fc0aabffac5b39083bb73df947b +part_03075_of_04320.hdf5 3e39779897beff4085b47819e01213ae +part_03076_of_04320.hdf5 14dbb867a222151706cccde663115fbc +part_03077_of_04320.hdf5 53433021ee6d6ed11e49e5dabecde7d4 +part_03078_of_04320.hdf5 cc23557b2b0f671d7db983b90d77ed45 +part_03079_of_04320.hdf5 4a13a937771e22c4e85873a5905cea55 +part_03080_of_04320.hdf5 41d5aba690081d7ddbf13fe2cae47471 +part_03081_of_04320.hdf5 d2e2352502debf8bdfa5b7e6b1932140 +part_03082_of_04320.hdf5 c38e77c80b4e13ff631f1e3391917b2f +part_03083_of_04320.hdf5 36692b37e1e4c0bf283ee41a34a43707 +part_03084_of_04320.hdf5 99f82f0e74c480d9ff985f0c7aab9af7 +part_03085_of_04320.hdf5 6bee12ad7fba4e005e7d9f6b50be3ea6 +part_03086_of_04320.hdf5 be6e95f252c040e6f9bc6447568af149 +part_03087_of_04320.hdf5 6900e34fd10433da222c19a10188412b +part_03088_of_04320.hdf5 b1211bbfdf034541f0a129104d36de8d +part_03089_of_04320.hdf5 c206c93ff68d6e3559da883d98471418 +part_03090_of_04320.hdf5 6855e95260bb4fb6c0e63f4cde9966c4 +part_03091_of_04320.hdf5 b6ea39171149bea294fac2fc3b1d059a +part_03092_of_04320.hdf5 b0bd2a72e3ad23d2f4fbf21ab5819cde +part_03093_of_04320.hdf5 92b0a88279be9a6e7afb30bb2882b437 +part_03094_of_04320.hdf5 7369682b068869957e52ab7b2a5906aa +part_03095_of_04320.hdf5 edbaf7716b24a29c27da0bba654b9c48 +part_03096_of_04320.hdf5 5bb4f8fda0ab2d3110aaec068cff12cf +part_03097_of_04320.hdf5 574d560e555d78b10d617ca69bec87ab +part_03098_of_04320.hdf5 cb791902e4bbdef254d996f77e33d394 +part_03099_of_04320.hdf5 902d3240c6e58e83368f782009e86ce6 +part_03100_of_04320.hdf5 1f74f84978f634ce0f96f70aba3c77a4 +part_03101_of_04320.hdf5 779817f840f1eb2ca662fec499683557 +part_03102_of_04320.hdf5 b3e07b3fd4d6d993ae0bbf61597a9055 +part_03103_of_04320.hdf5 34f23c560dfd04d6b10c830e53ac8512 +part_03104_of_04320.hdf5 297be38e321bc2517bf6ac123c95584e +part_03105_of_04320.hdf5 b9c13b304d8d9d3b66cfb2361ce14b9e +part_03106_of_04320.hdf5 cbba67d6b9a34e182815e6484cf7fb75 +part_03107_of_04320.hdf5 f8ba581e76ebfca72f5b2296674aab48 +part_03108_of_04320.hdf5 
2f515afec387ac57adb158e2d6fd8299 +part_03109_of_04320.hdf5 093bb37f7233e0154ccadcd8350f80b2 +part_03110_of_04320.hdf5 11534a93b2b01362d2c777a786714c9d +part_03111_of_04320.hdf5 d8a051232d6bada89bc666d59abab459 +part_03112_of_04320.hdf5 810a50708933a24b9c9c0acdc5383d7d +part_03113_of_04320.hdf5 8c08a56c29b674408872b3d8f31e7dd8 +part_03114_of_04320.hdf5 312450e23bd9f9ad2dbabf98b8d49a3e +part_03115_of_04320.hdf5 e67f97e3b0801aad959512dc79fa6bf7 +part_03116_of_04320.hdf5 c4e7e21ed4b221631513ec60b21ddf77 +part_03117_of_04320.hdf5 a49f3050066c6e9964c0ab0ee3060645 +part_03118_of_04320.hdf5 a0e4623ecf16bcb933b120011e781fef +part_03119_of_04320.hdf5 b7b430a6994117408e14bc2fa8c69ca5 +part_03120_of_04320.hdf5 7790520cbbf9ab8ac195aea909ffe643 +part_03121_of_04320.hdf5 c5ea172bc1a666e5036f40e309994364 +part_03122_of_04320.hdf5 0c4568557d572fcb63680bf7a1d11a59 +part_03123_of_04320.hdf5 c4e25f14272edcfd83b8fbe924b74a7f +part_03124_of_04320.hdf5 a3c730a81ef43731535af9636d557e58 +part_03125_of_04320.hdf5 1940fc0c37f3af5729c0014e2340ca02 +part_03126_of_04320.hdf5 f3b58309577fa62b47d03e12b16c1f04 +part_03127_of_04320.hdf5 108b34c49235ab0f6ae4606668cc98d9 +part_03128_of_04320.hdf5 6a344cbd4e39e3578f3dedb6cf248841 +part_03129_of_04320.hdf5 e64ab0c5f68683b5b9346e3a3f5cba0e +part_03130_of_04320.hdf5 a242a65ef6866fac77cd9e8b9f0e218e +part_03131_of_04320.hdf5 358bd656af150e2ea06fb3ec0a4804c9 +part_03132_of_04320.hdf5 958a9207baee90f8bcc81bd1a31d8b20 +part_03133_of_04320.hdf5 5a2552001733ffa862523de7f7324dce +part_03134_of_04320.hdf5 2ac2915a4fe2ed84f0b88cd2d49a4103 +part_03135_of_04320.hdf5 536b8a22bceaf7b1027850a0ee42c0d8 +part_03136_of_04320.hdf5 b2e32db8dbaba69fe8ab5259e792dac1 +part_03137_of_04320.hdf5 b0eabdf9431555d43f0834bf354e1d02 +part_03138_of_04320.hdf5 aee3ad73fe264d20781f3e6fb2284d7d +part_03139_of_04320.hdf5 2b7ca08095cbd4f99bd7959ae856b3cc +part_03140_of_04320.hdf5 686e4705de939b04a3cfb4742401c919 +part_03141_of_04320.hdf5 67a1b3107701af2a7949ec1d7573bbd3 +part_03142_of_04320.hdf5 11d54e7adc4377394f18815d9020c457 +part_03143_of_04320.hdf5 79d42e0c4545df8c6035730836c3a610 +part_03144_of_04320.hdf5 236b077c8b85fc04805ae7a288c1bd02 +part_03145_of_04320.hdf5 49b539d6a5de8a1ef6aac8e626c652d5 +part_03146_of_04320.hdf5 58ed7ab08c5197f9b527d794d28091c1 +part_03147_of_04320.hdf5 f0ce6bbf5bc0c286e624ece02f89a291 +part_03148_of_04320.hdf5 a62a0cf6ec7d7eda4e8e1887074cd5b6 +part_03149_of_04320.hdf5 59bfb89ad442cfb02bcd06d472a5f67b +part_03150_of_04320.hdf5 31546255ee50b4aa186b9709e49d913e +part_03151_of_04320.hdf5 9434fae1501688b38adf72afb19ba6d6 +part_03152_of_04320.hdf5 7535388a2cdeef67b8030d7b0ebccead +part_03153_of_04320.hdf5 6acd93a361c5382562e4e4e4a8a94d54 +part_03154_of_04320.hdf5 32e46c187dcd20a2a006a8c7a2fa769b +part_03155_of_04320.hdf5 6008912e41241c8ff71a98ca0ae69fc6 +part_03156_of_04320.hdf5 68111c59552a87fc9d4acbb268e3ec8a +part_03157_of_04320.hdf5 f29b1c6ff9d9c191e34d7825de40fc3a +part_03158_of_04320.hdf5 4084cdbdb12e3dd3e75f1d3867c105d9 +part_03159_of_04320.hdf5 52c13c558c2c626e2a627a97e3a91cc3 +part_03160_of_04320.hdf5 a46d325eddd26c8a196e201d1ad9cb61 +part_03161_of_04320.hdf5 72da29cba44dca229ec7a7d7871973ce +part_03162_of_04320.hdf5 b1adcf6c4c61055821d4a1da744acc7b +part_03163_of_04320.hdf5 a4a548efaf67eab59d610a7a2c36246b +part_03164_of_04320.hdf5 96ec776c61c8afc52625f49f7cfe4065 +part_03165_of_04320.hdf5 72be3e91ee9b4a32ca1f040337128200 +part_03166_of_04320.hdf5 f2392e10d218e975fd709244bff5d88e +part_03167_of_04320.hdf5 519013dc4fc0c29ee9f3f1adbc2851e4 +part_03168_of_04320.hdf5 
b6c56896757b0b2d983872aeb5b010c7 +part_03169_of_04320.hdf5 935160c6fddf9201ef9beabb44c71cc4 +part_03170_of_04320.hdf5 f2f494264dfd7912981a4620ea13cf9f +part_03171_of_04320.hdf5 7ff5341b5db7200b86615e175028e217 +part_03172_of_04320.hdf5 f2c1ed35c668578fe5d6d12c4eb33ef7 +part_03173_of_04320.hdf5 da141a0e4fdb9d9422f0c5b09da73c91 +part_03174_of_04320.hdf5 e313da22564b1741f6b95d155d210fd9 +part_03175_of_04320.hdf5 8e91fc7a4cff9e903f220bf8e89eac6a +part_03176_of_04320.hdf5 cf692bd1f34f76d163c7acc43e6f92a7 +part_03177_of_04320.hdf5 893e25ffb2954be3eb0b0c06aa41d5dd +part_03178_of_04320.hdf5 f58e320366c20320af8a2656f77800d8 +part_03179_of_04320.hdf5 403a6c2d8ae764875b2d305460f45711 +part_03180_of_04320.hdf5 d39de18ee21edd34ca057633f4e6474b +part_03181_of_04320.hdf5 d9cea76a683e23503497c7d2d619f654 +part_03182_of_04320.hdf5 af9060dcaa81f815853ba34067e8414d +part_03183_of_04320.hdf5 54ed70a0e3be9e423fab86a70907f5b6 +part_03184_of_04320.hdf5 cef795b8a433bd36d8e4ec6475e80e20 +part_03185_of_04320.hdf5 78b5ac8ec79248fdf1f306e53b5d6475 +part_03186_of_04320.hdf5 db121ee7606ad6ee3f97c9e677c046b2 +part_03187_of_04320.hdf5 20a72bea3f794b1db588a0eb1bd4288c +part_03188_of_04320.hdf5 ac8a1c4973138b6cb0a5259fa4d1500f +part_03189_of_04320.hdf5 8ebba2aac99355e398e9da283451c463 +part_03190_of_04320.hdf5 a980bef9248bee4c54f05fb05bda7e5a +part_03191_of_04320.hdf5 2059a37059debf694d101dd4cc78294e +part_03192_of_04320.hdf5 04aca7dce6d624327e700a4de1857674 +part_03193_of_04320.hdf5 687d1160bf980bf9a0f021dd6152896a +part_03194_of_04320.hdf5 5d7133f9aca3828736dcff646831aa4f +part_03195_of_04320.hdf5 f950a8b419d65a9045b881b877d5e244 +part_03196_of_04320.hdf5 b6f5f7e93c3dc710d4f057228c2e174f +part_03197_of_04320.hdf5 36e7f9141debb6326006b72044bc6ce3 +part_03198_of_04320.hdf5 bb47c8e08a4312c3cc4b1e4c17a1c2a1 +part_03199_of_04320.hdf5 7100cc9aaea78e68ace69b7d12350e9a +part_03200_of_04320.hdf5 57f3cb7de705ee1ce508da237849ba28 +part_03201_of_04320.hdf5 1a4217ca47d8658c03f88b9046063650 +part_03202_of_04320.hdf5 48ee7b4bdad536f618157b8f2076daef +part_03203_of_04320.hdf5 582425144bd649a8c4df6615729915f2 +part_03204_of_04320.hdf5 bf639ea9d0986b53c8029c97ec41240a +part_03205_of_04320.hdf5 07ab98526a1ff3625c8b7487df9fb3f4 +part_03206_of_04320.hdf5 c641eee2b02acd85f217c820b336c392 +part_03207_of_04320.hdf5 bd4b9abebb56818d16904e762e0a90d9 +part_03208_of_04320.hdf5 5842b6eb50bca669f73271d9714aa36a +part_03209_of_04320.hdf5 035d22626aede89e71906cf18725840a +part_03210_of_04320.hdf5 beb85629b623ffba67e5e2faea3a7f38 +part_03211_of_04320.hdf5 3bb7fa87a10de4cc28b851d12cce1342 +part_03212_of_04320.hdf5 b070ea3c42062b75e28a735d4730f313 +part_03213_of_04320.hdf5 2fd66e73c204d95db9b13196da4e7941 +part_03214_of_04320.hdf5 b80472e831ba1066ceab57141c7917f5 +part_03215_of_04320.hdf5 f87577a445ccc3891d742fc7cd8b796c +part_03216_of_04320.hdf5 cc579e4e6a1f982f1b305a4aef3af5c3 +part_03217_of_04320.hdf5 b8b35799fe91a534cc29021725a8ee6d +part_03218_of_04320.hdf5 e1e31ca27578634cd01272d47e0ecba2 +part_03219_of_04320.hdf5 77fbd30889ffe2cca8a26baf3177b4a8 +part_03220_of_04320.hdf5 ad85f2e4f967c787aa05baac7f7bceb9 +part_03221_of_04320.hdf5 d9941a583611b657609fb48b073f9262 +part_03222_of_04320.hdf5 e20dece573c5751e2695f212d109bebd +part_03223_of_04320.hdf5 09a9089455d49f6c025e028b839f33c1 +part_03224_of_04320.hdf5 8a6b1921bede647a53deecf1ccf123a3 +part_03225_of_04320.hdf5 095b2a7090644a8f6ec3c13e5ec256c2 +part_03226_of_04320.hdf5 8d9acc59e3737d67dda5059f2614b734 +part_03227_of_04320.hdf5 098519d7c3a672ba23e82bcb9c930336 +part_03228_of_04320.hdf5 
ff7e6931f79245ecc2a03b4ec5a32d7f +part_03229_of_04320.hdf5 559d1ad1093ad502ab3e3d0deab86e3c +part_03230_of_04320.hdf5 804a01789bba76cfb934ccd24612319c +part_03231_of_04320.hdf5 f8c6209fde4e2bf421fcc3137a244f85 +part_03232_of_04320.hdf5 6def167526c8f36981855bd373b98222 +part_03233_of_04320.hdf5 b0e61d5e917c4666d38a669a3b437308 +part_03234_of_04320.hdf5 5cd636c880ac35f4f2fa88efc24e037b +part_03235_of_04320.hdf5 6197cfc7a6246deab4d09c59b20b5710 +part_03236_of_04320.hdf5 959d90455685b1b32023a193b1acc68c +part_03237_of_04320.hdf5 f0c3e1f27faefabe9186943922219cff +part_03238_of_04320.hdf5 8a8767600967d3b27d2c63d7d3c17440 +part_03239_of_04320.hdf5 2e701eb6328718d9d9e7ab9ea52f3a7e +part_03240_of_04320.hdf5 22ed4a861eb6f3899ecef53ccb9a4bd6 +part_03241_of_04320.hdf5 63464e0a567a055c9d7012c899bc25b5 +part_03242_of_04320.hdf5 d54cfbccc4dce9f68306c7b19577a885 +part_03243_of_04320.hdf5 23465ab391fbb418615606f4061ad5db +part_03244_of_04320.hdf5 675c8122391c8e24c8fde3e502721f13 +part_03245_of_04320.hdf5 ebff0350dc9a51eab9139c50679877a5 +part_03246_of_04320.hdf5 dab03b1e916673bbf49f145812df9b79 +part_03247_of_04320.hdf5 958783ee9c78d6a6d2b47a9242916fcf +part_03248_of_04320.hdf5 a1a763265f94c69dd614d0b683f0b069 +part_03249_of_04320.hdf5 f5e0cd0f0c404e05444bec0e9b965cf7 +part_03250_of_04320.hdf5 d62b5af99ac4f2c3f1e738dfcb51bee9 +part_03251_of_04320.hdf5 55c5cbd91c16f07316a31bdcd7d1e281 +part_03252_of_04320.hdf5 024aa2c41d21bd53afcf8fcfd5788911 +part_03253_of_04320.hdf5 5ca097748c6670cc28718c4519f5b256 +part_03254_of_04320.hdf5 8e18bba1e10367d07bf6a623fefb18b3 +part_03255_of_04320.hdf5 6ba2fceaa3ac5a3ca4b5a9977cce1647 +part_03256_of_04320.hdf5 d3204666b6fd1ef34b7f5126686d03bc +part_03257_of_04320.hdf5 4bb53373a42cc3497b4028b9cd6c63b1 +part_03258_of_04320.hdf5 56649e50f85fed1cbde6b2cad06d3c00 +part_03259_of_04320.hdf5 32b343cb47098656cc0c3437c5033715 +part_03260_of_04320.hdf5 346ed478a05f96210ea1a581d2d5abbc +part_03261_of_04320.hdf5 07c30b6c70e8c7e35aa1b12d6e8c0766 +part_03262_of_04320.hdf5 671a68626c576d03ae6a01fc18cfbbfc +part_03263_of_04320.hdf5 2bca9d27b32bcb73f1c936d901124353 +part_03264_of_04320.hdf5 44afa90e4f41dd8c7afec343e21743db +part_03265_of_04320.hdf5 2dd212dd2b84d2778260b4ec004c835f +part_03266_of_04320.hdf5 1007dabfc731893f5bd6a60dc43fb525 +part_03267_of_04320.hdf5 ae4a9a7b4f4a97f7b6fadd749b99851c +part_03268_of_04320.hdf5 711e856cefc7e0135084c162aec5947d +part_03269_of_04320.hdf5 19d803ed2d7f4a874c4ae22de0f2f880 +part_03270_of_04320.hdf5 8d9e10849714b88ca35b32d0f13edf4c +part_03271_of_04320.hdf5 a27cde131b840e7c7ecb5e046a8ad8a2 +part_03272_of_04320.hdf5 2e1b718df1be6cf59a9a5906d8b7d472 +part_03273_of_04320.hdf5 d3797b0af3692a9f72466aa850c0e318 +part_03274_of_04320.hdf5 0906f91e9d8e74c43b5ed5a32db4ed68 +part_03275_of_04320.hdf5 b6863346ecd5863a23807a8758109a19 +part_03276_of_04320.hdf5 5c4ac0621d66e6fb1a5d159bb0b8c9fa +part_03277_of_04320.hdf5 252e4d988f27e0257beac5d08a484a1f +part_03278_of_04320.hdf5 0ea0f6d67d49f6593b1abf1c4b02a995 +part_03279_of_04320.hdf5 7ff6548bedf3f7aea523779e24be64dc +part_03280_of_04320.hdf5 925b89d0f7f359eab06edc7b6f962b2d +part_03281_of_04320.hdf5 4eabea6b44e8289806de062ef735ea40 +part_03282_of_04320.hdf5 b01ac331647738687b1a47e19456946f +part_03283_of_04320.hdf5 5cb36ae1441a915d5ec22707367512a1 +part_03284_of_04320.hdf5 1530e720c5e1b1d34c7e11c216bdbe11 +part_03285_of_04320.hdf5 a18a2003fdd2038c8923da6b214d6a8f +part_03286_of_04320.hdf5 f802e72723cfd2a0270fbceccfee92bc +part_03287_of_04320.hdf5 3f7cf5f7cf7db0801f2e4601cac7de81 +part_03288_of_04320.hdf5 
087a5974226a4c6a23fcc3b8a1a5075e +part_03289_of_04320.hdf5 807a2a914141b93b0ce9bb29518e2487 +part_03290_of_04320.hdf5 07e76c09f4bbc5c83dcc1c8304dcf1c6 +part_03291_of_04320.hdf5 6dcb4525567eb4ef1df7a183a3b7adb1 +part_03292_of_04320.hdf5 458222c2eb9389659477aac3fccb6c4f +part_03293_of_04320.hdf5 6333ed754c4d4aaca7f1ea18be232793 +part_03294_of_04320.hdf5 04f16db285fa292c1e544f8190909bca +part_03295_of_04320.hdf5 6c70b1e037a3cddc81b6f4bdb8dc01be +part_03296_of_04320.hdf5 686e37acd971f55c285f5e039a66cf70 +part_03297_of_04320.hdf5 61ca38f81a0d5771e0a808c7254759fe +part_03298_of_04320.hdf5 1a09f92d1cec20dad3169f5e5297bbbb +part_03299_of_04320.hdf5 a8554a3065445e4e7ee418169ec3d3c0 +part_03300_of_04320.hdf5 88ecbf2402efb2269c119c8f5f3b8426 +part_03301_of_04320.hdf5 6e9945b53c84f42a9a912766eb14eb1a +part_03302_of_04320.hdf5 d92fc588caff1368f04e38d48c4486d8 +part_03303_of_04320.hdf5 ecdcb1f532bd59826e27243e309eb53a +part_03304_of_04320.hdf5 4e0021bd0c073b69e52ef49562b89935 +part_03305_of_04320.hdf5 07f7c536e23739fdc31979e29bd6047f +part_03306_of_04320.hdf5 027592bbdd59fce959b2ecdbc8577c25 +part_03307_of_04320.hdf5 8dfce5b3812fc4a15f5164f80bef4c5e +part_03308_of_04320.hdf5 ece94af43dfc8b214b4d051a9fb7332e +part_03309_of_04320.hdf5 5cd8ddba5fc448ae91fdee53ad06ce21 +part_03310_of_04320.hdf5 e84aa4150b2fcd1c6a9de25bdceeea96 +part_03311_of_04320.hdf5 64e399a0183279aa58d95bc2ba9a8cba +part_03312_of_04320.hdf5 b873110ad6e9d3efd0f90ce5e9af8e47 +part_03313_of_04320.hdf5 ce2d943a08622fef0af6b63ec96b97cc +part_03314_of_04320.hdf5 ace0cdb927f80361c1d9c0bbd8ad0b06 +part_03315_of_04320.hdf5 a32d6eb7ae147f48c80c7dc3d322ef22 +part_03316_of_04320.hdf5 4d964ae50fe6674bd206799368a7236c +part_03317_of_04320.hdf5 ca0c985f317591043b1f18014e8165e2 +part_03318_of_04320.hdf5 acff857de937f1108c279d620895e855 +part_03319_of_04320.hdf5 385ddc1204f047464c82c75275f381d8 +part_03320_of_04320.hdf5 f56bea2b1b03846c4292f65106acbb7c +part_03321_of_04320.hdf5 9f08ae363ae9cd4d2abfd1431cb1cc85 +part_03322_of_04320.hdf5 571311f83a28e84e38b57a4fae415531 +part_03323_of_04320.hdf5 52a331cbd365b366643edf04106c2a7d +part_03324_of_04320.hdf5 4add9fd4851b3c48b07c469218b35c46 +part_03325_of_04320.hdf5 e0956b512df61f6b60a12357bc43773e +part_03326_of_04320.hdf5 62ff3975ebfdfb47f1cf91659e0e7b08 +part_03327_of_04320.hdf5 f41f4ebabfe4c661d363e9697eb471a1 +part_03328_of_04320.hdf5 dc7413ddad775ab669caaaba29409ced +part_03329_of_04320.hdf5 11abadd7823d3dad130cc32a53bcdda9 +part_03330_of_04320.hdf5 3924ebbd234b6d0b8f4a199cd10e34e8 +part_03331_of_04320.hdf5 6ffab846a77d90e37135493f1448eaf1 +part_03332_of_04320.hdf5 6f8db0af08b839adced3f7ef64b9cbf1 +part_03333_of_04320.hdf5 ec0234bfc9a37f581b192a7a886bf3e7 +part_03334_of_04320.hdf5 dab2f604d7dd278079b2170ec2d9e880 +part_03335_of_04320.hdf5 f31c8a701810bf3854770f6906986f03 +part_03336_of_04320.hdf5 553286b2bbef1a7f8a0581db6975e837 +part_03337_of_04320.hdf5 ab6fe8306aa1d301d62cbcd4e35df117 +part_03338_of_04320.hdf5 07b8b245408285fefc753307b75f81c2 +part_03339_of_04320.hdf5 2bef8d13d4e5c6e765e4ee085674a701 +part_03340_of_04320.hdf5 3ef0c8462205840d90d6cfbbfd9b9b56 +part_03341_of_04320.hdf5 011b84c9cd845688cc3028c39c378408 +part_03342_of_04320.hdf5 ce46feaa09bb122f3a4eab8fb29d7d2f +part_03343_of_04320.hdf5 ac3572664688f76ac8aba1d2dc785f03 +part_03344_of_04320.hdf5 7e9c4bd4826e3231b8b3d39f6be22651 +part_03345_of_04320.hdf5 aa2e45f324d99942d4830994c42c58ee +part_03346_of_04320.hdf5 303ea924550fdbea58f7502d1c58818a +part_03347_of_04320.hdf5 10d1e2996d4f475168e95f94dd34da6f +part_03348_of_04320.hdf5 
e47fb46fd22c3a037873d41029eb5a92 +part_03349_of_04320.hdf5 f1743f9770183780aee63d778222be53 +part_03350_of_04320.hdf5 caebf3f57b24be2b6548deeea0dd7ce1 +part_03351_of_04320.hdf5 83a4733dc1d989ab069fbb6057f08ad0 +part_03352_of_04320.hdf5 136fb11d14e383a088bfc604f0edd77c +part_03353_of_04320.hdf5 6df68619bc9c416dadb6b4af5ffccc2f +part_03354_of_04320.hdf5 efc4638d95e6c7884053cfb727e0a60f +part_03355_of_04320.hdf5 73b99469fdef13c751d1ec99535c0318 +part_03356_of_04320.hdf5 df3ff813b74582721280b1653c189adc +part_03357_of_04320.hdf5 92bd8a5c2d3f71983e720c9e94cd4a1f +part_03358_of_04320.hdf5 9fa7049d69e84f1ea1fa0ca64701fbba +part_03359_of_04320.hdf5 2f9de475dd7e2a5a9ccd67e0d9d58103 +part_03360_of_04320.hdf5 c8fbc97bb533b8eb42c88c2c946d5c21 +part_03361_of_04320.hdf5 95ccd410e3d718f614bb8f4565b3380a +part_03362_of_04320.hdf5 bb172d898d8185c5e1c891477070abc8 +part_03363_of_04320.hdf5 8bc9e57239d1b8fa5065f96de668cfdb +part_03364_of_04320.hdf5 a1fb43be67cc4fc6f2b1244341820be1 +part_03365_of_04320.hdf5 a1fc47448723bfa79dd15d2604a8c6db +part_03366_of_04320.hdf5 7cbf11fe9dd73d5cfd09496e776117a4 +part_03367_of_04320.hdf5 ff324196fd3002cadcc3fd382c0f50a8 +part_03368_of_04320.hdf5 071f1d813d2c016cac363dcf0c02cdcc +part_03369_of_04320.hdf5 9d029a28f8b88f1949a9d43ee0869645 +part_03370_of_04320.hdf5 0b4b648346a99a85fc58d89fcb86b38e +part_03371_of_04320.hdf5 d1000a4b1d10993036f957bf06ac7511 +part_03372_of_04320.hdf5 5cfd81e9bd3dac82f1c90046b7820ee5 +part_03373_of_04320.hdf5 ad0142827488f103e6fa62d3273095a7 +part_03374_of_04320.hdf5 ca3b01cbb236a526de60e8615c9540b1 +part_03375_of_04320.hdf5 dc8e89ada0ac4a04b04d970d134b2c0c +part_03376_of_04320.hdf5 31f8ace477771ea68a3acb7bb4ceaf24 +part_03377_of_04320.hdf5 1c51673cdc55fa2b8e54ca9698833a0b +part_03378_of_04320.hdf5 34ad443ab45114cf28da41738bc5cb07 +part_03379_of_04320.hdf5 dcb0ff23ec7244f8ae767ec1255fa5fa +part_03380_of_04320.hdf5 394e348939ebaadd61d05a7cc7f35a0c +part_03381_of_04320.hdf5 67d6cd7c2d19066b6dd7f0b5594a0cfe +part_03382_of_04320.hdf5 43a7f0d1a561cca99d5227af8fbbeb8c +part_03383_of_04320.hdf5 586a4b5095255cfb515255ef5973c5ca +part_03384_of_04320.hdf5 c47b4e563ee909efd79087c44c04793f +part_03385_of_04320.hdf5 d98a0182ea054ff145d5442ac3a7065a +part_03386_of_04320.hdf5 3d2eef467c68b184d9c6b82f3a52603a +part_03387_of_04320.hdf5 8b47e2f3f3ee2bbe4462dac6dbd5b4ab +part_03388_of_04320.hdf5 89f622f73034c7008e9316a9a550132b +part_03389_of_04320.hdf5 dd6ada0336bbf2174f363a8495c8eb97 +part_03390_of_04320.hdf5 f8b5cfdcbcfbc857008a807a7534cc15 +part_03391_of_04320.hdf5 5a173d94cfacc56b10a67c1f26fe1e7a +part_03392_of_04320.hdf5 e3bcba06ad295608530c177e5269b467 +part_03393_of_04320.hdf5 d6ef57a52378c910f041f9a6cacb5700 +part_03394_of_04320.hdf5 422f5f804eee6fd658167b4874b26f5e +part_03395_of_04320.hdf5 b14b97bdf2110fb09a099e39ab88f242 +part_03396_of_04320.hdf5 dd5e18014a6027e275f7467c791ed164 +part_03397_of_04320.hdf5 7cbaa17c21045e0ebe48af8f0796cc1a +part_03398_of_04320.hdf5 152a7aa951e59ad08409898615031fab +part_03399_of_04320.hdf5 78f3f49795dcdc9742d9c2fe1f25dd76 +part_03400_of_04320.hdf5 3d16066d6004046e57dd2c085e020bb0 +part_03401_of_04320.hdf5 b61dfcc0f03d077e147ea721e38731b2 +part_03402_of_04320.hdf5 e23b4d9efcbfa0132a6b1513e0447dfc +part_03403_of_04320.hdf5 7e1a486550f0fa504817de1d8614c20e +part_03404_of_04320.hdf5 173819da4d476a3c00c823cf7aa2465b +part_03405_of_04320.hdf5 3634b613d6ff13cea6db8b1f5ff0c64c +part_03406_of_04320.hdf5 0862459bcd505d77dcb3e912370a4206 +part_03407_of_04320.hdf5 68ee8d6632f6c232623cd64c500e6ca1 +part_03408_of_04320.hdf5 
8298ccc49c64ffcfde8c81eefb6cc7bc +part_03409_of_04320.hdf5 7725f5b39c24444acb748da679e48934 +part_03410_of_04320.hdf5 d9a3887038d974295592c655c968b7c6 +part_03411_of_04320.hdf5 b7e109a28be530cedde347eacdb94286 +part_03412_of_04320.hdf5 f327760effb983415283d0bdfce99167 +part_03413_of_04320.hdf5 de29350b4eada270ce0fea81a43e2b8d +part_03414_of_04320.hdf5 d3c9a294627f4f09f62ed5786ece0faf +part_03415_of_04320.hdf5 3690b806e1da020b7939373adc1e3a33 +part_03416_of_04320.hdf5 2ba6d64d104ea4473c40b5717665f37c +part_03417_of_04320.hdf5 af04b2045bf409ac0e042d926d84d60d +part_03418_of_04320.hdf5 7bd81e51b20ebde22d8d9170a5f2a4f1 +part_03419_of_04320.hdf5 2bd5b29626c21fd01abc2aa24a8baeb3 +part_03420_of_04320.hdf5 b7ebf33e6656dc65e3322c5994e5f7e2 +part_03421_of_04320.hdf5 bc14c26e3814117b0614c92ea5b6de2e +part_03422_of_04320.hdf5 675c739a40c0531afe59d0fe193333ef +part_03423_of_04320.hdf5 0b9c5ce2ac2882777084347fa3e45897 +part_03424_of_04320.hdf5 f2d26fe38ff2e5f9210799553f5a4004 +part_03425_of_04320.hdf5 f35cf6ccd3fd60d3517e297464cbaa88 +part_03426_of_04320.hdf5 b5d1869a59c7570dc4a65130c836ffe5 +part_03427_of_04320.hdf5 551b804fc5b055f136559c70c125553d +part_03428_of_04320.hdf5 0785418bfaaf8a959871244604c0f3c2 +part_03429_of_04320.hdf5 d562cd3e0f995a35443d65959f9a4b23 +part_03430_of_04320.hdf5 3ab80aad7ade1ebbaffa64f9b1237f4c +part_03431_of_04320.hdf5 443273787a30723faca0f489d89c6223 +part_03432_of_04320.hdf5 512cdc86d445254cdda5ee8e7adf77e7 +part_03433_of_04320.hdf5 94dcf3a0482e618d9b584dacff8be140 +part_03434_of_04320.hdf5 130ccecb2ee9108b3219a942558f8bab +part_03435_of_04320.hdf5 8050cd27806bc3d9f379de655e51d173 +part_03436_of_04320.hdf5 288239e942e894ec37f10079da6809d4 +part_03437_of_04320.hdf5 5631b52af4af1f4a9cbab9ad8a7202e0 +part_03438_of_04320.hdf5 8384b6a82f987d977e3e0abed48639fa +part_03439_of_04320.hdf5 1fcdafcd82efb3661f4d00fcd1490088 +part_03440_of_04320.hdf5 29efe102a3d4e93c95225aa41c6aed51 +part_03441_of_04320.hdf5 872fe08f588040579959500b92117d62 +part_03442_of_04320.hdf5 2d5cc8dbeeae30ebc13909bd25664f78 +part_03443_of_04320.hdf5 f0b216cab42bd7276ad1dded7eb25e09 +part_03444_of_04320.hdf5 e73a924b6a6a579a84ec0acb086c1aa2 +part_03445_of_04320.hdf5 036a46914d5f8742f50d8535a5c1bbc8 +part_03446_of_04320.hdf5 dbade371aa4e8ac8269686af98abe8e1 +part_03447_of_04320.hdf5 0735b1764f96aaa17570cf7d32756221 +part_03448_of_04320.hdf5 fc7a8a9f9d1ed5c3d9a28216be57587f +part_03449_of_04320.hdf5 737a2ed19020a38f6edf5c3230e0ccc7 +part_03450_of_04320.hdf5 8438c2f4cf2e9e0f5c69f43002e94181 +part_03451_of_04320.hdf5 ab34ea161d869ab00303074028ac53c1 +part_03452_of_04320.hdf5 04d65a0399451d009cd92ce693a6335f +part_03453_of_04320.hdf5 f17d0e35dcb1288cfb6ec59bf9e351ed +part_03454_of_04320.hdf5 595ad109cf913238a53ced6d14dfc8a3 +part_03455_of_04320.hdf5 9676b2de284df99479213affd3bfb0ba +part_03456_of_04320.hdf5 b10ef639bb63ad025f0920326dad3529 +part_03457_of_04320.hdf5 7b1b5418410e87ba4d530ac68029856a +part_03458_of_04320.hdf5 cd21b7be9c77e59a045ea931f5a2eff1 +part_03459_of_04320.hdf5 e7e2c9acd972c92dd80eea19c401c387 +part_03460_of_04320.hdf5 8c7d6a79a42459a0ab629193ff3aba64 +part_03461_of_04320.hdf5 35f962fc1a6262494425af2cdaec6b33 +part_03462_of_04320.hdf5 647e5def2dc349e187cd89b84db88f35 +part_03463_of_04320.hdf5 d3cb09bad476413f20e60eeff50d3b60 +part_03464_of_04320.hdf5 1ec6d75f3ba5c5dbe02a9adc3a1d6b02 +part_03465_of_04320.hdf5 914ceb97c52b34b5cf89aba9ada59aec +part_03466_of_04320.hdf5 0ebcac9f377e7ad513f298371e68d7aa +part_03467_of_04320.hdf5 ddb835e40645fb6ff0997ee98e547a61 +part_03468_of_04320.hdf5 
df02c23da4591d351605cf7f68d9ad12 +part_03469_of_04320.hdf5 6cff36cf52ce25792704e424cf86c6b9 +part_03470_of_04320.hdf5 d99f20712224b20cd5ec2c14b8c74421 +part_03471_of_04320.hdf5 33b896be50b4261c227616847b2bdd1e +part_03472_of_04320.hdf5 ccc496edd617aff09334b60edf1314f6 +part_03473_of_04320.hdf5 a8e249e465096ddd1d91862cf740032d +part_03474_of_04320.hdf5 2d76aff9bb4f18caa8b3409f7b784819 +part_03475_of_04320.hdf5 c7f581a2b2d67e094f64b05ceb14079b +part_03476_of_04320.hdf5 406e8f8d2c2d73bc71beae2193d850dd +part_03477_of_04320.hdf5 775b52a48c489f2c12e83b9b543a8ac4 +part_03478_of_04320.hdf5 38b0ee27c5abb6a7e00b9744e468a8ad +part_03479_of_04320.hdf5 ed8c44b00307618797d6bd4d8b9ae26b +part_03480_of_04320.hdf5 d7f05bfd7f57e5ae121a3dca8511178a +part_03481_of_04320.hdf5 6d298f9efb0f46f77939923b128403e4 +part_03482_of_04320.hdf5 bfbb93b9e006ba66248b1144024fdaee +part_03483_of_04320.hdf5 a5c5c2dece992644bf5549217530ced5 +part_03484_of_04320.hdf5 d29cb1c4b130032a86fb649581b2b7c0 +part_03485_of_04320.hdf5 08a6ae1201faa6cb7797ab10b47a92e7 +part_03486_of_04320.hdf5 1cab9fc2c1408bca4ed36b6b21d5af48 +part_03487_of_04320.hdf5 9401c5dad62ebc5dce33864b0a644610 +part_03488_of_04320.hdf5 645b9c6fc23208ff5b1e16eca7fb84a3 +part_03489_of_04320.hdf5 844f662aaff8c2bcfc1ce6bbdfbdaf92 +part_03490_of_04320.hdf5 a96f897b268e4fa84764a36ed9dfaed3 +part_03491_of_04320.hdf5 b8588aa7f3336f5fee52ed8502f78417 +part_03492_of_04320.hdf5 9024befe7d76d00d6120baad9545ea2d +part_03493_of_04320.hdf5 622a22b514d420116da819e22df78b2b +part_03494_of_04320.hdf5 f2083e5b56b80a6b9e6dbe99ddf4859c +part_03495_of_04320.hdf5 fea37bd1b108e1ac9400deb7241e0df0 +part_03496_of_04320.hdf5 1729b9ed3fdfce6c36568e356806525c +part_03497_of_04320.hdf5 78118cbbfa858853a9f0cf3d3668d308 +part_03498_of_04320.hdf5 97fa8050e9dd11256f4b19ee506b1558 +part_03499_of_04320.hdf5 e263a0da7a83922646062649d61ec6a3 +part_03500_of_04320.hdf5 9ce70b95f5250be6b3c1a328dbdd7bd0 +part_03501_of_04320.hdf5 9b9350b72902ce2a24e2d3e6887b94d7 +part_03502_of_04320.hdf5 abe73a538bcee40a11c97cd8a5d76317 +part_03503_of_04320.hdf5 c62949165759395dc2a4af5e7194f690 +part_03504_of_04320.hdf5 cba47938c25c9cca32696aeb57009862 +part_03505_of_04320.hdf5 2d1297263c6b296a8520d808f8f3c5ca +part_03506_of_04320.hdf5 44b9ebeab326ad8ab0a79867488adaa5 +part_03507_of_04320.hdf5 4cf827b3ae3d2b3870602e764d095de8 +part_03508_of_04320.hdf5 9b8cee4dbaa57ea3403f04f5a32eee2d +part_03509_of_04320.hdf5 3dbacc11401ddb291c66ccb4b9ff683d +part_03510_of_04320.hdf5 df8b5185b5dfc69c3c0d0a5c8861749a +part_03511_of_04320.hdf5 3845aa9c41c934695a57490737c6fb41 +part_03512_of_04320.hdf5 fcd9791f428d9b01d9a4cd6f925f77f4 +part_03513_of_04320.hdf5 5a3eade56192e5bbfe375c5c8d6c6d6e +part_03514_of_04320.hdf5 4da2f1b51d934295ac036902c5cf54ad +part_03515_of_04320.hdf5 2ce2334d19c473db5dd6e897d599e1d6 +part_03516_of_04320.hdf5 bb6515d4e13e23e1fc9770246e2e0698 +part_03517_of_04320.hdf5 c03f32138de7d1369c82e95228914220 +part_03518_of_04320.hdf5 b337a34e4e910585bfb0650b5382eebe +part_03519_of_04320.hdf5 89631da8cd0752d532b0ee4e0861943f +part_03520_of_04320.hdf5 76dffe5009296276de3398ced80c4c76 +part_03521_of_04320.hdf5 37341962abe3afb990ed2b26ba75ee0b +part_03522_of_04320.hdf5 be38182bcd6832ca109a94c69e0609ef +part_03523_of_04320.hdf5 3edc123745bf3ac893c46b112f66a258 +part_03524_of_04320.hdf5 d3ea12b78ab6f7c7a5c1b9a85936816c +part_03525_of_04320.hdf5 fedb0184ee4b3f17da96eec27a54cbc6 +part_03526_of_04320.hdf5 edc815080a7109215bc519477933a089 +part_03527_of_04320.hdf5 70b792d0f59a9da56e1278f12f360ad0 +part_03528_of_04320.hdf5 
16c68c195c739329f11d5101d5440dca +part_03529_of_04320.hdf5 b8a1c52baac42f2dda3a98d45ee4c19a +part_03530_of_04320.hdf5 267aabc9628c37550210c8b4cb33199d +part_03531_of_04320.hdf5 d54be980c9571904ee123fd29cd4b90e +part_03532_of_04320.hdf5 708f7ec39faf64c5d51d7a894d539ba3 +part_03533_of_04320.hdf5 de4a7360ad53d1fca06af5a7fd46982f +part_03534_of_04320.hdf5 948527b31de525045397963e5ff5fe5e +part_03535_of_04320.hdf5 3ff80d666ee65958a6d48855cdec612e +part_03536_of_04320.hdf5 121ed39d5255f17b8404d3713787ddcc +part_03537_of_04320.hdf5 ab036ba4c4b742adc4c34b28d599a26d +part_03538_of_04320.hdf5 5e4be3879c088f99ad35f44a9aa0ca72 +part_03539_of_04320.hdf5 25c692f729d5e880344b61f07685310e +part_03540_of_04320.hdf5 344eef0bf22fc242d3402b63d78a25b9 +part_03541_of_04320.hdf5 5904e69c4036ad6c19de36aee2d584b3 +part_03542_of_04320.hdf5 49337d1378ebe7d55663770b23526811 +part_03543_of_04320.hdf5 a0e4e110fe13fc53ebdb16b10cdedd12 +part_03544_of_04320.hdf5 5cb48dd603972efe019a865be1e6dfd1 +part_03545_of_04320.hdf5 f725bef0a65517fe37a97f5be29e04ff +part_03546_of_04320.hdf5 ea443e6a530e84543de8ca0ce14d9c48 +part_03547_of_04320.hdf5 012ee852d7c76fef1c968a01a411fdd2 +part_03548_of_04320.hdf5 517a47144f49ff825ed5456be856a648 +part_03549_of_04320.hdf5 21f5a109b138493de4850782e95f1ce1 +part_03550_of_04320.hdf5 5dff3774b97272dcbbf3a510856c572e +part_03551_of_04320.hdf5 b50390584dbd6f5c5f4544586c3ab1c5 +part_03552_of_04320.hdf5 02f6a96a4462470b0d69d522df82bce2 +part_03553_of_04320.hdf5 5d8b7818e424af70f6a642fdfda3fc7f +part_03554_of_04320.hdf5 7e6c0332f9f65e3ff043a9e54e913c0d +part_03555_of_04320.hdf5 68276b004932a155f68f2fa15aa4b9da +part_03556_of_04320.hdf5 f1e754cc8e010547fbc53015d980443f +part_03557_of_04320.hdf5 2ca5cce30c46a04ae8490fde18bf2ff6 +part_03558_of_04320.hdf5 89a9fc421ed805629881662bcd082953 +part_03559_of_04320.hdf5 2c52cb483bee47d4b81994564863f2a6 +part_03560_of_04320.hdf5 281623b8301f1b53fbd5eb174b3e378f +part_03561_of_04320.hdf5 de73a6177f9b6a29a7f31b4b1d34e4f5 +part_03562_of_04320.hdf5 370eb0e5f33aa3538c04df2f268138f7 +part_03563_of_04320.hdf5 37d693926dff66f1f5e977b76fe22cb3 +part_03564_of_04320.hdf5 0d583fb492078f3f313c815783d18245 +part_03565_of_04320.hdf5 5f3670b5672843ee99e97a20d6b57574 +part_03566_of_04320.hdf5 fe6dde02691cb228fce1ec2e21b7b2f0 +part_03567_of_04320.hdf5 199c0c7548ed989724ecef4af24481a4 +part_03568_of_04320.hdf5 6e69f1c6241be5cc0931fb6d8985f2bb +part_03569_of_04320.hdf5 e866fc10ad7eafa9e312ed69f208c09e +part_03570_of_04320.hdf5 d8c9513b16ee5a689e3ab8a605295335 +part_03571_of_04320.hdf5 289575d029d2740cf0d8afcd1df65b74 +part_03572_of_04320.hdf5 9b4d0dca61d274a465b1774de1555ebb +part_03573_of_04320.hdf5 84672e428dd5d55f662484c8fd7071c8 +part_03574_of_04320.hdf5 ffb81a719e59e3884862f5fa27780545 +part_03575_of_04320.hdf5 70b85ea11e51a2aa5a7168f0ef412053 +part_03576_of_04320.hdf5 3e422446ce00278adcff6ecbb2dcc76f +part_03577_of_04320.hdf5 60967de2464fbbc4438edbeb31c7f6bd +part_03578_of_04320.hdf5 1bff90d48b93595d4926cad9f7c59566 +part_03579_of_04320.hdf5 cfd4167a58687d2704dd310062c60f52 +part_03580_of_04320.hdf5 ddfbf08239bc2a2d3d02d0c79d4d7622 +part_03581_of_04320.hdf5 b1a475f1bee01337ac9f5cf0e7f380ab +part_03582_of_04320.hdf5 8da75d86ba3a943ef218b0634967ca7c +part_03583_of_04320.hdf5 eb2e0b8eb8a3112cce8f6e2aabf3c3e1 +part_03584_of_04320.hdf5 1027dbb5eeed83b9541398732c27dd05 +part_03585_of_04320.hdf5 dcd67884686b8223da670538dd7f426b +part_03586_of_04320.hdf5 f6d2ccfdeba1d2565eb373aeacb1b128 +part_03587_of_04320.hdf5 f5348b4f6e0c4b6d683da35a4a42dc1b +part_03588_of_04320.hdf5 
685937d126e2bf95fe61604cd6b19fc9 +part_03589_of_04320.hdf5 66f3709619654874119aa4ca0630caa4 +part_03590_of_04320.hdf5 ba7033ea73ca4ef4f6ba6fb0bdff9198 +part_03591_of_04320.hdf5 08b6ca7a72256e2583a81d2f0004786a +part_03592_of_04320.hdf5 6d57b75970dfb2e3a771358d77141cce +part_03593_of_04320.hdf5 109e9c9272ceae5b7bae12e3cbc2fb3f +part_03594_of_04320.hdf5 f81456145d561aaa3a69f633af53bd6d +part_03595_of_04320.hdf5 99ae865cf21731ae3171ecba55f6abe0 +part_03596_of_04320.hdf5 83dc72592f7b69b3b5a12bf113fe96ac +part_03597_of_04320.hdf5 ee7ac3466590a5f216678230890aab94 +part_03598_of_04320.hdf5 914db9f0a4d6c78ba86936c3fccfe23e +part_03599_of_04320.hdf5 d7f1b833f9d24c9f37124173d0301186 +part_03600_of_04320.hdf5 7af074268021ce5fc5c6f2c37e6758a4 +part_03601_of_04320.hdf5 794f4476fc92598881981ee1469e8b84 +part_03602_of_04320.hdf5 02695523004475e8007686a251ef6d12 +part_03603_of_04320.hdf5 ddc3a1534063fb90688bebc0d30658f4 +part_03604_of_04320.hdf5 92b5125f35c2cbb27a1f40fcd4c34d87 +part_03605_of_04320.hdf5 a8c223f20bf34438ce5c4959f0cd785a +part_03606_of_04320.hdf5 c53bca2e6a4cf61eb484635554eef22f +part_03607_of_04320.hdf5 ada9276a8119d1eec317f2afded194cc +part_03608_of_04320.hdf5 60434a76b86f10f3e5ec5c6bef5e16cd +part_03609_of_04320.hdf5 b6e92e63ea077dc2eaec094e8f739cbb +part_03610_of_04320.hdf5 4ba507ef42fe8712285387d6c1cfdc9f +part_03611_of_04320.hdf5 df1e55c22952b017ceb330148592694f +part_03612_of_04320.hdf5 c8dff6ac6fba7a1cd7be3114fdf124bd +part_03613_of_04320.hdf5 f9d344f13cb9344a8d6b6c595c41a4bc +part_03614_of_04320.hdf5 e043c23c553f1679e8a6c10f4e4e91bf +part_03615_of_04320.hdf5 97869b24874cbbdfc390ee3e815f8059 +part_03616_of_04320.hdf5 41bf8ad4188b2705fba31e592d4085fb +part_03617_of_04320.hdf5 c71af35c562136d5e7399c7881d14fac +part_03618_of_04320.hdf5 0b204b89d93e67c374903e35619b38ad +part_03619_of_04320.hdf5 7d4a5ed0a6734764487cabd143409b22 +part_03620_of_04320.hdf5 2512590c77b589c08bc4d3627a11813f +part_03621_of_04320.hdf5 ee68e2e10f3f3133011577fc8dabbc81 +part_03622_of_04320.hdf5 8c969847f6a7c543ac641961a04ec3b3 +part_03623_of_04320.hdf5 f6d7c681b0d3cbe668e069cc50160026 +part_03624_of_04320.hdf5 52537d4969ca2a426ac4c470742e7559 +part_03625_of_04320.hdf5 b98b3b6daf12b8995f284835577f8c1d +part_03626_of_04320.hdf5 d5dacbfdaaec9a905fe17960af3a0ad8 +part_03627_of_04320.hdf5 7fc9fa94915d6fd0f9b11762d39c6cb8 +part_03628_of_04320.hdf5 f50bfd170baab70ec36aaa185e37d802 +part_03629_of_04320.hdf5 4cd699f810c9ca9106cf50991b444368 +part_03630_of_04320.hdf5 d426ad1e64c8f18ba1877a6095e2745c +part_03631_of_04320.hdf5 145eeec5c289fbf881cd0c4cf9d85d39 +part_03632_of_04320.hdf5 c37889e37ec021e261308bb1e0298f9f +part_03633_of_04320.hdf5 00235d6aba14013766ab634f1647e06c +part_03634_of_04320.hdf5 2036d2cee866f3bad4aadc8923c0c9ea +part_03635_of_04320.hdf5 52fb6c8431359d54decc24314f3ff937 +part_03636_of_04320.hdf5 60946e1222fa599ca06af426af45e7ba +part_03637_of_04320.hdf5 a05864e5a34af785f925cf224ca505a8 +part_03638_of_04320.hdf5 f4c574c28f9aeae8cc9ca1df851d304a +part_03639_of_04320.hdf5 1c5e8f97cdffe581fb369b874f55c6ae +part_03640_of_04320.hdf5 68f18d300ffb9bbc2cedea30f99e501e +part_03641_of_04320.hdf5 7f771ccd2abacf0635942e5b4e12a7f2 +part_03642_of_04320.hdf5 a07289b6c7db22a51b277fca0ac993b9 +part_03643_of_04320.hdf5 80769358ea512658f1edbc48f6cf8475 +part_03644_of_04320.hdf5 2c8b254ffa01e0087a77ad7a3420840e +part_03645_of_04320.hdf5 221346cb54c344f5129e00b15925fe18 +part_03646_of_04320.hdf5 203c46fe4e19e86cd19edb788515195c +part_03647_of_04320.hdf5 2d7cc129d0f57f3ba802c64c1748a3a6 +part_03648_of_04320.hdf5 
0528681d1e4b37b9ba4b424e6a9ad5fa +part_03649_of_04320.hdf5 41619f0b8f43875f755ba873d130171b +part_03650_of_04320.hdf5 8b23e3df40c0a92c846b8810babe1d6a +part_03651_of_04320.hdf5 1606e3d90257cf8a5cf2e2b37116312f +part_03652_of_04320.hdf5 ffa5105faf180dffb94da55bd7cd8145 +part_03653_of_04320.hdf5 a53c4ba3d67618bb00f944f0f9422f85 +part_03654_of_04320.hdf5 929f5971e37bcebd099f99fc187fc68a +part_03655_of_04320.hdf5 fcf9bb85b9163fa608ad34f674b7a3c1 +part_03656_of_04320.hdf5 ce552d47e7a0e9ebb03622c201af1d50 +part_03657_of_04320.hdf5 ea48a98b5af0ffec46479b8f600bc28a +part_03658_of_04320.hdf5 a4e0f9779d7fb40cf3ab11208d4c15b2 +part_03659_of_04320.hdf5 e7dbcd5deefbb66a9efcb8dd298ea122 +part_03660_of_04320.hdf5 2c2f3e2505f07cb9f0610b80d3c509bc +part_03661_of_04320.hdf5 dcf922bd4c183a06175756793b5846be +part_03662_of_04320.hdf5 9d069f14f8f11293307166b21d27907e +part_03663_of_04320.hdf5 2faf02f3ad7db179246b7e6d9803a9f3 +part_03664_of_04320.hdf5 1c7e51e2efa8e85285c65af8dbf05723 +part_03665_of_04320.hdf5 513fe580a8e763fa1f71b4fe4cd27989 +part_03666_of_04320.hdf5 b336adc1b3463f42a414d7a1a92efcaa +part_03667_of_04320.hdf5 d86a1c3173224ae9133a5b268d66106a +part_03668_of_04320.hdf5 86c05a5650e3e24c6e98e1febb68d8db +part_03669_of_04320.hdf5 30b2ffba180121c0ea6d9003d327dae8 +part_03670_of_04320.hdf5 7a3914a176354d3f3b6dc55993dcf7cd +part_03671_of_04320.hdf5 0269d64848ea80a5729f23d1d322c5bc +part_03672_of_04320.hdf5 49da20fc8dc1bce8c4f421fec2c9bac7 +part_03673_of_04320.hdf5 ecc8d19a04abd33c0c759e8647a541c3 +part_03674_of_04320.hdf5 380f032d71b9b59f60e03b5c3e6085df +part_03675_of_04320.hdf5 bf05c1e22d5dd116273ae4076fcd7db5 +part_03676_of_04320.hdf5 6f770553616427a65e6508aeb8825363 +part_03677_of_04320.hdf5 0ec86c46e99f49abf3cd9997e8f15e3e +part_03678_of_04320.hdf5 611cea87e5fd37142c264e11a4bb71c2 +part_03679_of_04320.hdf5 127d2b0c9082ea8f34535649d61ecfc7 +part_03680_of_04320.hdf5 57e70bbc01e2a433b10f966a5c6c0b04 +part_03681_of_04320.hdf5 7ccb8d7adc7ca191ab8bd3c93d31cba6 +part_03682_of_04320.hdf5 2f17091941a59e3e00c55af96926763e +part_03683_of_04320.hdf5 4bd06d971f507e77bd06849da2bd1c7a +part_03684_of_04320.hdf5 08011e319bf4a56eb69306c210917aad +part_03685_of_04320.hdf5 199ad3e2ce8c79674c33f99bbe95e183 +part_03686_of_04320.hdf5 9989bc17e61fa3f16e5b045bd7a0080d +part_03687_of_04320.hdf5 57e516bc46b8fe2fd5dd83ce9aad8cf0 +part_03688_of_04320.hdf5 89bbcf1f4798f68b9699b2a05abc09a4 +part_03689_of_04320.hdf5 19d52fde058c6ebc7c0b51494701b4b7 +part_03690_of_04320.hdf5 c82c014da160ca5747e8171ff84967db +part_03691_of_04320.hdf5 ac8961057c509d4c4127b9580c58a434 +part_03692_of_04320.hdf5 cdf3018037a86443b3fa013beee13e78 +part_03693_of_04320.hdf5 6d319bf0fd60d6eb0f00605bf5b6de9b +part_03694_of_04320.hdf5 9e1a6684d502094ffc15d7582e7ccc54 +part_03695_of_04320.hdf5 c9bbae56a78374c17f904ad9f95a2736 +part_03696_of_04320.hdf5 07af4ee85133bc897254d6b3c9cbab23 +part_03697_of_04320.hdf5 5d1713b5832e35fbc04fe7b7b62f86b6 +part_03698_of_04320.hdf5 b1262d14e3a865194828f396ff5c5277 +part_03699_of_04320.hdf5 f0aec2c9f136c94dee40f39d573881d6 +part_03700_of_04320.hdf5 573e18c4dc4179edb6c27ff78ff2857c +part_03701_of_04320.hdf5 5127f658c2aa409cc8125291124154b0 +part_03702_of_04320.hdf5 eb104c3c491de4353f9312d95c50f9ab +part_03703_of_04320.hdf5 f3f521eb79c60fb2b082b560284711b9 +part_03704_of_04320.hdf5 8a02362153a6bb53677812357fa3bede +part_03705_of_04320.hdf5 e3d9354b3f37555e340972c5eaba3758 +part_03706_of_04320.hdf5 f25845941f84e20832b3ff2bab448d15 +part_03707_of_04320.hdf5 5bc22478bded4af90588279bb765d056 +part_03708_of_04320.hdf5 
7a978c331daa53c4c40096cd757af340 +part_03709_of_04320.hdf5 850026e97ec7240930e036668d1e6dcb +part_03710_of_04320.hdf5 575212e8637f905d3bddcb19592429b5 +part_03711_of_04320.hdf5 0d7a4462e572c18b3e1cd95b7d8d3b50 +part_03712_of_04320.hdf5 146aa4b417f7ea46e56ffe76e05c6c96 +part_03713_of_04320.hdf5 efdd60038200242c55c945f538f1f6db +part_03714_of_04320.hdf5 764b44a6c131b90da888f1659cb9cab8 +part_03715_of_04320.hdf5 400de88c353caa43ab256569fc880627 +part_03716_of_04320.hdf5 1e732be2eb9fdf623248a6917d831bf0 +part_03717_of_04320.hdf5 50abef0671d9ac3efff7c1a7a5f298e2 +part_03718_of_04320.hdf5 652424ae3d35d90fc280b6c52e0c3229 +part_03719_of_04320.hdf5 98686a54daaa6758eaef12d01a1e681f +part_03720_of_04320.hdf5 f93c78249e9733e04cd07a5f1a85f12a +part_03721_of_04320.hdf5 3a4881814a695aa1458db67a637ea312 +part_03722_of_04320.hdf5 2aac63ecf893aade72321ca4c7454912 +part_03723_of_04320.hdf5 f895ddb568d1914ad17b254fd9f564a8 +part_03724_of_04320.hdf5 75aabfe71f6504ae95ec79d7fdb0d254 +part_03725_of_04320.hdf5 52447ff5e9636adf3a0f1b2cae5eeb0f +part_03726_of_04320.hdf5 1d27e9c32c282404e07d2560fe1e7c2a +part_03727_of_04320.hdf5 c445c807716586d629240734c4023756 +part_03728_of_04320.hdf5 107e88330b8b2843c0d840f93d88957e +part_03729_of_04320.hdf5 bff4fa7bebd5b53fcfbb62aef8c8e630 +part_03730_of_04320.hdf5 25b2a1f3a44bc9d93fe0ea1bedb6f380 +part_03731_of_04320.hdf5 cafff664a49ed6632538c351c7abdacc +part_03732_of_04320.hdf5 de306e9c4c815ae16eae7d5a3f3ab55f +part_03733_of_04320.hdf5 8dc55254a00ff2d39b0bf9f62aed6dd5 +part_03734_of_04320.hdf5 8adffa582ace1bb8f20e6101d1ca45df +part_03735_of_04320.hdf5 548c9af3681c7d361c7634b3720d2a69 +part_03736_of_04320.hdf5 ea02cf00749b28591518123c45424a0a +part_03737_of_04320.hdf5 a26e588bdfa292ecef0b34eaaabae37e +part_03738_of_04320.hdf5 89b8ffc84a48aff9fe2251ad924666ca +part_03739_of_04320.hdf5 d3bab0dcfbc647705df9a496a4a4b77e +part_03740_of_04320.hdf5 3dcdf3c97ec3fa96f16d4a70ce42051c +part_03741_of_04320.hdf5 3cf62dc185cc22b8941d1f963765fbe6 +part_03742_of_04320.hdf5 b9814280d7e0d76ebb8d7e4b15fc31af +part_03743_of_04320.hdf5 cb6f20a3940981c211285f64d942c45e +part_03744_of_04320.hdf5 8ee18ae2f4d22b68415c8a20897d3733 +part_03745_of_04320.hdf5 cf26e4a6c3005c3eb6d22de0b16d9b90 +part_03746_of_04320.hdf5 66f054855313d5a0169732c98cf2e775 +part_03747_of_04320.hdf5 d0a5e2c9384021977fc38cabff784814 +part_03748_of_04320.hdf5 f2d3b20edc0f9ae4ebc8ee33cb6adfec +part_03749_of_04320.hdf5 aa7c694aff12817677f1d138397b1030 +part_03750_of_04320.hdf5 2f0647eeb41c7ac74c22456e879faeff +part_03751_of_04320.hdf5 b404a6d214058adceecb912b417099cc +part_03752_of_04320.hdf5 8108713c8b471d49f59c0a93b7c5488d +part_03753_of_04320.hdf5 c326bb2e3d5bf7343309959be65a4cda +part_03754_of_04320.hdf5 af1ebad2e80476e7ab5c4e4c992a25d2 +part_03755_of_04320.hdf5 e3b25da8ab0f00bbe59ac8aab0e97b96 +part_03756_of_04320.hdf5 24fd836b812a203ffaee8fa78ef1d602 +part_03757_of_04320.hdf5 03c134f23ed186eb65013d855813bad7 +part_03758_of_04320.hdf5 cc10a22c6fe44a97ca3edde1ffbbd14a +part_03759_of_04320.hdf5 966c22f86f94f5c1e8bf7ec9f35d5fe6 +part_03760_of_04320.hdf5 aa4fbd3485c63bc8aabd6a5472e4856f +part_03761_of_04320.hdf5 473ffe69a597869e932c7c28389209db +part_03762_of_04320.hdf5 5d7ea88939a4392ce3b6c1a314a1ba8e +part_03763_of_04320.hdf5 81cceaf84ca27655da7dd17cf009fc1c +part_03764_of_04320.hdf5 e2997d752b62440ad0a986cc83fcc04d +part_03765_of_04320.hdf5 c8ac11f6588084e79b9dae36b31cf833 +part_03766_of_04320.hdf5 ff17069e9e478019453a4efa50e111b0 +part_03767_of_04320.hdf5 67277c5ce6e0986f922026cab4ea14e5 +part_03768_of_04320.hdf5 
f030b1fcb91432d7f1bc0f273fb01f7f +part_03769_of_04320.hdf5 6bb26691140a8dd1c7787de245ed4626 +part_03770_of_04320.hdf5 282365e75a1a4d4897eae56e0b52f37a +part_03771_of_04320.hdf5 612e34aea6b332db91d0c2eae460cf80 +part_03772_of_04320.hdf5 d5a70c2b61c761507cbc4c2b77059854 +part_03773_of_04320.hdf5 20fa54576a2ae4a09d94372fd5a74a54 +part_03774_of_04320.hdf5 de1f94a350ef641fb7b9bd0d9ee8b885 +part_03775_of_04320.hdf5 dc1fe6e90f7cde22eb16d783d1363037 +part_03776_of_04320.hdf5 96839b009644fbf88546c71ac8b2507e +part_03777_of_04320.hdf5 fd7525e35d9703058cff821120f959a0 +part_03778_of_04320.hdf5 7b39b987dc0bdbfc30640c87238ceb9b +part_03779_of_04320.hdf5 fa9c259152d96a406f3b312a190bb0a0 +part_03780_of_04320.hdf5 ab39c80f8f19f142b9360cf0aa32541b +part_03781_of_04320.hdf5 6f74dca2e54c04f268eaa88b79b75d8c +part_03782_of_04320.hdf5 da2eaffd6fdda28078d5c28b480aba29 +part_03783_of_04320.hdf5 77c11eb7bdd2542bf0b84b33278dc61f +part_03784_of_04320.hdf5 b79f0a1c110d6db153794a05dcc519cd +part_03785_of_04320.hdf5 f2ebe60455427bca71633ddd7176d59c +part_03786_of_04320.hdf5 6b5a7ba2d79e3d51e144887457367e6a +part_03787_of_04320.hdf5 ef84b55709ee2dab373d4b553c6ed784 +part_03788_of_04320.hdf5 c6fe902e8da592bd28aaef04efc150a5 +part_03789_of_04320.hdf5 11fe130f0f4b442d1085f0c7edf8e99a +part_03790_of_04320.hdf5 439cab28b08e7e93af67f42271664687 +part_03791_of_04320.hdf5 cd45ce1f109a7c30e13b300f591f9eff +part_03792_of_04320.hdf5 06828b3fdbdeb0cfed39ac0f6d62bed3 +part_03793_of_04320.hdf5 42a66110069c4ad21c3b4a26c7acc403 +part_03794_of_04320.hdf5 063668321910eb3a9b322ffc533e4c0c +part_03795_of_04320.hdf5 1fcd6973ad07150381255e14cc7ba0be +part_03796_of_04320.hdf5 fee209799604b4eb5008efa76ae0601a +part_03797_of_04320.hdf5 4dfd556603f8053785e3303b28db5300 +part_03798_of_04320.hdf5 b687a84fc9b96c4729fd6822f0f228b5 +part_03799_of_04320.hdf5 a7d5af4af1795b6fc3f19c27b1c4e092 +part_03800_of_04320.hdf5 2cf9b7a4e5cce7b25bd31e55612e1a0f +part_03801_of_04320.hdf5 84fa231608e40e7e229e8f32e311facf +part_03802_of_04320.hdf5 fcaab7e2f8a308bbef14549e0b089350 +part_03803_of_04320.hdf5 2d21c9d79f5e0ffee78b9e3ba47b2868 +part_03804_of_04320.hdf5 a48b3d852f087923fdda7950b5c612f9 +part_03805_of_04320.hdf5 8e6e7c9592eb974a25d5eb60ad9f3ee3 +part_03806_of_04320.hdf5 c08b8e66fd3fc60b50fd87ed57031540 +part_03807_of_04320.hdf5 b13122e27ddd601e27f106a966adf2f2 +part_03808_of_04320.hdf5 7ad4aae0f77b5ed5c1f2c07d702cfe5f +part_03809_of_04320.hdf5 e3d353acc491bb56037840b073b08703 +part_03810_of_04320.hdf5 02eb4240b063204a223c90d6898eee63 +part_03811_of_04320.hdf5 724de2bdfa7ff67fdfdedefa7e31d33a +part_03812_of_04320.hdf5 4bea82db6fb627ae591d5df3e6838c57 +part_03813_of_04320.hdf5 e38c6747dc7b5ba8ef83bca7d0d8a2c1 +part_03814_of_04320.hdf5 a5be54796acd0c36e648c92dcf42c47a +part_03815_of_04320.hdf5 25888f3e39d6ac34df0d04ef6cfbb64a +part_03816_of_04320.hdf5 35c15e9275ac926e85f171f29c53cfe2 +part_03817_of_04320.hdf5 b32ef5214d56a24daa0b4f2418ab5c1f +part_03818_of_04320.hdf5 8cc008c397680cf5321c83da6f9a1984 +part_03819_of_04320.hdf5 ef09eae43823904e72a12fb0c26dec36 +part_03820_of_04320.hdf5 1a637559c36913207779ccf0b2b256ae +part_03821_of_04320.hdf5 8473f79709ba8e21fa81ceb4bc5a03a0 +part_03822_of_04320.hdf5 dadb7c1ce10007647dd94acff765ee40 +part_03823_of_04320.hdf5 3d821d87ac816993a0e03c7645dc0bbb +part_03824_of_04320.hdf5 40159b2da0f215d0c593451417430c06 +part_03825_of_04320.hdf5 b96cdbca42f77909428c44aa1baac81b +part_03826_of_04320.hdf5 7a64e3af8c1c65cf6c2e9636fab11cb5 +part_03827_of_04320.hdf5 357e85c1277a454c442ce3360180a5bc +part_03828_of_04320.hdf5 
a9b3fc3f7cbc96279f844f1ff925e937 +part_03829_of_04320.hdf5 9bb895c463dc5d503a2e50abc9c4929e +part_03830_of_04320.hdf5 fe781650839a1ce5e514b83b058fed10 +part_03831_of_04320.hdf5 3f109512bc579d2c0f34579e6c190553 +part_03832_of_04320.hdf5 ab50e37c62f81b02a8b0c94bab79aac4 +part_03833_of_04320.hdf5 c8653acd0c752486199e141f2140d8f2 +part_03834_of_04320.hdf5 c80c2b7bd22103c21b12850f09406ade +part_03835_of_04320.hdf5 b7d4a6da03fd221986dc8def39a18f2c +part_03836_of_04320.hdf5 3b1eba5632993e67ccce8c7504d3ab10 +part_03837_of_04320.hdf5 446dad578c02c582a6644d0da9bd4c85 +part_03838_of_04320.hdf5 e7cb9395697955462294e63cc9946bb3 +part_03839_of_04320.hdf5 974458cc6964bd057c786fad763182d2 +part_03840_of_04320.hdf5 0b4c2e65bfaf9b045fa888eb648ef497 +part_03841_of_04320.hdf5 f001c252ab79d8f94d7e65e94a0a63dd +part_03842_of_04320.hdf5 9160859c94cc916193842ee9b375e6d3 +part_03843_of_04320.hdf5 998fa2f50d11bc627f2ee18206bca788 +part_03844_of_04320.hdf5 1d21c637bd731e8954c6b400a6f08e53 +part_03845_of_04320.hdf5 80e8a442edd8befa12a624a3ef626718 +part_03846_of_04320.hdf5 ebef716816f2dfd28b0458421c7ff4f5 +part_03847_of_04320.hdf5 3c8a76f57b5b5c1dda26094017324ad0 +part_03848_of_04320.hdf5 d6a546f57d785edea06f7389ccf92c0e +part_03849_of_04320.hdf5 902e6dc34da5117b8ac4be07ef179b24 +part_03850_of_04320.hdf5 d5c0c5fbe5ce63e90f32ed7e1ab53873 +part_03851_of_04320.hdf5 4b23264ea1d119820bcccda4a0d683f5 +part_03852_of_04320.hdf5 1793da96b4c1ef7b967709d85c3048d5 +part_03853_of_04320.hdf5 7ddc0e2ff6505986982bd566ae37ee1e +part_03854_of_04320.hdf5 891482f7620df2b96825943363c8b4b0 +part_03855_of_04320.hdf5 cf6bf6edf71253c06f495ecbcf0b7f91 +part_03856_of_04320.hdf5 67bf14bc7a06a0d22158b05b5d4eb350 +part_03857_of_04320.hdf5 267b91c54cbf08a939decb22787f0ab9 +part_03858_of_04320.hdf5 ec4ddb40143bfd6060c69c25b744f548 +part_03859_of_04320.hdf5 2ade5890936f10e6146c3340050a3be1 +part_03860_of_04320.hdf5 a6e773aad24562d7e0f65ba714f6795c +part_03861_of_04320.hdf5 ee43263d1428e622450424f3a0d9fbc5 +part_03862_of_04320.hdf5 6f342ee1fd14399c66230bc69e9edb08 +part_03863_of_04320.hdf5 77db37919f098e2795ef238447691eac +part_03864_of_04320.hdf5 65cc10807edc28c387873046ed06839c +part_03865_of_04320.hdf5 f19b43ba30b3ad61c8660ccbc8add07a +part_03866_of_04320.hdf5 be3f1fccf90a07aec5da3d84435eaee7 +part_03867_of_04320.hdf5 91949119bab133a4d935fed20d3d425c +part_03868_of_04320.hdf5 aca9699ef50cdd15ff4164d4c9f45c6c +part_03869_of_04320.hdf5 da337dee031278616c36b62fb364c171 +part_03870_of_04320.hdf5 ef5d0ae2c476d718bc75299ac7f18559 +part_03871_of_04320.hdf5 7282d43e8d1d504e128a0dbb1d121d32 +part_03872_of_04320.hdf5 66b745f09110011010ad4069b1bb88e1 +part_03873_of_04320.hdf5 52ce67df74756ffcdbe43e89c22ba76b +part_03874_of_04320.hdf5 69003ca0e5d39f85405871619ac4bf1f +part_03875_of_04320.hdf5 6833a43969fac89a94cf3c8471d4f9b7 +part_03876_of_04320.hdf5 e867cb50338eee99f6c402f45761a7a4 +part_03877_of_04320.hdf5 ae00529427f82042fd9a4abd30e0134e +part_03878_of_04320.hdf5 ce4b2de8f9b8f197693f03bd2681e524 +part_03879_of_04320.hdf5 d6df8242c77fb6610d639aeb1975816a +part_03880_of_04320.hdf5 305b23fe329121d96a521a08b29d083f +part_03881_of_04320.hdf5 cdf647804f1005130d664736c1984680 +part_03882_of_04320.hdf5 0ec6a0d3183ecd6860aed86b5dd8fd2e +part_03883_of_04320.hdf5 f1b6b9769c12805ee5039136ca7bb05a +part_03884_of_04320.hdf5 4cb9b58d729823d4a97a1107a4042f0e +part_03885_of_04320.hdf5 b2b198d0e64ecedb9798f73f7822d573 +part_03886_of_04320.hdf5 02a83ed4f27429206438df6c6c2ce8e3 +part_03887_of_04320.hdf5 ddd9b7beb6d3c647c2624f31a7b38d6c +part_03888_of_04320.hdf5 
1445f1bc616e43dc243bcddfe98aa58e +part_03889_of_04320.hdf5 6b4a2b051c7797ee859db8431c83b67a +part_03890_of_04320.hdf5 84e35b784c83c45f0d007238e625ca6d +part_03891_of_04320.hdf5 4a459445bd0ff7632fc6b2be6a48060f +part_03892_of_04320.hdf5 3c49e64a4a18f61d7d476602eec6a9c9 +part_03893_of_04320.hdf5 d95fc55dd3db3e1ad0f45dcbc0316384 +part_03894_of_04320.hdf5 ad362c072df7f0986d064010ee1311a6 +part_03895_of_04320.hdf5 57c2726e91d33ef75dd0998e1bb017aa +part_03896_of_04320.hdf5 e3c13aeb28cddba191561c3532df85d9 +part_03897_of_04320.hdf5 0cc3eefc42c7b7281e161778463fc956 +part_03898_of_04320.hdf5 e20a61bc8934453e28ed56d01464f18d +part_03899_of_04320.hdf5 ee266e141817617139ae1feeb0457e97 +part_03900_of_04320.hdf5 2d252e4e69c7fead57fb2de406cbc891 +part_03901_of_04320.hdf5 bbb8ddbe1c11c78f57c16352e4afcf88 +part_03902_of_04320.hdf5 54cf7bb7ecf3789f11b91bb8333e22b0 +part_03903_of_04320.hdf5 b994df306d91755cbeaea556e8a5163e +part_03904_of_04320.hdf5 48695cbecd50980c8dcc4c743d539898 +part_03905_of_04320.hdf5 c927b71947da8c832f7a5d0a9d16f180 +part_03906_of_04320.hdf5 a9351d052f0685e8cb1337eab6aa9cb4 +part_03907_of_04320.hdf5 4bf3ec7ca3c3bd09f214b6d8a4799d0e +part_03908_of_04320.hdf5 5ac608cb94fb0a485703df08177279aa +part_03909_of_04320.hdf5 54e9aebe805a9309c13f0d0691dd0c0d +part_03910_of_04320.hdf5 90d579c850bf9ec34c39b77edb78d749 +part_03911_of_04320.hdf5 24c705a7e2c50c2ca94b7bf166defbc3 +part_03912_of_04320.hdf5 83c016003126564379986a0c767ab0f4 +part_03913_of_04320.hdf5 d223ae7b8bdea3f5a49ac909052e21e8 +part_03914_of_04320.hdf5 522f8d5e059cae70e4f627baeed143f3 +part_03915_of_04320.hdf5 3940d85acaaefacf145747631f261696 +part_03916_of_04320.hdf5 b7a29bc304eebee545e8fa2cfd4f7868 +part_03917_of_04320.hdf5 0e2521fd44b397c2d01b85cc4d74ef0a +part_03918_of_04320.hdf5 c60b19fe665f1f26882fbc451bc1e186 +part_03919_of_04320.hdf5 70e41005fa7792cdc98a4bd7fdf288f1 +part_03920_of_04320.hdf5 37a4ad25df287ad014ff22a6f820caf5 +part_03921_of_04320.hdf5 e4ce52cd4154f50695f09b209cfd8ca0 +part_03922_of_04320.hdf5 6227a65d5e4ab69d1febfb7ad22647ed +part_03923_of_04320.hdf5 72ff3e68ccc15cbe4f8bbeec0e5831f6 +part_03924_of_04320.hdf5 b9e194c27435336bf8a40fce567f8d8a +part_03925_of_04320.hdf5 823039a09faabeb5358c22e89ebf0223 +part_03926_of_04320.hdf5 0327739a6efdd6e11c8bb5d8de366108 +part_03927_of_04320.hdf5 4beeb476a7ef9be21b847dbccddb9828 +part_03928_of_04320.hdf5 296cced2f0f2131c7febd28dc6f929c1 +part_03929_of_04320.hdf5 ae5655ac38d0dc1256dade38af176e09 +part_03930_of_04320.hdf5 ff2f0aac02b92f40bd4d72fa131f65ba +part_03931_of_04320.hdf5 a2dc11c8ceb5225e309d4505f2e1046a +part_03932_of_04320.hdf5 731c7560e9c99644cb90cf3d517f9d8f +part_03933_of_04320.hdf5 450ac7f8d98b718b78beda9c12da8a3a +part_03934_of_04320.hdf5 83587c74593148f6f19705ae271baa7a +part_03935_of_04320.hdf5 f777fbcf0d2e44d9cb3eaf9f00a6a816 +part_03936_of_04320.hdf5 a01f2294545325ca14e0647a43390ab2 +part_03937_of_04320.hdf5 7b0d4d3b7f8076f6bd08d7c2a7ff05bf +part_03938_of_04320.hdf5 ff9e559080c8ec90dc86e69f9804ca0c +part_03939_of_04320.hdf5 31459514d7397111083c64951ab31d00 +part_03940_of_04320.hdf5 49329dc25c6dc6690a5dbc58a199e96e +part_03941_of_04320.hdf5 23d17ddbc26bd0db6d99e8ecd1c60793 +part_03942_of_04320.hdf5 4ad2254912dc4f5b62b81df44dc106e9 +part_03943_of_04320.hdf5 2f779d067209b482f4a2cd54f51b3d7a +part_03944_of_04320.hdf5 243b6f32f0c0a469c14670c72ef9ff23 +part_03945_of_04320.hdf5 9e6f8476cac6b216365900b7448c9952 +part_03946_of_04320.hdf5 25283f9f2f90f73afdb4b623198c6a94 +part_03947_of_04320.hdf5 04fde3ecbf8f69e02993940d5d6c9a38 +part_03948_of_04320.hdf5 
54f913a9c7bb25630adcaacd7058b3d9 +part_03949_of_04320.hdf5 3ab385156861c5785bc76bc49c9e15bd +part_03950_of_04320.hdf5 9d3c86c1c07c5ca7a6594428f9c9eccb +part_03951_of_04320.hdf5 e0cd7869ed852de714a0c479af6aae21 +part_03952_of_04320.hdf5 fab2b13b0beee8df07ac8132b1155a74 +part_03953_of_04320.hdf5 31d6c56a222bc7ce889b43e25fcdd544 +part_03954_of_04320.hdf5 876fb6f41500929f5bd8bec16af16cab +part_03955_of_04320.hdf5 ae88abe55891f7fa9fb6dccc556ec607 +part_03956_of_04320.hdf5 0fd33213b911f8aabeea1e80d8a1cd99 +part_03957_of_04320.hdf5 f8d25ec6b0bd1ac170cbdb47edad5fab +part_03958_of_04320.hdf5 18947109501da9a44be3126f39168fc2 +part_03959_of_04320.hdf5 1195e47ebb142afa1957e59008be44b5 +part_03960_of_04320.hdf5 4e1c7ae6e1133ba26bd8163cc4924a32 +part_03961_of_04320.hdf5 da2ff07cfba78721a27cf882f49cdaf3 +part_03962_of_04320.hdf5 7543f321beedb621aca20e44a017ea48 +part_03963_of_04320.hdf5 ae524196fda2a6dbb78766e543f9549c +part_03964_of_04320.hdf5 b69d1aebbe3d0a12eca0e6de2490a624 +part_03965_of_04320.hdf5 d9dda8f3fcacc800d8c6e0e1a9ccb139 +part_03966_of_04320.hdf5 881349f396d5fa6f2f028a00e9c0589d +part_03967_of_04320.hdf5 e7b84de803fd01da9620f5bc340a0e37 +part_03968_of_04320.hdf5 7b68cdd64e6913590499c6b3ada0381d +part_03969_of_04320.hdf5 d2ef50c5e4683e2b54a13846b8561c5b +part_03970_of_04320.hdf5 5fafdca2372b336207662cd510611fdb +part_03971_of_04320.hdf5 e16c32dc1f2db6cbde257407aedd2602 +part_03972_of_04320.hdf5 8949b488eac79e8fa639efe2c74484fd +part_03973_of_04320.hdf5 6eb212407638a57fc0214a3a6b85e167 +part_03974_of_04320.hdf5 a1e5966a99eeac31a9aae46836e0895e +part_03975_of_04320.hdf5 c2925765d736f300c99c3a1607076c48 +part_03976_of_04320.hdf5 fdf8e923c6730acc4d85436abd6681a5 +part_03977_of_04320.hdf5 bbb8e33d66791b3620d183d536dc0343 +part_03978_of_04320.hdf5 268d4f4960c6b11e90e990b0fb9df4b1 +part_03979_of_04320.hdf5 a2f4debdfdcee5d014b6e92b91c1e6d7 +part_03980_of_04320.hdf5 82dc64052fdd9d0ef3b9034e96b0154d +part_03981_of_04320.hdf5 c499d5454a01591bea303cb92d7c789e +part_03982_of_04320.hdf5 a334bb810c73f9364fbf4b0a516d4e5a +part_03983_of_04320.hdf5 d36847e16f6ac494bc0a60ceb02c1e40 +part_03984_of_04320.hdf5 95ee66b5af3b887eddc4e3bf5faa753a +part_03985_of_04320.hdf5 b2022612be6371eea53aaaa2c1455f81 +part_03986_of_04320.hdf5 f638a704843dc1d8b27a5e30e31c8e7c +part_03987_of_04320.hdf5 641af197b856bb4ff0b75dd127a01223 +part_03988_of_04320.hdf5 9de367b52f0b8cd122b6a0f2ce950fd8 +part_03989_of_04320.hdf5 b54b11f9623a6032e704e22807dfa0dc +part_03990_of_04320.hdf5 0a6e490eb1f87da0a7d55f188723c046 +part_03991_of_04320.hdf5 b8c6048dd971cd40916daa2c8f8b3ea9 +part_03992_of_04320.hdf5 f111704fbfd40cf8e348620be9515b67 +part_03993_of_04320.hdf5 834e936a0f3d754d88995c5b56abf0f1 +part_03994_of_04320.hdf5 de6634311fc8afa724fd6c588206d7a0 +part_03995_of_04320.hdf5 56e822d6386cb7b7472b7aec1e731bc8 +part_03996_of_04320.hdf5 6f49f6343d54cdb673cc8a3216845b88 +part_03997_of_04320.hdf5 3b5402a11eb529979610347b3e23df9c +part_03998_of_04320.hdf5 c1ca97e52c50a5415ec71195c03d0647 +part_03999_of_04320.hdf5 b9234fbe8896a6e33dea73ce077b4cdb +part_04000_of_04320.hdf5 445559cc470c33bf137547dad6589d56 +part_04001_of_04320.hdf5 4dbc59293be47601dd0fb040118b2946 +part_04002_of_04320.hdf5 ea9d5b64c7bc599d167de2021a2c6f89 +part_04003_of_04320.hdf5 2d4f04d8d13a416c8314ddd833fc3015 +part_04004_of_04320.hdf5 d1d36ecd83027d9ef056a1fe41a12be4 +part_04005_of_04320.hdf5 8112e1580d7af87ea9df5a29e267ddfc +part_04006_of_04320.hdf5 9198d16b729d60a6a6f5d720901ab697 +part_04007_of_04320.hdf5 c074ab10f1c9a2898ab1fc69c0f78a5a +part_04008_of_04320.hdf5 
d901f75802a7bfa4056ba33adc42b7e4 +part_04009_of_04320.hdf5 4834f4c8423c69a18b533bd4f41bd9a2 +part_04010_of_04320.hdf5 912359ca328d04ea9aa4bc1e8107f2ab +part_04011_of_04320.hdf5 487d6e643de7682d89c099f6b12d7972 +part_04012_of_04320.hdf5 0460c860bf1fba4d1650a36755359503 +part_04013_of_04320.hdf5 4308e775bc036eb0fe079ead91908875 +part_04014_of_04320.hdf5 ad6a5686e798afcfd7b4474e7bacf53a +part_04015_of_04320.hdf5 8649a3b3c30ea783a8b4f724ff55285a +part_04016_of_04320.hdf5 a7e7e33086a606db945d3d80cb36ad77 +part_04017_of_04320.hdf5 5fdb4b410db4a68de6afb0435eea7c01 +part_04018_of_04320.hdf5 ff1fec0e6a6e0498507dcfdba3d32994 +part_04019_of_04320.hdf5 faf22eda779277c31e5da3d9ab199c24 +part_04020_of_04320.hdf5 58c9f0e5ee9df9bb9b7694483399d656 +part_04021_of_04320.hdf5 3727f7a6ff68c1767700d374843a0f2f +part_04022_of_04320.hdf5 6dee4bb9bc6712c34bd4ba16bd8e9db1 +part_04023_of_04320.hdf5 1aef754fc5507f4955ba5995beb08f83 +part_04024_of_04320.hdf5 2ca3413732560eedc19eb3140a1fa6bc +part_04025_of_04320.hdf5 8e08d6e193f2781a948d922b440d14e4 +part_04026_of_04320.hdf5 3ec919fd6a8c4522056d238b604c258a +part_04027_of_04320.hdf5 1ac3f4da8f44e777cd8ba244a378092c +part_04028_of_04320.hdf5 047b1bf4c275fbff9b351b1810aa18f9 +part_04029_of_04320.hdf5 6240e679c8256dde65f8f2ecb9866bdf +part_04030_of_04320.hdf5 97f007690270ab1db3dff9f9e33cd435 +part_04031_of_04320.hdf5 cb568d27e18b89f58a82b0f2b40773f4 +part_04032_of_04320.hdf5 cfb2b5ba8a36df4f60924a1343d2370a +part_04033_of_04320.hdf5 f988b3e01876e5eaa6b0c6a54bdd53ae +part_04034_of_04320.hdf5 b43041a1e57aa583599717f77eb957d3 +part_04035_of_04320.hdf5 82f06807c5ed906b9fcb6ee231abb20e +part_04036_of_04320.hdf5 99640fc7029b49ffbb951aead4c985c2 +part_04037_of_04320.hdf5 e0f10caf7b28a2212dcc6139be520e30 +part_04038_of_04320.hdf5 975de381e52f7721fe8259c03a1bf066 +part_04039_of_04320.hdf5 70a86179d61851fca3e5f2ca9458e769 +part_04040_of_04320.hdf5 1e1723bec6815c8397f955672749b32e +part_04041_of_04320.hdf5 dd10eb724fb75324d60286f0975fdf97 +part_04042_of_04320.hdf5 4548bc5d761a075b70434d52f4511eb1 +part_04043_of_04320.hdf5 88c21a4166155578efc2440d45fd8957 +part_04044_of_04320.hdf5 59334f3415d16339a4607736d9fa2b1e +part_04045_of_04320.hdf5 360cbd55a0b76a1353d5458bb6296dee +part_04046_of_04320.hdf5 77fdc4e6b00e5de3be9a981cf04cdbcf +part_04047_of_04320.hdf5 5349f23388892fe20c39ac8e748a7911 +part_04048_of_04320.hdf5 48d1a36e056505657ca9d009ff7262ea +part_04049_of_04320.hdf5 234b92022ef5119b2f7b1c97d6d56c5c +part_04050_of_04320.hdf5 1937e05c6098c04bfcca75922b52bc3d +part_04051_of_04320.hdf5 5315e4c4249b07f5d94fbeb14a7560ce +part_04052_of_04320.hdf5 671349a66862dd7f613e031a73633d3c +part_04053_of_04320.hdf5 1b218a440823d551be856605370f2697 +part_04054_of_04320.hdf5 33d6cb33e551133ed3a89826fa51e51c +part_04055_of_04320.hdf5 b9c53403f9eb73e14902e175df104474 +part_04056_of_04320.hdf5 977598d38f9b1ddc5c19300b1a3707c2 +part_04057_of_04320.hdf5 b5fa2fb8314e0c63de4413057f3ac986 +part_04058_of_04320.hdf5 b84b91dc8cdf73a780a23e40a74646c9 +part_04059_of_04320.hdf5 09d3cd782f37112943b1bf31094b0879 +part_04060_of_04320.hdf5 f73b2d2120a9fbb60c7387379a32169b +part_04061_of_04320.hdf5 7c8bf4379f426c60706f8698a20874cd +part_04062_of_04320.hdf5 b5d15cc14aa51f3a5141dd78d18ec170 +part_04063_of_04320.hdf5 b95565704cd61d5b88688592c75aaaa3 +part_04064_of_04320.hdf5 94916ddc47cd1486f31b45bbed0ab6ca +part_04065_of_04320.hdf5 aedb244d01e71bc828ae7b599af63a8a +part_04066_of_04320.hdf5 2e8dbc21e46a9edee180e51e4caaf39e +part_04067_of_04320.hdf5 0a3e051f5ff22b90c3b683a5d7356a23 +part_04068_of_04320.hdf5 
75d5903bc23481c2a104e4905271a2c8 +part_04069_of_04320.hdf5 8027d3ca86ff6e24c0088d614e16e7c9 +part_04070_of_04320.hdf5 c8e25dff4f461ac682e79d68a6dbc51f +part_04071_of_04320.hdf5 f9a18e90449b1094ecc225df9e19650b +part_04072_of_04320.hdf5 138df9b10eb3ab83215726575857ae67 +part_04073_of_04320.hdf5 38d5225488b555c8483df1fdf386ec00 +part_04074_of_04320.hdf5 c3684935f04155c3017212e974d7ee6a +part_04075_of_04320.hdf5 5a6889c41e9d4ee492633639a9c9f8c7 +part_04076_of_04320.hdf5 61524dfe2df922ed001cd4a78f642b7e +part_04077_of_04320.hdf5 bf74b853297e869ecd3c72783c6e58f4 +part_04078_of_04320.hdf5 cb475ca835ce8d87169f112456e9e568 +part_04079_of_04320.hdf5 05a2f30f86ecc314a9e9b3f6e38862c3 +part_04080_of_04320.hdf5 8ad6d7b7ca54dbb54bf784da7cd2e6bf +part_04081_of_04320.hdf5 cf430f56f7b8979e4152149ac1175930 +part_04082_of_04320.hdf5 1e27ace1f2a4356fa07eb78261c97b15 +part_04083_of_04320.hdf5 7befc54e80eee0db2e88e58eb589ad97 +part_04084_of_04320.hdf5 3529c93ef89454366165582594d6ac81 +part_04085_of_04320.hdf5 055fea98f02bdbe300a19ba24b0f9294 +part_04086_of_04320.hdf5 cb6fc5ce880c3f4588fefac4318ee974 +part_04087_of_04320.hdf5 01114db705ee069f458fe726c7a86c9c +part_04088_of_04320.hdf5 12cd2bc3ce621e253dac4d10fa742d61 +part_04089_of_04320.hdf5 cbbb8414dda5174036bf908c19c32e05 +part_04090_of_04320.hdf5 4a8c83f6ffde761cac9a05f9eeb3e9d0 +part_04091_of_04320.hdf5 a55aebeb8fefd2bebba03946deca7834 +part_04092_of_04320.hdf5 8d0acf94be1f881d624d313066c1e776 +part_04093_of_04320.hdf5 a3558f5dd0caf1a2e25b417809e41f29 +part_04094_of_04320.hdf5 a38d051fbcc823e7fe10e2ca486a9962 +part_04095_of_04320.hdf5 8f97bf44a37669bd8a9b347532ee4c76 +part_04096_of_04320.hdf5 f45051997238e1360119b2b2d0df961d +part_04097_of_04320.hdf5 13f47b5085e967f01b3dbec707975639 +part_04098_of_04320.hdf5 f0b3ccdfe5c4075b0e96ca58c958aeb3 +part_04099_of_04320.hdf5 b7efa48b6b3bb904077162129b8416c3 +part_04100_of_04320.hdf5 c3663631a2a650bd99423f47752e49e8 +part_04101_of_04320.hdf5 6de554befb6c883d57dc306cb2f97631 +part_04102_of_04320.hdf5 152b30ec7ff57c07761a19ec8a3afb6b +part_04103_of_04320.hdf5 dba67394f1d34e0a680edca1e6f9267c +part_04104_of_04320.hdf5 1cabd3c3f566ea5de135971a18227160 +part_04105_of_04320.hdf5 ed187e430f77d7c093e5ccaee0b3bfb2 +part_04106_of_04320.hdf5 708f2ffe58a93ddbe54522febb3c327c +part_04107_of_04320.hdf5 cb2b853c7592a40b780283e524deb397 +part_04108_of_04320.hdf5 4e28fd270b45a6d659a85692513e1d87 +part_04109_of_04320.hdf5 f2b39ad19bc5d505a92018f611d9caea +part_04110_of_04320.hdf5 30cdb0d12ec6c3946344e672eaf3695f +part_04111_of_04320.hdf5 37406cf8be9643cddabb0270fb7dde63 +part_04112_of_04320.hdf5 e732ae090749aa68bdf2e77faaf79581 +part_04113_of_04320.hdf5 33a86880493ada2030f292e6603e98f8 +part_04114_of_04320.hdf5 a97fecc0a0d9666d31ecc06f85519af3 +part_04115_of_04320.hdf5 33cba9ba7bd3d99ea20ccdad818697e8 +part_04116_of_04320.hdf5 c22dc8cfaa44082f11f8a2114c6bd986 +part_04117_of_04320.hdf5 7a4a512e090abce8429aa78fc85343c7 +part_04118_of_04320.hdf5 c28de92c616719a777deb1128e5bc89e +part_04119_of_04320.hdf5 95fa1fea55df1439dce883395855931c +part_04120_of_04320.hdf5 c8fd4bdb50a84389626dde1ada364391 +part_04121_of_04320.hdf5 09211ed98bc07e0dc4f525fd19ea0d9c +part_04122_of_04320.hdf5 0d93363b6d648d4e848193bbc00fef91 +part_04123_of_04320.hdf5 4be108bf927ac970d1207cf1d9fde70f +part_04124_of_04320.hdf5 f5424f2c1ea63446a940a0c584d80394 +part_04125_of_04320.hdf5 137b8d7c077fb2c9783605c5f8be1d83 +part_04126_of_04320.hdf5 b99c65f3fd814930fcfe40bdcb522950 +part_04127_of_04320.hdf5 9815d64278ace0c00ab942b523745fcd +part_04128_of_04320.hdf5 
e3b723265e5f9afca4a52b6d93cc67dc +part_04129_of_04320.hdf5 78acdf39d6776a3f27dfa0b2d9adb20d +part_04130_of_04320.hdf5 f1a314a106f278b55fd505205bf8b203 +part_04131_of_04320.hdf5 0594ee14353dc365cc7a9e512b60760d +part_04132_of_04320.hdf5 afc7d02ea8ead454acfeef4f86795273 +part_04133_of_04320.hdf5 67643a9bbbb4fbc38b4070a152687366 +part_04134_of_04320.hdf5 25edba6fb3369d1bd6f5e45a6ae8e57f +part_04135_of_04320.hdf5 4529d627b263c791f0bd559fdf327c8b +part_04136_of_04320.hdf5 19a176355cbd296b4ec78ba6abf7402e +part_04137_of_04320.hdf5 9e42ae3dd58989b62bf357e045551e1f +part_04138_of_04320.hdf5 c3fa5c7ad1a6abd7c755e94e275e3430 +part_04139_of_04320.hdf5 29044e0cbdb698c40ed36789f3a7fae5 +part_04140_of_04320.hdf5 4c1d498d8a37eb39bb3dbf7d6e412f9a +part_04141_of_04320.hdf5 49dbd28623f7d21a8fe3694efe957474 +part_04142_of_04320.hdf5 58b6af78daa92fe2d6ad44b09939d129 +part_04143_of_04320.hdf5 2a16200fccbeb4dc83ee15c04b648d71 +part_04144_of_04320.hdf5 f1565f18a6b177d852df01a751ba38d0 +part_04145_of_04320.hdf5 45d0960d201e050cb05f2d5cb850d0db +part_04146_of_04320.hdf5 69080cb65bb9e651bf15154ea2760822 +part_04147_of_04320.hdf5 7bec3581a1be853a2927377014085bae +part_04148_of_04320.hdf5 c2a90499f20f04faf0bc239f5f896fca +part_04149_of_04320.hdf5 e392c47e3cbfe84deb0ddd5264face43 +part_04150_of_04320.hdf5 33651e6d9762dc3c11c2e560133cd83c +part_04151_of_04320.hdf5 fd1755ca35a1aad9f13a5e71b5bd0396 +part_04152_of_04320.hdf5 43ccc25aa8bf12267078bb30f53cee2b +part_04153_of_04320.hdf5 d85471c754943424f32db139e3747b29 +part_04154_of_04320.hdf5 6f2538ca20d344d86f1522093759bde9 +part_04155_of_04320.hdf5 c0f5a21d2004b46e2c5c9f3a7225143d +part_04156_of_04320.hdf5 2e1d0936f327d90946489c0a1ed29d42 +part_04157_of_04320.hdf5 32a7ac5dfcfe0deb2f50def3d5c5397a +part_04158_of_04320.hdf5 6915dbd84467398f47be5d602be14ac6 +part_04159_of_04320.hdf5 91ea6ed0bb408720e6529a1f03380fcd +part_04160_of_04320.hdf5 4df8bafdcc5c11787e5f8a1b3457ba24 +part_04161_of_04320.hdf5 93ffa71dca2858b19d08d1f82c1733be +part_04162_of_04320.hdf5 b921077bfd27a98065f1d9c5470d0c09 +part_04163_of_04320.hdf5 1724ebdda3db7cf0f29bc7d050e00d49 +part_04164_of_04320.hdf5 04bde4fd48b8089c57f127e42bae42c7 +part_04165_of_04320.hdf5 f2ed6467e1eba711a605f10652f3f575 +part_04166_of_04320.hdf5 821f0f2cf947302738cf8d142bcd4b06 +part_04167_of_04320.hdf5 78ab179d36bd0606dfae8236b3f4aee1 +part_04168_of_04320.hdf5 fd2de6ae50ff48b5de69b21f10557c6b +part_04169_of_04320.hdf5 09c458671ac02c1ff241b83ec93f21b3 +part_04170_of_04320.hdf5 33f3337165deb3f8375a8a1a90435184 +part_04171_of_04320.hdf5 c4a7dc0636d8795269ac92f63c87272d +part_04172_of_04320.hdf5 b0fdacda0330c66c709e4a238aef1a6a +part_04173_of_04320.hdf5 43851e92cfa9f1267ade03a2a4428164 +part_04174_of_04320.hdf5 b25214580b7aadebd688e411f250d069 +part_04175_of_04320.hdf5 b58048f83a4e3dd0daeb8af125579aed +part_04176_of_04320.hdf5 7db53acbf71176ee7a6d28f16b75e3bf +part_04177_of_04320.hdf5 98dd4de77e16c8ea3fb83768f36028c6 +part_04178_of_04320.hdf5 2e6854d2b7f30e427931920b50a82e82 +part_04179_of_04320.hdf5 d1d789988d7768e8183d81b3f19968ae +part_04180_of_04320.hdf5 4176c0a0185ebb2ed054476ed35bf8a1 +part_04181_of_04320.hdf5 ffb2413bde6f3ed2a9d9a5e5cd4a3e0e +part_04182_of_04320.hdf5 1f4235b7e4b34206cc9029bf6a1f2b5c +part_04183_of_04320.hdf5 86f3452e56df7daf9a6bbaf1fe38e8bc +part_04184_of_04320.hdf5 f4572d6143962b79a2e66c2f37f7fbdd +part_04185_of_04320.hdf5 fad0c90fb6f6cbe9b6e9576a49957da2 +part_04186_of_04320.hdf5 b6aae9068c4abcc379d55f30cf9206b5 +part_04187_of_04320.hdf5 3f106c6afd1ce1b668b016085de7ea42 +part_04188_of_04320.hdf5 
a14cfcc5634f47be1fd95c67386f5690 +part_04189_of_04320.hdf5 ed356ccef3ebe4190384e489152a278d +part_04190_of_04320.hdf5 d89c6535306318b52ee80938b7aaaeb2 +part_04191_of_04320.hdf5 4e497d625c7327d2d0e785fe01abfc2e +part_04192_of_04320.hdf5 ed38dd346ba7e150642a690a7d3c83a3 +part_04193_of_04320.hdf5 ef16f841f0264ce2bb912eecd742b081 +part_04194_of_04320.hdf5 6764d63502fdf6e08ee3f79a2f8ece2a +part_04195_of_04320.hdf5 1db948c7d8104480d6c795adbfa77228 +part_04196_of_04320.hdf5 a4a40f45a8fa1a41ecbca78754214a9f +part_04197_of_04320.hdf5 629c79dee81fbf584f9498dd0776f618 +part_04198_of_04320.hdf5 8861523ef98cd2531df0ec22cc7a609e +part_04199_of_04320.hdf5 3f6eca8c4374427f89d54c80af9b8277 +part_04200_of_04320.hdf5 c6e29427ad48246035201b9edf90f8dc +part_04201_of_04320.hdf5 27dab3bd04ac923e229d0953d625c97c +part_04202_of_04320.hdf5 a2c12a8f66d67f1be03108cb507ab509 +part_04203_of_04320.hdf5 861f690f22890d2575700b83e992dfb9 +part_04204_of_04320.hdf5 9704f0e381bee0794bf04829facf4ac3 +part_04205_of_04320.hdf5 a29343e330c52237c7c73d96f038b7c5 +part_04206_of_04320.hdf5 4d7b7c9b6ac1aa81a1d9ad92db84d05f +part_04207_of_04320.hdf5 d56ccde8d54d8a2bb22f0f5569317602 +part_04208_of_04320.hdf5 605af3ac67542bad1c5843c90642f83b +part_04209_of_04320.hdf5 7942acef0a3551ce166147ea3fdf74cb +part_04210_of_04320.hdf5 ef353f4c5e5e6d139cc5babadc118822 +part_04211_of_04320.hdf5 5327dfb1aecad76db491fd8c0645c7b7 +part_04212_of_04320.hdf5 04df2414017c626629e77908a1126c4a +part_04213_of_04320.hdf5 b20451d9b67d53a1007cc98c5f75e1d5 +part_04214_of_04320.hdf5 5c6e1bf7abb64b95f1d1699737c13ba2 +part_04215_of_04320.hdf5 ca2874f090c21614bc4466a0e0a79dac +part_04216_of_04320.hdf5 5017505477dd89717e6f0ed1f4e326d6 +part_04217_of_04320.hdf5 cd0a43ea63ec74869d20b89e225d6c9a +part_04218_of_04320.hdf5 1be0ff95acbcffb3d5efa46d94646e38 +part_04219_of_04320.hdf5 82b8805f35f271c9da2067b62e2b066b +part_04220_of_04320.hdf5 f1cfd8e05d6122f9435738dc308b8d15 +part_04221_of_04320.hdf5 8b2b951ad739e991187e19d7fd788897 +part_04222_of_04320.hdf5 c9304e4e5c8463105c1c626e33c89f79 +part_04223_of_04320.hdf5 e89db5406da6251164b90c93a7d03a2f +part_04224_of_04320.hdf5 fccdf91a5a73584f7dae0b30e2a21d01 +part_04225_of_04320.hdf5 ed13bad064e021732573890d4ddf31a2 +part_04226_of_04320.hdf5 b8c2079904d51c3cda6ba45094ae8ec9 +part_04227_of_04320.hdf5 092462d158e84483147af43a9528c492 +part_04228_of_04320.hdf5 a055f4850cfd4f42557904739f18b644 +part_04229_of_04320.hdf5 9fe9427c3a6d6b9d7115f19303474e9a +part_04230_of_04320.hdf5 4c6db3e6facb56dbcd33b92ccde99bd9 +part_04231_of_04320.hdf5 0378c7beb43639332040b6a6e8156e98 +part_04232_of_04320.hdf5 64c48c558ef33dee410488acb92457ee +part_04233_of_04320.hdf5 47a775f6047e7fc6c1611bbfc0379025 +part_04234_of_04320.hdf5 7a5a070084749299e238ecdbfe57908e +part_04235_of_04320.hdf5 80e5f4f5b54012eaf28103f43c98aa16 +part_04236_of_04320.hdf5 5225d80dd5bb713ec275eb256384401c +part_04237_of_04320.hdf5 4b6cf4af37277f40133a6812b95cbd8e +part_04238_of_04320.hdf5 1e5203d7ab593859c3db623ea685837d +part_04239_of_04320.hdf5 dc02740f2c2365f06e051bb82fb2bd75 +part_04240_of_04320.hdf5 b095fef8409d46bca3b3a776a4e6a338 +part_04241_of_04320.hdf5 e0d7d0977fc44c38d3d789fe7fda5d63 +part_04242_of_04320.hdf5 c64c2baec3645be4fd6a5eef76d4ce52 +part_04243_of_04320.hdf5 d94b1d148503e8ba2f3a68fdad93fb0f +part_04244_of_04320.hdf5 1d9c80a34c8108f24748658b4c093184 +part_04245_of_04320.hdf5 40c57c257089346dee09b1629519b852 +part_04246_of_04320.hdf5 8118cef2d4d7077e48bf9199efeced51 +part_04247_of_04320.hdf5 e1b4e4eaa22ba8dce2b7d3fc478bf370 +part_04248_of_04320.hdf5 
767c048e290d8d19962056c040290555 +part_04249_of_04320.hdf5 b67339c27de1d7e8dd62d1b026473c3d +part_04250_of_04320.hdf5 d1e6afae6c8066c4167354f02ee3a569 +part_04251_of_04320.hdf5 cfedcf839502235c972303e6393c5606 +part_04252_of_04320.hdf5 215fcb399e8ccde189277cc064404a0d +part_04253_of_04320.hdf5 dd2a528edb3252b4d499c0e833c0662e +part_04254_of_04320.hdf5 0781d6e437c34b823b17b2bb1d07374f +part_04255_of_04320.hdf5 5cc2ece7fea8ff6d4e69c449509b0eb8 +part_04256_of_04320.hdf5 8e77a2e66893d46f73fe78403ae17262 +part_04257_of_04320.hdf5 c9ad7732adaeb3c08179ae506a4acc55 +part_04258_of_04320.hdf5 b007e03f87fa80892160f83142122a87 +part_04259_of_04320.hdf5 2b552e7a0d7f3be916b4433726dad788 +part_04260_of_04320.hdf5 8fb97dfe73e3b7101f4ad2427fd8af0b +part_04261_of_04320.hdf5 d0b19791aab28563d3644c65d904c5ba +part_04262_of_04320.hdf5 a9f2749417c7326980f2fce7b853c1db +part_04263_of_04320.hdf5 688ea9ebd59bec5474f5a5565c3c9dfb +part_04264_of_04320.hdf5 1c99c2f627ccf5d9eae9cb6e17cfbdae +part_04265_of_04320.hdf5 25a2af9cd558583de616f2d286eca204 +part_04266_of_04320.hdf5 443ccc30c61797b0828d3e2c15b8a123 +part_04267_of_04320.hdf5 5776e15dfbde3c52b7b179666d486332 +part_04268_of_04320.hdf5 0234d049d41b7758487dc3c903f78abc +part_04269_of_04320.hdf5 6d917af51029c421ba699af5d8856016 +part_04270_of_04320.hdf5 91f82d74462ef5ce5f51c6c9e8795e6a +part_04271_of_04320.hdf5 56044a0f3795c23c9fad1c48720077e6 +part_04272_of_04320.hdf5 a370719fcb1658d2e23afc0a2aa2427e +part_04273_of_04320.hdf5 0f231177d2de34c317f50f1206b03632 +part_04274_of_04320.hdf5 191d49d69616d8ead6845a4355db5008 +part_04275_of_04320.hdf5 0b46368d317dbbeec28f7fe0c0fe43e5 +part_04276_of_04320.hdf5 c17b703366a42bf6acb07d1314bc24d7 +part_04277_of_04320.hdf5 a32dd264b3723270c3fb2f624458a97e +part_04278_of_04320.hdf5 513ed59f585d51a7c773c82cbe987f27 +part_04279_of_04320.hdf5 229cef03aa8fb8d33057ff1189196a42 +part_04280_of_04320.hdf5 2102666dcbbf86ba5b8b2e1db6dafa86 +part_04281_of_04320.hdf5 97783afc0f2960a8facb2055a3b4fd1b +part_04282_of_04320.hdf5 568089b7dea8fe6a05cfafef0be025ea +part_04283_of_04320.hdf5 d54012bc8eae80f60f4428ef002d2913 +part_04284_of_04320.hdf5 9168ae7a59359d0da51948a72d3a6220 +part_04285_of_04320.hdf5 843dff1e13aa52ae7ff163b0904c8d3d +part_04286_of_04320.hdf5 7b98123b9e895c09344ac43c9ef175ce +part_04287_of_04320.hdf5 98c6b10cbe8d01f54212c3283ec90e6d +part_04288_of_04320.hdf5 d36eeffb29f543b576ffb938f0c88d3c +part_04289_of_04320.hdf5 d6e0771d42e75a4339f18094a29e36fb +part_04290_of_04320.hdf5 852e87adad3cf4b91579eb6d150ded93 +part_04291_of_04320.hdf5 58eac227c78ffa1b660dd8e42c203051 +part_04292_of_04320.hdf5 39747b9b19b9575f37773bc5db5579d8 +part_04293_of_04320.hdf5 9776218fa66a91bdf5258317dfce928e +part_04294_of_04320.hdf5 55acbff973b8db68c42f3d272efcc93b +part_04295_of_04320.hdf5 65b2466a5d3eb35cbdf3eb52fcf47a5f +part_04296_of_04320.hdf5 44ab0caff01bce2a3ad1e9ba8aed8692 +part_04297_of_04320.hdf5 b1c757ce2fbe9e3f0ab4f094fbb8e352 +part_04298_of_04320.hdf5 ee5b942d471551bd4795cf3f9a841927 +part_04299_of_04320.hdf5 8582be6403d5910a4caf41988b7e9ae7 +part_04300_of_04320.hdf5 c90b3a15acd6fc503cc99a609f16383c +part_04301_of_04320.hdf5 741095f9bde59c1a2366ec3185c1f8e7 +part_04302_of_04320.hdf5 1640baf6f37ac61b8215895ce829f057 +part_04303_of_04320.hdf5 d3ba5eb8b1e1f1c85ca25410248aa39f +part_04304_of_04320.hdf5 1add5fdc720c520ba2ec54f6b88bd954 +part_04305_of_04320.hdf5 4af5c47bc539728421cb99a4ee436e94 +part_04306_of_04320.hdf5 6e62063ad415548daf84f956bbaef1e1 +part_04307_of_04320.hdf5 489ca988430f8d59c1d0f0369d1f5248 +part_04308_of_04320.hdf5 
03ccec05c12df20508ff90d8959ce9b1 +part_04309_of_04320.hdf5 155bac59ce2085513cefeb8f56bede0a +part_04310_of_04320.hdf5 3bc4250f03b44c358714eda0542d29af +part_04311_of_04320.hdf5 86c2b6108ee7161c24fe66864b48c2c5 +part_04312_of_04320.hdf5 ee014a26063d1cd79c23af081165acaf +part_04313_of_04320.hdf5 587f8f6096a63e1d2f20973977a2f20e +part_04314_of_04320.hdf5 0004f3a2e8720f7491468b128d6f275f +part_04315_of_04320.hdf5 ae4cdb58d5dbc59c477e35ab690a6019 +part_04316_of_04320.hdf5 d273804a20c4789331b38ae667c34fb7 +part_04317_of_04320.hdf5 7efe3d976ffc9831f076fc9b1c718c1d +part_04318_of_04320.hdf5 b05aa2704fe1028c132bb42b2bd27230 +part_04319_of_04320.hdf5 76985d700c2ea35390a32549492fe914 diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/chop_hdf5_files.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/chop_hdf5_files.py new file mode 100644 index 000000000..321389c27 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/chop_hdf5_files.py @@ -0,0 +1,150 @@ +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import h5py +import multiprocessing +import numpy as np +from os import path, makedirs +from tqdm import tqdm +import argparse +import logging + +parser = argparse.ArgumentParser( + description="Training data sharding for BERT.") +parser.add_argument( + '--input_hdf5_dir', + type=str, + default='hdf5', + help='Input hdf5_file path') +parser.add_argument( + '--output_hdf5_dir', + type=str, + default='', + help='Output hdf5_file path') +parser.add_argument( + '--num_shards', + type=int, + default=2048, + help='Number of output shards (default 2048)') +parser.add_argument( + '--max_seq_length', + type=int, + default=512, + help='The maximum number of tokens within a sequence. (default 512)') +parser.add_argument( + '--max_predictions_per_seq', + type=int, + default=76, + help='The maximum number of predictions within a sequence. (default 76)') +args = parser.parse_args() + +max_seq_length = args.max_seq_length +max_predictions_per_seq = args.max_predictions_per_seq +n_output_shards = args.num_shards +input_path = args.input_hdf5_dir +logging.basicConfig(level=logging.INFO) + +hdf5_compression_method = None + +input_files = sorted(glob.glob(input_path + '/part-00???-of-00500.hdf5', recursive=False)) +logging.info('n_input_shards = {}'.format(len(input_files))) +logging.info('n_output_shards = {}'.format(n_output_shards)) + +output_shards_dir = path.join(args.output_hdf5_dir,'hdf5_{}_shards_uncompressed'.format(n_output_shards)) +try: + makedirs(output_shards_dir) +except OSError as error: + logging.info('Output directory : {} already exists. 
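The manifest that ends above appears to pair each shard filename with its MD5 digest, one "part_NNNNN_of_04320.hdf5 <md5>" entry per line. The patch itself does not show how that listing is consumed, so the standalone Python sketch below (the script name and paths are placeholders, not taken from this patch) only illustrates one way to verify downloaded shards against such a manifest.

import hashlib
import sys
from pathlib import Path

def md5_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file so large .hdf5 shards are hashed in constant memory.
    digest = hashlib.md5()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

def verify_manifest(manifest: Path, shard_dir: Path) -> int:
    # Each manifest line is assumed to read "<shard filename> <md5 digest>".
    failures = 0
    for line in manifest.read_text().splitlines():
        if not line.strip():
            continue
        name, expected = line.split()
        actual = md5_of(shard_dir / name)
        if actual != expected:
            print(f"MISMATCH {name}: expected {expected}, got {actual}")
            failures += 1
    return failures

if __name__ == "__main__":
    # Hypothetical usage: python verify_shards.py checksums.txt ./hdf5_shards
    sys.exit(verify_manifest(Path(sys.argv[1]), Path(sys.argv[2])))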
Overwritting ...'.format(output_shards_dir)) + +ofile_prefix = path.join(output_shards_dir, 'part_') +ofile_suffix = '_of_{:05d}.hdf5'.format(n_output_shards) + + +# First pass over data to get sample count (read only the smallest array to get count) +n_samples = 0 +for ifile in tqdm(input_files, total=len(input_files)): + h5_ifile = h5py.File(ifile, 'r') + n_samples += h5_ifile['next_sentence_labels'].shape[0] + h5_ifile.close() + +# Find a "nominal" number of samples per shard (calculated to always go over by one shard size) +# Find excess samples in last shard and distribute removal of excess over first "N" shards (could be done over last, but it doesn't matter and math is easier this way) +# (since 0 <= excess < nominal_shard_size, the max imbalance will be 1 sample to minimize the straggler effect) +n_sample_per_ofile_nominal = (n_samples + n_output_shards - 1) // n_output_shards +n_excess = n_output_shards * n_sample_per_ofile_nominal - n_samples # Always a positive number +logging.info('Total number of samples: {}. Sample per shard {}/{}'.format(n_samples, n_sample_per_ofile_nominal-1, n_sample_per_ofile_nominal)) + +logging.info('creating {} output file handles. This could take a while.'.format(n_output_shards)) +ofile_handles = [h5py.File('{}{:05d}{}'.format(ofile_prefix, shard, ofile_suffix), 'w') for shard in range(n_output_shards)] + +ofile_idx = 0 # which output file +ofile_entry_idx = 0 # index into an individual data element of an output file +ifile_entry_idx = 0 + +n_samples_in_this_shard = n_sample_per_ofile_nominal - 1 +o_input_ids = np.ndarray((n_samples_in_this_shard, max_seq_length)) +o_input_masks = np.ndarray((n_samples_in_this_shard, max_seq_length)) +o_segment_ids = np.ndarray((n_samples_in_this_shard, max_seq_length)) +o_masked_lm_positions = np.ndarray((n_samples_in_this_shard, max_predictions_per_seq)) +o_masked_lm_ids = np.ndarray((n_samples_in_this_shard, max_predictions_per_seq)) +o_next_sentence_labels = np.ndarray((n_samples_in_this_shard)) + +for ifile in tqdm(input_files, total=len(input_files)): + h5_ifile = h5py.File(ifile, 'r') + + ifile_entry_idx = 0 + f_input_ids = h5_ifile['input_ids'][:] + f_input_masks = h5_ifile['input_mask'][:] + f_segment_ids = h5_ifile['segment_ids'][:] + f_masked_lm_positions = h5_ifile['masked_lm_positions'][:] + f_masked_lm_ids = h5_ifile['masked_lm_ids'][:] + f_next_sentence_labels = h5_ifile['next_sentence_labels'][:] + + h5_ifile.close() + + # This could be vectorized but keeping it simple due to lack of time + while ifile_entry_idx < f_input_ids.shape[0]: + if ofile_entry_idx == n_samples_in_this_shard: + ofile_handles[ofile_idx].create_dataset("input_ids", data=o_input_ids, dtype='i2', compression=hdf5_compression_method) + ofile_handles[ofile_idx].create_dataset("input_mask", data=o_input_masks, dtype='i1', compression=hdf5_compression_method) + ofile_handles[ofile_idx].create_dataset("segment_ids", data=o_segment_ids, dtype='i1', compression=hdf5_compression_method) + ofile_handles[ofile_idx].create_dataset("masked_lm_positions", data=o_masked_lm_positions, dtype='i2', compression=hdf5_compression_method) + ofile_handles[ofile_idx].create_dataset("masked_lm_ids", data=o_masked_lm_ids, dtype='i2', compression=hdf5_compression_method) + ofile_handles[ofile_idx].create_dataset("next_sentence_labels", data=o_next_sentence_labels, dtype='i1', compression=hdf5_compression_method) + ofile_handles[ofile_idx].flush() + ofile_handles[ofile_idx].close() + + ofile_entry_idx = 0 + ofile_idx += 1 + + n_samples_in_this_shard = 
n_sample_per_ofile_nominal + if ofile_entry_idx < n_excess: + n_samples_in_this_shard -= 1 + + o_input_ids = np.ndarray((n_samples_in_this_shard, max_seq_length)) + o_input_masks = np.ndarray((n_samples_in_this_shard, max_seq_length)) + o_segment_ids = np.ndarray((n_samples_in_this_shard, max_seq_length)) + o_masked_lm_positions = np.ndarray((n_samples_in_this_shard, max_predictions_per_seq)) + o_masked_lm_ids = np.ndarray((n_samples_in_this_shard, max_predictions_per_seq)) + o_next_sentence_labels = np.ndarray((n_samples_in_this_shard)) + + o_input_ids[ofile_entry_idx] = f_input_ids[ifile_entry_idx] + o_input_masks[ofile_entry_idx] = f_input_masks[ifile_entry_idx] + o_segment_ids[ofile_entry_idx] = f_segment_ids[ifile_entry_idx] + o_masked_lm_positions[ofile_entry_idx] = f_masked_lm_positions[ifile_entry_idx] + o_masked_lm_ids[ofile_entry_idx] = f_masked_lm_ids[ifile_entry_idx] + o_next_sentence_labels[ofile_entry_idx] = f_next_sentence_labels[ifile_entry_idx] + ofile_entry_idx += 1 + + ifile_entry_idx += 1 diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/chop_hdf5_files_to_varlength.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/chop_hdf5_files_to_varlength.py new file mode 100644 index 000000000..b2fe8b502 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/chop_hdf5_files_to_varlength.py @@ -0,0 +1,154 @@ +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import h5py +import multiprocessing +import numpy as np +from os import path, makedirs +from tqdm import tqdm +import argparse +import logging + +parser = argparse.ArgumentParser( + description="Training data sharding for BERT.") +parser.add_argument( + '--input_hdf5_dir', + type=str, + default='hdf5', + help='Input hdf5_file path') +parser.add_argument( + '--output_hdf5_dir', + type=str, + default='', + help='Output hdf5_file path') +parser.add_argument( + '--num_shards', + type=int, + default=2048, + help='Number of output shards (default 2048)') +parser.add_argument( + '--max_seq_length', + type=int, + default=512, + help='The maximum number of tokens within a sequence. (default 512)') +parser.add_argument( + '--max_predictions_per_seq', + type=int, + default=76, + help='The maximum number of predictions within a sequence. 
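Both chop_hdf5_files.py above and the variable-length variant it introduces size their output shards with the same ceiling-division arithmetic described in their comments. The short standalone sketch below (illustrative numbers only, not values from the patch) works through that arithmetic to show why the shard sizes always sum to the sample count and why no two shards differ by more than one sample.

n_samples = 10_007            # total samples counted in the first pass (example value)
n_output_shards = 4           # --num_shards (example value)

# Ceiling division: rounding up guarantees n_output_shards * nominal >= n_samples.
n_sample_per_ofile_nominal = (n_samples + n_output_shards - 1) // n_output_shards   # 2502

# The overshoot from rounding up; always 0 <= n_excess < n_output_shards.
n_excess = n_output_shards * n_sample_per_ofile_nominal - n_samples                 # 1

# Per the scripts' comments, the excess is shaved one sample at a time off the
# first n_excess shards, so shard sizes differ by at most one sample.
shard_sizes = [n_sample_per_ofile_nominal - 1 if shard < n_excess
               else n_sample_per_ofile_nominal
               for shard in range(n_output_shards)]

assert shard_sizes == [2501, 2502, 2502, 2502]
assert sum(shard_sizes) == n_samples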
(default 76)') +args = parser.parse_args() + +max_seq_length = args.max_seq_length +max_predictions_per_seq = args.max_predictions_per_seq +n_output_shards = args.num_shards +input_path = args.input_hdf5_dir +logging.basicConfig(level=logging.INFO) + +hdf5_compression_method = None + +input_files = sorted( + glob.glob(input_path + '/part-00???-of-00500.hdf5', recursive=False)) +logging.info('n_input_shards = {}'.format(len(input_files))) +logging.info('n_output_shards = {}'.format(n_output_shards)) + +output_shards_dir = path.join( + args.output_hdf5_dir, '{}_shards_varlength'.format(n_output_shards)) +try: + makedirs(output_shards_dir) +except OSError as error: + logging.info('Output directory : {} already exists. Overwritting ...'.format( + output_shards_dir)) + +ofile_prefix = path.join(output_shards_dir, 'part_') +ofile_suffix = '_of_{:05d}.hdf5'.format(n_output_shards) + + +# First pass over data to get sample count (read only the smallest array to get count) +n_samples = 0 +n_samples_written = 0 +for ifile in tqdm(input_files, total=len(input_files)): + h5_ifile = h5py.File(ifile, 'r') + n_samples += h5_ifile['next_sentence_labels'].shape[0] + h5_ifile.close() + +# Find a "nominal" number of samples per shard (calculated to always go over by one shard size) +# Find excess samples in last shard and distribute removal of excess over first "N" shards (could be done over last, but it doesn't matter and math is easier this way) +# (since 0 <= excess < nominal_shard_size, the max imbalance will be 1 sample to minimize the straggler effect) +n_sample_per_ofile_nominal = ( + n_samples + n_output_shards - 1) // n_output_shards +n_excess = n_output_shards * n_sample_per_ofile_nominal - \ + n_samples # Always a positive number +logging.info('Total number of samples: {}. Sample per shard {}/{}'.format( + n_samples, n_sample_per_ofile_nominal-1, n_sample_per_ofile_nominal)) + +logging.info('creating {} output file handles. 
This could take a while.'.format( + n_output_shards)) + +ofile_idx = 0 # which output file +ofile_entry_idx = 0 # index into an currently written data element of an output file +ifile_entry_idx = 0 # index into an currently read data element of an input file + +n_samples_in_this_shard = n_sample_per_ofile_nominal - 1 + +#open first output shard +ofile_handle = h5py.File('{}{:05d}{}'.format(ofile_prefix, ofile_idx, ofile_suffix), 'w') +input_ids = ofile_handle.create_dataset('input_ids', (n_samples_in_this_shard,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method) +segment_ids = ofile_handle.create_dataset('segment_ids', (n_samples_in_this_shard,), dtype=h5py.vlen_dtype(np.dtype('int8')), compression=hdf5_compression_method) +masked_lm_positions = ofile_handle.create_dataset('masked_lm_positions', (n_samples_in_this_shard,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method) +masked_lm_ids = ofile_handle.create_dataset('masked_lm_ids', (n_samples_in_this_shard,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method) +next_sentence_labels = ofile_handle.create_dataset('next_sentence_labels', data=np.zeros(n_samples_in_this_shard, dtype="int8"), dtype='i1', compression=hdf5_compression_method) + +for ifile in tqdm(input_files, total=len(input_files)): + h5_ifile = h5py.File(ifile, 'r') + f_input_ids = h5_ifile['input_ids'][:] + f_input_masks = h5_ifile['input_mask'][:] + f_segment_ids = h5_ifile['segment_ids'][:] + f_masked_lm_positions = h5_ifile['masked_lm_positions'][:] + f_masked_lm_ids = h5_ifile['masked_lm_ids'][:] + f_next_sentence_labels = h5_ifile['next_sentence_labels'][:] + h5_ifile.close() + + ifile_entry_idx = 0 # reset input reading index + + while ifile_entry_idx < f_input_ids.shape[0]: + if ofile_entry_idx == n_samples_in_this_shard: # shard is filled up, prepare a next one + + ofile_handle.flush() + ofile_handle.close() + + ofile_entry_idx = 0 # reset output writting index + ofile_idx += 1 # next output file + + n_samples_in_this_shard = n_sample_per_ofile_nominal + if ofile_idx < n_excess: + n_samples_in_this_shard -= 1 + + ofile_handle = h5py.File('{}{:05d}{}'.format(ofile_prefix, ofile_idx, ofile_suffix), 'w') + input_ids = ofile_handle.create_dataset('input_ids', (n_samples_in_this_shard,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method) + segment_ids = ofile_handle.create_dataset('segment_ids', (n_samples_in_this_shard,), dtype=h5py.vlen_dtype(np.dtype('int8')), compression=hdf5_compression_method) + masked_lm_positions = ofile_handle.create_dataset('masked_lm_positions', (n_samples_in_this_shard,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method) + masked_lm_ids = ofile_handle.create_dataset('masked_lm_ids', (n_samples_in_this_shard,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method) + next_sentence_labels = ofile_handle.create_dataset('next_sentence_labels', data=np.zeros(n_samples_in_this_shard, dtype="int8"), dtype='i1', compression=hdf5_compression_method) + + input_ids[ofile_entry_idx] = f_input_ids[ifile_entry_idx, :sum(f_input_masks[ifile_entry_idx])] + segment_ids[ofile_entry_idx] = f_segment_ids[ifile_entry_idx, :sum(f_input_masks[ifile_entry_idx])] + masked_lm_positions[ofile_entry_idx] = f_masked_lm_positions[ifile_entry_idx, :sum(f_masked_lm_positions[ifile_entry_idx] != 0)] + masked_lm_ids[ofile_entry_idx] = f_masked_lm_ids[ifile_entry_idx, :sum(f_masked_lm_positions[ifile_entry_idx] != 
0)] + next_sentence_labels[ofile_entry_idx] = f_next_sentence_labels[ifile_entry_idx] + + ofile_entry_idx += 1 + ifile_entry_idx += 1 + n_samples_written += 1 + +logging.info("{} samples wriiten.".format(n_samples_written)) \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/clean.sh b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/clean.sh new file mode 100644 index 000000000..e8a634a57 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/clean.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +filein=$1 +fileout=$2 + +echo "Further clean up: $filein => $fileout" + +cmd="cat $filein " +cmd+="| grep -v '^]*>$' " +cmd+="| grep -vE '\[\[Category:[^][]*\]\]' " +cmd+="| sed 's/\[\[\([^]|[]*\)\]\]/\1/g' " +cmd+="| sed 's/\[\[\([^]|[]*\)\]\]/\1/g' " +cmd+="| sed 's/\[\[[^]|[]*|\([^]|[]*\)\]\]/\1/g' " +cmd+="| sed 's/\[\[[^]|[]*|\([^]|[]*\)\]\]/\1/g' " +cmd+="| sed 's/\[\[[:]*[Ff]ile:[^][]*\]\]//g' " +cmd+="| sed 's/\[\[[Mm]edia:[^][]*\]\]//g' " +cmd+="| sed 's/\[\[[Ii]mage:[^][]*\]\]//g' " +cmd+="| sed 's/\[\([^]|[]*\)\]/\1/g' " +cmd+="| sed 's/\[\[\([^][]*\)\]\]//g' " +cmd+="| sed 's/alt=//g' " +cmd+="| sed 's/<\/doc>/\r/g' " +cmd+="| sed 's//\1/g' " +cmd+="| sed 's//\1/g' " +cmd+="| sed 's/<\, ref \([^<]*\)<\/ref>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's///g' " +cmd+="| sed 's///g' " +cmd+="| sed 's///g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/]*\)>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/]*\)>//g' " +cmd+="| sed 's/]*\)>//g' " +cmd+="| sed 's/]*\)>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/]*\)>//g' " +cmd+="| sed 's/<\/mapframe>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/poem>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/math>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/ref>//g' " +cmd+="| sed 's/]*\)>//g' " +cmd+="| sed 's/<\/div\([^>]*\)>//g' " +cmd+="| sed 's/<\/div style>//g' " +cmd+="| sed 's/<\/div>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/sup>//g' " +cmd+="| sed 's/
//g' " +cmd+="| sed 's/<\/br>//g' " +cmd+="| sed 's/
//g' " +cmd+="| sed 's/<\/BR>//g' " +cmd+="| sed 's/
//g' " +cmd+="| sed 's/<\/Br>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/del>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/nowiki>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/NOWIKI>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/onlyinclude>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/includeonly>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/small>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/chem>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/noinclude>//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/<\/gallery>//g' " +cmd+="| sed 's/{//g' " +cmd+="| sed 's///g' " +cmd+="| sed 's/}<\/graph>//g' " +cmd+="| sed 's/<\/graph>//g' " +cmd+="| sed 's/<\/references>//g' " +cmd+="| sed 's/]*\)>//g' " +# cmd+="| grep -v '^[ \t]*$' " +cmd+="> $fileout" + +# echo "bash -c ${cmd[@]}" + bash -c "${cmd[@]}" + + diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/cleanup_file.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/cleanup_file.py new file mode 100644 index 000000000..1d4e4d010 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/cleanup_file.py @@ -0,0 +1,83 @@ +# Lint as: python3 +"""Script to clean up input wiki dump for BERT input.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import glob +import io +import logging +import multiprocessing +import os +import time + + +parser = argparse.ArgumentParser(description='Wiki clean up for BERT.') +parser.add_argument( + '--data', + type=str, + default='./wiki_??', + help='Input files. Default is "./wiki_??"') +parser.add_argument( + '--input_suffix', + type=str, + default='', + help='Suffix for input files. Default is ""') +parser.add_argument( + '--output_suffix', + type=str, + default='.1', + help='Suffix for output files. Default is ".1"') +parser.add_argument( + '--nworker', + type=int, + default=72, + help='Number of workers for parallel processing.') +args = parser.parse_args() + + +def process_one_file(one_input): + """Remove tag and title of pages, for one file.""" + input_filename = one_input + args.input_suffix + output_filename = one_input + args.output_suffix + logging.info('Processing %s => %s', input_filename, output_filename) + + with io.open(input_filename, 'r', encoding='utf-8') as fin: + with io.open(output_filename, 'w', encoding='utf-8') as fout: + + keep_next_line = True + for line in fin: + if not keep_next_line: + keep_next_line = True + continue + + if '' in line: + continue + + if len(line) == 1: + continue + + # line = line.replace('', '').replace('', '') + + fout.write(line) + + +if __name__ == '__main__': + input_files = sorted(glob.glob(os.path.expanduser(args.data))) + num_files = len(input_files) + num_workers = args.nworker + logging.basicConfig(level=logging.INFO) + logging.info('Number of input files to process = %d', num_files) + + tic = time.time() + p = multiprocessing.Pool(num_workers) + p.map(process_one_file, input_files) + toc = time.time() + logging.info('Processed %s in %.2f sec', args.data, toc - tic) diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/convert_fixed2variable.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/convert_fixed2variable.py new file mode 100644 index 000000000..afe3db06b --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/convert_fixed2variable.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# Copyright (c) 2019-2021 NVIDIA CORPORATION. 
All rights reserved. +# Copyright 2020 MLBenchmark Group. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Create masked LM/next sentence masked_lm TF examples for BERT.""" + + +import argparse +import time +import logging +import collections +import h5py +import numpy as np +from tqdm import tqdm + +parser = argparse.ArgumentParser( + description="Eval sample picker for BERT.") +parser.add_argument( + '--input_hdf5_file', + type=str, + default='', + help='Input hdf5_file path') +parser.add_argument( + '--output_hdf5_file', + type=str, + default='', + help='Output hdf5_file path') +args = parser.parse_args() + +logging.basicConfig(level=logging.INFO) + +if __name__ == '__main__': + tic = time.time() + print(args.input_hdf5_file) + h5_ifile = h5py.File(args.input_hdf5_file, 'r') + num_examples = h5_ifile.get('next_sentence_labels').shape[0] + +# hdf5_compression_method = "gzip" + hdf5_compression_method = None + + h5_writer = h5py.File(args.output_hdf5_file, 'w') + input_ids = h5_writer.create_dataset('input_ids', (num_examples,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method) + segment_ids = h5_writer.create_dataset('segment_ids', (num_examples,), dtype=h5py.vlen_dtype(np.dtype('int8')), compression=hdf5_compression_method) + masked_lm_positions = h5_writer.create_dataset('masked_lm_positions', (num_examples,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method) + masked_lm_ids = h5_writer.create_dataset('masked_lm_ids', (num_examples,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method) + next_sentence_labels = h5_writer.create_dataset('next_sentence_labels', data=np.zeros(num_examples, dtype="int8"), dtype='i1', compression=hdf5_compression_method) + + for i in tqdm(range(num_examples), total=num_examples): + input_ids[i] = h5_ifile['input_ids'][i, :sum(h5_ifile['input_mask'][i])] + segment_ids[i] = h5_ifile['segment_ids'][i, :sum(h5_ifile['input_mask'][i])] + masked_lm_positions[i] = h5_ifile['masked_lm_positions'][i, :sum(h5_ifile['masked_lm_positions'][i]!=0)] + masked_lm_ids[i] = h5_ifile['masked_lm_ids'][i, :sum(h5_ifile['masked_lm_positions'][i]!=0)] + next_sentence_labels[i] = h5_ifile['next_sentence_labels'][i] + + h5_writer.flush() + h5_writer.close() + + toc = time.time() + logging.info("Converted {} examples in {:.2} sec".format(num_examples, toc - tic)) \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/convert_tf_checkpoint.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/convert_tf_checkpoint.py new file mode 100644 index 000000000..2a2568e26 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/convert_tf_checkpoint.py @@ -0,0 +1,93 @@ +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +pkg_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) +sys.path.append(pkg_path) + + +import torch +import argparse + +from model import BertForPretraining, BertConfig + +def parse_arguments(): + parser = argparse.ArgumentParser() + + parser.add_argument("--bert_model", default="bert-large-uncased", type=str, + help="Bert pre-trained model selected in the list: bert-base-uncased, " + "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") + parser.add_argument('--tf_checkpoint', + type=str, + default="/google_bert_data", + help="Path to directory containing TF checkpoint") + parser.add_argument('--bert_config_path', + type=str, + default="/workspace/phase1", + help="Path bert_config.json is located in") + parser.add_argument('--output_checkpoint', type=str, + default='./checkpoint.pt', + help="Path to output PyT checkpoint") + + return parser.parse_args() + +def prepare_model(args, device): + + # Prepare model + config = BertConfig.from_json_file(args.bert_config_path) + + # Padding for divisibility by 8 + if config.vocab_size % 8 != 0: + config.vocab_size += 8 - (config.vocab_size % 8) + print('padded vocab size to: {}'.format(config.vocab_size)) + + # Set some options that the config file is expected to have (but don't need to be set properly + # at this point) + config.pad = False + config.unpad = False + config.dense_seq_output = False + config.fused_mha = False + config.fused_gelu_bias = False + config.fuse_qkv = False + config.fuse_scale = False + config.fuse_mask = False + config.fuse_dropout = False + config.apex_softmax = False + config.enable_stream = False + config.unpad_fmha = False + config.pad_fmha = False + config.fused_bias_mha = False + config.fused_dropout_add = False + config.fused_bias_fc = False + if config.fuse_mask == True: config.apex_softmax = True + if config.pad == False: config.enable_stream = True + if config.unpad == True: config.fused_mha = False + + #Load from TF checkpoint + model = BertForPretraining.from_pretrained(args.tf_checkpoint, from_tf=True, config=config) + + return model + +def main(): + args = parse_arguments() + device = torch.device("cuda") + + model = prepare_model(args, device) + + torch.save({'model' : model.state_dict() }, args.output_checkpoint) + + +if __name__ == "__main__": + main() + diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/create_pretraining_data.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/create_pretraining_data.py new file mode 100644 index 000000000..73cff8892 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/create_pretraining_data.py @@ -0,0 +1,455 @@ +# coding=utf-8 +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. +# Copyright 2020 MLBenchmark Group. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Create masked LM/next sentence masked_lm TF examples for BERT.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import random +import tokenization +import tensorflow as tf + +import h5py +import numpy as np + +hdf5_compression_method = None + +#flags = tf.flags +flags = tf.compat.v1.flags + +FLAGS = flags.FLAGS + +flags.DEFINE_string("input_file", None, + "Input raw text file (or comma-separated list of files).") + +flags.DEFINE_string( + "output_file", None, + "Output TF example file (or comma-separated list of files).") + +flags.DEFINE_string("vocab_file", None, + "The vocabulary file that the BERT model was trained on.") + +flags.DEFINE_bool( + "do_lower_case", True, + "Whether to lower case the input text. Should be True for uncased " + "models and False for cased models.") + +flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.") + +flags.DEFINE_integer("max_predictions_per_seq", 20, + "Maximum number of masked LM predictions per sequence.") + +flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.") + +flags.DEFINE_integer( + "dupe_factor", 10, + "Number of times to duplicate the input data (with different masks).") + +flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.") + +flags.DEFINE_float( + "short_seq_prob", 0.1, + "Probability of creating sequences which are shorter than the " + "maximum length.") + + +class TrainingInstance(object): + """A single training instance (sentence pair).""" + + def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels, + is_random_next): + self.tokens = tokens + self.segment_ids = segment_ids + self.is_random_next = is_random_next + self.masked_lm_positions = masked_lm_positions + self.masked_lm_labels = masked_lm_labels + + def __str__(self): + s = "" + s += "tokens: %s\n" % (" ".join( + [tokenization.printable_text(x) for x in self.tokens])) + s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids])) + s += "is_random_next: %s\n" % self.is_random_next + s += "masked_lm_positions: %s\n" % (" ".join( + [str(x) for x in self.masked_lm_positions])) + s += "masked_lm_labels: %s\n" % (" ".join( + [tokenization.printable_text(x) for x in self.masked_lm_labels])) + s += "\n" + return s + + def __repr__(self): + return self.__str__() + + +def write_instance_to_example_files(instances, tokenizer, max_seq_length, + max_predictions_per_seq, output_files): + """Create TF example files from `TrainingInstance`s.""" + writers = [] + h5_writers = [] + + expected_instances_per_file = len(instances) // len(output_files) + 500 # Over-allocation to avoid resizing + for output_file in output_files: + h5_writers.append({ + 'handle' : h5py.File(output_file + ".hdf5", 'w'), + 'input_ids' : np.zeros([expected_instances_per_file, max_seq_length], dtype="int32"), + 'input_mask' : np.zeros([expected_instances_per_file, max_seq_length], dtype="int32"), + 'segment_ids' : np.zeros([expected_instances_per_file, max_seq_length], dtype="int32"), + 'masked_lm_positions' : 
np.zeros([expected_instances_per_file, max_predictions_per_seq], dtype="int32"), + 'masked_lm_ids' : np.zeros([expected_instances_per_file, max_predictions_per_seq], dtype="int32"), + 'next_sentence_labels' : np.zeros(expected_instances_per_file, dtype="int32"), + 'len' : 0 }) + + writer_index = 0 + + total_written = 0 + + features_h5 = collections.OrderedDict() + + for (inst_index, instance) in enumerate(instances): + input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) + input_mask = [1] * len(input_ids) + segment_ids = list(instance.segment_ids) + assert len(input_ids) <= max_seq_length + + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + masked_lm_positions = list(instance.masked_lm_positions) + masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) + masked_lm_weights = [1.0] * len(masked_lm_ids) + + while len(masked_lm_positions) < max_predictions_per_seq: + masked_lm_positions.append(0) + masked_lm_ids.append(0) + masked_lm_weights.append(0.0) + + next_sentence_label = 1 if instance.is_random_next else 0 + + h5_writers[writer_index]['input_ids'][inst_index] = input_ids + h5_writers[writer_index]['input_mask'][inst_index] = input_mask + h5_writers[writer_index]['segment_ids'][inst_index] = segment_ids + h5_writers[writer_index]['masked_lm_positions'][inst_index] = masked_lm_positions + h5_writers[writer_index]['masked_lm_ids'][inst_index] = masked_lm_ids + h5_writers[writer_index]['next_sentence_labels'][inst_index] = next_sentence_label + h5_writers[writer_index]['len'] += 1 + + writer_index = (writer_index + 1) % len(h5_writers) + + total_written += 1 + + if inst_index < 20: + tf.compat.v1.logging.info("*** Example ***") + tf.compat.v1.logging.info("tokens: %s" % " ".join( + [tokenization.printable_text(x) for x in instance.tokens])) + + print("saving data") + for h5_writer in h5_writers: + my_size = h5_writer['len'] + h5_writer['handle'].create_dataset('input_ids', data=h5_writer['input_ids'][:my_size], dtype='i2', compression=hdf5_compression_method) + h5_writer['handle'].create_dataset('input_mask', data=h5_writer['input_mask'][:my_size], dtype='i1', compression=hdf5_compression_method) + h5_writer['handle'].create_dataset('segment_ids', data=h5_writer['segment_ids'][:my_size], dtype='i1', compression=hdf5_compression_method) + h5_writer['handle'].create_dataset('masked_lm_positions', data=h5_writer['masked_lm_positions'][:my_size], dtype='i2', compression=hdf5_compression_method) + h5_writer['handle'].create_dataset('masked_lm_ids', data=h5_writer['masked_lm_ids'][:my_size], dtype='i2', compression=hdf5_compression_method) + h5_writer['handle'].create_dataset('next_sentence_labels', data=h5_writer['next_sentence_labels'][:my_size], dtype='i1', compression=hdf5_compression_method) + h5_writer['handle'].flush() + h5_writer['handle'].close() + + tf.compat.v1.logging.info("Wrote %d total instances", total_written) + + +def create_int_feature(values): + feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) + return feature + +def create_float_feature(values): + feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) + return feature + +def create_training_instances(input_files, tokenizer, max_seq_length, + dupe_factor, short_seq_prob, masked_lm_prob, + max_predictions_per_seq, rng): + """Create `TrainingInstance`s from 
raw text.""" + all_documents = [[]] + + # Input file format: + # (1) One sentence per line. These should ideally be actual sentences, not + # entire paragraphs or arbitrary spans of text. (Because we use the + # sentence boundaries for the "next sentence prediction" task). + # (2) Blank lines between documents. Document boundaries are needed so + # that the "next sentence prediction" task doesn't span between documents. + for input_file in input_files: + with tf.compat.v1.gfile.GFile(input_file, "r") as reader: + while True: + line = tokenization.convert_to_unicode(reader.readline()) + if not line: + break + line = line.strip() + + # Empty lines are used as document delimiters + if not line: + all_documents.append([]) + tokens = tokenizer.tokenize(line) + if tokens: + all_documents[-1].append(tokens) + + # Remove empty documents + all_documents = [x for x in all_documents if x] + rng.shuffle(all_documents) + + vocab_words = list(tokenizer.vocab.keys()) + instances = [] + for _ in range(dupe_factor): + for document_index in range(len(all_documents)): + instances.extend( + create_instances_from_document( + all_documents, document_index, max_seq_length, short_seq_prob, + masked_lm_prob, max_predictions_per_seq, vocab_words, rng)) + + rng.shuffle(instances) + return instances + + +def create_instances_from_document( + all_documents, document_index, max_seq_length, short_seq_prob, + masked_lm_prob, max_predictions_per_seq, vocab_words, rng): + """Creates `TrainingInstance`s for a single document.""" + document = all_documents[document_index] + + # Account for [CLS], [SEP], [SEP] + max_num_tokens = max_seq_length - 3 + + # We *usually* want to fill up the entire sequence since we are padding + # to `max_seq_length` anyways, so short sequences are generally wasted + # computation. However, we *sometimes* + # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter + # sequences to minimize the mismatch between pre-training and fine-tuning. + # The `target_seq_length` is just a rough target however, whereas + # `max_seq_length` is a hard limit. + target_seq_length = max_num_tokens + if rng.random() < short_seq_prob: + target_seq_length = rng.randint(2, max_num_tokens) + + # We DON'T just concatenate all of the tokens from a document into a long + # sequence and choose an arbitrary split point because this would make the + # next sentence prediction task too easy. Instead, we split the input into + # segments "A" and "B" based on the actual "sentences" provided by the user + # input. + instances = [] + current_chunk = [] + current_length = 0 + i = 0 + while i < len(document): + segment = document[i] + current_chunk.append(segment) + current_length += len(segment) + if i == len(document) - 1 or current_length >= target_seq_length: + if current_chunk: + # `a_end` is how many segments from `current_chunk` go into the `A` + # (first) sentence. + a_end = 1 + if len(current_chunk) >= 2: + a_end = rng.randint(1, len(current_chunk) - 1) + + tokens_a = [] + for j in range(a_end): + tokens_a.extend(current_chunk[j]) + + tokens_b = [] + # Random next + is_random_next = False + if len(current_chunk) == 1 or rng.random() < 0.5: + is_random_next = True + target_b_length = target_seq_length - len(tokens_a) + + # This should rarely go for more than one iteration for large + # corpora. However, just to be careful, we try to make sure that + # the random document is not the same as the document + # we're processing. 
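+          # Note: at most 10 draws are attempted. In the unlikely case that
+          # every draw returns the current document (only plausible for a very
+          # small corpus), random_document_index remains equal to
+          # document_index and segment "B" is simply taken from the same
+          # document.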
+ for _ in range(10): + random_document_index = rng.randint(0, len(all_documents) - 1) + if random_document_index != document_index: + break + + random_document = all_documents[random_document_index] + random_start = rng.randint(0, len(random_document) - 1) + for j in range(random_start, len(random_document)): + tokens_b.extend(random_document[j]) + if len(tokens_b) >= target_b_length: + break + # We didn't actually use these segments so we "put them back" so + # they don't go to waste. + num_unused_segments = len(current_chunk) - a_end + i -= num_unused_segments + # Actual next + else: + is_random_next = False + for j in range(a_end, len(current_chunk)): + tokens_b.extend(current_chunk[j]) + truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) + + assert len(tokens_a) >= 1 + assert len(tokens_b) >= 1 + + tokens = [] + segment_ids = [] + tokens.append("[CLS]") + segment_ids.append(0) + for token in tokens_a: + tokens.append(token) + segment_ids.append(0) + + tokens.append("[SEP]") + segment_ids.append(0) + + for token in tokens_b: + tokens.append(token) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + (tokens, masked_lm_positions, + masked_lm_labels) = create_masked_lm_predictions( + tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng) + instance = TrainingInstance( + tokens=tokens, + segment_ids=segment_ids, + is_random_next=is_random_next, + masked_lm_positions=masked_lm_positions, + masked_lm_labels=masked_lm_labels) + instances.append(instance) + current_chunk = [] + current_length = 0 + i += 1 + + return instances + +MaskedLmInstance = collections.namedtuple("MaskedLmInstance", + ["index", "label"]) + +def create_masked_lm_predictions(tokens, masked_lm_prob, + max_predictions_per_seq, vocab_words, rng): + """Creates the predictions for the masked LM objective.""" + + cand_indexes = [] + for (i, token) in enumerate(tokens): + if token == "[CLS]" or token == "[SEP]": + continue + cand_indexes.append(i) + + rng.shuffle(cand_indexes) + + output_tokens = list(tokens) + + num_to_predict = min(max_predictions_per_seq, + max(1, int(round(len(tokens) * masked_lm_prob)))) + + masked_lms = [] + covered_indexes = set() + for index in cand_indexes: + if len(masked_lms) >= num_to_predict: + break + if index in covered_indexes: + continue + covered_indexes.add(index) + + masked_token = None + # 80% of the time, replace with [MASK] + if rng.random() < 0.8: + masked_token = "[MASK]" + else: + # 10% of the time, keep original + if rng.random() < 0.5: + masked_token = tokens[index] + # 10% of the time, replace with random word + else: + masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)] + + output_tokens[index] = masked_token + + masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) + + masked_lms = sorted(masked_lms, key=lambda x: x.index) + + masked_lm_positions = [] + masked_lm_labels = [] + for p in masked_lms: + masked_lm_positions.append(p.index) + masked_lm_labels.append(p.label) + + return (output_tokens, masked_lm_positions, masked_lm_labels) + + +def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): + """Truncates a pair of sequences to a maximum sequence length.""" + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_num_tokens: + break + + trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b + assert len(trunc_tokens) >= 1 + + # We want to sometimes truncate from the front and sometimes from the + # back to add more randomness and avoid biases. 
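+    # Each pass removes exactly one token from whichever sequence is currently
+    # longer, so the pair shrinks one token at a time until
+    # len(tokens_a) + len(tokens_b) <= max_num_tokens. For example, with
+    # max_num_tokens = 509 and 600 tokens in total, 91 tokens are trimmed.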
+ if rng.random() < 0.5: + del trunc_tokens[0] + else: + trunc_tokens.pop() + + +def main(_): + tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) + + tokenizer = tokenization.FullTokenizer( + vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) + + input_files = [] + for input_pattern in FLAGS.input_file.split(","): + input_files.extend(tf.compat.v1.gfile.Glob(input_pattern)) + + tf.compat.v1.logging.info("*** Reading from input files ***") + for input_file in input_files: + tf.compat.v1.logging.info(" %s", input_file) + + rng = random.Random(FLAGS.random_seed) + instances = create_training_instances( + input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor, + FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq, + rng) + + output_files = FLAGS.output_file.split(",") + tf.compat.v1.logging.info("*** Writing to output files ***") + for output_file in output_files: + tf.compat.v1.logging.info(" %s", output_file) + + write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length, + FLAGS.max_predictions_per_seq, output_files) + + +if __name__ == "__main__": + flags.mark_flag_as_required("input_file") + flags.mark_flag_as_required("output_file") + flags.mark_flag_as_required("vocab_file") + tf.compat.v1.app.run() diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/create_pretraining_data_wrapper.sh b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/create_pretraining_data_wrapper.sh new file mode 100644 index 000000000..a31f97111 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/create_pretraining_data_wrapper.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +SCRIPT_DIR="$(dirname "$(readlink -f "$0")")" + +INPUT=${1} +OUTPUT=${2}/$(basename $INPUT) +VOCAB=${3} + +python3 ${SCRIPT_DIR}/create_pretraining_data.py \ + --input_file=${INPUT} \ + --output_file=${OUTPUT} \ + --vocab_file=${VOCAB} \ + --do_lower_case=True \ + --max_seq_length=512 \ + --max_predictions_per_seq=76 \ + --masked_lm_prob=0.15 \ + --random_seed=12345 \ + --dupe_factor=10 diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/do_gather.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/do_gather.py new file mode 100644 index 000000000..0227924bd --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/do_gather.py @@ -0,0 +1,95 @@ +"""Script to package BERT dataset into files with approcimate size. + +Copied and modified from https://github.com/eric-haibin-lin/text-proc.git +""" +import argparse +import glob +import io +import logging +import multiprocessing +import os +import time +# import collections +# from multiprocessing import Pool + +parser = argparse.ArgumentParser(description='BERT data packaging') +parser.add_argument( + '--data', + type=str, + default='~/book-corpus-feb-stn/*/*.txt', + help='Input files. 
Default is "*.txt"') +parser.add_argument( + '--nworker', + type=int, + default=1, + help='Number of workers for parallel processing.') +parser.add_argument( + '--out_dir', + type=str, + default='~/book-corpus-large-gather/', + help='Output dir. Default is ~/book-corpus-large-gather/') +parser.add_argument( + '--num_outputs', type=int, default=500, help='number of output files') +parser.add_argument( + '--input_suffix', type=str, default='.3', help='Suffix for input filenames') +parser.add_argument( + '--block_size', + type=float, + default=32.0, + help='Block size for each output (MB)') + +args = parser.parse_args() + +input_files = sorted(glob.glob(os.path.expanduser(args.data))) +out_dir = os.path.expanduser(args.out_dir) +num_files = len(input_files) +num_workers = args.nworker +logging.basicConfig(level=logging.INFO) +logging.info('Number of input files to process = %d', num_files) + +if not os.path.exists(out_dir): + os.makedirs(out_dir) + + +def worker_fn(x): + """Workload for one worker.""" + file_split, worker_id = x + count = 0 + out_file = None + total_size = 0 + for in_path in file_split: + in_file = io.open(in_path + args.input_suffix, 'r', encoding='utf-8-sig') + curr_size = os.path.getsize(in_path) + if args.block_size * 1024 * 1024 < total_size + curr_size: + out_file.close() + out_file = None + count += 1 + total_size = 0 + if not out_file: + out_path = os.path.join( + out_dir, 'part-{}-of-{}'.format( + str(count + 1000 * worker_id).zfill(5), + str(args.num_outputs).zfill(5))) + out_file = io.open(out_path, 'w', encoding='utf-8') + total_size += curr_size + content = in_file.read() + if content[-1] == content[-2] and content[-1] == '\n': + content = content[:-1] + out_file.write(content) + + +if __name__ == '__main__': + p = multiprocessing.Pool(num_workers) + + # calculate the number of splits + file_splits = [] + split_size = (len(input_files) + num_workers - 1) // num_workers + for i in range(num_workers - 1): + file_splits.append((input_files[i * split_size:(i + 1) * split_size], i)) + file_splits.append( + (input_files[(num_workers - 1) * split_size:], num_workers - 1)) + + tic = time.time() + p.map(worker_fn, file_splits) + toc = time.time() + logging.info('Processed %s in %.2f sec', args.data, toc - tic) diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/do_sentence_segmentation.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/do_sentence_segmentation.py new file mode 100644 index 000000000..e907c0920 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/do_sentence_segmentation.py @@ -0,0 +1,74 @@ +"""Script for sentence segmentation. + +Copied and modified from https://github.com/eric-haibin-lin/text-proc.git +""" +import argparse +import glob +import io +import logging +import multiprocessing +import os +import time +import nltk + +from nltk.tokenize import sent_tokenize + +parser = argparse.ArgumentParser( + description='Sentence segmentation for BERT documents.') +parser.add_argument( + '--data', + type=str, + default='./*/*.compact', + help='Input files. Default is "./*/*.compact"') +parser.add_argument( + '--input_suffix', + type=str, + default='.2', + help='Suffix for input files. Default is ".2"') +parser.add_argument( + '--output_suffix', + type=str, + default='.3', + help='Suffix for output files. 
Default is ".3"') +parser.add_argument( + '--nworker', + type=int, + default=72, + help='Number of workers for parallel processing.') +args = parser.parse_args() + +# download package +nltk.download('punkt') + +# arguments +input_files = sorted(glob.glob(os.path.expanduser(args.data))) +num_files = len(input_files) +num_workers = args.nworker +logging.basicConfig(level=logging.INFO) +logging.info('Number of input files to process = %d', num_files) + + +def process_one_file(one_input): + """Separate paragraphs into sentences, for one file.""" + input_filename = one_input + args.input_suffix + output_filename = one_input + args.output_suffix + logging.info('Processing %s => %s', input_filename, output_filename) + with io.open(input_filename, 'r', encoding='utf-8') as fin: + with io.open(output_filename, 'w', encoding='utf-8') as fout: + for line in fin: + if len(line) == 1: + fout.write(u'\n') + sents = sent_tokenize(line) + for sent in sents: + sent_str = sent.strip() + # if sent_str: + fout.write('%s\n' % sent_str) + fout.write(u'\n') + + +if __name__ == '__main__': + tic = time.time() + p = multiprocessing.Pool(num_workers) + p.map(process_one_file, input_files) + toc = time.time() + logging.info('Processed %s in %.2f sec', args.data, toc - tic) diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/eval.md5 b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/eval.md5 new file mode 100644 index 000000000..bf8d906ce --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/eval.md5 @@ -0,0 +1,10000 @@ +e4515c4f17370f7418328077c61404a3 +cb03ca49918da9884997c2437b74a2e5 +947ae6147770e5bd1bf37343387b72cc +781dad6ca9b16d785c9e48b41aecee19 +8ad954fd2e7c2c19f8338a6ef7f73eff +0cc0c81a9735b7e92795417b9a3905da +508f75ff13c377310ca70d43f6c61c45 +093d706899aad4f1567b97fd6d04c0cd +66c7570301df7b632362467a35dfafbf +c696bb084ed1209a89e947a57a8324fe +27c9733c912554d505374bb7a49c4160 +a815f46241379e61b69e3e82dd32aa2d +e8e28c3378e15d45606dd81213c2e5b7 +faf45e144913e73da53b2d7e9458d667 +abd94678805debd2f677114a76b78961 +0864f74b0d6b996add81d55cf7545081 +3ed0d5bd91f4e34558ca5ebafb360b33 +873d73cf3c4c6f0d96d6595501c1cd32 +7c36b59496cadba791102077305a89e7 +c92292ffb66323be2988f89b3f3be98e +7fa33e23cc71b244c23f8ccd5ccd8cc6 +d55ca45c41bb79f86f7c9f4aa12b43de +8229bbbfc50c1608ff52d55702b0d0e5 +08e339d278b7f050e53dc2fd78ecb1ce +7fefe3b13b9fce509a80717e60fe17e8 +c5907568ba53dd66b251534053db2739 +3839dde112a5b5c09f2f5db5aec8029b +79d48014146c86acdfd5283289690460 +5d0ccd19e05191750b3f7bbaac0c775f +af2a1ef225b4d60a0bf03de84e354ba1 +ecee3ee49e2aaf774b876141c91bdc7e +957d907f0355485156c3a8ce0920a8ed +019d4de0e3d639592d1eb2e3903d5185 +a8478a88c5eb833e482a400987bf3a79 +d41d8cd98f00b204e9800998ecf8427e +7725cc5b58ef6bef0a6e00a8382e52d5 +9aa449b2265a1c3229ab54eeb51836d9 +f8d7b4f8bf3e62b4df4047692e5efa15 +b95fa421432461502f066f1191e3d1b4 +bb05863b1bdf727acd53ab8dcc589c42 +8aeab1332f832a1809435dbb53ece62c +3aae29ba345110afd9f62ee192bcee29 +a2474f9c45ffe966159e75743120cfb8 +c2ede108de22e546ac1a5db1f2ecb07a +4245c87d5c7a74221436d1fd072339c2 +ae148f40ac9e9134066340b5dd834a89 +3f80b3c7c9bd50aa061f159de1a9661b +9606087adc65a5070f3a4eeb23f1f0b3 +ecaa1fb034959fac41e3a2f0379e1c99 +05c1439f860223cd46b0c054ae18ca60 +8fc712a435c38c9eadcd745b4e148aa3 +ffc0af255030c5b2322adaadaeb2e22b +53ef3f7f97bb613ebbfdbb95c0dac251 +9c4c201b2e9f60d189836bdd0d2010ad +3e48e71ce6c1a5fd03466b07a82f796b +2054f64f1263720188d261f499f51cc3 +064041e2aab298edf9a62b98e7657a8f 
+098428953d35ff5a9870236a4411a8cd +016eb5248d0ae5c200ed41a1bb142404 +032a04e7a239c16d508f48701adeb78f +9df0a8c8d405fc0f935b6168428e76d5 +ac54c8ba0a81a5e027b097511b5c5c6e +eef5ff65e33e370dd491140307878ca5 +a4ca2f9cbfbfb25f006fc219a4939ccd +2b82d1490d965e7bcac58bab206555a3 +c97ee56278c01d4edf4530eeeab0df1d +93c24ad4bc93fe8e9bdcbd78eed6107f +9985eae5f5f1c9ba64740bb784755e29 +2d13f6fedc9fb5b2f91065a8e33c2f94 +b4807511692724fb83a76a39bbde141e +b916b3a8025e512011338c0ed40bef87 +6edd12d4c8df99817b445b9e35300633 +1bc92b1a22d261bff24dc899c195bd68 +a390540b707796ec07abfa010c48ef44 +2a6384f009123e06d7b6b644754a314c +020638edf98cba4089ff7e22fa3ba4ab +9d6403692ee427a8c2de5b85cced6cf1 +f35a18f8c5ba7dd5b745bde291457d2b +5ade9b817973e904e60908aa71b22f64 +761cff770ed4fa2b7d8a1dd2e74d571f +018bf65cb09a59ea9297f99eea072f91 +699444530a0b002e57e1e61540eed121 +ef4b196605cc91fbc6ba84ccd3c8d19b +3d49bf243d85ffe5449b01824d014411 +9ef43abbd82ae12f699998caa4ad6705 +1e76898011cfe256d4d7ef7df8edac7a +1463da0e0c51c48c4afa7c80784e24c2 +79c1caeeeaa6b839da0c9a8659ef4b53 +c10981aeabd6a45e94bbd99f488de599 +d3ef7dd596b820cba948b53ce11830b8 +d41d8cd98f00b204e9800998ecf8427e +f986d2b16c0e7630663d5babde8a9684 +a8c8fb3f07c6ec2ba9dc152e689fd4da +f947ef9aa46502051180a8bae8da569c +d41d8cd98f00b204e9800998ecf8427e +c6aaaeaa929e9bf26d83bb62166af8ff +ec2508d810abfd922d52fa13fa9794e4 +6009ffa7a814784a6f5d8dd57081969d +d18412fb659feb7e7ec3178ac6fa988d +54d31b471b0e19a10cbc1db8957f9dd0 +7116f875e5605503152c21cfee2b5ace +b501813e40d2d0c391bd8875f9d85b73 +63eef4b11fc02b250d4ea5aba63d8a4a +1d5ba0becef41d894cedbc9fb069d74a +534d041f991e09a4eaf14e33e418e3ab +35db1856e7f5353adfb70f9656efcf40 +ec522cf077272d135336d27181f5276c +964be8dd1164c1c6a22ef7cecb1aea47 +22454d415afb7ea0c6b91103b4e92df3 +ddb46101921bbe3f3fc5ac8106ef83ef +f81d9ce95913381ddb2c381a23463155 +d17ed0849cb434015bc20be31bbe06a8 +4cc5aec4147e58f3354773f288787004 +3be9098148759f82680147ddb2c468a1 +a8d04e89a0e423f1dbd5caa245909dd2 +af680b47bc38ff23a095affcda726184 +65a654f0ddec814b22b2eb05cf10fb4e +88edc883306eeaa828a1b0845520625c +4c6a47e7d2662edcb60bc7d00e5c9002 +5b1f8dbcd1e5a6f35494ef05a892e955 +040b2d46fe48dcd8d463df2632f9b738 +b914bc4089ca48de29ada6dd6033aaf6 +9595c22d5193ac583d01c33a3822b271 +ec1361c2fedb4ee25141f5bd5eb4f6c5 +d41d8cd98f00b204e9800998ecf8427e +753b1988966fb3e838b7afb8f4559ce6 +d41d8cd98f00b204e9800998ecf8427e +b99dba26a6d078073dc73bf9795adbee +5d1974e3969cf73aee3f7d7e42a6629b +dc0662669ee4fd2c40d9b983e71bc7af +d9901bfb178222624e1e2a80d3088bc5 +e9f0e33872c02e920740715c47740f7a +e6557cd111ebd0a2fb697dd32ea19176 +27f9674cbd3cc9431f61331c6bcbaed0 +0d4d1dc106bf6227d3300344245a2146 +f1b4297bc0250b25096f1379dc4bd8e0 +55062b58c270da524b0271c93e394f68 +668c1e6e35882914f9650e9fe4109d23 +15ba44fdc857517693d02559580b217c +6eafbccd38860664056c6a3f998d824a +9d21d99ed9ec5c08b84fd740106f94b2 +cd67fb2fb2683fe7860431ec9fc996cf +bc4dd130ceb1a4bef2a580b2667b48d5 +604deb1bda61485410d19df98dd590b3 +1461663d2edead70b8abd5b6d3fd229f +29667235686a8fb1a9fa03f17196f3dc +e20f6d3c5b32566b7051c0e4d263c494 +bd0732d28992eaa26a2da493591ca005 +58aeeb716a2eff3a79fd8f096ed24b45 +77481e625067aab9752695a9312cee9d +006cedf8a3b672294e8b2f7d182dd93f +e779b8c927a3a394ff8197bb5faf613c +9d94c9483569788ff622d5d55c6a16f7 +4ed9de010c0c155e1dbab045d77c2ba2 +e316264a234329159e10e40dabc93df3 +808b27e73c570483018c92c2162e4b2b +52bc89b2ba44cef4aa6eaba996866aad +2b6b61fafb17b328b58403064b04aae7 +225e512992a60fafd5645f5f67b03a73 +774863c0375af2ddd825748995f0e274 +6876e1411bd225590016cb2714725633 
+c4d392c5579054189574ca584fb3138e +f930d4ae7529362d13945837298ab5d0 +e33e79f6b4944aabb3bca8e019141399 +bba5b787f342c232cf9e551743b174e3 +21e67e9baec6c7c1aca1de379e9d23f8 +6c0eeb70ebf13912673f414b5d269f8b +1a6b3ba2b2c7b1ae8a5bd8463182dcf0 +426adede287982223630b652883e6bda +f31b2ce4af2dab13cb1fcc13bc555eb9 +4c40c47d349aa4dcea2354ebc992a5a8 +86f4a9562a2f5e97e96562bade3ef632 +afedebe0f16adb819cf3280b9f9623cb +c52f3943565bcb9f7ab08dcae6847809 +63c2e4ef68162c6a636f1be566c5be14 +936ccf111900cbcd385138426ccda5a5 +8911115e59be8fe9108cc925fba25842 +b464e74bb5d59b48cadbabb6a4b4774e +b416f81909fc841ce59fefbeefc138cb +9e0c5c3a02617ef7653a1312aff1bff1 +5f6828ff6b2c74743980fcef36f0ffbe +71a2ad5484b41fd7433c4b52ba4263af +431df30f62aaa5912ddc7e434c3c7648 +6edb323fd74a5ae5aabc2b9856d46943 +fddce22bdc15c2e7ad282f8a51900cf7 +1ad30e25b803d53e55eb54fc90799eba +2df58b441666be9cac5820dfd94393fd +75a558848969278aad8a05a00a3bb93f +0a2c8395428cd4b12376a738d6e437a2 +63fdf2b4e6f69ee5c8249fce65700b07 +d89a9ea021fc6b8c71cd6f2d95582420 +2202e0c69a133395e20ce3e08ba34a47 +d986900b8f454760c2c55f42b48b37c5 +8410842a5dfbd08a9c86a0b907234b5a +278c91b2999d0fbea42d6c0222225cfe +f343fad3bbd44cfec27dab2f10da9055 +c3829aee4c01c7eac45cec55671689ed +d41d8cd98f00b204e9800998ecf8427e +e789eeae15abe40d5f740cf5ac1cca1f +d41d8cd98f00b204e9800998ecf8427e +312a20cd247a895da8ed2ed738061e8d +b83f210c10a3191682ebb258391cef23 +19a46d859cafde19a3619f17c6b05d9c +66ea898ea65d6ad8afb2421c3f3cb55d +cc47bfda019692e4ac18f22aa1a0a2ba +a3f38e24c3a2532139d18775376635df +ff72aa70914862ee3926db7238784837 +656237a3d0507e3cb556b59bdbefde72 +05a11c9a9e1c43ddc4d5c76aed989fb7 +20a2805417aff9283210b15ab58a806d +515cb5a0acd2e49c81f3a47beec36db8 +ccf156efe4945c5feb044f89838aa3eb +15813c22904d421b7fdb4181a807b568 +ae5e656012ab5fb962fc082b7f4f6810 +c32a4c5775fce9f405c494c8283b88c7 +bcfe6e00605070ed51b24519b956745e +437302b9025c1d7122dacf5b14d817ae +e4fa5ae547a003e7a8ef5b5f7d1ec136 +d086ce8bf6be4eb7365bf09c2ef60824 +094b05008c48d3b9ca3364dd40efea38 +c94c670ebd72c4ce9764a5a1465bda12 +b02d1bcb635edffb446b10cf5e7d1c8e +f30447455d05f41b978edfbbfb13bb47 +1e3312ba7dc4546682a75e6d3598cf8f +e54e28dc37d204061009fbba08a06cef +45cf1d8bab0fe8ab2b53c3ac228f0385 +085e4ed6cdf29a52f18d8f9313edba94 +43b7db299dc33eb4b2cf049ba9e23396 +ee2a8929295253a84bd4516bf04bf797 +fed5f9e9760e22baa74bf87920e39370 +7be2397c410ca8835fbbac82d60be419 +8476bead7c2d42a7fb7ed56459157127 +98264659f5bbbaf7ff7055b93906ce3f +5174844a4a993bf74258d4d0f99de007 +7f2978743e65fc3eca33d9a38ec6bedd +d19b338a37b41c7be1e10aaa9f7d745d +6557255f147977f46ff3dc1e133056e5 +0effdfeea2be8a3b97487d18e1299f9b +59023eb3b3a9c2da69f62417770ec0e5 +ebdd2fc42a5a8f303ed1dd4a39211bf1 +fe76154177e384de429a822722c9357f +b3a475c2906e4819ae34a26e17b4edd6 +192a276e59f8e7c704da640bbd04a074 +8b967d38b27e2be153f409d48993772a +a501f0b3f9976145dc5bf807fbc3bc06 +1a221931b038a7e64cb07646eff64800 +34325ba1f722c24885014dc129777978 +9b80623fd97fec7407aa278bb3a50cdc +d41d8cd98f00b204e9800998ecf8427e +ec39df3e9daeead9896161c875bd81f2 +f472fd836a61aada647b5b2e67221408 +5f69add393d9428fe00a62998ba353c1 +f4b3f8d7db994e7cfe85d25ab743664d +d41d8cd98f00b204e9800998ecf8427e +8f306bc0e4fb3748e6ffe4898381cc5c +e5fbe6a5a4f6bc8a325997e9e1aa9502 +2e075cc9489734f06b8d2a3276576f9b +fa7122a9dfbc37da5fdb7324d417b2f8 +b9c1d9712de9df894c76107e1306d426 +a5c4f330acf15f51675847f1601a0731 +96309cb58434a47cd46a5291cfba9fe4 +03cf776c1c231ee508de58443b5af3dc +fa13e4e7a63b15bfd4cefc227c23d5d3 +d4c373061c702eb0f9450d36f989fda2 +c44efc9db4fa10c874f87940fd5f2186 
+098dab377d378ab5f7b8b8e9600877f6 +dc1f7534be4f742ed9308c202ae983f7 +74dc0736489450fe528899e5bd36b5c8 +62c5d1ffb2b90ebe38be062455ddfef2 +518a635ed54c5be03fac42ceb6787c93 +cd57a4149cc20cf89eb36e0467e54a14 +cbc2ea589b483d4eb9ff9981b7b46546 +cd058b487175ad6198ab71b97747b88f +b89661a4a93a19b604a4370e7c1415c6 +3dfbb0dff0a310adeafcf5f874bbdb60 +63b61b32562cc5e505dc5b52058bbc50 +d49c382df9f3b422586caf2641ad7417 +c009eca72953fb9eb19c1c8d0b565d3b +3c84a39a94c49c7a093fb7a53d2f3ae6 +6a192ead9e794bfa54ad2aa1118b73a1 +1ca5b76eb95b94e7b146eb56ef7669e7 +0089643155593021a5ef21c170efbe62 +d7abaee2bccdebe037c197dcb602c6a8 +8fc7ea7fbc9214a3e39d299d3cc327c4 +37e3620bb29c2afc4de62d5c8b7670db +e32b44d46134781dc00b48aee5eaf6b0 +770102c7fb9f421ea6e05f271592e7bb +c5afff1c59d7bd7bbeb1ad1fcfaca341 +68cdd34a8c339004d589a48ce50aec9a +a135132c70e892a9308f1a220cee2d33 +d41d8cd98f00b204e9800998ecf8427e +073bfbae3e44fc720f757f34371aa94a +65b6c0d940d8cd6e34eb8560ac233bef +80aaea68a8cd5de7fac850b14b0fe4b1 +554f3e4066996237c60398d3fc753455 +d0d4bfd6d5542b8ef96d6bcda9d5f9af +b54cfb7320a19f0edecd9f57a0239332 +aa7cd0556d561a631ffec8b52d78a808 +2858e3f57dd00fb04ba0da40bae28b8e +91c0767943996300864fb6103bf95a7d +b3a703894308af2cef638f2adf0ef00a +60f3ccaf4bdd5695c62bcc8cd8aed4d6 +156d6a3d9560b46f42f5d87164a3d6ac +d41d8cd98f00b204e9800998ecf8427e +aacc5d04213714249e410a913f551a63 +a1b8304bb1a984b40c78eca26beb5ead +c1307e7e4e50c63090a8159e0da80497 +85fcbca78a15e98792804321cdc2a50f +d49e910a45ee6484db7cb8021f93d33b +b2e903b9e7829b192170a24829e2d326 +1c1300eb1ae2b48e83416719af7e100b +51873e473ef17feb4126d178dc55771f +b78ea6cc53e857a214c09dbd51438cd6 +4680e91b20b3ff40f743e8f4b8754276 +17fba0d6586779a7a24208b24dd877b2 +ba26b02a234d244f8a241604a01828c8 +d6ec644568fa8cf970feddac1e6bd9bd +4baa8bc4221b10b42e2ed4f8221bd020 +06186e80a74d442cd1d83531b7247042 +354132687d3abc6deb2240265e603ecc +13f2b3f46942eea3ae8f7b2e49f6045c +e2d3b487b0c799f42f27df384871f57f +df3afec2b85bb48cae4eae9ffc31ca93 +3d026abb6db4720623779fa2bb445e03 +aeed47a9c6db4c3a1364426e450c4619 +56db6c77ec92cefb24b947b0b2b820a0 +5f9c18ba59498b2313eaa820a002910b +e4b895468236acb386b9df445e88ea50 +ef68a3340a38baa8efbef9e236ca49d9 +a2b957f3796fd29e8a6109554ca1fe59 +e24d33c57485a446868f34809a3d2ae5 +97dbc9662f2b70ad1037d207e9cdfa2b +5046847a27f90f4fdc451923704fdb5e +321dc9d429f5054602da66b6167a03d3 +b4e5dfe8456f18dbfca49e6b20b64516 +37056ea65add73a53d6eead4e5cfca0f +2501042a5c19cfc6df00c2d891ee4be0 +3796df2afb9ed593698188715a93c0af +7278c9cb56f676c56fc135b17e008c2c +5a5c9b8fc09d372e18438ba3db170a00 +75cce23f7cb8f808100ef81187f767a1 +e58469bd1462381549127d3bb0802914 +ce933fb9bf3b0f572f868b01353555dc +b9dffca7d3df1e8d671343654beac150 +b12a598aad8fcfd3c3ca9f1acce858ee +9c85e1990554ea2ae59d31f71a5b8f8f +dc43b246fe073cb48e3ed2b71d4e18fc +174a3b0da8b993e49e1cb2c104d0d225 +c3b5d8d53249474c16e2efaab88d34df +d61d644a96891375b6920c4316537b46 +685569e48f0556b2c45ef95bc03b1355 +f8eb504e31e2ea3e890b6d3e929d1211 +1f7cc5a81f3b3dcd21fb51b97d22763d +841abac714f2a878d0c1434846217fbe +4b12e7c6dc5be46e3e77ae0140854083 +772fa3cf125fd9a06692ed4b1232f2ce +73a1158d9574fbcbbe14c9494609dc87 +b83701f526528fa1b2e12573ba67ed2d +b6d1dc0c4f5c655c7cf62c4524579960 +82d0483dc15f4148b4e7d0ca96994ed8 +b8e8aa41bc1424fec2b073926d3b8788 +683fa0e0cd39a76ed16da3aed078ae0e +8efed4ee605d391d2003d00c27f22fef +d41d8cd98f00b204e9800998ecf8427e +d41d8cd98f00b204e9800998ecf8427e +1656687c4437593456c033073531bbbf +5dcddebbc884a14ebcbc9a247b64de99 +14c97ac709e07d5d683edc43211808e9 +9f7c4e356a8832bb14d85334d31ddd9c 
+c85cb60aabd296b9f539f3757f5b33c5 +8a25f8f5f7a8df0f8f587abca022e69e +99ad20e06a51da8baea1a8254ab3d47a +da19ec495ae72a5e41e4c764799fb0b5 +208f5aea407ee530c415a458a96631a0 +45dd7a394cf7c71d4286cd3f21c6aea4 +5c831698179a60e01d767c01f42ec08d +ea835d079292301163f27f8edca255a4 +889f8f6e35e7e733d38a1e4e72d5425d +4bf908ca00e0652a273933618a548289 +0d162774d98affa01a9fdda9eb6828a1 +7edcc41c66f064c836955c3a2c00b15d +e6f3dcfd9b65f348a6aa74f8ea6b9a96 +050ab268ca51fb3f60bb210823d84425 +49009c0da3bf1399cbb2313f245b6ea3 +61c705f63bb5e4f85845622cc7776360 +8865734668b23ac70ab6d093b25e1e1c +2ffe4102e1e0baaebfa9ad0584e5f7dd +108bb1370241bad0b2e9a23a8bc17cd0 +685328b925ffbfcb81f9f3a67e57b0de +24de0ae6d647f6bf8cb645ca212effa5 +1f8d26a8a42f08a0e6032198ff08e605 +596bf3f8a56b10af886bec38728d13fa +119b3fb61aed601c6444ba2357d6f2a8 +55c35844d97891860321eeea3ec0e562 +f379728e5b5215f4db57bed021e67df6 +c5c95e8678c50b68d022ac259d3fdf5e +c5b3672bc14557bf02a0a487ee336603 +65800e90eed41800225c614581e5ca85 +9c4756a2b881070db0f4fcafc299f5ab +0d0c4af54f179eadfa902d887336762c +74a816521f760202a129e29a37a38214 +19584b0e3dc86a9f2df7f504dc60398b +235afcb1239513108854ba48ef9c27fc +b47cd2182eefb63fb033d71a7a7bff45 +37b1509c3b35e52559baddfda1a673f3 +f2c556afa6043bcaf2a2999c7f34a781 +ed9e5ee7cd88994b7400ddcb94c31c95 +3912738dd201bc218ae0e0d70238e16e +b3c85adaffa5a0537b36dfb39f1ac6e9 +5c747da80e1bb90791bdab861f653fc4 +325ffb842a94eef34b60fc0947a26f65 +4314f354247c07abe938b1ad5a8e1429 +078e0a25897d6286a5ef6f20c7f7ead0 +59d5e5dd0cc57a4af5be09056428ccbb +8c2a304d0d1d7744f22509af94a6d3f2 +5b5938f2bb201d01d2fef0196dc33199 +40eb13411370f186ca615f62bc5b079e +8265ad1769a9a735a20673104751c68e +d0f0d5991e1b2e5e0e04935c7f057d27 +ab08a0e0b82d192c4a6f9a68d90b1ae5 +11fb792590bc8d80d81dbcbf880152ac +9fd9af6c9dfc95c018e4371aee715916 +f697157f9118aa085540c87fe23b4e21 +fe2dce33c482742cae202077189f49ef +1645605cb97bb79cee970158f4cdb21e +e4e90ee4eca5b1d4994d19b180f11e8c +577d89861ef7300e34b2d68fd34ca026 +a795e460155958525dc017e8703268d1 +d429885c3de34a795e44b3c32ccdb335 +41bcd8c0b2cb44bd9efe99313f55c940 +b1f27754047c00d57fcb80e45d29c8c4 +df8a5f3f302e4b73e24ba68c031593dd +304864d48e5e526fe3f04a7d501ad8cd +9b3527f2b3bdbe1ecd5a0a213ad2563b +f661f196ddc9c405c237cf8c2f61be3b +6c479e1509613826555cd81399d99091 +2e04bea9a5b2abc184a41c4ef9920ce0 +c1061783b7508380171f74c898f020a4 +5a145907193cfb11666e21a54607b328 +f3398d1d993ea2a1b11aba068f6ddfab +7e2c3fca910d006f913825d62526f9d7 +437f8dcfbf960f9bd8321b60cadb5396 +ed739c79ed326b2e07c6ad044b24de1b +a6c8d3d00d9580b98da17c38d1f45be8 +0fa6698411c7c0e1c727d72b313d1ee9 +3a5889ed99ad270ba5938a8e76897551 +6530658bd6552987ab90fd114598bb6a +5aaa8f1ab7d0c70f1223894973bb3226 +98c7f4bbd4b55e0422756a476c1b5c5a +f922a895e8a04486dac904017dfe04df +d6cc95b3c71e9836a838755a16e04c61 +dab6f3c2572687eff15183391418785f +37bd17b3664e4c3d588ac47bdc0d21aa +c5e01ea41fcbd51c54d7052d13ef9b3a +69d00d5844a1c77ac7e1f0432b3e37bd +6d476fa4d362247e4938c59dd5bad082 +a3135fee1f73f93c67927023aa99a3ad +75dbe378c0fa24f070308880f69b24bb +c7b4cbf7a73e567b2bf5c5ce4bc185ad +d99c1ca52911dcbe27c997e61506d827 +7a5a7c47e317e064a82949e17e123495 +224d8c27cfef4b3a915fa38451810b91 +7cd8f467f3ea83f98fa4b6d3ca6ac12d +61168adbf0e13c10c5fa3501dc35617e +52621939587a34b5917be73ec617e306 +a547fd7f9522922942c2e623a98a47a0 +429b8ac61536cfb7d72b739d5b86f2d0 +9a8830915741c273302dfa7dd2d97236 +19ee36db67eefb1e769029f69b715ff3 +101b24e90cb53b64ecd89ec3f1053df6 +f1e0d51e7d4ef0ab91ae5ea5acda3e01 +169003f67baac1d21ed930c6707889f4 +79cefe2a92811ae00b5f0c3d261a9e8c 
+48801333d1092034e94ae900cec18d38 +fad8252008f2783be0466a50fe874eb2 +3ee8f8f189032fa8ae357428caccaa36 +d5cfa9ce554e5dceaaaf98108f3f85a2 +66e82525b91fc858345305b5c401f778 +d71a8417d2703380683c92dd1c1a32d6 +24329a9c2bfbdfafee3c94fe4e6a187f +3aa729778cc13b0bc831ad078c906141 +d60166745714faef18c5fa8956dcd713 +8de97b5a3e37af4ddfd746549b23240f +ccacd02f43e9eb19e0fcc040cf840058 +c7b5d4bde58c0e5a70f86915e083d97d +10a2183877a7db7f2a26df478ad7c558 +972eea24efcc04a1959afc014f10495f +3237a8844f394e80646b93644a61effa +fdab3ccb7d37a3b1bc4b2ad2a7490227 +f0d585d41fff837f9fca4ed714af95b0 +b13005308300e9a525383bba1cab3e89 +4691c26943b91dacbecc251b88f97e89 +3475f84eb4965fee386570414da29f1f +ca37b3f84d6951483c591c265108a019 +364929ed273e99a3fcc844620986f032 +3e21f82da173d4ef1612dea5a2d73169 +a78f570462889ba774dc61c9390ac211 +c3637cfae8fd560f528d577f134fdf70 +537d80467188de1d1b741f85456904b2 +0f2e22278353f627543edad48e63f33c +a945139b9f3400da3c35bc8fd343d637 +d3a7698414bef92525256f53c8db2b4b +5031d6082ee65be6aae3053deaaa25ad +7a6986269e298b6fe58f9ab16077edee +3a88aa9e0bfb96e49e6ecd327894f93d +d76577ec1b544a160bf507800d9d6ff9 +d713fde2dc3d018fb291ef15332b1a88 +6544ce09551d2e5372d53999db27f50b +1d1b2a981e2a73cb820463b5ce3cc298 +1e1c5b548c60a8fdc018039d0f0467f8 +650db430ac3cc142f523e10334ef65ae +961150d98ac4301a446205f1ed276a63 +8a153f7170a6817cb98023996e2ab86c +bf37a363e683fa1cea026b28fc64c744 +bc52445312189d4001c40e91246027c5 +086c6f846812889e2bee1a7fa1c069e8 +b06011c1eb62f9cc18a7d91b34b2fb4d +6bd71ed86e183f35759a6e96b2ec8d4f +cce81ba2bd521e6d6e2d0daae18a21ea +f1febc045e73de3ff483563d89eab567 +4c4114f1735a8b156c3598bee80e1238 +ccf4f7368f75b51b6e34ed6fabb682ff +9b40ce0362a1f8359e13edecd82fc913 +589a16adbadd7c3f015458f79b8b8077 +0ebac7a1a7b7a1a1b8af9832d622e2ce +7488077fa3ff2d53b964515249f79a50 +1e1bf8fdf4656335720472ff3aaf033f +5dd4b41ffc03f3d8bc48bb170f7d3b9b +86b42717141338f9695578bb19e8410f +ca0dbe14205fa1e768c3e703e53553d1 +a0efef8721ee1a96cf28697d41c1790c +e143937c6b34430ea1a3ffa575982412 +ddc7af5ee483c3e0fa26a521d60cc5a0 +1f91770d86ac2a0f93a497c9fedf2d45 +803f94e5da482b49a87ded95de229927 +972b0a337d7ee4190ccd472fa181f942 +2c9c5302b3d339660a097d542ae04350 +084c45bb6408771b0d6a8f2242727d65 +f3a70e0be9b34bf4eb1bf537f87de03e +7a910a5f09c7e57769af0a6940b0ec5d +819bf4b001bf2f9f50a36bc6146a89fe +7eecdc64d906232cbfdb86fad1bda0cd +01e8100a9788b6e63c6763614f13fca0 +1612473e58383d4ab3b540c17a8fedad +c0f6d246ee8d3323cb2d55c558370bf9 +70ed3fabb3c8687b8265f77deeeef87c +58b8e51eb8f6c210359aa7adcede0c76 +1d4ed329896a36bb2ae6db89086abd58 +b3d3c805a1c8f70b6a76b663e15bb87b +cde666de7a0669904e5ff5efa8f99c73 +c1dd186f08607d74c57a1dd20a73e220 +caaf1c2b6e229a95ff2a81e7f330159c +e98be9d670d1ce97f9d0d059ba6cb80f +06e0ec29b19e06e7982ab3350d287244 +03e1557ae562a238d84553ae17836a53 +9cbe12826c711424e03f043640ba133d +48504748c7d743fa2126bcdac473e0da +2478bb339be497ac0d66b3a364dee407 +9f8f031dc97428621722de7354be80b4 +2e913550bfaa2c7af3d3b28364f3c20c +86de923679b339769f375bb90b4ab256 +ac7d6fa5b98aa095679efdb1de9e7d63 +2ec08eb71bd845a1fe89d29dfe91133c +29ae3733b8f7dc6bf3931978d3cecf34 +25e4aca18f8ea8ebe5c143fad83bfb42 +1ac1c1024652f320f3d3d8f49e15733f +54b0d4ed51d489ceb3419729d81538de +20c2429fcfca358a0d104fbf703ccdfb +3c450097286f9deea35a172fb1f086c5 +394f30d79815c4b4cb133f112320fc22 +789644faa3b5595a436c423d351bf5b0 +e4f68ee9c9ef9b3d7d01da6577d50df9 +f5364e11f786f33ca2482c335f1b60c2 +d700f412ef2727890bf70d92a562e066 +0427870bb7b7ebef86f22b7c17e356af +14c34cbc600cffaef158afeb87eab09a +a59b6dad7a299ab3f5dbf6783ab73dcf 
+69b1793c1649baf1f922fe7c42d3a18d +2e4f0f95278368872d07c27765050e43 +6f6f37634f2c5b599b11375d0e80d00b +c8604472ce9e82d81e115578b932f0b6 +1e9347af7fa6736ec12cb27ef17bcfff +3ebf5db5096723fb893f71b6436903c8 +1755dfb8fd1f329064222327f958e24b +8c0b9e17d07785e7f0ab5f720645130a +1da9615d6a4613cf7e8b7e3a74fe6bfc +a07496a83383695b21bf420f722306d3 +ce878a6af85dc8f2ae0c84a94facdfd4 +c5b807e0f5b1a5c6d618457b1d2086f9 +233e758e660a04bc2fb6081cbc520625 +e967f87c95b1c927b4132cbe28913830 +27414ab13d9881c774777ac178dea897 +c7cb467e32732f846f3db3a6c77c9f36 +13dad90dacba9f1e85f6765dc80c82a0 +42b0d6f23b60c779484d7c1090deea0c +0c7e191c68473070bb40acd09aa4a6d6 +11e99362c24bc62ea60411a00f033747 +5e72e545d6bf7126a78c312a576ae166 +9f52505bcf750a2c39c73415bcff8740 +5e15155d77280da83b3be5d276ec82ab +7c2ffbcad2404be6b561f3993359c79a +0e7b691ed0a86f86f142b5feb78f7c07 +c354afdafd7df91caa72a19fba302be8 +364de4e1153bf7e944377c00f89afc3f +483df9a21455c44651500fd239949e82 +c67f1af58b2bf55af5c16de13f86bee9 +5c60cce185c97f564e0ece1d03bc553f +fe4d770a3c57ec397ed8d5f29b40e6c2 +0de59cd550b3a8cbfbf3e6dc063cb81d +902f43b865a0e5f1d41e84211bf22935 +17243dbe6946c349ba8dd168905db426 +b4c341f4be352e631719c2024027ba9c +4e28fe19f404dc7ec817094f1519e080 +972861a3a214130546e24d8e7c696706 +9ec982d9afe6552296c041a9ba166647 +841ac1c12b30f47072a360b0a6d86a62 +eac5ad1c62f0eb79982c0524992930dc +fd1aef8f82ce816916a23dd4946cee6a +9d9685a4a2b6c9d33949ff9c78228bd5 +fa26ab86001c598421f2b6d003fa6023 +6831638d3bcdac4cdefe95d59c109a92 +23b4b25734f34ef1f8a6c2f00d74ea89 +bd28fa1f63265eeace6ea4b380630f36 +26b34ce567624804085b3e7f3707e259 +7ea5913a2e39fcb0c63f315b2a79c51b +3b7c8a062b8f083a1d508e7a49740979 +d6e88f464701e068c23487172cc50968 +1f855a06220c7ff24869cacacf2eb9b8 +72ca69ed41b8eba3fe0d7281b517bea3 +42f90a1d1a5f6ef566548201d2e1af3d +14b436af9ffac81896bae99e64a13896 +9e4715ba2f6dd1de4489071ac490a0ec +7bda3ea34adc5fe08190d64fa5582065 +2a8406d6f05f9ac0f668c1765aa55fde +35e6c1dab568c16f041ee21013a61316 +40654738907127554ffed84429afada1 +22305248a0a55423f1aba8d5b65b6e8c +806489b41d49ef2c318e4c1be8509ba3 +a761bed95e194ad3beb7e32eaba6d78a +d15aecf55343667ff179d3ec3948ec6e +34e9907b5f602661bb0b7ad5574240e2 +bdfddddcddc398a5d4257c9ee7458610 +1ba8867657a25275f20dd01e3e525b7d +318213048b2d55f2c1e20984ac11596f +687f31b28da702b8b1c61c01c43bd0b9 +370e45c031a5b9055e454ec642333394 +120ded38b14add7aae85add6ae9c3c61 +27d7a144af9320bec669e8010fe996e7 +b761781ba87e4644fc838cc8282cd869 +155240e22e5d8204c28bb4873f6b7dd4 +3c38c658ae93ed5526214b426bf86d80 +7272e69ff42aecb1ea18a4a83d330227 +8a6e741644cedc254326e1f55555250f +2834d240c44c2653e401334971c1d6fc +a8b2c1fe0ae11e5fa54c3b4dea349439 +ed63efb88d224cd11ea4fcd046722b1c +74b3f173209b641533baa64c1fae41b4 +ef87a13f960d9a45e42afac57409a718 +077076c3dcd943b6c3984012f6b263a6 +a79992de54c09c3255141d05689c911a +0c637b363fede615a825df01725d7b11 +099861a50b3f43d493ce94ddc0ebba69 +34cbc35bb7ddf3bc8e73acdf3cee54d3 +c2b406227a5d030b985160deff67ace6 +a690640df03de4ba427e9e0536cdedb7 +4221d0df6c780646536d435a40bb4338 +e888ef4673e7887336d8582a12af34d6 +99fa831b3ecdd5e6770c08a1ba47cf7a +c77978342f73634709419139d9ba1238 +432bdea62dda07fc73ccf10ccc5bbffd +7b5e25aa7990951eac6cf537fb36c741 +534753ed605faa374463db2d847b2d65 +d3db2f6b18013f98937663ce5544f2c9 +6562a5bfd16152ac34a8f4af4b3f4649 +85a4a2feb016fb489cf17694791b1b86 +6fdfd72db37e042de446f3b5d6679e56 +2b16f8702ed0b7678c54a52498778773 +42aaf6b14980e691e6b459200d5e5d01 +7b4fbbae61b97fcdc3c04a51ebc1610a +4af7a208a0d684d5b18b5b1f50349354 +8de339486049acf1fbe1a32139fd82ac 
+bbce5dd7cc922abc29182f35a8630ee9 +a71e245e4a16f2395c8d5a368e37ed43 +95d48a671adb5a20d66c015f60194213 +75aa19768fdd9515ce74c155c8cae058 +19a10360b6a5cffe02e83c65ab41ad90 +ebb420449c8b74a8a22bba380e41dc85 +268a5e49b330b778d5b5a273686d10da +06772bf881bdf34a0f00dc911254c3c6 +e8aa0470ff132780e5fc745d04adb093 +09a279e936bb169a7533730861246dc8 +2692f05a28b161434a0614713d715da8 +07da48d9e0664ee830e78c2c262f7656 +2b87f4ffe30a6c10a1d78b4d57fb9a9d +e6de3d3505227e67247eb2bd17b6159e +472e3e75059d2b347c6c97e86d675734 +de0bae2f01b5c2d981bdd6bc255428d2 +bd2d5117dd524d021871712d59efc8ec +c73c7d0facc265671f3177aa8b849364 +c62102d3b7bcc510fcf3368af83f14de +ae4bee0defe5373385bfacf368b132b7 +3d9599934fab0777bc65a78d40b0e0b4 +6b7dac3acd795a47effcec5f40438056 +7e396047931eca9a9c3d1f466f9e1906 +f9c5b22c5d93dc02a27df6de36aebbea +4728dd1827de424ebe4dceb82e45ce5f +41baecbcb0344c8b48922a6bcb5e7792 +56b25f2b825eb78eb26c6feb942d9457 +99ac46bd4ab81b79e77d88c3f44292cc +92639f13cb9a59f8930069bb95ba4fa6 +379230687fa3a69aa9375df34b92da6a +2ab569a59df3756957cad464d30f8d4e +614ba62577b2af15870c8c4070c11a4e +834197148a917de4c9e309dbc5cc97d9 +ec4854154c13d48afa9936aaf0beb0d4 +63c718ecafb1c5c35a82612382c64a0f +ef7fb4dd08836b74aa2e5a5cf8a2810b +e2592696987e0362655978a9b2d5fe4e +62bd34109b6a8a8e3a6c787deb01704c +1e1366333a37701636db1fe4cb45cfff +1d5d754d9fe5535de7e96709ea183e06 +4d733f0ecac5b74ab6cd5c994f385671 +6b93d706a7fefd4bbf0fd07421484c9c +249560e7fb457fe3f085c73e1c5c9a4b +86e8b48dcc818224cd9842f1ad3f8158 +f8902ada35a3053288c713bbd4df006a +76fdf3beaa111635fd309e8072840109 +de73616c11785d5cb107ae48949459a3 +14082ab58d2adf306b33e7c3b2cd70fa +72e947cd77fbf2d776d69e044c5bbac5 +6cfd43896cba01bc41428411e5f7a1f8 +92a9e2ae6c80cb2fdbf4587a1c986344 +5aef133650db2e26d3f75cedf2c03f3d +a8256a2912b4a7921ea80ba6741f2af2 +d41d8cd98f00b204e9800998ecf8427e +09e3c54c6cd483a3a58d51f4207cbc08 +0f9d831efc231e0f41d53f008bcd06a3 +6cdeb602ddaaa81ffd79b7c0639631a9 +8255d1b1c4b89f7aefd08605f477744b +4f4bb5c3da0eef842bd88d9ccd9d5780 +1d6d31468115c6f06492beff8e6b6c79 +7e2247818a0cd30ce6daf50c7b591880 +522b882a1010893f2f13b0426b0d6a4e +929eca3e780b2e1d6a5190e525f60893 +134fc82fa131a5e9253f2b5ec6f3bde4 +9ac9186ea7ff7272defe13aa15dae56b +850c3e60193ba7a4b90d1dca958ed494 +219e4b3da878121a4c5056334eb69e13 +38f8cb72863e8f1fd3d596f84f1847ce +81d6004e2fdd78dcf4db37c587189cea +64a3fd87a18f72b0170f71bacb07d5b5 +3b85e763eb59bf8840c54f3af85088db +eb5313fd3f7e30abec28b9ae907ebb97 +aef157efa7e3fc4b304dba2f5f906c0b +95c0e94cc4bc8c11048c06f7f1720876 +ebf15dc4be5f6d85f6a2c9bfe4144adf +ae6ee0d386dc7987545641db76cbce41 +056774a414940edfa52ffe093b2452e0 +3d7825369249882b5fe04d399b3c9f85 +4b817950c0e2a3dfd5429ab019ad90d1 +d9249e93c464d90298690dc895712d95 +d154904cfcd986037343b8fa87fe4b8b +e816ea630855c9c3ba60444a75014817 +8bfe2b01c63db278ccc805d93ca5884e +70a66d8ad058bd04898b7e18e81a67cf +efdafe68a42a595f56cef82f925424fb +8d0aec591bfee33c33851f35aa61f474 +9cfbe436a56adc5188b6422a421b868f +25ae93935c33e1d9852096d2cffaedc5 +76b855c96c1f58d0b564db1dc47fe17e +e3e18fcf49743ef15bd1a46c6f188d66 +fa4cdc589136175ab3edbb109e92dca0 +7f9f16c222386b814c1bf90a9d9b32cb +06d6d2efbeecb37c22054f4b8235150a +83fa3674b8189586691ded279ae46339 +8a2948d3acfe101143bb3a46b2d357b9 +10fe7f3673192a0dff622176db46c72c +f04919eacbd9211df7c190318f31797c +88f41f9c7af403e2e0ff3a0a96aee0c7 +3530dd97fce74f75f34cb7b94d121f95 +ca85087354021345e19d816df23b05d7 +564d0c3c6fc21d91a8fe59fff2ab6f0c +a67e249ae0fb34baff1be34e6ce189d9 +8613517e4cfb20238115e64df93c479a +82d1ba3d875b6c6c982e50edc2f8d8e8 
+c8753fb619da1c35b1c1597bfc3cd7bb +81d6afb8014ed129b0683d2cba4ef9bf +64998c2ca41f08c28726b17698d14464 +205e9c24b645b01be40252d8e5de7a93 +25b823904d953aab26c9136a41713461 +177c85a7defdcb46a7475198f650ab78 +074373b3bcf0f794153c5938e5f305f3 +d466866e4d5f9328b2ea4133f0cf2d2d +1235454ab531f5c5c735eee462bda4dc +213e90c2bee5229c50c7426c879b185e +d43de7f3505023c27888a17958669b4d +4418df16b216b091ac9cca1b8d9de074 +a6cab0b6be5be45f52e330bde9cbf86e +80f4812c18b6344248fc32b5815060bf +d0785af48e469685db5a4185f22ac1c7 +7caeeeabf9de39e148158d4b5f6bf9b6 +913114033995fa8d94b910623242b7fa +d1c1ef84919ba7e3e374e79e30093df9 +44bf5692aff5f47b8cf9efff9d7dc0a2 +9232d3d76468d524d982884e22d46451 +9b1af1afebb1fb1b8837a84df4d1b6b1 +81ffa7287722527ef6451f470571e5c5 +8308edf485e99d1c65e83b61b2b400db +4488feb6b405a42a149ac3b146e5958b +8d84487ff114bde3883c30feae6b9454 +b4c40b853d02b15a90a2224ee5eea1d1 +b102fa7f3888406e4ef6cbf5c0e3482f +27dec5fd24a7cd3232d69c6b3a8a88d1 +d41d8cd98f00b204e9800998ecf8427e +bc7a9522574d067397af4858bad91eb3 +284a7a898f5dc30082ad764a91a43ea1 +6fc5fd6d0cf8867c746d40fb2fd2b42b +a6019132fcf6b26e4daed65c677cdc16 +94121f56d8cdbdc413ddc0701c6fab77 +2b7d3a3417a70de8306567914bd237de +b2681f633fa93a4d91d8b9ef9d3a2d6d +2d0e0cb25fc7bc9cbf67058ec6479842 +110a2761d94ca8c1cb05bf255ff3786c +1ae95fd1d7e422e4fe179a1cffba1f09 +b4e9c9710c581f91135313117adcde55 +486d90686c83fcd2ef73869315c6f42e +c585fc9386bfb2a04d01397f79391e62 +8c7e6c75c0ddde9eee2842933d313035 +dc9a6ad326c75d4dbcb4badd146d5c01 +fc62a072cd89eb0372ac5d9d0ea76c22 +12f6955c35f432ae25009de09371c489 +63017b37a15cfd35b5fa04190031487d +0b4179f1cc0256cafe8ac66b6a3c9212 +3f6556cf032a925826710e241926d7fd +8c6f3e966d7684f437dde4161214b792 +1ddced66bc2f3066132a28965b03103c +d0d84f41bc78e025b45f851eafc2dbdf +c446b5477156ab804c56653e14a7c3c7 +383e6bbe78dd2b188441ffa1acd11006 +e77e45f2b9bb09ba8652dfc64a9acb89 +569fbfc2194b201e54266f51a189f5fc +964cda83e3f6678ec81ee445dde25d9e +185c2fcb5c25bdaba12555a54edf7b36 +770bddcd663485de9594ff1a653da863 +5c18adf7798c8401f775ef3cfdd4c4e6 +70164b44808623ce3cc1188ae8118859 +8e6e75fdd868e2ab62942021911dcb2c +c39bb415e50b7e9a436b83a456165d04 +053fd7c8789afab9ef979fe1723c206d +c80174caff05b31ba18e98632bf7e03f +298970ddb969f875a63508253c96d7cd +99b31865b966782d1e50202d8cec11fd +3de6276b0223fc01c593f1ae19c8fd31 +f3a67d9ccf72f6761c223b01ac185109 +7aac8bad71c641761e5c59fee53a2be7 +0a77231faf19a2da0b7763716f77f50f +1770c0c31dfc509450cb0a5c037b18df +e2826539a80779fa5358394ee603d73c +b5b61565a37bd2a94cf2fd81bf2de215 +e735546e97a22b90178bcd8f1d555400 +355ce4a5adf854d5ae6fb266b4194d46 +d94882f02f1055a59dff4212d494f055 +79986308452e0ae3a781cf46d812912f +2fb19e649416295d0edd9cf0195d9524 +8de4b9771c470967b45d0fc402e5fe01 +9d972f67fdf9317ff38cc85ef8a5719e +28ee5339f1f5ba56a02d986ce9619f34 +abfdfe2589d1027473eb9438deac16c8 +03510eaa057c302ce9a605a069b3411a +92ade3233b70e2485f258716db1331e6 +cf2e830f7c2c6d011caa0ca225f32c1c +3e655a0b33e4be2ccdb40efa4fc15092 +e587a6f67db53493e81422af10ef4b03 +72a241a460b794912ba9b5ad44a33318 +3b1c188de9a104afba5261f4cb303f1e +be72422804df3da9091d020f2f2e2c44 +c0cd9d78ea51574e286e14876a93c0be +eb0fc8ef454c2f90bab885564181e54e +7b660ed115a1bc33fb16a664852ba4ff +b9d3b4671d795f84d4fb22d29fa57403 +6d67865dbff20793427acccd4f307fb4 +0ec919ab4ef416b255f5778db983b61a +d558951ca3b38195a9cd56b5269a6e2e +3b2fcf51575559f03cb2cb7673d80252 +b7342e925c0726e883718e53943d569d +11411db8f42a339b83efdbc683ad6740 +888ee0a8ca9d978665e00e41b6c93c69 +1a79786400e4612837784e504c673bd3 +f346d14da95eb76e588483abda75f73a 
+47d59eb3d694f78161de7ed1e08eec49 +6868862e7883476f75077090c4f1630b +1957a1ee117fe120e5eec2d537f77629 +b63d8b597e0ffcaa6d57c66c7b7b011d +d06f7a857f881a72e938ede632f967a6 +1ca1d0a273188c1bc3af651dd0d44c83 +5e66f1a7cf6e833dc6131573257fead0 +1f5055dabdca23e5d96fd8e08cda7971 +a1740663d056c6f9f2e8371bd5b29ee8 +99916d4ed168f8bb0b31afe6c88ba8ed +7a79989e9b9a227c3780cc0ef0a220c6 +6db57e4d27217c27da7f0575d40b609d +93485f4cc6c4331bfd8c25ea4c24138d +37709e93ef66abaddf5cdc86d5d5775b +a6afe96162a6f5e3ec1778bc6fa0c412 +a237475cc579739c8cedbeb2c910ba96 +0b3c4251d03cb65b9ab9d41e0964b23e +4602d92519ef8d6f11d0e0339334336d +b74deeb22a65317feffe8bf653e81f13 +d0a82462d6c1384222a56c9c63d19103 +dc136ea97e39227b405ea53c06806004 +565658af71c61b2562d523bd3d7d6f96 +b0ce4747112658cfe2cc4c472e0bde47 +342f7a77fce6b5c7542c2d68f02513c9 +b0cef7288f0b9e1e05ce7cd579cec0ba +2c02f0b930b5cd479a9a308dfb5a1be5 +d4aa4491f7e3d148717a4323d4c5fba3 +9469ce8347179294eeeecb9b6f485e1c +47bd4708605a2b919f9ecd423e25a136 +3637abb7f3aaf21395a3f8c184435e92 +f51efa0c010cf6f5b8fd8ecf16f9e344 +b30ae5bd89b06644b0c7606b121e339c +df7f4d1126002f39fd0ce48d563e5f2c +7dffec1bb1d2bbb003a2acb9839d8a89 +b1ee4ef2efc837af2615f7503784c14a +5962eb8dd1ca83c5301c8d7b66be904b +62faeddd99ef74918b2d2e65975e7c63 +f45c8511ee890d6973031dcab7a4d912 +b0f8d0c83908bc5717b340d43bf045d0 +394a289ea4373a6eebe10f7f2e655215 +328f5a3eb8e8e727d5dea80a3fa8f15c +7ae8034c6ba8714e89b170571184fd8f +f7f70061bc31474116e3305c10bcf593 +1df1a21eb706d2ca64acb478df9b364c +fd6a32a7fd2e9dc39e9791c29d2ed5f5 +ca47e02144e8dbf0229b2580d340ec56 +00eaf344b983d8b429631056e250916e +12fe4cc34cdc669638f9066537759584 +2cc7e395588c6bbd250f3542af3f13ac +948b1b20e39b5cab38a79d2a20ff9704 +8ad5db31ad174d17ce59d90fbb0d2e94 +80de7119e1cede96b9ceb629396d902f +f338217b4422cefcb4b285d3e74632e5 +f65b7320946c510e34da62c534421fe0 +28a696ed97ed191ae2406ed857fd1842 +402c23f06f01d154ef11084a572933d1 +e70e72df55c9ccb1957e73bd6abe6c9a +884c04342b5165ab369ab02edcf2629c +0db7ada6afbd0c73d730c3ba69c19122 +4e7ffb87c240f71593d196a4a4b51737 +98283fa4552b1ae9ecd028df35867fa2 +106d914ef402c31beb35ce7a91fea8f1 +023f22b402870f4fd751436b4f2c797d +f38fb5f4b669851641f1ef5c8770009d +c147bf62a80e624ae9c387d718e7a00a +acbe8a705cff212a73b99fa3ccae949c +017f6394170ca7b23098f6fec953e73f +06789bec116073e7dd120d887c597455 +2240266e2cf441940888514697f92ee7 +e37f9ba99b5f0d99c3ad514dafbba816 +fb99ad6ff8bbc271c7f396f7539ee0ab +21e6c2b7e80d1427b019fd4732d4d138 +22496e3c9d18b3c58ac52e58f23be3c5 +0a096ef72b51d94a45e88b2311290c96 +0e22749453d7e963859c670fe4e49b9d +c78092f2b45b1de3611fa389f0bdae28 +a956aa9b76d844cded5cd7abd0cc9fb9 +4b4a131d9be5e6ea253d346a00eaccde +f1f184b2659921d4fbad562e4dd6e2b9 +42c2cabbe4b029339db30d202c796e8e +00e6c862c03451b34acbef24e33e8715 +bec9f4ae6f6d7079e28c0195e5793afe +397f219d572f03eb26b88cf062025114 +f1e288d3a60185e59e3ae78198c0c290 +c2851317369046f8079c849d91d447f5 +cf1cced88f51dc197dce7b01a94dcd2c +82956e6ecb9050a0a70eae6ff7111a1b +7521317ac96a8f37d324c5892aade397 +a78dc68ed077be06a4ffb01980a1fbb9 +2b66640d0435b064b1f837226b069c7b +de289e72c19e24012b43bedd43b12e99 +33b5809ae107e90ebd3f171fa0036afa +5b9770aa9be28a691593e42821458630 +f96d1553400bead3bf3e7bb73d8f72ef +e94c2acb56a5259433455b014ab7add0 +65029ebab5d954c9e10f52f30bc35748 +8fc815c34b60270d8c2995da18ba1949 +d4d662034bf20aa65d96c3ebeda6c5c1 +a31ab248ed5b15c324c768155ddedd74 +6924f51fd8ca2078da21e16479ff94a3 +04d29c09bef73318158abf5a65c26cec +be0286048d9667289c16250ddb55ac45 +e46efcceafc0a8724498a6117dc9d060 +7788dd19281f4e39beca0f3220d4d3b7 
+d5c6290152a322aec2baab9a3ea9325c +60e20a6e503a6e0c721d374a8e67d408 +3da48b2c1ad45ed9a0e94ed2f2b8cf98 +a60b5c269a8150515b15b54d1248e541 +f22d2a2dc7b48c5345e7a766fd8dbd6d +d3d0179a0f2e82e898620671e601c5b8 +b12103a1bee37efe908438a757b9d261 +1920d35b072cacdc8ccd0c4c37cdf404 +07ad9d9a7625577ebf1b0e5e7f17cdbb +271e1679794f1624c1bf674bbd5ded36 +079d7ad4074201b7ccc5d6364561db37 +333a387d13cf17fa2d3b3b4e4485234a +499d20ad2a17c96a3829bbd45fa44ef7 +91d125baed01b6ddb0f9c501a182c760 +327d8e9c5f49b343bc5f8c9d4f7425a3 +fd7ef3cb1dfbca956e199b4605c4fa8d +b7ecc052b13c4f269bee86c37f707ec9 +a48e2ffb4b2c866a70436acb3421690e +def23ed8eada869c4f9e0cd934c29864 +4cd18d5b475fad1140baab39ba101d29 +5d0aff977363500ebc6dfaf272f35cc3 +0268de9e4aaf8a47f97579729cb062e8 +4df34f718b5f102f2e12b54f4584c457 +6c78252079ef8f74d4a1ee052f102891 +571e6d0595ec149fece9811f1b70b9ec +7f51187a0d2187be3033e507d2355f2b +6438d5f606133fad099ffa162e0753fc +982de025317d3010ace60525e637ad1c +e2bf31c31e00fc3a903a91079af74587 +7d34259e19414d199eda7355c7768551 +7cef70acafe5066aa8a7205f071e3067 +c7d5c6836293e3a6e6c105a8c142ba8c +82e3f54ff060f2905a34d19d4b5d08d2 +016ae999477467de6443a3aeec3e034c +a31aaf55ed0ab75037fcd8344b5ed0db +95e51ad0563edb9d362677c46070beae +54ec2a9f0082c9e40fee1fdf9a56ede1 +b5d6995e643c656cb51c53ab076b8065 +f5a368d4d65a4953e88cde759c50ca78 +ba0e978e853f42b6afdd1286367306e5 +64543cbe751d4e0541932d5224c2fcf4 +8ee1f5628c34895ef6ce845a076b79f4 +6892c0bff7856c79fd2b9ae7a46f9bca +26c3e5cfe8d1ea6bfc33fe9633bc087c +ab40a76e96f8606426731451234351b1 +7b9792d3ad11d9d18837917d7d34a70b +77e5750a43036465221918c4e722f897 +b14760266f4bc7fe5732d4a223f7b137 +bcf47a5eb80e0b88eb2337e72114d7dc +a2d569d5ef6e7dea7f4014d380393192 +ce4628f0f1368c8ff895b77ab2ce2863 +0a698e3371758a89756d2d21e460a85c +e3fe476f75a197fd85ec93f6f3b2f4c0 +2590cb241e7d4eab080116d58e4a1b8e +94ef997830ccea7f24c39e9ca8020c51 +0b7efe93556db0e42a3d0655a7d4f310 +db5f792e19238565348b0f02a780911c +357ce0b4bb439c8583071fed3fabf860 +0e85a890c02d792468dbcf61c11945b2 +cf67f4474cbee464c8737f63a7e2dc49 +be6f44a8159a5ec3a5dd9fe05fd92407 +71247dd6455f38911ee89efba7eb9980 +38905f6c91c5cc9ddeac07f7d9e0243f +62d50232492b9a14ac341f9e3db13405 +3b679e35dbaa8296c618dd3aa1034d86 +e177e55d743ac4f601d75cec8dfe06e0 +65142ed22b65b98aad3636dd7ef709bf +cc0148d774558b390287471e8e9e2ffa +f54b78364a49ac4251f62700c4b0e07c +a371ae12b26c5013c9f565f7bf2c881f +ae946d68b6a473a8bd3db6d9ce67e919 +9bc9be4ccc5b056c638612a872927db1 +db8ae0b0c06761d990edf0f2a5021de9 +a2cdc16e41130d09705d7b200adcf506 +76d692c19c60779701e8601cab811540 +904a7dca6a2bcc8523dc2d3b686013d1 +93c80521572e84eda679c9ec556ef0f2 +d8a9f31e6015f3a9ce98961e497df36c +851ab2d3acae6327588a677d14fa6366 +71e3599fec4e1c32ab1001f11818f885 +9468ee90d23f471f01f39449c4fba2ff +137abf7b279b333cdb8f0ccad9a8b7d6 +2f16feddbf30dd652bf9c3c0790fec75 +8a87a75345a51ac6eb8437b793c980f3 +2a1371bd3e53f1cebcf17095c779af44 +e52e8effebe6224e9bbd15bda107a314 +6533990ac9c4495733489760f27aca0d +e7830ebe4d8dd6ac878f713544d47ef5 +f8aedae879945b151c805a6228031473 +639fcb3e9cd819a6451e51e898d08a20 +a1ed6444c9c5705abbc3bc39455c60d7 +a603a61e1428f552bfc76b30104978b0 +e3554bf767b906d9b804f313e69dc836 +9f708f52ddc4c92bcd72552e66f17cfe +52798e6007933d6ef6092cfd08970e50 +78f8faf92ebca0e2fdbce292ed294625 +21f87c83ce04d8e5e3a596cd9fff87f5 +6420ee9ae778cfabf7e5c62083d1745c +502a4e1790dc80c3a3d1b04a9c9b12d7 +fd7222116e38cf536008639d120d7f5b +6564c7d6e5ccdcc4d02881c31ed11c3f +4399bd116742186b137e6daed2f85d74 +64eaea4deeebcdf1d2e331ee170c1d1b +a4b96b5b354801a94e98ad2c441c4601 
+fe0c38bbef2fd394dc54bb218b19d4f3 +5e432908c3734e50d94260eed6d0c8b0 +df28dc234513285bad0208285da50733 +8407c93fa449bf331dfbca8cf7fc695a +7dc5c3c29272e29438d8dd3944274dbe +d3177860f7528878aa99d86794d0a0f0 +7a983cc138579813f7240601590c5fb4 +32ac57635e18e75acee9b7abf4299881 +b77844f883d4c403f57f87973297ecf0 +fe46a8292e963bdbd7510297317ca047 +fef40daa87a8be117f2de905077843b0 +5fa612f06149138fc3fc7f66b17b9bf4 +e70cdc81b8e155cc7af46b7a278f8657 +ea622b313958296545dde97bd3a98a10 +56b7e6938f5a658a76b79b1467a8a0b0 +91fd15edb5595b19a1ac4e8c8c630868 +986c6e15a93ce1419f34bff234d96873 +129e7e61ece9242cb69ae669facf9451 +82dfa66ba554b8b27575b941e3e0dda9 +cae7608f284101e2e11eade1c40220e7 +8441f2d1a6f173cef3ce4f3fd756bfdd +6f866a638d8e501d39b139d3bf5da13b +4de8dfac0e46bcaa6328b96b27006803 +021e071a52cf66e2964fdc99cf053da9 +63863d35a3f9d8827d2c8a014e1f1d73 +516cd157b4b5124cd3ca0a58ece52d11 +fa861c321188c143cc9ee5be0183ad24 +af2d30875dbff7187d36f370d0ce8dc5 +b2e7f7c0449f9f58f7f329cc9270de56 +2cc85fa8ec9d6a069b84539d35c2af88 +ecca69140ca0640822dafc6beff83b6a +3a36d4f68a6bb5a8b045e67d11d6346c +90510c769742d1d3e5141961fe28fc08 +c836c0aa796b03c63bb70ec4c79ceddb +376a3004e19d5bd1966551fa82fb90e5 +f520dfe228a51781f501fedc680057b5 +18fa0e4740f890a65034b71425603174 +1ba40b966593a2ee7baac915b87b0985 +f194fefb8c922e2b9c2c48f49f33e2a1 +b8db46aca77023baaf6680ed4b1b3a04 +6503eb157d5c01b0dedb4f12b9230cda +e97cb8cc364e555f7afe8f31697a6937 +02b01455d31dee15bdd673afcbe22e95 +921ea47ab68b38761824628054d7a8c8 +8ceb9aaae7b72aa95f391a544d57fff8 +a3f360f6dfe96adead7f98f9159e3a72 +0e67e0cf976408bf0cbaf1b54c97ba38 +a843fe191a4d233154d8bce3a37a9031 +98c5a5b09260c2026f70c537c2c532b8 +6c91ffb899393d9f1eb437eb39825f01 +c10e34e8f4baee21f4a40a4011059d60 +543c5498360a3c8f3be635ffc01e501c +96279ff3f7f33f10de3575202f3f55e2 +c5e825b00e5974efc91a0e9917172bc2 +a7353cc024b1b24b46631661adba6939 +f385084902cd457506053f98d623b79f +af1e821c8702d3c3b4b9190db5e4e822 +fe2510bd01222e8c39bd5a49481be822 +d341802aef6290a15ab4d32e54b5313e +40d1437c42401c34c89b562a2b0839a5 +59137c5b150f4051ed4cc692e3b1a0f9 +70d87428acf5aa5dfb3dcb24bb7edf32 +c21aa7c64083c281fbb1cd6df572a7a4 +a5a4d2edf42a99dd3560add9b939cb17 +845ab2edf6b52b9e63024fee2dfe3afc +0b7502d7d11bcba60aa5f08e3ae04e4b +751b0ef478e3572b27628c203572d5a4 +790feb9ec0f13ccb24b1e64ea7f49e4d +79caaa192fec3897061ae460300d4578 +60a7302dc844949095eb15536c93098e +5e21432b6b96947f26e966cc14c30924 +f84f212202dcf2e96d4537990e2bf0e4 +fddb63d73e67692020696f754354ddde +da9354ce2f4bb12fd3024f6bcc0641b2 +a370504ffa6841027d7c25e6148cf113 +a33153425d7e609b5422ab3b6f3230a4 +e83fc71d19f2cb68db444e4491623be0 +3cfb1ee48d6aa16713d98b980c70e3f7 +62361cc37c0dea3b151d9597cb94ad29 +d41d8cd98f00b204e9800998ecf8427e +154b8c325b4eb7c295819168022d98b8 +326110398e39509a8a7f3228b0cc01bc +cb7635e6e1215ee18d4711c554bf7f3b +9169947d2247e046a33b87e5465438fc +7741b61993303870d1ab591abc6a8ac3 +d00a331224bb78cfd4f010d0f9fa1308 +72e959de642077421e3342cb012ceb9a +4fdb45e88b0037ee0968051b9b98a806 +52a0bd9862b5a849ea8f3297d9bc3e5a +db9cf2447587ee018b801217a7d0a1f3 +cc283cf5e8608637504f6753f318cee6 +655b4d902f683dce461291f23053af71 +809bac122522ef4adef9f95528179d4d +5cb10e1b57052ea0ee4afe1172e2c0e6 +edd2c77afb8ada27d4f51091d86bb07d +9d8a512f28a95ae89cc701e95db531d7 +3d11d0d8c776f7039bf3a11b6c24430c +1b88f85c751295a77bc97e3228d4d6ab +804e11617f088acfdcbb377d3cdd721c +61f3c0ed69fc5cb0de157031fd169619 +0643f7849a1369be54cecb95f3212a8f +e833fa507a1569868c6cd89f963fe35d +493d8a36f85770196686e8e7182ae6b4 +e7807646e787e08b7f085bc7680284a0 
+365adfc15f4d54a3bd0a3cad4fc7b672 +71342dc963274ef677e45a3f70e77bd1 +9d10b4dbe6319c212326386ad57280bf +bb71bafa3a8bbb04f7fa265d8a2e41bc +0cbffed59380da43601180241f6d505e +23264e8ff302554bab5789778863cdd6 +fabe40182ba42c47a2a2b61bbc27549d +db37ab86a0ba47b47c24223e1bda2361 +325ece1ec1a4ef39c85cc81c40554d99 +19ac8b35e051da54a11c473113ddf59f +a184bddd31b69defc10d504510cbd042 +5a3ed175715297539e0a11df56053083 +b90070bbe5caaa634a9c121d25395441 +194777ec9edce56888ef69652c0febf9 +823acb5555d73bd1a00eead5de509a3e +34e2d72df4e7705482773c7f34dccb26 +be950051c705a1f0735dbc8663e15009 +f8e4a7a7e1fbef5a7ded52b224dfd141 +eb12ab4913799e2cb9f6a968c57480e9 +50ed99735b6a4bf628738857083e2c6e +df1cb7cc85f363258446ae9f73a24fa8 +2b0bf69f1993fdee55edb71127b790ba +a6e5cde3bd90752d3e3aec1567e06aa2 +0039442ecb43c91c4385b95a346d6035 +85ad9e8aae3b0b2d1089d804ef5712f9 +752c0d266978a8c62c63cd2e747a962f +f9c4e595a19ce010a4bab036813eebd8 +5d7ebea9c98fc8d23f03ff27db3d2ec7 +c111f77d25ba12260a77f6059811a501 +7685520023958f43949943d5891743a6 +ae3f72ec9f60ea1417c09e8546924583 +764c387629eb61a4687920616e6b7f3d +b6bdd823ebee6d281a13e1fc7d3a85c5 +d41d8cd98f00b204e9800998ecf8427e +4291eb6a06d9c9b69ad7f1fda068da6f +df741e21f39cfcc06ab854c23b8bb489 +6b46817d728fed2c14d52ad6973b5d08 +16f6608d961839e6a629fa80ca7bdc3f +09eeaff5b6422725a1d75b955b713156 +af62877d3fec877884618e1be8ce3335 +53c76737bbffecd1d896a18815276731 +3ef044e4d4f818e5ad8d04d51e924bec +673b33e1cdc24328efaa2484c38bd44c +062e60c6af9fa4cd9a578fe2e0a1678b +4f3362e9908d2f35e1f079055c48cc5f +3e495285c5b93503ca12960f64b0c4ce +bd785ad06dfed6e7a569c14c5a0d1d2e +ed70880daff23f4549e810e8b275c14e +121d5ee79b56b068215b0e30cdedecf0 +ff151bb5834673659552bb9343f4dc66 +254b2e1dab883b7cbce35eacfbb53429 +db19c5ec26681e49a25a1e181636de13 +3a1b84c47641a161a9c9528ea9b8f675 +129c421b96fcbb1ee75034035bbde429 +707c355b49f43eb9ff9d7af993d1a038 +57f396be555c9fa90618bd896e5e4511 +dabd112d9420e7f91dcaa3ba4bf80031 +9c439aa2047d0543b8b2ca04b0b24aa4 +0bb4562aa34bd2610435e3add4036e6c +e63c3ae63d20b15ef07e3becd7178c41 +9c1c938593ff4d3c0e4ab7649dcd9b5a +3a2b9179de93189d9426c3d74596ee66 +a2205df6696738fab61b8b3786c427f0 +f391f17e56b8e1ffbcd1a43bc70101dc +e6f3cbe57e5f31b059ebc595265e6021 +4f2f6dd49ab305e3d36bf3698d248eb3 +2341fb0d6d339410832fe35ce86101e1 +1ee721dbf2f524fdf4a82aac08723e28 +d4625a042fd2a6a8ab122ecb92a52c68 +f1612e5122f1a722ae03b8f2f79b04a3 +db7c16f5ab1b9f73a863848ed2f6e234 +d4b5014e67491e84be9fd51466eaa1a8 +9f97a799f6cecb65298b355d2668ebd9 +14dc06403035ed48921242bf445554c2 +863a6b15dae07573042e7ed725efcfc5 +d430683b9d3d56dcf79ddde67ba9e333 +ab438ff75c6d0ce5f191c86a4ef9e78b +de332f60d01e7791fb2b7ba094f2fee1 +fff9336c16920b672e8e223099b6a38e +ed92b2b749f29103a116121c849a2932 +a7411c849af9a9c3d0614f17ab4dc5ba +dfbf3e1889c8d75078eb4c11ea2e5423 +31a7b4b1227f5bfd7fb58d66677c0bcf +d392be91e18e5ce74cbe12e3a93c5a37 +c2cfe696dfa22d9d1304a3afff5aada5 +7c25d933a13241ef4819b8808574ec69 +9f3ad6197f9b95da40bf3a1d9b71464c +7691c65628d15f2b114e6ac1d5eae7eb +c08a05fcad6461c429c7bc0210d30bfb +0b8dd0e8b0d4cce2878cd3fd81b38c42 +c652fda6bb5c35413574898f81a3ebe1 +c0e015f8d59590e61a74be4749c3cabf +f10b35ddb7d97a61628b34166896f775 +279cdc8e1add726bfc3464cc1716df6a +a397c14ba4f7e2ed4a8c5569353b18d8 +910fb98baf13b436a3bab4860a639098 +beee7dc6cb000bcfd596f4bed996f2ce +6eb066949c56e2768c5f6dcded0131b4 +091e7e1975836d19463ea31692c52241 +628ed0a8fbe877cb211da173fc710b6a +76de419ca7d4fdebd931a9a018b272ab +eacacd93887b26b1f5344e919f010bd7 +d37360ec32481b7b8a26d6981af94725 +10e25f4e0e2d1fffd902e8a0adb444e5 
+90fd303592117d92fe06585bb8841d58 +afd86077b90cee8b2e4782d618d24b82 +4567733ce01aa86745d379ce7582e7f7 +842fb887cadde9a5e89ef7413620a582 +20b55012c74c5c3749fd418d3241de45 +917ba69b21316208ae78e3334a63323c +41a0f4a90d3cbd1b7e4f69613c34a6c9 +fed45e1b18f341433fd5e184fe094010 +75524e0d225d251004a94e9e04963521 +5afd6e15462a5a00354b35cce45a2507 +d41d8cd98f00b204e9800998ecf8427e +52fcbd0bfd2df13be8e29550feb3ce8f +4619f558d01687e0c014059def6760d6 +43cbf708ea8e7988af5766a3c7f2e3b1 +f749d319457103ef3fa8e302f53226eb +aa4a022f0d6f76a976bfdb1781e9287a +06fcb312a06ea529ef8ebb18899caa2f +fce066e97ccd6db72d6d4f3f97ad570d +dbab1e077f39a6b08d88585770d08168 +2d9f37417fc870419d3a7fe191be669d +0492414730e83c805f4a720d42b2ddf5 +c5010840fbe53fb16fb7efefc5dc5bfa +33fab9d40efea755ba0027fc62bce94a +370d68774ab5d46e6cb099240526f50f +192f12e979cb61b192182a2209bb6a18 +07ecacd34c974eb47331a58868e59a5f +821e1d97a14cdfb0e6877def1450a8e5 +c7f7ef08dc00010bf98e375bc46d457b +4be1d98cf73673a5ddcaefa25bfbf8a1 +b8d84d76e461319dc916f9c46489e4ec +dbcf30526e06f4374194298b2c509949 +5344588dfda82f612f07a2a19deadb75 +a267bea458e0a4c1b37a2962f765aa6d +5302b44b54c0c473641bf59bf4d4fe9e +42b813ba1d5ea9dcc1df8f83a226dccb +6ec9fb4cf25c6626bd985c9301f472fd +41868a3fee4e6b2f595bd55caad93d6b +d60853c1c796929b87d058683b659daf +92828cd739934c70e06e2ca88144de1c +6f3db1ae3d0c9131a23642fa70494b67 +29ca1a0e003304590a81efda6e673825 +ee5cd24b698c2a9d5e1e186177c0c3cd +fd4bd769fce7eda9f5eac1c871ce4373 +f1447f3669b1039f742f01d5485bb9e4 +94d5891c363ce385a5fffb3e4c55305b +76865bd47da11068cf4323fb703eb9b8 +bc35369746a8030b814d24bd7bd8ac6e +9ae68f9f2a10e2d268bb7226ade2f73c +9a531ec730ff38d444c3d49f6f45ffa6 +695539d93fc10af2b940c4b15fb4ea56 +243e0c25d9cbe924ea92a573068bd53b +c7704a7de6354bac6e84c5ef010f829d +50408fe8125580ff6a1d3ca390a0dec7 +94f0b26de71e6331d38afacc18eacbbf +f07d26aeb4ee94ee066e9b0bf6638533 +bf09257b14dbb5df8fea5a70786bf54c +afafc15af48021b1e207d22465fbb31f +facf6a65b7ebf95b97e7637a8b6ba4e1 +f06843540279270dc10c5a214f3b378a +390b14ba97ef28b90c2db5f395e90cf4 +b9226baf911017f13f33013d90d1c62e +11e6c0257e2b8adb2e166a51b0fd3b15 +92c7a4dfbbb40596db3c460ec8c74996 +d73e5b1d27937beadcd144884235c13b +56501232d1ad0969cb2a1fd90a3cdceb +4063a7d538271663fac848999d76947b +7ca6620c4fbc1ec08031feb08ffbd982 +5d7e94c4e079387787ba0b33312c8de3 +e6587e7501e37397e5cd78d2dc7f5f63 +ad90a0fc9f1e94170968855cb658ddb2 +523d1647553da35b56cd6ff94ca4c865 +9c02e002a6b4097a91f6979b5c214125 +fc20d439f52879f40d1fadedb549e2f8 +21de77f9cd1a0311aa992f21502eb680 +72111398919a9679f1c8827fdc726350 +e1b8bc4ae863983bd24c4ccc18a60385 +58002193ec0b226fbe4a3374c06abb6c +a7749fc31936129dffebf7d9c595a0cd +15ffae6d4088101d7becb80b6ee1ef2e +96873004b8708519f01aac257e935756 +0822eb791b9e43197ca683a150232537 +0638330b58522e4361c63b6ecb574c3d +c91369766380355321089c290fc35c04 +67c32a6fddbf9d5200ae726a4949ecf7 +624115e00b6adb23d40ea49e25c14617 +9c957a05a902024a25fb52fe4948e8d0 +3a10884ff7e17f537c9fa008a81d9381 +df0b98dcae1fc3ccc4493fa0805bc513 +dc2fca462cbd1591e3795d46a920508f +84173000761f4d48ca42886b83520d7c +38b8b96a38e482908bad7f0603eb6052 +f72b639b6ab14362e6e51423f9d28584 +c1e51056bc86b0d30a6ef7859ef8ce41 +1ac6770cf04a809933f6f3e032c3b74f +7afffa8806538bad5cd5d23dd216dc7b +8ef433460f19da45bd9ba95fc1bb0513 +03e7ef22e9dfed239d1e031d33044b7b +07ce9aedb0025a8ef109a571b83f2f5c +bee3778f20cd8306c3b4d39e3e920f59 +c943daa6d164953dedaf164e2ca72009 +d2240e0d80fda2638f38b667382d6cbe +5257731a8edf3894302be28b0b0a95f9 +68753a411274e6db8b8bfcf492e27fe2 +db69a4862d5cf9f70bad86c71c93e7c3 
+bb7d302237f48bb1167f7f9dad9a841f +8292c4aa713ad31a6a9a858fcefcba68 +d41d8cd98f00b204e9800998ecf8427e +d41d8cd98f00b204e9800998ecf8427e +e33112d47972e7b508b543b5559b080a +57ec8b2f4ae3c52e56df6dde9ff8a730 +68326340325524842342e81c2c1edb78 +466e3a9fc2a3ac2b012a7dcee494c6b6 +cb934d8452b975b9b0bf14622bb26410 +ef24925ad91070876d68a2c8f6757530 +a7ee42b0dd1dec9c83e68933d842c679 +fc91c97539381ed93953645eb1041328 +e0e738556aa5df197feb1fea217ffcb7 +e5585fbe36d9ca3f3adba5295ad1fd62 +34504a7737edb2847121dfb414193a87 +e62c15d50728be1fa999aadb0bc14106 +339f0d7cbc8e02571e6ae0ca9bda26b0 +2750aae6423849c47fa65c93845e08d8 +2dba45cc10aed6e47b54ad737521c348 +af6f08b6145cbf0a74766b4741ff0870 +ba2ff09c90fffcb9066a184d96cff8f1 +197272908a2d3abacf50b586b78419f7 +baf17ac21ea7f9993f71ab11e6522ffa +ba34b417d6b79aa30c315d6bf00d0374 +69bb83ce2cc2a12d6e57a0a5cc3c588b +3308f5ead8c9471539d6b374a4757757 +ed69506c639b026433b4b17e8b9f035f +728f4ae3959cea51515fda05fe3b96f4 +33a44cfe131096cfe122929afb3e20f0 +652ddc6fab349ae77716c7c4fb893c33 +de9fb6bcb5b2c66bfe548d8aa0240c3f +ae06ae0e33ebf54e6befa08c353095bc +bb569bd03ddc90710846c429bafedda4 +48f075e133fa4fe1f22d583a0fa6d91c +92973da89722d177a1b007fa26463fac +eacbbfb64dd59da51ea75df98a6d6f7d +1df933770056db899f563e0b35fce5d7 +caa58c1ee57e0d2af66ab917f63ec62f +d87a66dd99f9caef4ee60cea5d37861e +fef031ff341630f860883b50e741534b +cde5fa8e76eef0ea6e219f64638f0d52 +603d75ebd94e27c412f62894077405a9 +c550b559f07b6ed0dc275e63ebcd6b9b +9cc48b1f01e55dcf6f59443003570fd4 +a63a1d13b26c2c938383befe009552e8 +d423ecdefe67235f7c67ee3f1ea61aa6 +5172c1b754dbd95e9ff3f7478956a069 +8fe828d2851fc49abcd37202d982dc3f +b15082e99c91ac92cd16ccda845b6ffb +fb2d0044c8605a25483473195e0c4ecd +5f20c8c93b91734248ed1f0cacdce225 +65f3d11d0c7b6d013daba730c8b94592 +d538d1836863b4f18a0f2f78c392f25a +2d72988b1ae75bfa672f8c6e64937847 +37bdd921577dfdb4cabd42efb87a2921 +11271ddf3e413103a544ee5e70867b74 +1a40bf139d2fd746a8a9879b8280575c +91b1b8a2aa9cf6ed50f029122d304b05 +25ce938804f23f36154a92a6cc1aeaf8 +691c454d9873ad3d09c496d53ee0c6bc +8544ff29d677a3577b4af4b3d21a9b64 +2e856e61df11f27cd803363266a0350a +ac50a383d18b3c7c9b058d240bdda8a4 +2abc9e79d6edd868c9a8639b390fca97 +1dc919144cda17381cf1af68b3825eb0 +4fcb974c1b0c3f1949b4ce4f16742a4b +4570b638dc797ee79420d78023507908 +f96c4f504e0aa915de1ea6a2143e3653 +8a4471adc01358dbe4ba365e37d4a5fc +41ca8189e6ea95dfb0ec1dd36ff7ed2b +f2c0191e0f7a1dea3e44cd190b97be37 +dfc140d72a06025c288ba689e2d5e15e +03a4669244fb99f882e5e17ef4f8bc44 +ad73026b5f15d9f4768152a6e7252c35 +7ef2bb23d19e7bf3414682bf16d17c9f +9f29169db670431833443d5bec1b241d +5f7e5514f86950735e9fe561cff570a7 +bade19a387ea40286fd817ab526016f3 +51815c0eed00389804cceaa7d3eb18d4 +e7bf7355c103d4a86c97585e71e519a2 +f92e0b2b37b1c6b945c9d9ee2e2160ed +267ae58c836a9cdfc9713f69ca4b32a0 +085eeb66b320fdab5bbfa255a4f2769d +799727960343678811377031e631c987 +a9b3f5b80261c3a185b8278c1eb1dc1f +958dd614831207caf20c19d96ceabd5a +fa4d832ac13f49bab77cd032cea08687 +6a99e7147169829b476c5f69ae80a690 +2d3d9b69a530c4bb0af75e1a7677f941 +b55fd38f933194ceb9c124775ee65d82 +f2c5c9a41d180dbf7abf54dfc7b053b9 +553fcabf13cab220d077ff0581d22cd5 +3a03662b37811d253c7d62174a25d2a2 +fdab9f85b0beb686ce6eb2c27e9f6606 +c296cb017770e75f4126217ba7a94289 +38eb002ae5db0c83a1737a76ee4c82a2 +c0bfed2be3ee64515847aeb3cd098655 +abdb7a7920b35adaec0b2408325137a9 +f7622881c3c0d9bbf04125d62a9323b3 +0c139aac9e9989de56f128172f9d1d6f +881ab5b63c6cffd9c10caf5c46fd777e +b07d0f3e2230af1db06cc189bbe830bb +fe13752c6b71d513897287ed1925fae9 +db365912c76b67a712d9ebbf6ec33531 
+6c572d0d489d3937089a79bb201af793 +90f3c37d06ea9577e77c95a6aa968784 +69f70098f9ed429028dd0a6c43de80d3 +426a2509e3e854b2197ae47461d45ebd +68d69d7bd4fd72a87b6cddc4324ee7ee +faf50a3e9d536fd732f3df53fa2aeac1 +2ccfe139f827e25223b889bf63d0122a +d508939d2caf753c9bd62b509008e4ec +30ac361ea6874f683621b0ac86b03043 +ddb9eb46be20d5deac9b467cb8221deb +1af6a340a64986fa22f81379c8df1507 +8e7339237462c5ba1e4e4cf14706b683 +ac6146657d71b931dd4d070384588b99 +85e86e6b255adae6354af14b503bc9f2 +67f7f0b4e6915e1d204b6e471431a9cb +33138685fe5d5d96cbd7b92f69eb76e8 +91dca4eb509fcf1d5a61bb3c138ee90f +0e00674cb873c9102642870634cc2d49 +e08ba17e7f164309a9a7c79352087a77 +83f28f86721799be4ffc9e85e09e32c3 +c09e08292356b3b505b246b17fc1db8c +e6505a948900512d1c3adcf2a298e439 +ce3ec9380e67365a9985ec1f9979d707 +3936c6565a8d7212ec9a4ab8f17b5e3b +12c4313826e3381b7f4189bc61e81dd5 +ade29e0dcc327fdfe7c90686901f2e6c +872e2249a1ae0f35245ecf9189312e04 +1f5f8cdb1acc76149dc1d70c0400872d +b72fe4698e3f09d962e19b16af33552b +c5ef52a5d1e45110ce9e5ebdd8c26c23 +8080d59958e8578d210dec8848b65a56 +1b9305c4bb7dc5f66188f8c1690d955c +b9e05c3b8e395840e8c587307c4e7fd4 +c87523ce8edc618ca5a7349ac723471b +905838004686f8d4655784033e64c86c +7bb8daf550be49532a912c0bbecaf4ef +8f40b661f6b1fca00a2e40111adafa43 +3f80f9a0472bd438eaed469173703f45 +908cad4a994e1f709fcc7f415dfaa1d4 +bf8dbb99e35ef6f85a0b0ad50bbfa194 +80441fc50014e3083cd2b4cdca095712 +f5045cd83d37dadd75fbfbf9b8458773 +bbba771b1cc40c065188b6b1b7990541 +85cd3ff1fcc79aaea72c3922ef53538b +26b817140ec401702759d89aaf9b5e05 +405ee9ecb2f677a05245fc9e97df2128 +4f1db54aebe73c847985a5a297f79a0e +c2a07942c06752c062ebcb9baa76f1f6 +b334ab38b959f661ae62166134332a20 +9cf1c20f442eb90fce13976a64c9eaba +c568d54cc1061edbe55bb36a8916e027 +ec76f18607e55828b45c6f413d2f24b2 +f36bacf723a704b2b5895732e3a7b30c +e74225ba9ea6fa3bd95f64183e4b0da8 +6b4328000c3a20612f1346dbeb27af9e +60d30810aac3634ba0640cf2dc947938 +5382c9b94c6706f4fcb7f4db93089296 +9c04a898b9d91d16bb581a83d2484a52 +2c4648e624b15114bafe5241d9cff7b8 +8b71c8e0b15598004c015022aae657f2 +5d6ec64bc4cc4f1b71b865c9b61da82b +4e6bdc1bb14b67f93e9aa8cc691e5a25 +974810e9823b25b8c8055c2b83e33ace +c5559a4013408ff9093bc4411fe263c0 +9f061fccca0d7df7876bab38905a9311 +14221eb6d068b956e9d5a3ba61a2406d +40c483cc248df08ffdb485da6c3ba881 +b6f1114f353a993f1e42437724b9fccf +2ddfec536ad8c28111001c897884b538 +e74abcd35126a8f7d6e366a1f5d0c33c +35368fea903015656fde751b5c945d5c +a3f2784bea3ef896ba44e486266bdc32 +8700ea3945d9080599de4289d67f55c0 +1b8e5f9790354e7cb09c0995806122ad +14eabdb253654da6482a1bbf84d531e5 +ecc6de89421151703ec56a23ebfd8b66 +8558f4053ec1339ca73b66ef409ebce7 +f909cc7d81ed1670f0cf6f83f9157cc9 +e309d7afd1d6cc56393bb4bda7b0e06f +89f5079cadfa2086278ea0a62733e71c +1a6e26e5d9d5fb42e57d7ba5a29e4762 +d819950997da2ea73efd10eb447ef331 +bc0558ae85f9b61e65b72af15fdbb938 +d46e136a1c3f51db26f3950d3d7d102e +80faf8161c9c1305383c0b6702095cf7 +572253c608e56682dcd3ae2caaa8c220 +535006152b03683b7b16107a1f287e66 +20524f25eead028a20325fc8da1ad778 +6dda649b6293c978260f1c7c647a63f9 +d62d10d98942ebb052f133a0a3950e92 +69efcfe2b68a94b3b0dc8ec93e13b046 +94a825e64c404ab3b945d89c4f6260c3 +cd75f57322ed2afe34a57c195ddf93d1 +5c7738dbbf4d4098a740423ad9b63453 +ef26eb0225518f21cb24790e86bbcfd1 +981c3f2f09c1d9c7fa4e52d12454a9b1 +b5c5324e6688fefbc13d67bcdbea1c49 +830cb5a5ff3f7255d3134506bc32ebca +07f48df5644c00532f55a34fd211d2bb +d41d8cd98f00b204e9800998ecf8427e +8378e7432662aa500de71d4dc79da62b +b6907b090e9e94a77048383c17621a1b +eb8ebb171ee9d30f775eb08b718854be +c43ee3f2bb372312a697442ab071cad2 
+b2266056e8d9128999f0a1e395a6b073 +509ab76038d676070347f8463b7da0c6 +fe1f59a2ef2ce377c51e2cf1b54ea01b +d10d5c1f04f459acd896cb677ad0aa4d +a7336d759f0082baf711dacad477e1c0 +c47114e6c27805a8c838c4d262217e3d +e68d6d80b1bd84e44bbd408a866107e6 +22443c2e2bbfdf6431aa5939fe8e02c2 +b4771b628534b8b33118634ad3bc2457 +b993948dbcaa8b8725d0d3724cbc363e +88c05f2e6cedc31c93e3e1c5c9fdaee3 +2611f98829842d975eb8094af5fc98a9 +e7da74d0aa9915cc71a3f24cab964b1f +242977327df1fd9191471ca8e6788b0f +16b28acadad26dab0fdf9a37bc1e812c +3d7778d9cbc90ce41425c80a02ee3c32 +4d856c2a268edd04637309156514d258 +4b93963b996b74b79d642241e835f53e +b4063b02a7b5cdde15a6afa6bb70622e +fff51c7d357189288f82000b06f912d7 +ee1609e984c788456f9dbbc8aa499f4e +cecab21dd607f0c66e22433965f10013 +bd4c071abec7618bbbf6ddd143f2ef83 +46aee9dcd1344d6c0b448ceed8e8c553 +4bf6ff2b2ed95e12f2424ede8355541d +cc69af614b95dc3b77a031cc91d31b1f +fa255ac72f18f4e53aede3aa57f6cb35 +751388163a44b375acc016e4a31e487a +41d1b0008a55a182e4ef809c63e48aa9 +5321d8a91f9503a4ec28856330894bc4 +cb03a78d2dc79f3ceab95f15c5e27c53 +8f5774d29590f00a90e1201850a448a4 +26ddeffdd9e869941df3df7b14912e24 +e372228fa57ccfe48c741f610b3e0a72 +3bbb3d03de175d0b9dcf445a72c2cb9f +62840f803da220149f5c760ad1d24804 +42cb3688430ba4714883bf00ebf57120 +ff39af1af82d6714755cece3147e4969 +bb0e82fc5c180938aa5e7492f625e6e3 +9b7f93a9b2e357ec016779fc480f3a86 +d8a54cfcb6965fdc690be7d5d1051f2f +d023af40c3612f5c470d5c5527b414d1 +cf0eeda9424d2e09b04bd47b6584f47d +36b7340031d13a5c12fcdc78b6abf0db +fcb53152d54db96b060248c274660974 +14956508fe1a41d8f9cfdded72ff13d4 +7128c0ad5b233767d0b66810a06d1467 +1e6b7be8f0311c5d5083532b89dbf39c +709d780b4cba1ae0ee9030145453d9c6 +d4184f082205866f0f00b73eeee5c1ff +1568a32c4c007b215c7d34a5d791d390 +4e2c7571f646a00f3c8a7fcaaf1ebcd1 +4c8636afba12349c509ef4612f7635ca +549b262f98a316cdac87d62c0c8d6c7d +ecd536e7c6547da1cf571fa70637e4b1 +0dabf4afc6577a3f1977729a5772792b +cb30e9fb1922cebe3d40b35e34cdea4c +b1db2e43cb00ce9aa902078c63e3f93a +e222484cfd17f486ba5b9d9e1e26e1a9 +4c0e69639e856c18ba725b6a845462aa +d44005c7c1c30a1de733c8d758ef3ce7 +d6980d4a84b81e7037619d0fd4398335 +0aff872d7cfaf3b0719e28f5b5bad2ae +adc7417540c7a156bd14f759f6ed35bd +e9d7e43e1efd50cfd1c901282e27e58f +e4ca16d54f9a8c79ebd106780757f8b3 +2be2466b8fe9e684327396d2d21d3b1e +4faf9ab766a0482e4aff51e4c97dff8d +b8e6a6bc8e6fdf4aec6f3dcc3fb8cd35 +10c25310df42652069aa95df3f7693bc +aaaeace4a213d9a99f31b16fe381b6f5 +e9f1ffc7830430228fba238e6188ad7b +942ec532ce3bb0f9679fafb244e250ec +de5ab3d9cdb43de723970d548c01a111 +0f8f55ae6459b048cd80a820da68a2d2 +51e4e8bc8eb005d2cdd114946d2943b4 +68454b05f5f0306544c374a9b3670d7f +6411b47e78b971f82785cf11d53c0e7a +a275adec3e88e0c9206171a970598447 +db5115e81dc0b665b3bfa393507dbaeb +30c2f3d52cbb644df039c7467b7ea31c +0c2000eb5402b4efcf01588f02b53e8c +3c9ad89359a2d7a5a4ee87ed1cf8cc30 +46858554fe215fde02214d88fab2b615 +1c19f6f0ddb0e853ead9a4f40519be3f +9b2d9967c07ba5261c1ec22948eff33a +0e6bba43ab0146e663d33c44eb109803 +403b6165291786215ef7e3a182af68ac +2933817df03362b6bb9236e0bf392ec0 +07898bd9fba589320ec0ae1fb11f575c +4d3826b8158e87db85aceba3bae8b6d8 +038bab2006f95837e9012337595be2cc +afd279c86ba0b90e257a91c5d9f7c03e +5287a16963286e3852144602c63e3285 +1ba240f559dd46fbdf69cb3698ce372a +ff6db609de5d386b9fcfc8e1dbbb509f +f050129d222fe58500e775c6b567ff82 +1ad8b6149e8cc460e00a5804280df91c +8e55223047e86295bf014b11b2433417 +a6235184b5f6fa512ee9491493fea835 +46768935fdce98d98b625a2a3fcb68c8 +b06a06aec0a06e253d86d17073b6ec77 +dbc4e088fed7099e726dbe0199f72288 +07302bd8eb997206f00bcd914748cef5 
+cdab8994ccc40a72b2522081a8873f16 +1ae8710248577e50a93f79a412e7cd84 +859da56a540d87cf707d9ef3183ffb24 +29ac180be036e41e9259046ac7bd3f54 +82b452ec5d7fb0de6b9041de205f51a7 +73d10fe1e1ed62b06386c901dcfabfa1 +2f050e54ff5c78ae402f855f441ea47e +d231c9600044c8d7d2facfb88ec0607e +10c20f840e9fa6367ff5d87d529e0541 +af9b1e793c1b5c95c7731f0de5389fd7 +08c0f0be2f8ce190319d7bed1593ef3f +e8ed5224c7a4ce4869d5ca25220f0487 +7b80df548bec5f43cdc0c893e403d8e1 +74278519c0de0234d23fdb1795219c46 +df2691d0729bd23d1a70fdcc5c5c6c6c +747887d01c59a153bcad1ac067285368 +ffd6e283d45aefae3b04a1ac0ad0b708 +3aa1961c10b4398bfb1002b6c2eb1613 +d9cb55b46648f56d74abcda991e28fcd +1bcf3f73c3814e50f9036381fb295513 +dea3d72d51c9c90a48a82672bc39ea4b +55f807c9835d01643ddf735322e418f2 +89cd84069451c616827057748cfee0bb +ff6c14b642f8ce5d9bcd8fbd4c444077 +10487e061a31b8326d1e654fcbed779b +d2ffe9ccf00e450a4a615db51951406c +a4b6996e32e716a486ad880282b64d9f +22f6c5eef0b1705161315a2e5ca5671d +f80b847134e87863eb854dd09ce392c7 +60ebbd2f76c97aa4be55c9d782eb967d +3e459a3fe1d976da62ce747ba4de9cb7 +32b728f5c0f18ebd5efc3d8e3f5d5536 +f50523a5e65ddccb0250d620687d0071 +b1551f36f10d0df03f653508100c35d9 +3e121986fd21e2be0f0f5896fcffc01c +bff6d5d141c58b97f8d72e54c44ae314 +80bee783ce1340cdca5358f8c5114bac +6f66cecd030cf04ec09399c25875e57f +35ca2d77d957f8b4d3b2bb572ac59ffe +8a1143aa8f57e7827b737b6205bbf0a3 +b69d009d8451dae02a2d5bd19322b2b0 +d075acad6a8c5dd96931982f0b710ccf +33849725f20b410d9418d2d2602f9a6b +e8be62521a8d69ee87dc63e76bc90dc2 +22a981399a379383667e1963b2ede71d +ee17eb82f2ee16e910a0aee241e6edd9 +d579f596fd48cc2220a436a7a02c8e58 +74781967e95f6d7a43fb74f54e3f1145 +91db00c0e3c1e7c2617be6b1a4863b24 +60b597bfe2b0a05fdc20fa7c4aa32340 +833e235cc647b7d643c2dc6126adecce +f3e6e4467e1d9a755ca92365377b9fc3 +afd06de5346c3df1e5fd1eb1f32af1e2 +dff409c7287f3fff1059dfe17628b6d0 +de0a340db86c2e21d86b4141ed9929ac +e68a53ecf87fe5d41920c915180d1c26 +90a82e3b4c3ab8db3b1a5e6ff9fe71bb +00f6a5b678242ac9f0349e754646fe6a +4ee10bb2d977c45302cf3fe4d001c2c0 +c232d9127e72e45f1b328effcabb9383 +efb633387ec02cbef8fc8a0d7c75c03a +d54f7996727f6bf0bc56bff067e9e563 +aafe26340d3fbe2be3cbb723dffbda31 +6fceb0e03dacf6f1b3e1ac184a4d0920 +1d35d6f6def536be533e8ffbb237d658 +00d629e07e0d96c6b664a183de245f35 +5528c8ec021797f8077be6ae16c51234 +2494c5df043cebf05127afe63039b535 +9e42da4807e8b853c29b2a1e5d8eb4df +eaa53f2dee42c6d66e80d6fd0c1ba410 +8702e95167390596f4362b58470485fa +9298f497edb46ee8a6760dc1907ec5ce +ed34c80e40976e394ba58693e344c480 +93450a24ce5b2b3d116c8eedb4e6b266 +14297a904da64078155e3beae363b550 +423aa20afd23684ae6f3b8f263c11f97 +ffa1e12030a264fa1328bb3d44ea8ea2 +0d816eaa08f702f844182251124ff556 +f057c14fda68693377db31a74c1cca2e +e8bc29367c6783e21ec009bd923b372a +6f680c48e563596981585d26605b7ad0 +fc290da80bb650ab21f22ea66d01cd4a +d99eb2cae8293e1db6ef59965af3fc0d +ab157e48ff7fbce709325aa58cdd3554 +d93076cca82c0cfd5bf01d8688162c6b +0ba23fd736a330b2002e7c4c2b2a1406 +28fa375f47d0d1df3a82f0170fd72993 +f027c543ceebcb5b1cffcbe7131ee4b1 +c6948be12c9888c40d338e41959d1cd2 +58f63666dd2c825fc3a9df40d9c0887c +00b041580b505cb9877d819ca84a149c +2c89078126e1ff6825eb0864f7ca4195 +acd83aaa123c6c02c93b00b4563b0e6b +0b8758b50eadaac5cbf9461a3f0d9329 +f568034794f2320e0a554363b4d73bbb +18b7d5b3cde0d6bc2119b946e7fc71d7 +abb155ec82334949af32ff1904da8921 +00a8e2ed03195bb6a14807ae5d33d9df +6cfa94418920175ae808cc66c0c64500 +86e48bf1636994d81826b19442694440 +ab1b139e0327be25c8379abaa809bcfa +26144f2cfd6a3db85b20345961228ae4 +08205240eeb8e52093abeae8fe3145e9 +664861698b5da01e11d716c1f9cbdb7c 
+f54045cb313a5515d4721f5a683cea45 +30c4b286a9fccc70ba9d88417f96c096 +ad84c1f4233e1bd60128b4b284beb6eb +79cf4adf5508d27a616578ed29349d10 +85013779f34b6c0295088ecf4f73549d +26255dfccd7ad2085412c019a1adc47f +9c3b7f4d5d02268693d651b567b4fc3b +e03e3ad6cd086defc0d53cc42850a9a5 +9fa86500bdbc1b89083e631ef64e14d7 +e4b520886ef505764b8419df7ef8c5cb +4bd4391fb16140e9db103cd3be3e9568 +6ad1bccfae06aad7a67b28826a801234 +ceb45e4badf3a8c3e06fdcb6cd252163 +611c8012f0897c6bc0c297987da30a5b +45af573f0dd1940817045026ecdc93b3 +3c951c96f5bc25cead20a9a089eee649 +017ea20529f9980c69137462475e3872 +f47973b722804477e44a2dce6546d758 +0ebb74763ad2b0c0ee2733dad86bd5c1 +7dc0870d8648064ae5b3ab6555d2a9ef +12ecfa3aec0837aef051d2cb044fdbcd +e5344be9caa73ef6c687db8f5577643d +ad0c54ab1951c953ab72c6468ff1873f +ec7b071a2a85789d23f159cf55c1385a +78d231ee522ad1c279b1347a5cd9dc1c +243b3af8e8707780d08cf5f4b96ba2c7 +9c4f224df6789706772f7eeafc4b5eaa +35ca867a52ce607b8302a188786b26e2 +eb6aa9aa1fcf8bdde936adda0a5a4a6a +b0c4b541026fd15974803898cbd22d0f +7a3db845ee76d1400e1685f70c3c8121 +3a0b121669d17d848cc91c59aefce98b +cd14cf27b2c159c7f6d883eaa5764932 +862600d1149d3b8bb899321f20fcf269 +b2523fd86afe29905b65511a7a100a5c +328325aef5554f2c394d8dea75209523 +74a4ecab9a380e940feca6993e0ba085 +7e9186f5a74112de6820ed942c5bf2ff +d8410f7f9c13c4ac53a340cafb480216 +685364c204c1f6c6802b5502ba021915 +bb215eeafd69278b3e9780b3a9ab6cd0 +d78b7e61bc32b91306ba658fc255d164 +46f28f2cd2aea7361e0b7af91f6f31d0 +e3a8414b311427317ea85bb32c4c6496 +d982b7e41482e9d630cc8ee39dfbc6fa +2f10e52c47c74eb9bfa922fa1a468d83 +0fc89448160b6f966a91bb888b036006 +46683ef4fca4c67b5cde932ed2b0f2d8 +7140d522fdee22fbd7c138fc304aacf6 +e6b12e3f7b0a6ebf9441c504c0c76399 +84fd02d7ad2bcb58a05cdfce4ff0810c +e8b0f7c70ba713e69551502a7feafd3d +fe268461aeb24833c2d1a525c33569c7 +bf361d555c05c1e20f25d4d4f962030b +96e8df84f0462baa8c6eeb2813a4cdac +e6ad79d235abb04255af16ca175e5ad4 +28a8d33706de61117cc7f1db2a318383 +d11d90601b5cdae17f38d825060db863 +8fa4003b16258d0de639a5a7357b78b8 +aaf2323297a036c2c5d3b05cb907bc56 +83727ecbdab2fc87545115d4cd8868e2 +7394a10eeada97845900767c6dbdf6c6 +815bcec299eb5c30e0139869cd1a7df2 +8ee6fcad62788467a7268ace590db6e2 +4bef2d709d3df29cc26bf8e2699e1c6f +7854ac26cccabf07f8edbb596ec345b9 +33271a5d4ff8dea29ed4dd1931563991 +842cd82ccf0aac0c4079bf654cd141a0 +246907acc8c6066f664b973bea22500f +950e778d97797c3dc6aac910a6e4c757 +1ef40ede6ba92b7a7e9fb344d816da3e +96bdf13eadc48819130941c5e5bfb2f4 +c6c13ec69c56203a4f0bf100d982309c +bd11e8e6916d20b7b3dc0729334ccc1c +14322af9ba299b0293ce20eec7d2365f +c8e45480c6ccec77852d5a34182c49a2 +3666ec2d0d4fa00fc2303e0373b85fda +7eef7fd60f9a0ab2f6fe2d31cfe7c7cc +9b1ac4955ff63c630d3e37d029a52e5e +a1afadfac0daa7ed6e1b1c611ce5d6ce +e1e6a6edd861543c0091d6dced0a2d0e +cad63c582789830f22eeb33e38ed7434 +6efab923621dd4bb48b5825e43f3242e +ba44a5869325943296e3f934333d3afb +8d8345f4d2a4169c2e61a1184081fb0d +477ac25ba902b6a05d97dc618d087784 +f716265bba2d0e2c01fb5bdbc510b4fa +1368c3c376577ff06f2f5f93b2d89df3 +1a9030b1dc9803b3dec3b3cdf8bb91b0 +40da7d16be1945961e7ca73689fbf70d +5b2e0236983ef6c82c9ab224473a9386 +ad125c3f0129d782bc23d7524ad88bb1 +8d22a8f28e1c9041b721956f9a66b439 +47b1ab0ccc1c522b1046360a24204660 +cbc5fdfbbce3dcaf8e767b943059ce9b +afed3bd4b77d38ceab503b6bf8d48f13 +d41d8cd98f00b204e9800998ecf8427e +bde74f6ee13decd6fc4be87ec4569265 +87c132f809bf3fa2552a57e74210979c +72a02a12792f1ccbc980feed3f85afe8 +219bbf777cf814da234e31e0de42e6cf +4902d2e72ad605c4b834b6ea538dc845 +35971811a2a4036765f14137e18ef795 +98abe18177697ab480fde044003fe959 
+38ec84252404c33c064f050062ce6a7b +0c0070b1499e158443b4d3b27afa0667 +23f49c36fe06883fb2536c302d94e9d6 +7bb7e76bf4c6bd97df3a311118ba3d8b +6bc8df57fc69dcc23001ff9facbde13a +4fe6415b5011cc6bb9a8e6ee707ffe17 +d3c897593370f265c7ef9cedf6ebe49a +dd231ec5ddb6210d21e45365a77262f0 +03ebd9b477d269df67e2cd08f65b163f +1b227f84396da4460c5a0c82f75fd1da +def4f6921d4176d717d977ca128ee1fe +6e0f9d5d405cb71ca62797bfa0561503 +be7976468e06bd8706376dced80cc03f +4b46e4e9c17da6090b06257a114e2a81 +107addb78a8fc78ba69575daf084c8df +dcc4cd7add8f9a1df4188fc0cf32577c +5eb39a63c9bf801bfd792f135d18635e +8b8ff5571f8ef9ddfde91ade70b1399d +c0d2861364381332da36035038dae91c +53773135c25ceee428aa57823f96b0ca +d45f3a281aee57bc31604d14172c1d6a +f11473f378336ab798cce9a16f59700e +2ad88b662265b39df85f5689611f93f1 +f0fdf9456985b89b4593cc8a97d25e02 +a3fddf1d91c9015a51ca012f75685106 +016b7f14fbe4d5fdaa6de305b0ccc72e +3b787b92e733fc5d796d5613095d3689 +430a48d960ef3d166cc6674318d34430 +f2a72e2fc6323eb7dbf2623633ffd42a +7b0b8f55dd7a23468b51efdc8b5be6ab +ccceb1a7f490519a8a0f4655673dca9d +8344243281d8791d37716a9fd4a28a86 +927859801cc15a4d44b06ec114e791d4 +8cc618918473efe9c6305008d617e00e +d182a416e23bcaecaa63e749629a7cf5 +b5cc8f0ae93ea4ae444d30b68bd04889 +54f87337526f34f04d84a13a8480d736 +c7b6d71fda2bca1510cfb37ce3ef4770 +f93137ccb3c03d75e67a68c4a6083970 +c081c934a0b572fe5af940b9b3a7ac95 +e3fc94748e672c8d2a875aaec190ced6 +3a8284d521b46bcb8eebb25a8c5f67b7 +c0e752533f16d3b0aa04157cf09cd44a +831fa6cf72631d933431bdafa679b94f +3bfeba5cc5b9063a5e0d33386187faa1 +f152c6ff80f7931ef3b2e78fefc36873 +2ddfce188ad69ffa6774ea8d0c2f0972 +d1dd566360a56997d8ec2d9b982bf14f +fd3230bbe5e3b48a9e023253dbeb0af6 +3303f0b5a809fd8dc806c4e1edd753ef +3fe59e55b395112223d984d449793b8c +fbc7eaed192c484f60fa5f6dde078f61 +be54c3216c196b7d7cd44adff5ec4b42 +9d9b6fa6ff7260e1e5bf5a0640510815 +c475b2de0ba884e1cf4b271fecdb42ff +0bcc235d059f9dbbc13b1dd102860c5d +f19fd53a32d7bd84614bf5aafe13bcc7 +8b3a355f4d28a7b4e0470ab1545abb7f +4577eb10609d00831602f3bbda0baaab +dc346b25252c248c7b9e32788ed12096 +8f7dcb5a4167bcc1429283b3fe3695de +f3b8b0d3fc4d2608416beba15432c889 +30a2737eeee6b9b5c46032a7fb6b3a13 +f5b56dd7c3144690125ddc6f7423cf63 +bbe95aca7d5b48d5140df2d3c86bdead +583df8e747de78a5c43ddde6e5a428f7 +06b1029ed359b6de920d23d56b153b99 +ecf3dd7e10834b95a5938a952916ae68 +969a050d8129c1550d6d6cf62422229f +a9b6eb341adf2d81d1933f293085b77f +8b8fa6081ed91bf8ede4f75a99b3b9c5 +318fcf95ec10c58bb1f81dd7d856648d +2ac22899638b73bd37b862b19af562ec +cd5b7a2be8e8cd08a76184f6cd244898 +46b75d305c054ed0ef3e8a147d49301d +b85a25d1821ccc593917add296849fc6 +2315e20139173573ce34e7cba2e380b6 +ec3cb10bfd83cf046a594ea119181434 +5d50ffd9b7fa7b472411de8be9fa9434 +1185b538940a527269eb93181e8d5754 +8e065b5e92064b27ed7d08201ede21a0 +dd33f68f485867c301a23a5b00371243 +3e26caa0a56bda7ed7c1465bb35b5308 +8ff2e68c10854d7468410a393f05ceca +574c68a2e80cf8e39adcd1605b93d3fe +e9a32940a4ecde19457643a83b24db10 +64f0e75bf0a3e295a05c6ad50c475f36 +271d30c6d0f6ab0b8f7cd40c0b64549d +aa5136bb1c0981747e29744f0fe20d47 +d4663117573af0aea7eeb6e5380dfffb +62e1cbae659fb430a5dfee42efc3602e +b87c62a427f0b40c3aec6e46684d2f71 +62f00c432fd8834fdc47e8b22bd0133a +7f917b7513f1c200a2eeed034675245a +12bc8b4ee3626863ebca7f11b6a48cdf +75f6e14bfb5d03c61e6bdea3ad226354 +dddd2b02fc7e52a20ceff2561faaf00a +d44beee5a984587c465f52d1a3041efd +a4b97688dc56b13a8294f0830963706c +22f57961726448bf3cc32c2b75c69c38 +42ee4a5d5af88e7a4252597562c87e94 +707a261dfb24f44250a59db313163068 +137c5dee952735c27f3c64b60cd65329 +2a5f268c0347fa0c6cf0f2253d044174 
+3c016bbf3ff201005631a1ebab4a4a0f +fd3a7bd1a763383f001dc37645420dcf +81930ac20a7e7351709f36ca18935248 +ffef8a212820b73024faf0822220b9e9 +14b39db364d654034c7048e7328e1ced +5d0d261875778e38c2b8806c5dc3b936 +bcdfbbfa43696a968fc18eaecb5f1f8f +24e91ebbbe458fc3227c55ca09ebf5b8 +e34b0e5264a2f4e6bd947f609bc1c5ba +46670f252523f88b9d98749916c2bd9f +ea6267249939973404173e3306026f7f +bc796416e1d1d44eed23dc4e8c94f0ac +5fcb25f386d77450c6cabcb52d3f827b +887ae9921dcaa5c13d6697dd9defa830 +6b966c5dee2e22b9a6036485cc601596 +7a2880cfd6c48b0577e71d87a6b012fc +1b579b7e3c2efb956968a91589fd889c +f7d6de4cb9d64012c3003e0c7635d20e +c40b3a9ca87f0d52ee1114c86001eb70 +e4a684afb41360b2f4565eb29263f587 +e7daac23d75a0007b5ccc36210b6064f +fbddebde90098af52bea177e32b0d512 +8a9ae8f188e871a290e2186319581464 +44ff53af95214e14ba4ffcd5eed8e41c +b5e81f6d2781c5bf2ac1ade162eeb989 +3f783c0602d97686e563c864c0ad597e +40b2c217e915b5e852fcc7aa90810391 +f3d981428c5b5b4fa1a0d3a25282f8c9 +3b77bf3d42b32f99c21a1676197d4221 +219df77f8d80358eba79102ab6fdff66 +023cf0321d1a19a52b58408b08a5b5fc +f1c9bb46112a9a09a29d75a3edd307a7 +257e16fd6d1ce5cfaf3179573cbbd8a9 +2b72e0532701b5cf726bf8e88ee837dd +ad49ba5349412574b34cb7a96de082f3 +17256047f78a34c3903f9f496302ad54 +face68b410fb88139bcd5b42fe3b22c1 +b295bc9222fb686f102819a022e95533 +68266c37f47faff688438c5c5e6ec3a7 +5ef5a9e35149bfcae7352708e8cfee61 +da2a017e456f48f7c376b6fd7b9363b2 +e5cfdd3498a2650addb5af9d243d7782 +10f8b633242b8ff1ca53e51940b4c07f +817710b9f349345164fef05921920e51 +7151565394e97bb45757bdc020fee071 +aeacf5a19696751b3d6f55234e92ee3f +9a9d2ce7df4b74b09c1b65422b77f9d5 +3e1d517ac6dc7d25059927e6329248eb +95f7989891cfc839a50568bcf4c0f144 +e144038d2edfd35bb27ecc307dbe0252 +d41d8cd98f00b204e9800998ecf8427e +d959c85a309f913d8ce7333aa73087f7 +a4ac2cfc05d24d045da18b186d44a565 +c2d0a43e997d924a1e14590ec6b791de +8b72b028485b23a0e358c14803062928 +ecc1477e11e219f4998dd19c38dad515 +394938beed40dd7509793cc5312967bb +34f2309b44abbb39d225f0612ff03e53 +26048ca674d707c794bdfb5123b18405 +4480c16a017f57bc36d4a0453d4ac8f9 +90ecf5ca383d4fa69a948f01345123c6 +b570c3fb1f22cd42c50fae14493c83ce +28c215fea4552ebc30c7782e7eb19f1f +3a033a7f7ab54ffb4d729107e75ac187 +b4c7dbe202c7d1e2b8c6633b411eb86b +0e166c43a8e6487864457eb219caa2b8 +3bde1f3fb552fc1d6b15f50e99cdc079 +6a5d4f363b7fdb6cb600086f672655d3 +6ae93240d108765fdc5f0f778a668ff3 +e7e23817292df420e2507921cfdddf98 +d155f412f82c54f752021bf9b91a8a4b +c3cb29f59d052c97a047fc1e76b78e05 +f0cc478a884e02388d6330cd662ac015 +b0d829f71ff80a0f039e905673543ff3 +db9f4f9dc520e94f797ab24618dbbbff +4a4952db98c00a08e341228c9cea44b4 +dd598c104fac71d695959ee2007abfc4 +bec42d4764b59d3dbf071fba3fad9399 +b6a385df11f52d23ade84c8c43eef8a3 +02cde845b63214277128da6afa75c69e +f867a1ba0f250359fd5a2a06e3aecd6e +88c79025f8f7590bc6b4fc04d682b6c8 +43fea3216c31a54a85dbbea4bb32f860 +79aa09aa9acb50bef5a1408fb45ef778 +113ffceb04ee5a02d4040dc7caa9b6d2 +b67b8fe74ffb5e37edf9cdc6a226c980 +9af9510863a4bff704031ec7566178ef +7157fce8b6864949a10e83b0f5d69f0d +28729e781809279d6d679feb38b9c4f5 +a92bda9dcfd6751e0f6cf90a87312fff +cb3cbe11aa7ec694d44da849bb150fc9 +e732db84be3e51f7528639130fd25d6b +e1dcb9c29a764f4cf9b2d9767b633c0a +fec99e8664eb9a7a9bbf279596e5d59d +5408d447bf05c93318d8df8b3a24acf5 +0d440c05e8d66083b3492d4722b0ec49 +610871d04fa0eea35396eb85a96dbc5b +371bfcddcad76cd99dbf897b07196159 +91693a69317ce4d14c43d8ac297d9907 +1d5c5e373b714d90bc69f0bfc344f26e +9ff0fd591e105cbbcb60931cf21aa83b +e6c9f231c311cc52173edaba423940cb +c5bee3f3523daeb62f915af7a42ff3ec +3c10d7304d3ab6f37289bf9a321a9b86 
+dc133bdf457374b4f64aa53f6770d5e5 +1e3863f99f4ac2a61009cea062cd76d2 +f8776402d972484cff0533cbfbac2615 +72db22840b2678dba363a49476a32dd8 +94e837baeb0fd71d69859565bf53c324 +aa8a4c70d6628330291294c688b4368c +f90fb644e5676537fd518af238e5b53d +01ae385b32316a293b12a24cef98aec7 +8147c04c4bdb1cd13c80f1c2ccd53fef +78aea2f4deb7a5ab076432bc27498b4f +e62ad3f1124858000b896cb81dc8022c +93a33a655890b2e0a2d1b2378a627310 +44ac1154767ea5f3812f7d9c20e5b2e2 +22469605ee7700caa5fac81a504071af +4332dbff0b486e4399ffe606bd01d96a +9acb4d933b7035ef3817c41a68398f6b +5b5e129ae6f6811c49765b006e57b0e6 +4cd8d85dd53a4f0cb755cb42c9e21fed +ada978ecd6b8d875f5666c331a16666b +85e4b7238ca7ef49d71b9d45c1ed4255 +ae3349ec74d4a76a814cea551e04880b +054038d11ecf38ad011d34b5f4ede979 +98dd62c349c117b9921f9cf65599139d +6315a018a672981a1032936ae2bf1e04 +5727db41fa9fd79080a90e3f8b64ff2a +79a03b2437ccec25abb84582a3951ef8 +e3150e995bc1d2f068cd7ae9010cb2c8 +cfd170af226147d800597f96da18bc48 +5593771169a202b88e1a9228c99951fb +d5feb8d16b73cb57b44db2c84611bcf7 +189727a05768f18c5d8218141ae0849b +77bf4ef7a275bb272b5c6e87beb2c11d +ca50a1b794e077cd1d52f9b0ccfda1da +737e2b5c2ec75bd9f73054c9b09872db +c23d91ee387e15c570f1b522f0ea6e0f +a2a8bde00c33e94cac9659ada3336fc2 +383fe980ef6f174c2079e5763d884423 +f15ec1e04ca3bb2b48fb2fefde3e1467 +38e6688ba3a0255f9ba134884948dfa8 +e4cc26b9dafb9aaaf809fc3853fd3c21 +da02ac481f0cb1249fdf26c1d21bdef0 +9e7ddabd3e8dc2ec5b169fe68f1c1f11 +cfe9e678a4a36844b029c5a69e3f4bb6 +6268533806269aa5de8cc2629361c3f0 +7787d176e6bee8c25a146193910d57f3 +fd5ffe08b3e30d35aae8d398ba484fef +ad08c3740e9d9e79f8659ef3f60b5255 +fa10c1929f1daa8cebc07802af57c3af +653f49d4ab59285b849463e49bec1479 +c49a162163905045ba9bd6221ac27fee +670b9c8b89e2f6887b91aa94f349d67b +4ecfbab7887ff2c806a05e7fb32739a3 +b96ce8bcac6a83ec4058e01da8524854 +9e58cc77d42564e86b08b9667c05eb5a +bcf2bd25fbcdc4f5e6c5ce42aff601bb +5d7ad2d52cad15301c45195991169c59 +b40077fdae1bf82b47aa1190569a8bc2 +23e228a8618b945f5f1ceee23a402ac7 +8bdab8e83081ed1dc75b957bff0e22e1 +f8cabd026dd1ce2cfe1961214113fa64 +40d7e199015a219695c793c210062cbd +0626bb19b28deb280937eca709e20fc3 +0ba98201041fafaf2c904df79a976ac1 +8992dcc64a645cfead9f1e0b17e3f5a5 +3f9a15829c33c1042f2fef0c2f3294f2 +752d2cf728930527a8a8899bafd5e546 +0d7d601375473adef0b9be12adc7c6f2 +6ceba6bebc589305b8a8c0324c3fc9c4 +3bc3a1e5e2573d7cf0c6c430fd8a9a4b +0598cebb1925418f8287a9bf1bbda994 +b8d6dd12ebce8929056fee66f233eca9 +b9b5ca2aaaa4032b8782e879a08bbfb1 +5afee565dbdd18af82b49052046eb34c +42050db8e2ac516ba9ab115d6dea1f63 +5fa14cb542b88a406f8f345add11243e +13d610a98e76eb788fd275dc3f289dd9 +1f74b1a919499b5a0893283e5813a758 +761050bf69bcd2e862516bcee4b66af5 +60c521addffc03d4bd65c88f64b98f14 +020ece14e25d21c415dd1df6dd45e0d6 +17996763192849834eea731cde7cf784 +b8a26988c0ab944b95ab841a39ebce2a +a2e67a66909edc63adbbc5953da85252 +203316834d4cc8a47b203009fdf3bee3 +9907b7acad69e6404607779cfd6e486e +9ad8b054be919babb73430ca37c44cce +b1dd94386e43688665d79b9efc6760a0 +def5709e40acc22cf8de0258937bcd22 +abdf944fe1bdc4d1dda0200b2c16f9c3 +f88b5484c8f5f330e07859e8e774b777 +83e3d26a2dabb6e75400e89c0df9db1d +9b6c7e8b544f1687774e9fbe2ad637e7 +f721ec406508eef96dd7a61c55158897 +284f1990f74aae22b578ddce6a2aebf5 +be9deb71ccb58fa606263db0b09d0885 +2a42430693dc774072232e65bdc243f0 +20e3d74223c2e33998ae95ef77207d7a +9a677b660c533b5eadf4fb2ebdd4976c +c58fba60d258c8bd19c9707a8d820aca +ef4dc2919e50d27350d03d147a0f8510 +758ef0bf1af93c820061f3170365f0c9 +2797a9a4bcda45912d73f48df0d23ad3 +bbdaa69ac9d974a94af49225b4240d6e +89ccee15c13515dd36e62d302e0f3cca 
+87b715c4baaa9aac82927a54a80b466f +ee0bf6bbe42e952e37e67d1b3fb62329 +9879b1cb6ac541379794bef06e6eda9b +d264d3f7066e8a628de5af40c626dae8 +410d8a298b6c9e492915eaa6d8e71e3a +0062a143bd9226248e81a850af05f2c7 +0a64894b356373b295bc734bbdf84e85 +2ef67236467f38fa17a13cc9ca3baa68 +5fe5733ebfc80a7b99c62aa6b6e2fc91 +dda943abd01141d6bb376c288ed158df +9a06df989606035c3647a1a89eb04ea7 +a6dbc89ee7fbf30dff0a289a88a7bc11 +91b57e6fb40556c735a7fdaa1174c6c1 +38d4e7dd7160a5f9c1c52885b389a758 +3f1c7bea79803d1e650e0298df089b8f +b8b09f7a4577d598bb42aa2fe8dbe631 +63a65ff7f216571a222b46732f18fdf5 +88c4ace4699bf1b5ef27c1a0d3c06b44 +6a97d4c4980e306e1de0224917678e46 +fb7428c91e23f81894189cf75b6813ed +b254bfbb0de9e4070d3b215f86243998 +4f8664451c2cebb274814ab27b66272e +165c30b5c06eea47bf2a8a0c211e0623 +54a1774c71b1a77faeb095777ec0aca9 +c31112a220e3873dab4286a9e7d85b23 +47140b6a4bb4a9f3bee2ccbc0f2c0497 +f6fba2b480cafa09fcfec83e4de7bc4a +041beebe432bb1e0e40df840c63bcaa4 +18e1e0ac009495629b57dc0315cb1147 +26ded3ce83f3ada2a3a2912fca15c76a +cc5025c93702a7df191bcffb2fb26ee9 +546cd59f9385693ec097594e8ec2b436 +c391eca607495c15f65b6bd985eb4664 +025311d62cfe0bf5229057efd6f98266 +163cad743c5cbf46cbbadbef79335ad7 +f310e6ab2e63bff6a0d2a28c06e5a2ac +329aa76700da47430a6f7d2734163b5c +4bfd4b739d6b082140fee30739fe62d5 +59c07aa8759f0fe76b7d0fa2d1693800 +ba2a64afce073003ad051c860326573f +74c882684aebc17ecef1b392de9558d1 +81c79a400a312a09d51cb13c0397ff99 +da469bff4f0a9ced4dd4578c4fab50e7 +24e479124b1919f8ae1b2d0e515cbd1c +2f9d9e7a1bb2a7537cb596411b0f2918 +d02d5b9c81aecc13ddd7c558f4bd12ab +028ef6ee5b02017e32c3400f17778f01 +b4740d741000d8fc68998cf9c49e10a9 +98eb3cf5fdb88b74ec9c933fa58793a5 +a5269574a1dbb3e6561b0806b7cb3c20 +c7505cd1be825f907abbd8db3e01d39e +13cc87629cfe7e4e4dd23623be6ded40 +27920bd6c76c37170f2492fed6421bf7 +73f986f04297c8d179914c1a7dc9a405 +73b4c4b43b0e421803453b1f0c4d9c98 +18ef3d0e189ea0921710b97fd6184aa7 +8dd54670e8f9b5517aaa7737fc9400bc +657ebcb3c2dbe49dee30d0545ed5c245 +6afced216dc53685a47755f79f06b7e9 +93f66dde36e6c2552343b73dc2c0182d +3f80b78d63f12ca5a478cca42cfe9c39 +f6378e861d24527135d332cbd6912bc9 +ed5ce0d31cd6e4fad99e7402969d98f0 +3148d4706d35989a3616ded3daaef933 +5867a43ca3df57f89c63cfa1f729dd1b +d7effa2e5629666328cfa1032729d81f +86471cc0a058419f457de4e45a596b97 +fbea43f7e520035a2433b050da0a0d05 +52768460b5082c79faa90e6a458d1fc8 +1225d9bb53ae88b28f053b6207310afb +e27df3b8ccbcbce48e721a08708342cf +de806db6f40a059132a65bf9b6e905ac +a63d925e08dd02621a3b801b1cd3adca +5181b5dd0e9991d83db7e6707d4ae930 +8f0c3e9221568b5f0d46f55fe39d24ec +fe8e905420713d222e77c7ca35cb0a87 +19fd126e9ed57bc44b97a3f1bf26ece5 +d2513afc5570bbecf83dc926fd262a8c +dad08e40fd33b959cfb919da66e428ec +3aaae26a2b60d70306e66e14add18186 +e516043b549128b2b516d34abda856d4 +bfcb3f35335f7f29edf7ef9d81450296 +300b872c446ec0207a4118546468ce77 +13c51a717b3b52a8ab2e3e8ae6eb7d6c +5cc33942cdc72a9f8730c877d0b8a9df +bb8e5448d92a06a013c47fd1b3d47552 +914d2aac89f505d46a60963da82849fe +aee9ea8604b7818797f94c5341b7cddb +cbb7a9929c7dee6a76db790078cae835 +01adfb4b31117023b41e7c9bbc1258ab +d30ac7dec26f8fc990a46f967d8484ee +7b1b6dc6b52c8989e88186e872500dc7 +c8bc42f83eb8f07b2a873372a40bbe65 +4dacd6fdbca2158bbfea458ab037764d +17eaf9aa97001e6d808e9eec05e5680b +df3431aea42f83771325eb687b1618cb +ee84be277d71ecb89a0649c85768a9d6 +853f82b9c7ab3044d4dfc810f60751f1 +d4224df7223bc39da117228be0f424da +1b03572eecee5ac0bca685cb0adc8ace +609424a5c3aaa78b7031fb158ee37059 +0e958c3497650f535c9b577f487ab1e1 +8bab8913aabbffd3a0d368dad085b7dd +fc142944caa722df2ce61d077cd499f6 
+1fa87b7d4665a3385d4d45c61c72a58b +0916ca61df22c4889aa5ea91fe923adf +a9842c9ee02c4f3beabb8548e96699f6 +5a4989ecc8fc9ca65a4a9e14bbaec26f +298eda161a181449c6823dd0ab886ad6 +e6f83f495d3e97fca7e3318a10e97d90 +121d0d0d3bfd0e9933b565d3c2c76694 +742fe401a12fb8b84354b41235c3e51e +d86857b53e30cd820aa539d22897d6cf +828cdc5961c6058fb865582c2f337152 +ec70364182e30f417bd9fda4d0b48386 +31e14ebaa86e987d8acef404c692ce0c +a7192d17953b3ddd8b47d45dd2a1c00d +8fbb3e67f6a4897dae613659faff132b +8983545868718109f4754877c3b3a008 +ca3a8ace82f4a1803ebfacfb45afcc4a +7534ead7b7e60bcc2d2468dd84ee27e3 +0c0426eed150759001d7bc81e8996563 +8f23dc27eac11da067927a679e46de98 +1b1b0ac8127c055f86c3b2b20e821291 +8a42f8a59e37f95e245fcec7744be9c7 +cb375701abd743979be6a779be727869 +4be0a9426a783bdc2f66cd5669c24669 +72d1daf05343e30aaeae251f8b509c9c +43519e7f85521bedeb317d280aaa8a22 +986fcf8f6988941a13edc9f1da52469c +a0193ca0cb0fe3006f43dd25bf9feab3 +677b1e0dcce6943e391049cb4730ec1c +4ca9e5295a72bd4d53e6de963685e7b7 +272ec08d6052f83dc959c72a1995d019 +13b8a049f10744e96d5a3d7e4cbce37f +956afcce869e3a42ae36c02e728fe91d +3258bf5db3b835d0d5518813210b6273 +7aad7298ff64e2fe5b1eb01e97ccab29 +427c67a1e9bac4f1d76a206beea4e5a4 +7d580879af13cade4e9b0695dca06913 +a11d42380cbe85a5f2e376340fbaaf54 +50aada191e16eee2e94744ad99ae508b +0a12ead41c2b98cdb60bdf7262070657 +c110c16a61f9f8bc3745d80b4482007a +1b1864f878416b79e305f2d016a8fdff +a1867c2fc9ee89405f6323f144979a0e +47dbe36c54ca84cdeee0d66f0c83fd5e +62317c14f6443946512f1f7d34864182 +d41d8cd98f00b204e9800998ecf8427e +be706d7ee426258c1a2d0d93c9480826 +e0aaea997f7c85eaed48ef7b1ff1f0cd +eae69d8466dc0ea932e5b12cb7e33859 +4bdf90e39ee52ea0ac04ee42daf95ef1 +d41d8cd98f00b204e9800998ecf8427e +95d620c47df3e6f94ed5aee991fe6553 +ebd6712a8e97c2bde3924cd3adf08378 +297b9279e350dc5c3bc0aa47a3ae6917 +79fdc0b5ea0d68709b95a1871fb664cd +ef582c7075316268a183077137d1e0cd +d51d05e9f8770c2a35bc74b735716090 +6a288fa96b84f63988019ccac1dd8a9a +27679b19b943fb991fe1b4ee049c0c03 +01782c522ee3acd121abe9e03d577524 +160cdae6078be56bd86db8f57e5d63aa +0f2c80ed2977f60955563fbd48e71671 +dbc8dd362d0bcced13ae7d9798d7dbce +d15bb5e24ee911ee79ac5ba5f92c4d90 +958d54585f1a77b5ffd5c9a38bf0ab05 +0e5a2f2520772b562b06dedcb54b6dd5 +b37d02cef6b170c72fe2166c6573b402 +55bd123a417b0e68a2c1f010508127e0 +00ca19bc81d279d2475bde4a9f5c6355 +af2c601351df2ccfac9544319e90fd56 +c6150c835b78e933ce378000aea13faa +955945686f5ebaf87d96e0f10a4ac309 +381d09e9de818504c770f0228977a976 +be529c19d3fc336031a166768444534c +cea12a2a19944cdbd2346b2d3eff9847 +6a7a83c656f3e3ceeaded7d979027ba1 +3b73b67d52530d1b1ae25ee4aa468020 +2429fba670161305eb4b78082113fe44 +6db7f93421dc7d648bc871f2c7b24cb0 +5419b3e00bcf8e24db18b0dcf2a2b546 +fd2245ec3c4ed1dd4775c5f3eb9afd1a +59a22efd26c04187e9423bc37bea78e3 +5a932442675c1069f6fcb013db9e0a8d +bdc35af4bafe1c2ba55ae91783583085 +3fb9d130de159d6eb1eb5e09936066f3 +817723d4a852f647910a283649e3f09a +edb17316da6bd54496db7f3b590b9023 +83814c2d3d3538bfad02b71f212fcd9d +1511fa41526999d92f5cc9bf0f991fef +4b06bc74d08a68c5a2dd60469bfe0423 +317956baae0f92b36f618948db66a0cf +f33d170e0b976da30d50e1d4c9123e0e +2f960b93243216ed00c667641fd89783 +1ae9982f68aaeab92945342b0e2a1d28 +d1841e6ec9b1b606fd7462175ee239e9 +f61c2d0320ad1e4edfb58b51368f1d94 +4627688b15d9e0a752e7387b49a34d0d +5a0c7f590525afbc94a928b43967c300 +d041f557fcd8c945672c841b552af38d +b7da5f6c5dd18960d0f0fbd95499dd0a +c0c0f7f8d51d0b2f042594c712ad947c +85ff74c88eb3d544effbcc2c2fd9c354 +b8237e0a57e0d05119df3f9fa013c810 +e4ac30c3c8cc29b63442ae9ec06ebab1 +30145705ec0414e01e2e16c4943b9a60 
+e52583d035a95e5bf75dd76bb083d845 +295af57065a78f73273633a3e83bbc38 +6d7dd270655aee5d431e9472edc01663 +08797704b553c2ab372e9f3d5142d7cc +3402474b9e0d60b30ac41e708d1abfc0 +57a9b30defe7d362717af354d227b5d9 +76adbc5b3227dc6fbd6c1db9d906ec36 +d9e71b36738e6ff746e4f68a37bbc836 +4408539d74a4f2e4ec0a055455dfec26 +38149d886c0761561ab6657c267683d3 +13d668c7e4fce5c8f3858eb58ce0a85c +1a82f6d48d2698e9412ee6465eae7cbf +cb1bfaec374614bdb39d991d1dc6e420 +bb26e440fca9fc893954395c5e257213 +2188f5c434dc2f19f9b87f2928ea26d3 +866997cd24398355b2023317f96353e8 +a454d77f89ab9455b9958b74df4e11c7 +4b36c6d28316d2ef767c583937584b16 +51f02993bec8b8e9927ea482c8b58caf +5eff22bbfbe421856d01dc9ee3a5ef18 +cc419ddfc7f443bfe1dd920057e116d7 +67b7588558f9fd67ce4a8cad97fd54de +a9f8e340af334e57c264e4b47c4258b8 +47260ce95f1a915abe2ecc87d130ade4 +ac63d849ff6716352cc4b54153bd2273 +1551d54d8026b2d00a97171881f03ed5 +7de7557c2a97d6f407f55585b2626bfe +c8e178c17b047cb6d65e28a1acb4b0fe +6452c6ce111382edf8042305c599003d +d1e16c963bc95ecadbe66e1228a96016 +a704447dca0dff5e61c0e46fb62e6fdf +29a9f378941de5b25d88b856261af128 +d602adf9e9f9299c03a0c9c2bb6159dc +3f11b431bbf38983b6a35f4a22975fae +391277c4bc03c0481ad485bec7c039f4 +71322d0061c0cc5ee1270681f3bc2e2b +0ea6c25b1ef6b7eb440ea60d4dcff404 +8e027f56c06ed9e43465d70c56c86435 +08d0f4b49030b7633dfe2f1eb7103a21 +3862a463e5e47178a88d188f4ef90d7c +691b52ab475604eb2b186fbdf7cdb09a +2cbe23603f30a41dd1d2aaffe4377f14 +af88e10ec6e763cdbd59ae81fc15e1bc +3c445fd1a12a7b2fb8771f983a3dc4ee +d1e84f1d20292803b295d65d35616e7a +90f4a325b82e96ff72949e1946b46f3b +4819aadbed8aad5cb1040a8d3e3b85e6 +bf197c16b7a153dc83c10d19f89ddf08 +325b2d4e00b546f19f7f666639f89f77 +45d712bb089bcea4cb39978ba3a19a46 +4b3c8b0c18aa54ba53f498a0d8573eab +e5f79219cb635e70f1717c5215edf4cd +b9e587dc11a4df3ea6bc639e917650fc +7adabebe524c2be0980e6a5321e77d28 +2cb48e5707389b2e664c15685d6a96ce +fc55d0ca4b689182d87ce7fd433c57e1 +03ff4bc3eac84c0b5ea61d449b740dc5 +523d87f0f79a63c969ed41300f280983 +b6a0edcff378d091c81414766969252a +731e1671a8fe9d33f5d7b057ece0faf3 +aa3d8a70f235e13e711a53f041b5baa9 +207ae0ed245b1ead4f70c09c825be993 +fe58bfffb092653ad9d17d977ae69e25 +9c991bed0148b50135a9e05e92a1646a +c904797d8ad4e2fbdec579c88b9f25be +a50a2c4346324dd019ae6ef3a7f74b75 +f60f34010a480a8b43f2d5c9ebabe81d +6b11c4599d7ceaaf4cbb4555539c7be4 +5075bc94b4d9d064d27f9573037f0e49 +a479d484e0ed9ee4d3b777d22890134a +42ec348597bfca3af8dea159dfa1fcb9 +2fb5b54921b2282b5b9319e6bd495f55 +914a033878015c07bfc1ce15ce1b366a +ac2140f976eb07579f2dcc63e27aa5fe +116c46e663400818e6249568e19775bc +81af1b533c64ccd744cb2118478392d0 +7da90c32c7ecfbbc4d2e44785a3a267e +83e4cf38addcc54325a295abb568739c +e40ee7b4106f6fb41cccb3fa9dcec130 +05652aa24e2f0d5f24fecf441dd81906 +54ec44a2dc1a6746f920e517196f3617 +1ad823491f98cb99e391c8a5ee073276 +9e6de6ffcf70d830b046c718ae136470 +3d7449283559ffff7740fdfdb9e689d4 +9604b4fbbf2dc5325524e0df8d3e1941 +aae90502c49a1bf38bb3c4b38e14e3cb +2c5aa2698158d343c67b6e513f05db25 +64af7d958138c1c3708586ebb531ccb6 +83b6085f803e0f57bd0ce5ecd1404762 +b6e11592cfaf1c1f4be598c3076ce88e +7324ed4aa05a3471eadaba3b7438447c +ab742edb10ff2c3cbc83d2c6d4538637 +54723755a49d373316dcc919bf24c4f6 +6cec8cf410bd0e7045cf924a9edcdab7 +823af8222b1c23abca6d5132a0af90ab +c27144ad5be27c28a3bb5871c3475092 +572a18156f9a88a49456edf3bc3ac277 +c79dbc5120a4b87145493859c0aa9c6c +4c92906d91e4c504a3d1f757dbbd46e2 +c362be0acf8ec2ebeb121b4ae8aa615a +fce704f40a7895e6a1d7d9c4d44f5ae5 +f43cff3887f0f4a3f4993478f67ff8c2 +d841e3a96834b9f17e99b37d7dc7c9ba +139a458453c2c7c4d528ea7b4ce80682 
+574ccae2241c47c4bc6f16052fd3b27d +180c381edb2580277852d5ed139f2481 +721054d1008f0218531ec90eb2dc359c +b99ebdc3a59a455cc0123df30803ac76 +6b635d722a90a5a4047427394f1d04d6 +c91fe693bd71d6afcfc234e03e51e4c3 +a9fdc981adedda498f375197dd384e16 +335d044120bd2e5de0cd42e38e36898c +696f8f723a4224b9f6d944717133b153 +f241c5c11de8d6b1b5b402ef7ca12a73 +467f7fdc6defc148033206749e5a4f90 +010fdaebfb75355d7fb0383e1ec47a1d +b4715bd9f0298b029b00d86e96e7ffe1 +4d2921d8c497f5456a7a397950292045 +e3fe198dc8ad7b30dbecc2c68740ede0 +8e37e9c21cfb1067a664c0da198901bc +956815a587ee7902db5b6a0572659255 +0c0efcac31149033a9879b4fe4c9f72e +c0a49e728b0871fd66d7e91a61e3a985 +0480ce6e86fa76657d0eaa2b0fc7a5b1 +4551e5ed099c081509c9e5c90326a39e +ab02ce00f110bf5d31f21ed52bd6645d +0f80069fe9e767d6556df3b7c3238d68 +414e15eed4f26eee9b9a9aa78245bec7 +2686ad745217f8cfe29f4675d3246114 +6937b41c92636a9947b143ee6494a0df +ae54aa6ac3af741ced23dc1af42d3274 +24bcf0cf697447579f72331fc018cd0b +a04cc329e903d437d61c976d95ec70a0 +3dbf66dd47268de96da5f48b70acb95e +cd8f4acda3ce871e9358c96bdbb2bd5b +15f2054134ee3f97ee798f6de1405549 +53c172aaca94b39211423d2948962e6e +14ab8d01473e74f72f734c1c5c3798fe +4b7c31f79bef4b4e45d2a50bf21abac8 +b47356b219ecb00fef7cdc3c46bec2a1 +c3e74088a21dc0a097de0b09e6dded50 +0828d74ffb4e9eb28908723cedd0fa58 +343005ac77ee718c1eb7ce3967071765 +bb103acec3cc7518f38f1dfd236c3b6a +ef18ca317ec1be47108704ad26a9ddb8 +6209dfd54e637e1e99cedf909f8819a7 +d796664f3d5f561ffcbdb4f5a09d88f2 +6d79d5024bbc51476a883bc23ef32167 +94b7e975922bcf50fac0a0af6575c0c8 +1e9400f9c5c7ab55df1ddd1ec252b17f +88222b45f50daeef93a762cfe54541ea +25f0cdde9efa9ec0600d84c3a2793626 +5b54c5bea0484de0cba3a49d3ba35453 +80e83f78e09f1e4755a25f976e6333ce +96620790726200f951ede48d59d091bd +844724bd8d458cd6c3a571493a29f27a +fda9cd76be2685b8307d5c3bca152b90 +f8c6f79ad80862dd3e19aeb2344b23b3 +0d040f9c1e477f09f928070d0ee266c3 +869cd2171413c5a2dc21947d039b1976 +cb81d7bef17d96a50243444e36077c74 +8c3cc78e9e40a08d61fb817c87105c4a +362cf6d658c0395dfc420f8dc4fa8d22 +abb1c8bb7bc7691902335626d74cac2b +70dc9a9eb64d5f9e6007503f0c21003f +d1453aaa5b05b7f314af63ad75bdfd99 +ed490c6515478c8254612eca47006d56 +a0c45a04fde40dd2d9c01e5aab30b526 +61d08e5ad4bdc750e07eb29c01078f98 +48cc57f026943ac7a1e8f077183decbd +fe7df947f2619d30344e96a4f6f0c87d +113fae229642098dca4c638127ada743 +b62d517db57faa729c5a941c9762ba1d +9480de0ca7ca4a80abe1102ce2f610bb +2add5cc06a6eb5653bfa0c23222ed632 +fdc777174287fa385228449930cfe862 +06152ea9146ac173be206e742db77c34 +9cd10bbf8d2277d1d35128c121b31c8e +9e9a07c2d6e73be51467d8cdbd2c75a8 +69abd4ab7acc4cbbf2b297e2c3708742 +8f5049c4e85628cd8f1682ebf34f1e16 +650e314a6ee395c16d310fb5a3c8893a +3b9c866548e9141e2f6fafdcc765b83a +c5af4993a8630b89f527e01b3e339efb +02fed860a5c4cbafa5396d20186a62bf +42ae6a11e2e0cb2638785f7662f749c5 +ccdce7ae45d89471eb9ddb8b26b74823 +34b3d5baa10287dc6e1eac51233f31fe +f1a506dcee3290487be34248ab74ba6d +a32b7cad35044154938cbc5183288e38 +0e0e86e38da0a428fc27e0d39f889ef5 +10052e8cc3503aec16b0473c3110e1aa +7c64fbf37f23e5117cef47178abe149d +9c7993c92b001930668adc8495f6d188 +c9407344ce4137fef8de66e62f3f719c +656db0c9220e92e5ae17ae73a955dee0 +e2ef15a69a5de6406f080f2d3697e212 +370894d9d20ec96a54efeb57199147fd +c15488d9303fa2b87b44ae93825ac38a +840acbb3c7a042a5f56a0423cd330fd5 +4eba0366eae673ea84482cc8cb7ee4b1 +4ccd856a39fb2f601b0822a23517dc57 +db28235707452091300c5c9ec3456062 +a51c9c810fc48d111ec457142b041801 +1ce3bb37b4d07fc1ef549bffa370325c +ca8d1fdfd18e06e40036776e705bde58 +50bfe6d5f7a2f88c1517ae9154824baa +c2ddaad71b84905e4362383414f60908 
+20a9f7dc9f2c8b5d51fb695dc0c74a93 +d4eb101c20feb0d2541a9f2976b91fb6 +a5bb0a6781e2644e2560245297a7f6b6 +7fcbee4f03b09f995273f2afea158bf5 +9c926263d35fe49e9863e2de8763eab8 +e5d7396bf7eeb12b3d4a1dd583494c8e +4271cd9ffb5aca4e7d12596ff2815f97 +6ab0c762f3856194529c3086ec505719 +daf8ffcede5195012c1ab8c21855ec37 +d587b5e363c5fe13cbc782b8a433f374 +173c18ec2b71496b07a7580cce48ec58 +cd930f987158e0ead977fd2e1db461bf +38600cfb8b0562241a3fce98b1386b65 +5ac6e4283ec1960e5ea69d830e97c21b +8bd949d9a26c85e1cb3a59caf8403138 +640960549e2e5e52df9fd6210f880658 +70fb539819a5d28829dc75d384adb8a7 +a695e0f6401f391d3c86ddeebfe44244 +4d3f29345633e277688657246104301c +daa45899c7344081623db53d330f4bfd +e053bc37b236978c0fdd25c3cda3bf35 +cd74d515f333b93cde34015ce065c596 +ccd12f5d8c679ac6d0403bcf2a3f5abb +2667ecd0bebabfffd80413fdea70c140 +74bf3d6198633de5514d1daa1584b405 +0e2558f68606ba5a27a757636077154c +fcf197acbe61b80f9f54fc2381e9a4b5 +834b1abff4edfec6d797b36911b7a50c +acdf7000f89471915e384a7d66a8fb54 +ad7b14947d8eb10eb48e7a0dbf4c9c0d +31c6bf11f59f603a28005113319bb7fa +96f6c1604c51ecfb03883a87895a23d0 +9684b86c622dc78c627ed07a12abb55b +8a4f3c660ae05b0c63a732530e3b36d2 +a45dd32389bc6e025859742aef855256 +f218f8ebcf07228f9a274253d100e6c5 +47f490dabc2761cb2e48cd75527072be +e39fdc0c9aac34c2150f7ff966bff5fa +382c02c722a319435b1b87bf45793c69 +9f2508f194b974d7348b1bc98335955a +c362f4c16ea42f189f3bd4be7facfd8c +21b3119667fc79ae19abf0b23fe2eab3 +e6834440fefa21652d98abe9b900d495 +ef386116122b5020d6e2649aa137359d +9e9d682bd46e1fbaf5cb75977f1e7aab +9a415f1bdaac1d194e7e8c0247ddc6bb +872d9c36ce5b3345d41b703fcc9ed5a8 +d592daf19ed6e7fc614ff45c215ab940 +0d512a953ab122f179f7a19d0c6c4bbe +cccfe2d7f54f5a402e33917ce73cc5b3 +94d8b557d5b8ee181e9ab8cd3d0260ed +e437afc23af3df36ce8ecb0d73b6b0f8 +afda706fb885bb5260925943d4942643 +d0d133438b09c0022ea604e9c76bbf62 +357ea46d4613bbc043a445ed698b16b7 +86015e7589d3802cea9444306cb7d1f1 +ce9a00ab3470802171abe58d4ba8ad68 +df1fcbb320ee9d7780ce3e2f2df53495 +bee65b4ebe85317ad75f321c9c112a84 +04a6fa4d7b183ccefe3c102838d88fe0 +3b3d668187d23db02cc2b19bf264ba10 +c5bab8de3c2b48c623c7130c38286d18 +ab9e01a0d24e56337e9383ef61169a96 +c608738cde90ed463c3b0e4e81611f05 +d1de09eebfc83a5b161b7b933a86e9c7 +d659c15c81c1d886e8ca7effce594fad +23e7bdb66d509cb66bee53495b284782 +4cbc471859014c0c255f57ee38ae53df +4b4b93efbd4dfbf6cf4b376f845431ae +8409866e35c7d0d402c359030c42cda5 +51f3b94e1d052d860bc0760cd503dc8c +5010762b4900b381a440ba94c29fbf7d +9a8b31ebe105d177336c930eef13c548 +aba363ab4a1728ca88d01e38917dbe95 +82c4bcd4f7dc564b02356655beb22046 +968ed146e407d19fcbb0e0a7e43937e7 +a4d21022e4a7d536fd0bae156029d054 +b75888adee7e42222ac580ddb5e3b504 +41a72d69d8ece3575c5524ffd43cfd10 +5296d988322357f7be2c6e73bd1d21b3 +55b751bc23cce0b97d51cb0bb7ae8e90 +4c25f84228a2aa19c0e257fada92e076 +6cf0bbcdc028e414db3e1ca8424ce99a +636ad9df3f435df7db29bdf59ca54869 +05de108c7dd546e3db8c9cc01b2d92e3 +5849497f4991860960db2e870316b7c0 +80dd7c666a8ccc3fdccedc75670eed6e +7555d8cd4bab1340380555158f6443c8 +33eff2755a38bf2a046cf0ed7848dee1 +fdb572201ae17665a3f20054f3338e0f +96526e2306cd9a464f9e7f1e789602a1 +a9bee1c9208c5ee48d1bc8496c91565f +a3657858fb9f40fda9ede357b739fb4d +c1758a4bb8c3c90dd7db6b67bcaed827 +84a7950bbd6d8e7b1ab6449c1ad9f763 +d41d8cd98f00b204e9800998ecf8427e +479191d89d0ec2a048b5a6c08a3805bd +f4fdaeea77d8232402e6a5380e653723 +c7d6462086cfaf3a1a37ff4c74c8ecf3 +7417700dfa23ef81a00e45ca28064945 +74769e1d252f72cb7771e343dca761cf +75bf97489dfaa915e345a23cb513ac6b +f4b8ddaa1bd2babb8eed7bdf7bcc821c +d41d8cd98f00b204e9800998ecf8427e 
+f1060f076d8494d8fe2f52535ffb828b +6f7cb8276a2babf56404bf1c359702d6 +89bfd59f41413116bf39b263135726af +4b378477c8af74232ba691d3369be558 +5c713edb2e0f8d713b96c4ed9e3df317 +0d3897c2c142abd5d47ca3865b4b6caf +6c1ba6af85e1efdf32a994be8f676ee6 +1102fb85ff10fe3549cddaf39fc39cfa +2f500664cefb0f69124a738f27e971e3 +84b912ff8a5372f712d538d242e77691 +1c8cfeadf3b4a2840f2f6800f6a996b5 +6127b8cf899db470f2e17543a36aa0da +aa50447c4e7088ef36df1844019f912c +8bd5aea4bc8068a81795ef641594ba01 +abe3d5633eb5c6c881fbd7adaa8912c9 +5d46c2bdf943416af31c6650c28a6fc7 +26fd597e6bf4f1abf726700295ab56cf +ba42cd2e0e4f8fe5080b4946ede71006 +89f361fa7a234ee85df88bfa494aa875 +107e719bb11582124f0b706f88accfef +7606ef8e75cc1708c98495bbef242695 +cb33fa3ea9fdeadf7c7ef0bdbaa06567 +a2eea2d156e3ddc802fb185d4a4d7a22 +03e3954d48f152b86e269ec706a90cba +5fcf57801e2b4b256ff248240b3b9c34 +edf1c871e0ed341d24a420708a6f98cd +a25f05a633a97741ae0ccade75c4dcb1 +70383aaf5bbdb8168106755bfb5fcf5f +b77bf0e97960c5fa659b2b11d7d8d3e8 +e0c3b5e5ea1a13d06c131f1ac98ea9b3 +870a58ae9b9f6862d7a4bbb623be915d +8179d234356325be264d3bf596411573 +62583b5f61f7705f39b11d2c9d79ccb6 +8c56d650c0bc6216fb7414149e591c9b +3824bc31194e9317c08a7d733e38857a +b29c72a17ae06486ce649798c89ab266 +8d93cc15f77ad3fa0d40e61599cf881a +e98b305c9c18620ee538263538417067 +2def001d72b3d7d1711b33522d45d9eb +785874b14c12bbdb7a8637f5004ca86d +5bb14329e2cf7112d93d7f89990de8bc +d827e5d1f651c9155b89e41a51d90406 +002df920258723c82e2c6bf22854b780 +08e5ce8ed1bcbec7c4a5cd4529b6b7a1 +4239519c2757bfe036a68cc4d9670478 +4defa5b38e5375c578d7e1561d58e563 +b7bdbfd3db32134ef4f16417ace87494 +2eccb54525abb7d19302bf7b079e68f9 +f24623dcfd27016b5ed7ea2e9adeff7b +023dd2b9e718e11dc258e69de09c591f +18a120736b28d8ce7a612bacbdcc099d +c4a311fd37327c1bbc222f827f0f1c1c +4453edbb9c8cd8634aa785057f170856 +6b06911a64f95d5f9b49ceb70c0eb71d +662543f34d49c3d6d4d7ce296e668526 +9344b9ecb3ce8709361b7a0180903036 +b4f233b116572a74cb0d90ee93ebbb0f +40790b9972ecff045591a0bd869a3ed8 +84c435d023351705bf97d8d8656d6be0 +86600c80e714099fb1f1a799876c5a8e +cb28d9951810a183ebfa03357068220b +3ee47cddaff0d6e0ad43fbaaedcc15a9 +f5b294a8682676315133d5549518c515 +53c8bf11f19bd4084c25fb18fea75a59 +3648e41b5162dc5255f2a7ffd220e429 +5a3842bf510531c210c96e7f183ed15a +ba5e2b6e1e36b1c4c15ec59695fa46d4 +f0f0589c9b81db057f1da95395b6100f +1683a562fd34464932e174a70193a00a +ff7aa2e8eb5c4bf6399697aea0276ecf +4c0c23910a31793e2876d4953eb143e4 +287d6db738307e38fdf1e117ad14a474 +1d5865023e1da201bd43543a4f69d58f +b253a638f2a49106ee2c9690ae505445 +f31b884d359cc7077237b4c7cb47aba4 +443056c2b77fc7725259a7a87ffb37d2 +c4d2257083430bf299378bfe42586cc7 +c135eff5619c63e5accdf3a47035d952 +d6472d24f6726ecb5cc846a652b2a4cd +11bfb13aaf692ac023306823fc376d51 +840f1e396da7ba1cf01bad4ef7baa3a7 +e233d88455de55fa92b6cd77d410f36e +6334414d5f93ed6baf8c3cf8e4bc23c7 +e498a60b11e9c8c5465b51e437256aa2 +828978a2a425c23b4e5bd6696966a351 +ec3be355be12c5429e8e33d74191f20b +66a3682f3c90dbc136270991373e6123 +f88bc6eafd126228df852d30527fff31 +e6d10ef8df1ab4905ad046b49f131078 +32d09306c201297dd17e8b37eb32e5a9 +59dbff02992854b402ed0db653fd3277 +779575bc926cdd886a6a283419ee3c41 +5bfdb6a2a02ce14733b86bc4c1760d37 +608ca8c9b7c003ba766666c2b197eb30 +2c7a2416e613ff2f007211100e2a3095 +c8dc4bc9d788f47d977c0e5030b4943c +39b3012dca6b21ba02e9bd9b4c3e1e61 +0f336d244a0bf4cb42d7e15bbc6ad7bc +5d43a68789423bbee418831869e42edc +8a4421444caa950fc5e2ba451544fc29 +85c474435803a508609ac0a3e5a44d99 +3a4fe741e293fda320045d8e3763066e +519524a4f401a47b2a1cd92a9ea45db2 +ac15367a30f9f9f8f6495bb449b04666 
+cdcc6e5f69307a3b27662395b992d5b8 +0c026895fb04b0d74d50c2d22f7d8356 +7b99d5ba87da05f56150057a2d5b1a74 +6a02ef195316963f8717b0628122a426 +ba385fa2a374eec583bb568de8064833 +b076cae7d697b132e324f675f6c48765 +9b9889e0ebc7afe9f23a2886764cfedc +4b32da81eff0ead40ae0f986ca48361c +b58c75b5ccc54024fb6706377a871916 +e1d1ff568671eeaba9757f1b3e531397 +afa48389ff69fb1c26ca79f07eaf0084 +6fe01a29930d1ba9533ea9c43c485043 +9b57102849560a0d3a61d54eeeb0a1e6 +a57194eb4484e3278031ed072bd27e6a +77ca618a7f2e10ebc809f90715932775 +c63973bb6cff6d8a59ce4989e3b897dc +cf910dfe0be23320a87fc46cb844f8fe +4fdee135fda2126623c2ab4f19ceb05f +c4e4b22c9956532b8899b643d3ed5475 +b76428383bc99825efdc5aac936bac83 +0f2c9fad35fea3a02a0f2b2c74988842 +9ceb9641436e0faf1d0e962ba1f4d3e2 +37f0d72d5ac6745bff1ad9ad19f251f2 +3e78d04f04be5df3ee5774555c9d45f6 +6f38888d5ac513b19875dfdd045023eb +ecf473a36595a415409ba20194d8c170 +2a0ecc57b9b4674fb98d0925a0711d1f +eea9624a25557bd5e13a22bfdd7dd742 +0a58a0beaa2b49ef4ffc2a68ebaee683 +42437d312dd7a68da6d3e623ee01b2af +79a7aa468ec835523bc8d8e2da64274f +4e2c56c5e17aec6e5d4f26053143a3fd +98927ecb272004c1f45eefc1e6698264 +3a3cb9ccffdc235cac1582d6ec8088fb +cccda001e52e372cfd8357504fd231f0 +a359c6c648781bc8b669c729c0ea9532 +2b507e51c20520744412b97a5c167ce6 +3dab09ce4db82219f59322c8068597e5 +2e569ba0e7c35e23a9a2b62377b70d30 +447ed3eafa19625475120f0ceb526883 +017895177cbaeb6441205584f7018e7e +779564082718e3fd92a32069bbd12c06 +271da3bfe787590fea6ee99090d6f9a5 +0bb0fc5df07da3663d418e2a8bb13f5c +a81ebb1389c5f75f150a07e59bd2a27a +73f347b4c2b8e218504e1f37ba8451a8 +74a2de90012ea85e082b5f08955a75bd +becae1d1d9ee277ea134e3f6d98b7e5d +e3c5937281c74058ab9c7430b0c5b61e +c15cdff210d8250aacdc6747c4823dd8 +1315728a52d23d0b9b9719bae34ab8bd +8197e78288ef662089e23a3c19988a92 +9bd5642372cef5d3d915b32eccf59edd +6705fab5575bd4b8ed5fffda6304220c +54a2407df47cef13f7c6879ff7b88468 +d41d8cd98f00b204e9800998ecf8427e +757d9b432cc4b31d284f0ba9e1918c2e +6d0130ac751d8b55cf9853410d8a76d4 +4d99b5c21bf8759f2ec78cdf0742ac68 +9c2265c54e15d6f127d269d13c02c98d +68f9ca9a08b33ca85910c4d0d955aba2 +94bac64a9ded743fb00abfd9b9517db3 +cd1f4293f589aebf2cea5c39dd2a48e8 +3f1a91f5426d555bd71396f70f4be656 +833b17b1674efb04684ab17cf11ae215 +55b1e3e0284ad230cbf9eee634fb3f1d +99a8752336092bec824ee7ec2640a9c2 +158bf81b0f935984e7a05a94285c8589 +0249125fa9c9784fe591d89bf3d61908 +a529b033c7ae3eb41d42de5d53f76058 +3072524fbafe110d46e24bff3085708a +a7274b38aed0da9bc3223067132fcec2 +8340baa99f55b5525b0ee59936b2d01b +184cf578257d9aa934afa4098e7f7d7d +70353f77c95f25b7d3e66f2d97b10623 +6f18b9e3f1b95d359dff94a96fd8ad97 +c1c13c7d93ffd5d500cd7aa3dbd306b8 +b0b6c072c8f4b868238b20d167322e53 +e36131e5df6d3cacfbf60e144f00e907 +759361f3f4a18443d719cc27d593267a +e86849c168691fd865881538f6c90c04 +fc94234d28256700857388213f927c12 +8f5835ada9526445e98b4a79e027592a +14a0965e2085ca3b38bcfea33325a6dc +d0573072831b02be0b2ee32acbfa82d4 +d7e3693a24ce0c174d653100373f358e +664f42a44ba55d298f0d5b90c6aa9d75 +373666d0a4e7b040e6b1fdf9e5d77cab +45305c86c110b07b7634a89eb9884028 +ee160377752f5c42f391246a473984ca +45fa76b5dda321c5ff12d20e142b0286 +116b04e908df9c2ce65f1f9a3f68ae2b +a826d5d048ba3e312a937feee812f441 +38f975217a1a0d1a8aaaa527bd6176be +b2740f8d0195ef25dc5b10ba946bb4b1 +5f5282121bb806547994cd392d3a3d02 +da52510a6dd0437860efc9bef64c859e +279bd310c02fb195c2bddd936af3c6ba +139cda641e95056a296b9c30a90851af +8c498f88eacfe0298d7257461829d5da +d92a757856d946607bd4cdbb95183a43 +c1b49086a71fa8e4ce1b4b479af1c62c +674996585877ba3959b2927b3cc69871 +b45a4b629b1713871744d9ce10a72007 
+dfb9057664a234ef370c001e5db38593 +cc188bdb21516d6c45df666a6716595d +6f53b4f3686168377bf5e67c1a29e356 +4f44922d87763192d9d449c8f31c298f +4220ee17832e8df40538bdafbbaf7a93 +2a45a8f98c8e69ca5ace62898c785f1a +17aae32fb8ac7900b9f1e721902d2363 +cba9dd412ddee11506d5052ebdff2300 +b282411548fa7dce1319734524e32ee0 +67dbb26870b9d022cfd248ef30c5341e +753d666e38b6517e30ec3ec7b4d43269 +bdc6dcf1d96ad0f9084a00ea716473e7 +d45527a8f7a8cbf6880144255f2d7442 +c2e69cd32d94ec17c79e3a9bd30c2e72 +14c7e6ef9de7f4421da3b51a0aa4d88b +55e52c0ba6c9e96219c8ac2e58bdab56 +2dbb85d34324fc60c77b9f13356e304e +4ddb4a24981463e3daef7e5c7e0cd0e5 +d649986670d18b01ba24dfb57ca38bcb +44c93678331019b3abb5a28b563558d1 +4167cffaf2008e2b9ed3fd2789fd031b +87356505134a00da7dc1c24ef128fc42 +7922f52af57e3934e77b458b167769c8 +ee2cbd530738e9f6e7d29e9d85aca09f +a0d13e78759433fa31cb78a12031299d +629c2a3a93c965c46722fb541ba36bf4 +869b84304f84a7c681884b475ea9c2d8 +1476870d2e20d5196213aa1add995c33 +f68027bfa07e535a3248726917f10b93 +cd8509c1631d64763d6ea2c3eb208e5d +42f5deda5500d4e8ac7f0ed4f52f12be +c1003c5d63bff748f9b99e815e709071 +841346eea81e8f15af79d00e0e51e8fd +bbe21f040c35ba0ce78a11a1ba51dabc +4158b6f68874d7fdecadc6c9c6802888 +c45a57660d9718648e9c276f26e2e24b +45185089743aba8ea447e29e17134b06 +00a3feacbfad8e495de8c198ab7ee2c8 +1c9650d1c846985630a111a4acb0fe0c +58b691e142354f389414c5821e91fb03 +70d1aea9f9ee46032eabcf3c031a9850 +e974a17fe7cd23f6cfb240011733afbf +da8432bd4a41add2535cf0ca7d795649 +0e326a1d0fd11d3529250e7091916486 +75c0d1d0e521effddf700bebed125485 +4bafaf99a44787b68c515000d65cf11d +270150e870b329b7a89b5999495749ed +9a7cd3ab86fba3293c0b7faeb5ce96db +c826fae6659cdd8d92cdc914d384517c +27c6c0cea4702e8ea02891cf0c709226 +2af7ff4a61f1d14ced8061186c936835 +754f68e373086de1d735e0d6d5269f2e +b77e4120b0390c02af58c6e338022ac1 +9485ea666a090bc27c58b6d57ae6a25f +edfaeb668444a2d013848f916f915671 +b631b5761aa77860113844c1cdb339e5 +bd5bba39473edffdd5212b4ed81934cc +2f5d48238886c68fe5251d9361510013 +5365f3cd68f5f2da96e4e3bfb6b05878 +ea9f07b7c8e63ceb4ff1cb7ce3f0d50e +60be5111874ed17f0f8c851c3527413d +1bba32cc48e5b45cd7262798863b5d50 +a877a91145cb097ede194c1c1abae3ba +4ec98d42b504d8a6206cf635ae7cfd36 +3fae788a1df09523f1ac850ecd6d2f50 +af3c193b275e12e6531759384c6578d0 +27c90e412f4dcd812099bad8f8b8248f +37dcc5c8d5de8aee72d101148a1c1a6f +ae5141378821390cef63c1910a9cc2ac +4bc8a6979b90d851eb40b4519471032c +b1d5a1ca4a0e4c6a38a623fa19eda5f7 +7b89a0216bb05d54a000d90f6eaa4504 +e5bed5fb7c9c1bc9df7a471b8bedcdc6 +d393564d4827019130526a65012c66d7 +449add3972b0b1940648220fbe380e46 +2071d1143e37f66e24b286cc7ed2b3f2 +b8fc5b7bd9d66b12af265ebc4a55e18b +e36c303a9371b779b5e873c060baabb6 +929ac5fcc0e0146e866e634f77fb9bcb +ddcd0373a9fae0e7ea1cd0f2902d369c +4e2bafe5a7db3d8db69ed4814c18a7a3 +014c440a5c9cee34d8ad3cb3bc223a25 +4dbd15cc29b0a1d5a988239e1518c88f +5c717f6fd8de2d2d2771d145eb1cd738 +f4d4755bbf48e9fe75093dfbc50deae4 +769cc0958e9469173cc102d3c8408c69 +d41d8cd98f00b204e9800998ecf8427e +b335f56bc30ffb4191d48b053785d6b6 +5fd96e810fa0706f5c983734e3fb257b +b1ec03c42fc6c533b802254ab1f0625b +a1a99badb1233a404e9d5dbe923d36ba +f3e7d5a0f08e551a7ccb7ae3754655fb +db5ff96e02bf12bea93a361ff5d2dfae +389afca673b2e868d195ef3164b3c605 +1e8281f1a3a69c0691cbe85933fb213e +aa564151cc37087c11ac1d671a50f2ae +d2f3ebfb0588b7139a0316c4660ee5bd +5ca39a489397aa5024a8c9c5114cf2a7 +e5f6a9499c052a872046d84a9018e98b +8e5672281f58765335dec71130e46af0 +3ab0ddb3fcf698d6b083187478c29b7b +b49f7e088905a8c0e30f07a4f0797591 +a16d7a16e0599688a93ac68e9c53502f +844a61193e7e6fc4c142fffd2a538892 
+d73a3dce9eab627fb34f3ed7335887be +f9b0ba5e9488d39f3667ff9951ce4d76 +a525f097a9a2d1bc11a5ec0b8df77da6 +cfe6b3af97f99f7545f1973046e96397 +c1c4ec02cffb80a89355ffc08a0ef09c +5a9df61a25bc19b989bf838b4074eb4d +225a6749c8ab373f1c20674e9d4a4ff6 +fec20d268a6306dc1840321d5c9b7efe +3b6f7fd2f094bfed1584db393dd4f309 +bf81edaa45376518dd0ca33106ce3e6c +2e3e328d7587f034bf7d7fc0954a2d60 +18c27f11f5f4d87a265f3762476b729b +e9082ac74630d176c9d9f564d89b7a31 +facbe35fa48ed77fc3970725f8654c68 +ea0be514ba58e5ed69592f10fff5a588 +426bcfa064630fbf84d197448533e702 +d64c08813e3dd3206c11faaf26011c43 +dbdaf6a7b3a986655f645fe86f448305 +4613719d9ea2b9b354e127285d64e070 +d163e84e816d9c77b578e0132e3a0c55 +a8e08b260543ce1d67de1f2e6a416fa5 +7f605ed5527ec5b586753b81e0e6b416 +bc2b1400666b9bf2fdcb159c5b4e5c9b +91466aafb845ee434e9ced2663d34d7b +d57cc0f11e762cf4067b3a2a3b5c4afd +59066ee941b32e5cc4a51fb0b9933c73 +f0cd30dc49d7810523bf215a924e0d5c +f5f9bb6531852a10154439106fdf6f69 +5a4d8360b654e1893d1869b7fdef88e8 +e06e62e939ba439d5eea457ecdc12c25 +56edb6a3b332448a1f28dc65185234ec +ed53fcdfea14a7cf5f34481ddd4573e1 +113c54a50793f3cc12ca3d8d2e32b693 +84f10598f6eb522cc8e10e897fc16ba2 +9a5715cfe60d86d846b76f9032551c44 +086975ab42fda3387c6869b909108667 +a1f13858e608d303a93fb1070fc52bde +af49386da8cb09b4aec5bebfe12d4a67 +45ec4fe4ca1c75f7d35ad17913d6f504 +31ab1ff676197eaf05a9e238de02fa01 +1bde697a670e6b9dbd29f959898cd7e9 +57cfed66f26f2d84a4bf65e4b2c93d34 +adb601c67aa01f883ebf20ca5364d197 +51ab1b14a6362d832020bbec17242257 +f454ec2fbcd6076ef238be8cf091cefc +51b39066e5d23f731d143f0651981d95 +581be8a2f8a6ff926451e102a44cbe72 +fc82a9f729a195c0061a5f3686fbde73 +8e809e76fd2fb12062776d5f36a61272 +d3cc74445cc7a6d24feb3e340579e990 +edcd13ec509967bcb7461ab6b433df36 +599716575146723b05fdbbac23f2ffe5 +2529cc9e732bedeb04cb0ff1f867a7d8 +8152359826ae10f8cd00f298ded26d8e +4e77505a0b95c92846942c6166a4a1b6 +127259bdf8bf63f4109fda811938ccb7 +8e381c662d33ffedb0ccf4f44da4fab7 +56f5e0baed918e67dac35b7258cbac6b +45189951aeb904f66cb73d917a73a61f +4ca9b0ab42e86b6cf57e1c16187ed0f0 +aea5c033fb649ddc68b20c0a8e35ee91 +60e5ba738476041728440331f296423e +447ad57dcc75168644cb5bc68d69159d +594e88f699d326cd2816d7e078a92ec0 +a5aceccb24d345af283799441c990cdd +d4038f03cbf863bfca65035ae0302e9b +fe4941bbbf09c27abba70eed3aa37baf +c5d34506e05fab782e8e5b94b75727ae +c975fa56d69552fb9ce9c7e7ba53b7e2 +c0797ec6d5c0dbcd5035a175725eb601 +568a8ad9b5285a42d6a2709e01e5038f +0d754f5bf0d4208f68d878a82de109db +ab4d141e00cfbad95b917f2f56d3fe61 +02b0c9f81cf9af9a9de7ad8c9ff9e5fc +894c56c6f76f33aa6a178da5885aa6ef +b5a7131b92ca3776c897fd7e999ef6b6 +75ae00f20a8083b660e95bfd7d665ee9 +d74870512754c80dcc7fa7ec9bd68893 +8e13aba0322852c674d549df994a7667 +26c5b51f3cfc92e6603f571b2bd285a4 +494a4c85dcc017f07ee1eac991939306 +95aad84f6b9f3fa10972b3385bec5596 +78f9f6af1283a53e7655baff5f8b6b9f +a31d282fcf831498a0d600fd4fc304cf +5c74ac3e9b6dfec1ff9322ad4e18bace +55f370a931d0517f211bad390132c956 +3a6dd052d6540c03456dd8675fb7162a +ed3cde5855fd209d9a4a15c51ac09f38 +a96650e9e70f3b257e4f672d08a3a068 +dbcc2d29ae7394df6576eb5b70b795fd +9196b56bf7fa11526cae4533d65e3522 +38c86dc91b997c616511b6d5bbe7c559 +51147edb892b832921a19af1e7f8e365 +07a7ecf76fe5c74bf11fe67a19106725 +af732667924f6402efb2f8be04e9c5e5 +270497ecb27150b246ca9c82873e0135 +7c9fb666f730e052a464acf457da8b4c +0c1d10e7d5a641c01dbf0520baefc370 +56a3784f551e3b08222e085fab152585 +b7710ce344984f65663f63b5c3bcb6d9 +91b50d26d91fd11120ecca538b6a6f6c +fe69aa16d462bec1dfac7b3764340291 +8eaf994e06b75c499f2c5dd7ad1874e9 +7599ff79a6e790210763c8ec2d37a467 
+9e6b3ebf81d32b10b5c09762978bb377 +de79d1f9660083979a7745cf4eeebf20 +0797c95657c7efd825ae63e8c4ee422f +d7533d498ad3fc4fd7dd4a1252da4cf5 +aeb7b6428ad0dbf35338581c11608e24 +f7ef5574a0c8f3ba688f285b769c422e +51362d852bead3b9394e953f917aafc0 +a78fee20fcf890ca83cdeb9ea050bcd5 +bc9d82cc5f14569df75360ab4e1eaf88 +b9528db09da566de9baa619dc44660d6 +88348bab47f678f0e916e0a9f9f0f1c9 +72406314a50d8b8b9578daa4296c3e11 +878c0d208433f5e150b562c262a9d228 +592b41bbaffc18ac51b4cda700ab8b26 +d8ace0a38d42e3e7059307f7165a478a +8868988b2fdb959b4a72998af4472c52 +332897213e627995867083ea355d0f16 +7f1d3c43299b01dfb0972cc2e4e31373 +9b640ded5e5cd58c20eb2592c01b3a6f +4741a9b34957c06004ac9db5bfc45cdf +9a4fdf316498de594a5c3591a58f2a89 +7131c8b9d2b9e67f688a0819428fe386 +2071afc5867f85bcaa6a9f114ee83d45 +bd23bfec64fd9e783b08203b9d398132 +36d8c6a284995f05228b65d0559967ea +a58f141144a520c4abba54c3218cb6c6 +ed8d6a3f44401aa8e226e832a3810b08 +407b89e60a406a8bb697930a0bdf60bb +39f4c5d51af727823bcf7abdfba5ce70 +9599316412ee2f061047e72e957b83a5 +7b622d420b89868d4c802fdcc42d2b7b +533f06133c7cc0a5507af2808e9329e4 +351995cc34f2f88b2e4590bbe3676755 +0c0c23d3d53977df4c43dcb423288e14 +f7972a9536b9cc52628cdc20bdd3800c +32a774c54a28f00b69cdbea7fb997536 +97d40419f3a1989a0820792b030dde2b +f61b324dfd433323acef5569ac820355 +fbe5a5f55c155f71ed3a2e5b357c8001 +1974db51bf3b08252d7ca8a7e3068209 +b0b981e11f2e6ca2b8ca04cf3350c869 +a73edba9f8b020a90a6b50d544d51157 +4d5ffa1d84aa0ae3a09b1a421f115831 +3a6e9e0fb316b458c102ab3b051021af +e36eeccf39dd4222ba7b03a295c5ed50 +bafe3547d0a163b13792fa170737bf18 +9ec27664cb0b5934f249a1e92e9871e4 +b82086fe535c84ba1ecf7727b847eb71 +96695e3ee157b8c356b737077af7bdc0 +bc426537f022a52ebec468916ccdc1a5 +44c77fea322430b0d50833ea6aafc7a0 +cefe0dd6d6741dba166a31bea0d3da13 +f99bcfb51fd0f98c17ea806da2e31087 +cffd324c59dc28f0aa8ef50c24f16b15 +61644b0c6af730dc5e89e341dd9385f2 +2e3a2d79cf1cabc8e533f9b6c8ae8451 +0678a7bc52b1328e62edb3cb004f12c4 +5375a4f3d963581dad00e57131f11ffe +c996bba26321f7e49122d43856faf96d +018a0a45b65057624cb7117ca5dfe21e +4a230a45faa1d11236e5cd0a86a8e4ae +3ebbfcd4d774ae916df0c7740834bca8 +cc62d26a0d80f066341c32bd384a3873 +c2a36a115a47f91237ea5eec62f029ac +d0b6e28fd730ec397c2c5ac60732ae29 +b52bad90a2f49d485350b3c95d14c2e9 +10d66a28face105e28fded89b6132114 +d7645347184e74b8e0421804c1da2f77 +622811e6999827e2cf5c0faf5fc9184c +b50a7d890c2974a06603b8424cd34654 +d42a5c13fa1bf6f4e74a1b9c8d25e637 +ec3505670adf0345defe33a9f691c2d0 +bf8c72534261120f6ec44aca903577c1 +81c6ed02b0f8b4ddb0a82e9ed6447463 +e414c75bcef3a10efdf18ae011d2a856 +273386e07f994d804a86db1808d5f22f +53f158dd9f41141fd5340bcdd6137262 +0fde82acff6cce753b41d534a87ae6f0 +69a148387b91bce347cb02b2d2bbb8ee +dedb98c00fcb2c202a61d11c0cbc3f5f +3a9784a37a4bff9fe8107a486ac43f7b +55b23adfd90a859ef70ef4db50279994 +26d452490a452eac08ff3d592e1ba414 +d07ff9f2b956d86a19498e496b0e03b7 +0a8a72185c2fa7e65aeb8e0913ba782a +245a3c56f415b8e56931274e1e510723 +38dc2c68e88b1985622105e8bd809967 +13d4e439feee54f92b05fcaa22a08dff +91213fec7fbcdc88b777f4f3ecd19ac3 +d27a84ee329dc2f19882fd8bb0f7730c +693502c9100282d8d139cd0f051f236c +abecd37a48b713f2114bef382fd35c35 +570536719170ffa5943742d04900b8fb +413be8294c37d5749dde656cfb441e8b +49f1947348ff9d04bee02e03ad0bb4e4 +e696a0c56607e865841d31aa10e40148 +4772d56f03e669cd99af2b869ebb5a69 +2a81f3594cd65dd668593a7f88e1515a +a123ca250a58bd96b50f0801c675321c +13a623e57017097439d2395405bc972c +cc0f953f4371d7c84a37fa3705c9bc93 +8ade5a3cea85bf8a115b67d560a40590 +b8ea9549b0efd2081b852481b5f1f494 +38327d6c2d2c1e7698f82a3478a6ad55 
+b6dad78677bfef5d7a0fb96f617e2121 +16766e363da074961804bc61040d5bdb +44977c997acab8afb05cc5529518ac58 +63f14c08886fb6eb72b289f1998cba30 +84ffc47424026c19ba9757a134c7bfae +716f8d14261a04fd8cfa1b0e19293e47 +d75e08cad8f6424e8ceb1f6c5f05fb99 +64b7d14d3ebe3aeb1c91eeeb0e638d56 +85e23f8c30e279a13660566c5c3fc007 +8d6bf6b42a0e67eeea72fc61d2774df1 +34624ebf01c5f8835b72365d70c61efd +159ec7e07c39336e774526b81d35afe0 +e6e80f4efcbe2c0e98c6b19a743d6948 +b06550fefb265ec191ec922895208aa9 +e517f208e1a3edf1e164b69275e1bb2f +f9ccf5ba95f667e0824b90d2c82ca4a9 +7d3ec2f43852e224d702044285254c57 +1ccdef52e05d6e14574b99019d2fff47 +63f37ba11ee13238d8dce0d8349cd03e +d025158f0dc120803945fb55ea0dba97 +5154a5aca560b828f0539eb8c22bb224 +75f142bd9c88eb8ce9b7a1e8313000a0 +1e989e367e633ff9dd9b90107983a492 +d630863a22458617b7ddd7a48c831cd6 +04572bc2c2501925252e1adb8e06bfd8 +22d1084b8733762a4ded07fd60fc192c +db54f43a4f04da33e810f9659fd6f621 +4bf8d424ebe22a4d8941a6ffcdee8923 +2dd8e8c1db484b752c69b4ebbaaf565e +d9f974b8241dfc09ddfc1a73df266724 +b598747c3f660f741e65fe0a982bb9ed +a5d1dd43357fce065f8242ed168c7cf5 +4e3bffd48c06679b04a0dbd6624d1940 +dfe8cbc625a979553ef7c2ca0766de17 +c988285918a5d3c6d09cfba6a7c13c56 +af6a0586a1521c28b9e796db2cd5b1ab +cee25dd1c71f2bbe44edd24d452fb088 +46a1eab3e948461f57cc0734466f3afe +633e0ac9ba0fc86e831064d947ba6fa9 +42c3a247f7a105e96332a908fa7b41c1 +17ef97aa99653da5ff0411936666da36 +08e9ef2c89ee7628bd1daec83bddec4d +002c88ccc7253a4c4b383d6fe8e1cf01 +99e8ef845c50af0700377b34a1f21996 +1e4c02ed9422d83a5680d6d9c4929c0a +53a0db99373a1a2a0811c28f71062682 +04236ff31fe6948cac94fe31804947ca +b514157806dbd7d9b4a1f8a1cb3fdeaa +7b956a9d8c3c1d0353fc0d2243cc4064 +872f7fe258a5c859d6ed0e7c0f62f78a +b68de0f85fcc9ae71a20dca129027a7e +bd39aace9d85ad9008c53cac75538c68 +7462d883d955e79296b7f86fbfda8a6e +3c28ec7f27c24de48ea09b0ab873ee10 +e3fdb0fc988102671919105515a27860 +c63a3b06d00f7d5d5c8bada44964f3b0 +807585414db4fcbd18fed8fb1d84f4a0 +225fbb4827d328d77480770fadc5ad24 +8561b2a1f39c3b424d4bce673665827c +155303442225fb2bee9bec52e58a26ab +4d6243a34bdb09140c877fe953725f98 +1f9d975f1f182ceb56fccb16b7b20f44 +7a91ee3252a707dd2d7e6c3fd0e516b8 +572197bde43070ea4377804a0e191bc0 +3819443cea2200f6c8bf6a92d0087d5d +926d89dc06cae3bea26f2f59e8130a5f +9cc7ee15805e55657b581c54cdb0ac2a +7db8e59ab2dc45b319ea544e3fefa868 +e730d71f8cfd00892c4ea3e672110900 +6b10e0d69135c0ed13e6f1ae3d6be739 +646f082d86ebc4ae45d2d130012beb1e +49e5c570f14d763bc1ac5be171defa15 +38327ac28175bd3d591cbf4c77f18a4c +948b74574422baa09b5db8671fee2902 +1155befaa9d0b89ebd34cdb8e63cf512 +989f5daad6e7d52ac73927bb33ec98ff +9b6898b6ad020b6351350da25818caa2 +eee5dd2dee818162df88f2496263a32d +7683368c78baf2f103676660407ede5c +14e5fa91800d0ccfa277871d4d6b33d5 +2cac0b3913b6b81bc7b249bca280e06c +e2f85ccc0a9a1f60ec67f0d1ae2a71d6 +225491e0907f241b95e365710977b054 +6e754b93dc278ea9dd71c748f1f39790 +637cd10d3b72f6eab11e4ccc21572499 +aaa9cecda4008731dfe8094d4a3a1087 +b42d8bcafc4b9657025f3ee2401fe5d2 +b9240af7b25df6cde79f328d63eb9e69 +1b2bf3834f2cecc0ff162982fd4c3e1c +b0915985e5ba978cf5234c8bc27cb622 +66922a8dd4a1fe368f67ee2ceff8afe0 +df06bb8d2bd1d999047d6029e4276364 +fcc00235e0cec877a50f0dea1e02875c +847efbe8be642875ad8f6879d04a2abf +ae61b4868bdb7214c5dd5a337669ecd1 +0bb55792884e6c714fe42969ab429931 +f1812e266ba4149fae82e74c555decad +188cffb8e0b8e74fd52cb2ba1af28c7d +d4d8e42e9c4d498191be5df2e39e43d9 +0609ae38bd2faa75c25e38498f5b51b8 +f84e9e6b868b04820622e2387532cc91 +1bfbce595972ef48d001051f223757d5 +e18808813b0c3524b9544bed2a3d9840 +bddf5885939f450328062a160e5bc0a4 
+a7d114e96325028bc57817383b504f92 +fdd4946dda7468c60574d47f3af8e7e4 +00cbc21ba46b55bd0f6abefcbc359633 +f86e04ad60558d890b22edcbc3125528 +2fb18c786f0392405cd14b45507468c8 +5923bf8dd8b36038a0b860e480d5e7d9 +d4e881ad33ff6e4faab5c45193045ec4 +09d63f950f0a55b53fbe4fb0f2dc0709 +56756afbec2bc12c9bfe04152f1c1f8d +8ee42eb96ab047566c3a4eb760c2079e +2c36c9c58c93b78197a98386b701c908 +0bc242d2fb3f5a3c394f126c8f23f707 +76ca698b28bcdcfb94482204e5858d4c +4b775b0b003d538819f924621cc10693 +ef18a102d4f21fe78d4f2dd6012e114b +ea885235c0859d0608322d492bcc08ec +dca8c428c4b6762bb9a302137d89472b +095e19b11e42f5d489e567b86cafefc5 +3ffa773f3e5bd429f24b9dff36f75a48 +ffa2d7456bb6f8efea9c9ea69aec49c7 +b58606f6eef8c4bc1170ecf25401ef7e +15fe53694b016bcde58f161d4137220e +a9bfb0a41d59a011a60fc0d9cab5b21b +c207ec884a1186d32f3254d6503a93eb +faf2472d81ea9d1fa8023164c8a929e9 +9b4500394fa5ed53dc26c53577387c85 +d2e6391304d0eaa0fb3ebb00f2a766dc +39dd1fc8c4557f38071387615aa111c2 +0a6773a25d251f82400374bd31fd8333 +e9c7aac36da0316857c963d85b7e8269 +245f2dc4a6147703e4a19c6abfbb1cb3 +0e2bd39ee515a4b7d7b67f5afcf5b520 +a5eecc99f540be7d922dd9348014a905 +a0c13d65e76dacfb141e08377d3cc934 +493f1900528f60af6b44322258cff20d +d9e3aa368bd3de7ddda54fe08a55a3b3 +4c66114295edaa054ebc87f67281babf +8ef587a4dfd6670e96c0b27a609dab23 +680b42364b269c30cebd938796424ab1 +f975059c748229ce353593cff971dee6 +8e13d6c140759ddab9516e79fa8336cc +d2e92cae36bc06450f9909414087df9a +798d80dadb507f85808ba020c0725db7 +e5400b7b51e4229041b2a2a19a1b2025 +1c41fad554dd3dced58eade3debf3f0a +a77d7b811081cc2b43d1f51165cd4298 +d382c37bf3b7c049d0dadafdf0ac4146 +0594ba4ecc0af31493692f9577a51587 +775690a1143537a1289449648ee0ac00 +17479077cc68245e52e9c6d426a935dc +bc52c00b91e096b98e4cb603dcd3331a +e40151232c8183c6785d58ee6073a1a0 +25e02f42c3e64752b6de1e38e85757b2 +80de4b02674e56bda177afb0b1c75a12 +a07e6807496b868b8bdf4adb55a8b75f +50ddb871a181ec5175662ae8a430c943 +8ab05486d5e6fcb96b45aa7f6eb964ef +0bb3b6770bc46c414efca0cb9da113b9 +3fdd637b66e81752094a16c93c36b720 +7fa92c5c4f8a6607781f04e14a807538 +f4eac2e9d3249aad3b6797c884f8745a +62b431be2efe6b96a78218d6e234f47a +a01de84bf52923ea19aa6fa48a658a60 +3488856ed4c0764bea392a7d898b8206 +a3dfcaa72c0e7b0e73baff03a298cfad +dcf3707eef78124ad06377d5746db440 +cf4e695892788b4945871ae2c732cfc0 +048c226e663dee5ad0ebbb41030ddbd6 +8d376de4f7208b474e0befe3477ddfaf +0f6b6e18a34bc70d69ad1c27a5e8a6b5 +8f234dab0bc9777fa7c0c0bd0d843a5f +071008efc472d51e02b752aab9362fea +ecc1ad14c873ad669c6fb87d0c65fb0c +0fe58bb2e2ea15eab34b630f2da8057b +e8dd747f51fdcb3042fcf6c9f1ed6dc7 +a5736dd2d01a1bf1a867274ce8082e1a +3caa28d9754f25b9460f63373e494f17 +8649a0a08ca424328c6d63c6f6178665 +fb1d693199e6a24ccd505f8c170f7778 +6d2341a25bab0b042158ccaa53c04709 +5529873e890ff9cc9004dbde0a40f5e2 +ca5ca912c1102999ca76cd3ede288ff4 +49da77039aa784a203d25690e14668d3 +fa0713647cb3863900b14f5907c5804c +72a9a1d28cc96d350421b6cb24d7e3d1 +f5eccb1aea4dc63069512e7ca03bb9a7 +d574bd865c550497468e0efedf9be1c7 +3a11cf043ddd78c9b5d9dd2a313897d5 +c1f70f9599b761e475d3dbc579472ca5 +1a35ace806637abced3942daf89095ae +d56c7ebbb7a27b1ad7dc0208fab56d98 +d4a0690062f18c43e784ef40dfac151d +0a9713a8e5ff18fe495ea70ea6a7e856 +fd07e0a6cc8864b2245b73b835fc7c0c +4dbce60c290e663892b60af6ef4ed299 +8299c63e57c4b8ff4b6f009529dea7b0 +413ffca38b92f29911ee184f0ad700a3 +e4ddba8c15cba13763e5850a46914af0 +5eec33afa573f097c55efd032109a565 +3387f8aca7f11c07d4f693ae3e8b0657 +75ac34b67b569eee61f7a178b12c1ec4 +e68dc2b511c5757c793f3e9a6d5be6ac +4dd509af7a4be810c02b5a3c9b9fd306 +cbe3d7787ce0aca901895daa05f2962e 
+09a253346247c071785dae78769e21ff +b8463f643866045d3b2a9201ad614199 +bbe6a36cd1647e80108091235c7a48e1 +5641e24ebbd1227a79ec3937efa11947 +65db6e811d5487742825d21d6beebc5c +75f9c3c32696e03a3781c2e8bddcabc1 +82d12a23cbd777a501dc536f2226beb6 +0088209b0cbe94e88f2e9006fb490c2f +d025fabdffcbd16e5ae203338954ebd9 +63cc3e311b2b96a3f8414efb041f16d0 +1ae167e1a553cf01a6e0f014b8ab164a +995d80ef72a4b133bdc230202beff8d0 +7f846a349e0e91ff10708f7c04c1c288 +4c0eecaa60703ae8ab20b87c65517479 +2c2a649640e7739b18eac842efec68e4 +1854c6946c77d7569429f9f46c8357d3 +bca2b09c8e5d6669ca7b2d740d005e90 +dcea279098673103bc1750ff27959320 +83505785ddf4ff7c03e937b03d738390 +7a26e6677f66e108afe7cdbe888f52f8 +798aac52bee0cb14d2061b9c127bcd49 +560861679979b653c6a1c16e6b2efe9a +14f718df499386f77118f3bc63dbe41a +08d6f6801aaf02b833c1d353f32f2e58 +15e69fc468e7cddab9888ad74cc176b6 +e38a2383fe1046fdf38a143cd2c6c7cf +decd11b1196bea417bfd22c605e00967 +3dc682eaa9fb114db6f0cd71419dbb62 +bc2795711bde8873dde44475f36d14c2 +0d088ee43b6ba84b3dd7d092a9733126 +da25a712889f491cddd777a2f3660cdb +fad3d91be3a81eaa01c6f28aff3a5685 +58bda025895f515de24196318fa516af +a9f1c1d087c87c08244066684fd9d113 +ee50bb13ef28e9ea13ba85c39da7f619 +c6490bb66afbf19c54a99b24c1c38b3b +8cc6eefa0b0abbb89fe299552615159d +d85354073ddaca033bad164a6eeda940 +e7bf57a1c28adcf244b89ec8fccfcb78 +a235cf7106eebfceeb033d9f367bbb7c +22a8966bc1a9e9199d869140548d8b60 +3f6b5f6bd0bf1a835d8c0d86a98cd58f +d484b710c3b296bbb0238b32d5054ba4 +eda5467299d95f6f8c929f7c6ce5d10d +a62d1b6afa53431fead60eb8b294b42b +929dcadab2122ebc4c6e9ab5eaf0e5c8 +49b753ff486d4418a510a0bb70f98d25 +0619a3a15312549cb86c4b129447bb5f +1775b79bcfe045e724ff72a1ee3338a1 +bc41037f97c04513ac93f6ef65a5e152 +24e0e5ffcf59dba566118c00dc4fa2e8 +89b124d5a52acb71a18af70fa29a4f76 +add2264a666294c152ed61abc385e3de +c1ed04cc75f7ab0bb766e01479cbcbf3 +43f78d3089f6ec656820c526e7647e1e +ba645ae73fb4af3ecabc09a5e4db9ce8 +2517365b86b3d31879f2e1b62315ee0f +08a1656d11025404110a75406a606163 +e5cbb28e6124acb13623ac6684f8d6f4 +d913d4b8ea8d2cc7a3f599c17657fba1 +c1db4b416ff07cdb0cd62e1798b1d71a +5020a01e4de7a7a4195305704866b3dc +f33902ef8265d7484a4f2492e532719f +e9745d57357642699ce410da4a049b31 +deb305a2070726651268adafab82cc4a +15b9fd18c1d0e563020eea62b84cc204 +e4e0bd877f908c0459b318af4fd89659 +e3f6756cc33909796b6329bad67c511f +0c22b9c31d3534563778574f8c94ed95 +98b9140afa960da25e04370ade25aa9b +b0369f68aa6d157cff20c99b646e7d1b +dc61152d6ffc75fc9dcec83af0eb29b2 +240b00e617ee4620849794dae965e393 +3732dcff41cbdf35c84f25eba37d3ef2 +10e069d7197e73828739f93d7a8064b3 +397de72212172e2b7f77261e5635276d +ab941f4c954577c6abcc393d72d20ff5 +9fb50e6d1090fdc6e6053b874fcd39b7 +7e82a63ce77c5e9dfb863531ce298dbc +103431b17f728ea3561ab3f504cc37df +3e16bd064f042e2f1aa3ac59382cdb6b +8c1b469e4aa0a202fd02d97afb0fa424 +87a595da8fbbc0b36323a3e316256dce +6bdac42396088aa88e84466e366a362e +0b8b04c36c6c4250ce2a0cae1e3a6d55 +8738d9c98665ce38c7ea68df44c82fd1 +fbb6301a16d20d14d9a7a3dfc229e0ea +a3d6e7efe691d861f581f191945c35b4 +89bbc67fbae78ef0a5719441d2101446 +29c0bc6a5ec2a30b7d136efea515a877 +1ecdd305b2700a9d367e2af4e8fa78c9 +264526789907322145a2cd1f7b7810ec +d689c479749be6faa359ce29b2351428 +e0e0338f84575002dd4f37cf7f87dda1 +e61f2b87ae41de763e39a2ce2c8b6bfd +66c0c6841b188d197a98a4223af1e5da +6d0af215f6cea1f525190f802a4ced2c +985d0408b069e4f86c292e44c927222d +9ba7d1a9b92773b196cd66d4d978f75a +142aab52cbf86009b18ae61455e871d9 +a0c383562c780a8f349b7ad5bc33742a +24e8ed74d675ab52482f713d93c3b878 +d57e52de710b2775530a0ed85c2671b0 +9070e0d4e42f281471876447135287eb 
+fb79d572d7c83ec324b18805b79de0b8 +5f98238991c3e4212343ce0bace8ff6d +0204f049088dea8fe440695bcbede8d5 +cd7a1a5ed6db94e37cbcdc33f8831989 +5b858c92fdedc4b730c031db2886c94c +f1de10984dee49ba56bd4e7af509f322 +7925ec4c15595de02e1539f91484a6eb +356a4ec327a1ca30af130a69baa27e62 +732a97a31a17b62730dedd117d415d09 +15886066de9438681c2f57c38d211339 +092452313c8bbd4ead01e2b56a7667a9 +1ecb2270eaf296c83be88330bce0369c +be06bf58fb168bbd91a82e8d7eb0bfbd +a432aa58c8dd3719ed494aba5210ce6a +34822c9565da5e3e3ddd670e994f8bf6 +74d69ca984292f045889d38d83ab0b7b +6184cb3a7c5f7c385661fce687281e5c +6ca4f1c79dd431a3c8c7c9c072242762 +09a0d7e5e34f43eb26c2521ddfcdee99 +5f9a7eb614084d1d71e243ad31c1a059 +74f42b91931e665776349c9061d2d616 +8f29f6b2c83921472c9030834491c4ef +2c17b6bfac2009419614c133194a42c1 +c25d2643c1b383a2643d14e79e11fbdb +130f9b099358e741336795e3e481e3a4 +cc1632a95c961587383e73928211d492 +e65ea3bf612badcf4cf555a136b0aa67 +ba84cbdd39b8e477b54b93c9928a25be +1d8ee9a7c604e794c9b759ee72ba513a +d4341ecf54c520088efad6ba6138ed8a +b23a1312363b69a8c36d1e7eb37b3802 +535fd2fb75507e66b7adf8a4dafc04e9 +2eadedce639e4e8667e5e3b0df79b314 +056a220da86bdf5733d9481837a9b220 +6cea5abe8394c11de02d6fb9eed77404 +f32e8e1a054eab6d65634ca8b0221d34 +c0e6c0adceb6878f4ad266785b3ba433 +0e27e936f62de320f00ece2e75a81565 +931e4be679125ba677d8aef7f08b10d2 +c7d93629d29c1d7dd2ad64f3ea59cf84 +fab48723820d852f78f471016b24646f +c47bd48bce5cb645e7125d99302c9830 +1bdd97644c02f4ba1480bba6cf1e7586 +9a621ea159955cf9ec055d117797cb28 +02bec5d77668e9d7341b9c47a1ee2c1c +95f279c0bb7358b3cff6cae33689a498 +706d757335e9a25dd49bd228ae1d0571 +6dd925ef73218dc9d1af69c849737906 +c12f023b761733eb67d6c057a2959ddb +3fd594c37e1bf5578acf80aade552375 +983403394628b2f6a6d42c40fa923c92 +2fdcf016c26ade8e9dc0504c1c9a1fd6 +d036d89812fc77d870515b86296cb233 +e70a288e291d690a7a5f989be391c5e0 +1137d3d9fcc09b5388ca8eb6119ef6e0 +cc441b41ad87619da59ce6b376288659 +6d76f46191b6633500c653b25898d820 +1521d3cb42bc0bda47d48f2279e2dc71 +6e4c5a77131904d8ace5dff265daed83 +91cc141263465ec2618090440d98dc59 +a70340353777f7f980d907649ee1f2fc +c1f82d923fa48d0c4ee878e621d5dec6 +b31851eec43b505d586173b2dca876a6 +81a58f8ba094be56425eb10a7ede47b3 +f2b923f1fd10f8803138cf3dc266808b +0a9e533a10396fe661af2034f0a1f275 +2beb491d07043d475a7b4547d8afa7fb +b4d8a6d93ebb1449af58b13a814d674e +5e1595d0cb536052e2cfd202526bf783 +a78a3d90fcf6570c52b676e191a36a9d +977cd1ca31da70fa79de4a60977163e1 +3d4c87e84e74532424b936e78e12846b +e7baff598af12a4ceb9cbb7172719d6d +af017c65cbf042854982aa0f20a9b84e +41ba287a0f83f351f86629643d4a473f +72c05e8fb8b613b260424752999a11e3 +282cae562f35a6a765ba4705d5048700 +22eededa76b160778f637631e363432a +1b380f05977758c5b1c78d2201858301 +689043490b4f2d8d5979741c107ec9d2 +8efb58697498989f6167ae8a665588f8 +22442ad12588d92fe26a11225245d6d1 +ae08acfd3be3e401fcadb3c2f7913ad4 +b74f756f48666d8f4c83eee9cabc14ad +0296db3c6c8b0cd4f6bc8127e6488c44 +17c48ea4ef80dc3fd9e684307cdc5247 +942f84eb8e94f866b160ffae797dd9b1 +8160941b6a7496585f416fd645bb4c62 +51f6ded60111f3d47adb3fc8c6fc544e +946acce8f68fe6fa0c9ff80f8d35e737 +8c6f332c0563ec4015ac6701d05654d4 +7a9234eae3306669ed3262090d24453d +64f60fcf98d5066b16da018b7bb3415a +4efb4c4641287cf482c294bcccf13b69 +a64e97679ee13cd98f0c118f2b459877 +a43ec9b0150551bd428f4784eb5a2b4a +c37a278704435ba43e67e6e5741fdaeb +df3aca87d4cb0a58b6f9a542b02e4577 +c8f7f313cca2ea5cd7de21199c288c98 +d2648cdfd749c82c3df33fd6781ab494 +3f28fe9acacfa1a96b749c309893cc33 +80ea6e5c98fd8119b2443db4121863e2 +2db734fbc142ab350a373ac7d6e08a8b +9441762b8bfb44bcdf7384ff33e8153d 
+0658e43105c1cca6d246ffb5db771cb6 +159861295ea9c1f4a459942d1945f3af +8767f7495c4fba2305c72f1af15777ff +19ea3ebb15790bbd165f27bdffac10e4 +0fca6551538430aff17c6f5086d2f36b +20113a67b689155299326bb8cafd0086 +306486ec4c676ef2d3c41992867ff0b2 +a323f39318f9d45f346195ff8ab15ab9 +9f8ca244f98683b14ab2d3f3ad0554c3 +b316210352cff0d602e248fa6c9b2933 +e6d3a45e46ba43cd6d32544074eef5fe +caf628a3244939c4531ba820ef4f58c3 +65a19536947191ad41599ed0ab468146 +fc6d541769bbf7ae62ad91ffa9cfe804 +b7f2500b2800e9735e2348e0ef1c1be9 +c093cc72f3edc837884e6dd0d3b8d9dd +e9e871be8b79c7f8a175fe39a615cfa0 +5b214e83c153d3c3251692e407d86987 +f6f402e3ca9bbd44dde8ed1c9eb244f5 +b6fd5dcdf5011fdfccda7391451c0905 +ef688f871ebb5ab790c4ecb7125ef1b7 +9f99f623999a4bf1e1c1c141119b8021 +1ec45a5dd7d00323a4b43e97464aa82f +6f6cb0ff69532cfa0bca7baf0fbc3e84 +77384709079d00ea0fcf93b81e52d9a6 +5a3b441618ac691337ceab6780e477f8 +86e7919549aecc0d5c974c6c4f5b5978 +82ea04c754068fb8e7183b016afd6044 +4d75948d20c01c3060382b0287fba9ad +6b79fef3aa95b44da09ba85ec5ace705 +5e8807d41a660be63c95838e34464c04 +a5563accb5b9f51299cf917fd0072140 +3932c236f0c9639321fb271c6a86f2eb +40bbcb4d1bc3d69c8e797973209b18c8 +99bade726e47ba89e871218dc13027ce +0a1d70c829d4c608c16854b0aa93dc01 +04a377cc374945e453935934c2aa410e +91f4abc34706cc22e489862523fdee26 +209e432f9fd356a65b4cd4e81e951592 +2e616a23d119e211873de94704ed9539 +b34d868df20363b201c05e9b1ec53d2f +fc4cfe03e953087ed29e687506c88415 +9b2acef544f3cd0e506207a716f6e066 +f689c567d99ba7ba6f159c2db3e12507 +ec0ffbbd2167cc7439b9267752b13f39 +f691177d15c725a51d348b97de740356 +d4b321b738f7bc172294873b5af7d7dd +9af32f010e5f01ab43e4b742234c88b6 +9dd48e794a0c8000c5ff08c4f2f4e27d +33a8b0680cddbac84edabe45414e0bfa +bc52515b635a19b8f02378a916d7c778 +a4f317cde779c8aa100bb2f1814b93ee +fa7b6205cca1442dc21995e47e1f0294 +3fe458d2b238f55ca27bd2ebaf5b6933 +c821596720cd8145872857b2c761984d +aa495ae25ecf4f71bdfc1ec45423e539 +957a7879c4a12a9728cc20399f50da94 +183f556584d60deec0995d63f3a28ecd +2191018ff246a615635926f12ed26f3a +4c57b53865ef1b65ffa58de5bbbda772 +2b269a26367cffaa3e3282614e42f08b +6f09ba238b602e17834593c427e0c853 +70c508bdb1fe6dfc281d79b5697540e8 +e065e29927f7e10e2a9891770c0aaa9e +c19323a86466e8848df7b3438b4d01c9 +80b30c75c08dfdb4489dd8aeef361d9e +f37411b19279b4a3557ca15cb37d5e60 +4e3bea232546550b9c59dc228680dcc8 +27780c369bbf4118311360f8a972bf24 +63f33e53a8e941db9b7bf0cb8f5c3a22 +04a8cfb383247f8ab5ad0e8dfd0a55cf +ddf680cbd847af2e5e996e7969e11031 +15d7d1505e031f8204eb34fac715529c +a6fc0e1bca10cbf861565efa0702f769 +ad0a835c540ed73acf0b4091dac2684f +c16b58d33c56d6781c6abbc811ea8296 +e6d3c1353b3b8b573f34625d64b09e82 +90184cf95219286674c048d2c8b51508 +c5e69b4cafdd7de742545b02fa84618f +831fd3dfaa041d4af3867b9465d4b4b5 +87c4a8f0300c79efe8ad0903f55477eb +055336da96fb3217bc669b5091109af6 +d07fe87b0811cb068f2fb2bc349c242b +c713085686d552e977367f221cf98724 +f326549ed01ab2d3ef0ade37b9b74b75 +eb490ab4d5136f9650660a16d3ecc9be +860f4cc339a2bd492b18f5c8e16cde6c +eaf7ea76c4c6916da5e60e1f876f7248 +b378828959d90cf29424e4274b165d61 +a6c381042bc945f7328374fae29a7162 +ff40446d62131b20a5804da5a9ce3489 +27cbbe74ae4b66ba937808a6819fdbc5 +ef3d8f6a160819492522de1a4a259a32 +023bf0a28fe2c289a245bb1cbc1742ad +462f9dbd99f127d04945153a455524b6 +319b8ee01412fa581c77fc773f4e18cf +b53470e373c62628dde2b300e8a042b0 +409fa4a185c44607a5ac065612ffcea8 +6c352105e5b68cf96b62e9315c1cb3aa +c184fc9cca14e91daf0b8e9ed05a9851 +32c237cb3c18129a5d2792ed61517a8a +ceca8a0904ce4a9cead564f15a6bc4ab +4bc51faba05c392bb6299e7bf9cafef0 +60c0c897b7af3882027da1de0b3195db 
+1ab1967759ddda124a7472228e9172b1 +32972a92b6761cf94647632ea437732c +a444115e9d300ef07a6a5d7c97078909 +d4c0a612142b4de5f727bcb91e342bb1 +cced9ec8e081b2acf82ac079d8903a82 +3cd7a7c0b63db02bb1ee9e265a978a59 +884f678af97bd6de7a8fc31d24ebdc64 +d132f17f4a861321ffd9cd9dcfe7c44b +4f73be1240b6625515f244e7db87132c +45820b4cc8998087ee17c437649ca36b +1b9eb67fe49e6e1d392d80f483cb3e31 +8dba311c55d73468dd17a38f959f4001 +a6c8b155fc567918967570537125ea07 +53f877589f8f9fcb911a91ccbcd75918 +60cac85e9fbbf37a8c1bbbeb91ca1ad5 +8731d18a12fe528cf08d0e878ccebb3c +10a8afe327de4e2b3e808d71601f181d +0094b030c6e695f048d10daa6c273854 +9d77eef0f5a896eb7ddcf54a27ab6e98 +710abb73d36f3df0d5be1e171bf8fb3b +feb077c26d8806abba48c1e05109d26d +6f249dd8dfe6cf31b00200f0431bb66a +65625a287816418307bb871e5f79d631 +8a5b66e27bd73e2f6a3ad55a7661f07a +88b00f54cb1b841146a1675e7226812f +307136c3c6ed2aee0ed444f8c8bb18c6 +308acdfa8554a5e2f7282bbd255dd01a +4ad7a8a0cf06630c728126074dbccad1 +82b1c7c97110565533597aed7970544e +703545cf5e366f03a7c5656779f90cd2 +1182ff6f9bdfbb459baf7abe2ed4942c +414eff12892a60353de3c36d6e01e01c +5422031b2863d1a8ba605ba1e76bef12 +5ccd9c7f36eee2bc941e25fdf59e4034 +2c076e87be048219eabc1a2110651d47 +65ab1ffbe4250c25a45513ddec906a1b +db6b87d596b2bddbb0fdfa8733b347bd +5fa80c5107f7416ebb2b21242db27679 +b442d6d18fb7a5d193f05b4cf85e5f19 +7da25c1ad449443d55c41ba2fe1411de +8b205af1aa42374e4783eb7887eae71b +7da59b9eddbcdfbf87f27f5b197d8268 +044b9f39b2ca14762c657242c70a52c8 +cb54e6e458d11d75dca7d6fbf9e19ddd +07ca12a98a8adb4e52729921141d7572 +44b74dd6fbda1d7bb4efb9edaa18697b +d15fb3349673b6b19ec0b6ece655984e +a36b6717fd18e1146e71c6756d48ff41 +fb8d92dac67b5e497e39ad416790cdcd +5147da25c17bd96f505c60d4263e17fa +499413cec8d393f95f755621eedb7baa +010fe3e325cd6fa19faebb3dff0269fe +a93bee3188d3766165dd615cd19f9156 +98e6a3d19f182dfb54551839d307e456 +de4dc904daf4b836583a6c00a0671bda +0bce8d0452ac234342d4048edfe9a9b0 +995f4aacd4e3d52e9be498eadde17a2c +e677f52c235f31a2b47e35714ae4690e +b75964758e255b905c5dd1828fae9738 +6e349ed25c0e40d63536f139b3fd10b4 +5c35e0b9453a53de35b3424771c424ce +161c8881635ca0b30157730a0a865583 +5aadb4c79c9e13e5eae794b9086581f8 +27983188fe5b8fdab994225dcead1392 +6f25eeb867ab5e10ec3bfcc44c768cd6 +b25be6bc6fb0e483471948056f84c591 +d1b62bd4296631371e3bbfe7116e19f4 +42d179b751d2f71262c136dce18863ef +9dd3bd01d767944361a885dc9fa157f4 +44308b646da7c1aa687856d5c9fc3dd1 +a953f9c371a3c89a71586631bf1104a2 +318367f0762ce3d3f0e4c684751b337e +ae03b4ca45edc2b741e3b9965ff3505f +e687b867d6d226415aaf2be7367f192e +e3f849fc8695a2f0545c530d0957f3b2 +8db167c2aad932fbe73f465ea181d913 +3ea8f41dcfccf3139c793bf6d458480d +da4e975913a951a72905e37f382b6251 +17d9e98e0e01581602b2b4a5c236ed15 +9fca849f8dc54158f489aacb5e944e84 +4804afcbb3d81ed268878a3a1aa30307 +fdf31278aca0a328053a6e0d49c0cf23 +c509fccd20c6981e713bc8b4f814e381 +37fd755568ffc52991610cd0c0899721 +60833448bdc7a2ee9147bce40bdd69c6 +6fba9c49df68775f4021904093d9e72d +1d363a7d2812d1c040a8a4322c2bb503 +9cfdb753eb6b5ed81d77c3ecf46fee89 +f79866f1a6afa9638ebcfcfe79628a3b +a24bf48716499c4fdb732c6c2bd84f5e +a696e87a60767184c5e47235ef7ba11f +a780e3d05b51133c3aa2b2202a18f7ec +b6763177c6ed57fa0cea9355a195bc20 +bcfcd9fb03962ce355966eb3e2bbe46e +0b740c86966e4c59ce2f4bcfa4dde40d +48d8c34258495e7050a899b14ebf1ef4 +dc6c475af1b299cec896704a508f1fc0 +5b917492741f0420f7ff84218d25621b +65a022b5f61ebffac1ccefa0670e0839 +693463fe8537c75e08c0d8a9723e3507 +970d12e2ff2ed977a2c09f6cd63a9d76 +4d4538546bd366446a4e79bfb9630f6c +fb3a793e3f9d1f18c4cd3f7aecb16ae8 +381a67e7e0dbfd1dcc5cd46d3bad07f5 
+43cbdd9e088d1b9a2ccae8c9eea4eabb +b53cc79d6863333474bb3bf76f89e114 +39f5c0c2394b3c3076f79beaca068a56 +1150d19d7aad7fea5673d5e5c2daab17 +c3f8aab2759e51de8e8482eb4e825a63 +294663ccc03765ca9dfb24bc5e8e2b3d +48b77ada0c596e5f335f059e19bf4f6d +a0df473d11367d5e1fd4b4963e604469 +59d5c20c7e3cf4dc1cd00459f4fb2f5e +1fad86b390d161b64aa41863aa7f63b4 +e8de0c6af906cbd831528cd4cac55f3d +8f69f16ed7d4a56d7fe6f68300783cdd +fdb53f5f9d73183bf77edf92eaa88198 +683adc9797a10a960f9a50e5b9d6d423 +21ce467b09e79d919c009e5e3e0cb050 +703cc23dab37eb9b9337b153ac1ede7c +265a88417dcf633707a877a5d34f4d59 +695286c9a3f46c2523022b5404f0eaae +133a0fb1ed349ef7aecd8bca3adad434 +5b444c387039b6b7e0935baeefcc0522 +bda6508553137ebc4059704a481623a6 +b1ea5ea507796ffcd4df76670bef3e70 +7703caf7bc84ab18f3b6c64aa85beaf4 +40f39c916054ece72f71d760b084c55a +f41ce2571850ad07ad6bd5d6663e0298 +afdfd7d36593ca72ea81c09be25bd626 +1f98b727a798af7ea429fc4109fe052c +acee84d91317e4650fbdfb9300934a7c +e9e8de368cb4e6d2f316ec7ae62d38d9 +54c3be224b2088d61c8cd1c61603e7f8 +3457ad8cf8f02bd8ee7a68a6b3332aaa +a263259500d8582c979828f0e6651c4d +24cd05dd7f89e15a344177cda29cb3ff +0e84b4e1f64966ad2901f3ef92047275 +01a3750ff38800edeb12c4bad2cdead6 +c2a234e91361e93d4a4d23d574152b3d +048e0f33b854b2a3c00c3bdced0c36ad +f60e77617188af6b7dfd6d291da2af35 +99b2c5c15251cb7b5fb00ed14e03a3a5 +23c0ea1c49666513593d4e3019007ecb +cf5b065eca0d29a18bb4147965af335a +68cdd271a2fed4dde6fe440501284b85 +343d4312c806a2db1c44387578ef7227 +a8412d028dff011f04e583d72b086568 +d41d8cd98f00b204e9800998ecf8427e +a8a4918a950159dc7c6130a0e835ab39 +2042c50a049d5d4dc0bf4d851c6da63a +0b087c626a7bee5c285815e3e0253343 +ed0719f8f9c8e4edd85ae6f2a5eb0918 +06938d12955a3b1afbfd98cd92f2800f +7158e368711745a1b17611dac6ac8b80 +aa0827fdbe214b09a44546ae5ef9ef28 +cebe5fbc96fcdd06ecf261f566cfae4a +57915d20ce6f5ba84cbcb30262ed92c4 +7bf7aa3e9f16153b52f3d1a21801942b +28603d55a61fa386af302294cfa8e020 +ba6194c70a22b918953af8c7e2f3b559 +53fad99ae21425dcdf7cd2b6e5ae0f67 +bda0572959ea4c35b5b31c18c1e544fe +a041b23c4da80d40a25527ccaa9bc78f +3894d05fa9b56fb159fe579e35fb19fd +f2ac8d6f7969085aa22150b3cb19a9db +4e27eb71e4487a0dcae936514095d233 +5753cfd4fe6f8fb3a65bb57d087ad519 +583cd9d994db6f251f742675559ea151 +8a2ed2b6617b6623311258059f1bdeb7 +17be8954e025a02e3660c82e59bc77d2 +9b1f88e6c0b2d7f3ae92fcd7607529da +3a3d7eacb9dcedb43c400c0414bd45d3 +a32317bd9c77bf379d923cd4510631d7 +c33cd6ada24d2ba1c74a348b876e4de4 +f8f6ec59493c6e309d5b6cb44c7104ab +b0ff2a333c7b194f04551c5b95be0ea5 +e64261ca6eed48520ae7c7e23dee7551 +aa3d22b6544ff1825763be46fa74087e +67a213fee5b9a6ba8a0e4f0bd1e49bb6 +644363a0f40c2e08dc7d742fc2a3634d +e64051a74d443b7f9f3cc38b49929275 +161d6d7d2a3dc54682a9f26a3f236c27 +41da0f881edb4677cf310947cd304e34 +d938faa2215e5981e2a810dc01100bb3 +ea13fd4df7d00e08ac7e03bcc4ed1c58 +c7aa6c6628b3ec4d0bee969ee14a5be7 +1864473e9efeb68ab2c5fdebaa5ee28e +aaff6f0ea1fed7760f9bd42ea6044a53 +5b49a95eac215342bb350fa90eb20a7f +3cae083fc130599acdaef4409fb45c99 +cf569fbef3ce4535b080d6067933691e +eba00f93285eb5ecc089567c0a27fbf0 +ca088215119f03154aa4b3484b8024ae +629ff976967712e0456ef7c2f25b4302 +033e92c406a9664d11c45d6ef3184d59 +72b258950eff5fc71466beb4fb78c105 +0d8009bc3ec56c347875b488ac0df482 +2945a8d99eb9cf8b5b73ded92c49a2f0 +1701b0553d884a6b7dd334e23e8ee57a +aab64fb40985b64bbbbcb88383b1c420 +427c5cbbffb18d06fb09af91d1a84cfe +23ab0929e0b690f8252fbd03550e4658 +5c02891d54651b573730f86f5a8f13d7 +c2c9d6a9dbbb3f370163ac1db36e1d4d +0e5399e6d7710f672d6fa1df9011c61c +050192c1c6acd415cb65f3b2e2699e5a +1eb1d53d350840480c6b4d2b6cba61b2 
+f1fff97e5cf9c75d1df3b7cb7f9c4668 +5068c713972d501b10f4ecd037029ef8 +f611334b3d07d470c40edfb75c3e34fd +f6fe9dd9b81d6441cbb7a621fb80fd85 +2cb13e3b0b23bf9ce2f8ee654a7f423e +25c96636fef4920874290983da71ed27 +e10c347cdfbe043098c654dd536f2924 +268b122e49ee0c0bb2b599ff16893ece +b7f46c0548af84ce8bbe0ee48cf7f7fb +fc22d2bbd3e074592e97e5f6cd39f0c5 +9b905fa23df8cbd7ffb54f5686010663 +6e7d6adf07b453f64574ce5a9c9dc306 +73a954868e242429122ce512d8158ba4 +6b2e8ac4454932ef252d8bb171134138 +00774f5cc5941b4131142a1d5b6435c9 +4ddb271357efffae7a1a7d2fba761c5e +115a0b969d9b8e8a40405051575e65cd +5b68d3963f74768c49b5c88adff79625 +c49c6b9e75b0d941ec3be6ec004edef1 +d1da85c82f32b23c56f2aa93b60750d5 +8719f82749fba5aaad3e0856779a4100 +731f4c14de2c3ae835a2dcb4c496044e +b9b9266ef4900324c7beee1320a160f3 +3a804034d95c426480b4e0868f951025 +73d2a1bddcf1d95eca646fd3f67a751b +83a01122dc485e51988eadf492970ab9 +815aa77bd2b7982ac3fcf01c68178c18 +409ee3cd09b33235f4fa58c216f5e443 +7248d850f9f2c04e7135606d9bbb0402 +750e844d5577262bd6995b7ff1b6223b +4671aa6abcd2b1c1ebe1d68d90bd034e +af110386f8e6936b79734fe493636f0d +8cc62c2f1c79a1aeb55a05c422230984 +6eb59f666d21700969dd81c851bb7a98 +f1eb57e9fa4186cf668de23ea58774cb +07f00e6068e843aea79cebc08b9797ad +4dba8df78e1769300db1f202465393a3 +6981db626b5b6e446042045c0c475d56 +25cd63cfd19d3d339747669ab33e98f5 +3995e2783556e3c5a3939835167b2ded +c25dcf29140ac0f1d8a425b57c2ec381 +c8802624f08f0d8ae9e563070f143002 +0083ba674d91012a3d137619c6b2c7b9 +c02e16ac7521fd51ac0709dea2fad291 +ff8ab0865396b6e837a184d0ae911bb7 +2add9e5fbfb13bec94c935325bbff406 +93b11670e881dda9bff91f82a4442e0f +8b2a71225ddf418ad2d7b7b36a42e5ed +98b8e2cf57fee2be549f17380a7cca16 +e7498dbd153e213d8e7a26d985180cdc +7efbd2066e7d7d55ae87cf91c0a45dca +94bd03cac01e600bce964afc348f8daa +ec11e2a865b98be2911b7eee415d279f +c2b1ae09efa9192fd19bb9d98c8e3c54 +1ab1b0d77301ce63b7aae3397b3e64a7 +60db1bb9fe10bfbc3c91d941e53e9070 +a1432529b92a611a678c5980579d0afe +0cf1c862fced8cf8fa067dfda639d7f3 +31c6ad0d67476bbf01fb1d5d2574206c +d308cf247a9f358150969c0977dca989 +67e238c80f83ce39c119d18c966be2cc +3cc2c898a0a341c466a82f4c3b2f52c8 +3b386164cb7f2fab50487550419b4eca +a1c0bca5d683dbfdb9ad9e3220f5d8ea +e9a8e22b0a6691796d6f90e1962b4de4 +4546d31288608cd2e168397811aeeef4 +52e75365194e6e238154ada4a22744ea +ee67fcbd22879f1c457cb9bceb46d809 +c2b9da7f293b34af91048113a9d8004c +1681069a084ad22e73de24e473478986 +f8574e9d75dc5ad8a19500898096a566 +b38a7fb5c08cc7f19339214f567a5c8b +a8f39edca828d5034ac3287dd76e65c7 +6e93a0749812cdbcaef7734c05fae12b +f9c8a501d6c5ac1e47d9f16c35c94128 +04302bd4a99b82008737ece14e68edb2 +b0fbf3b51120ea480e94a0e2a0e3abce +b0bf8fe908d052be44bffdad02608760 +8c2c7616416685138af4909a6812daf6 +5b4d7a5b29a49ec758551dd2f7e19fb3 +235b2a7d4c6b0750b7047aa797b9dfcc +2152e8013c64d680d72d6a805f92b4b6 +f1165bbcf719ee4695aa778aee40ae00 +6d3da9adbf4fecb56a599005c3527b56 +1bae283c1b9efa99138a86d6a657eea2 +49ef53aaa3754969e601729644922ed6 +bbbec8ad294d043045df8fe6cb136fc5 +f57c7bb465fb974ee3466a070a74c635 +cd8741c1cb7195691023dc090259e51d +d5d3fdc13a884fd4c65f1b71ca6e4dac +78838feef7cbf65e0f27a25a3aef255d +306712bd703140068c8ece05169a4502 +e6d22e883fcd71308730e12f32a1eaa2 +58c0f2a3869f804769bd4e3bd00ca851 +74fb49ea70f71315f84087ee91eb29a2 +4b20ba027980f9771ff5adff426edbe2 +98dfb86abd564aede142a4695d6aeca5 +7b94d3c1009002f050108adf277492d0 +7fe42de2b32120e4d0812de4ecc4b24a +36bf9e42ae60dcdc6d65f50eba9e085e +7935afe6a28e2c090c0c917b622793cb +b15ae83b34ae2f527b3bfc0247abf17c +91ae595bf3828d57a4d17b3efe289941 +c876344d90a2f4d09de5c3756e54cda8 
+3c0a0aaaa4e9072dd7d0e513e9b1869c +5ca0f75f8a31929fa5ba0650b3b3db03 +7598d98a6cf982efc25ad72a5b9eb648 +912e5995c0a4ac2bc0b9b9b12d87c03f +e2dc1502815c439fa5ee81b77a01d039 +6b749f3abdb63dd74dddecc15db74615 +f325da87db73abb7e3a4d3c993533b26 +1868b109ec42d5d0d3812b1dfa50c6ca +d1b6a9f6ad803b96346d0f868e239a64 +b0a4eb98ec4f38757f1b1d42ec3b1621 +8ff2a6565c4186296cdf3b177a46e5e8 +00a6be2741e2dfeb05f7db114e87e7b8 +e2d57095101931fd58169b569a8044f9 +6d57a77384ae02ec54e47b50578afeee +07891c8ca6338e0045395a44e83d35f2 +cb6b152bf06d27fe9254fa8ae06f7892 +105e3f58c481197e0ca61fe15fb058c3 +1c6523d8bf6dcfe200959561bb0e645d +b62b5b503c3d10a7ee5b4fff09e99704 +f8fbb40dfeb7413047c2849a60b95c5f +a64535531dabdc066eec664116c11dde +8a0950a4d60790aef36bb37737d70ec3 +be8c2a158e16fa4b7b5b821b3b93f40e +2324505a737d0b3cc71b1c03cbdb5bf8 +2f758001edc0e79d73c5ed9e9e02d14d +785508bfa8aaf0f01a9ca65478444d5e +72dd7ce19d4735639c57b73045095a7f +d9f8f71ea4fdae4d93fb54d038294cec +fdb40db62f9682427f2cad5b0529374f +6ede02c4092e6462e38a9abe09ee1aaa +84b53d2d7743c11b606e297523d54eca +4806c351cf865bf58ae56418b6905dbc +bd16902dfe0f5da38e3697741ecf9c85 +83b3634f944da3187fed5d88f8eb3706 +22330530436eacbbfe8fdef46befce6d +eb2f484edc82a9148e2b3241108b2db9 +4575191d879fe737486a6e937a681fb8 +36517d336094e4c9cef7f4b0b82d2c0e +8c1bcfa5da28415cfe31a0e5e12104ec +306dfe3f9fcc9a746ef8da891188ce5a +ec7daa9dd73b2d1729afed56a039b4e1 +83453eb1a8f51483f3a81a1e1b366578 +fede1d8bf7ab93b86b989e9720c46dfb +265581df5e1ab4b33125c766eddb7544 +d5595a7f84ed1343025c3a547e137124 +a85fa75c919c9ca354ea12b534695f65 +2c1eb96e82be063e0ca22ed5184bf126 +72288dd0dbd4fd56903ec1377e522357 +cab188ef67c0060f157dac4a1f9f69f6 +1d3e52057a4d8d7ceeafa02ef2eb5fca +ef57edbf158eca2f21f3d9daa9e27d97 +1145a10b19d5b5b8f073e27602ec28de +efac85ff714c306300115d4dcac7b9da +aad03d9b97ea830220a2383388d32fbe +6c614ed154b31b9ad38447e73701f879 +094edeb5ae81d2e1c25eaa8a7fdc0284 +1ae7cd56a16262fab84bb455908fe748 +ef88e7839e4954115c4e484247b7740b +1592536af228cf3a9266fa85fda9101d +345301028fbaf61d67454e25f19b21b3 +47e0bfa0ba252b48e04152f1547d460a +327738226c643398c561b4c3e5e80ab7 +1ce4856440de1b5d2edc5b0061bf0e4f +3d88ab87cf40a67d030cac3564056fba +9d0d2e78111eae4d7970c4759533a88d +36c68b7049f6fe5726a50e13cf15d54e +27098fd06d9920d93c975894f04a9222 +d1a6bd144fd7b0844793411f6df647b0 +d4e5a2940570e3ac6ab5ad2683b3b8c5 +7918cebb6013580da4070cd58a27e31f +c6dbd40624b7640ffa0686abea87f8bd +129a8f6a334c2413dd8464b0dd95d567 +e52c12cfb44341aa93405475333fe7e0 +971383078673bcedcaf1bec2447afa4d +3a1412bff9c84908de4cfa9449c09418 +df44e7aa1c2692aa6fecb4f93b4a5ab1 +0639ce3be7280c98677c5d257ec072e6 +f97ab740bab8634a04b4ca4fc28b6f97 +3de3712718a81b8380faebbad156f7d2 +bfc8228054ed46328e18f54d1e6cdeca +1aaaa749427fec1fe6a3bfadf801b81b +2dec7eba94138b1724d7b7a2e0dd9692 +d9f980d1f9b0ed16e398b4ffb43be585 +fa123e54964ea41e33969bdc4ed723ae +19c2411bfed0a185de68ef1381ea7797 +9c982139da4d145ea6d24c1aa24fd58d +779e40dd3847e976641b4d75788648c2 +19605deed9937ff331c353c4d3df5d1f +b4f88682e86c7534440b671b01e0dc3d +c51123a68d80f4e1d85362b35fa3fbe3 +68bd8cfae30a137fe6c166ebc8fe1afe +e5310bb3e41c871c74b9cd402edc0d74 +aceb63d87a085f7578537e01ed2d6315 +d2b540f8a3b2218a874bfadb3b4fbed6 +4b2620209ac18d2ca8341be35d73e033 +cb9cbd9a994927de338fb3e0a1da09a8 +10f94da841d64b5861ea9649be05b32d +5fb33dfaa3e06838c43cea1336e04ae9 +d860aa81afe3cf2db3dfb5bd3ea12054 +fec2b98e0066774c3c73dee22e3bad8c +dd1944eb872eb8b1833a429beea5ef9e +ce1cbea363a67d5370a914cda138b18a +8a649edc185774107ea9ffcc1434db7b +3af484aacbc0af0c38c0f19b74c51aae 
+0db433319c47f728549e121c95b1dd81 +9bc5419786fed6c51aa55f7f05edf862 +bc69aa685c6467634d76730e479eef00 +19aefb8f70a6a2d6fc3182e0c7407825 +5894ded8fc79589b97b6d66616838bf0 +7fd8cc7bf9b39d0ffeff4b2a9680eb4a +9d40a84650eaa1082fb5d2ae238eb820 +2b0760328c3759dd0458ba4077c83ee0 +625e5525043bbf110552ad6e152703fb +1c6f12eec0dec677eec891cdfef24e0d +e5275814a602ddc9b761c6b77c476d4a +3ecfd426148b2df5e831d84aa13c5851 +9baf4217e46c9a4416abeb999ca7b919 +e6f4f5b01a32e906cc43c9d596ba86f1 +9af326d41d9e42f2a07fb6ba98dd22b9 +f32884b0b08b23abe603de73ffcb563d +ce910a250d42a36d52d045c635ab2e57 +862d33a2eff9ed0ac5965b2d054cafdd +21bdc0790e84749ab61a77b48c4d0a56 +4eb8b62e90d8496af010ae856250f183 +7f0afafb4e10efb419d32e0f1ba61c4a +acd3b3576e9ce8c4c49db51d95046605 +ff21dea36f362464abc0afb9f1d4ad84 +e8d6697bda9e640f587198591b61b97c +f9b5df934cbe4ff797a00da2a8ad4e8f +e023a73ac3570ae266e4a17919c12624 +02115ce1286ac70e24ff859caf97f6e1 +90e38437d53ccab4cb420074d320dfd8 +26420ec1dc8fd4b85cbc2adeef875f28 +9237dba3abbeb2e7fd29f0c5469445f1 +a1f3a64e93bf6ad9abe7b3b4023378e6 +1980e18b86bbe2dd45a5bcf9ff5f5043 +d9679464ba0ef8723e2ccaf094a25284 +78ee16efa0fae6ae4f29d029a61fb489 +9f049b46228ea63ca8e4c1bcfe34deb9 +4650c904288869b324fdeb4a62a5878c +c88628b22a0e61da7e364d6323a84c9f +862fdd90b1f1e822db99c66187de0a73 +b2ba4065dbb9a8c6132c54cb9b05b926 +ec4641776943211727d6867e1f25f70f +0c414f0903fed3a95d0093e93f0cbd1d +05a1a8dda980f164fcc70a86e5c6c393 +b10cdbcfd19e5018bbf1a319a4ca3e14 +3d2bd69a995c16c598b119c17dc0e88e +4bea5530c27c98fd488d22496e570be8 +3cd50674e0584134aadb3f6ef1df9280 +b3877ef1ad35b4f6309a672fc6e8099c +9c4729a96737dfe0d409a881a40e2c93 +5fa62a8f4fdf8dd20bb9aa593f9b04c1 +e11fe9e9aed4f03688f7e634a42ea4fa +15fd07539265a6d96f16afc81a8cb93e +b9e21e71ccfd4c687d66e418eb740d90 +a3b5dd30a846ae451843d541fdd89f84 +4e7a5fb938f3b07bbdb9ade8ed218059 +7323c6bb75b2b10b64a9a75222f263ad +0386942d0ca26c9e38ef0851dd7e88fc +730c3bd7f044c3ae7033481640286c2c +3f297f7b519a8d82e1d84fc609348dab +6bb1afc5f633f9518dac90b40d75a9f8 +6c45ec2c7ef266d8970da39a6ad1a956 +372161b035d0086d848222ce9898040e +3d1eb956e2ae5d52ae272ac08de05c31 +363d11e3bf433edd518a81e73c606a72 +5e147138957ff01fe45475425ba36e27 +10eec6d8f059db7443797ccd8a09283e +7743ceabeeeafc0b62d82facf29b21cf +a151f55384375e809614095f85c6b474 +8f95b67b6150a65cababec403dae15ee +174a98a40aaa441c8eb215df9c6d08f4 +90fa4bb3a6cbdabe4aa52b6de5acb952 +0898dad4d60515d53faa405e7ed1f98c +1521951de32e93d3f6476bb0caa8a212 +d062b3d54975f080e9ed30ceccb47353 +e81c02b9d4351137de950510d2c32814 +61551d432774627a9cac6972270d5b61 +cd24291eec0019eda8ac62bd45d00a3d +ef0d45aec4848cab74199b1714542725 +40a78f212321db054665dea53acfa28d +a17ff5c5a3e79b3b929bef7bc7860e9f +a2dc4ad32e222bc9a03fdbd53ce7c4c6 +69540fc2431fb9d1afab40d3cc11ff93 +8e4d037749b60ebb23c90dd7de15d635 +0c91aaae754261b5bbbd1b2d2a11db9e +8c5743abd5dce05ec43ce8db591a5ae8 +2a1d45deea9518126d6978d9ae877b82 +545e931dadde28c9ee9ac912def5c3b4 +692d8c6fceabae5dfc5aed48039eed3e +36c0a4cf4bbf4ed58e54ad01a002d9ef +47867fdd37f3ebb4f05bf9633825cb66 +465fb330834b9d40ce3b6fed6cddd8a0 +4de9fb8fa273661844484ce63f74c5de +75e1a46a9cbd9ca6587a5d555a60db6f +e1fea7f3a0ed2d303eaab61bceb6c5ee +ed42023cca6cf5270aadf7ae4c8f130e +04a6526b42ec30f6471b20526228e58b +5b9fb51831406beca7750860afc2a45d +9afe59c50f390f08a7e99b47e53316ed +65936eafe247e904c79dc3127fbd028c +fd073b15b34a9ddc379fb9f25ad34068 +ef385f5a9c80e57d7c7b918103221d0f +e1de30d5a42e6d9bc56f0634f3b6f884 +1500b0604cd9d2590703a71a0bfa0486 +2a213fe5bfb12027ded321c010b76d35 +f5ba7d17740708c401b19ec322b6febd 
+458f7acfe8265bf9671a81d57415f1db +808d79cf20340af8912dd6a0ed049310 +82e6a52ade540a98eef606fdfb287b56 +adc2c9a417854b5f4e4b68f2e90bb44d +438dc46942f196f7ade45325ff760791 +f9e77be3d1d4cbba9e75ae95044dcfc1 +e17bcbf361d32fb271449138a4b76495 +ec131c23ab23bd68b0a5694ff4f9a517 +94c63b8ac5dc9f7d9af3a138f2704519 +267a360566cab59b0cfaf8eea20b59ec +7e25cb30855b197c75d9980fd71d4681 +f53624d2c8c42c9f08435f76bf7f2eba +583bb47c351413359d37b682742967bd +19b721fb1cba695447a511b34a842fe6 +74c5eb229d1d1254031e6f53b4571489 +d308ea37f706c136b3f28e20e89082d0 +77e18e6b43f811f3cbf7ad4620c93cb6 +d2cb839e95e8252c391118002e621930 +fdf7ce55ee75e54f1a38618b762080e1 +254e4535257efe378a5d120de164f41b +3a73ca5028eff0e78ee780da59d573cf +27434c4487302ec77b18e221448b8c78 +daeca7d7a03739b516a518279776a5d1 +d3a14f9086fff9b27d1a9b7b0ea91c2b +d49ebf21b9440512b12d96bc34d57035 +8b4d6f2d5719cc2f5e35ad564ae320ec +e4843b2289edcb25330ba74904f239e7 +2aa3101e07bd9222b65fb9b9dc9a6ed7 +8207c7e47e8bbb20980438cc6cf2bfa5 +9be49f44f18bfb7fa9a79948b9217bfa +469f0f6d11d85945001b6afd97eebdca +b63a7e3173ccf0b7a4c994ecdf4dbdb7 +61eeb6f17d398dd63df4508aab2d8447 +3a90ebd75088a74e4564ad645fb8c58b +b44924eb99b3e81c07364facb0b79e1d +8833d99b27ae6f97741f502179a1f9a8 +896746942c61d25da30428bb968c4b63 +cc70d91abf9648711a37b9b031e1e382 +9437a180220d1428915c0187d0b01589 +0fe386036126c07a8f3af6b572d2484c +f43daaecf9d318f2727b80b1b383cc93 +0911a3aa8ebc7b52278043696d8acc7a +879b8b3cf6c192422ac0b660239b2b94 +5e41953ab9b7241b8d9c2b77bc19be55 +baff3d44dd39e7e9c96bb8a7f7499189 +80febe5399f5003b6a2c444c3c199fbf +d41d8cd98f00b204e9800998ecf8427e +4e5234d662a45c8e3c9b0e25defabec3 +e03a34b223a3a7a1a4872b940a0b18d1 +b5dcf8d51add424ffff31aa3a48f6a40 +85b35edb36df502391bfa5517605d25a +92217bfcb52336c0a7427ea137ba511e +2ebf9228cf2d49f883b66c15ffd50027 +20203de06146c67591e4c4b213bd97e0 +77ecf782d071a4bfcf2c768ccabbdca0 +5691fb20fb890c22619bdae7a68f647d +494308fb7eb97ebd21210d6cf666d241 +72662dc25bbb686f1f807e722ac6701b +e7f1d26c3944ad720a2f2d86f3a1369e +98e1e7237bd7d8f946a0733778029d1b +0ca4232b2f599e01898386a9888cae0c +15fdc865ae8e5b2a521832c6861efa52 +4833cda85cf3253a6080327541980901 +ab3a7c743317856828d205a62a5a1f37 +6f4801a57867befab12e4d8d566aad57 +ceb77d9ddf9cc0f2c25962056450f732 +46bdac4f1d4e1ac7feb8c4d9e01ad7c2 +64401f9aa59aa9e079e16ac8367733e4 +6c0eb65404264f9956db5e1b8245e0d7 +4641cfe3a5068f1e406464e798d10665 +b1d679e7ae15a76b8fe4c46310c17a65 +65a6537b19af251617760c6fe85de64c +3afd3b18bf68799c37a9bc5e91a727d2 +42b964916ae84a603e2909969b3b944e +bb49621126e716d8b8279aace3d8bfba +02c7f2287a2e48c29920cda144bcdd7a +a08d5f489d856609bf0928b4078efc3e +2e31727565624403e44f19227865427f +0941614f6c26ba8cd51bdddcd3f73c70 +748ba27a2b40571bd9834cd7c03a5e95 +dca9d352a15e0142cccb1eae4b5c4588 +8d5a603ed3d0c5276a427df01adc418e +1f90abe2796e0f2a90843642ecd05ec9 +4eca96d566d8da885bb37d260ed32828 +c1762cb987174c2612275b11f2353fdc +b002c9c2520be157b68bdd1705820e2a +3387b93a694c2f07343b9a178294893b +7842c19a55c5e2a9d5ddb79c34f53fa7 +d644020edfd297d6fc06c125ccc14283 +5fac3aeeffbf07fc0a481e85a41df2de +715cdb68cf9e0b60369f49eeacf0c85c +e0d5bd78a5817f223cd945b3cca0ed38 +59fdfa94fead17d06d593b6e2f27fcd6 +f842353ea544e33250bc6d20f7dcb987 +8a0ef45cb4f90f054ca70ad2bd21499a +e1b377e3928f38c1ae09f1021b77a928 +19d165160512689ffc1be568661fa5fc +5451b4d17c42d28ab567460f09bd9571 +b6745feb6d8da51fd27b9946523aa437 +60b801a754278426d88c330bdc6044ce +1aa6cff4f4facc2ccb656506c785e2f8 +73ca2356bf5d8a984408e64e5438860c +623ec99f712ed0879265c44e7cbbde4c +7e466bf0e1cc7ef49f89e800ff463685 
+16bfa64077fa3446450baf314fecf177 +ac63deac7c910dbca4960dc950fc00fa +4470072b63d36b30fae967c543914166 +38f2a5ae9e4e5eb51d024cbd6454c78a +fd98c043a8154dff6d058fc3c14e29a4 +69ed119c9b4f93ba8c1798d9750b1612 +fa0cb557082a6e9ea0f546a6c0134d0d +20f65094028ed3258a6ab6151f81cc8c +83aff94aba8e503671cf49476b12560b +34413b6a7783ad90fececff9d5f65512 +93ef0a26804315e282f63898db985a87 +f7adf6d3f32cfb92c8e9d3c813bab0c4 +a026244655601397b37fd0fe7884797e +3fce89f8f6d4bb070208257e9b72cce3 +b44639df73ec7102383fe711b123d75f +0100181dd8e9dcaec2a92b0573f5f11e +a682911f2ad3798e887f3f10c9cf16dc +4bf95e2563e1bea33ebb3215e0d4df1a +df1e565e3f70cd6de9d5e684dd66f6fd +0963cfab5415de0ca18cf5ab0c41446b +5dda3c52631507c7570e578010ee9904 +a0bb750192bdf8bedb88cf6862684c59 +3b7796f2365f587f6b61cfe322b56895 +6542c06cbc4e9ac3428e99cce2501b56 +13cd58046d9676bf66b31d417746b753 +fc3a4a4d8aaf857931ba35f65963de02 +98cef74df1b8461f000527169f0bf935 +b84ada0b7e62eefde01f204eb87d2a6a +ac18f3dac21671c4db5e6b6cb4813e93 +a4035f27adc61943bee97ae7403de4b6 +9bde0fb00efd337f909286127e46559e +2645d0949913986d448ed39c4e3930b5 +1166062430827f05d6582d3d1f4fdcaf +fc96c1b4bed1c7746640b4be43f8bf4a +370d5e39242bc2c6351291f1700a345d +68824a4f2d7da8a68843eb5edd9bcd75 +eba55a9e71b048966272e3599cd70f15 +dc2d4bdba8b839f84524ae0388ebbce0 +6b9f539f0fabbcdfab6631d98617e217 +b330059c8186b60cc8104e1a3ab553b0 +ab9c2a29b9d447f1f2f6437eb7d6a124 +2da772b183f8d837aba758efe68e46a8 +5b4c435546286a194a28a0a12c6260d2 +4dad96894594196742f8f151d81bfd5e +1f3e1960cd7c8bee9831ae0e8218fa90 +aa3040ba694f6d71145e476a082501c1 +5571085546afffb18f062fff9746f3f6 +b65f01d171087ad172b4b37b76240661 +579e5d7c189bbe407386ad8aab99a88d +fcb9978a97e8b8cb7f38482deb8fcc65 +3e81c68ae1765a07d55adf99a54f4ef4 +3bef65c8dfab393a04c8b00ad7298499 +df651af4eca9951a467bf33745cd0792 +4df79038725c610e4c4f0b2332815f95 +5208c5bb9c34ecbfbeef4f38aafb589c +f3017cf5a96cb3f61bc94023526ef940 +22bd6df82dfbc03ab9addc0a7e48cd15 +9a20ed8b17eeb27416ce27940a0ae80d +a14103a318fca64ce4ed43dd7b7d3d2e +92c7a0c56048f2aa191978a0a52eb1a3 +2536be1ddb8385aba08e86ae42cab623 +3d53840a4183f4536589e1e1cba269dc +942455ef5eabe707131145df68a45300 +3e7b89af05948ea90a0e05956238bedc +71419425a5ebf5f8f1eacf54d59ed5cd +4ceda2e7c654f0898031dd1f4cd28b23 +8fdd7708463b35e222efd57a2f149edf +a2dd35042933e251e4014a9689583fdc +56591654162276bbfb4676b6dfc601c4 +9040695f2f8caf2e72c25113b34ad2b7 +4f022738f2259a2492e0acfa0224f130 +b66e2d6fb3432b5d281e78793aabab07 +12a71f138035587605d5f1f587fb6c58 +4655a2ec7989229c790a548c604019c5 +c47d11b068ed06f137d5b59139fbd57a +70f68d937c1accc04efcfafe2ca15f3e +15f972a4acfe8b8f0c44abf6f3fec91b +2cc71a98188a93a6cbca0a094663e24d +18201c31af277e738f6cd8b56086c7e9 +14fdd80772b1e7c3b17a01409b6c76d3 +4bbb6345c48b0a1a238885eea6c407a5 +67984481de4a645763ef22fa3f8c3106 +987878fc0eeb8bf9e57cd361b187f2e7 +13151b79dfe92ddd15d6fbdb82455a98 +18c89e9d3c6c37501b95e993f24e30e1 +098b462c6a57c741d16017e1581b3742 +3545f29d0effecff2083479c8d5f8cf3 +ea9aaf400100862b4d2ce0534cb0ec37 +e84098625df55fc8be83a446c325c99b +d5796d5ce1291e75440ef5f02f81b683 +ceeb73edb3dfc6cf9a50ca325ea5192b +c96ad66efa6e8b85fc04a207b20cc8c2 +e773e775afc8caf56ecfa52369838c91 +c51bbb2f09fc9fe4fbc9e09f55b663fb +a9480ac14734159ab11b26778a0d9768 +ade9171290bac2863583c9cb1059d841 +ae54e5efc2cca516e0696547597110cb +6011e1bca03d9f07ee3077dc42c394d4 +f3e570dcbb067f6b62a0739a554047cb +9a066380b1a3d515785c56c953cab099 +e32ce8f3a9c7b5bbe99fd40ef294bc8b +6d093f093fc81f8d107f72ebe8e13dd0 +a32f2e82617fd3af54a84a215a36bb85 +fd9fcff6054fba1ac351618075788c25 
+4b1b64557a63956d3f3abe1091dfe7af +7b0c542ee2a51bf20de386a946e29357 +287f91d0e4058870135c3f6ea1f1b6fd +7acb6337d7f98d1088422a87e9feb82d +299b5d784bb238921e203bbf8ca35207 +1f1b09d000010a9579c749a2f98300cf +d41d8cd98f00b204e9800998ecf8427e +b12aaca2997cec15f5010311139a76b3 +ffebf014cb085aecdbec5992b391eaa0 +501ba5e9de7cca56e136ecd0aff96159 +797fc01320e598642aedb273885e48c9 +41a16b4ffe9bfd3c496b337e6de222bd +227e01a3739b750dd481cb1331b2268c +e950f4c4f5085667c6879c65120e91ed +794f40c009c3581ee5cf63c7545cf39a +88045d8850e1803093ff6704a0ec631d +c0a60d437691e78eef2974859e85c6de +aad2e32c0564bc4eed35d3bf353800ff +1441485c8b4219455c2a975c1d239210 +952438ddeea5cf00e87c8a413c3ca5b3 +1467353c019533b552c69c757ddb605b +eb5ebfd9985484d7975833d7e0ce240d +be95ce125038e0e87b93e10f19c904ff +5cfd0d1227e65edd29ecc076f2291a94 +ec14c8d760034dbdde003451852706ae +9d5580f7ec001902b649ffcbaad8028b +c905a383dccb6695fb638268aaa77cd3 +76f9b7aea11e41dbbb2b7c9ef082529b +37e0685d0bfbb50e64f4d62a18b2ddaa +af106d776d773f7aa298ed48912500fe +06aa1f86c793a12280e620b2d32fe82b +971122ac0f58f2540abc53d1f6b15c82 +c5fde7da3761ca207d383c06b071f022 +21fc177d9e7ae481e849580ff105a9a0 +4fe9dc543c815aa150b2c597e80483e0 +28112f13b287ae304678890f67d551b4 +6343044929da1ae04c412fce91426a6b +09ded9375ef5d351c4eaa345ab54b031 +80036a8fd2ff736ef6defcfdd74b32ab +1435973d7fac10fd314603f285397cfc +2745a998c0ef6c3121a68501c504b2ab +c1a104215bf2d47be130902dfc64a6aa +3d1086d673b3679cc388a314e756a0f0 +12f498b141bb82a734251f7684cfb217 +8cb7030f6e6382add75de6ee29ae02e5 +312d7bd4b1cd9401ad724d6aef8f3c22 +8cacad94af1920831c085ede78c0caf2 +2ac109fa4c09347aaee6753d6d7ae7b7 +8ff9faee2d393df594168ec0a0be3b1b +460be07f6a60d8d9fca3bb8ca7bd6b05 +72e6b350491f3e551684860056bfaab1 +81bc378b90737f55edb16526ba538913 +f363db1eb9ba5fc4217e602fca4b57bd +d6949eeb7b0e8f5675b2d5e8dbeac3e2 +d41d8cd98f00b204e9800998ecf8427e +7bb36e766a9c2fb1e33d280df5d0f006 +52dbe039b4a3322c5ef44871e1fe5643 +847d37b48e902864ed3f13af8b468004 +b5463dc337ac7bebc450eaf67397f720 +a1268fdd663bdf60c1c828aa25a182a0 +05d13a474ded2094149a82e75f815f13 +825c4d1fda1772ede0f32a8bb4afaacc +2eb7c15c0cd1dfe5e28e67ce62a8f19a +c9e8d701013e5719761681e7f087cbf0 +e753c51de1cadd17302e46bb6d2edbc6 +45b332839ffeeb65ba3922edd068b6a1 +cf325e3457c1df35e95b3d3526517969 +e4bc10f5ff9d6339ec715cb4eccdf15e +f67101743d4146f2c85b5fae9824ae78 +3cc4fd24665bd72005bfe457bd2877e3 +b28416c5cbfe74c88939f9880940a038 +35f483b573f37d680b0505687444e153 +6c63240f4ba30526b8aad6d17319551b +a32c7c774c0ae6444cabb18e70e87ad8 +036af8e53ca92b580e3eaf62ec15cfb7 +9bd94d9b5b64f4bb918b39254fe76fcc +0756758f0461f1829f5f56dc242e174e +0df56a6404bacdc2a58c929917760a5d +d1e6b40209fcea33dedfce1bc2cdb8c3 +638356a26aa70141192c84db2c12fba2 +bb821404d019702062496205a53f93f1 +ef82fa83db273a3096df31aaa9538987 +3a858b72c620fcd5ac8c75c599cb9e5f +17b9391bad6f9f6c3a2a265753720ba1 +e144168ae0334edc419300a96499c44b +fda9b35a3c7b8e0da55ff61669da08fd +574e44715e196f8c3fb56a7a952471e5 +e0da3504ab0ad788c9f82487d5712179 +4855b048114be00e3f214c6691c29f52 +7d8d06f115f9cda0a79150a1d5523aa6 +8bfb7179a428c849591734a3d9bc5e8b +48a75b3f10a344c246afff802dc4fbe1 +7d7614f1b7af10e803842ceb44023ba2 +0d02971cc4d4f6d398c454f222f8c00c +71a231c124f4bf4e084f5361aca66586 +5e50b32d86d4df1ab2f0409b7a75bc7d +a75be02a32bf6ef6f2273b82025c30b9 +15a5398628dd54f30aa6ee64e080e884 +c70f27490e659b726015425ea031ec22 +96f861c2b1402203461e04cc4325c179 +3ea06f6cfe4ff2ae780a518b32af33b9 +28d41bec786b9d828e004ec6ca7bc6d2 +30b5072b093c12310f0a263be96b3947 +ba1c77584073b205e5a240e711f427aa 
+d7ac3924b604f54013d0a6964eab4043 +9716998325c115b8d561e22ec20f1dd9 +ed158e94afd9556a3d17b327b67ba809 +acaa23fc6e1657d9b1ca3d9ac29aad92 +e5ef3f233b82b477e1357b6148df7f20 +dbb82d77c51678d159fd4fc1561ab849 +67410355d538806ddcd30cb04bc59524 +265ddacbeb524726b0cd64450f493bd0 +548772aa793ba06d1a4b471c71dde120 +f23c807daff05944eba55c8b4e6edebd +2d84c91abfbc7cf958b6d0d3f387c5ee +79c85db4ca3d43bdfc60de08830a48fd +9f25cbeef848dcb310ca2a729da1f587 +fbc40325ef4de0252ca8defff068e50d +7de6777f5fa637ef70a51c89e358e520 +53cc8462945cc5322da69e978c52aa59 +62aa0d04bd06a6f199b76a0126eb4ac2 +3aca28ba1c467977c0f3d6cfca005666 +2f1fb47d0936e4c8bf74391b5abc12c0 +db16f4ed1457fffee72ecc26088885d9 +4ca24b499feea0aa24dcfb64bb042c56 +35ae9566db96d52886d7a2f0237fd78d +c697b952f01ab6d1b33941a18e4bf240 +263de5a6bdc460db063290d21bd7785a +2ca7f566d7dbabef8ca9ef7ad2febd24 +87481000d64801883daff0aca6ef15f0 +bfa6479a4cd0d1d809fc67683c7f90c7 +80f4016b33391c15c2169f943a55f32c +33d57848f07330cacdb7bf1e18d80139 +18c80f8117d83f8f4195ea1dbc1b4d56 +9dab4f2f633d24647e0099df9d485d5d +64dc3fb1fc0c416201751831f13ce3b2 +f1f4a9fb91331ce62e70bc1a4bf3bcad +cac7097b5318ad204db63ff4354da5ee +31dd913476155fc39d51b44025a55e2f +f7212e70f285bcb4991625975d348058 +59ddf8cddb0862582aa775ce64172fd6 +9d68bdbcae3c17e6c42818df96970d06 +34644a72f7f73e048a801030e4c4384e +e2c2e922851bdff0096c1a98ec8dd4f4 +4e39410828c8277403818782de7b7b3d +2d33b2f3ed20cfdf5d8463ec9690b306 +852748529f44b75381af71cb42834715 +b9e834efa77d5e7b37a432bc420e8155 +969f383b7c137dc5bf14c6685931eb0e +5f1b058439f1ee55d15b94bbf9a81478 +65a6b22da03188f54795838dd4a105c7 +34c4fff07e2f449c387524d3574f5fbd +276260737c1d9ffb0d0ae1a38c1b52fd +07373ead31d9513564e650a137d26714 +bcf986c88fb64291a8713f52cf360f92 +f86affbbf60cf49e41f2499d19c06ef0 +3113f36281d34c5f8ef61b473873c8bf +1647ac676adfa442ed54bd70786420cb +4870fb606a5335fb6ebf3163b4ddd160 +61d1c62518e6ad5a711699c3c2484c8c +5183da6e83915373bf18fa0ad75118dd +8c092336fc54a480881a419e7770ba60 +7f361a3b2954e235499588d1135109c1 +f1b1fee1f9993cdbd4108318b33ac93d +d6559136eba595b203a7c37efa2700fc +aee75eecdabf3d42d5630fc8cfdcc802 +b94973e5a9a50eaf7bd23bfc0e1ee8e9 +864fd1327ee0744a283152e8ff47faee +4eabf3bc8e38b79e7f795ef3eeaa037d +432eca7e256a64dacc0c3803a007f3bc +86597182f676571e5116be9212c7c83a +a345f6183db05e135111e788b29a5f8b +2fad7eb1a6b401d87df4136576ff5b60 +fb7bbf1583867701d0bb43d6b57c2770 +3725a639eb7464d1d17f94bb0dad10fc +98a23c1a603838c6e27a780ae8d91c70 +52ad9fce88bb6e6c3de6188dca9f23f8 +967b3eadaf44845c4605d054caecd02e +7e80253f03c8ecf11e2f492e198939cd +0a77b80fa8d4b6760bd814e67d1c0c03 +9aab467c2c47d8c2b62a5e567c1cffde +09f30034068061d347bed61b21daeab3 +bc4d57843e3875f69edad146a997acb8 +0238904a89e4459beba2a0cfd1263324 +cffa9080f58269d6291eee30dca4e8a9 +602548b7a9945f70aece2374af552ba0 +a607b1310f97cd50d828934d6192e66f +f501cc0d602d22e5443a64f9bb9d9c0b +dd14de77a7d28d78031691ba646a9058 +9bd13b70a446b101527b43fe6dc21592 +4909b1bdd6cb44285cb2504f09b3a616 +3af4b6717462fc4981ec0193c28ed5dc +fc62da3ed345f33e295757bdd8951345 +ed07a35db3ece3982a3dad0d7c646dd9 +9eba4031aba8147615c10572175e71e5 +8c6d8111b939333633ec8255541a27ca +da90baeb8a769c91caab9b9f15c1ed0f +9fbdb46f775576474974321c3c8e1c0a +b9aff6abde3d96a59492e9773c72d571 +b5c045cf487219c3daa204edf3a468e7 +118c1f514d443a105ac5e704026c5361 +af4f50abfd45a26915ecff94c54d3f58 +294755c94e7f18a9500248d0760fa084 +5ba75edf1b0f921a70cca75e6fe260ff +12d9a4d595803ce837ba6cea39422ece +0e6d344abfcce8f723edce23c12e043f +17fd96941fd56b3d856c2310dca7fe40 +f1d901fc87d7548e1a83d3ec50a51a23 
+fef802d08c48b1e879efb3a4d8ef899d +880d0aa5921aebc7febca38a443f6a66 +ea73336131cd7e1220c14c9a1dd104d1 +cb3da05034d8a69ae49fc3031fd8e80f +9a9686792a4611d96871e93a81b1f9c3 +5651c8640b2835c52128859a051493f8 +92eaf4c2960e0dc98836793dbdf66b03 +a882db39abaf8b4ed547ebc955eaf6b3 +783ab15fef590f86e180ff405e93545f +1c27c170d9e9827c372faaab15176905 +7f41c8d7e0c30d3c63167bb5cbecf5ea +b7f55877a8ce551863a707932111ebfa +48dce791db154a99401d56471156c287 +465c9c774416e2275f86b644936b868b +1d5d7d7ab422d1582cf8fd8ebd2d7bf2 +5f1b96b40d6d9db064ab2fcbf3df4e62 +b6cacb277e72f3a2084f9023375438e3 +30ba67fe8c78cdd462924a5d403be210 +128d181e2868271ad141822e0f0cec14 +711f74747c30e6507f8ad136fc3ab7f0 +9f81d3e43bb8b455867695ea5307492c +0e0e55c72222bc3b34d472c2d236e723 +e1929f89bc9ee2e70b3021bda4f5b300 +9afb317aa6a94bbeaf50b13126d1fe56 +a5b4903203287f7bf9a70d406e6651fb +a1d4be01f018075b7743b76ae514d412 +e6931c1c3e9719a010e0ff8a243c11bd +63979ca0a32c293465280de39ef50846 +7094c467aed1064e7e84b37cc65e39a8 +d16f8b9d397e42ed0255d18ba72b554b +ce4e4b1c96609d98ad588c567e756286 +10c9f09cf40af307c1fa6fa1e2ab0a99 +b6cccee2f937c4de3a50c21075536928 +f95e8d463f41b6e8b0366a7d07acc856 +fa053eac71110bd75b6e9af27808a6c5 +ccc1f81d2324bbed2e624f6f7f5865a5 +19320e5507e58346073f3d632d5b555a +0cbb2ac5d0f562056e94d29419f531e8 +04948d1213a12da9ab521c0bbf4c2278 +aacec606f77778b71f224453452cfb78 +8ad1942fe88f6daf061fd0ad1a691285 +9105c904ee70eed1b5458429766ada94 +9a4912dc12ece981fe4500186b96ece7 +b5520777d6019e521b2b8b6027514790 +6d34e3e635b6da159b976f294fd887d9 +ac432c9d6a5468826e8d5bbfe6884e0b +7d7d20a695cefc76e95cfbba5422e1fd +902e146c116c751d41cb118e1e048dae +6f7e011ed00f23f1999d44c982dd2eef +82010d8a73234d00daf532c11830b388 +663d9b89c36713f5122dd58080b457ce +1d662739229f68a020f58777f9ecedba +7fbb699ec4977e63c112150c6d4a0896 +5ae9bfba1983692379b383fd5559100f +3df86d35cb84be914ef5bab6171536d9 +f40566aceb83df6eda18903f60ded451 +748e7958a0615e0e9d912f900a536521 +c9cf6d8f1e75be55529e9b7595674243 +2833afc71ebbc6bc01fbf16b07bc7db4 +086bfb4f08174f895ff712e7bca3985b +939cb598854a66d11bb408bc5547ba4d +91d39bc8ff230a0a14fbf3039cfc6228 +1b94800572cb91d7b90e824ef5a7c724 +0d6c65d3048e460a849f86c3c9d98836 +ff4b5cbda4c314f072649b53bc7880d5 +bf2adc1b65d92af42dc4c75dd52cf8e9 +dbcde3d84b11a238ab27830d4cadba2e +c4485a95298871e4948c276cd8f2991e +e0253a399a100dec5cb48e935dd28e64 +f0c8659523426c9ac2a3ddcc182c13d0 +5c2fd00e627716dad7aa229b90badbb8 +8079080985a9e965ebe0e16c4b62a9f8 +1b4ccc71fcf04cab7717346bf7adaa48 +28639d23784c0f5150ccb4a40e8c5499 +a122a6913416d88365964478ffc96055 +ba9db83de58692b4250d666fdee1bcd1 +2f11667fcd77ef3ea3398b37ed217d5a +1b64eb05bebd96becba8b7d9a99961d9 +53359a2c8046bc39e1e18354a49ecff7 +879169c7d803255d29cdc03f98391c01 +5d581663d0189deb4c52f2a62bde5c8e +db03462a68c7b51c0176fa1eda6511e5 +cd3fd5169d7ca7f0f631ed1791bd5550 +2a68259dc7d231016a80996b80eccac4 +c2406590d8aecdb8a7846c6cbf2d78b7 +84607588735822d341fdcbf1d6ee94c2 +76669b161078ed6a07ade8e727f6cd10 +1c8d00ccf9d3eb9ff92eaa5469268d11 +972bca9bd6f838f2be1d895aa9caf505 +fee2b2e4624cabf8ddb1a1d3aa77b6ba +c10d42188a208550c04b791ab666dbdd +8a2e570b4880faea5efa86e88afad0ca +b7589d27989fd909f1aeb6e94f331686 +f7ec74078f29037c45780aeca2b4e856 +f8dc36019ddc92636005c6d13346427c +c9e0f0ab2e1a4e20fa3f6470034dd5e8 +495f9b06f68a3c695e5c88cbfe9e55a3 +4e10cc16a27741c2c19f600d4d3a99cb +e2202d471f0047745dfb57247af990e2 +e3feb843f3122fffa07696758b68b9ca +44c2ae5a2e19517c874cbbe999304908 +61bd5a3ba06f3d108419482ea286960d +2f5a9dafc4134f7d98b4c3c700c6ea1f +15f64c3963287d22f60ff401c3f4f045 
+619a95b9f50517d214390236d4f6231f +9b73e1f002685be818223a3904a5c9be +789232700c2380104bbae1d191328278 +f16279a893f2b8d5384fd91dfc3cdcc1 +b49f822c8297b7ee750ad902714f7c64 +d63adcbdb642fa5e5efe0dce229ea723 +2754dee56b87186b0c8ac3830a8a8a83 +9f6ed3076b58d5e19ef52d36deea0503 +407c3e1769bada353615444469109a12 +2a9e98ad88580352061b932bb09f20b7 +74865c9f5b0baf833c3c4250d8867019 +c491c413e6f86a6477d3bc8bc04d03de +1a078ec21a7036427dfe5649eedf69b3 +512732f23f5d0c91fca1d363ceade86e +312d33d58215fbf15b0316eb0d4128ab +0781ac1c8004d1e47fec00ec3c28a95a +a0885d53b2caeb113098b5051f34f613 +fdfccff63894a202757323ec768d7eee +a79d934fffdf1c012b54ae10ec262c8e +5013678cb16a8f244347db271518ce78 +3427ae4e40d07c5f362a8d000e26b5fa +faa4b9e85345d8e27176574038e15209 +c5c6bd18438e98f8c13712a5fbde7268 +257702261dcc181933df46f572573c72 +80f4342a5e4f26270abd179956a53e21 +543486ace6c8094eeb886957c106e5f0 +0c4853f96745085cced384585b1080f2 +4ca9ce35201903f42f418246fe6b1a7c +4f695218e9e333c6fc7685c9e5d51e7c +fb069fe5b8ad641abe35ba70bc1489df +c39e156239a0a98783b551f993033973 +5ba2ab65fa19f8e444e70e9e35b149f6 +40e60c1151f3e668d9e53313f472c490 +c1149c964499b3e034b2d1643bddfde0 +8e9596988d1e9d40726dc840145b3ae3 +40e0e840f814f689077123d55b7a71a5 +8443084f0bc16d1e1e4b0acee3e3c791 +3678ee9925205b400e9c6fb6978f3ed3 +e26c166fe5784b7bc69b6eeaed083f0a +135f213b81cc60acc65dd2154c180c81 +42e3e14accdbda699c1b117d790ab3ac +820f75126dbfbf77a7c70e5cdc6a2b8a +25ef1353453a0b817ed33bf7c5e07fa9 +a1a94c7086b0defc70e8ccea2ac659c0 +b661d7f13af2622cfd6ba708d85f015c +9f6afa626af9cd40e3e52f6af1d683db +f0155cb6ca3b08530057fb9330b54302 +6c946882d0dd2f67e3f212638f54c206 +e7c21c0240e81081fa8685d969bdeb1c +ccc1d9cbf8fe70896cfd53dd889a50a2 +a2de2eda4e97a09b005bcf39bdd1a56d +0f9277baee0b863edca0ae2756616609 +d236b28ef28ba0e1c9851740051b85bc +b2958a30a9aaaf1be5204e180ccb9412 +258fb2300f14a82ef12f4a0ca6ee2444 +09e8164414477c148aac573185ed5ac2 +2b539a13831868e896d7f18f6c1c250c +50686c59b8477e29318ed825be52aa34 +03a8c348e628df4f15a0ab42a8b7290b +1bd6a78a394b27f9981908fdd94a0a38 +f9f79ccdfd8460381c8cf68ba055492a +00c60ae4dfe5b4391a6a58a2c8f42baa +d25d7d16638320468c39ed6b2153ad73 +df67d7b25245f0e88170aacc0a3f32da +d7c0ff819a62cc98222cffb518ebd305 +936b1ccd4afe0d5bc686eb492280356b +cd8889184862aa18090399b224620492 +e262d909b4577668df3afa2edf30fa02 +37979fc8fcb6088ddaa78e073cf07933 +2941b9f187606caf4180ddce0564f640 +f5a437ad5514032b3d911e7048b15fa7 +791925b3d7fc2072665d0090cefd0087 +0ad628ff3d90f2b978f3635ac82a581c +7d148e85e9b40f40082c34c43d707c48 +41cf63d9403da69087102a4ae5ffb75c +f4f63a358ac0cb56f86634fdff999291 +50f8fa988ed4bd4d18db83b43bf82efb +4ad5059fcc1fbf73f6f8cd101cddef88 +5c5c2fccbcecc15c3f78eaebf43e5a49 +421ea07627ffecc6a418a8ac1a664506 +9282758c4d12316c2aaf055f46e23a3a +ec50047b858709989b83f5a01896c646 +3b61cdf92cbd66b2fb824d1da488476b +d3c9018ca77aa152b2b7d3092f8eea8e +406e38333a02b9737a7d17e95b3a9cbd +148008039d100377fb12c5791a88c0e7 +ab67470f2debca1a16411e70e3c6d7b2 +fc2d3ed4f8ee7199377db14576815adf +7888352c576f33b5c90ebfdfb372d86c +1638f25b3664acc721434534cbe6094f +600a557eaca5515bf37676e220ee9a86 +cc6f65539a61c9f57b76bba2798480fb +708bccaf71d5b3167b062c5f1909c7ac +b8f829f2ab768154fdd0fe5f1d8d2236 +44425eb77e774ff8e3bfb9306804f5b3 +31d0217b2e8006dee903867d371f971e +95929326d434ef0bd049e3e6ab21ff06 +3307034edaf0b3e9aae79a0c8286608e +dacc99b7d7990d887942e2ffaa67e910 +7e79626061448fc41827dac12744d20a +f7e8229a271763a8c9318bac7fd2cf0e +2ae5e3d603120d52fa66430fbbed73dc +0d21acc9a406ab40dd99d535d674c7de +aa551dab754a0bc2042ce25656ffd476 
+58d3fb3907cc15166f249fa119ad15b5 +95ae7e67dd1135cf1c0761388471e7ee +813dbae8f56466ef140e2cccf8d2f761 +e610563c8f693ffcfd5f1cbed4386984 +0554a8c56e73c78e83e65ac11f752911 +1f6589971327328ee16c3df68c0515bf +6b70db2c0044a33f39f3d6899a0cd1f3 +dd3059a2fecc8c87a55efe7cc53cc880 +15e26ea5034e09a40d9e1e7f3b13a8b9 +a0fc4c4763cb463ad647323bbd462557 +1abb6a543692a89081c6351c574a6d8e +6fc6d9d94274908c8cb764a4789b9c00 +0980272d9e6ac0a4c6060155855a091d +e7be7d2785bed1d39c1ad52fe1227922 +4d48d65ce5400887ff4d2901745b426f +19159b14eab65c7dfa682f4098e4aece +4945da109c19838bd2c465341dbd55b5 +f878f51c621e60675a0933ab7806f753 +b9d344090ded42bb4673dadbd1648cb5 +24bac23e91f35b3c22c72d964c7bd8ae +367827ea8e0547795b759e25daae2960 +b37dd1ca734a92d422b68d74a3ea4bfe +6191386eec12f9052def3d4e75b82cd4 +2d026fcc106f64a86825677f9544eeaa +3a7aebc4796b5bcf92c2d719ef04c2a0 +7709d3bcd4e6baf2bd231b6a75d96647 +e99de5326fb2231b8fc6dd9dfbedcb70 +0a2a60f44199cf19a1005df334a95f9e +6566f0dd8e441e12828c208771feb9fe +cc31995abc9f3c4401a58e9c2b37128e +469f4c3d42315a8cfe85590734c24f15 +2e3ae8d444fdb6aadfc6080d0e5dccc4 +a8f52f08893b0c55511a1a8af52303ef +c38fed233ac83ee22985d7bb011d8c24 +74337300486368d98c050bfc9d88860f +90eb66b09c1470a134dd9d005ed333d9 +3560e945e96196906fac38a94e5e9413 +ee307a19bfa63cea0ecd77e80f46466e +866fb80e584bb0b16abfa9771fc6d412 +7de3f7ffcadfb59e97e43c91cbec228a +4e9b1407a9c6178774c230b0becefd24 +26b32639be4a0efee004b28e2b36d7ab +233641e6d10f787c6b8757504dda4af9 +c66f76f5c81921d4c721351a46171bc2 +b816573c0c73eb1dd1100e05b44e459f +b1a9dfb893e6fc2faa872e6677c9c438 +86ca93714b475785252149fa75fa4b20 +43e732e0e405c7f83181677d94ff15c9 +a4629b6b39eac5e9a8a67d93870bab18 +07828e988d5c061415864f13dae128e2 +2ac709aaa136551bc00dc0a7498b4f75 +f14791b6126556661a8609fcf9cf8d9a +81c5651a19e7520e07eece0c47d548a9 +c787ad665bb8d73d45ccc115bda02a2a +9a23728c1185e48a60665e7f09a77c0c +88f98ab0c23811d119db30e65ad10bab +b4fddcb2bee3f5123691b3d547d0a81d +afbe879b6661b3cf4490aa11b58a6adc +ba334b97cac0beaf4de820a15f8a7b5b +b97450a3a4d544f4076c7657bf4d741c +34108610c14bee58682b0e1f22ddf07f +b75b1dbc7ba21812f2ee5e1fd5d0d1ac +1eb3fac473fa449ef5a9f7c7cb559d36 +45d3ffd8ce0a41aed76800acba97b5c3 +b87966e34f606507b124cdf4f582b05b +85e52d1c4018c2e5f4bc5702385e9502 +a5e54d2c7d4ccb4f29458984c00a1d0c +d1c74f7b763d81e0cf7e6ba6ff154d29 +de060a0494d229760554f0975173819a +09fe7db517c7be7a1d29962f2c6425ec +ce3e1a0195510b289533f8208c920168 +40cf8e2a61b2fb90f3694e571b999057 +f1d38db80ad63441cf3fab462f75bedc +6b6a4414e5e6d63fca7e37b2a4c4cef7 +36690bc162233445d8d80efb2e3fbe1d +bc455bb7e7416d9429fbf45581b16f05 +7582d4c6e99c6f95abe9650e0e31c6ab +fd5038d4878aeb91c916ef57cf162d93 +84926ca60833467d66442fef3e817293 +5ddee6cc345c5cc4b38596a4bb32df3c +5fe9ad532730f4d4bdb1207f0a659dc0 +ff0e40b13d998f85fd8127adf6a06870 +c2c8fbe47e6c0909b35bfe4ea89a335d +907073a310ab2683ed1d024c4cb71183 +850714880bfd5b95f45b362ac53540bd +c2f96ee8d9679fa8a19e3430a6189681 +3be0eeff3280b8093b699e2b8934d6ef +0a3b0ca37773f282925f722d00a25ed7 +b25136c1999a179cf832885f6b80bb34 +d86da1a8a1a690e3ec665bd58e2bc444 +b8e193d30c1b8a440ddaa5aaaf5eb41a +942df9a2c4d5e0cb90592976cc9e0930 +be9469719aa6efa2fc3bd5768bc406d7 +4411dfc21769da817be549cae022c270 +3c5249f2fbf9a97ab20ba027f7d4eee4 +c15b5d003c679b1e4d28a1c3061f58b9 +df24fa9c66a0b1efd33de33ee840178e +673c5b19c0eab46c6fe9a9c8d72467be +79515923453cd7f9de800edca612cea3 +03a5d737e4a8b305f89d9c00af336f6e +04e1d6e1f6be8e5946e29058c4ba816a +d31d4660eda23a6a2f91fa651e4735ae +8ba6895e9a912f024d710ed65b1a3337 +f03eba65d9a3a359744b3e7e80e80f2f 
+c2fe9bef89d05198e0058ac8cbf10767 +5e3291440eb8e99ecf9f18edc507f220 +49c5fc7e131ac465755a868b1264b2c9 +3619c739584e4328b17ba3888ccae9fa +1498d87906ad9f3c89a520b670991a31 +bbf4e4f42c748123f97e0b911abc4101 +17946e10f80965651676a2effa8e6bfd +e5416bdca358655d1111b0a5fc018875 +1708e92a876e363af38ac3d07f666be9 +adabc298237c9957d992632a3e57ec2a +b3c443e2e77449c46d65f249f670cb16 +eccc899fb5fd843e5f1aab0a8eab0e97 +8b3fcf46b1db902313b24bbfef322da9 +06677b2b0aa7fdb2054217462bd74178 +a5b8612c2f1a6a487f4575a4ec06ecef +54b6dda339dbb0dfb3e31ba80a4ea65d +afd64030efc458bd94f99754c0a75937 +06529e5e27b7f39b735366453311cb8e +73dfc3add3e688e1b2aef14349cef68f +82a8ebf740280004d41d8d369ddfe22e +01bc4ea7525228339d9bf1acfcc45cde +63718efb29598d7a98cb088afe8cc2d7 +f2eaa42fbd1005ef619f5b0e7f04d907 +d93a617cdb44d96cde26aee9b9173b64 +e17fde92e5b67f253d6cc2a0a68ee497 +163b9ebe600874ca0f9f4f4892be8218 +cad3fc9dfeacb0bf062776b0d09d71d3 +02ae6c0d672c27c4f5c5bf3187252df5 +1b63e68404c5b96e701d6cf276e23925 +9206be8cbffdb9d911337ece834bb19f +673741ffd40830244238455eff1b68c9 +013fb3d2c6e688f6a7f8d8e310d54ea6 +5a62f39f8bac1f5ebf64558e50722bef +be508ae76dbacce12ffe3845921ad366 +ffc3e057d3c6fd7dfe22b517c0a5f460 +436a8518e3cd28d1af1b6c45da2d1b20 +830db50fe0efa4b4e0eb49ccc27a3d6d +231725b83c8fe8045c6820a61c64adc7 +e53ea8bff2e36b3d7956824b1172b037 +e229ec3c2f98b30a38e372a9925410d8 +c52aecafd67619a18416960a69b4064b +187d5b402f530bce5af869b616c76178 +a5310c7dc053ec1d619438085b2bb4de +039cdb6eb1252931ad1d440cc4833d04 +8769bc4dbc5210b1b68684da3efaad3d +4d8be5dd2b8ace90b23cde1d5a6e1824 +6e323bb70a1618bafed880cb13abeb0e +4627d9330987962140caa27243e0a19a +077c63d9232fda5f89dd66640fe9f696 +d7157325c1cd2ea0a1e5f7746fde3e66 +5223d0d2d4c416fe813a9069b963e7b6 +ee2957f52a9a1ebc084f3efd0356d5b6 +24d5024366fd70f1a38a30a55a985ab3 +57c6c338981b328de32d307b5041faca +afac564e2683d7371e438524e7f54aa6 +1d14af645f6e3eb9d6dc333aae31d601 +b8cf0ff0c7527c084d288cd0a69f0bce +e9eceeeee445e69b09f5325344dc4193 +c834519bd7d6bf9dfe3c142f31c89770 +0eccfcfbdd494773a3edacbeae318ff9 +81fb518a04343acff27b5466993de1b2 +f587adfc661209fa8a7a6b6f1eee2229 +a82450ddda326fc6b441872cdf25c5c4 +b209548b3e24695e802f2ca6f40e9b6e +d808888c79c4410f4ae5d9651a3a0911 +ed7846417ed36d78f98420b2594099eb +846ba1462d1b03210b176734b229be2f +76c9478b5430c1d4ff6fb26788a326b1 +273b29402146de8d8e434ddd25f950e3 +6eea1f41443f84723e206aa11527ed8b +9e273e0042b30e1e567be91d527c5d52 +1de6cc8795798019cf5b7915b995859d +c2091dcf653a2c57c043d05ccf79a317 +419ac155fff72132d1520b61fc372217 +3f5358f12ee26473c2cf64bbad06a1d9 +2f77aa76ea2d2c320b96c1e042a7f262 +36b92e925b56174eebebb4fd3af46377 +145883609839e99b76480010f2c70980 +f299266446739f624a3ea4eb9aab7cae +ed1fa250c86b3791bbb4148011280889 +f689611c0777b189528d43d1c52e8270 +883545e60f715ea5c76ad1b56fd2e0f5 +158fa6fc928fcedabd3c98f813dfbede +b29ca94f244c0a2562fd45ec65117d87 +a6e1502c1ac78f0a5005df81bb7d3c72 +7da2b139af813165f735e15504196b27 +ba2bca2510962a335e1474664f89306c +842776ff5dea0dddf9c3dba334276581 +62ca28a86b6f57a167726f370de98be1 +036dfa5a307083a4d3fdeb05857cf9cf +5c30356d61627f29b22c5e06ec32d6ab +6f4078b0bba469a2a8e107cf5112f7d1 +3d9a82c46f98009c6be2cc0faa30a4c4 +f2afbc2bd73e1f57fc9236a0bf6658d9 +541b503cbb36c17997c086ab58c5c8a2 +2b79c2f716fbc7341d4514258e70930f +455a112a5e40610e9bf6ef00ff469d31 +d3e77fd91b1ae241c1d2a663f1061da5 +08a1baf3d3049e58e5914dfc2607d676 +710a4cbce90c32e37f59c0dfdec6d13f +4d3d49400313f0f93a4348c8afc62fe1 +170e269070a75c871d5d62b5bff5a222 +61641a6546c4120a3c5a969795889d92 +7daff7b9fcf625b051b5a22074e35037 
+059d7d6cd3d4f813ed7d834a560b0456 +2f8e210ce33847e2d2543a5ee8e393d0 +d8de9e747f3cfd4ca800e42c706f1c56 +25c5ed66b7ddc39a82133e44d45b7c91 +e3eec04616715e96afe9a79397ce6254 +44c333686e5e0bf3388f1b1435a0c632 +7116dc93db27d87a765fe170f2a78ccd +5140a49652595180f00fecf93819281f +d64f35b386609d7d36417fe5d65ddc91 +df9c725bc42797a7505de45c69cef3d7 +619669472a44e549aba89b7db278fd02 +4401aa34a7f099069709aa458ef7417d +2f2d9109e62ceff4834227bf9bf85637 +7635d7795f8d86b03ab45206848f5126 +285e5320f6e062ad421bf69b5c1d263c +dc58acc116a819cc133ae897a9520173 +a8ba483fe8824e21dd237ed4cfe97d31 +6c9f9e7e6e8e55c5bfe9f6f442ae2d95 +7e2303a5b43c74aeab99b729e7da3a9c +bd7d3322edb04e382e28b6208dad9ae7 +aa2a2ef9e0eb580fc8aec78d8c9f7a40 +6c7d65aa1b8c46936874bbb3c150a285 +cdd8a7ecbbac2a8f3d1f205338219e8e +d23a21002bf1cb18d25ae515a22ea4d3 +b8afdba7aa55dd52f680ddb45bb51c16 +e746a70844ee17fa5d89a4551df74165 +f45e2fab33603826706f6b7ef78c6768 +2eae5526e08b2d26f6aa372acee62ddf +65e2fb76ffb25e54b2c6f6a5871304a5 +c43a356339e9c8dac6dee6108f5ff551 +18c28ca30e133b6d40a4df3c8f6f5995 +f79d4e705f5a0986cf0f17359c03cfac +e8ed26b1b29dada57ceb318cee243d0e +70f2a9f5eda7537fb117fa51558235e4 +3b88129a9096746186732dce8729f03c +874585e1076c8eb7e29b34e49172be69 +e73a9c8d1d633733718487aecf75ffe5 +02a10a7d57bf1225566adf2d36682c20 +1d3e7ec268f643346d7dba75d24c67a7 +29f64868b6143c136470d0d08e594baf +28c309d3e532d440cafe684ce532a549 +1e8db1f80f6a0731310cf4d4878cc483 +a77c094b8e983bff61251eb879523462 +c597217ceaf8e3eaafb5d96795f6905b +53177de08ddc05f1b6ef3bae1bff6c51 +d41d8cd98f00b204e9800998ecf8427e +97a8a6730314abda0d36d15bd89a82f9 +8e2fa162024524c0682de92c40bd1c98 +ece0e93eb2e5883915ed7d9217279d92 +63d3be7315f1216bcccadcef7e174a90 +c9d502e8c6730c238c176904dbf02127 +337f2c4053dfc60ad249e5e325c3fcc1 +736e60c6498a72aae23ef74450097b5c +57ce5106101cbfde84b66867aad7ca6a +5ceb980398f64b4bef1b27c039a18383 +e9ec73b30a8bf84c76ffa1d285b18640 +c3245a6e40f53873da20c7bf3f7a8fe4 +c4abd2192e089a53b444f3d816ae4ba0 +fc03f7572d44187bbf3615ef090ed9f8 +f290b89d98ef6af6572fc7b0af686bec +fb8205612e55efa78a7362100df145c7 +7c75667c367ef91f92d885c8036a2dd1 +7078308a69f8f75d3fa2989eb1bbb28c +d41d8cd98f00b204e9800998ecf8427e +602e7740b62640f4a9d365eca00f61b9 +09fe402465ace4327e2d1df594efbae4 +dd5191d0cc32442a939d3c730d8d7d1b +fbee8c1b708b3c8eefa477517ad6ac22 +45e04bed69a43493dcd175cef21cf0a1 +aeef2cfb73596bc60ce2ea7970dbb842 +9121129230a6b2a01a5aed288e7cfc8c +62f5853289420fd4e182426495469d91 +e184a3afbe1ebacf56169c18a0aa1940 +a5fbc7bad1bd76b96472cf9bf1aca7ff +525d42b2936e8b5e975d80ef64a0ad14 +674e516b693bf1378d4e575aed3a1b45 +130c16f3c9f16dc331e85735db299dc7 +5d3c4ab8057979dfe2d42993843330e4 +7914f2b356a89cb1f7fa3cb70473080d +a529f6bab62f052edca66ea7a75a159c +d21edbcdd48c96c213e5f2610303f5f5 +3cee85c2482e2c1e7bbdbc8d5c6ef071 +3ab74d2eb950af01880ca22068d363a0 +f48ee7afdeb40e4eae188b6546fe9533 +472247fb3ba328c09aecf722b6fb7226 +4396157e4134af62b899603bf84a877d +4e40a5ec58775faf3792dc510f7df5b4 +1b27d4f77e981b3edae64bcbb01b9493 +27bad79919d58ed5855d9ac21e727d75 +0e27ae61abdb240be729cc5993f9062c +8d5b8bc5b3fbfe717dc93f9c9534e0ac +ac937bad15c76147d0a5e8c48ebb2c54 +2f5762e8aeae63eaf7a8b559819d261f +540a36e7e0de6148cb49d74d75afafa1 +54f4abbf98e57af8f00292a8e765370d +0ec0ce7e452d38d2599fe2ceebe31a97 +4f1235e055ed7fd08a517c82890a7056 +ffd0834ea82997e0a01cd396e6380280 +7a3180c94c7ec6468d82fbf9e2a7279d +93257d0579174b017c3f6854a15797eb +19efc67c11487676713bb80de3fdd4a5 +782db9da8d2c41958c2795ec7d97505c +4a7a8e391f2781cf83b7aed0a00e9c46 +0001ddd02f5b61915e94ae9736f88057 
+1a36f75907f844ecb73d2975ae685027 +42a661c55b1494307cd67e73c1fee23b +006c830bb67d5b8dd61f87ddf9cd4607 +33c5972ba5e98a7fb9f2e05f9bf98c37 +56521902e28f564d2f9fe70f427a11b5 +7b12471d5d3fe3b1c6465b88c67811d6 +8f66c004e40f0d9544e8c7e5352d7f53 +cc4422362ef0f0d1fb0161482d717736 +7e87b58ba4e36ffb04d1b0a0e7eccdf0 +41e31d915f4b4fd7678c3ed39477d16d +a12530b07c6ad4f5e299cd4d66146719 +6e0c2fc9953fdb791256cf681dfa4e8d +f682236bf243aa17d08cd2269fbe0b42 +c6a1bee94729b38dde9f608af064ee91 +51f4cb206aee1b6086cca6cb88586481 +f55575c0e639b158dab393fc1a2a2821 +758a0cb5f25bd4002367bee44fd5904f +331524d6f29a189be6d03ba342d8f185 +72047dde350a4a4f69899fe1b6ff56a6 +e4e7cbf20011ba853d7f19d23031519d +99d2c1f0f42f8f5c57b07f864ecd654a +5eda3e801f710e77daf176957f7649b7 +8b279982513c2f892d600131d0801844 +5a9a8fff968a3f96c943af62b10e80a3 +c4ce736d97556dcc3c180ab3540b034c +d41d8cd98f00b204e9800998ecf8427e +0a956d71668ddbd22b595194c8ad06b4 +18c96622bb9ae1175450a06bfa01569c +4819b8dd8dfd0cb7e10021f3ea88e3fb +88f4c3384e5f65ad98e14a102208a5c8 +39d833cda2d4513f19d47caf26611e4d +9b190cc2d3d711b38da15e81eecef6b8 +4e218c68d13f9bbe04d20f5ff5f2961b +83cf4ecafdcd9d0b51602ca9e584f728 +62ecd647528b7a00d45d219c29c29efe +d01190db7d84360bf1f1f909b11b86b0 +5ba081c8022beff1e440c6e126ff217e +1cd4db0a832ee94c6faab490020de1e8 +eca3e3f52bffcde84df927712a74e7cc +7c370c0cc5e24f0a63267d57924fc4fc +4e0b3a94ba52d61d0a7981d70411db8e +4498373c6bb417f85522c916c5156f32 +e68c467fffaec526ed3004738e437a71 +de979200a23f2d610d6b00e6ead86301 +67e1a319475dd1d7f3fb78a3ffb55227 +7d0d013cf75d148c6ea0bb77c117a75e +5196423778cd8fded2c39a873578f536 +188da6655b338daf88537d8cb708b0de +d768b1ea05947d97bb55817d9e0782be +4b4bd89706b6366154b2c88d03cec3fa +24dff5acc7e4b89d47e4c690d931909f +8935b4d6d495a6488a7b2420c278c1c2 +993539ffa04dee088963276a79832a3e +2f033f1169473e7eb12f01b9fcac81ca +eba431e41b8a8c7d87327eaed4708a36 +0a0754abb47714d1921de1767a2180b0 +b3a6952034d88fc2a8c00d440255c5b8 +abb95454ba58261f1bd5d5d8d9a81d96 +98dcdaad52d04209d944cfbbb732e8c5 +b9440145a44f8303f92fa721c2fe7614 +f17933d9000f48122fcab89a9f8fa95c +7083ac1a76cfafa368e2ff8e99385cbc +38c9f4a086c9c7475d20809fa9df1cb1 +5d30368d1c795ef2f3ddb4cda3ecfabb +1cc055a881f479d6e9246f0defb3f436 +8202fccdf8485525dca0c27cbd1e251f +1a990a90f2261b6fe556d505e38385de +157bdedd1886a38e72437ba620020528 +6d259214baaf2bcfd8e9a5d09d1d1861 +ffa88c345f1816b454baa72df3fc31f3 +1611c6e835c52c2286f929e17a195d5f +3e222dd2c5fcdb5e9b8188c21cd1cf1a +276aeba3884b6fc1415570cebae0607e +aea03dd2ed2393348525329e4765e69c +50308bd790cf2a4a4bfa31ccc85379ea +af10189e5d7b4dc3b11276af992207d0 +de384d2987c8a1e863a951dc13624d35 +ca5fe74fe068f405215fc74c40b4e402 +2212cd96c055ebbff660a497750ca59c +72bd3776a666de5480d5eff4e21da294 +cd5363eab1ad2f627c109074188dee80 +2141f98d28158e19e4017a72b597feaf +0e5ef57b0b56755d11e3b562839ccefb +823727dd5b1367a8106d821b4f13a3f4 +637a436a679a0e5f749e5a6a7ff7e1dd +2806f28cc9d16c4785c0306be3e4f48e +9090cf67b72ade3bfbc598da4d6dc09e +9920f3580fe9392a0309d6bc0e0edc8d +36558a5d9838a175e2268217608dc0c2 +c790df6dd6f9789707668587414aa9c4 +f05c6b0c8dd7b3989bd088ec4c639cd1 +74f8c07f190e859a5394c110d96605f0 +789c8abe1f385d658832e75fad188f23 +4e909610790ab63cdada6d2767488ad2 +94e20482fb1b7433f5d56c27d3c89206 +c9337d8ac178ce47372ad9104113868f +a69edab4165e1e608867da7e15b226b8 +ec758e39b57632a35cbdf28dbd0218df +88c20a509f20d684d0cf8da47358f9ac +e3f6fcdee6f68ae154a53df59ae68eb7 +2e3b5e81c90a0914d1d278d8de0f1133 +31561abc5bcf6bf5e91aadcecdf2729e +c7b74ac88191a2038d420176195780b6 +2c1193e60fe6fb6c27d332d1382baa33 
+0540b3bcedcca3cac5e0ed93ac4d37b0 +e8135c0683f9c4ecaca16f7e6cb5f93e +33403cd90ac1ec29299b7d8be007eaf7 +385d310ba6a72db2d1fb5027c739694f +a895130283f4a81917a23fb048e054ad +10f6f6c3f1625309154b5f4e6be7cb0c +4d5213d93d524878e5e2f6d90ac21d28 +329f4b1b823b9225085028a7ccda77a1 +939c689d69e807bf4d4382a423396255 +c6bdb95ae39dd5d0c9da38eca8602b63 +10bc4ea1775cb5d89a36d6e1ca4b7c14 +1f3a44704481685dbafb3e579b5d3978 +d41d8cd98f00b204e9800998ecf8427e +969c1bc0a9de01494ab7d5a42b0a974a +b015f77e1523ca63adc14a3f1a2d5cbc +eb01e6972fd2d642689f5d3827fa6fb7 +1f2d06e041b9e480067e5b5c6b3fc21b +e4e8d7eeb7e515a7d87a8442353cb707 +c45cfdde702e22fe6e7a24a9ce39979b +6b692f0d6a7aec63c00d32eefd6466ab +80ca418df7604f6c251af921e21ca35f +c4324e510d50f74c978c61a385d45594 +86c0ad2fbc28a11d480fb59f9139bd66 +3bfde42cbb4c17d581209cbdae1e9d6f +f837a2bd92444e2111f1ea8df121fa0d +6b08a61a880849c935f4f9f69b22f52e +133191cb2a556d4592b004ae633be70b +92571c18fcbd5662d9065f9d4b05f0ba +a82f094baf7a006043cd355578c11034 +c4103df69877142fb49c8e3feb6eea46 +236306c7f43a1e78744fb9ab4ac6c896 +08c37e6f7d59ce6c6e4096455924bfdd +b2119e6f6409264ab2304567b6584c19 +c0feb91bdfb90dc0cdee121295a47b10 +a3a70b1f454fa517ff4fef07b6a3df5d +ac0f5c154abf85fb276f2c048c8cc833 +58b54c520a88decb53f53ef0ad29f4a5 +89c052b6d81c1f2fbd44f310f46bd400 +ff8de59f2b4833287a6f0e78370432fd +640d70c72f7f78180f033a7d2d20f41f +7145557ab368bce363ba9d468d0825a5 +a3fc07bbfbbb26c8a17e52f70193d5cc +8d8bdc542900f0fbe65d4df33bddb818 +7b605cd0ed194a386971a2c76a0242f0 +94b5b3502c6059dd169def9e803361bb +1ef34c3ba3579a518cdff9654b31ba5f +726e93b10942d29ddfc8ad8a5f95bcc7 +a357fdcd17f47785554ae92406f8333c +6082f0ae50c99b8ed7cf9952603498a5 +602f1e636abb9c1f0b709b4106752a8f +f573365f6d7c9038df90e3fed76023e8 +7553e6bc0733134f80af4c279b81e9fb +6a65982e062d713a725eac6ca3189d5c +5628da9b1d5abd89a70e34843109b0dc +e6c4c5b31ad7bc24caf88c97033496a3 +c09e8ea89bb7757a0dc78f264c43a022 +939866a3189c73506de64562af21ce8f +f5ec67daa330a2f60dd56295c0d7c0a7 +2532ad02034895b62d5e3f7656b99bd2 +e99355b3d5040da46f5053697f0afc66 +968e33d5e886db57328124b018d25cb7 +cbc17c50c806de645d3e7147f15b77be +fe7e27dfbaa37ddfdfaebc58514927f6 +ec639292ea827e4ddb65786aaf3f6c9e +c390fe77eb876cb08510e4e66890ce33 +abcb52fc056dd9f9d68a21aaaa483fd6 +459e01170a669df337cf3a7d0b9e152c +3129e9546ab643d10cf573e138c7f01a +f10b7cc9e7e77559007c3d6eb149a489 +f3167620ae797cf62cb7f055609de4f6 +b0d9aec585baa36397b5aebb62fc2c90 +586ad35f08acbdd5e6f16f9e9cca0591 +ac0dca788e0061140689a2a85b348aa2 +07725fcd3059147c94391addd7fc9036 +1c804a137877a543137cb46fe120fbed +1d54bc8bbdda19d44eaddc13153daeb9 +79692bde4e803c99028ed13b9a20c71c +e2dbb5cd68053dc53d14ad48590dc487 +6ba7baeedaa79d95a001cfe52989893a +4e89b539fb338d8fc68c851a4988c41e +868f74d4cf2764684ab49d689d5551ee +f6083ff32b5e32a42b5839b8b8bf359c +22663efd4a322036dbf23ea05cf0f3cd +bdd32885fcfe8cfe2f81546afff7251d +68a77f96764650f30398b11ff94f6a7c +7bd24b5c1124b1d703082d2c3d0a568b +74ada87bab26893ad01238ff854216c2 +beaa387024756852a656bb400f2bdfe0 +348b03bc7e83443e75d37135bf27867b +bed7918c8002e7f06e1547ed25cd6e48 +1cce866b69e92a40dcf4799a1bb24369 +7fe22101da301959eb94089c81d2c575 +494242a26be35edb02361104e6b23b90 +8592f675a816019f79fb7cc39907f493 +323d9b8ad0f423e4e32928db81eef159 +830296b674205b25dc10664bca927cc1 +5e405a0e9bdb78df171dcd60557e9196 +b73a7fe0f20f6e8da25280cf541adcbf +b2c5292b3445c3698d8c70fdc6ef9abe +e4bbb379b8df82271bedad79dd8af2b8 +40dda55b0ac7a1bda0068f8393a6b879 +b68dd7210a67eb778faf3affad3aedda +73806305de20dd837dceb0979d0a5ef2 +2cc2b2d79009859b9259af56c7a92404 
+34b5c98a316684bbd7f2e81be07f3c01 +3b9cc788d55f0bd1671cab7612f192a7 +54ab8e644bd148abff3eef20137d2a67 +2ecb6789ff278e498381ce9428a74689 +ca4bf67ae9e2fa7a11ef78ea75591726 +90dfe5ab3ddec301858be01c89218b76 +bc9f0bc319b27420eb2280315e47c9de +eb07bb5115d89568a1e2c07c73bf29e9 +ea88fcc9b7f7e07542eb86b745495a54 +901d3b336a6f91b504a76aa59ce9e8b0 +3eea2b0406e4238efe51b50ed07a8af2 +f9189b71eba04ce2a531d3be76445f53 +52ae47111cc1b6c49c6d74a5c85178a0 +fc102c1f58c8c261934b03bfd86a5860 +df573074b46388abf7a6d85311af0d31 +89a9bf16aa0fb4cb3ef2816ed84425e4 +b74160aeede8fe10ad82d7138408aa92 +a9c78f767baa8b6c47ffdb626142859f +05368b5964bc7240857b574768d35202 +d6091c9e9f468278dc35fd54bc33c890 +8c4ea3367866c4fb9956b65afbaa3a55 +8a0b4d4de91fcc4fac7b51a3242e2447 +6dd6b5b98b91c976450dd1255b91e220 +e28e60884b2c6b59209c93402cdea067 +ace7fd136d81977a8f5dcba91a9d3c07 +22e5289c6d955140ae835c698239333c +b728b50e5e25fc246f0780cfb6b2a53b +8005c464fbb83ea4bead9ef92dd6bd29 +72221006d93f64eddf8c0042e3a56fc9 +d8d17e2ee3fb09e259385dc4161a919d +2f35fe335846e630f8fb53101f272e34 +caddaa43eb08b6f90174c3e6629c7d5d +3b2b777c8fbad698524c7808c9b76d63 +88d0d95d33e0394b8d894db0b9b93a5b +eb9356302191c9d9112cc9e26a102679 +f239bb983942061876ad6cfdb4af597b +41bfae21bce313f7e72707231dcfecc9 +10331f5aa2ddc3ebed66669fb4c0f06f +d41e3711a573396097ba0df19a441676 +082377f124de597348da2aec4ebcb0f1 +b79f1d4fb660fb13fa8743d9bf6c1e05 +c1709c9b8a5f41ccbfea63faf8498921 +009b81d5384a3df7e6ccf5fc4d3dcc3d +c8de16f6b3f11e6b610103b260e32267 +24fb5cd80fe11627ddc1a0bed5e8ddba +6c666425a425e6e3b140587aff1ef0e5 +9110497cc09777eba1ca05f82674beed +e7165e5ca77fb2c5f6dd37ea91e622fe +0d3de6579739541f457d199f1f93e0c9 +301dbe3da485e3743d13295c4bedf15d +de81b2afebf99fc165e6f6a7d6e100b1 +6ddc7c4b4373cb7510ac6cff3d362502 +9576f0edc821d674187d13523d79db2b +c3cddf960e7b1754d2c3601529f4e40b +b760d08ecdda0fb899604b3821a5e71e +f6fce42e0ceff4d44af2ab6c3bc434e4 +8a9b9e58d746546f6785e0fe5c841df2 +7fc626cd03c6d66059cc6295312bf466 +e5df5c7bdfab5a873ad46209237392ac +b7df5d17a8ef9bb8c29d28c3059c8956 +d70a1122a2dbeed22bcbcc868a9d4aea +bbed738387f2c48d96da841cfe612e3f +9a85b189ce6adeaa4d0a243aac02a85f +fccbefeabc27788594873fef9f7f5c0b +bfc2a791fa9a6adb51730180d8fc4d61 +df75d55ae79d90848d331f043f46da94 +4e7159dc22fa7f60799dd41000e47b58 +5a6d47e536935fd706d691bda183e08d +4284c657e48111f5cbcf834e3098ecb3 +4f8dc069e8d15de56b0756f6b0a64369 +7bbed8a381752e1db15f6a1818c9d890 +760a82a6f45877e4ff82028ed39aa908 +c7fb41e35f07281c47fbc6905575dea8 +4a4f161085bb0e752562ddb729dadde6 +8a76184b236341f5f42ad77d2504160a +2ea2afc519ae405c531a7894392e87c3 +f220cbd294423b9088b8f9280d44afe8 +5811583721ce4a4c2f3867f8bbcdc076 +5bc961cdb6b0ff06ffd7ef680676093a +d5d4f7cecaefa9f5fa1875879f10b4ea +efc4ca77f73c22a36e87b2c61a23cf4d +1c32bbba50600b4593368736187f619a +574723b960f63c52da97f349e8cd3f10 +e8d6c8678844dc55c5dc6472e9eb0cdb +4cd93ecfa096a4ff8a329ed67baf801c +58953c92246b79fe5e490486b19ea633 +a03ba481174e489580c6c122e7d45d54 +2dce5cdd52c6c074bf06f73ba30c81c8 +a1fe90b526d4e5f4c673acfbe85e1d72 +642b02b33bef57d01c7698a8be86f8d3 +c5e2bd10071294a9d32bfb409a1e9c20 +e79604c478c0523724972a936ad6e69b +866773852c991da1c7707db2cf9132b4 +f6263a5dbb95f9d1b1d04f3caa6ba805 +2257ce94e17a1c80ddef1f303e28723e +11d4992010c407674ba7d6c6af83528f +c1eb277232d2596a2ceb62e466f2da36 +add917be60e4dba3dec4275b7b6de6fe +73ae40bd1760bc0f2c29b6013f587243 +7d114db66f6ca0edfe296310ddd629b7 +9a8a3d99e8a09bb0b3bd24d59413573e +1932ee0be6c412b30e1fa7cd114f2538 +e3e0fd4d7a9de1ae76be14a45a25510c +b6688db35a3f569182d039ac9247a172 
+3762c55474d0040df7524cf7862ae7ad +b2757464316ab8f8104f0d28e229cdf2 +14c1344ce2798817ebd66d5e8640071c +8f5b8422b03507635276a26a54e8e757 +a65c7e89c0700e2d7b2dcd3f4debba35 +75f696c7e8c829e8c34ece5a353ff6a0 +22ac583f463385bba74c420676c7c418 +029bc7b4cbae932623650368aa65e51c +c8f8c2d78f9bb8351b0d84402390996c +691b50819795db9595951249507297f4 +768333278e5b26173ea3446be597add0 +461297f1147867e59a28af8378bf7361 +d9ad6953146b97cbe57ecdfac7d0072b +b6ebda29dc94b7cd2513f859788895d7 +e6f2de877e433ff5af4d1d3b091d306c +296504fd41f47e159d769648c06ebdce +88068b367cab81753b72da2d14c66c93 +61a079a7f21cec1c90df3872475014a8 +ed894a80168234da9c3ed1fd1910882a +fd7d5114bb083109184f258c3d91733e +b32fed06a7a30c9458048b7fd6c7e1b6 +18167793391010422fdf013abf500e7b +926f648787ca65934ec85f1910589866 +f9a29ee6c973355f9a0d39839fb30b87 +68f8288f55ebedd8e091396004aea917 +4abfe4e85ca1c6936f6979ef3dfd8530 +e1da7c6855cc529e36ccb44dc7a49442 +6886688f2c612167306a0f1a115efa85 +4c96e85ee57bbd19ea962744454456de +b4e22391105612164e2fd215355a8673 +a4048424d891b6b1e1e699fdb774bb71 +2fe2a8ece0021b318fd98125ddd579b1 +fee5cd43dfc0f2ea3d7442951578a0df +fbd3afca19ff4ba98fca0c2198a65a83 +ed7e7308a031bbef2160f87fb86ce3e0 +36dcf70a8ed97a700a1df6cc99884893 +32422c1a69f95874e2a8ff46d622f71e +ed759b61993b77eda1b086ebdfffc0f9 +2f75c3975ebb05e63f1a44cce3510b3d +0c5140b4ea14a5c67908bcb0346fbf8f +6b7b1402606c01fc5c64036e8537a445 +5b4c2e5982a72b06aa1571a0aa7bbd8a +96fb6f21b885a10b469dcf5fa3ffa22b +7ae78ebeabe4fc9faff3f50839c7cca3 +5b72be0d44cef72feb7a3d3f0118e67c +84cdfd49a3fc7b2a167e2c6858584e23 +8da436dcd1c9363ec9d58ce407364c7c +712a3c44d440fb87df50253833522b43 +d41d8cd98f00b204e9800998ecf8427e +e3545446cd4af65fed53c322989e1324 +5110dde7769813e01c17deec258bf463 +c6af50c71c84cba5f5790fca923a43b8 +3c9741af154cd23a7c3f8839970d1fd4 +ed7e0a1b88527de2ebaa0feba81617d0 +55d1b2ef749bbb2ed43680fd9edb4453 +ba8cd0ba79694ae2c8c3801283684bd0 +bcbad1eedebf1f34e89a482d557b23f1 +da8bff83d559c6a3fff954b82eb59c21 +8c8282c9172c224b78a11eb2ed15d847 +ddac1fce45b90b41e5b5610e95eb5338 +0a1520f5cd592370aa92f24ada273c3c +3ee21d3d11e7e4b2575177d910f62355 +5acdf3b3bd900baab0705beb6656340e +22e71eb092855ce7dd85b3f1808cc36a +a39e29a3cff74431917ee870a01f5f9c +b46324e50da91d3b2075d806380b74f8 +a38a403d1db7c0ebd151ea063cf63cd0 +dffc39e8b7da5d2c1537df3bbec36c29 +fb005bcd9103415894bfb3b9522c6d06 +a17611775cc73e53812f3fb59b903cd7 +dee3106fb5be832fcb5cbe68f812267f +45fa998119db3f62fd75cfa6c060c7b8 +5c928921700ad05c97b7e5361f863fbe +79164b63f6fa36419f85d2b412eeb5c5 +808ba0d0ecbc4caab528ccf70ec32451 +787f76a37a8b700617448d39f07005e7 +580c709eb1fc3c91cfffa7cb29e27625 +cece116123523dbcfc8e30eb6b997aef +cd0a3975d363c6bae6cfd009d500dcda +ef62d7e27e5a610ae7294e4016b04a1f +973591ec4d6f10baf79743e3e8a361e1 +ee8781e78afebc6d9d7bf838d5ed9804 +064d84f4191b9962171b6e6e8c8b86c9 +729e550f0ef26be0f18013af0653540a +79c5b0849da364d4af61995dd45cb409 +2e0520a5df78f76de83ac5fba174f512 +8fe18a9490f176f6898f665ae2e9fc6f +584ce6ec4c35b3874cac3f066aca7626 +d41d8cd98f00b204e9800998ecf8427e +6b9de5fab007c4c0a9a51f000544ba98 +842e8880ae8747274f75b6a9115c10c6 +c2c6984c4237cae262f80dd4a9bae2a2 +fa3bb15742366a55462806a940119054 +617d8ae2a11f835724db24046810b82f +311a9f4c041fc0a56d29545de621d2b8 +020456a0e30ffe62f63bd38786a369bc +942a43ec06a1bfcfe5278c8e76fecb13 +e6f9e9296e655bf3456d523d776706ba +c51b15d81129d8948a8566cc8634ee3b +2dd02c79399e1f22497807fd5c75c443 +ebeb0e0b63855644cc87db254bda8f05 +5d0bb1e4fc9367f0b314ae3b3707764e +d74d4417447ddaa99a0c68a16e5c82ca +c603cb2131f1403d92904172decb1df1 
+bb985a7c5c102078a7824fe54d8bec68 +086287e38be751f51302078beb205d58 +4c66ace3d90918bb4eb53ec8957f8d13 +63a3bc9d017d90e358ef3174d288fd5a +0def4092b28bac86388f554d3a594f96 +813a69929b73ebc701a94b4c109e7d4d +e1e04e7aae11e21ae99b4e425aa4afdc +81c2d674999b4f589acf4cf8d9757df1 +1deec4fac809b4191945b8f345b22c9b +615d28774960191a17e63dc9120b1424 +5ec0f861876a67b28ded6add3459023d +65ba21ad8539b8c31e68f57b2108b392 +2fce06e38cb4621fa25d6464887576ba +e513d214e6af0d0cb60d79a9b07ce945 +594048eccc11350942358b41dfbe78f8 +117ec30643a8170d058cc4581dfa5f63 +ee1106edb0be8cdb8245cb10af0f9a3f +686ea323eb2858db9ce088a021993bb7 +0ec41cfeed12bb4a766f58bcb7d50a86 +995d7fa89f2623cba76ac418caa35a03 +d8de0913476a0afe2e927456ea5e10a4 +7c7f0b615d18d97a3a0890b4e10e8f3c +d2141580c54c790d29f11ea85417fab4 +6abf4f3c45de08fbe2968dca3fb98ec4 +f7806bd589b2681fec6c784c81a8fe7a +542a5ec70662c4aa20614ab460b9ef61 +20137c05bea6a6c36b01fa30dbb40df4 +8894d7ef26d046fbbf8e449caa150cc4 +e889fa69255ceb5c723dbc82e1b540f4 +476e8b8b7d77a9c2c40fe601ea6b080f +d41d8cd98f00b204e9800998ecf8427e +19284825ed85f92e90407850fabe34ac +c15f2b924a498407a4c402c78c09c890 +86ab3fbaff80bfc68969401c8c390567 +84a658d3813aee7fa910ac9af539e041 +767c4703e78253157e0e479ebb424108 +4ab537ea120f531b5bcc7606a504e6a3 +869b2e2ff2a108bcd0079c10851dddee +ca3118603183d2c24b4981782d710d25 +7626834f36e25e71695d1d1f223ab532 +0794d28cb5ef942046013a6ca207840b +61380866738d2c67c3efd5d5268ec90a +e95f361ba68fbca06c51e06c18949df6 +e3817734c23da4e7056a6b82bc5691a5 +19e75579adf38b6d5958b8dbbbe7ce45 +8a1ee8d04032f78f390c1c4692c20de5 +bc09a2f12d47c91684a59cc96035e8cc +a3f1fe3c6b7d7e52f2bad0782e4bda6d +6519c37fd554e9e8ad02205784726f4e +5c5782eb3cf8d4fa57ca961e77f0481c +1687d7e39483d9b1e5410c13cbf68ad3 +3354f7050da5409c202ae9533a148921 +b28a73f020194a630a17caf31a19bd5e +82d64b3352e32e3fbc347f28a5e4b7ef +3fe14324442510aa58c73e7096771a98 +dc64b980cc0c3d3aaad2ab07d91ed1f7 +0afa8dcc227f5ed32fc928512e34dcb6 +f7fb6594c4b255f1e6b9e2fc71b31cbe +f0c60ba0a1fa5a0a6a36257545b85f17 +4a4ef6b4070f9c899b31a001bfe32a0b +b3250e2e75ae5d29ce66fc0c0ec6f6fa +1e3f7abcfb294650e2ffc63ce8870b79 +03fbe0e3473185fd2632cd42ca3ff636 +50655bef521e22835af34f1fb06b1390 +dccb6a898a49fd91b4e9b6f227cb1834 +7f04308510cbd32d07834677af7ed17e +25ea2db382b03216a293ec512d6e45fb +27ecd173a8d57124bb4c0e2b9d3847ee +d91fb37bba1aa1ce4c18884b0e01005a +ec8d3571eb9f4a74ae9e81d3827ab50e +73417850390f05c3ff886abaf405145d +e3e08137d355060a374004f512ce4180 +73e7a512592f1535addf9af65fd34ec0 +e980689450ede8f2d1218a730cb1ecd5 +642f369f7f55dfd38d33b3f61bbd9d21 +ea6efba8f88a29a81a8b52b7b18091a7 +b49821fd5879e082445162ad8ec4f6d6 +cf93fe580df516fbaa0ebf115bd7df23 +90ac0487d268a52b95ed1e629f5cbab5 +8f246926207507ae5afd44aedabcfa57 +a762ed5322d64866a896dec6160a768a +6f5a8ad64e9db1c62dfd349627028ca2 +b89186f62f7fc6d6958e20da0785d44b +872c76b95a80ebff03e68fdf075af38d +7453c78c2d290a71567d5f3f2259ca8a +81c6e29ec04cdd94145c5177125a26a9 +5238c613a901067c1e747e2296dd0fe8 +c41befc8de622b1d5bec875739216bc6 +6a8952e9ab762148ef6fcf1e2b6a07cc +96efc7a03c87624fa400a2d5f7d9af5d +2fb790059699a8255d15a09e687d9d3e +6b98f5b9125b41b6045edf3068667b9f +51b0a4714e70536276cda9d02bce4b1a +a6da6c895528f9923ba0bde1438526a1 +271f4ab4dc651996ad4da1cb1ddce8f9 +54be80e9ce779dc23540b725b4ff2dcf +462bc79b6584f19581f433042f761505 +ae5a3d39fb4aae4376af0eee146e1bba +d3094bed037ed34346528da0315d60c7 +0a30b84c8165eb7f173534b27592c153 +65681f866a47997ac0a5c0867f36e010 +5dbde3ccd0b34c20f460cdbe7c0a1515 +c45328779299bfe8ba0939b3219bfe43 +4477ebb38e4663d45d257b9121b344c6 
+4dad8edd9115271f52411ef353cc937c +0652090a9fb3067ecff144f2e17ea26e +2f16d9179bd485da39a488e98ad62984 +e210eb2f0543c5a49fe6cfd6fc536c41 +f8b7f0d1521c3b3b047049725b371f0e +35e1a99fd8353dce1152d358787b1ab3 +bab7870b5dc731ab624847ceabad20a0 +7cb1bfe202dc5efff5fdaf4b4c96a24b +b6acfb6bf335a25082898900aa0425f2 +e8aadd1ebb4557cd59c06bf2dbb16007 +b370e946f7c4f03fd8d2d462da7ec7bc +8cbfaebfd05e5fc13edf4ba28c75b87c +971dada654f118021701c4a04cbb3a29 +7d681d8d027e95d328f851a4e493c830 +35506360400b4f21a2db1fac7e467ccc +b33243b6212317a2fad35e2a4fd5938b +58647860b27ff13d8d02cd5110bd6841 +51d4367783c20bc57ba01a090621da8c +2513e413a528c41d5cef96fe10257a36 +38d5bd432aa379728c94e18cb6e26883 +3f882f184408a428b3a69ac678d958f7 +52acc6fc1264bc9e8d129cb786df1160 +25c267a0b466556c2b35b3aecd5c6235 +795fcf771e02c98764c5d43524341796 +0a83e7f102b54a328aa241e12d925315 +756268e231946404abe982956235b0c7 +fe4f28703f73568456e1a1beb32a5e88 +c25ac11e39a207e1ddf9ff65f37ab426 +8a35b2ed4e56da50e938810422c7f560 +ef8c45517a739fa5a2bbcd4e69d957cd +c4f1ccbe2f86a0ab189f8242ee1bc511 +e46d367d815a108746db9a87fb98aeca +3cfeddd2b3e63b4b2d3ebf1e4fdcb212 +ffa4f40292498dd18efb5312dc81e5a8 +94e2bce96fe1e5b1f704ded0ef969e52 +81581066f0c7b7f59e381abec7b0710f +d6b92fefc325251878685a9a1301a5b4 +14a0d48fa771f3d682a4be43a131b976 +f5508adc859920a03b24655645e1a737 +a138ba6ad42f6b8273cf097eca56d6fc +20307a053780870198081db97997b3fa +6285d4abc7e854e475570efa0149b92d +5ec212656a99ea2b30106c713be528bf +540d180db2d41eb3120081c9662038e8 +8bed65992eac4ebeace7a0a00bc1f137 +6a60ad795eeccd5dd383ddd5d366b849 +eea312dc49acbd9fdcd70baa4603ef3b +ac40332e9b28156cb82a894f70f3b914 +75d4ba712a5dcc1d7123e38e8a7474c7 +74a4ab216048a6d64ceffe0f92f90287 +6b54dbf1ef40b53e836971a1900bea34 +0aef067780fe62c9115d9a395450b6aa +99d19f78ac3ad06469791c5e60be597e +93b3f6e78232791ab215c4d092d40db6 +67acb553241a9cea0cda68ab9bc8abf8 +2b9f2d6c552ef123c685c73bd61dcddc +534113ec3d88360918cb80e7781aaebd +4549c2dd51ffa5c86385e35a65d3889b +5684e227d88fb1e4209122e70755ba66 +a0be555408b5a03b857b86ab12d8f8cb +5d7df8424f0cd9227208f454ac38f04a +afd1e90053bed1e205d8caf55a114911 +a06c2f79b18577e369bf515ef840805d +5c85f81c36b99cb21c2ce6d52fae0954 +acef97a0ba258c04bdc78c7933f9d12b +55ff7d29e9cc2a9cae7e117445ec2af2 +ac040b253196f2f71fafa0e12c0dda94 +b702d4f72e34ae46049ca38fb35ae5e6 +6adacefdfabeea71f77a65da1c012870 +fe2e0daea77284283021fd379430ecc2 +c15649a50a9494cc0ad553e061d5b2fa +6de19f30ccf186d6cba8af2b47427c5d +8b77ce4b5642a295c861979695ef8ae6 +2cffe11861213c771a3c6495a1c6d8d4 +819aaca44ab95835659d576caa575b39 +e41e3d8f2685a64be4a5e649adf6b547 +8a87bfe5a44452b86589f8312d168a2b +7ec3cfc808ec31762686f4a855c20e70 +3ebb187c99ddca3ac21520f17e2c76de +bee6e08f02072b48c303b801350b3b3a +2ff0a5522798effca1f9c4044a6a6e3f +dc28e3ccd5338c7962f7466f634eb615 +232181ec82f80997e57526353cf258f0 +63c2e8793f6ab37201635e46ba951a11 +55026c183dca75c30f22973c171fa91e +169c11e7d35758125de32b2cbaca11d4 +4a02ab882a8fe258366eb47d869dd505 +d8f8439ea73f4a9b67108165af8cc6a2 +e60df20791eda7e41108208baf20fa2d +75c9738f0bfad7306dc46ede7ffbb3db +2347baa7494881de2265b48f4df043d4 +cd73cb4cd1d2ee9a3a3fecbecf6507c2 +eb60ca3dafb4061cfaf17b77822d644a +4076caa4ed51e45eef0a49f7ed0c7be7 +4f2b73bbdb6feba30c021fed162e3230 +612d24bf39b0d0b82e48371bfd0c7bc9 +b89078e40ca35f1a59022333360d5ab3 +b2b0c55681b840c1f6ad76a57cd2908b +9b7c65fed3dd5394ebb66bc3199cbd97 +a6fe526eddd29f025ae10095d31af6ed +7c4b1063d360c7bfdc5abb0ba803c924 +b86e810ccb3a07ee03941f15837bfb35 +09952e69da88b8bc814d6420095d49ea +9674ab992d6bf5e1950e56f0bb3e7795 
+b2d0e03f153cf617447e1711191be346 +513d1d21e76c6f3d7a26d1937864ca09 +b7ae441389fa5bf1a749a444417bb89d +f80ba0c3bcd685ebea6f3c1776ff264b +36b47cce3927d0335267de9dd44af37a +570a4ee8758a25856d67cce0b8764a83 +2e0f6e47a235693317907abf21b61f25 +8adcf9d09871466ac68384b261908cde +95c61aab39dee746d5fa9d141dc533eb +bcfb8f69331dbb931dded5225827727e +16fec16abe4aa240ca63e812e012a5be +d140f4cabafcfa2e60f2c3cd47b947b1 +ff9a2e2fa9acaef7b1e82728401d0649 +cf1cf3375b1c6ac27d7cb424159fbb11 +d2a365c26504fce9e766b1b29fdfef65 +a1b3188553b656f24fcefc9d9f2e6541 +04aa7e80753cfb39eb8111e6e08e5675 +da02b12c62159b4cbea11a38cfcc5236 +7a3a6388503765d7b8e9cb1aa4be8eb0 +cb01efea76ca47791e0386c753249fc9 +bafd870edb3bb192c797d71a4ae5858b +8a89f7ffc91fabd9381630125b980cf5 +cba6a04deaf91360d4bd65b81e3e9f23 +4561b4348d3c3b0bd506d460b0da9f6e +d6f890f3d67d5ef924e893c7188618d4 +88abce1ddfa228379e23ed0e2479c679 +c10e344bda762585faaf5142e58de8aa +165cffcf5cb145ec2e46ceb950c27450 +024c334a457bce0366205658e90a75b9 +184b3c99f1ad68a2dc0a119133cdc4dc +46945aab6d2536490a77ba1cd0f645cd +33b8f33af56b1466969bc5789784a3a0 +23e84f72bc6062841c6c3b756e601a32 +ebad2fd5cc121d37f489c8f46555ff68 +4d72e592e01b1aa0616c4cda47076d78 +c7f7fa64bd28d2cf9b19900fc8a50076 +3ef5d9f053d91b6467a7bbac463fe14a +edd3d1dbb0f15780c8ad5056e523bda9 +363f90810453835f0f66a01a2eae9baf +9a609ed9858a3aa97632605ce86314f1 +aa43b3ba38888edcfabe6e76b3ee64d5 +7416e404161d5efae4aabe2250a856e8 +4547fa0c5f8e3ad986b5c7c1bf661976 +4d40e22ff16d0ff2be14b8ce5974afd4 +c785a9ddb0523622cb293f7281fb647c +6be8a533391fb5f33a32a1801b6f1b60 +e188d0f31dd187076d06dcd0c47e517f +ca1176d23e0a248384c71b7e51971db7 +3361617251b5346c57a53dd38dfc15ef +024e3441fa9a2838033c6ce703e38d91 +83279e3142acadec21b235d94a510e49 +7c28a998a25c5954c80f6b365d42529d +6f7378914b9150c08c50e6887f03f827 +d251e908b5680302bf2eadd34d2fabfd +40dcf1dd3f72a1f1574d0deba11a630e +9e7ca534d5b3c246548ed0c4c45ff35d +e09cc8a1ca1bab3709082eabc53818c5 +78a3412250128d889c7d5a5b3d700152 +506f3f610c246d9f9aae0d41ed326ce2 +7cbed69b3e41f80dad4836322d173690 +fd9a3adc60b669a42ecc2dbb52cc4461 +7fab4b1e803723ebbbf893fcacab7058 +9c58a6ba8d0aaebffc4ea9fb6b028eb2 +1d30a0da4a109ef22fc1e93f0ee4c420 +2daf73157524150f4e332577d9262882 +9745a3b08982e1d6913d6ebdf2ff4d69 +d4b7ce0fb96365f9bc16ef62b694d259 +b8660c67030ea79dd9b9b186d01116f9 +a875706c97466642c4350cb802e39026 +7af78c263d4e0d2c9d52d23d4c8fbd70 +42c6842acb75901171f1fe6c4ac0bd60 +7403c7ed9390f128605aac7541819157 +e30c3264011b4ca431653713bb6ccec4 +f78403119d29b807a8dab4ec8b949d48 +724ea9d5bbd3723b2a24925c5b2a84f8 +752ff23317ced2be296951b33de6058c +b430dccf182ff97fbb784f3a21b05a36 +e548610d655c60de0b210f51867f3344 +69dbcce1ff8043d87adfc224c079cd7f +04f5223155b1499b21ba535f133b6dc5 +1f3024d03f530dc8e926e3fcfb6c78c4 +5b8280768d04f7da5430a2a46ab771b3 +50ccd2eb5215ed0aab7abe56afb9e3cc +475b9c4f2653c6903f3bddbd203084e0 +f0603a57251a01f3a875fae78a7f63f5 +a8a4d9710bcc53b5eb0f38d0152abb81 +559c455609b8e56df1b7d48f69232b8a +c7883e04223dda808756d2ddc533007b +421e6dfc0696764bb9ec9567790303d2 +2d83be94d9aaeac57bf91b06a1efbc68 +e1a903ac72d7764fe4eb5c6afe21256d +ec2ca25d57fa63b071990939cbf2ec56 +d41d8cd98f00b204e9800998ecf8427e +a574d23d3596e1b4ab1d282975aad638 +9719e67bc20e812f051aecb58afd46ae +589a8e554b66cdc0ea8e86c7e32c77cc +7428cb8bb43b574626dc9b0d3b79823b +720735a866d9ae080b5c01c993a6dc30 +541ff42e454d6b53a2d15889b303ff38 +7172ccf127e51039245bfe7c92276bef +76ce2c185071d05918b8c87bcedde183 +4f0ff8078089d328bdd9019976e34291 +6fb6fc10998fa81baee1088156ad9e9e +c016a915a9587e21242528edfd78ef44 
+a1ae3c5f480435b98938f257c0874f3a +343ff050d2d172596d377126b46fcff0 +5687149f81695ef2c6ee6f625b9eb1cf +64db42623fffd5d948d26d2f44eb92dc +0ba76415e1b9dd96dc84a553d1314efc +967c8c410e19c0afd862cefd55311662 +e17cfcc4d85470d9042c23ba99df86dc +eed8b673f96f13b31b7e2e421657c040 +2c4f3aafb774b823cfe51ce6c268a0fd +cf3f96943dbc0ffa9d6b13ef82ff270b +f72742df058f1d1d3c2a517763f2e775 +af8f23c15c0cfacead6bab0a396ffe5f +6c4175f49238593308d2341669fcd69e +24150ddde13e957562d975759e3b68e0 +efc491af74f7158739cd4f5e638c42dc +b7051038eccc2802f0b1d465bd7224ba +03366bb827ff250571d94a3a7812051f +1fb2ce8337bdd76f2aa9dcf49358ca70 +3747897faae3b8a170783f510a8a4b17 +04d47a20af1e41ed9d52bdb1a09e7ae7 +3c2aafa7f28bbc837d886c5b5baeb2bd +9b9824cf0d4f19db39b620409855f2a1 +13cf34337971081ad3eef9efc2510ccc +bc1917a7f57908c7ce93578e44070cd1 +a8f3ec3cf2b4ef34e8be6ed8c373a227 +318be22b280c5335b0e59e2389e3f591 +ec3c066867fb324f4dc1774a824d297a +55415fd578f99e0fd4fa997e57364b7d +f751ecd31998f5caaf6eec0f1c7b20a0 +e92b5b59ee64b86817021e66d73b8719 +d06fff632ef7c699c43535d5619745f8 +04f13dc3184cca1e9de4b335c0a8d03e +7c12ce0c29850af19777354f057ecdff +8393933f8c748dd3b03ec76fa12284a7 +fdc798b00fcada2d7d33b1c466b5d462 +5238a9ba8c549eb107a2b994ac928f73 +34f8363a64cc4c8614539676402403bd +2fd7853500be96714d93fce194e794b7 +7a305f7f0cf03309a1040344df348619 +b2e8aa817970ad36b66474a1498a77d1 +ce8038cc94ffbd0ef559183708abc73d +0e9df29e8091a01b8f5bd8196d1e38ba +25fd4b906cf1461730f08031c2b9361d +928303fdc1427fdb3649a6acb4a0ec77 +8c254e59d91daf0d7edb1fcf941e7678 +84d090500c38a4304617315ffb2956f5 +f32ff7d407a2265bfe7a6f164d555e06 +5f39b662466be3ab17fc4d5bc7cf1217 +a6e849b44a7cb0aacb4ee5562911d2c0 +be59d844147d68dd863903647ec23cec +29dc50707af80637bd8621a92c4f1903 +3ec1536df49c6ff41807d2da5be3033b +9f8f15e6645b95c5ef2c8a710bd1b403 +661b62d0975731f3b3eefaff3142a32f +412635f60376ecd534dc022b0692dfed +49b2cca2605cd72705d04fbac09fac28 +ad980fae2c268f4424041953678b88bf +b2313cfbf70f3b9e5430e2daeb4ee828 +99c16d82c4ed81a74bd111ef19620159 +b2fb903d02ba9623c8db14b5b51b4a15 +454d1298fe8762765ad251a10e35c4b0 +5bb6a6bc3e2606323c36060eb954606e +35e93d8ac68cbea85282c9546c80d13f +2a5c9ee0b0587fa54253b96b464a2f07 +e61eb3042caef7a7519b7b60790ca6b8 +d14e5c22c0bca438139a86df71487557 +8bb1f73b81c52ed872a04f20da256164 +3e0b6d311054f6e68f582dcf00b5ca67 +ffc386589a93b394e2b5be9800d24042 +b0facec33d1f61fba7638523b9bed212 +4c4f18783a575b92fb0943030d90d2d4 +9856beb4c721afb7e00e67affc10711d +18b3da1e4d9157ab6e9706a9d88d2d9f +b96ddde13bfeb6822e3512fb335748d4 +191c3e02edb1d753f490a411b251ce3a +3eb1c8cbb4c3ccad43fa5de198490dc4 +b325176ff0bb67ac5ef6e9862fea28ba +de3d0d218abb4103bc2d11057cb94803 +c7e502772e79803383b2d5d706523f32 +ba180213e38aa932320416cdba7a2921 +b2d622ea6af1636f897f6f919099fd88 +d73726cd47a86af5cf0ac2ae08aa29ab +cbba3629ff6f8818eab47391855fabde +05b909b9c733cf01784e32bcdbf420fe +86ac9e85a1be146a17df81028f348de5 +7797d0eacc831e34192c37963b797383 +0f15420c0792ed6c04461fafef1920e5 +4190aad8f52106154452d4987a2471da +729ecc9e7929500d34a33111e50997fc +e28035446408171100922a39ecffdfed +bd55bdf914330376f692c97fb35e86cb +80d09e1a4cf708cedd4b7b3bf7ae56b9 +4791d44a5913907384e25a3b667c67fb +7d65b2cf38a9671fc87aaa3e583388bc +edb3cd668b9456b1f96f3f15818105fa +8e729f986138b1376c83c27611f8d5ad +9cf4c53b7ca1efd444a05368182693ed +a88556f2f78fca0d7d9f4daba8eeb72d +bb89a5a13aab5223e9cf9a3c7c937e6c +6f85277a8615ade68bfedf2cefafd483 +c7b51e4860d2cda25d24e26c5baacdb3 +6e0793b24bf5ee23a2f02c4af6707888 +820bd6760b8553d84ed541cef13aa492 +17420c643906bc71f48a1f9e4129f54c 
+248a1b5bf9b846695f67facf31c969a8 +4406f8e13838a88d9137aaf90d4784c4 +94963c247437a2a0321b9f3122b89ee7 +e6998474cb54233b455cd43b4d0d6b1e +eb0cd26a84332fd8aad2c97dab602519 +2e8e6df51c063e513a8610d997c7ae75 +67fbbe89ffb34c680ba3e290ffbb4662 +6e7e6dd83e1934f492b48e809796292c +0ea751fb39bfc32fbb6a80e8a5cdce40 +eebf2816b3173db9f71db4c57aec4f5d +0a7ece842a143a88803f02d33416585d +fe00f24c9cddc3e591ab0dfad940fa42 +ad93377138d952ef9447917416ad7f80 +6588a485f91255dbe04127ea3eeabdea +cfa45db1b095d64786f413aab5db97b3 +24bdfa902ffd0f10d8ec1fcb76ece6da +fda45cc5b7c2345d9db1cdfa34bf77e8 +d0dfd207b651e4b89085cab9fc76a13d +4cb354f2ddb5e52849130a8d08fb1b3e +fbb6013ed3bd4cede1f5c13c5d74fbab +5c4269d258b4ef70265319a91aa49e3d +0ffd7a752460957cc998c4932e51f31f +b45bfec7cf02e5d7c26cd4f20ed6c02f +3655265d5d7cca8c4740c39ff6a9de7a +16823ee3c22cbe522fd60286e8eacef0 +5d8e0926ce0ff9c7dbf2e8d100a141be +3e6ca19ee3d3d1f538dc09c7edf2ec32 +4b9bd5af359247fb5c73c5f1c25f672a +e023eeaec2e6ad3c869a2bd2823dfd5d +f40a4adcb4be700d51cb1955c19b566b +dcf90197cbbdaa3279b1aace86491e04 +f6905c9ed93b57393947452558e10031 +111c39a99781c5b61ad4a754ac72715c +63493d9fd03a39e282986ca636201086 +ac224b50b3915b3ae8dd696efec6f41c +874c7ca93c05288aa9eeb97ddf65582c +8ed6dd7337a5d652eff78b22f6473410 +eb258e4afb915492adc7d69e17d3f088 +1f428e265d734a7ea228dbeb5dcb98cb +39eb2c02f77ab8455007ebeb055034d3 +bf70179ec85506e6295a411760a08d66 +2fff297f8f68665b78e7356916866889 +81ef76f2946e59e4fb4622a813f8fcce +bb2bb6ad6e689c378d00d93e828bc780 +4f99005dc924bb17e338f7e7a9fb562f +c5a729e225658d9effb9a99c52f33006 +8483349fb5ff09c5978703adba3727a3 +50191f7e96139ed9dd2a4ade41d423c9 +fcad5ef757a5be8f05e15f5a787907c4 +095b64f78ce8bf5956f84aaf191595f5 +f9a1cc194e80cb6715ee61164d3dfa4f +5382358f3941e801f2e266ebb224e3c2 +c2558eafb98c5ec5d3cefeab3cff325e +92349774488d804e96fa8fa2eed325a3 +2afc0190b90cea14c5a716d571a0a139 +786349ae1cb20f6e05f2bd8f1d5c6dab +837700ff5d6207ef227d649d945997ff +e52e567179ff7c6c8d1090960d69450f +afad950cc5b4513d5b90b03eddc30339 +07a7a1c94e40ddc1ead7813d0afd54a0 +23dfebf8fec0b9eef2811a3df1fd6e0c +ce1bab72b4b737c60fd09168840569e4 +dbd54915af3d9e9b22453b1d4a7a1c10 +3c26926b84765da1e234116b65d64512 +4ee63cc922a581adc17241569e90dc9a +68891912130a823f0c87fdb3354c1dc6 +cc1ff896a960097a2d900f522166ae9f +84f6bb11a88e6e5cec616e570218f935 +85cb14681e62ec5505c774734efd5dc8 +c76e6e63795d2e0863e5ff4f6c047b54 +0c965abc895cb60422d53e6c062ddd88 +e5b8a8de65abadfe9cf578ad77a95a2e +60a7a9e345b9c2365160a377f48f1c35 +b01b05d61fabe4f6fdf3eeb5b8b180b9 +e08732ac999c9d62e77ab31bf359e400 +46f0d604d8dc8c867a62216d20a89ef9 +5626278ba823c38157c4a7c416b65478 +91b96623501cb7599e528915d1321eab +cdcbfb987955ad93711f22f77972af9c +ab81e7b51e1c2b0d579edb2098432fdf +b391dca4260cc0090e9d4afa1daa4de6 +fe0acacbaf1a778e40b30e06ea0b013d +5e1ce04ee017cd441e7fb7b173d1e70c +7163c7c186a38e5e1f08d53dfad84667 +7faa2d63d58d20dd39de141f78f1c63d +74bf8cad064462e43aaf9c6803708786 +a8ca117e116e50892eca2b2c0da31fff +62a86d7ead8e934e64233f96b1219dc4 +ba6be1e5a91298cfe7bf75e5f88a6084 +eceeecbfd9a9c128539503305c110908 +ac68d49138187eb2915d291723273529 +c6ba3ef85ea436bd45a6cefd46608662 +99438c6e987b9205948cd9b882a0b08a +2515fe0a59c0feb43363751929f1d827 +a14a75cadd2eeed824fdde5a97e2442d +c1b62df20bea02a5e8504d251589696b +4270095fbc8779e8bca38436bb630df9 +3efc5ae6d8cf98ac3b319265f40908a5 +e902d74a3a7aa5ce6b8bc1bfd6a8d5cc +fc467d126c2cb5239933b3a74dd956fc +df8f375c7be629ff2186f2f58edc1dcb +1c0d245d5b8b35279632ff40f33c332c +1673de403d79010676b5d92c75fd204e +8f3f797a56d38f671fc8d1a5a7217959 
+b76258cf8faaf7b35486df771b70e72c +9e29ca0e6e7208674b5f235b17b46d39 +9f0322af541df599c111210d3286be38 +560091095c67d59859a8f2b575b6c2df +badc9f1f747b69f4f7a8eed97ebf2df8 +d35f21290c9fa01686d05525fbc0a18a +cd7d4912aa5fba4ce5390eac051f8b75 +f52d029ba4ebe2aa3a5f5eeeda0d455b +382e88010e26d8fb5a260061a66d86b9 +28ef7bfdbf48b818d9dd837e2606fee3 +cbde3f1d4520675b2506422114ef341a +64a8d8faadefdc6e350e8ecbbc722131 +e2eaa5d498d40d77c35a09ecd09dfdac +b85db1f1878a4940bdfac1b34c6d4e7f +c0fd40cb560e2b0499a2d167d8a755fa +fe34b6d6e1f948aff884f1f7006f10b6 +d092976882a4c6cc30eddfe1e41422c7 +4a6abeb51c4bd5a119b7fb62c5394c53 +011fb657a74c41a86b47f83ec1947b86 +71c8d0adf6fc92d638592627c6e44841 +6e1f4db622d185d1c727afcfac36e7b8 +457924a1442ffa5cfb91a38dcbead9d8 +29f692f0284a61c171df55d9f111fbbc +5365850c218bf59d9bd746e7b7421d29 +4cc462aad936034753fa7a377ccf8512 +b870d627975c2b958bcec5a6e911d86c +54846205cd855dade0e1e0a44d6ce3a5 +5961e3108669be833aaacd95f339ed78 +3848909e4a48662eece0d95e0f141510 +8dc98bbe7c5294ea918b442f1943f1e6 +86341d6698b7749cf9ce8f2fe87da6f3 +03ac44a68d8db04fdd807a80f6a59446 +d4e7b17fb6484df71d96863c48ad4a75 +fc83adbd8cb9872c22e8fa36156dd45f +61408e3455428ded0778dba8a0e3887d +be7073e377eae061f79b805f51bc467b +e20c856e0c403f4bf94da58c3f019a68 +75ed291b640df9fcfbdd3bbbd9ee82f6 +2c15039bc9f13c1828905e63a2d45566 +dbbd55337db640c0369d2fb21c151677 +00897c5f59507348e44037e3721e50f3 +1b80acbd68a10e697c86dfa67dd05f1e +bd5ccdada6054f21332b566cce86045a +3d8ca37b350320d5ef2c34e89c3b7cc2 +8b13ec7071557461c93bbd9327d424ef +8963ce38b7e11b2a543d9176eb169ca8 +220e601168d5b2339d1cc6dcf3589a7b +56a1e906a6494cb17302bb2ed0615894 +ba05fe64fd2fbc5741f5b674c0273cfb +fada6568fce466ed5b1930c51c9327e9 +75963a4f096608b8ac5d79b4d2dc2503 +e76a00a72537065b97a82abf2261a0dc +3006f508b865f2af8e48c19e87aa551f +a45a3bf3a634b3c5df356324c599f5f6 +8ccd66639ed4dcc90bbc177e2229149f +ba296ebdbb9da36882246d97a3c9ece4 +c66595aecf06f46de17d2467a1ef0869 +f8bce5d21d4cd535e60369969180813a +2f60a287c964303eac8d30ee42776441 +11ea0146c55d8ddcf5c9e563af56ed78 +2a0e3aed6faca24e9be06cf585ed1ba0 +b536c082a8b74ee1f84ca947e2e88954 +d19f5d6e9ed1ca7d1bbccfe02b9052af +8377cef920bf50987269703dae23739d +a83aa7ff0e0873dd00439da960301280 +880280f09094f4229dbc929f967920f2 +3b9d76129e9328515ce39e728a8b180f +8127ec2ed7d6bd1a79c55b45e9b3e604 +231c81d3bb7e82c7efb2afd0be2e5162 +268106c36b0a068ab748535af7fcd84a +913f2a5a2a286afb3aa81b408fd5d0b5 +5c56d7e40a06f0e4fc79b7e9e3b714d4 +e827c4660b42c4cf83c0118e714051c8 +bf28a2b5c0fb2aeb1f1f1ef2975004bd +8a6a7fded4e9ce0a32df35d10c0ac3c5 +00055af6fb33463c16aa806a680f195a +a5452ba985b7999ef13a58c959e729a5 +9e34828fd2cbf34aa87375d63c4cecd5 +fae927d76d9f998d9988ae2ce71802c2 +6beecb4c3839bb79969ee0ea4ddaa17d +de344436819af8221d408bdce462a013 +d560afa6987114b88234c799f4fecae8 +1ddebbb9a35c9005eab88991ba9e9d25 +9315bcf7a50db259105845f76e5781bb +9a8fc373cd8be213e6d027af216f4a54 +37ea07f0994970e6919676b583e5b1cc +3ac4d44747988a472ba464ff57573280 +b08f7854091abf8e24b9203b86502633 +7d1f5e9bf42bbd250ccf427d7893216a +60bfcaa51c2d4831a7fbec9f8724e8ea +5ddbb2aa02607d4484a0343a4f5e9a1d +445c9a3e98b897caf0fdc994c6cd3fab +7956dbd5fcd1a4f7a11d0add5b2e118b +137d8ca6f772fd749c41e112e3952ab6 +194cc7929a1a37a352a95c70c4d5e7e9 +4d48b74523c1e4913b10b9106dbf8066 +e301b4412fe210b0f9d6b960455cf7ea +875da1d7bd6669699ffd862526a92bfe +4c4ed36652ae12eef92f5daa24b6ed94 +597e77bd280a17cb63eafde38592412d +c441bc2917969077b021ea76faa6ba16 +993eac3100cadef1e433c5b269a65359 +078ed98216f7dac5a0ff9bd7983c974c +4e7207afdfabd4a094b6a82ce552d517 
+6778185aa0c457bf10488e8c76f6f0c1 +e4c1a1f6f34a04bc4d10e2aaa83217ec +2bd77f818cbda0ec00780062fb743c0a +e4ee0de04992967dda0225c09739f9c5 +8d47b9985e8b07c3a2823201d54350b7 +bff1f0737cfea692a11aeff2c8c0ab24 +fd9848326b9c42fe61105dc48b2251ac +c26d049c180a518cdcdb05060da85924 +a059f92e0cda1baa637acfc4f679711c +6c9fe76be62d7935c9bdc16d45211c81 +aa714bf3f10ceea5ee383f521c47bba3 +37dd9c7d0194bb159b47fb042ca90298 +a17c2b313b2217bcfa3b7a7c142c73d4 +9dd0d03934e71ee13c298f01fa8ff2ac +d3dd50ab4aac31b47e769b81d4b6a84c +a48d82d71c9cdf0be545a0535ef1c937 +e3eb232a8073b1e0e24747612878f997 +966658b90def87ac168dd4d6431b99f5 +9d43654bb270915eb0115affbab7873c +859977f8563c88a86f4a36ff80363821 +af203a0ed56b9c839ff58b84b7be4b1a +e9d62965308ee4809b45433e659e9341 +b94bb919b381d3e18211007ae8227ada +47568819223e3e3967cd5a4cb5975eb9 +be609812bfbced98a641f2b00ff3fd35 +5869d7394c8790877cffa9b10337cbd1 +fbb0ecc801779ab7c77259cc9df10a03 +bd7354ed9945fc05bc6e388c3dd7c9d9 +a2a54de5c854dcb56953f5407f8c5b15 +8ddac008bb9939c619c08506de70558c +56237e9aa797d86565a28b5f636335e8 +f188ab4accfe7741a258f21b3d313a49 +bfccad8e927547fe9adfa1e016170529 +0ad2f73b4e16a30bb31048509ca1fd3c +e9c90aad416c38ed235b20a1df2093b3 +7eaa7b54f7e273849b001966498d6252 +44109828700f9270185d230f71c1019a +8d4f12fe53cc443f6cd560b6be47ceed +f8df3781216438cf71c27c16d0f948b4 +458978ca7d3f8fc43d2108854c45d453 +2dcb2501fe2991556db609138216423f +ef623e7e2c61542ce696c90b4aafdc85 +eb8069e3723e125fb192e1020534e966 +efe60a5807e804a22be0d2dc91ac1956 +4ef75453056622c9e20f3e4aad1d9a0f +90b45d2b7bea8a08ae97b6cbfc650cfd +65b4464a7ca0e9e99e4ff10cc2a60d5d +3b713ec5e965e5f9b4e4a6059022e813 +ccdcf000cf2a98e370524fc1d54a03f9 +e6a2eaadcf193405a752eade08e0cdff +dd0b34b50c69af5dc231b8e37f74f536 +6651a945523b0b627ff81fee4fcac2ba +a26025ef000af35da46c46167f283169 +ef3734521daf5b747cd2c31e36022058 +7b7e0494d8081ffc20ac0202e6de591c +898c327751710d708480fa4ed43c42d7 +0648aa52c9d4a2fa78f7d4ad20df1fac +a30eb55a4a0f39359b858a7f879182a6 +7de9f67250cb29f95b060ff85625adc1 +519821e89034b51a15b4762e3bf0bd38 +405f4f0109c33cf53cf3c9b2eb3f2a29 +0281f84f34ba13139942d45aeb21914f +68174b435ffdf244773108d61dc53ca7 +cf227bf9a4c97de472950d5e36f5fb63 +ef4850c589524d350f91984f12a69e72 +d42942e61c952519e4681253b63b86a9 +7204e2eb9113aeb067a6ae30ae9ba2c7 +3cbc34dbdc6fb1ce92bca0db522ebf86 +b6e7422df11f7a5469841361f2029d88 +e0f73869de30ca974b0f84f9f5b3393f +d41d8cd98f00b204e9800998ecf8427e +6d1d8d22e4afc378b3243a57a80cdc45 +2cfd39028d07604aa5c5727a8e23f1da +e7d0243fe48cdcce24a468bc6725b5c2 +763ab0abad61e193a991640ad05deb02 +211683ccd1b7f77f978b9402d5aad918 +f763fd34908f08308daddf2b2c3e0b76 +72bc48d45233b1029d5581415f00882f +36a59054ce14c4978965d976efae46af +5172c8170c68d3f78dfc0dcc328cb607 +e7b07ebe5f21f00e38750ba1ce7804d2 +68c328eb7ee1e8e51c7e71e9d7ca6887 +12f787423dedbf3b8bbb86aa379ce2d2 +deeb6f1c3219b9fd8c2d8a4710abd928 +f9164f5f3671c6df4a876f79a469cc2a +a50ec6fe59961ea2589dcd1966984c4e +14ad69b85c5680e8f695c9958c145725 +d8630eb68be27a2e4de913bbc916364a +0c66ef0bd73956d6dafecb303410cb9a +057dfb5d356adac34b441ceafa4eec91 +d63001d25353eee66e6b82e720021c86 +895ac04bd7bfb9bd2292a1211df48921 +c8b7ea505761350437318cc5acf337c9 +05316c27946e60a03f255ee6f1f932a9 +58b8b5baa12c22eac5e3a6ab3a69ed61 +1d3edb9f9609230a783b7bcbc0878d22 +6f27ea38161fe8e610ac35767a980cb6 +90e5f5e04cd1b9c199a781f46c38decb +00c39eb2bfa287353804bb8b23f0efe5 +1e5b2638b26b004190f342777d204333 +62480226e50c3503a1594d89c9f8420b +1a7ba1eabb067b9df7e8782ea9113e29 +f5620b3eb548e2db1ce1eff8dbb08ee2 +1e1bf32069c6441bbc3b4c78a5995292 
+ebbc06ce4f9a4c7b5947c53b2c224a34 +6fedfbeed6cffbfeaac88e9228ddb799 +e6e10c0f8cbd4e6ead25728d70dc25c2 +f80fd653f63ec2fa196906c62f2fbdec +daa667bbb50ea1f245e5f5667c402cf7 +d18034fbcf6058fef7bdcb6917fbf7e3 +c0d7d694e3b393df9c07e3664bbb32f9 +a8ab222471af79501fb5e9031f55822a +1f2a37dc55dace5729272d0691143166 +e5439cbf939c3b3e09fedabc77f82154 +886413caccbb3f4ee41e6e9a6e4659e1 +7c52e65a1f7f1cfb646734532b433795 +eaa5129d137fe384d33ac12153aa593b +fb761f3c689baeb1d7b9dcf6ca357bda +83a75a5d1d1efe9ff23b23de0c340fd9 +483afca5ca1dbf532e550a3f168dbe85 +346fb22fb2f786025daf614c72dc1f48 +aaa067b99a416a93e95fb77a99abf36e +5a50beeb7298a019fa9573d94a825598 +26023ffa3427bd139ceae7b785d55339 +ae06e1156a0eb72b38f43d6b0cecd024 +581ccb0b750ccaebb3196dd14657ad55 +b2db8e20637b650f36c609b2efdfea46 +e5d7b1f31806801bbb776b6e248479c4 +6407959c2931164639b07814a30aaa33 +5c06350d91eaee806fd62578da0eb4f3 +74376f0717a5e81c496745c5481b9534 +f0f63ec82bff702cc4858fad78568b56 +46b49e7f8001293ea59e5c991fac85d1 +71bd5ea87c3523a431451bef1949063b +941db9ac142ee2312a3aa758a0c07049 +06767a2b5f92d47d601c6235333b399e +59dfed6060b228b82f84f83de81b416c +e2a4bfd0188fde2a84d2fd0605f13b5d +805c55f9cf33dfe4bc3a0cc66ddf523a +a28d1bfa46ec80669c3052f42b7eaba6 +9ab741a0de0bc28f4eda9509e8ba8d93 +e727b363cf09e1f1a37f2d3e7884dfe6 +07b4b2ccfd7e50b957748d273201b8c1 +21f075c82f973a2ed32d30715d655f4a +ee4a45b4037071fb15d7e52ba57a6f2d +47f5df89b56d4077bafa74fdec2c867c +b7af0897fa651c3e9395d5632684f565 +746391b4e5c8c9a69bcd09ab9eaf8702 +0cb31388d764ee4cf133fda06002d424 +61ff1eada56193cde330ed55bac92eb9 +9a9a0e431b6c38333bd14ba8508c7ec3 +48b6f84e5603f2ad2329540ed8a0c3ba +8a55c02de2484c0f8b4261701a44a96b +58b26b5667ca527a309d72a3a4070cf2 +b0df46ee625446d4a7790ecef072c642 +2351edcc86c9dfff39f49190a5532d1e +796d97ae041eaed884a5001a2f8ff73d +d45c58d83ed00f315eb6f3e18b5dba5c +771f44740f3d1ce2d8956426c2ad2473 +77a6d4337a1f6d3c2d05719274f7c3a9 +21f2959b6dd317fe5bff5ff82b774f1b +3ccc32bcb693415490ff385f0858efc2 +e2f590909a4d8843d9025712316a1dcf +1f6067da105455f4fff52333793624cd +a806d50a24ae23affe950a78c7078365 +252940c576a3fd0d6833fb633e010fed +4473e7b1020ef3d9b95d3c1a8a61d478 +7c846016e5cebff488cc68d5fe7fff84 +604dcaddba4bce8870187d1c774199df +dad430e08e42d8e6ee1d132aab1a6db3 +f6a72777ed2a3e513bbd6e31acee4c5c +3330d1cdbaa1a926163704dc88a79c5f +8e52011180f2231dc2a7c9c0b1f8a1d6 +2eb278f0d2616858c07a5c1ce0455500 +16207f3bc7831a1650faf423a3edc170 +820162001e1798c608aab874f4b672e2 +5192a1e62cde44a16966de8c04abb76d +36f7422b95a65d5697506b9115f6172a +42f4f568b4b729bc2b9f991b60136bf4 +53e77894554ad2cda27b3e0f3c74588f +eb9eac3752782ce606cbcaf5e6f6d486 +00c7cfb5a1f9b8831fd1dfd59c8c048c +c4ef874c1ab352dcc18f09ecc3b9ef57 +33d1f78069857482b58da5f2d93c47af +0f3a96e6df02258fcad5eae688ffa0ab +60543612ce2298ab1618cc89d641ccb9 +e46796d3de230688bbd5fd6f1d73b269 +b36bba112f683be744963256f777dc07 +8b3c66d58caf2bbaed1df27d20848043 +7154fda4b68bca83f6ffc205c13f3eb0 +eafd71951ef720eaf6454f9967f08f80 +ea0bb03f734b7fc4d46f495860530e44 +e18208369d00086ace33b638b814a81b +9ee84ba8e65c51d9743235d23452c079 +ef5232de491745f69a0f5d503759497b +c8f80fb5574d8924339f09011a98f0a7 +ac850473a1a8db004413fc750b44cbb6 +3fd4c40549a254a23bc5a8fde4bdf186 +dd41122b979837de77eae7048ac67492 +34b88fb6be0d46f5442da41eaa19bcc8 +dd9a10d5417b6a091f79d0891821bcf5 +11e6c5cf70bf5f694ef08251931c2c82 +6c562bed6d47aa6d07f9dd853ff8a52a +b942ffb22adc4e4de74ae0dce0c087e6 +81e4e51516c0226ddfd8ec00d3a4ee2b +0f32d1b5ba58a6b85c0679d2955183a5 +b2ad17a9649e0c389afec45fc864567c +60c6bfef7a0a50806d5b0ccb990d1c7d 
+f7d9de7051114a82146aba43b412dcec +c67c22c17d9505284a783e16d4a868da +169a5970e430b55887de994c1fa075b0 +ffc11d2cd12696fa8adc9da0bb9af252 +002476bc3d34971bb8f780ba4401c1ce +12a0e88c696e0b1f7c3aba18a99f65e6 +05a2a645da2303309d85b173505647f6 +2ebb437707bba3ac8d08cddfdc9be8d6 +6928625f6ff50c045b98ffa7b89af73f +83bc7c1699f97444c31c0ccb725f9a9d +b604e85d510b5408828479c3ac25df41 +b0a0d71150764c1f17b554d293a6df08 +28274530a94a945ced36e5e98e224ba7 +6769e0b88320af7f4331c6a4c68401e1 +9240836f66f1a1d9da3919973bb242b6 +ef35856628695e4564400d4f582f5ace +dea4f3ed6ba37e05d8787a900ac66917 +9574bd70b98fd773aa002fc3855fd3a8 +931eeef1aaadaf1acd9c5a63f3e1c603 +b9e487441b516a0e7115455763290c10 +5679926a0b5e5824484c75eecd590324 +6bb90787c697d2a64f931f27cdb8e5a1 +1486b7d6856a7a20c35047936aa2396c +e28c46efa26219ef558c97f20ed17dc7 +5d229ce1bfdbb3e3d0d31c969cc9ab1b +df30d0091d652dccb434e21f69a98771 +715f17e451bfde4fe7dc4e4d704a8ca8 +d6f5d8411a6ec0afa73ef5579e567a57 +5d968eb396a7c38f2bc43867194fe913 +35e092a07af227191a67ec28c50a5410 +30d15008a352063d18d5c2ffece00500 +561c9fbc02e8b58de2be4b53b71cc0c8 +24f7afb38fc24c07fa92ef3d3a69bb46 +685d218c976fc88bc6a7b55130dfdf0f +48630d0c39310d6823bff7f486198067 +271be356679455218722174cfba26a2d +4c4386cb7f1ad25baefb31306b748cee +2f4e846f27f7f8bac6d3076b2e8bab93 +86231d00efde51eeae6fad0448a8687f +07585ca66580559401b20669b2cb1144 +2589e835d39bfafd27cc9ff7a1f1a126 +c407e43fc747fefcf39f086ddf46f75c +6c756343132fb6126e211eb82e4bc0e2 +26272b6393aea4547dce563f13cbed0c +7159fc40c3442e4abdd0f0d06f9fc20f +57fd7a9b56d69dbdeaeeecf7613ed02c +39f1ed93b0dd88e5eb9c3860c17bf3bb +70bd3a44c951b04767e312c6dafaa3b0 +3170467b59b5e5d7d5af325d8ff8f5f5 +43fc6099791dff2003876cd6a3814a97 +f2e872710beeed6a12a97e134fb5fd59 +0db81a486b00a9349ab4bcf37c9b464a +7f9e241d0378ab27aa6b86c68f3e07ca +ae62a1122b8a6c65caf9233ea2dc7dfc +96831ea0c3bb3ee0133c2075358f7bad +3e2c87d612aece0643031c18a543066a +53ce7c35009c5f69fad5ec6f4f0d6ff8 +311973ef2e072243cb72f2d890bf2db5 +28100de2174b14846fc79cce4daaed21 +0dbf099421307dec0ee22640645e29ca +ad576d0a4e5f2e6ab4afce61854b2928 +95421a796eb3d32a2a9ae3c96644e4fb +5f470e8c7c1101f9d510d628bf611613 +9349cda214c6ab540248a95a8fabba5e +3f50c79db08c438e463cf75f76df407a +b149c33ae151d63e9c182659dbfb40ba +ae523b2549723c5687a5adcf48f5addb +cdb442f9bc43207e1b1cb3c80378b938 +8a076393404edc96b5883799c6cba741 +190a3523a47fdfdf94fd331e507959fa +06b68f1813aa162e02a0c6594106832c +e1a07069c78e495e88865e7784301cea +1de827f2984789483738cf5e9aee1837 +27d03123bc9b9ced76f961f381b3d9b6 +58cd601c11e2a7896c5252b9db0d345c +b5d4d9b37f104bb476df95cd0d86f504 +363497ab67a09a0baedfc7506fcdbbe0 +b26995c8adb6bc911e9a0aba7a656da0 +7f46e41c7d1909cfa61fec8dc08a3a98 +b51986ef1667aab62ffe7670bd9fe502 +f437f61809b5c80f494647cec6ea4551 +fde1f359d31324070922e88bba3dc5c0 +41a9ba31540db0e4e4b0678f8ba80153 +053c6b0cc26295fc9b74b9bb9cf919a6 +d3fc70254ce4ecfe69c2dd2239316027 +420a9792b5e7aabab5730880db65ba19 +e38c842bd54f66e6abdf3eb73c1d4185 +47adb7d199e1932263fa99900a68b2ed +86975bfa628b8313c927e06062199acb +b4f2d43c1c3ebf0090d094b9b3ce8645 +66e273f30cb115642c6b619e47b3f6f8 +1a0bcdc66673eb3e32c643a95ec3d52d +d24bc400bd9233ca85693a84a00f6ce1 +a104779fcfb6aeb4304568e232a11ac8 +f20f9b6caa1ddc7288e84a801a636e92 +9ba1c49b36efa7778c6cd6d06f63ad34 +449bac648103339d16de48915f8ef82e +e19a0ad0a55a21fed22ff5a0d8f8b534 +cee75956090f16319c74d92f9ec6f086 +e71bcba2ca56af52c43a365ae0d3f5f3 +93e7b5e7667284694c6f1caa687499e3 +00232de0b7ba28269ef091acc1e3aa2c +7f8982add39cd4a6fe0dcdc1e5fb6551 +d711377b466abdd32aecf1c1d4706986 
+50dafb59bcf1dfce3f4620018e6196e0 +95b55fef58ed5c495405682d2310b4f9 +835ee5f732fdcec57ca3f132a430a3f8 +6ebea99af5fa69eab407e629823a7405 +a1fe23130317f5e54e1f5dd40f58ef3a +0ef71f7ffd9849b846c64f758b695bde +640aa1525b29830bc9984a7a9319c787 +d70d70fb7b556598b46aa89508448e34 +733be0c91e68bc8b4a66e88dde7a2afa +0a4efb51082d6a47df59c07f210ddadb +545816c7e0530f0ae1f5ed5923fa013f +fd30215e770a609f813884cba3c41d6d +a915f3f5337a47c7c9e7821e6fcc2420 +8f20cce183a616d5055decbc444fc8c7 +76a9318370952af13474031c4acea809 +b0a9ed2549ce86e3e8d217b728e59fd3 +37aa160475ea8f1683fb815c30f46414 +540485e699ce5a5226ab212f299b5b4b +233988a25a121978710030eef3966b9b +9982fe2d363defa9b43a28c9e6961b50 +f620fcfee98add87881760f077061af6 +5c02c1c94ef9f68c16a155e62439bafe +7973511d1f1be1298944a46f979b7bba +ddb2816f1c1f8e0741ab863c1b9d68d5 +6845d791131e438d7ad25c52a5dc9674 +e3bdb892ba8748851442258246e604f1 +f426478ebf67238455b4fb961ce61bbd +dd0a5185abfd7266a16cbe5252acce4d +f4e4c0ba926e0cfabc21f3813c2a19cb +31d654f18685d2a7918d142da5b7b30f +b625225b52d8aa224b46088a8c321ec0 +56a5608f12b0a3cd38c4e39aa7597a0b +ba06c752ad6be92b8a586510815b2a7b +73d17458cc141741f0e73b8c760d542c +d902aff70d3a1088eddaac978753b1fd +e466b8b5c1bbca8c1d1a0dc20212d339 +b97d5b4dbc562c93c10a5a33dd52a0bd +39c4f530a572fc82c81f23d2590ebe52 +a6a97e55dd738dbacdd9822df486a5be +276c3ebf05ee1b31a924a2327abc0762 +d41d8cd98f00b204e9800998ecf8427e +c6e6bc16928cea4ce4f2a6f72af17eb3 +0df4fc79590a41d3853d4e75acc37b13 +825c3867ad27805d5a11ae4cd5994d25 +369e1cbbd4917b691d61d9e459733291 +779715032cdb0382754b2497a93679e7 +73b902da829be27d0353d37e93976460 +b75e86641130003f272dbc640dccd3c6 +1d5c1a92089e094260e2e7da67a380e2 +c7651be82c6424df9d6a855c22a4108a +fb2d13c1f6fa6031fa0ad3a6f072f461 +382d284fd3ce4a529b2494895d644df5 +adcc4af7582fd612c6d9f9cd8438cb37 +38c340b6dfe7b1eaa531258c838da8a8 +667fc78200ab82e30d283e3d9b8c35a2 +5b3a2235b2611b254d577ce5300f9fd8 +d04a412b34ff5f79950fc2dc879c5da9 +48b3d9544ad7399ad5d8480f6eab894f +483e5d985fa98797b584315f81aa1a1c +777be93da670a41b6cc5cc1d2fd4b73d +a7570db15b6c6f53e77f7fd91deab65c +9218a42dfe37c0ed7e1ca5dca406d4bc +75780e443237a7d52cb0dd35311ac8f3 +c34dd3dba8bfa835b23f961f8e814212 +224a220a97d2ec043314778bbfe29e93 +99cb7a508da937c562bd94689b2036f2 +0be92afad41530ad6d7264c48b02d408 +1af26722e5ff0f622943d5eb72f113a6 +2db1058cdeceb572961f2c886c04ecf7 +9b386bea8d87cd178234cb060b114d06 +970c68ee639ccc7a189b8ada52d2e874 +8ddce6d7cfde8ed5f815c383367eb2e4 +0894d2033a527c52e46394cd2ef58af5 +2cf0eaeef49ed59b15bfb7187afbe8b8 +60ade4c6a65e3d6fc6f71b978f6dfcc5 +71e4da487ca6497ff9badc5554e0f679 +c6a3d74f25b9e50684206cb3af4d207b +dea067c9122bc96b897dcf45ebfffe08 +5a7b628c80b41bf3cf0c62ae6c5113db +d3b7373def7b18a753a5302693727c57 +46874a3e4ba01ba69b3182429837c34a +1c45514ab7c13bbf0cf8d45a97be2933 +7efd8ee08786824e0ded62a770a11926 +83d1caec7ab6f5cacda01501d056137e +492f5a4095c1f63b223e38f8dd10e9bb +a7e0f43b9c1b245a472a5a60a632f74c +4dd1b8d31c75a173fb1e3f95903fa1aa +5cc8cda4cf10e3e9bc201a4dd07ec4d7 +9836f2107cc2c32053282e0f314a72f6 +71111f6e18e41350c8b62da245d02e67 +1c7d57c31392bbf55e31c523e5b25a89 +c17f5191a5ca2e1c3a13357126c31941 +01d904bc69587a03f9d06cfa0b0cb8b8 +9548f45535d5fcfe2c2c1ad33ff96cbe +0c0bfcc516071cab8c26e008abe5d8c4 +1b807e1233fa9d132788fc4ae96cb06b +c500daf19a89019305b0563257415c8e +40a161b8ccb37ed546bdfb059a8def63 +26de23d80525999b2363a49596b4817b +917672e9616e792152aa75a784977f22 +f05da931bf18c24182e09b8cb1ac21e1 +8420e72d6ecf46408908877d513248eb +3cd8d2c81a70e7f9f6e32fbdf3e010f5 +066933cb3a06fcbbe7ca0245c4faf5e1 
+a51019175fb10c9719e8b355414ba8ed +6257d13cdaf350c9e97ecd8fdf625d54 +95a00f05e87c304bc7f71beffffff782 +8e232ba53025d5620819df1410a71c8b +f69d37ac3007ed1dc3da06ca658fb3a1 +367cb22a233d6f99a856df5ccf6a76d3 +568775baea23e9c4af2852a8abb8b86c +3a0417a5bc52275b0e3932804fc05179 +2e5c5a67bb7bc39a4ea87db23276e9e6 +eb7c84d7dc3a3f58ca953ebe737f03cd +61cdd8ecc778e9c520e05aa8f7f8db5f +bdcd1c9c74ff432ff60536a8050c8ca3 +bceccb2e95ac44965ded6fcb179de994 +fa20034abf5e7bfbbca3909c104e3a29 +211277a458f8014340dd1b5759a24026 +1229d2f3aa05285838132010c1faa20a +2563e3c4e05bd30827bcf4d22a6247b2 +b20ce305c146748149f6f2f4e4d8e67f +4b71c6bcd70c77b03b2c160316924bb5 +5c18263d6a62ea711eafdbd68dbb5f9f +d99a81dee08bfa1367eba393bf692ab8 +4bf4abdedd1341095709a07fb7337688 +58591b22dec9f0a6bd02a898c88a35de +914db820c3dcc6bcc98ea872a4bac282 +e92f431030ccc604b4f667a78e635e34 +dd024218655d5fa669f895f52ed9fb78 +b58d80262d38288ac3fd200378118bb8 +4d5df73bcaa5644df7bc4a1591fb76a8 +cfe1399765c007eb7442bd8d47b40190 +146ef15c0935555b18aa8bc95efb3bd9 +458c7edb1199257b58263ede35729487 +b256c13ef5a590a546ebd3df04b08f1b +24b9feb96f7a68daa9892787620240b9 +b32c04e08144ee9054bf31d547bb33f5 +8e5d2c110ac4d7b0ab713407dead4957 +96f33e8f72d814f80878d4af78234931 +3adbae70384f7fd3262db2b57052d9f2 +428cd773c5b907be2180ebbbd550cb95 +c485a8cb6b7113001d7d113b949aadb5 +f8e129bea0be242d6ee978fcdfd00439 +979e0c39a7efefccebaac86d480f292b +4e215903498c792a344d3dd0c66bbcc2 +0421899d0066a64a0e35d5c14e563770 +87c99c1500ff977f229adc1d0d639731 +c181e0c6aba3b90add59662ce5a64554 +478fcd55ed52ec1138403fb5b241b05f +730e08e10ed14f7a4df849fe1fa634a4 +c077e311c5cdaf4bc2da686609e79d9b +826df42183271d2d91536e47aa5e9cf9 +341fa12b72f2cb647d8cf352c84f7572 +452102c01001890dec6b75eb1c0c354b +8dc282c91b100a47aca065f5e51d28d2 +10897a20cf0833e58575e9cf28870f2d +3f81bcb2ee77db68cda2d5574e10dfb3 +23cdc70a5a5f19e6cbd9f6e09b23dc1c +a1cef1cb1f8dd0ac15bf0fa8c72bddd6 +a846d91b38c6f795d401258abaa033a2 +b6992b4eff175476dea7bf536694449a +a22177d212f94aa581e1cd5f957f0a1c +ae7214a3b6410078e6dccffd24b0788d +331d693bdfa2fa1a9b07e7e5023404c4 +ae73f3178ad57dd082914bfe9f043325 +d8e749050d41ba322eee631a366fa20b +11b2d418963fce1b4701152ac0f0190a +f22d70a6f124c8d6782d041a2bd6c6c6 +f1f7561c7f80be2f7850f773cbdc1a86 +106f6cde972bd9a339e4ae463774b7cd +6ab251a3c620b72d098ed019548c4dd3 +698eda314f0e18e32966571e03284eea +68e52b33f62f368293872ba4ca2d550a +a4b4aa6bc52bd6cf958d356f3c13e4c2 +31d0aa589af3f18edead86fc75776ac3 +814dd00f92c6e9288185e01d64f8a23e +fd122f237b71540a9244e499b013fdfd +69f719502d40807d7f7c4652feba4db3 +a85ecfc7413cf521a263f2ed4e8421f7 +4d38faf2c191532322027a6a36652ff0 +4a11fba7164adba57cb1cb274045eb85 +8aaaf2b40458ffe2aad28ab7ba839808 +0c5ceaebf49b1f096ce5f174affd7452 +5f9c96e35ed85992c56e676458a4b331 +aa2c254d785e14674ec160721e3a4969 +4009e101310f3f463a83189dd6fb2b67 +400df096c7ed4da8af97c23731f95f1c +8efcd1931dbdd1f94ddb40712d505b3c +046e2a9bb87b90bad31a47baecb62f4e +700850cfc9ced8ad8f9cae1731760d99 +801c9960b064a60fdf73e3dba21c5272 +549115d804ba65d89c1923290d1d2b56 +fe32c143e455ed540d697a279fc314f8 +845c91d1447cdc91faf9868bc8bdfe6b +6013e9b680bc1d1c96291d154f553809 +7bad5da6ad3a889ca6b02d6624d99467 +1d19f04014d0a4330cabca3754aa31fb +98a87d745c08f692a5530906e6a9ef2d +3411ba8b95fcfecf740ef9fd79848805 +ddde985dc78e8304fa1928b9e759dfef +28ee79cb3e6bf92701ada502a4d4bf19 +7d5eac4d5e8c97a0552af4801f863eef +c9fbd30bd4c6ec34ac3a5c65dcb99626 +edd59c3982a31c0a288a16fb41f0567e +5bbce8a90f1bc79584c8c2b8e2cf9909 +bbe656130eb95d65c2cf0f26f0fab030 +a8c1f244c196f65132de0a5040a75592 
+41f0f216464ae7c3cfcc0795f7da5403 +2bb92e1d44e04fc7fbfe8b5eae52162c +c2f881dcc6c78fb3210aaff1d2113804 +69dd25561d6d7eb8a70f3401469004e3 +da149aabec16f0884f53cc1e084a5062 +e9299624690c8448cfcbf9d71b69954a +dcbf540c3fcb65c428c943043de63b94 +6dbd1ff4ad2c1b9cea9d791d5362ba2b +890978a48a7e65ecb2a296839af7188f +dc5b2f9b7caaef9fa5d7e3e768473f4a +187fa8de169057860c92a00c6f2d7471 +d5dcb9c4b13cae5531313034bd191ea0 +b80e62f4fa5be81b78126c24d22062e4 +34b47efad15f2bf482ce46edc6943b1e +5cc49622ea6dad305b321afab0aee31d +64859dc543a2805339fd63957a3a9c5c +e54bad6624eb71a2cb847d86389384f7 +ff1fb6d1454993fab667fbac63885694 +98b06498ce0b446927439eccb1ba71ce +e75d97e64eb36144d60ee142b5d455ec +036a11112fc6245efaf85d60304bbb41 +3fcd75242f4a51f1537f799c8ebe5495 +9ea96800b16f6243c5898087a999b00c +b4359b453d6943594bc04551f3de4de5 +5ba506e06f9e5660568bf45478017980 +490e7189076c3a263db15d52bacba953 +8c627610f76444d49d745d55c4457928 +424d11f3b8f2d7d5d14249356edf475a +087f662bfcb0b22cab2b312791986a5c +24b945e8d65952e2c7c3a6bfb787201f +c0ccd1aa3c3e625f7ae6d0733a7d7d66 +80ac8ef8d7647294ccf249d4c26a859a +48d30db27ec3907f4b5e33fc7559a67e +edde5842beeb2100f2e2b845c7601281 +7a90a17bcc2d356a4b53e1ce119ced17 +3632fecb702e7828d1c92dcd8e4a7906 +9476c41a85d4d40d0d6819b45993a0be +57ec530e6b9e949c8a861ab173da8262 +95d19c2cf5d78f76f377fb136fd742dd +91e79e8d31b9097f231d60e95f918563 +8287504bc5f075059e394a11302213a5 +0971acfda3a09627d9135e95131029e7 +2dd459acd69aa46b1b13d9a04ac42624 +558251ffd4a7422b84aeb4a487618af8 +d1e7876b6f677decd2062b7448e98e76 +94a5d798605fb5605119e0a3af607a43 +ad2e5cab72661889a6f3fb97bb8c3e45 +d429df6c9ae6cb26fd9a1d81b6452ef5 +36537c9e3a5dbf3783009b07c4059300 +5a3765737e6db2000fb990521cea228f +bbffae67be83c6d5b3631db86e0d6e98 +a9f05aff50294a0372c083ae55f4cea2 +67f324926c27c58212ae49d04cf5d7ec +c2996900b6a0521af8d130d9b588942e +b72377e7da0f194daccec7a562f7eab9 +dda2ee83afc3559edcbba15c9425ee60 +5ce735356f998297bd300b26654d8fe1 +d3b547f8aa89af33da474f2c758049df +c8fcc030e2e26365b1f119228b35e778 +b6a5f63a02142791ecd3906921ab2dc5 +8224dca724dd35962c756c0df4fe46a4 +c7f9638b5d08b6cda17e235f5c1fa993 +1981c77f3296fce79d897bbb0290c300 +0c1b14d84c9ec10658191b364e9c2222 +0e51a25a48f885b91f52223c1cfecd97 +1c7debc4af2e22ef64616b49bfedc39a +e16a738cc95fef14d80968de465ec1d9 +008a8cb802bc87dadf2b341a088298cf +9978c5a8330318d5e448d843f429059e +323636ca16103b9c6edc6ec913c84e95 +37b7154b3a8899e76334630f4b80a95a +f05aef7dd91ff8b807d7a14d5637854e +ce895f33e382023a81562aa6e4610d2f +0beefacd6f5dd5933e3e56bdfd0fefc5 +1be2367fe1ee8d86e59c0a774ea79aa3 +7cf52629cc8c45ef31d87b4730cc1ff8 +3378906b53d413b9448a38a45a995ac5 +bb4d9bdb0009d2ccd6dcf7b4cb861c66 +308bed39a600934cd5d7b98073e67025 +33ca01f46b5ee3553841827abf43efcb +3f9232eacebae815edf01704cffbdaf0 +884f705759b46449bd292b9e88f1b949 +0f06a105a65a44446545eaa16161c77c +44e1d5f8f55d341f3f67bf51726b6f09 +c11dbb7e49cead8a72dacb07146ba57d +f601d74c38e02c81ab361a5c548922d3 +f2875d525532a64eb40168569c08d5f0 +54eb6c4475789ab2f00908d8e3f72ae1 +0f0a8f640dfe4f106d57f1383aca24c4 +8cb16b7ea48708405b18d72a63fa5b25 +8280e33504d98b3d9e75b352220205a8 +cd6c7edae0211ff4721d1fa12cb35f7f +576806462f1cc4a6c2f3a1e94727702d +f08fcfe0c4af19f745d5db94ee8f718f +cf369054fbb560af148c059974833371 +29856a6a97e68eb3b3d190253872572b +cdfd70d1b7b6151c14cd3b845b0d3196 +cc501855a1f93bdeee7e936f24d79726 +8174da22c4d51b66951b8e60e43a230f +2f01d4d2d61b72fa0849a377d5a673ce +d386cd13027953b4c29b12dd5a8101c8 +718c44b2948e540c3c2c617f0494c58e +e0082c662e93ac3631dab8f42b0fbb0c +9c09eaf6553f4880927959acc88e5b9d 
+1167544435419af1f84bf34fc5594c52 +d1c571501436f1299527d479e8fad2bc +e54a5e248184b17ef13f2caae43014d6 +ab1d9406225ca392d391a124698f0eae +9c6eb337cfa1acf62e6fd6e9a703468b +83424113e24a1bce137e98a90ff09ae2 +7b946b9d8b8e4a4a703d19d42ecaf4c5 +c75bcaa7912d1ad65ad5a527431e8994 +6e71bb1c872e3688bcae344edec9e95a +4e5e81bf82e0d6d7b49675cf4ad3b270 +c23ada8dbdae470d14044ab1deaba21d +e6e7d9c97d1358a18f1d88038cf65e5e +cefdf90e89d0f677a89f136e7dfc6e54 +80aa464ea2070712b75043c6a94839e1 +b2a556452ac73b68a997aae0f1fda65f +03eed3ac0ae9f22e0f98dd1dba452f05 +7957b45a6ecd53485c8903e7c5021588 +2fa3c3db69ead07e10b4469dd15bdac7 +7ea3cc9b44b7d3b5b3b73a3cd90cb5ae +de40090127f4b705f2a2da7a1a7491b1 +720f83c32d01bffa791f0ae3f2957c87 +7d691d3022d042deab4d175943c67c51 +cb594909295491051b11b224cf6b331e +ce5439931bf52ffcbb7a8328aeb40d10 +a24ab42b1ece8dbf04b91fa0235b9be0 +4c03ae93ffa7146ccf02f00961731641 +9ef5e700f010b50c6650ad3b52518154 +52bb7c051025119de156d56fd72e4944 +3190dedba40ff311f2b1687715330c5a +1f1fc4012aa358fc3bbee6c21c11a089 +46587380e14ba063f1a471ff2059b895 +4056bb2cdc7540d37a53d2c707779b55 +7e22043f4e7441f9aeb4aeed483a2ec6 +7b4ba805a3e708a779a174c3169d35c4 +8af2602699423b7ebd2decb5c41630ac +7d87a26aad45f6ce987efa25717045b6 +2c3849a0ce9edceb29a81c9531d0363d +30ae366d476469528ed2a7c596574251 +33ee2b91a614ce9b38111e3565e6d19f +81ff6d18c1e541dfff8a46af46ed3bf7 +7c2e294112220b60f441a478df7927be +55da8d76bd01287f46222dd751216c52 +767986017ffff029870f30cf0832bb42 +1b1ade3050c084466da4d24b5fc52dfe +a9b81becc838ad275f53ab0e5678fdf6 +0fd3e7c7e3e601041ffaa351ab172bab +a4c3956c0f079c1ecfef1f29ad7433e5 +f891b3366266162f00e4d6f72b640ea6 +531cf6f2112a9d39ce951189f67a3735 +7d6f4e30f66eafc98074376d1a717f68 +7268bb737cd0bcb64994ebb338282890 +7d42e897f5e98c55edf7915acd4e63e4 +8548be52c795ecdfba92d071afe0b72f +ff5705c3f546a4d540d14a4045f1f55f +f4e530b11aaee28198f82e5ce38e6d29 +eaebd82a941bcf7df1881bc1c370c51a +c20062fc223d5c4d5a5b3614a8fa2633 +1c773cae784803bccbf490824ac4a4de +d24e2e8b62b2253250f56ea0544001ef +c5df9568c8d54065551506253c7fc523 +eec0f4812600da1b236769b7c936801b +c47def009fe0a8e206a15469c36f389a +65a96068350e1d708c5645dac368dd16 +3ebd2ce067dc54d8acf5eba1d9dfd6cb +6ed152d8526338f03888f153caddbffa +3439fbe02af492cb664263ff58cba418 +6225d81fbd7c1a2cd91f2d166e8e0aa5 +8fe89d1ececf5aa9cc1ceb6506e3d1d5 +2749f8d783ec679b15ee2c4a6260c497 +d22c49e8237780b4754095acdf32b41b +1dc080830cc6b58fca67b1a59b873274 +23bd299cbc10f8921f227f78dc556d7c +d7099eb6eab21e6985ad4b63598d9a48 +a58ab6361a3458027f634ef6ac7e153a +2f353cf86663ecb2887d4f35832aa0fc +7f49f807768cf594eef32342b0e37924 +d95494286cb08011ab48b1d05c153f3c +698623f2d8f95f234f08035f6eec7f57 +ef21c025346aa20e1d4d6ace5e4deb70 +004ba3d14f7f8cee966bfd35a774efdc +cd855e1563518f5cfb322e6d90d256de +19c4fe55a265f0368ff7394a61daeefa +9c70572ac970866c5637b5a28f2b3956 +23e6b69b02fbec866ad84c674abea1d9 +a28ee38acc41aed1e7c3fdc336d66b42 +0681ec02f4ad8626fa24dfa551a1674c +6f83096c77f059145f4dfc670e9898d7 +4c5a22b04162d20ad814c647d2111870 +53881a55d2f1f07d64ff01758df9ac3d +15a2db68e606b4d60bc311c589838ac9 +94a185178dd4db4a40a8217124083159 +990e76815908299bde59fba3fc85af7c +29aca2292b20a04b53f1f7b8fae80bec +514924bcb1e12718145aa252fbc3e2b3 +7be6743816de209ea222c6f2e8322bc3 +12666bef78cdf1e0936a15eee8bff903 +4b6dacadda979e4a2d97645699a4c941 +95e302ef317694f3d31aed2bbf535b7f +bb4e438cb3a0cf3b170140f2ad970c59 +a9bc1e4b660a604e3c26ca17b0878077 +f1c11aea505fae6bc41763503a418bd7 +cf0f541e0e70e08c8716edd7e2a43e3e +338fe9b1892c7c4824622d916d20567e +c2e5c6e10778f4017de93f407c29a83e 
+075e3c7748441053dd8bf126db878b17 +3407729f4b414dd495e4b0472d30d36e +1aafba7f47161739d4e497df92af6533 +eae1619ab9c1f34e613018ab21944a67 +eff1b169793cc6b0722a3a4415e9ae9b +20b237f31f13dd04e4ceb231a4f60785 +9e8fadd58781b46d1d5c283ae98e0744 +eaebebdc75e6c8fe838ae02391ee1c14 +3bb20d93f2065164105f2bb2c0600faa +4f3708ec557f0c9de4224e456bc6e3c4 +d41d8cd98f00b204e9800998ecf8427e +c8dd262235a5fded9c53811668dd09a3 +3f68d2efa31de20a87c99f09e5d5c77c +57978df8250316031b7f30155cdf27cf +c02993ae02a41eb69fc9c234568630ab +21600d5825b9b8f2c8a5fde0accbde64 +dfa4cafb31c9cd348cc3b0f756ba651b +240329779dbbd874cf5c9b3f7e17de8a +459bd157df50f5dc16f4085d90899e99 +d4146ad2efbe44f53934e125dc444462 +8cffb4363732f77b272580a481e95288 +9de04cf99f97059bbad688418eb20fdf +3b29a60008adae0dbc19cda23a05d298 +6d71a3b054ffd9d36251aa11dee8185d +63ed82be9898c736f5cf5696626ee8b0 +ed07b77d60c2306b64a16efb5b44d80b +d41d8cd98f00b204e9800998ecf8427e +dd174058d2c538970de1f464488275cb +e667da5de19504bd33ddc30e744df3e2 +9117473b3e475cdb56204d10250fdaa1 +d1acdce339b54a61b2bef6954fa1ccc9 +d268a53e9ad2757756d454116b2ff966 +b25654a78b4ec4a1af7073e8e9c1606c +6f9e0a830137a02ae31483aa2fedccb8 +6fd4e270876b7f8cc384247c7fbedc31 +677e7b3c2eac8c278143b1c86691532b +584dd72c8f9bab932d4b31a1ab5a0de1 +36357eba6fb541747f40aaabc21599c7 +79b527237a66f7b1b9810b28cd654fc8 +0287fdde969af2efc00582c383c2583b +96aecd77925d84466a14a4b8f9aa79e8 +0c626d973bcc0a6e464e15146f0053d5 +81b0579fb055ef4686376f8b990e6326 +22f1b16413d49155c401a03a3daa0a45 +58d6964689b6891437996be7c01afb10 +499311694f65d21082875efe4260054b +0ac72f255825c7b5aa67189b008861ff +94615a7833976f4091b4bd340f9980a9 +7b946a7ec5d5b622670e2b1a9527a1c0 +1bb489178ef51d9abb1cd75c8fe2b3f6 +933c5b6db3a8ee94de81cc184c8a8aea +f46a64079f92f41ed4e18d204ad3178c +9fd0287c4e6719b71cc1af309c67078d +a23c528e82b29839d5a049a7519e9c16 +538cbbc6787ca4ac7d93408d233783cb +375eb2949b6c21327b7d6a50aa7f8d84 +0b4c4a98a372b26b6ca7042dd0baae9d +05848d1f2555bc5eeb8973199a37253b +72d0b06d1f7dde2cfc2dfc7182c696ef +0680d8bffd0e5863dd5d2e627e356505 +f81ced71cb9e7f589ec2a334c2048fd1 +2787a2bf6a1cb2a1828577b3e7775928 +7d3a2f310f080c666482273103a67c8b +440214f8e8865faf70d69b7389d1fb86 +a4fb174747ffa1dd6ddd48d426ab3a97 +a1af72b11c8551476b6ec342f457e827 +06be6081cb8becd26d70bf32b3b11afd +c95b4e60054b1722086b1cddd92dbf4d +3f8ff9b3c331a8fe7978ab78f03a130c +88f7b8f92dcad947b78d3fa700e8de5d +cd9bf27fd995f16089b4ff76e85bebbc +a03ce7e41b3e112c04a7e595ac4f6455 +eb3efe2b03bd0763f90ee357c283c85d +1ded3665f1d019029814c9810e4f6d41 +91a9906744566ef74f68dacd24bb0bca +832909b8e642f3c1d3854d01f9c2b0e2 +d32f3ed9f04218f9cd9d052c518c57fc +0ff1fbb669834fdf4140a48556054e0d +6821e6d78f1614ea319c095263031945 +e33283e07b084c1a5085ac060562b1dc +09ec10917d631ff61666e65c23dd90ac +7691b9601bce81b819e9c19d3ffaa8e6 +6f8e89b37ce140296e811bc3a7b86b10 +8ab4c53fea94b45aab724fd72a938205 +7dd99b56e2cc40271a4c48096ed7465b +aae9bad569679b18309cd2d99eea07bf +eac48c6ef75a0725227aef29f9e50c6a +25cf842676746135d64203105c7ee59b +358e9d449379f34ea2595a89ee271eff +7500b22492db74eb6e702fe516bcdb6e +a96bfae810c7e04fb75f7ea6b8c257e7 +b01319e09fc5ff89dcd767ab36eadf25 +05e5d2f9373cc6ec54ed136f2fdaa721 +b05dfe1ed952201e62645b3347b9d680 +1a4d8f0c9977181aded1c9d988f3648c +2ccbd4b95d98cf1db48534832b3ac749 +889c1fbc548e3ec6ca700232460da3c6 +65793565a04cc1bb3cf29fefd2e3c6a2 +ca5f89e3ba2b2c2d52890aedd44db0f0 +1148c8155630561c3a095371d3b84e57 +e2d31fa9d773884bdb049c71c6c044c0 +f54eb5609ab4acb1664260d0c80e2b26 +ca3faec4211129136df525f89aaa6b36 +40790e02f4cf0039d5d1aa22ee1b69b5 
+38d60a40ed5de9c45b7ca23f469f44f4 +609c14a8e14ad1a7dffc4f067319154a +08e92049604c227c934ae45be8a2e26f +74df4cfaed38c35042238e928ec4f32d +e734affe6f992226d9a03b650fbd9bc2 +a3d3f47e46e4ddde2e0723baf2ed49ba +6da9bfa4892ac617442873d13ec5d0b5 +5ab56acad16673f90484516789bce8b9 +880ed1ad4de50a70e4fe5ce53c229edd +dfde8a13f7e6f50abd194bfe9b738b47 +ab807d8b4bfb214a1334e074dc647593 +a5a87c676402093a6e1b117f910ddcef +afc87b9fe13ff1a8e926a87c1bbb7b21 +d9ca73e6bb9e8424412910cbf9f9b79f +02a123e4ceb95ce4390220c2bdcbd247 +dddd5091e5dc9cb115c57e4d579b0f21 +353ef879f5f0817d8590ed537a2e44b4 +023c8b1cb4ccd96a62c65dd9068d6d79 +cf19245cc3aac204a78f15a38b5bfe3a +c140f6383f4c7baebc017332ed8013ed +a017597df0d69d83affb907a76c27a6e +0a5c2d760dda6e05910599307aa0b03a +5675cf1151be0c3aaecf1795a46b495f +e3c311b1579cfaf5964f1e1970bd0f55 +256033bb7aafa131ad8e9b6be921f339 +c4342cdd9d2758c785d414cebf4faf7e +6c300a3051b11ca05e986e0c4c594c42 +adc4ac4e2b85552285e4d4dd05cbce82 +708cee22f7e68462c14c8edbae36113d +61820b829aad5b9b000c753cf77ec1a0 +574ae0db872f9a1d07197c171869266d +607890048c5940d41a571139eb83693c +56ad988e585941ec857196efdc0c550e +56063d5ec3fdcc63f5015188d22976f3 +e03eec0a3e4d12da1fd5cb2539f5b584 +2c878388194b4c94212d93de4bb6eef1 +c1e3cad29d12816afe6fb6f038d88042 +82cedbdc67b024e83a05a171505573f1 +d6c1ccda768774de82e87a55414c5570 +735160c5e38171f547330026a7281821 +cda5615653ca15065a395e4194edd732 +f1126686a4279407b47f9b1b07988923 +046b0958897f878d302e986ab640058c +3f9fc8d29eefad9bb56a48dd1ba992ae +aa2be2098e1ff63d72d1a626806c1ddf +b1b33f2ef98c3369f81b31eaf253db50 +406650193cf0572ca1dda51669649aa2 +756a2af9fb9778cd41889e5c91850ddb +7904303c30e6d4460e45d0aebd561933 +bb94f946ce59c6611067a231c5438694 +152245c577e5b98f3025e4b57cc99106 +414cadf90a71921ad0286c62d4ab5725 +12c51a50cf805a4115607995e8253984 +f696039ce93351d3fa4fa96362a1c08d +338edd3c6231bc04cf574530fd671c0d +a64fcbce82760e3b5b36f721ea5158af +a964946720af93ad890a2ad224520acb +5a999ebd64ccaae833119919811b00a4 +9a8d46d34c1cacfb564dc66b963e0449 +8fc29e7ab726793d3a97879b9826434c +fe694678d3db879d508aa06b31d9d2d6 +e02717481fa370084f43eafc351e22d0 +86fc1b459c61c9f2febc2d5701a81a4b +59eb3119e7de9ccfd639a4061d100618 +c99726cfc09110037652d8d2b392b959 +b3a5adce97452209c80ad94babbb5496 +72c73b0c19b630bf45cb01f81e4bb945 +9e6d6cb8b94f6e7c1a20add5406bbb8d +cf9ad3f63bb3aee39e53548d4d7426ba +45f078320e8d7068dd36f05e6f1955d3 +a5a2c68bbc8054ea4ee19fca1412cbfd +b30134290348ee2f141010d39dec7aec +6d8274e0dda5c19cffc13b8a98afcd39 +94ab774fcb977525845497f55149b6c4 +1cb162e589a80f0183aba766438474c7 +47a824e01143bb65405d9a11c07fd553 +bc0d64598b0cbd385a67736080709a1c +8d2eedea91ceb521eb3bcb7684de8793 +41f355c378dcef03f0c1e39389981cfc +380aa215cac31dae74077433f1051cd5 +95db93fef3ee314592f472eb979b16ff +f24c9651891fe7e04863b5f8b7911274 +90e4249635cfc1fbf866f22a3ec386e5 +e2348824203ffc5bbde01afeea86e329 +3daead7ec26a777cedfe78976a8a2a49 +4e9d98a6710f23bcfc22cb39fd776dd1 +2f8557b74b3890879bde943e77ae45da +dbe46ac02048c73484c12ec46d4edd18 +90989e0c78345992c10583e2812fc2bd +83a49f4e2a7b66e0f2f1f76ec966b710 +903e7469e3341959963521c9825dd302 +6ac4f0fbb52f7b196f1e3e105a24400e +737fa0f52dc0b702e057b069a767a608 +185f8e8ac7be0959d6ef9158e643f06e +50ab4caed7d3fcde64df3c0cc343f0a8 +6fc19bf5cc7f39a80a14cda9037fbc73 +e19adcd0e844cdb78dbbbbd11c7e53a4 +456e84c02c602711b533be9da6bb8c2a +b5e42c2517791ea18e1497674376f0c0 +8aab67be3e61ada3205a9267f7e818b8 +4d0dbd07d699c1ff6d9f4c1b76a69c35 +ed90428aba89634a4463015a91018d69 +ce27ba87e9af039e872025305abc823a +d8ef3c33bab7b650f4f7422b83c79831 
+b6c358866b8fe4ee1fffc9cfe4510a2e +453db193d040bc53781ade23fb36501f +705c40110b2dd5f175aa5d324ffa5d38 +6e90916a1765971fd874bc9c9fbf112e +9da9675ca0c6bb2db8aa01076c27b262 +a6a494532762016be1052d5a14c790a8 +cacfe3e3abfddfb9545ac8c30d8b1800 +5eacfa195a9eb6bd7923ca7dc2928cc9 +3cf8e944f20da8bb1df90b0a0da9ed78 +c2fdfbb380e7b5fbae0cc0d2b77f38dc +cd950c45f1cbf0cfc603ea3a7b74b66f +d41d8cd98f00b204e9800998ecf8427e +ff33efbab120042dbe04689698e44ba7 +70f342b578f8cf78c5215057fea35dad +1e6e89d5a14d0053aed373c4a17163c5 +f22f65c85c292e7d72b23b98e671033a +3c8f6ed546f75d93a464c2d24896f690 +ff974458b61c5e85f27f4672f40e81d4 +dff193d36b212dae756cb8a46a47c97d +70904c08430f68bbc5b76616de6b9ac0 +63caa5b307e12d7a0de391091cc26c86 +44532409744739852699e849483f7b7b +7db257d129a57d06f4ed1509860fe45a +b5a54879e861a671d8a845b374dd72fd +ea1923844474e03207e072b2d4e74837 +e8b97c4f41cb27eecd3deedfbcfc357a +c42facc894ee9802d82c526e27f43ec6 +90c950012013e3c39301fc01aa7c3757 +7aeb9d8a609ff974371b67ad958db286 +cfb5970c43e854e1ab1e144799047d8c +2e0b3fbc7bed0929274ae29759f017dc +50d6ad630082e7234d972a463281987d +ca82bf1f35ca955a4ec59fcd056eac29 +7ac6cea0f03b7f1bc45466102664fcae +88b2df42fe4fec92255e38e0a8b0e6f3 +3bf240f0450f8d4f375dc7edcaf743a9 +4a2d11eb3dea7abd43c1bb7d82e8405a +4c2fa28acdd01769e654df3ebcfe9424 +0331e427ee466e4c2fb7020e9b54418f +7d004c386dacd8976143b299aa87d15a +0cc414df224c1d451f89a7f6288baa6d +032d476b549ba74f2ba4b004d61f1bd0 +0d0545f6ca2f327ae748318beb4f81ac +b80c0dac858d9cd9f83b9eb856e427b8 +18fa100c786a8a9704451c4231c29d85 +15c6260ebcc8368dfb7c0f610a361f39 +93c228f8f11f742266f4c2589d97e0d0 +e92348dced75306d81a67d5bfbc4d1df +b4756c86e3b41ab254a81db1d0ebdbdb +730acac3261563cf8ed6468f1e9a591d +b8ca05026e2ca7588076306bc6e3920e +a43577a34832be9effe6aa5cacaaa52d +9d5c63cb1a857bd17bd8157df4597452 +0d26bc126979bec59c521e25caeb32c1 +1a212f1c5b6a73487704c5c210f7ce12 +2b644c0b31161f8f068c2a3b8ee40f51 +07997e63fafdcc0292a1317c1b8b1a59 +1dc338feb4a0320b8e1958baa33aa99d +18a4426bf13db23b8ba8a17a54b1bcd2 +5adb788f366e74faf239f609edb2d32e +23a59a79260a8f73e7baf1e9d9b4f85c +534b2861f13faa00697d7ac04f49ce12 +15cae9425dca7f7b4585e3644264224c +6b72dcdf04c38bb6ca087a4fdbd72cc9 +f1079a5b29f0887835e41a924d6f5e8f +c327c9639534b82628718b30e3192666 +253c6390b575db045c39c503a44f91b8 +dd054329d45f0f59b1a26734fd110307 +0bdfaa829e3050a3b9935bda2bd49e4b +3725be0b61068ab2bc0b53b8b61dd7a9 +f91fba1534bdf761fab5e68238c94867 +9a6a4f620b0152d92a3807425a5884cc +b466e2eb79b57294306f567b26b9723a +033cd77a3e257030cdb15b3a7fd35818 +afe38aedae3bc9bcdda9d92d30f197e5 +6fd5223d5e351d2a79f639c1ebbf1733 +6f254fdbc8514a496c8cd5f6e9c380dd +a9595fe6d1fd75c2c960c6cf76aa3f74 +d8f50a13421e6502ab8059e307c4c596 +562ee743002470291ec1e25773bc8326 +e40073cea557d9eaa402aadf41dc3eaa +b3ec88915a4d7490e87f1c2054d3362d +430c79d3f5593198be390014f4bad9f5 +79c69cde5ef1ebec99b7f3f98b2910e2 +9e70b5d7325d01be5bc13f399f0b28ca +6a01a537c6271340be4387339ef2d976 +d57b4fdaa0488cd8970f19dd744b57ca +3c5e568f6ef5ba136cb50540fa7299cd +1c780a8491fd29e47a3ef511eb3eb4de +177e4af50aeef6eae988ab18e51cb9cf +7ca86dd32aa41d1098e6fbf27683292a +34a86206890a162015174e0a6b89d1d7 +d86033f36f161e20c3801ddbf1d19d58 +a1faad53f5de4431b4b7b1fd82bcddf4 +37cd3f95b1bdd711a8ea716d4a86c9b5 +de847de25ba15f7915b4969fd2a9b97b +c2867226af02706e502c8595f7381869 +029c8a296d220057e3ce9c9c92f93fde +17d2a23723cfe20e5d299871c177769c +dd14886af4ae1513c1e609067ee80829 +3774b5be9b1424d88514dba6c767887a +44f952fb9086cae3879aaaf0483e256a +3689fee84120ac6f1cb42f98c8b8dac2 +8cab110fee5c8c8ffefd2b6905e42f26 
+9b081bef83877486ae69266d9826c5b9 +36816c5224bf191e293811ded898987e +ddf258380e52ea1b5873f968cba55e67 +fa3480150eeb79485d5bc842bfd7b5a4 +d64bde523e19affe349029b731c6f647 +4bfb63ebaaf0246944288021ec50d582 +f079bb23f03a0bf5b61763cc5d1d1a8c +0b90597f59ee7c3653e92928c3bc190a +bcb98f7ba5211aa5a699036dd2ce2082 +fd22e104fdcf589c59992f440ba9a2f0 +9341a9e306d3f6bf3de590ba7bba37ee +a22fe2381331e020d7de65a83eba198a +7eac6624358c55c06cabee923d4c9f1c +c8cc9503c2667c917f7cd655bf79b436 +8db383d73c5600d2b57819e51f3f2c37 +451688b39c1c99fb045c204187e7e30f +3158bb5807fb7016eec25143301a258f +72869eb5a1397cf2f715b9dcbb1f7d41 +239f5023f135782e7f91dafb0e90be65 +109fa496a6737d9b680e9fd87c9c1d9a +4707ea3db99170bafddf50cea25553b6 +1ea18cba7c4132256a4b2f47ec107191 +078cd412e3406e0036b3e3d223ce9b1c +14772ffc2c6f0b3720d39551e1d96cb8 +8bd732f36881d8f70aba862fee14b29f +ec4c806760819870561925cb2660d86d +9066e7caf07cc2ed5403147c7526a499 +16c654797a2d85161ae8003eade31ee5 +caa1037d097faa45a84280d22aa5d655 +de0d6178639afcfd3d9bf5000f0822a8 +fa10ccaaa30639764300043f5feaf54b +6a7ee83b8cff29f8f9abb5a592da9f88 +c32132939f96b76dd10633bf12aa7a49 +57c2b32bd47eab3640346aed507c2491 +80166899b013c8be1cf36853b5bddb6b +3aac40327e85bc0519b83a5a17470915 +86fc417a5a18993a370febe70ea4a546 +f7215664a9650be943ec1bb9dd8fa350 +2d5167fa6b74fd6d497fa23815268888 +b310268aac98b05ed4e5a1a3a961ad24 +4fe88cf001b2506992952b793c77278f +3dc5fddea53f506e1ef0305bd2e87051 +e0967e7a5b2e4d163c56944424f88d61 +b1602baa6fcbb0800421c68d3a83498e +1c457fb348985b240dafab3dc4f72b9e +7d7c15112f744fe4a0d0258b20a60102 +16eced126508a202434b3c1c26622108 +a60fc676a2f1c6f85c95b8d87646dcfa +08aabd6ad52d0075efcb83d68aab576c +2e5678c141431ff32b4c91e8019f0a78 +dee19c289021b5842e25f3cc16d626c3 +7b8dcf0a866dc568cdce2ea79fc4505c +3caa24b82e6bf8f482296a4ace25f6cd +9866b0bdaadbec9d37631c45ec8311f8 +95aa113463c31d765a2b7c81fcf3d391 +c9f8cb4d6f95f0e5a218e4027ced4938 +5e3ff845ecc09f62f639610a74d4a54f +a82e9d74a83aff0e8943c378bc5c0ffe +2cd593435893ab72859de8da5781b51b +fc5985bc62cc3caf7c38a79b15cd15b5 +e790f80a1bc19816458e506bcd361656 +ea49686d4e8dbf0351f686154afc877e +54e26c7bf25175104fbb15101a7cb0f7 +0797d8a83a7947f73703973e8c8bd8c3 +e7cb258d99e52856c92451c9c1f81746 +5490c61da597144d1caad9f79e2c8551 +91e212691375a930a13c7277786f321f +8bd9f4a8d241502c519c829b95d73d4c +cf02dec5a756f5524c922b0fe6e036c8 +9c8fb32498660a6ceffd6e1e0fd018a6 +45b56b663ed8de4e27158d9506eadc3e +4ea11ced9a2957aaf160b2885d8a1f54 +ba6862d8394bfde90733a52d6ae9c17c +0ac88d608e266a3a2fd1138079738540 +fa747fc1b96dc352a1d71f60b42eb26e +90ceca992dd6b2a30ed04ae29802cb48 +de0348b48e0d0e7064a0bb334bcbb079 +c1d828ed40184235413abff0a06520b0 +d8a6eb65757a8e7aa19215675d8e2023 +e3580d315e22014224d8c0974bdf8860 +5f45f90b3bc0124153d65915f10aeb0e +4e055cd5fe24bf1b05f09b6df3751ad5 +ecfef0c79ada5e3fa77e8a8acded60de +95e7989346654aebbcf054f9755f3774 +feef6db82a960946647de533bdc7410d +1a60d3833bb940915588b55c03198c77 +2f628dc62617cf2954d714adacaa86b8 +93bf9f666635deefc1c493611d303fad +43b15b19d991621fc6c6a49af363db6f +8e12e31175338bc3cdf466da25d3de38 +dbb8ace8b7cbf921a93a436b96554dbf +3b5e0a2a0ec1525523126bd4e8e11a96 +b0ad29663676d1a59b8bfdd2dda1f6f0 +ec7238f1e8ed0c035036b66f8592f20e +db1efcdff9a32edc294af1eed58b03a3 +b6dd0c4c5acf9fb48d862fe8e7f3ce0a +ca43bf2e16d1840bede8c665ab463457 +56e482f4a9726601e1185898abaacaf9 +5272e9949a8249ebf5547037e2cb3e08 +20fd0266701791b8e11643f77041201f +c6a9195bf0595d6a67ae354c3e7d2aef +9f3611e891706148aaaa53d83ebfc178 +0c447ee35ef08fcd29e382149c5b9dd5 +a957fb89f83a802e68958561756dcf5d 
+8ef04dfda76ba9d088c4eca1fddcc117 +49d6debe18de88b085342c7ce49fac62 +2814ce5e159abcd509800a5b1196ae3b +08278dee2a41b3d0ee84d9c265f19586 +bf73d722a51cabb90f8a998d6ea4040a +f90c8e83c6963f4f51e82ed7f4a2cfbf +e5b65da75e0480c44770aad6d476f58e +05fd647846ee8a280243842587f39474 +0d1d8d5dbbd925a2aadfe683f6da3b18 +2e2e35d5e8e0131c0ac4d88fe454343c +9fa2beab761c282397d90672b1cbb398 +2da1f7145ee1bf8924ac4ac5bdb1f4a3 +419199ea74523b9ed17dbb1648ed28ed +e55eca59748a6a1bb898fb8ce1d4c15e +1b7da8335f49e49df9a2c52c40790327 +c425165d6ce2cbae488ab746a3e57de8 +1c23a5cf6975f35becad0eff673f85e6 +ea3953d27e5217846201dbbd5f2b2340 +3c97cf09d8ebc3c1c07beee81c1a6fa8 +499a8ded70659779616feff89793f565 +092683bbba1ec542c7cc72df5b4fa583 +86b08b93e56929adb5c652d275521b14 +d3f0a91a80c58420c48d58f2c25113ca +6c43413ae206aecda0596478b7a9f96a +0a96b526bf06d956761e9ea05c1b3f41 +7afc9b589e0d1786a82fd908bec733bf +b8941c56f829433b3cbdc4f7bc24e214 +ccac98329b27f64d37d90b104b28fb07 +d218f7ac8db91e1d0180682ff6cd4bc0 +7096c2742c852f88a5275881d5300440 +99de6704481850698939d216d10777af +15da68285b95c13d60216d6743a94360 +f760ef51bcd38235a394f9c548ef6df5 +6bf318e12e78f988eb388af3f1a84781 +69c7636383f008f4eb73e6eb90fd0fff +f1bb9b417dd88c9cce8a2e13a587f17d +616a9a32a97c2872d2f9623179b7732d +1c00692888cc52240b45bbeb02e42533 +bf125a8ef1613df1ff6fd7357151cea7 +4d29e012a286140624446c1f3e5d0a5e +5f2fed43a2972f7647e9a8cc22a63965 +f6cdf263080db734b9f6b3296cbdb96f +2f0aa1a95148a8cbb39d5962d3a7f956 +977444117bc451f2166e19f2eae87711 +5bec0a3d038c8759e6a3de9329c471b2 +6ebc21791f50481f53e6af1321af5aa4 +a2c6cebad1b1e89a81b19f61a16818c6 +4aaa4b61619bd3e49ded582bcb04f2cc +be26825009c6b219ea839f2d1e9079a4 +ed3245c98b75cd7b2f53b06bac891b59 +efdc39014880b90b6a66cd1385645821 +1a2246e167d868092caa4459e102b528 +12b1b8326f339170ace9c4e03435af5e +91959aa9becd644524ae790587e89b9a +16d33ac8c2d554528bf7e87231ab8f99 +61eef34a92ac7138799938baf78e772a +251a72c05ea753399b797383587dbdbd +6ba33fcdb83c4d8e8addfcf55158df28 +fa9d15520d1d873b5612bf86c8ed44d7 +7c4a6eb1d498807f9a1e6a71c2b6c492 +142f897437c3f033344101248bdcc22e +c898f59c9cd8d85aa7e9f39868a95fd0 +8f76186d9671e4da29a50583e59e9a34 +474a3c7294aa1239ff805a28fd00a54f +994536ca4d25fbc70bfa7c77c6b238b5 +0eef060beb21f691c8ccd5e85bd2957d +6f0f90a300dcbc0898b7db26e97ac16c +26ccf6c56c2545d40ff8ac2531d09302 +d9d1acc8726086e86145f149d1f6b855 +8dd5210d3a93ec264893c34b8d8934c1 +f012957a14f56c3d16ff9f0209adca86 +078a9688326b2cc4d2628c2c0b6e7e81 +80cbb15b16c0cb014afa11ca98b71db4 +80b28368e34505f5f4eac63d87bdeec7 +1c9b58467a42451eb108a989e6ef1349 +ce6100a876a846186a1d0004e773c370 +220fb70889c721450c5b3a35c68b88f3 +b4b6914c3159a44f251b58ca677f338a +b24d98d90403d1bb261f2476cd78b691 +851bc4076288ea6eebb24171bd2ca2ef +00b3a949b697b7ed9edaa02bd8d45b24 +b1a483ce3f0a4da59868172b777dc3ed +d7565e3b5fed1e20410e25a83c6efe1d +1f1b084091fe75ebc1d753c4221109ba +4fee38393a2f7423a4f89802e8b2319d +90a18a719b9193fd5a95dd4fec64e6ed +ca48993196c114f4299845e0df27be46 +46ee10ae47a0c99b415c2262e54fb838 +e8e583758eeaf475b755e035a2da9325 +c4919339b4d0b03d5ce371b60a4992c2 +885ac6fcadd795685737d59a4e6b4331 +2683ae1efe0e73f14995e4343f4534fd +feb7c9110ddbd883bf2f2b4f739d11be +94fea7f027d4fba8e07605526585db32 +db555499aec3a979f6caf215e89dcfc3 +f797d3e82678ee9f08dd2bf6e09cdbff +29e15eb19a38b9e15c6af46579cf32f2 +d11b92eebe043183bc8b938c5f4faf2d +7d313e3ee957133470af1f05eabb7826 +dad1f1ab077a220c724067183573f6b4 +ed17b3f8209a47e9bf233e3029f0cd21 +70dbb254daf4f6df9b013054580481d5 +14b1d4e18ffa098d9eef5f1c937506d8 +33e6a484a2d706de975556fec95d427e 
+2f82911c0ae426d8cea6e3c6b74f0081 +95d261052908e744b8d52d263c300b97 +9609f792a152de8256557e3406dbfafc +61118dbca9f3fe66c044aaa0224e67f2 +9e83fcbe240f89d82b28675f464cee48 +3dfd5e05da9c33044df768e6ceb98376 +0806d3290aff9149c54b8b103cfdad05 +dd2b60e8360257d5df4d45d5efc3d786 +bb533844ae531e7261b3625db0c3c8ce +0dd5e25bb7e7417166dc23c19ef26159 +bc698f2deef6e28a5d3cfe8e48871481 +3683531fbb9befcff8169e60758c6c91 +4e2a59075413f4fc46e785af2c838ce9 +9ce1272a778a9009399e026b47018ebf +cbafe6f7a49fc5aba7e4ad691b405b07 +42ead541d12b50f725a978da4bdaef2d +a9407459062830dfd0fe6e8eecef81ef +6382808f3faa777319896b528efb8bea +5c3e58a40390a8db605092a20ed53ca2 +4b2fb951045775f23316a279b0bbef47 +e63084f440452f727625a23d8f335e56 +2a555e24751df641068cda10a877c0a5 +ddf1ca1772f6cb244823225d7ed90c42 +5dcb2109f69f7a26d7b55078b173baac +4c6c9c2d627a40ad36c959beeed408e6 +187c9ff07638f7b665151a7e5e4e9dc2 +d41d8cd98f00b204e9800998ecf8427e +f3aa6c01ec5dbe2599b75725b271a79a +142449ebd18dff65f1aab8f8192a942a +c366b57c308b322f70447e6f88b3e33e +6b89ce59e1a3ba75368ecea9d1594f03 +d0989b54d641fc6b28bcc2a7378b4adf +ffff8918d5775c220659969b73924b16 +bcdbdd9fc2870bb2899d3a3c5173fbbc +04ff29f455142a4853e1adb275c7ebba +634410db3329b20e085e006256da16e7 +a0ef8c3a505b1ebb6cdc0fcd2ec62e36 +108231dafeb42a3bdba7d7dc9519445a +271b8188a4dac268219479804904cb5e +0bbb305c78b25e3f1d30163d4104588c +c2fd2c2a51854f4c693a9d4bf54c8776 +24fd3f06dd2b41757da251151d4b81b2 +9a81f4af298bdbe8234e7a35f37952f5 +2420b84744a62b7cf3eb9ea90418230d +b3de425243069a681393b77f8cc80985 +2d5181964ca0d5afe76f294f6f28e516 +48c6756554b84ba1a5490d7ae07b34fe +864185a7bb32c100dfbc84eaf21f4b68 +e62f11ee366fca4370d84b04e5564d9f +80f8df1397262bfff5e4dda5c5777a53 +6ba0d26132f1cc0917d69fa510627e2d +0d7eb6b4a6ce2761c281907bcefcfd36 +6f83c35ddb6fcafd870083b32a8debe6 +0c4db591139a3e68725db013feb807fd +2fa548e84b8224ebcd869d1bdd6c5a69 +c1907774bae947e0bb9db7a6f220122e +a9401b27d3f70ebd2aaeabdd35f7fcd8 +12f2b3c51391e96ab39df2b7dbe436a0 +34913642c51f8251c839f87143364832 +b13b3cf873a6d56d69c645736d712245 +60ee6e24b7d1a633f449d19a048c411d +f28ee5e510c7f79f139ed4083c8aeb7b +1f62d75294b8bdd1660e68b13214edf5 +e8efb2f3f7140d80f79835ae5b7fc514 +89071324382ed077e6ab43c4b880b8fc +1c12feb4ea1b5335785c89375e65a668 +831ae893fbaf63ee39c4455a6e042b23 +21506868399acfe01ba486d3b1bdee60 +47c56f2bad3a085f3f478dad63fdff1f +1dcc3cdd8ecfe29d896b428f1abbcbe7 +df3e255aa8cd9414801bea3a500862e1 +f50a0a6cf186d8349c2d88cda4544a3f +de8dad8af2e5aad23fc02f5a84904589 +bd044a5c4e247edf61dc9ccf4be8928a +e625230910b6f51ff2b3f642f17d25a8 +e37dbd5baf7455e23fdfa2ffa2a46720 +0ba54194a61a7318e891392d316f0b11 +e7b6a4752f1b1dae07f8833748876607 +2abff47bb28de35c0c471b29c44bf403 +1a7b18c0de0259a1c2d846f728d73d25 +5dab3e75f0872cfc585887c0611d1ff1 +c434557a11d8a4623ee8437567bd7f0e +87e3b118384b3f0e136d89ed781642e8 +ae9f8cc43da1855e0ccc345464e74f41 +357f3b6a2c5a357fae9cfebc82344948 +9724a901b21d49c924c2337febc10229 +f2345ece6a5e6d4c84a7f16c72ba454a +ba2e4a20634b2bce1b9a444a27b96c59 +fe404fcf27d2e3602a4d85e4398da909 +f5ad70812b2ca5c964a59ea725b275e7 +98083f8d321a41302d20a6e5a2507138 +90f86b8fd8c5e74c82a609275e63bbb2 +442f94169a67727df498317a11235cd8 +04dd558be675afbf5e02440ba3bbabfc +6d33e353ad14b5dbacb0d7a0b003da63 +7451a338956970d7ba509daceae045e2 +8119009a00c399deec1cc52328e2f96e +355e5dbbdfec594c2b7e76436318752e +93fea000a851f8a86f07deb591bfe8c8 +b34be72ecd855fff514c8c0f82a4b77e +3f18e77769a6811641cde1a49ceb0281 +fdf3014f93fa8d10fcc26fcb9b2a0ffa +ff23bf8b822e914cf728644673b8c973 +c37f610465f218cb6f067a49fc93e065 
+0a97e2f533aa1a79328974c5e6bb173c +ed1d9e759599308aa24b2bb999e9d72b +6d4190847c0a8c1b7fb9db734b4fbdf7 +86bbb8061194cf439cad467ab983245d +6ca3e7398bb35ff8acf98c042d9ca94b +a03734bb5ab50c4eef27ba8c1717952a +8da24c8f0efe99a0c31b59aa95f80aff +499ad0e659a5621ef66241553841abe4 +e12af779fffa25b09c4c4d1388141cd3 +64ead50a3212d45265bf37be74e9d5ea +f97b640efbc60141bee93d84ad68854e +d99eb73aeebd78e804f7ce1bec572b83 +740089b5fad41c6a239bc546022b05d7 +3acfe08b1e5c8bff5b427c1492633510 +3bb0fb861b170d969bc8bf7e14d1429a +1439b9579069b0636fe8bb17737d1716 +f7bdfb090aa901ba166742a237b05e83 +e938a3eff3d577429fc41a04844b6ecc +0aa6bb02e7927af3ac06a33c710490ef +697319e55790e19ec8438c57b24b5e7a +231972d00402faa8c7c40fb813ec921d +5a483fccf0ef07971481418d88ddaec1 +a7d3e9917d3c06a66a65fbabe838b4c5 +0492bbeed9be1a02a4733685f626873f +bfd61c4b658c68e7cc1aa7e107988ba5 +e5fa9fd7bcc0fa7dd0262ddd11de1317 +2ee228aba978a4123807f55a9ab1b414 +552c5ece015740f96a64135e1d6de33b +054037480937f6ebebedd5caf92abb80 +8854d22bd8cd934723b63d84c4452d29 +f2e15e9ba538de22ba3af4a29c1000ff +e9b01163e81ebf4015602e8f86b40ee1 +ab36aa890b88ed1881aa096873b2801b +b2815dfa95f1e7a6efd378fc7efa0720 +0309c8ebc4bc39e2fac2b037d4af354f +e8066a3d440ea82d9f3a5caef08e2cf7 +988247e7c621f92cbe771c6c8fbc2541 +e6196a68e9ee52bdbcc0ed2ff9704174 +8aa9b9b7c12787e2ae9441074875c86b +20fb0c2dd447a391531336f1cd97e6c8 +d9cdc756377b3b014b6adfbee06e63a4 +92b396e5c9de8d1483a8a2d09c389c6f +aa8f449cb93441c4718ac0ae48165a0d +bc09755812637f8faea35c9c0e93a52c +876d042eb3db284b6e8736f138501fc7 +18f947de72a1386caf3b1b5584339331 +f97a259d83c0da7a45e8d7b74c53a7d6 +6f25cdd694ec1da745dbbc4bf3db0845 +e1460c42f31f62a9a861f78b20dac1a1 +1ddf846bbe6c64ecae8f7ec2073965f6 +50b5017b627f5f3d9c40a5d45f61be31 +9fe05c10ae1b3919197a50b44f4e463c +305a466ab7289e9f02b831ec457beca1 +8a25495bc106bb5783185dfc5d4668c0 +7469dfe03b5c1112c75f1bac09dda561 +fa08b155cda03bd11fe222cdd799d70c +2af53039b7e4ec644fb444f6dcc4ec5d +2830492252e54ed3ad952826fc2c9f46 +f0a77e6a35c182efe30210b08df7c86b +0d65759e6840dd304c62ebd4f6a386c1 +723e949fa521a7ab5cdaec751d74ec91 +3db42f0b5c3a17754ac46199e186f2bb +812a5bdc4dded8bffb972264f4326174 +d15f732870d9ec18243a6f2914e6a723 +fe5d38397478635e85b551b09ca5641e +28c546f556225098eec3d7a007d1690e +01242d7f40dc88347a73de213cf91cbd +7768b3cb945856c20c2c5889ba562544 +779aab9ee36fee72883b7337a1814b1e +8378d43633087992ce374ff2cce175ab +e78b0bce6163799fd68cd94e98fad239 +d97b3951fe760bbf9ee10ca41cdbaa6b +5433c6080cf839e3e0b1341d60d294f7 +d8c13101c548265e4fb7bc250557925d +b5ef614945ea8a8ac7cbcd41e0968088 +be48f282ef2aa7a46fc604915239d81d +51bf70c10bcbdbd12c0e67d350607687 +98b05e123eeb54b0e20ae42ea95fea8b +394ae4d68c58f718128cddc8f0a6c843 +a009b82692c0f5468a4c08d0ed82db6e +7a64567996b02733f4c19a90b49d07a8 +e3331ef1e2e120cb8976182a31a7061f +d45d30b74df6cfa46ae3d08488f5020e +6ea7eb6bde5619e73ca6fefc00e17055 +afa954ce1e4757dda7a4e0670eb97b38 +3bb7be80c90592320370f675b5056c07 +1236f02c1623ac9fd48fe6da70a26c26 +5cf37849b217ef29f74dac3250c02e1a +937ae29582bcfa64a12c826066c9d812 +ff63ffa30c41805bef703a2543e071bc +bfabb4b0cae163cc1b768356605a156a +ee7196d769b75ef6461d4645c65a0da7 +d8a7f0d4809ab319fcd9193787587ca7 +0c9a200405cda487e3b138ae1c4effb1 +343b9a49138c4d2371e5b34d03807db8 +f8a6efc2ec9980a58a8afcececc6a1f6 +79f749bfacdbfd47d6c0bdd761b4c8cf +bfc943bb60dc391e4308e15b60e3a289 +ce1adb7ead0030b4ee8cb28fb1fbf3f2 +e3d585a95d96c6554b504634d3a5075d +910fa52238d5b9ae3692112eb6aa08f8 +02baed5a9e6dc1542ac46cb9f9bfd354 +7b0974849cb5986e3eb1f0f7b76e4771 +0cf1cdaabfd98e402dc780df7274b391 
+d85e0ed8f5758730983d917b27346bf3 +ee4d288e32e8871e4608b4556adc10c3 +09bdb5791e7ae9ea1f9479d65bb2184d +677ff5696d1a754e3377c9ba98118304 +b43b5243e017ad92d0ad0b1385aca7b2 +47c9617a00beb2c8f22aa35715dc0fa2 +1ce2451e8cf335f1009fb88959c04f48 +6c4320b48835411cf6490662475674bb +5a543cb17baee6fd030b4fb51b9f36d5 +e6af1da8d345e76a71cf9e8c42369ba4 +1ff81c6820278501b944fcccc0cb8b4a +c3c65d0c26e94db1442b077cb625b1b5 +c7bef846d9f77927441ab8256fdba598 +63edb66312d8f8544e44569aa200f37c +35857bd519269dfb93695feba6bfc1c5 +e706c5b7ea8aef20a215f8f20f2affe2 +a0d795e147853d603c7edd7bd8bc3f90 +f727c5719e207370442ea36ab4453d21 +6365e061a9913486641e6a2786dc79f7 +d3d9501fedca9496aaec1e7a92a48d67 +bad96ac8beb546c551aef2b447696b7d +47a0bed406192a09805b1af2eee52304 +d6e0c8b5810ad12fec48be3916948985 +a6b9fcb0f8b433982353165ae04392b4 +498ef691ac318bd33daefef9f88f0f72 +c13d3290c51e5ca605106239175cb0f0 +e104e6c23ce817787afb196336d484ea +ac6fcf2ebd909d344301e3ca1c7982ce +0653009326232267705ec203209fb35f +63b14c45c20289388b62b54a625ea2b5 +475c221cebe0ae0d5b4353818640982a +c2abf6bc7809cce21a757f4a07fa5e3c +ab9cbfa8083cdfc75d541cafd6cdb90d +76589bb5b62df0fad61132d0add75cd6 +72f5dc02241bc7155d9fddf1c4133975 +61258e77bc4c4fe7baa1e1c90a3f270a +f8fd56da4022e15c38485e13c0963999 +2a8a35a62e3a2924dd1af4aad10537c3 +90d1e8fbde4e21c60f726047a12e9e07 +6c2e30deebea01e69eadde1aa355be3e +c87415d9fb563c59e40f720069dc3e70 +037a2554560e09e5c327c813e5770f4b +26aa57ae97d01566da4d7c0b4c210ecb +cd47b5aca483e8320a4277a17127d506 +c51a30805c9b471a7b932ee825e05948 +1ddfdf8d9765948988721f01303c9085 +263f5ba791a278f05008147cfdf9bcfc +81c60ec3bcc1fa5030acdad46f4676f0 +e0a4de61e01e62e933b09962bf1fb4b3 +70088b7a7530cfe2a994fad4b595fbc8 +1f1c366f62fdf304c59ec1cd98024c6a +c395fd5c671c1f4ce2182a39c543ca1d +425cbdde578d9094e04488bcb72052a2 +75f68432aec28093ab0bd212ea3e8af7 +9107ee3a03ea5a390d843e7dd58322fe +af21e3c40f612d7c1ef67d5c07bd5103 +530045678c79d4bd35c9a7badec90706 +874224e4e2b58eec8067d5e3525387de +961958423e9da23bd01dd76fc1cd2ebb +247c8a7abd9b601c115b3211043c8d9b +2fb2dcc4d4098899b64ca6ad0f3fdccf +af3dc73289ce5d5cafbbaf482850f078 +34c119728aee61eeef935c973b8c08c6 +3434e8fe92fa40b9728838162cdc75f5 +10d6b3e7254230d2cf39aeea0f28c1a4 +860ba18fdb5d55eb45e57d99140a841c +66ecb5c57c3b80df6cff8c6febbe0b78 +a5084787e2e46dc302d431d97bc171e5 +80426bd8aa9430fc7a4857b6e016cbda +60440869b1f73ed4fd63e6a7e89ff3c3 +61a26fa42cf9d0e618dbfd67e1ccad38 +6139f6be729bcfb2842dab2dd38a575f +c3357c7d2f4aee6de2df65baa47edf1a +184104b3f11f0cd50d3350d26582bd83 +51506900260aed189f288363406c943f +ddf2c71201cf577471b9a989fb6f349b +204a81eaab768c4cfbc9ba1f4ab9d005 +2147057e7f10f7ef9a77bd217da618d5 +ecf6ce40ae41b75a0f2315cda25f5ec7 +8fc47cdb49d28177c61e3d119e324062 +10609af110a2cdc751a7293d207ec2a3 +a025665e13c7cc2fdad9042069ca258c +dace91051b753440878b7ff1e00614b3 +49e36e18bb5fd2b0651787ea2d32787c +dd38a50a3368269aa07ecf5a60a51975 +84ff9c3da68d2918de3af11ffa8b3f2a +4a4505f152655cabcde5c576b269923b +380584f79a40372cc6a91cf974ac1c06 +c0c9ff1ea0c48c7ff6facb4878deb5a2 +d39922084dbaf1797dd97422955d0aa0 +9973194b709ff18bb0d782c81d868284 +69c711c14e3114a04c62c4dbebce81ce +f7335b252871bd0b06e8f8bfc2bdff0a +196cb4809da1393619583cad25b4f0a6 +aaa91b2eae89f95db91dfebd392e46be +f85057d526477457611aec5b614449c7 +4699d11508edfb43f94294b84b47aa8f +10745948ddfc67c633a970e163bd87eb +c8523e2ea785d61665fab0839f6e6fa0 +d56c53a3fec7b297c1adc45cba19bdaf +8b5a2ce78dbaed037041bfd1fe84ebce +96270dd9d4035bb228971631dac5c654 +64d35e60a30f4bc3c9956141d4fb4f95 +e47b0ae4e6a9805e12adc77e8cd7acc9 
+9e0cb7ae9850e6b4e7de10d12526d9eb +2e177df463ade3a4cedc8a112d93cc44 +ba866a4b9ffd13fe90f6fab7f3c3383b +645b43c5a9b3817b4eea25a256d8abe5 +5331da7f94b709e6a6520cfe7913b6d5 +4017b5ea0564812733f702e4a73450db +d42608a15b80199c72ee7f1195c137bc +722f473848215d7db3eba5ea69694ac6 +e84a8297a2d5cac717c94dd69ec76a00 +f4af98a64ba3dbcdd6379ecb8f41b34a +3806746e0fe98048745b7799aa29321c +d3f2b0d166258300dd1736f960269ac6 +8ce7e20a10c5d73ab7e1dba1af1e5e10 +340a8d50aa3d3d190ed8a79b755e805e +670caefed28ca6856ce807cd6006c514 +b4162c75b9b7df1289ca083d622452bb +4dcab2b232f8335d9274e8330857dc60 +65d054e067e16a293f3059ed74d93028 +cddb2fbd11db453c42a3ca7e81c8171a +7d258ccb212d73500f11c3c3333943d4 +f8b4ff45d593aa75b9816c7d0d647897 +57387aa18744eca00618a32462f3822b +5f5a481a1f27aa4d1dbfe5f067825ef5 +8f9a9386847a80f7b59a314f60c81f66 +64da136621f72e837b3eb0885aa0dfb2 +b3f66b85be0839adfbc5660ef49a7e60 +f90bf569298feda92dc1aa65411dc49f +da71cb7dcf4cbe0ad85b42ffeeb435ca +4809ed20e935f75eced58590ed2d8d3f +3c19ee35f88b2b3cd0371b63092fdc68 +fdb144678082117ec0755faa6a34576e +690c1858f63e4f2bfdfe4860fe44e0b6 +328d386204447e2560c8faaa1639f5c7 +9e181288f5475414a9f9d1ae436d4600 +76f1c23887a6db49e12586726c78d52e +21f3867b090f31c723b6d4c9de019450 +2f9a1314286bf211c0d23093ee3a52a4 +06eb771bdb3759ce43b70c93925dddd0 +d58488eb47c71b5eff02df9aa7fb914e +d41d8cd98f00b204e9800998ecf8427e +0648086666c128d0089160f0a9dfdee0 +f994c57e1cb39aae906aea4f33975d8b +f098b741823e9081a291f19ebf473a9a +d87663f06fe10ef317862625b40c9c25 +b11d7073b44eab456c567dd6045e07be +808d38965df1021dd2f13eda982de83e +4557863ac3b09c047d2bbce430a170eb +619a42a557ec28cd532c61f1f29071e2 +7e126a78cf2d708a00a3a69f262273e2 +fc07660472c7558e1cad6cb5eb87144b +9b24803b9ce6333f86ae47be74542dfd +7ae471f697e88456f302a096479d9458 +c5cdc325e91695304e182b656806b9ed +827422569cf39e0bf2fe0bc8378518aa +f9a38b26c0a38e290dca574b73ef45d6 +27158336f3a466870b473261123748a7 +36a76d3276ce03b2f4ca11702f74cb20 +4b9dd37c3533948252b10d4e7025f812 +67caeafb507f7592554f26dbdac96e3d +df17bdaa2793778d433e18a6364e8750 +06f65a6554393074661a1c39aba37312 +13d5faabd775cba15ba5defae38a6140 +52c3b2bceb34461afe900a64aa629c61 +c39fcaea4c296b7d58c4547b0a8cbd24 +da0820e1d3e00b9a14b106c94f4fb306 +a24e3bc5bfb1160482199e964050351a +942dd6b77d4fbed1c718f5f7c6f03689 +c254ee3084a69aa5e4dcc4709512bb4c +a1a4c8d63f3947c83f2014974bc89148 +f2afdac92f7447985fa928e074833a59 +cf81b36a5cfc8cea63f97e257d15a41a +f31f7d4a230dfca399f0747bd38ff008 +9922f3cd25a6a3c1f0aa43596bb0f34d +b9c1917c284ddf752c71f80f66a4682c +6c0bf5979e433cd2d1a20c5f153eac7a +90e5d795a9163d67028db89c56b6c064 +6d38d12048f29fe3703b5ac12bbfe3cc +baf8189a5dd7463645eb9fcb198306f0 +41a120aaad47c576242eff2e32de8ccd +09c4ac395dca4f668f1f2186dc34ee6e +04d0f6ae23cdda1b25f032cb42f1978f +22678114f4ac361b9b05a4c8c901c283 +38cf81e64d1c2e7f949aa2c408a65447 +d3840dd78c0f7de9ffdd32bd22489780 +c9a6014199b15e3208ae6760890ceae7 +c009dd63399e3bc92443a2392b007b51 +c7a29b02e3a21f71723bc1c54ff59fcb +ec6d424a27c13964959e9d7032e22414 +dda42e61b1fd0cf78586b1c409aee77a +a75d05b902337226a48940c9a8b8bff5 +d1b916cfcb8d0781a16ad2ceb96c4468 +f352b04e348724bf64a9580c9b7e7e7f +621fd9c6bc3cb043c2519fe31ca43dd0 +95ab391351c3631c5c8744be221ead83 +a6992afe6821ae620f42ff6d717d0b00 +d41d8cd98f00b204e9800998ecf8427e +e0ae2bcdace27b6575268e260e6b221b +c1f22332c4e1ed83d9a664ecd76a7954 +4851e752e58b29fbbe81faedf58d13a3 +2a18fde1a7408e770d7465ede302d479 +a00e59854a078d7a307832bd8f39658a +e8b892e3da2bf70abd17e3a2b004dafa +aad791a54b81fb58cd08d89b48f624ac +52225cb5659539810e5755d5f207dfa1 
+bba58efca6741643ad87697652936a1a +d8e717c43e4fd964ad108dbfba8c06c5 +c8baab65fb934cb07de27afed9e36146 +0f7ac57e0bb5e88887af92808f511085 +957784b98ac468413c945325d6ac8967 +b1860c6cbe604179e3423c76204c59fa +86426a5c0b3e4383f50a23b7d30bcc3f +5c5933db0d54ef05c3f092a5d2c30d9d +822057e2762d7ab08188c27cfd0080f3 +e0869876e81c429f90ca485b60e110f5 +7d819ad6cda8b4a7f50be26d989085d8 +4cbc2add6be514464ba807d35fe48eda +3ac1ea6bd1e2dc0bfb6ae3d976a60646 +bd9e9d0318ead01ecadbb585cfa55117 +5e03fb9ccd9a7664154087cd22806a9f +fe785def91bc5812417e1f7f176fb3cf +8e1a62bec8ddfdb7704d2423eb260fa8 +af4114dbd1188593e5687063e599c5b4 +ad49980574d954f07adb422712f3e960 +6e80c44351f7f6cfc0405e38a1324c46 +4809950c7b571496eaaef977268cab79 +43f3f4eac6e6145972dc8bcff59d6c2f +58cc6e8565fbd77fbc400cf21d0a0b8f +55dfafbb4223d6c43d1e9bc06a9314c8 +0565e89e6300429517a6b093152ec0c1 +697346db8126570fb5393c9814386c9b +fa459da09ce33cd4a2811580dbb567b1 +150cd1492c27cc851f9077472847fb21 +6f68752846917ef00fdfe2dccb5c215f +584c527a7313d06759fab4251ab5c35a +00dc258b49e1df8aa47ce9293ff67a08 +6f86f4cc068f0f62a5fa0c6bf76d211e +9bb000b5801422b5ee608e72012f3989 +55729755e432b835b1089c835d055db2 +18f1ee79d76d3f0df366d85956307abe +53f3cda920d2dc2a210097ba5fe51532 +f322fce1f120ec5d98a9789bb6cd0568 +8456512ab83188c5f8fb905f868ede0b +bb8d034f6bdc7704c0ec536f065436d8 +f2bad4dfb588697ff901c73f669fc135 +9e5e9fc524bdf647d5a8d9812c4a5cda +8305b4ed9b95978f04ddee4752c623cb +30ad4501be0af75c712681f7a0f276e5 +b78e6fc2d41d11e5c70ca75d2f413bac +11cde0858e83cd1d2e215fe3a217e69f +6e385a8fc3fc2a02cc99db852b049795 +99eb60517df2affd3318bafd5119e971 +b393dd7a7bcdd6f0b6f6bc13aa0031d1 +07fe8bbde8ca09dbbf4a8dd00061f2f9 +0d0074ce7bad7516d7de1e2bd599a535 +ed4a715d8a861028f36cc9b6c4f6d8d2 +0b112b73337adb13b4e8aff9660d2404 +e59ba61b6f3b71bd95408efa96034b31 +3298f58b4c3f210ca428811b741ea2a4 +e27332504c5bb1793a7a37e75e1bd0c4 +05fdff6fffdb2e2f71bdf9799ee46954 +687de5a52069fb527f4ac60c46f37a0e +6731d2779eccce86e00a089e0ca9512c +a63bad44ded2139f3bd5f68f677dd3ad +6246ac9e0ab70bd4d042d7d31307f4af +5bc1f149011d12c168dd46cb1271343e +6590be2b17bcbf986ba5fcf9fbb3ce2d +d24d5717b8ab219545ced355c10c9110 +1153a4122dba906e68237047bf8ffb2f +29722104fd05de8b5baca230614364d2 +3242842e6dd27d1a5e44f46c2292cefe +ad8c9b5028881955c5f59a6f188a8f12 +b6e3565bd87901ff58e105991bace0b0 +6c304df067c237b2120af1a807e93e3f +24c748aa592ea6274faef3345aef444a +96e5c49d99115b038e253774ac1288b1 +99cbac6a88c72f35d3306107193108d7 +3d6b0dc27334d8be3361f08928df0ddf +df2c6de947db829da9a96c942be7c5f5 +08f312cb45f32072a07d84620bac049f +01099ee933d30cf873d56b4ad98951e0 +90ce3942ae4d2271a8cd0849035f4f7e +d5b99ea0a4aff8b848b2930e6bc0a4b0 +7dc5ffd2c96e0a928fde40e2f2c53a34 +792b702112acea32319249655efe9953 +d24f0ddd0eaf04748b33c82dc7e0f371 +1e4a5cc1c4cdbfb41acc942ba268e56d +a85a138263ae596d391683caa045fa6e +62569230e120fbe4682979dff204c97d +258a6a9d613ce3cf4697831d4b1e59f6 +f3b7c168ff003863a0d545fed2309631 +64d9c159753ec2142150bc5562c148b5 +ea5a9df3b49d03e19790e9771d9f3596 +e427908e9623bca1b845f65955e6f751 +3e463c89ecfcf1a7b29912a01423eedb +b18fc1390fa422b9e1f1d455170851f2 +2fb8b13db5c1df73a82dfd61fb999776 +0496cdfca5a08f8c55fdb2bc2c584725 +4db7cc055d763af502bcab130d20839e +7da1b2e33c47688a77ddae62d0900cfe +0dfe5022dcca4dc669077556a8236851 +0be009ce3e02cfae0d725bfa9f909a7d +023c4d8e4702c8b2e8d91f2c56690e6b +7e10439a7c58fb3ef12b4efb4cacc668 +70622f080fc409096c8e48af5827525d +70d5dd70a128c30dcda5043afb04af21 +ed95b0cf76b798557bd600cacdb784e9 +b6b7ee3e4e45a3fd5295ea728d9bc9b0 +f6e17d4b77d602934170647fc6fbe2ce 
+be73f29204d15d5f7be2bbaf51a221e0 +8e68702bc070737d0ee11322cba20b28 +188d6ed573bcf5878d140efe2c65b599 +efef5b7352283dacdc5e6803a59539fc +b782aa2ab9be2cb09784ff309b29163c +74af6d528c356073bcd0be75756e7b1f +dc13fb341fb30b97a4ee3407136baa71 +f82a311e7c206c4eeecf1f3962dd8c8b +1903dabbcc421c1017353b683d62af2b +71294cdc12e4ca69f6708908ca8fe967 +3c03b01eae772bc600a9d844d0bbf620 +f562f4043e323031b0530f3883120f46 +5a4680c71cecf91f6d03a5cdebd52f85 +6d177ad665d3a5ee1dfc3459fdba2fc3 +901de888c20235ae5eb903263a6f4e1c +46c971e9578defe37af0bd7c43c3a6c5 +e9765e85c8dc81abdca1277e865ee72b +518259abacda6ab2d4c6b517af594ea9 +856be42075bf84e9b4aa84f75c783f3d +012ebd67990b490970fd005cf197d97c +d40d6e45fe0c7b737af5a6187a87be9d +b842995634f03b69f23bee6cbc9f0014 +1479fc466d2e08cf52f6383151915d47 +fe572e951a84575e512bd1a85589fe32 +3124dd3e98f12f880f0b92bb2dfddaac +53eaf2e06c38009cfa5eb53ede6f9053 +10c26f1e4a32e116bdf988ffefd05db3 +4329af3a3240368613a4ecc483ab4571 +46d2794296c60aae934108063e594d3e +621e9ecad38d58e4784e77fc140e1e31 +252cb66467b3ec133db4d608cf4fa5b5 +4cb0b4bc7cc5d32516e518562a061991 +657228e1a66879fcb03d968c9986fb67 +bf31dade6846a821c47c4368d82dff49 +18bb8af4f6ab896b903a45c8a7c4b10b +ba536ea2d3f21f6aeea205d74cc19b37 +561e10be3de08e09fa6c96d9f6416a92 +fbd2e36adf63e73cf896eab557fa5329 +98e0d5fc93ab2a427671f276317f2ac4 +37bdd2a54f7ff30ef63385033b064729 +ef9c499e9607c6759ffd200a9551236c +375e17d88441935ec2c992ec24685f30 +fe6543296ab10d74972037e8f5552c72 +1023735ff6cf539f801bf37861d7b281 +4bc121334989d486a6c4c65ff474587b +18dd4d7648282fe524d31407a05970ac +e467af24877dab4dc441197c97121082 +1375024366ad6cffd9f6cd134054c4fc +441603f67bed159d80c5f734f38955c9 +36ee3be10cf4187cfcee15f37b727961 +bf01e8b79f6efa36506692db62138afa +b2d7b48b49190a32b4c0da066d7f0e61 +fe28ec5fea80793688578e83669b27c1 +6de7417e95f336a07545938011dc0d3a +f250b7eb09d86c7e6718dd7158d9f3e1 +5e666bdcdd2fbdd82e7522e1cd2ebbbd +375134fa90cd496160b1f1e99ddab294 +00ecdb7fa92e2aad0961f42060a56bd6 +f9ebb5024bade349ebcbb9af9d901f80 +01086f9c03295fa58108ab9a15a4a9b5 +0cc9731c4649d012f4bd6b79299425ca +5b007bd03a5ac9bb6f48e1fbf1f4d26f +7b1182864287604735456c490e39cf75 +819b6d207737c0ddef74228bfe661d30 +34214ad9c1435e64261e9981f927444e +a09dfc836d0ae1ca36213d5e161e1fe0 +7997cb49f3b860a83387bb5641c9a485 +f416044d6c789dd8d8f59431b4239b56 +311006f35d2f842ac7e4e02e5eca08fb +5039ab340c3071cab664c686207baa0a +be62724c897eb805e3a44c3476716b35 +da8759e4280bf53324c44cd3a306c67b +d6c02512932f82a0278ca6eafd7c5da1 +2c50437b2c69a5349675c2fa9ca3650e +44ee565d7976c17bb41a30e70823416b +d41d8cd98f00b204e9800998ecf8427e +7fcb96fa3a4ddbf2f0903f7d0ca0e67a +c81e3a12f6927955fd0ba11212df8a47 +0c2218d6c273ade477607e27943bc2e4 +5ed4ada231adc6dd76761d9aa512ea42 +d13fe67bd2ab8a393d22177689bfca61 +36e804166661ba41cae09fa48ea1ab24 +ae4258b5b32cabc9de69d9a8b0b7da57 +830d8300e4395f30a3abd06121e7d194 +62bb90903c6e2b3c3489ecd0b90fa922 +4ea087f6181c3150801abb7b5afa36ba +9080a7fa27a3286f7e930ea4625fd8d9 +36b9a5ea81e16b5eef363ee4879ff121 +912b18f088b87a16a011d354829337d9 +145924a50f6b3779be89a1e08d19116a +0e382863055fff8fb76b1746c2d9968b +6d14ae50659c31685214924bd3c84d2e +99876384e2c3d43a5f98b11502c7c88b +da35d5e7d2d83ab09e1fed1dc432882a +7549b188e04bd2aaa94d8840bebd8512 +65380b0f1ed4705bf0c3ead4de7b059c +1d282237f7b80ab26f69692429e18187 +7b4af3a3dfc25897d104ddfff926eaa8 +b9982edfc688ebc670e9f84f398835e7 +33a2f78abb9f31149acc8e5d787e3087 +9c92ec053c55d9080912256fbc22fa4c +f10d823345368a09ba42786a5663f1f3 +cc97674cb92cffad1dedf58d83c442c0 +e6a491ff4d8d7aec6c2ecd014141e903 
+9f7283fcd50bf37de0d2707a37e6528d +b005380273ec22780ddfa7f5958de197 +9b250dc81ef3b9301fa3aca93f890894 +4b56a12bc00724cd04be3f362ebe027a +54c7793809694e1cad814dcd317fd1ed +84ecfab19f806bcaa5940f340cfae423 +ccdc3d3146e3f4e24e9d69d9ad6a4f8c +65166ee35915b4c188843207d8fe246d +94f5749f2f4869aceb7b4b6ab0ab88f1 +a0b8cc3ccffef6d1f9da37449c0139c7 +ee66ee32b1a1fcbc85eedc6c030bb642 +658f5abb9a034ae42d6d55a86e931089 +48b1f6d934ed2cf16e5c881173355b23 +fb2c0227043867204e697968c41d66c2 +ad1a97d11d7612e694ed31b1b8f07bdb +95541b830e0186e7ae273e8ef7736085 +e7937f37441ab7fa0a064f1b5fc87baa +9291f8b38ffef4a785400add2707ad04 +5fa45d8b0b831b7381b59bb21734c700 +e624fe10ac95cd45d559dc7d4513615e +aa656886c1c57c8db9b244d57c7d8048 +47805a227b19ba2b0c0a5f50a909a88e +37d882b46fe7c4042bdd542be80d338c +a68f7354fbaadbf3d16fa7b9db5a63b8 +0229081ecbe1d01bdc3726f02628cec2 +33820f87647d03fceb48b3b3c9fc3915 +efdd161c1efdf0de05642ebf7f7532bb +a9bedf9ebe757c30da104ddf23501e1c +cbf1a89da71cb7880c06febc2347274f +8b16cb20bca31d20f97324538fe0fecb +ce205d4bf5296b0b403ea4c2498e910f +e73b63d4ec9cc2e312e1b81fe6c4a266 +de5fe53cd9b1fd30cf8ec18d3c82320d +f0049dbd6f353c41040a326ba93bade7 +9a9ed5b444e873107d78bbc73e887e1e +18a95e6eb2cfcf8f7977eddc91027aee +6b8e16aebaafebf7d45e188d7d989610 +d84ed0bea2bd61ed6ba35ca61649aca5 +4a050b8d5ffb976bdd2047fd692e630e +4c7dc0cc9fbff31c66ce161e6939fbd0 +f8a0a8dbc9608a52d3711760fee7bc76 +ebfd847c182866722aaf35d61101cc2b +978f4cbdc9607ed274cacc653202c8cc +b55b24dcd9f4561b3083eec75bb9db74 +c3e647e14d4ef2095a9e7b66f9511dd0 +8e7f3b821d3faa26f71cb0a326cc2d64 +8ad580cb20c92bae0f8a990029282569 +6c6cf9854d2f09f5a031ae7a561f7f74 +c8a55f59a78ea95ffcd1ece2b86b65bd +90fb55fde8feb5e85f7b3bbf4415273e +938f950b79327000d9d7c7eaf4aa1410 +d1ce18aa37e44df4dd0482a847fc7516 +33535f7a485dce1c0060998a1ffc2713 +f1bd4bc17090876c224a39d3a3b8210f +4843eb58b881b3cdf276a8747a99293b +424c1851c1d81f63e37acd50a10408fc +0fa9939cb79bff15fdbc2e3906955ec4 +28a47f1d705b1e2cc0aa36fa6b6d7320 +07ceffb59547b847ec7b8313f78fd778 +3ceb83ab30882327bfbe9597974789f3 +cc92a817da93276179dadfaeb12d4e14 +1c2817a3c6af15e410d967acbc8934f3 +059600303056298ec20912e6c27333a0 +b610a3a0a92fd551c5820a99e9da8c9b +53604ddb59a33ed8a088ad0a1640895c +5759a3c0e0536b0dc11f3397f889df42 +44934b69f7f0157600fad773ab52fa99 +26b1925b1e02617445156789ec975815 +04168d403caeaeebf96a097a0203fc6a +105fabc0d04b240c31ba7e1526d25225 +bc24fcb8b991212b46baab11cce91453 +fe7c52eb6accf7345d719735bc3d9d8d +c53afaad00ca4d7d0a95d028188b4b51 +747303e6c831e4a17357499fa33b442c +96ac20aa11a6988358a6bc94a69b5af3 +98581b95f9268898af4e6f6c5100c0b0 +3c5f6e841b6a0d423c36a42868d06753 +bbb6b4753fd0af817abc7983713c0249 +c11287d0071006de205190295f145863 +3ade3176b7f13d5ac95521eb8f500bbc +3ac7adbe3b6706960e26372f4f0527b2 +79ac1cbebc9a7535876c8db1ef712bc4 +97b3f1c941bfd9068b2a9ddd19cb998b +913b1f39ce0a5552c79aef5e2453e00e +2973478ca153a6399f7d6e72f4934f21 +f5d9fd06f45b3c723bf18205c55b0c4c +391295acba607f509f746aaf127b6e1b +94983a68723cdc21f10ca094a8198a3b +45dda606c4c127eb46ebdcfb21060325 +577f536d2bfc5c3fad2b2f543ef21e80 +d3b6f8e0dc3514bccd9f4d381990f433 +ca464ad380f06d8dd1aca58f1cc97c6d +fbefb47371c1e1b24e03753465fac476 +ad5517983a254cf345fea5dba33df81a +2a2ead9e3a0633514fb10007206f2ef2 +79ac2ad7c76e6253610061cac37a8b7b +f282bc5494885d0306c8131e6912543b +8640e9e88cb5ca1a094446e76d18d2bc +998f066e2af0905b91997957740a3460 +bf55fad7135c2d71df476616d494093e +7d7132882f89d70ed265bb6121895b8a +3782ec21c3bbf452c6f0c391a4c5dcda +55ef0cbbe5e9293087c7452becfff0e5 +20e1b0e127bbebded3507478ab66f8d1 
+38a0b0709593bc0407786dea95798135 +55e78657debebe132574095a1ea43a77 +0ab2db239d56bc5c5740cdbfea52aa7b +9ea25e442447a6af1ac5663c104834b8 +6bebe1f969b85423e4061627e1a3994b +0001d64b01c64c09a485a565459f9e05 +20b13e15ac3340e0cf4eae052304148d +3d70a6e3a7c6614bb8db84792219582c +f3b0182183c5b84dd96d444e135e644d +f1313b2f25aa73996922cadd020149b9 +2738c3e11b879cc0406130348e627480 +fed3287e3272de856152a7def8ba838c +538e76c4430a398153fdd7c82ef31803 +9bb4a18501b696a52efb0db82089f5c8 +ac21329ab93d6d81e1533483c2c60d60 +44281c5f432765987cb49501675f985a +6c5a6a4bf1d7575d8db1aae8094a331c +70d24c6f680b3eb5c85a0cdb5785e339 +3ec08b7da42bb2b569c71bf748a721c3 +d08aad65153fd528db34391d71d64115 +5edf80575dfe43dbb0fc026f983c841e +3c5f13121ccddf92e531d816508b2241 +28b36dac963ccba5264aedd122b0159b +c3161deb01fafd41bafb3db0699912b0 +7ba263d0dbe1a52f0a959be41921b80b +0d0b0d836709dcae88bf2e610a94d9fa +d3043df14fad640a6dc0fd56551cdf59 +f5dca735fa476c7d0e7a86e3cfe3b4d3 +1e542c4365374d4cc851275fcce0ad6c +800dae5f12ad7e6bfe11b2ff37bb009c +d2da2cc41cd485ff1eccea77642b4592 +95ff8c22683da82d20d92f61dbf4a69e +8f941f56a0694ba3383bd193e28485d7 +f27f7f45a86118bbafb05dcb89783fe8 +035c3bd0a2d5181f4c081b6de9217645 +b0054eca183edc438b343b05bda767cf +32524d320ea5883adba3b1ed96af244b +8c80308c5c64e5173877124a9a7addad +7aa2e3210b41b6fe02a6fbd3f5967e21 +33cdce6b9c149d291af15626b260ff3c +2cd1f926de66573c662bc96f2a7b5db9 +d389de790c5a2aef2526bcfb5702d303 +8869adfe2927843dd895966bda080438 +2c9b46c27f459e66b3cd1d1b63f5c22a +7815c2897b7a9bbfad7b75a7513cf804 +ad12ea02569c628acde9f2e8b2c8b20d +81a9b417fb0383f44fe22b267064d882 +7523c3017935ac00715baf59ac806962 +37042c40d4a014fce5ef8413a8d6c772 +a479e2d24ccf57a506a3333025835ca8 +ec81c5976e1524d36386a6170e6b398b +15e89f749fea660804d3b65dde66c8b9 +b332d451b6854d92f2ac8f547dbee3bc +5eed58bf1e068f9a2c92cb02d8d6cf34 +c8040ffcaf01cb1cf6a9c1b76a7b3582 +217504288bc08c9d4476b4a1f1ebd961 +b9926add46314944584419f48b143c3e +55b56133c06d396ee340ab9ed3ba1851 +b1f9ee9c9b514535f336eefe911935f1 +dfea344541146c4bfd34916889b1066d +a9183f2bfa8d270ae67bd3fbbd91efb2 +93b76123c80e1146db181287cb2bed2b +706813f498a7a1fa2d734c80a189540f +6e95ad91bbdc2af785dfecf10a9bb0f7 +5ebdda69cfa38a8e925f08c8bf815f41 +1f8e1923926cc3cfe1c11546ddf8e334 +b208cfc446ce23325409f2b0b7e0c984 +fb21a21ac14f63e4ad20b9ef30e8b0f8 +e89e5528b6c7c7906b18791c92e42935 +6fada3fda1f26d5938ff408a6d26e2d2 +344eb4e55761112111f4a3045216b57a +5e3eb17a0eff9dcf99d9ed99ae2b32fd +aadb5868c3a2ee7d366c45789ccd3e99 +002917588257691f6e85a640fbd2ec23 +49039a45498807130eafbcfc3c637bea +d3fd38f4ede8173d1d18a12f96a7d0bb +b4e1df1ad173a94b9e39ebfdaed4a1d4 +82672fd9b1421f3cde020435388a42ed +f500f754291d122457ea7763b76f1c95 +160812821c2e9a52c2dbdab719702b77 +f9343f918912c3d1d0c566e527df540c +7ec9e1895137b3d3676e03efbdd56492 +c2fc75d6ab01037f0bd85bd8d5a7230d +12f5004583e396ad6ff09af7a6c83893 +f20ccab668aba6c45453e5f4c83d0ee7 +78a8fd7ad3759637682464ba6eae00d9 +e2ad56cf1378620fe7bbcee065e09c32 +973a9c2d1b1011867b225eceb4c0c01f +53a7285f53b81e4dff61aa4808c2f939 +8fc8c8a0c05a8e8ea519e0f4e3032698 +5bf9753679c8245e211d051f12257578 +f4603cacd51bb303e91559e56279ee76 +cb70ee30babb54dcadcfdeb9b90ee3c3 +3bc9026c409e1eda182448b5f85ec33c +725b6fb38a145d82dca75e891ba1369e +a88e7c807e58d8b1c5ca4d71026528e4 +acff928e8389c92c8fd28c1b596081b2 +ded878cbed3a4560a00b997ce4b167a3 +b585df08c9c7860a23b130630c8d8d4b +ee258a23b017967b1d63d48f640cc994 +c49a4f3b4663cfc62d49a51a244da1d0 +2d9d151e31f132a6451fad74b3101e04 +952e911d7313ba7b2858ea7ddbc93c65 +6a3159254b8ca73f168342d9b7700288 
+c92bb829e69a834511c377662c2277be +047e7ad03143a415db65aba041bf5455 +d2b333a83bea88782bc4c7c3b0ff6036 +299b88c1a04c81ff5845c9275b5a3eb1 +9eb37b720be5025e5d876327c47a0fb0 +38f32abc5b377c7643bcfdd806eacd22 +e85ada34790cbc65b8d374544166e61e +0000c03ea84ac40ae7ab917674046c39 +de0991112082093632d0aca39d8b75e1 +279501a30b5a40197a212a592af891a5 +f234a5a9841ee09ac389877b653560c8 +7ed5ac12301b7f7fa07c8eb8ae0eb12a +a4a6a643687b8273ab2e782ff836a341 +fd1fce0bcf6e95e32cf625186d6ee16d +cf213544a5e2187666e11ca4c641fd5d +177c56251d80028d3faf1685c4b249d7 +80f34a424def6f22a1669138ce181c27 +4b2ed09baf596658a655d1eea21e4cf6 +eb45ffc3a1d6c99d25c81b7a79031431 +c307b3eaa2420f9382c9b736c16a3f97 +feaa82bc9c8f12a1566df292c88d6a6f +caf738f8f97326d9fd0fbb925f02b9a3 +9bdddc00bc3c1ca55ba5bacaf0e98c9e +ab230e8af7b380969e2f8e2fab0cf8cc +362c1ab8a2e006ffbfd333730c8ebfb0 +db410e04b7c2d90b49d0c0c2ba9cf83d +b3675c73dd314633322718ba69f75dcf +56fef0a52c313c9e6ac262527958f2b6 +514f9e477cdc5dfa49ff02d6b96f858f +663e8e8beb01a843068f7f223ccd954d +613902f10addf7c5b8664f30985722bd +b6f268154f8520c1d9705d12baaec3c1 +9cc7cae510a1c5892ddb907d4e37e4d1 +7b34105b90f2400eaaac342def39bbd7 +cace146916a4ec0ea74d379d7ce75f9c +49813d5dd3c517e3550b899df452eddd +afa1163bcedd3c8cee5b32b93a30e72e +82833e5373078a76ae1ba44aaab75544 +1699357ac8fb8315f217de86d90257ac +b7ac98cd625367024ac8e915fa585447 +52f2da441768144e5feeffa226fcb7e6 +5cabf2ee7bfc7cb9c4e0a1cd230d6723 +eb61dbb5ed79388081dccb0eb034f5a7 +1a67600051be4edce3bb07a8853e58f7 +2bb6bf03d31889201eb06d84eb19e656 +002acf1c93fe828d7c095edf49051b3d +233cf66cb14e590e9fe5c70779512e79 +01d537db75539a9cda25595583c5b2ec +a9037faaf87205a4932ce998cb5c30f5 +37b70a87a4d045330cd92c19fff2ca85 +351fac152074c1da43b3c9bfe1a83d9a +6c57fa09b5d87e2ba217fe0d5255c716 +2d9f7f87dacac7af0688eff1ac7ae38d +04e496335a2a7507534e113f8d0bbfbb +772be65e33e1e4277493e5125db57599 +e1cc23c8c3e1e603da9e57679ed37431 +01df37098924cb2130252bac60812546 +8f71819835268d361a8da7d2ef048f38 +535fdb87c93e8a62a6563dc5a2f22748 +c048affe34cd4948baf3a44b15fe4a4a +c772ab5c6abc2800d40b48e4c9b6f738 +6b28d261367fa5becde05d13dafa000c +6dc68a1f9e492ebf5185e9056d9fcf91 +84da9afb3dab9d5dd23c5a5027f248a2 +ffc2dd7b62e2603535f23b4548d481c1 +fa958b99635b0b2a290ad8daff06bcde +de3128801afb559601f1054dc641884c +748696ad381eb55719be69059c93a961 +77627c4eed773c9f38e414d7c7e6d108 +99074da32a3af3a9b3e9395fc3e17d2f +471edf53579495bf9db8d1919b7bb81e +07fbe4fd47b0b89d342fa45cd814b391 +ebbe7ba0eb1aedd267a8da72fff978fc +653b978e9d6f0c6ca8823bb8f439a33e +f96c3b388620d418a48255e2ef93199b +a309a07189018bcb866e17ae7f287540 +0c7412da572175b63831f906ad23a5b0 +fc8e3a8a55cd7c9c1f4cb554be5b7c90 +f75fe1dfed1533b31b2cb23148e72935 +b2b2c9fbeb6dd04cb1556b5ba304bfd9 +8cf56d513c8b14d595093c1d634c627a +a7082e5b7e98a4c8eaa4b8b34d205dc4 +872e5a5a162b60efb6eeafed3d397d6f +03f931230390994acd1dc8e2d6bee046 +dfc7f7e57e73cde0cf44228a7973d5ee +cb42207708df42afe7ae992e289954da +de4764ab014c0b3af781f9a31a18e357 +8c39e1d3ed452995a6e639558a53c53d +a8b4da1427565af6c9cb69eeada3d2af +90f7a9fb5b4f42483331ff7be4e79c30 +39aa8f470aa9e40a4bd584ff27e11059 +5786ae4593ddafe32deb24ba86cf7148 +8029df09229a9bbe60f239fb541e1356 +7ab51d2f7330d20afd84718924b54ae3 +0ddd33fb231de67bab50072f1863f715 +9039e7db1f38dc5fa8be1ea1bde9e7d8 +a946d90c25efa3ae9ee6f0ce0a63b18b +936edfb681ddce84c9a2dc8762f354fb +c62a2aa2569247c47a676b68eb3edfb5 +790d9dcae503b2d8a975f2a0b23c9d14 +88ce4a1991431f9e56456fedf3c73b70 +4cfcc62c4486cb214ad417740dc2d975 +3613356d3445786666d6ae64c33f9f16 +a7d0ef1378bd982651abcfce1137e390 
+cbd20a4abebb543e1e580576c08b5966 +8febe12d6d0be7aff7049469d253baec +337540c5e05a890156e959bd2cd712d7 +b4941039d2cc5e636780c0fd1a079e3e +be27f11e3bfd2ff43b01c517a4fbe182 +b521220c5bbf81ccb437be6746abea16 +bc69bd5b7ea3adffc80d6b60d2063de6 +9f109f254504a294cf47b8d0a391af06 +16bbd48c7a9221f06f5d4567fa31ea0a +739626d342b24c89c1caf808886ef06f +73ab15af2fa97d25c2b99c0e46478e01 +b2fc677381c9ad2de5755fcc570465d3 +b32bd6ed78691e3ad69a827fbe45dee0 +2d91fa749a11a7c84d088d26aab953d8 +d404c1f812e48d3476f1fd4388807be0 +4ba46db0de49cc898d332a4deb58af7a +b10fc668e36baa61120a08e64a011382 +dbadb48b6a939948452d38095958395b +07652321b755c62b50e493929fc72717 +e41b8d9aee2bf6cb2ec9e9acbeb3dc4c +f8204920318021de13af1adfef295196 +adee1a1d1dea55315c091a7eca38d927 +a50c1746189fc19161e9c0c8b8adc9d5 +2f4aa242518a62bff1793274936fd5be +6da5c9555ff0cb4541a67b03bde01744 +5039195b3ddbd8c31dd192d7a8d28289 +f5d4951d834308ccf50ee2be552ab63b +1aef178db49aaf50dd1dbdc73a365e74 +cfb5273746ac52336d0d43cdc175cd8e +014795df06c3797a8ecbb5d48ddb0348 +9e7e10d31239f79e5a32ade073b37f2b +a65f82a39e1ae3c6cc37acd7a1024e5e +1d73c3c495dd06d9d5394d50a1c1e091 +de9d801d670b0b0184bccedff73f2a10 +03a386a157e1cc49b84be04b56e0c6b3 +a1f0fef51074d35d1a7bc1203ba735a4 +c48dcaab4aea9d1ea4c21bd3636106a4 +7b74976f9bb90ae710173e7de01c8d33 +2f8af399a84fa92ca8578e5064f66796 +177f1bba4d56fecf932e6961cd9f391c +34e67fbecaf54a9b86471d345f7da433 +9df6bed8293c1e9b1a2623d8f4b8a13a +b58ce75242f99d206dd298b54dead6bc +6d84c4a5a79b444530dae5abf0cffb36 +a4755139ce4e31708aee465b6f54377a +8c3f9fa37bdd50746477fbb14a29d75c +747d50aaea724ce0c96059a348e4104f +496de6606c96823942fa504a59b0cf4d +a758b8efb0a7fc01c1869e9db27dfc1d +6a96edcb644cb5062ae261d5089bb08f +302139ef9067da27db1651cf91515b5a +9885033840bcb99a1a23b56b46e493b6 +ef251195204cbdcb0bf7f118d062fbb4 +c40081ff4f15acd850f06fa566c4b35e +37374a73f87072a486234cf9008e7884 +2d668533a8ced6aff635d774bb08b78f +380484816b1d5224732738ebd622d89a +0ea0c667e738acfaf9e1d44f16dabd3c +34265c82bc4d004dbe25939e31ab4894 +6e04a8cf22bbb911713c85a0c1ea9629 +ed2dfd1f0658de06faa8483c5351cdd8 +bd649782f0b418ddef2dafc6574315c8 +78b3a4f8c267387503a43712173e02c3 +d41d8cd98f00b204e9800998ecf8427e +b0f36bdffe8976950573512a90eb023f +14b91652fae7ef1aa6176487f07d1a73 +9e3408f31529096ac2add195dc5049f1 +347d0b374e5315496c985999e46d15e8 +60ba203f1077db34901d07ab3fff62c1 +9e3afa242300c636db43bb72e37c113d +d100e0677cf6eea0c38711ca1bd9d5a5 +a36163ab557dd21a796e0c5a1ac84233 +8dddb67a599408d9ef79b4f3d91222f9 +60a9d112476835957dee06533089896f +7a8991cbbbd6bfca8c117afed33c6be5 +8c5bec2980ff30f31d78dcddb1829cec +8612ecd6db17727ea7634a78e73e7d85 +5fd607be465ee70298b0cd5fdcfdccfa +786a47a3a6f257dd40ee3c7dc115116f +dd54eeb3187eb05c54476fd26eefdeb8 +50873baa4446b5576b7c7fbe0ec9d177 +4dec9bdc4587199303285c424b5e458d +ba5745c5a6438d64fe46d8f5d06cb343 +49f3dc27df5d21bb5568108ac571f341 +ea0f718e6fc6ff4ef0060bf38c3cbb4e +952b57506714088ba9fd77f0679d254d +734d2df8e36b998b610f265660d64733 +0439fe2993e0c5be486d48d1cc7cb0a2 +a334c15643152423fce17442def1eab9 +7de3de4b60f80721ef2b0bc4ac5bad39 +82937e2782997e5e74dd83e4b4be5dde +2cc9144fae68c0bd4d3d09a1f0998373 +b682050a5ecb01c3d73f056c2eb8053f +87574f655d6a134b3bc227b0fba3baaa +c5d2243184726a441bc77522035cba5c +ea78d2c6d50d0fab46a26f372357d6d2 +0121784e1eeb9fe82e7dbc89a45272ae +e8ce15c954c54fdb3214db9fc3b8b286 +0db1d61b3198bef64a6687b72e2f5718 +ae594cdd590827a809b380681fefbfd0 +1b9a7c0d4f3ba57a0d909de37200506a +72d85aa9b6aa6ee61f78ca1c3fb5e5c9 +d1395f0f012155fcb2122fa4ec4de531 +e8ebde4780c8703e370c41fcfc3b9e50 
+f44a5f2a42ce905249b78afceb91db51 +29c3d5788721e777277284220ebbc7df +41bd8acee87af8a82f9126e5076af06f +2b669dde5178b11033d233895b23a4d4 +a76d7120bc50ea07e6231309fea43e0f +34844814ca91f3f65cb245ef2a8fbc40 +40e3130c948c9784b62bc1d02c0a1cdc +d74dea2fddfbda377fb235777d9d8961 +1ebc1faff2ac71215d61475627234d0d +30156e5158de1cb0e85f6c5d972f49ca +56ca37f017cd3f07068340c85327dc27 +c081b0efa23f0cebcde8dfbd0544b863 +3c823b4ec0316b3114179dfce48780d5 +45d86d938d2f94eb90cb45725e379675 +f140c4d1f1eab9a58b0af2d0ff9ccc64 +d8eec57c4f85e54d8eac260cc522a421 +780a5cf78b73e82ac0cae7a44098ecb3 +37fe24f799090ea85e1fc1d9f4fbab03 +dd7ceb5ebeac354ceb42c26bda62e2dc +cd4170cc12ff95f54d282117bd184ee3 +c17cc10ea9c58c58512cffed7b45613f +6b53369ec6ba717cfd8b84c0569bfffb +e684288bd269ffd30ad8c70178ff4775 +8c33ac417828532321ba476e2b007bf5 +eb28414c8b755e8f2363ce1656a35432 +267426d67f616001c540eba5ddda05a1 +7d73c7f0cadbe134a4ca5fb843f8f390 +77838cd1810e53c5eb362638676d09ef +74bf690e5d8af9aa2ab811d38942b0bb +1fc34620d35b23b36bc7ee6d9e13d638 +89c7e184cbbf02f3312e48b3b06ce185 +a37524a4f8debc448bbdf5d9d421c55f +42de5b0d195c06ebc10d0f1f33b98158 +0623de721ea88fd10ba693712bf3b6f0 +544ac40de50bf392ac8819f94fd1da9e +d117875d7184426eaf193b424ce3786a +21f6755cf86f1a9bafc5d88da94b5954 +636c29e30cf3d052653aa399a70cecd8 +040b08cb5e47a59ab1d10254c1191c28 +19cce3d4047359277afd85a8b57a3b68 +65bd35c3f0f31b72e41668d0e6c365d9 +7c130704fefb2fed2b0c4787ccc45430 +42a7f66906f1589db729693237e4f479 +c71b710d46da001abb97898344389410 +d4665ffcf313d9cdea501b42f275f67c +b051badfd2654eb699191b59db8a453a +b0a74b74fe7282f0a5c9700c0224c71a +f9efe2042e48a77752daccb4ffffde3f +5aba533165785082ac262e4136bd0107 +31bde200fc65a577a231b6cdd0bb36d4 +2caf939d1359f298bf3ec69b8c871e14 +3d7e123a009137030ccb780c3a47fd0b +dc6ebf6088832cc3633057a9b11d19f5 +84f46a47581c921f488355c8880c34c1 +b34c4887ea999e6cd50e156459a67b3d +af879f023bac30838f3d5610994376bb +69321795c904409f02718dade740b428 +f70660ca03c695d3141055b6aa670385 +55240f2bf86d91d7b169fbb35053de3e +c1a28899abffb96af5dbd2a6b66ae7b2 +c91da32681630fd902cfcad122eb1e39 +e53e3dfbef1d00c7df8b266010ac97ed +b20a2c37ee8a01614be89ea5b1fef0b9 +8aaff9b32c34452335a20c1a83375875 +ff9c109f2484d1b4f90563d7250776f8 +41d7708948765461d3a83fc94858344f +2d0e59e3064483f09cf3a4838c710631 +66a3c244d3088f4f184b17eb9fcbc365 +1094ade8ad14e2f69d11400b949ff8eb +a5883b0e76a2527d04db574ad66f61f4 +8c7facb1812ad04e766ecdfe45dea4ad +9fd89389ed34b86bbadef3944676acc0 +2cd0b9a75dce5064642b4718a28299a1 +817995463de4ec70e328e51f5ad89e4e +352009faed0a0f25fee09c0052170874 +b9c74be94fe78c1ea0108ddd7a083833 +448b2d84c9103a07c04d295537df8c9f +311f29ed305605798342ac0725408339 +f103189ee7904ef8d37360dc37216d45 +50d0e9c58ca38fcc3de0fcf5f4147143 +12932c88840d97488bea18de62b89952 +ab07ecb291edecc1eb61a89008ca0c6c +abf3e66d007a5d76cd57d61a5a35f5c7 +cae289debe0fc049fda77ced1df41cdc +12375d654321e7f542cd866c9994c742 +5d1a4de24638befdee4e6cd916649e4e +652452b9bdae23af39a8f12b13f2e8f6 +d3f016a31ed9c38266f62a0405208422 +65b1871076b794b638dc71eed3f9b68d +70c4d686bf75a30ba39711b50a8f0542 +8ea4680a6b083e6249221dc6de5e0287 +896d32e1dbb50ab8d2d9cedae8915538 +bafa0e5272d90980406592949470e86b +c6bc242d9f633ae64d92ae10911e29fc +ea8dd44624d6efdcc019cba1b76eb141 +3d8c0e38b7feff9d5fc338e29b8647b0 +bf1af2bd1ff8d5528a69a74f16f8eab0 +7527f35a567c5143576f8a77f808d37c +0591585395aae178657a55329413225b +5975492b5f55edef36cd86de48e1e87b +6e1e29417d5a085764b0653e852ef7e0 +5489111e82a92d165bf8f69e29e09a0b +a77a82ded2d87aab5699c40cfa91050a +d8cc8c3a89bd442032984a2ca1640d51 
+bf747a6d073acb0ebf92f1e1e9d8d58c +dcacc28b4b68fbce65f36bbb9034b707 +8e1e0cfd7f3a43dcb107669b4e43124e +8f7703eadb590e7c8fc3995997605dc8 +94e772bcfab819b13f67e4033b991079 +de1840f7bc1bf7c4a8950ceacff291b3 +b59d681ff44ba0eab7b56ac86686ec4c +69e08ceaaa6f7d206ab8c30a99eb4598 +560e89fa1ff6a94df122aa37d544b1c4 +253f6c052136ea37b9e13714ed4da64b +83323cc32727ea67862fdb4ae0423009 +c3af726ba87c5e8cd3ee0669254ff0cb +34557dbb523875080aac36481c5fbeea +8746e24cbce45c1563ade20bb260d5bc +9d981b8f5f22650be444aa723336daa6 +020cf33f91c73a005d0e2cd71eaf8caf +ed4db4bc773e0056db8f380139b6cacc +0e3afe4e2f3ba01fd1c9c6dae7ce875d +056dd0f9364fc9befacd573cd146fc6e +d1d64d8a05f5c9c86aa03a7ff17d0c77 +31d912bb0ac42539577d335a73b41e8a +0ff32774fdca85d7d0f581b1656e9fd7 +c58c1edaab980e208ab3545c1687c889 +482905bb08f96e856161e141a8c16592 +0d00818e769391b57d9688f7e9d40040 +2474cd7c824bb6b594c948ee0e4e3a3d +bb36a15ca754d024487caea183d6ba41 +f139bfaad0f3030ddfc469696e180511 +3f51afdcfa43e5a761bc7e102be231b7 +4b1e93b43aabd117d72d6552bbf8f090 +589f1278bd3dacd0aac2b84c8e428739 +ab5d90986c28d6fe3cef83e4c14c4bf9 +3d9009e1a994c3a106a58c80ae4b5813 +6b54cc96c20baa7f77bf4ce1c05d5d1b +d01dbcf3870c0539754ef023896b1baf +024d4e95274b9527c47fbabdf5f05fc8 +187aca2ad4337b99a8388467c360bfcb +1c8696e714e17b43618f41e1cfc4cfa5 +d67d33111692ce5b277fad12dd96f34e +a1f60f958a232683d1947f1c442fb68c +5d400880fa56bff9170d5bca21c3d614 +fd95b75e5d432dd5ceb029437ae3f91a +91ed5c558923dedccce892e2fc45de65 +fa65f6581bf27efee4312029844d1cce +f0a38793a7582d8731aff565c5d18830 +e6939707b93fe1e0347247c244c3a717 +0dd118fef0d3624bfb4f96dd1be3e378 +29dc99f93fde523d62bdb4c11e2fe517 +5aaf5c712464e9e0165b75b5926c25e0 +09ef5c1e01ce482cf6619fea0ef5de80 +c20e6cb2a69fa683845706d2cc59d72f +1590ac46c1a22cfe9114158af94f66c2 +c6e0635fe28c1d6a4cff6b64453a4db7 +b56628bbf80f8f194ff702c1e4893e8b +d564cd7e7091c2ee807bd339705a4891 +07122117cb90f83d15e5421a4d77f487 +47a14d8df48e72bdbfb011037764bbef +b3eb84dadd3f98ad16ed03fa3d9734d1 +c802ac5b22c36ce7c2043386855f9cc2 +663b580c5b7d557c6dbbf943acfb4d76 +c212460c41e2926c28b8c0d4c0b91403 +863fb4801a4efc5470f2f9ec50260d0b +ebc27b825f5a1c76326ac1f5b408a953 +001c5029b96d79c123683268303f87a0 +2db3bfe3515f929436b365d0f49ed442 +8b8f7f16150a6f8fd63e561d0b229a5a +9a7a2b3190d6fa74922fd77185848a53 +cf704e27baf34f8aab3b3c481858da3f +fbf635ad168555985e065a483f972efa +4a38ff967224df9c8c2286813cd91c47 +78992865ad204e9babd04c0a3e1ec7bf +1b2fc9f3e0bf6066d752ff4503e0bc7a +1363c6c7e66d6ac40db7004faec90b6d +741992e4a5388e2ba915a9942eeed0fa +1f02b455ceead3c5ca43589510123e3d +ca80180771cbf91bd68cc2ef2cd15b68 +732489483ea59262386c44d544dc890e +71df60e1a833725349a548359323987b +8d63c3cbdb2f16e3738698c365be3e96 +920c0a312bfdd8479b19d7d12ea48285 +4bc65b4e9aad864e591da277954b5bba +cec868bc929a3b1fe722ca503c4569d5 +ccb716ef90b4c3e9ec9e7e45bc3ab9d8 +39b622c38144c3b37885f2f18296d911 +7e1d367b23d8514be167db44fc2709e0 +e72f96c262638d8c5b33e3be91178164 +e5321dee9902e1c41c161b6b5d6df12a +e3e0f9623efb26c21face9b4a20ec921 +635cb2a3846d0a800adf98d9155a3d63 +a39bd73a2bc6329ab5c86189faf232b9 +0c72b6cc7e9a6e9d9a257c43316fa93e +bcad28749b4142a32fe2f15784debf28 +8a481c744b4c8dd8ad7d39f13dbe5d3b +52e91b707066b40de5c8749f21727225 +74d48587632fa6910a32ea3b0932c5f6 +4dcbacac7b10a87548b5f9e0b25f51f5 +d55a75183b131b7cd196a866204d5fe1 +98e1e2919020a365fac271a90e2310fb +68236690ff0eaab4742181124ef97090 +182f817d8a508cb35ed6190c21fcacec +565ea6d07b2ca6cd5079d9f9df680888 +8995d2b9a605d96b72e35ce6701d2f08 +2aad87ea604c5969433f24f6ef56d08a +c6f8c1dd25a9a2aa88aa026e6e4008f2 
+818341abcd5a98755156af1f8f210a9a +8b7cf23672000f42b79eac1345b9ed84 +87507b8400f0bda2baa027d2a853070a +852938cbf52e52cef4728198922e1658 +aca663d141ba6f9f5851a599df2a7116 +026c62c53d747def7d0ffbf7dd4e4361 +7542d84c1a84bf050701b71f58167d4b +62a35aef5ed5d2c9455125a02ddacf36 +b5fc744e0360ba56b10cef5ad39e515a +34d2c3c94dcef7c55b4a0888669b5b76 +1b382dfb2509980bd20c2aa64d194451 +6e5389a7039bd4e286278d51da6347e8 +fcda24bbdc36a1c64af566fa43721d4c +601c5d5ef6c2c282819a76eaca787603 +cfe34865decc0a2ae3786ada6d7fa00b +d5ffecf5aeb4ecab1d2be0c760a57fd5 +d0156faee4be6c86cab9a3d30c5f5cd7 +a4c94b9da78f462b07766ca52e5eeb4f +58f4b0348039c8cbfb7d6a7233b867b2 +43bbbae1b21a9ee746edbfd722514e8b +e0f44119ed868bc53b65b33d56252fdd +11807f60720fc47a0e1e8d5836281a01 +b94c05cef0e33a816f860fc694f51b4f +ab4b1b283902be31a59b5109ed428219 +a198060ca220ff692437766a1cc39acb +b47f2e0a4dc59b56f63e35bf166512e4 +0df61059be0d952670d38dbef06a3da9 +ecf6468467386ea42825de4932333a2e +6617d6c83d178e2345d23c38da96581e +aac4ac442e6156dc0e0d245f818ed124 +ee258f77887e05ae9c239749e070d244 +7543f4ffdc0b26e281458b091a20fa9a +64cd1d9549b5ec3be35af5f654a89e52 +2db1a5be1df6341c107beea762569302 +8a0224ff7232d0473eff6811bf36ce53 +138c3aaebd1f295ee0f5dde0f7a74a83 +327c0bbb4beafe847824c229bf8a8006 +e32ce049b87e513a5444154762016536 +1f3c1745938f1135ac818eadb3cc3bb7 +3bd0c86ce40c7e05475cc08070ed8db6 +52be51b9eafb1cc0692fe8252f6e0d77 +9c877e356037c07e7042f469484ce4ad +58e7ece11304621cc08a1c2fe4f47c07 +82c8b86f2113b7446dbcbd59c816604c +8a50c2d99903bbcf37bf0aa40d4f5553 +3e132a3cecbee105f560c59f90fed6eb +50e9c504756b06836fdf7155c852c103 +3470e971674971a5da195a608695d0c2 +92388c23351d8d9599d6429e0717e610 +e04b7bba0d829a98b6458b8e2d192f19 +f1f54f1669032ed00b9fa16f4ef581d5 +52e920220e49c298150f0fa97f27d3b1 +de1ef4fcb19c417ab4ec0f3d6aaf79b4 +547a5a048f02548736568920c310a27f +6420c42302bb12d87e49df9e7e38e618 +36aa0c5d676bb66f758f79e9ca461274 +cfe045c435d347d32ef3831230f65f27 +b972ed7abc9b9c3ac00f1b7b54072446 +3068b513c85e765d4c93ae4dac6a6d18 +7f13ad85c4073d7750d0c355d2739df2 +8c2bf0fe30e19496b2122a8cd2cf60ad +861e9be89cdf7679af12f57d465c1e01 +a384d9e365eac1e308649fb94c38ac16 +8e52eca5941b343debfbaf298e9362c7 +0f1bc6370d6776926a062a788ec9637a +a2966fce9331045194ef4989cc8e1eaf +a1102f28bd4817f2599dbdcf281142ef +2ea7790f27d76cde013405edeac0a174 +fc26077a381047acd132bcfa269e76c0 +87f482715515c7383f9aba248639dbdd +be45e929505b30d3733fdfcd24a61844 +b6e47d3d971d7eb2cfd05cbf18821a3b +9aa7d0e31a118d160da2417a4d024802 +2907ed5337ca379835cda1b07dbbe366 +4d9f30dc2d440519173b4f31d526e5bf +54930844ecf9c01bbde1499e36b69064 +d75648194b611b0eb18a33e520cbd051 +acd3a886ff2956f0cfd5af200661b8dc +8e67c6673fed1fc02e571bb44c665e4c +11c7d855341fb968aff03b467a6eb62a +c6694fb36b1dd98f82ac2e084d8d4a06 +cb7d66c30de0d59be680b537cc642218 +b67c5be4321aa3667c84018f56cc9dcb +e406f82c0de803ff524af33184b091e1 +e742d5d1172b1f26698e7669c0ba7bb1 +60c1fddd294871f6a6a7b482512046ac +cf32a769662bfabad675c8a3aefd7364 +43a4181e3b428ed3d704e6738528a45d +f46e2a9ab5040ec0247dd510e8ff2c1c +4cceed49d4001115d720e658f9f7ab6b +30285bb4778fd5a1d1c5ffd0f8eae605 +252fcdbc8ab50663e4a4a3ad77807c51 +1699bcaec8fe7d785eca3e38559a11cb +0af04d3d0f4921448b5887eb3c75c162 +2ff4e0d96f11851182cd59015512baa6 +a09376b6a28ab752034187d9a2df2a36 +07078e3eacbf637c9b7ad7acffa86d4f +04d8967bd40091f91c3eff19594dd273 +f9a9369613834db725215956f8b27e6f +1d47b7eecff41cd729f2a9b36f9f80a2 +6034dc41f471dab359fad4f7dacf4deb +d621e8df4ab2288bbe7ef7a9317639b5 +873d440c7bc8bd84403400fda40e9ca9 +a5809d5fc69a58a03d6f3fa4ddd0bc4b 
+c0eccb2d3ab64e40337f524671d2a795 +93589557ccb4dd0350ad25f9b9d64389 +4107c48c1601fc467c2a37b5962bb5b2 +a7f49b553cebb7b440345dc865719b84 +f66147e6928a07ae05c8eeabfb87f720 +c38ef681b90d26ba2ee69d488aa1cd05 +c9528bc437768d9c98ee16457c621bd5 +51c0ef7499c25030d98e92179b923140 +3abce79b432977c53efdd59df1a54ef7 +84e1b0e5c6a6f3e1b5c5b566226bbff7 +b905df2f05cc6801b73702693febc1f2 +462c5fce58fc8029334151c5c0f26df5 +90bd8ebe33fed952f4888aea39b59988 +41e9e289d014e31c9313a9e63b281f8e +59a52851617ecdcb9ee6eea5e7cf0122 +3d2077c7b417881336b06092e482c4cd +ca7f01139977b4fa3449382538aec9c2 +00bf0286aa25f50f06259bbc6ffebf34 +98083c971f45c42e27431e5e80bb5bf2 +ac10879916ab496f8a7e8350e6cc8cba +1c76f0d247a217ac3ceb9d92a2648d47 +1d6afc8faa2b2a3e5c08660dbec774e9 +0a503caae1972be84a665b02942e5501 +30c3bba4f297c5254d1f59f703504531 +500e9486fedebc72cf542fbb49acf39f +d0dc69ae27df4fc6e6651bccb1c78222 +872739f1dd44248b4d07770a8b41a23f +a41de2d2375dc2564945d2451fe5dc71 +6f333878a13dd64077a71070170f640d +ec86ed884867711f33f045382a3877f3 +cb7cc69cccb167b026a99910c69119cc +6e927c4601f2b0abc08833cf036e66b1 +55b61deae06b840bbcfbe44b25dcdbfb +a1b8d38926f586564038a732f057006d +85190dfb710f6fbb1a0332595708be2b +b5096c438699405a2660fc98664b2fec +e0451775e7af067fee770ae6b25a39a3 +b3e925b7675c453f36eed97562bdb6bf +f84d5f84c110272fbf227af599a115b3 +36561b99dfdaa276e266bb23dfa7ee76 +64f7214ba3e2cf01ff381a94c38835c3 +9d251de6fc19665e4df287807cd9fe0a +57f5cca51527d5026302812cfed848b3 +6bfb7f4a859edf58a0d23832aea87059 +f300003e20e3cc451fae3ad3dc528e7b +dbf8f7a91e89a79867a2792d5c63e1c8 +932be88d398e681fa89eb610b5a9455f +aed0f4318d121b8d3bbca31b8495049d +265ce5a72b45e807084dc4202efeb5f9 +e6ae281c8a75c7d385f7d167052776fc +0f89a242d3d9a085a16d4eac917ccce4 +02d36429aee3a5b8639d58d152db0092 +7c8c4047bdf525a36d2893738a02a81a +fc7dd9fcd1e674f611e950f023dcc288 +72db6f9a499be5c5c3cc029e19b8c434 +9f6036e7479d38c9072c67931d7ca596 +2ca25f56810bb31234e173e220bfd225 +3ad70a3b560539b55f06539df3a4ed4e +0e817af00d21b8819bd24d14d4dd2a90 +c3e099471cedac9bf6f9aeddfda5d5d6 +ef0f97dd5cfcdd35fb23e09f34c20332 +176661670fe9d85f4d750aa96e56474b +c54f354e5a0f8a2797a397f79fd4d5d4 +55d86b4d373d84018e6e0a2b8b87719c +b275a8f2c1e221666fc2cddf7b2e168e +7436c52c89c5316e81957040653e2d4a +ee4a782a680f06839ae6d6d848f96768 +2f880d97f2b422796fb13339e7a7c890 +434b6549bb662dd288d924f6aff30800 +88b49c268bccc862cf215da9f20e1bde +1bd82c1c915fb3ab8993b14c32690658 +b1af4b20462144985b0e3c65b999a032 +2f403862fdb25c9aa3d33c9cc2e94779 +635794b97bb368531d6b26f6c7788e6c +80444bd3b4cde529eae62c15aa3473b9 +b913dbe40e0ffac06cba3cbb0b2848c4 +6ee1ab3c34488315315760db88fc792a +bd821d5b4f26f07f8e553587ab64ebb6 +dcd578b2b3ade94946583564d9d87684 +57c798dc00d0aaa1837e8804284cd7d1 +596b0debdd77a442a66f723e71103255 +f720d93f0d2e50236afec04b43571172 +c23b0c89fb51fb869b83dabaff9a9b02 +7dfb15a3546f6a0712e131e907139671 +605662e07c2c55a01a8e35757da965d6 +e0d4ddea3cf02563cd4277adb00ea493 +45b03633bd0884c19c7fb87cc5b701e4 +0bf4f08c0bea39a406fd1f6520b1ea3f +1248037200184a0e3e9c2c360a1ddec7 +7002c1c6b29072f86bab0567b608732d +ffbfd4c0949de36925b82592476b262f +649d39b096eac829ec4499dd33b08629 +ca4bd5733d1e7ef7708ca1b2e24b13af +dd5e1cade1f4bcc3684f0bc3f996f084 +31ca0a6f7bcc14340642620b96d90e53 +81df7e6fe8c682abfaf546ca2293110a +53a5eed0312f7625f0442d9852aff3fd +03238cb9e7773f103bcba5fdde92bf56 +cf9352aa2dc2541b4c15c6624aa3d37a +c0793d4854d9479e57c08796672ade23 +748c381528d7e8b5a48a34cc77e9babe +ab88d33e4a763461be154cfb4221b5e8 +4601d8fc56f8bea38eadafe024885cbb +081cea4187ddfb13d0741a2dd1a7e3df 
+1edc807dd7f2bf19adf2809afc161c16 +f761e815004a3faa1553463341e84c61 +57ac6fd87cb313df8918360e3f0fa308 +cef231fd0462ca643bdd4553c1abc673 +267aa35bf89e7a9b780608fae2d6e67a +3c8d411f27811610a4c09d6f5a87216c +f0b7600b2dae756c68f1e80b3be02972 +db92a8b4cc0b2264ef5d06238c6bedad +f4b1b5577707ee1e2619a8f7093bdf6a +62d098fb60e1dee42d1b65fda1728916 +c09c298d28c278ac33094f3e243084b7 +4b03d7e5cf22f9def09de8583d071950 +e731104a6563626453825555d5ebcfeb +98890dbe6bed546c00132c635d713dba +caaefb004517ffe15dee18c7a19513ef +3c57ba4bd744f4d69cc3e1253c40b74e +bac1bb04cf77fda2670efcf7a135b915 +25b4a836c84cd9e1000c953b061664c9 +fe7afb330ff124f4708338ac6a40f1c7 +9e3300cac7f7ad0b52a2fd758c27bd3a +acafbdd23923f77ccf5c0bb121faba16 +d86436487ba7ad3b87409847eb5ccac6 +c7bc9bf1b33c8556cded256396394b5c +c4c949aff0bbb0deb70ea5593a9ad271 +bde49ef3968ab90d3b38f055e0ec3aba +0cbd6ea60a5842196a9da45d943b5bc4 +84f538e78159d91ff24b6ae361ebe2b3 +8001d0c0a9e2bff543906aecb1deb392 +a9b609e403411252a653fd1088dca848 +6421d8cffb03950f39b51ae5827208d3 +afc91cf84c43f6a4f00573dedd25444d +d94bfc328a0cd73fb8c8872b598e4feb +698b91793c612a682bae6099530ce1f3 +4adab43bc374fe9c3a18866803745d97 +296538a55e5eeb47210cb5544e552f68 +47ebe14e44743e9fa7e30a1c14b4f3e5 +45ace18743fe4a8a4d89a3af44540da4 +892416cc9b12d2417f3978159fc10cf7 +5848418206a27e847c11512c12d31a2a +518cfb6863ed49ebb500f84c840e15d0 +6fd14b5ec039806f24da698085c85701 +1606137f3dccd7d3632ee8baa3c8fd4f +991e9c79935882eefced5b128dc509e4 +f37a53539945a187e89b05947f5a7ab6 +8c26023eb3e1b15075108749a474beda +c47babbb490dcdc33aee6647e06c15fa +086f1708e9699b6ecfa1ceecd22c1326 +d4d64bfc7bb6f24fd881b2e72fd563a2 +2f04492d2fc7d86a438f47d7e7619406 +f5f28c6080c42b6358de1243b89a1983 +e5bffdc84fa9edfcf370fcf780e0f29b +c3ea236482b8bb3e573e8692edd21630 +f2e06d1bdec7a15c93dabab3628d7e9c +264e45652f0594f0a94ec719f72ae68e +b23eaaf7c03eb28914eb4a82f91975d0 +6eaaf2fbc7fd6b6cf101fe64114e9dad +a76134468f94e8fcf479c1548dc5ebe0 +5aa8ef32e753691e7e19bf553d7b7cbe +38cec0ed06eb15c3b7a6773dde1e259d +c276d9ef63a9530a4740154761a12c89 +40092b2dc833a70bc2af5f599c270d63 +8b959a72479db02245b29034ab7c8dc8 +ad655f37d8edf2de14604473dab05105 +56a4dd2257971d9a6ca7c8689eabef5f +0fe0248cdea34f7485fe2581e88517cb +80c24e7794e910d2d5442b6679fd0181 +c21b6a44848512fcaa5517a02cd8ca99 +6d3c5e1f2704d72d073121632ef7d081 +f38f82294a0f801ad02e887e01948203 +70a7f3b13fd528ad34f3ad1befd7edf9 +342d746b877e11a94f0ae73262d2ae0c +ebafcc5c1ac767b53c399f61d44de10a +7487b3bbe50af6415570acccd90e2e33 +bd8d133188f6bfae72317ae810f1b06e +3754429213111c48eb258677928fb088 +d41d8cd98f00b204e9800998ecf8427e +d2281b347615e93d94a6a9ac77acbaed +cbceafd0b606a08a02e819c2f0073383 +d56c37613e73f314a6b2a002f4973b69 +57e39447e9d7ef7caa6c767469eb9468 +b23e97cc84a3120cfb447e42e1ca5e6f +c734125d339cad2b934aae25c5bf82d0 +2086afc54d6951629571457f759c61e1 +109fc7a174e5a615270d9bc103fa0df4 +0e14026302ec62189d2baa78dc6bfa2e +b079e624c3706ee3ba6e4693dce553ea +9bc97baba9bd017debcec5190ff0cf4c +d98c40b21b9aca9ee89dbc1693bd9c64 +812b47fef78f2b2caf831f8ebfcc7dcb +e244dabd2532cb90777fbf676df3739a +3cc8bf56cd7c5c3a9f2915ddeccb7d2e +b0f04f42c3302d4008d47295c7997642 +40462d7ad1ac6bb3c5594e3dc5ddf414 +00c06e0dddfa64432b5dd9c13e360e1e +7cc8bdfd9c81651c83582e29e44a0450 +a1af1e18132591113c2c4ce7c86b0884 +c1e0e70763d15c8d74c5d5b83759c68c +a17b5832ebb5f328ea7c25d9303539d4 +484201442c0081ac37a50e8ef4218b63 +07eb49e409860d21ee81c75f7f532cba +560f5fe64f143c2be4fdb7e33922bd58 +c997f75371525aba0c6c19bdf22c83dc +2e701d65fa0bb139a7a0a869bbd3c123 +005a3044ad4e526242f52a81e2735bfa 
+af9af5f45f7f38b7a179904a3ef4f06d +f3b33f3926120d22f7735d4510d49804 +6ce87c7c6812509b58b681cddd8a6e23 +5ead0b06978517d4ab7173b07fcda95f +69d4401391dee609360bd35fb5cf6f91 +198213f5e82bb42bfa0395371f6e8745 +738de2944226bcb8123d2ad3670e7d5c +63f8596254f72b59b3fd8822dbd75127 +6051dd00b2123b9b942f3d86cf9b4c8e +36492754103f5cea78e3701a3f60b23c +e7b4d6cc857f4e733c79df1d14317703 +4ce38f8e2af2cfb5bb8024a1c4933807 +29ef5cdd336af08f2bea81dbac36ac6b +d4ce0baf00864c3c2cf6d9224d581605 +49c1dce8208b45c3db41cf85bb6bc648 +297ed79cf98dc25874f7dffdaef06c85 +26ef0e912dedbbd38b4e1d410bce390d +a8ea1a19e7e9db40ac2eabaef38b6124 +355a1313734cf35dc8eac6ff12f9a9b4 +56cfe89f595b05b0706a30a92b0b5951 +3648194b25e6dbc55088dc5243c91682 +a1e813b395d5d6aecc733a45c72ace51 +8b0abfd89be736c99e83fdcaf3b2b09c +025c754306ce1344b58ebb0540a21fa0 +3645833a41fd1d08532ddd8173a0e82c +2e6b61717a650d2c36c1f463dfb93810 +99d1fc8c23b8074cc3df4153d88f4beb +b4afd3a3405657afe81081247f8c1727 +4005dcc5046efdf885bb6a1bdbaf7eba +7386a96794ca87d6076263d5abc6c44c +856855224047ef910bd48cb057894397 +8b95863ce7bb88882882e9f85747466e +da96c146024ef821b7d94dfe5ac93a81 +611c4612d44b8dd04514d226075114bf +e685170c3f2f3e886173c7d2c1cd4fd8 +1939b5dd3c305d9f5463c611cce9f046 +cd916a0216ca5f0f089e7034d5ec5c56 +f8abe8ed661c4906720e20a11b4e45f1 +d33c57b5173df8e5efc3756302e1e488 +6c82cb956fd8859987e2f69d8ef49900 +b8641309ae05c484bc9ad10a359b16dc +a9e3674e8d5fe7e0fc0ea6087e84ed41 +e426ca00a1f96b734c4f5f13b6107226 +e6e25a5913a60b8941502a6279f61458 +5294625f11eb0ae830dd4b1735c473f6 +c5d33539e73781de024d7de72d99370b +a083df72c880525313a699d7e1a71b3f +025956230158584d81f3008100420ea5 +50025661b52a10b1aac17536d4f993de +9be08454e12cfcfa801c0b94c987b122 +42eb833c611ba8796adede13a3d87ccd +4643d0059ef74b744043750247eeae56 +e04f3d0cc5c88841a7459efb25398e64 +903177ea93f1848b49ed69aee3fa636a +419d4384a73055f405a6f63964e3998d +52ab90856ec048ec535e8e680e5289dc +fc9cf5c1ac3be7880e3bfe4557ef0693 +9d3f6766fa89e53714927a85e053c3e7 +04553cb82ee5fa0fdaef025d2771c660 +5fce2938e71f75e3ca2349674fff6cec +079718201f431e2ddfb68579dde110f9 +013e4d43e498c1a0c365583c1ab95298 +04137ab2d64bf156c195745ef4e4ae6d +d41d8cd98f00b204e9800998ecf8427e +6e3367d63c82a0b79c687bd04ce62064 +5db9af4ed349ae3750f812bd0860f50a +b6ee0e6beb34ef63e89df88dc8226157 +3aa80fda38ed4166db55799d0f8a3119 +39b6fbc68a5241a0faa0868dd0f7c1a4 +81e898de552c8dbdf40260ebdeb71ca1 +dc3e4a28e05707340f9ee4024687485b +46a2fda480a60406cd1359eb97ae7231 +029ad55aafa2887108797fe1f1edcd34 +9478fce490174b8c05ba596ba69e6c5b +f8172a52f77216d9088d1ee5776fd81f +45fc9a361dbe58feadfc77a26448fb54 +7d50556e3d86b75122a9d843b1586fc3 +472a821ae83a28adfc108755b0820218 +48c660967752078f394c21dc46c001e4 +8379b1bac8a8cb274511416686a7599c +d41d8cd98f00b204e9800998ecf8427e +980c3569106188b41be79f12df8b4a8c +7748695ced739f2df90ccb1c4280a5d8 +6974f31d8593850c985146de421bf02f +c06f333302f9e3b4ecffdf5b8d7684cc +c85822fc00ddfa25ab2ba0fad2037b60 +07c3f86812711deb7cdc055ff97d48f2 +b2cbceacadec87a93f8678d61c8b28a9 +b5a2cea6268fbfd5374a35b397161adb +03b6aae2ef9652f8aa5aabe3db32683a +e36e18470b806f644d0514c0b2e9dc85 +94969ffb0ee54eefe98b6637ace1298c +e34cef426bbc279135703d4fa4ab301b +1e31bd8e473b39b3fbeaf9e73f05efd6 +e0e7cdd1984d7293eef8d83020215869 +50776f0067f7c349f88df6708b2bf401 +326d9a553de65644ba5c31b1d4725b69 +1d77beb17fd0ae7c011a20b50a253f9e +a589ed9ce6ff568dbb891974044fffe2 +a222a1e9d06a8223f9a0a6a34abe5163 +b46d401368cda7e5cf6dc08e7f9a2b19 +8dab09b74e3cc80bb5e18fd1d8d7228b +80339bf36aca19cce839bd6e6f09890c +e076909eda0a758b25749f28ba0afa89 
+545d7670a3882c7fd014fdd7899c9e96 +99ab348fc6f415e581217249c77073a4 +3fac64c281d063ed68f1162201193326 +c81dcbef02a3f7abd94df71f85acf7ac +72d364837f0604a07a4fe68083d95fde +7743c9b29c4b6b8f63796a9c276e5ca7 +2be055b1f90ebd206937be6568601ea6 +268e6c59b9e3509c69e574ae4d9ce23b +da80ea9f108c2368fa07dd29a8e1f620 +42530f39414d92bd4ad8327989b05b40 +3ccb371bbcef4afb05d3bf8902f13f6c +5581c92902fbd1bc1ec7a3474b950aa2 +640534fac72ca0c8f99cdcb215ddda95 +8039afc92430bb764872ed77067e60a7 +b28e8a3b93afd1e826567d1cccc1d373 +8b3416e73fbe405d2f3ba620abf5ddd1 +551e7d2f3cae4c489502171cfccf866a +debabc89ce1cb5459bcf13e9195141a9 +69f09d1344dcd4ca4c29c3732c8e0eb0 +6c314aaf77d840ec246c1badf9534b51 +bebdf116f95b13be22c85d7db490fe43 +ba05cbc85137587899a0bf0cf8727092 +6c50745bf30c58a5f791917dddee3741 +c8f16126bbab90d972fdabdf1f10e093 +b215de51777fa22a3b92c157d2c0feb7 +4ba8be63708d7f0303ebaaa86b313314 +53ffc498d04de26b7c03f820a773dd89 +2fa5760377991b07cefd6a3227fe6cc3 +723f77c8881f3e9b94841d5a3f235b9e +764546a1e466434b85ff6f9ddf82f3f0 +f2939e9b42670b364c18276f8c730b1c +749b23dc9d5179682c5705cbf2185a92 +1f6d96777d404c5ee697718bed3a69a3 +c110f5b1c4d9a1358d14f61db5de755f +3e5461c457ce5ee96e998d5d3a21e5e4 +6075caeb03877627dba92567fbdf9f99 +5321286cafa7d1be1cd60fca27596bc5 +097c1a6a8244c2637ae935129c56c331 +63e9a9f014ca00c851b08165ba21075f +eb414a5fc2bcb3fa0148eb57dbd2d565 +a00cf8e49c89081afa055dc358c88db3 +1bc6582cd823fb85bdbe8227591a20c1 +58ff1c022c3a25ffed98969a4e43743b +99704fd953b212c231e39b2048bcfbdc +63c5878a1739f01235b59f5e14cc7738 +29f9eb1e50d4dbdfb57dbd705168177e +37eeb111b958b228e24a4fa4343eca1e +6a2ac4927992c9e271f430a23a0cbecf +3cae007120af31d6ee54aca35c292d24 +beae552b2b24b1f6b7bc36fb4139aaa4 +3cb66a13e284bb7b32c507c4d89dded1 +65572fd4817cd03487bf57b8232b95a9 +140147961182264ec78d1829ae4145b7 +8ac992916414353588945ac29430852b +cb820a43c759b54b793b749cc803c902 +668170561b975223f54b896e968b3372 +660d0a05d65557b5ae0db2b736319bc0 +137824b1b95d70b28daf4c49db550341 +713a1df4752c6f8527bb40d0dee50579 +c234f4a53b689ad3c903df0bd5209b77 +f42d0805b982e8d2c237e5b86adc1fa2 +20dfb28f8fd055946f6b07246e4d9d10 +50e7ab222240c7b2b13bee9fb11597db +3e192267cdab8a6f3613379af26af299 +d03473ff1b45193d5e3c93baba54c6b6 +9ef8cceb20c391157192c45c1b9a5a4f +c468c0e37d6ef7ae635e69a5b7ff90cd +2b50c7fe6f9e3fefba1963b46a969c14 +98b5419f21c2c2dd50fdbd1a28332048 +8908352604a59c1a1fa7d50e51f61c7f +a66be97dbb29f0163469cdaa1ab485f9 +d41d8cd98f00b204e9800998ecf8427e +81e11730913986c5b9662863f4f00e64 +6ab2d6cc0843b8f6306c0f17ac64e668 +fb39b688e851cb332ff8f78189f20c4c +a42441b26c856385de051712ba5f4462 +b06740a5b9bec662c7a7949be2cd84ea +bcfa8175692e1810ce99bbff7d71a7b5 +5172729ac07bb808936c04998c9cf6a4 +d95a7226eec43aa7f2c25bb68c5a213f +3a1babace2667734065d55b762b530a7 +0467dc9574328abda2ed0db46f4ebe5c +9a88302eb2f41d59f6b47f151a5cf6f6 +f44ed8518ab85d7186db62342c14f56e +b8f5fe6d0ec1413960d5fc05df1424fd +fe1fbb81b1befe027ca92740fee619be +891344e9f9a829b766d1be5b5f1ee822 +6f989bf96cb8208fd0dca9ca964eb60f +914fc8f5583c6731e6f914faeb8ad042 +81fd9bb451911c897eda75d3c25354e6 +d327ea7eae162b2f125af0e35bc59272 +f2147a222921ed909b97c81939bc592f +fff4e067273fda1bb0c49fced92a55a8 +776d0bda22e871060965a4d2e97d853a +74be22f2f7ba204c0e4b8a98a22215b5 +85b612600ff8a9ece6674ae833197867 +5bd75a9d659812e10c14e04a35a89b58 +e28a221de79ab1336b5b440e80cc7bde +db0b82531f80de232c391fb647497304 +4348c09b8cfebf76ecdec92ed5a34160 +0664cd43711668ae8c5b1292cd42248b +9124ca16d971d6ba17255800faa10ea5 +57b10fc7bbf375372a11a2eb4032d5fa +5b7bd2e3b684f0f5a24292d8ddf64ec5 
+2cc3922c868376dfd268fe72a28f8557 +8c64bfed63856284ca3f31e9f9d9eb36 +796fee97ddfd7c3d49318dde490d3979 +d184fe437f4980001acae1effdfb6a3f +a90395e84f4f4a9680b176483bf7bcb2 +45f5a3f3764bdc5fe6142c165025dd0f +23e8bfeb52dfe8e708a82c89da487643 +ab9f20de51855d98cd301f41c50607d6 +50d6e0cd4429b8afb1511fc79dcf4f10 +85ed00d2e23114a1ece84150daaed7eb +2a2badb39688c204984b349aaac2628d +21278305cc0aef07da9d69a13892e7d7 +a2493e0f5c6175d71edd27b97d34c854 +fff022590d1982a9153757e80a0f8be3 +ce02fff1b9a2a7a8981fa67fec64d305 +75d85cdd648553a489cce37ad3193aa0 +78d61927a8991e4667f864eb31ead9e9 +2c980f1f0a9e959e186a8f1f594d2a84 +b92a20cea5d978aa22280bcab473470d +989ce492cadc63e356a5c5381c530e55 +1566e5ead0c94c2bfcc69fd2e3955603 +e1458d42ab475780485341598854aecd +b7a2095bf521dd5e855869543b490b71 +5a6111b967ec2c8bbb3da0a1da6e022d +f5f69c317ebe9554591d05149d726502 +38c185d894ea1d3b68030a26bae4f4dc +953b6d0c32bedff55f8e4f544a13bc28 +8b5b282994c87f81aabad17cb992b547 +c53561ca4c45be89679203b2ec12e9d9 +8d2b55b21a0c44a1cb4511bb5197db2d +a11e037ccb85882c6e57722da90479d8 +d3d22b265199bd11b158c679bb2d3e84 +26ff4c4041f9b5ae5ca67bfab24c7fd5 +8cfe82c53535aae647879557c6c679a6 +ec4e78462e1ae2903fe38a15c10270c0 +d3dcfab27b98beaa39a2b96d3b126210 +92f689c6f3face988b5ad117ed251d66 +c50926bea38293edc1ad61709f1a0131 +2a7e3fcc4b8eeff5a31eadcd4f848b27 +0b11bad6b9cd761709c69db0bc77c145 +3556e23db83a1f009d97e93b56f7ce4b +9183564bf4f9f4c2c7290e3eb7c42b80 +0d22201c0d472da358d33005db53913a +0c47296420dcfda90d89beb5a8ddc6ff +4159d0702c1d8bbc5765ba9c62e50293 +3a3033a7ff964b8fa8b95959319f92cf +962a46af55788879d64a9595bebf1607 +547755017f7fe735909d9c74c1af919a +4118af9ea2f30646ac3152999ffe98d4 +a0531131bb50c64d812bf2d97b2272cb +6624f9c07c94f0ac4e376be447f1c543 +361cd0957f51f6218d3641dc5d5fd6e2 +0d3bb2a36908db427b85c78a278ffaef +786bbf8a5911c459968ab41cc1271e1a +561321d6b9d0a93243c36d3ef7563886 +26a87a07b8279d720cacf61458252f0d +5613b79db7ba1d15a66d1546ee1e5596 +55da213f93a6956361228280f221272e +b8342810ae9c18dfd97880a72204b566 +ebf7be7373b467840552e061f110fbd4 +509c2d6c82b12138c7a40b691fe053e1 +f1a9ed1e89d1b4ada55545534fa6accd +5ed62a9d020ad3366f12b9188ddb21fd +2968fcd8385c1e8cd82e3918f3593961 +eb80bb702a7a194efb3b25d056b29738 +4def2af4a3757b4dd1be1ffdd00448ac +9de787ffa47f7b183443fc4510e21976 +7726641ffd265ac00b314b4625fc41c1 +94b83b3ab151672d70c4b6b44c1c7dd0 +0d65214c5af62c27ae22acc67330845c +15d9e07e8096faeff363d3dbb2cb9b3f +7f9e04f9258bbea1b8986356c0d2ff1f +3726802e552657e135e81b58a2685450 +d8022163dbc62d06e02beecfb0b998c2 +1242bcc5ee56676675db3114fcbff134 +bc6e8d59948ee99be2862dd20bc3a91a +04ea6709a80def4dbc5a18289262cceb +c01687ab6ecf8f4f8d177a644aa5cf57 +60007dc160364f50d9f242f5266bdedf +47e047cdb8fa877a546b5c43bc3d9f23 +aae63acc48965df7535995caff07bc04 +f3cd8e48ca7baf7ba1a442c8781ff621 +f2e68488447272a6f60e76b901281feb +877d1069497428d577ce99173fee3f76 +3de5b4ba96a58541cc81566f57e0f9c9 +2b142f22f9a312cc6aa915d56ceefd9c +370cfdbe472ee4c55c9d2e7ff6eef39f +8eca1fff9cc586a3359a1c3ecc8cde51 +90212308290412e6d3f2df2a6f747c12 +23a9a7ed489788d0437141499c68d6e0 +62b8ba3efe63378e5f76efed45d7f14e +a7d9b2c1b379a3d56405653d840a1978 +4535f99a4da65bf31476549f751c4512 +b2c238080ba0e4a78f2fd0968a39d4ac +1263721c5ce8311607c12b5d5004a212 +ca6954c45b22f3191b7f120f67196450 +c0f6780d1f7b996dcc7f1a58551fb047 +df4d54e054ea18aeda827ca75401b01e +795b4087d42878a3021511b0b95ce9a7 +a356227e71f724c55e234d2386b2d115 +04bdcdee9a726f321ef595e56470c2d6 +b5a38f4727fab17f24b3bfb7ecaceb2e +2d03c8a2780a328cea04af4cadbcf2e0 +f5287d6547dad7566f59f8642d569a32 
+f0f072ced91e0fd0c190a8384a1558f9 +647fa05b654564899d5b428da1884348 +39fe3a11457af57516d9d1d71554a3e3 +c8b27f8e20e6ed53f205839bd0d903aa +474f9077cd402dc522b779ab4decaccb +02a728dc0ea9535679d742493952796c +5e797f47962d11a9f0479182d1efb6c9 +c2b21386ec6b21e53d4d00e5501530cd +7a968caf4c26a9dee69d6259df32ff0a +cb04b7fe2ee5537ab4fd27d045ed8a9a +5640edd012e4bf2a03a467aa444de5d6 +202d78105cf5bbd3dd6177267740e7c8 +813f7a94c173bb533a87ecb6aff8973e +d948b97ebb3c1982487025e4bde464c5 +fa15c93f4de96973a685d4f2d5a21fd5 +b1a841de097ac66f3cef535564d0ff3c +5d5d92dd8564c0f3f4fe3e18845d25a0 +6c0a8c45d06f9ca7dea557274c89cf93 +2927561c2d9a60afcc5d29376fed91d5 +b43e1adf375dcc33b00a241c38476426 +cb09afec96b494a9ab947b7e04f1c9d1 +fdd351d6b13eeaaa06dd4616023557ee +2ea6c283edf265f9d776322c22f05de8 +41c66323956780942da3c6cfee7b8ec8 +d22f8c7b9dbfc8a5125e5e0ac5bf9dfc +0de54288da48657be506aeaa390508e9 +a67ed8e661e0d972670a9655ef182f8d +7d2f72a008dbd616d8f927e628bdf964 +1e2e02a94552b7c21e872f27624adb68 +f9129cde5d7e092a572be5af6a9a8c42 +7d129391ad87c13049397cd9139365ec +42e8fdd6408f95039b1e77699d21f5ee +c2dc63615dcfbfaa65b6945501fe5a72 +31ba68f5ba4ebba3e246bd0dc7f60d04 +4ed59773e8dfe96c0a09c4ab69901360 +0b2f44c830363823630bc672efc38edd +e29bc22e74ef6e9ba6564ef2833aab74 +0badef9494e372c0e2c0692a3c9e45e4 +168fcf3d452694be0bcc323d091b84ad +70872c0445f89bec5b6c24f9bff461fe +2da8f901962bc311ec59c2ac43d7e812 +b483a2f142ff4adde0b3bac95fc49f8f +ca51b206101ec09172c475ca80c4ee3a +8807172964ac4cbb0cadb3b276dd3538 +8f7a6564ef23b4bd8348613d5ce78386 +9df6e3da2bb1778fdb499a9a18633451 +84c740978129da8f5279f52716b92e9b +ffd5f1c91f82b370328f798a269c088d +37f72f06a72d472836b73508d8c56d38 +c5073404bac0d47ecacb95729b1deaca +3fc398e5320bde1fb6555d70f476db94 +dd16b5e167b7decf8a5e70374d976d96 +adcb793e6fbb4ca279b84d40a11697b3 +be947cc4b4643b63feab760b418487fb +2acf42be6232cbe394ff7680d1c1fef7 +41f4a2cc4407f13368d3ff95f770e1de +ac4835f918abcab3df1da4ab020bd0d3 +bccd2226c99abc2f4d018e2e4c460198 +c8f2c3addd4b58f175f209968a96c19c +31fbf3555e3ece40abbe5b8e6330be95 +23b4e6b235f9124f1a531402737aef91 +ad915b96e4dc63aed31b14037e8cb2fc +b2acd592523e71b6fce7d15ea4efb068 +1acdaf7e478a59dabd25ee807a8940f7 +3283a3c6ad1f2abcaebcbb685d4a31b3 +d286a1f0e3e1b6b27c48d295410525f7 +2540e5054b29935b45dad2ec0c5d9ee2 +b4399b0f57ae340b9155c7d085acbd8b +c3ffb43533892db1a97d34fdb4dd2b65 +a6e483ce02311c1542e289495e52c0f1 +a2a16bbb82d3b01384c1dca4a1fb750b +66c956827a9a868e4ee801d19d54a48f +2402c732d414ec9168f286f77a27acd9 +b365260b8e74c0881774a5da8896dcb5 +aa175eb41b42d90f0189ef2f10ec3b94 +076586464e4550839eaedb31adc8fca3 +614c0f0896ac65bdaf263578cc93de1b +6b840fe43f225e93ca8942f69d6b8408 +b0a73be97de827c887873a8a4a6df08c +b13c86c17aed5193b45f573ecbc6e9c3 +55d1d8b2e9c1ae0619ad51e380372b05 +47b01c84fa49f50fa2f7dad34b19760c +61072bd07cd1bf15580c30b7b4768c1e +31692a153f15feea5cbf9718d81ce198 +a466a38bfe7a6758d1fae379e8873b98 +632bb3f4f8f1125d9c752d9c5d4d3469 +edcd6d515d39ecd83529e45e9ccd7468 +3dc73d6f215cbded5fd15907660908eb +e07e594d82ff0a64840cebfb844327f3 +683bc20bf945b9bb43bb6e089c277fed +34a88cf663afd72669a87627b0bd660a +d9acce58a9a1d381c5fc62fc3b9805a8 +1db7e7aec781566577bb3805a9dc03df +80558ec676d0350078d505e14a410403 +66862ad17b06b5187d5bfd4fd6993707 +6b9e5568eb81e3dc511c2080901bcd1e +69ab90fffc8097fa39a191602e31c3ab +e083331c12f31e5801d3fd57942f4806 +9ebdfc2f0363ff9c9978a6aee1f63700 +4980956439126aa15f6eae86f7f76bd3 +f1b1c77872fedfa775e05951f1a568ef +b0ae44041b31df34be324726e81979cb +96002a64ee94308e5635ed362cadc555 +628d0050f0804ad4c566b2e5830c1a25 
+b7e9d54e7be4700bce28617e4480887b +dd7e60671e6ac505206d812aa494ee3a +abeb96a1b9ba44a48bed927d63f74702 +1fd2f8430df3065c0e5871962c934cc5 +080afdb57cfef8f8a37cbeefdb709d0e +9abe77061623c3888428d5fb5d107602 +e4bf4235454292afe56b50704c33288c +611de5c9efc16436e15c229098f9fad4 +570a13c30e5e5692f166c9f15d6894d9 +cd6f712ef3ae8bba04c9fd8c27a7aaaf +d7d50208288b165969dd54eb4caf2258 +9d6c9b598fc0b3d40283bbb934b11550 +f86ff496a3597f11699667c0aa37ee27 +c97408aaaa62fa361d08ac869436d5ca +afcaefef38138e356c1ddee9f747b6d4 +065df351b1ff35f037ef565607ee5d6b +70c46bc9ee8c9f7e9bb5806c02505db5 +59c9cdd0d70101df057ded1840f63575 +54d8d5d6b7fdac3b62cb62fa5770b2a0 +02e429f379c3597d8f0b684782f8825e +a4e239ac21a4a3b89b0c695fd924f834 +9ed299c09c50703231142886f955da2a +0b37cdd53e196f0efc2ea63f9d1f08ff +a1c32b2f5fd0de90294b127154659c88 +53052276609692804ce100a7d3276164 +2d96038480794afbc380be2939f89a43 +f9e465be10310febd5500c2109744367 +b90f4d3f2284b0731167729f2f4bd45b +1fd77414cea05a4dea9223e2ec1db3fe +bb19e8879b32a5b3a1d5a5a96765218b +2c1a281ae96f97690e30f0c235a57b29 +01477d6fa24280ae787451071c10da70 +6f7ecf9418ca9294f5da837fb4d44906 +c31fe5d3f6fedd465e8601a5262ebd22 +59ff97d25a3c2f335555c8d39dffb7d9 +d91714c68bc836587af075514a263b4f +6145ee067f4e6e01357196b3da872e1d +5dbb3839c0bfa9a10858e6684d3ba0e1 +af726b931f7b92fbda39af6993f7d0fd +735626f1b21461197a9d47bd597b208c +0b31450c4adf2ad3c5be20240d3edc6f +b7629fceafefb9dab4be2577505b0a50 +6605cc09e22bb347b8f24461c0add60f +78885002add0b3c2c7436c811886ea46 +bd221828159a42572875b985de13db2b +c1b64a367532e0007e915a0dbf906507 +cb51f8fb53397b3f3ab5c2b6579c988b +1dea94f263efd5e8081109d555553a29 +d282eb3f9ed374326edf41d87c4c64b9 +83cbce4ec91f6efe1f7c4c214de4e3d5 +6f6ccf2d1993c21362c93bde18a9db07 +543f3636030a6117837b2bf9abdfb60f +fff70aca8cfda3ec258fab291feddb05 +c9af4dedc2ba0f766ca9ddd119dfe12d +c20c4dc12f26f6bf6980d2062d834c66 +499e878331337533f8998d9db155ebb6 +f511700ade424da2528c16237dd7097a +7ac8d1a8ecf1dd210f8ad1929603fdb1 +1fd5b2dba90176eb472eb688a4517316 +9aeb4594beeeb31f7a381db4b2729f94 +b80f13a66904050de42854f4533925dd +c14e19a22aeb3c0b899cf62d5b8c09f4 +27548701f4c732ac9de4152223d0a9a4 +0a969382d3168c508a028ed05b2ab292 +640315b1a33bdde0cd9f416becda6f54 +33ede77829bc21433257cd5599d61cd4 +929513f0051a19916bd9efb1a7dd4ac3 +f89520b173f4590b0dc13dcc698b53eb +f5eef4cf96813741443a6f337ae360a3 +fbb0138e0675f3416d5464b738af42da +b0828c41fe53c2fd1b47fe94a876b2c7 +66b0fa1f722841fdae0e0039c23d3647 +34a7716fcbc2b58075e8d9636733a734 +c535a7145d59a3bbbec82f7b83a3ce17 +f92ec82f117f89cc5a2fa27cb99ddfc1 +c13202255c72f12c3c8e82a72bf40393 +bb77c22882471a62cc18dcb9543ac4a9 +17b85dbf96f7f2567ac719f07a6c60c9 +70e543d39a390f833415d68f19900bb2 +586d419d1c701038b5feb78e4bdfbb43 +adbbca0a35a0098b3288b2f68a9ff0f3 +d41d8cd98f00b204e9800998ecf8427e +de7f13af32e2690b7b67fe73c6dc5c7e +19109f838eeb3683d6d8aaff72480caa +0da22c3b40f459eac05a440af5554eb8 +91473fbce5854b2a49787df82b0453d7 +5bb8ff1ec352df99f33cbd691d3f0f55 +e239415a8aec7cc6e64462f06063b2c6 +ab5dc18e38b217411415ccf60d6ea32f +837bf1d70b345bd3ca40775aebd71df7 +8cf9a47e9c8985099b5c2ded32ecf12c +7966d22a81b4ff60d4875bf99d62c65a +d057c221170ebd47096229309a4d5976 +28aa3f729e5a40454585d2de35903cd0 +33f52dd293909f99d277bcefac5176e7 +932cbc729f2730b35689810c100295a1 +4cc6a836ed547e64ea4e3bff562b10da +169cde1a00b51d979b108ca6e734fbdd +d5316abf64a87170d042d374ebd90562 +1f79d9915e7bbfca627c31bc47c10dd2 +b6aff79837c8f882a3467e67769c1ce0 +a73169021aa9d2640bc2fba7777c58a0 +75dcc033546de7fe833665abf49166be +520d4bdea0fdb8e4dee12619c144e99e 
+171989c4dcb9aaa339af718b09ce0646 +0bda18eb62c3c7a74e26cb5d2c31ccc1 +1d5792cd01c31f45dd9951fbbd61ca4b +0056fd5839ae34d3a3f3beb73f26bf6e +1016372feba2841bd652135b58d263e5 +6f50132a860f154ce3915b7b1746b801 +9094e12b641d39bca88384ae20f430db +e811d824c460edaa85b6aaf0806e9a26 +a23ffbc77a00fe754f4527b22e019b02 +d552c609f7324816500feb9aaf407a3f +f2f2acbb375045ca79c81e5a792a09ac +03ae6f00ecbf598b5fb3fd40ac0ff7e6 +c6235c998ce93b21b8b397299763fcb4 +ffe37889a063ff4328477d3c7ede43cc +1cdb53a654aa705204af1ba01dc7759b +d5309ff716f712f2db69056a918b57a4 +c91a254e8796407cb96f81c7a2fa6af6 +2b0c9787eea66e9d289717db28f1210c +ecf9a6b95ae48a0068cdc45c1e1e0363 +5b43def35835e05e1734db8267a69fa1 +d52c5f97a442f628c494e54127a75e2d +a0fddd5b8f0a4de59eb799fdc554842b +713b1e35ee0326e4dc7255698c8e2dbd +d4964149bdea8a8ab5e43e2ed8851d72 +9399244f1695535af187c7a67010a6bb +05d31a308bea07f435db33d3f92b9c17 +62fb48a1f0f59c4c9b91da79bb493ccd +4593abab97282d908baff8a19af0c02f +9a2fb3f5a3e2d85cb636dd6af8f34a2a +68781174612dd3c26addec54747eae0e +b60d7b8aae2b4ff8a7676ec1a037376a +7fe07829a807a464af28169ecc6fa20f +bebdf996d365358ad958ea17d292d942 +5197da010878f604f97875e4c38b2c5f +7f98e68060c9ff75e495faddf95087f2 +9d9192e4389b197654a4dbf99699af0f +090eba97b8736606165f6518cf1fc223 +523de1b9e61ab51918057f3d606cf7ef +635cc1a932cbfe8042174c372c38102d +47b530e7b57dcce328b3766b3655680c +f1bdd25f4c535e9f4d0c15043c77f819 +a233a9504ab4597133e077a651d8fb05 +2579c58bfd6c4bb81ee502f7d9881cc8 +41a0951de8e4bb57314d554e20b9161f +357cbdd3b601d1bb7489405909449ba0 +524f5366a405b9d1fd163565e6fcea71 +e7fc40e6ef3c00a24112a6bc66f6ef30 +6cfa33cea0dec8e7c50d52caf3b2e55b +8c1255cafb61be5bd75d5307482a244d +c0ed3cced9ddaebd4038d364ef8ec9cb +fb3894eb0a6080831eee6df12a561bd6 +1b0836473430b9e3fd232691d15420b0 +b3732d05b2ced8d61ac6081c0e0efe3d +f4af72f8b90658ab279b2620db2ff932 +4625e4d604e7c54df826f1b1414cffee +de575e23a50dacdd2a2c5dec02e3202f +63a515a35d20236fde949ebfb2bf5e76 +43e0b1a6487239a3b641eef7a47b7ac9 +23fe7f8cb9b7fdbdb545f225abef9bc8 +9a0ffdb92ab58fc520e75286b6d66d0c +86975b03876695307ef7832288d50300 +5ecfebe9658ae9bc77570c8118074f95 +be57445eff4012c2e219d27f065dcbd0 +b21947dd12b6ddf9501fe5e1150c65f8 +e97d5fe74489ba3a78033814b6e8178f +42a51aab000c0c3dd65fc092e03c3c1e +f2595ccaac58d972ae68e0038de5b7a8 +b794eab370e7354864994b96c048f0e3 +68a6d74346aa831fe9cd41ea5def3460 +cdc923dbca899d12ea5f62080d61f24c +5a4942c5880639fbba4152b2da648a94 +8e8f373b69d05ee646f66c2f7c3a06d8 +9eced944d84a487b7fa0977a3358a756 +1b662bd1d02432c063f284f0c749d8ea +295b10f74b6428415846d2c6fa8eadff +d69406909a2f50eff6e70c6ad1b1c193 +0bc91b8ad9146f0a90f016801c030d6c +e1c1ac2d7ed1534840f671e8ed913c34 +7f89450df860ac554b3fa68025bf9ec4 +b297c9eace48931adfff6def4b884423 +c5c5e907c0fe5f9090153ff84c8952cc +0700f9660fc0d54e5b6b2e49c3c3f44d +9c2c2fa5889c26232a6fcdf7a3af63e4 +e4272197a54c383b5732c59f67b19de1 +55b24263fc334ac12ecb48f77babbd54 +d41d8cd98f00b204e9800998ecf8427e +d41d8cd98f00b204e9800998ecf8427e +9606f92d555f097007ef2eb357e10572 +b05995c0ce8abcbaa7e74d3438e7b034 +d965d7fceba729515f02b5d084c23be3 +785aad6b0b289f0a83785508def89392 +876694b2ec9b1fe9fe0df1be994c9c94 +83477d8ad8504c2418126a8955e67789 +db0e20047594fa70e957a9f6000d2189 +a748855b5692b1671007689e1783a86d +8714d36bf87cbd04e01748d44f5096d4 +ab99500ca86c654e32ea14ebdb714290 +7b8eaf4528e7b52f804017939f36c784 +754ac6a4480cb4a8209c68eda5dbfceb +b1a3f4b1dc0d850b39d7686b8b8bf3b6 +33e024aa7239b6f6d23dac2e688ce033 +7109236e43e80305e27e24e279500004 +ccf8a25b857c3bcc796a0aab129affa8 +247b4d873606ddf50b39c760b1cb0384 
+92fd4d6cf27624455085cead5e7a77dd +8964452ba992d1009ca199bbc4a6ad2e +46e9c25b7e9aca44f1037e65d59186a1 +da8eac5d3e9499544a85c42e114d33ce +be7dcb2b59ddb149950d3912671a55b5 +a0c2a8c84654fcb3ffe8ba857d60710f +c0c5e1e8de02746056ef7f4769ba08de +307ab2b247df7d078b4a8520e54fef82 +9ab45d7906ee32dcb27279b946ff408c +ae487f2632bae7d646f06c912bc31fd3 +fed631dc0f07721a29b9597275c21f79 +f41e053483b02d9744cec8b9be13b420 +5b0f549cc4201174a9cf20ea2daa1b06 +2652e741ba4a325e2b0d0dfb3f470396 +0296687b4eb6d939a9ea54fbd403d2c2 +25eda214a60f0eab8ba7d1f14c114352 +ccd86204cef43be7c80ae150ec61e9b9 +eb532d7babc98cfaa342f3ce8dfb247f +34ce60bef579bcc1cf932f4ef488a894 +a6ceb39d70ddf88aa865bc12976d836c +ef7a0c7ae427a631e52dcdb41ac7d875 +09a03ad2f4f13cd9dc8b36ae446f0789 +390a6a203ecab6596e291899f00690c6 +6ec1f9798ba44b8724bcb863d3f01651 +3da669d8e2e31986827906121b9cc6a0 +258a683e7b3589600f536ac06701a71a +90467697e3f7866efc007263a59b0411 +5792541c66234a80ba8819ef6c6987e3 +c959097f91c1d8f077777d070c0d294a +8d35c2cc9b159e3e5156af08ca8d05c3 +154c68478d2d9cdc6faf927a9be310bd +aaa7d5428eb16185f056b71652d5ad04 +fb6b162a6fc00727bd109b176f0dcca8 +92a09caf08b59fd29a7a6ecda6defe43 +e65caa35fa5ac24d371b82bd8169dd7a +58e41af015b40169f1325b8eea6d6286 +5d6c85096055c44e224b94d28a2f492d +179a2ed672d2aad6c6977e9278187d8f +f98414071fc97514f5e55b1cb0571508 +449d2a7413121c779f397c9ea6d4707c +f315ab6d241b62bf92839c9744dccc76 +b843a207664c36d3c7d512754921f406 +e62ad608f30531831e02a6aa7ff30772 +20e9769bfb62c705598c9e7cb3114fcf +110d76c9076277de61ac0d210e00ea99 +37363223d2947e22d512af1d73caadd7 +a92a3480f9f521705438771d3cf8fe8c +c871575298e5b7c4fa4c179108ff36b0 +b48c48d47a9d1dd0971f1111bfa67843 +c82d5d566bee4894b8253cc3dabddb41 +a30ef38e14ed0508d303217996734c9b +e507b8c66775124c38668714b90e0f43 +27123a2129ccfb6264fefd9cd9b2cbb6 +ccc852c070670f0143cf51c4a5541328 +cb162c2dd562f49af187c05f39e1d6a9 +a7432d7cb59af5ecb9abd7926f603211 +d2bc46c172ff3f81c2440bbcd38dda1f +5760aada9983ab894fdd8f61932b761a +a99c856fd7d804e6471e1a2c85c45c13 +da62441dc5d06c8891798b2b5cfcf665 +c3ea7bdd9e13f8eb4c6d0331f544631e +0fe4823b800e3ae17ba390c4485d53b5 +0fe9e2f0cf97e508c7f624beb6fd9f6d +6cd0f4231972ae0660582db52ef7cc23 +b73f04635f9ec675d3b1edd253209457 +802092a88b94afadbda2e0f59fc884e4 +4fb184c96271dc1bd96f483bf87038ec +4b9f1633d056a13ca0659fcc3654d36b +2285611b7122fc937464c7ef7bef03d8 +d1a31a1894fb21625468269daefed02f +238cdf6829a3f5b8b358b59456c240b2 +52a28cbcd0f0deca50b2a580f5582288 +1c2f8222ee57d66f888b809617701970 +9a7404134c52c3f0d328fdcfc775b730 +81097ec4cd4088bc7ff4199b04600ce7 +9e88cbd4f8648835c9c7642e53fdb0ae +3bacab0c4f7304c3c1fb51e0166e131a +3937c6ac4951900197bf9bc5a649b672 +d78b2820b38bc05700e643914668719a +3ace2800560783e999f3edb53ebd7051 +ef09468a586d6877b5092956860b32fe +f059e7c4752be9ca3e4c5dd2c62f90ec +71aab94a8367756efa53fb507bc92464 +d7cee17688d254862307c0c7ec56c306 +fa284eb1f7e8bf4220acbedbbb8f7889 +32da44c8d0cb96027dfc9ea398c61949 +2bd15d94b269eda71b9661eb9b662140 +52b130fdb1dccb4b2f00d104c8b5367a +97e26b59c73539e180695f03c8baadf5 +72949d382336780d7b0115485e6952e3 +2147b385add11a789dadb9256ee9fa79 +c478aa16e61a71f2b5020fa300065db5 +903b55bd2dbf885fd3f20a8f77599991 +a071e4c1958e6ece5379b432a1e8a8be +d3e95042631a609e44ed78b5e239399f +8e8f01a12abc718e60f1896d0135a674 +6c8e176465adb250c5b828881762deab +0f809d86b3bbf8d78a784963c939e233 +3a3625db0e36ff156eda206423342eaf +82b82639e3f28f261b5ec84f6e5e089d +d79519ab0b9363cf25fba50cde7e93c4 +d1946c27d3cfea2c22e85dd3dda0dd46 +199cbae81a1e67934490162aed0f75e6 +289fbf98706bc2a1e2db76c42729ec92 
+5b14e94bb38ab3b30fce6cb68aea4c46 +38b4318954acad766eb2f5b5a1388ba9 +5933a7c35678d0c854808dac5f979b35 +6c5e47b3b013414bfcfa12006902307a +96ffa27de2d0656927e667cd252a143b +2d9077324aac51a74602565b48c1a1e3 +944b8a413921a8cc4e8614f67f218dd4 +d04e38b42164623ee6f2b26455e75166 +d67d14f0deb11de4eebe929f0ef2029f +5bd835292124416ec83fbbd988d504a2 +301c6102f3587d87355c64634ed29031 +1ba1896770c276ee7bbd26a27f9ad8b0 +9cf204f68637ff25333e775058b36ebd +3a1215c548fdbf9b147298029d23d69d +d03d31533295d185cc034e8165c91b40 +17437041817c7ca25d292bcbbff4cd06 +bc03311409b1637609e56f193bb52437 +85b016c2b54a085536948bda1bcc852e +ffdfdf006894cc5badb0b4de82da2d10 +3ca05fec9da59dd32456ea7f16bb89bd +9da14305915fe6d94fa31530bd3ed70c +60d8259a21a1ad4df15dbe1f5fafb116 +a30a535fb327cb6da1aaf8c543ad5b01 +ccaab1ea2c790c900762f06a89da6d19 +2834ed765c6488eeca427a3bea3ebabc +1f86ec033fa171563e7043862b5d8274 +70462fbb753e21e4141312827bd7389b +daa6cc1798551facd3ea6f9455f4692f +3ffba6b103d9942f79318249bcd6703c +8b5d3515c8000ec0175324a504e5afb3 +1c9c25a6a2fc40ad2edb1b0bb7296d50 +71a660785f9d022f5fcb6a7850fb0d28 +d2c5eb8753d00f6eb517ab611260f664 +4b1d0bcbf89ecb56c6a4b0a58aef45ab +4a6c0c83f1797b2fc1b818904249d1ea +f362f9f590b63ae2dc3ac88991676f3f +38f9147e82ca91ea4eb63714319589f0 +9a18842564e162c2bab52f809e47c25b +5160149eade5fe401c309eed798044c3 +67e79ba11114f0b8b33af87cf9f0bafc +bb1e6288cb1d8e4849480b8dd60cfca2 +e6e4435d9bd81d29046a30634209638c +9de0becaf147dfa128adeadd30b03b1e +c2915e53f9ad61256173d207d8937ae5 +f4f97811a94f0d364233123facd4cfa7 +d41d8cd98f00b204e9800998ecf8427e +ac40d05aaca47f430b1d6a11381760e1 +901b02d538aced8d75f8d3fafb8e5643 +e2f42f48e8e63df523372ab682aba70b +0d58b5261336320997909b0541f3963e +4ed01f4da5f374132c3d0ea77b4e83f5 +d7e243c61857ba4c92e90b82f0915b98 +d860acce014bc17e74315000e1537c41 +57a50c74faac05fca307f713df0c8120 +4a585e0c1608d498e252063c4422aa0a +1529d36d3708791cc062b23f83231dfb +526c01896f73d6c7c9d9241f0f2bb1b9 +4eb315eb65968eaf1c1531e85ff9b9f7 +7644b2d857abbe282570c2d959763ad6 +eebf01c25c4ffcae57e223c9ca0e0c6f +ed409b3eed7affd7d0e85eb3569079cc +f0491e25a733f2010f2cd2fcda065046 +bf8a8516c435c27183384cb2437c9218 +b9a3ff562a8f06cdf1e0c4f1255001d4 +7708c2feb87de5b5640caf46b7c91751 +601a4aca0434d8f38fa474e2f61335a4 +5e67c2ec738e6bcba4b8d3365c93070d +81052e29cc1f1960e9c18e1915ef0880 +0e794504e33696cbee146c6bb6dcd6bd +8005555a7f0a11e7b5b2db60f1f3e8e6 +8767b88c7cb4ee0c1e0298367a94e1de +469ce859d4eac923b1565f3a65e13070 +757efbe7709856c0c6c4dd1841049d8a +cf784bd482a4900c4c370eb0e6ef8592 +44ddbd6a1845424b2990aff2ebdcff2e +69a1323c8e30c7a9ff1cf1b92b03a6c4 +090b76538b2fdfa5780af37f0dab018b +f3b37d61806db8bf05962025f42c16c0 +2a9f7b4300aebcc472077e5623cb1b39 +ec267be4d15989c8cf05318618ec860f +fcf751c8b8ccfaddd4bf4404e0fde6fb +eb55024084478f1c80f36836643e108f +67111b2bfba5d398d4d72e54886545ff +ef607b16a2752c3222b6512ccee8db5b +510a0cf7fcce4345c38ac204bd460556 +4a9c93de716d27657ae974b3bfb49e80 +297c227676bdc36c0d3839dc5dccca6c +697fe65d2cefb43a95495a8a1d1d4cef +fb61836a1f95bbc0056c7c9a59376b4a +bac8638095ac13a3f92fc3951031fbaf +af540ad083ecd6049687e7faf7d2d64b +9f0b7192955754ba7dc61296b12aa2a7 +39a01119ebeae58778c84ef3758fb9b2 +f325c5c5aedf65f683f7c47e03ded2a9 +90569b12e82e38adf8f400fb425a4199 +f952ce4eeff1bc17d9ae86010e21eb47 +5c543123387bcabecb5456a7128d8427 +f188698cdf956bad10162533e834fbbf +5471e2da26ec0521990e6d661261df11 +1c35e8c1b3bb6cd97708df65b7cf71f6 +0dc05b06dd3273b9edc09f1eae772288 +a19ac8b12f45f9971ece25df32e7f234 +f4e50395682d5039865626b1a36ef426 +a1c81776747bfedeebb591500055825b 
+d8ff984b4fe0d83043c3d7df72a73d9d +6bfc3320236a00cdd870cc3a154cf127 +0835ef87884c61b40c245b7d24a3c6c3 +8ed826d700f17d93e4752104c5c7c67f +9fa18db42b69d4f928666b04d7c1e46f +b21e3d7d6ce950303575ee8281f2e33f +609aa1579363fadbd9c997362a77a9af +b8cd846d6314a157941715f3418374db +9a8996869bfeaa41b5032a315624c399 +e1a1b884e57abacef28631dde6f9980c +f562ba071a3789c3a45bc8a67e229dae +724a46c9276049dab3d4308768a9d128 +8744e2d11e699a37360128ab106d23dd +c02191e1b99eb5993d7009ac89aed319 +191f97833ddfa2154865bd9a612ab858 +2665237823701a7a91f0b56b4f8d4eeb +ca1d149955260cf30375ec5b6fd0bb7d +1aa9f95d49a62b6f765f94f0a316ef8f +37b3af472dae58ee9fc810c6e9c6dd24 +1ce2bb1489f6f3d885af1dee6a0e6583 +9a358bb905499040cb2e472fa56ed99e +6bf2435d50bfb89abd29e9906d682bec +c0350541b80011142995712db7fce328 +f49cddd7221862aaa400af782c04d5e3 +08a0d0f322a6cf76ddb362bacd98b5a3 +d271843b455653e59dfa14e2d9cadfb6 +839c81f7b9d78941741aaf34d340bd3e +113c25f46e52c909f547cf932a49e2bf +d333223b24397e4ee08540ebedf8833f +de64f5c11c651eaf91005ea9926a0d96 +155add75bc9193afa2e9640914850da3 +3d5be6fda23de7922c927cd9fed6e5f9 +258c4898afc1031e814f87dd896c042f +5c813b2a04d0e7cd4f7a565e73079df0 +72e993ca6f3ed1d74c665f5115c242ff +d41d8cd98f00b204e9800998ecf8427e +8b9f265c101ef2add4059a342224f8c4 +6fdacf56f06aadfae0d15b6738d56a73 +79139f3d255378bc99e1582121f50997 +c5045450c8490f80146d515d54094776 +64be8f8c8780557f146e1dc2d2173876 +a6fc64959bd7c8ac36ce953539c44ff6 +9d31616df696a928ef6798029e982acb +82e4b351b0273e024c0863eca59fe1c9 +a7049d2f9156db6224bbe4b0bc0c28a6 +fad9fd5a171cb03ab5dc60aa3464bc76 +c9bf34ad489902f1174dd8d3d5f54e19 +7f7588c5f16f2567dbca798e8e46af6e +0017c2ecfcc9e03326bd56f6ae0db6c5 +3e56a9eb19203208405d2541cf5b200e +3882b53832d62d5097f13441e58cd68c +0fa91f4c38ea4b1ab2bbf748401ab7b4 +2ed1e1d4237f44ad3721b2643ec8f6fc +8e4d5c1025781c0c78be3f54d4b62fb9 +531c8adb5c469cb4ffd50e8e64af3255 +cae3fe00abdbcf66f3785ceae8d636b4 +fe2a5761460503efdcb123875052a19c +2354121b94eaf74da5b3507d20c7ab51 +90a62b98314aaa2d3f0ff52b2e7930c5 +cb509edc432a2c8596d7d708b6e9062d +5fad5f595d1cb63430ea7b76c1ebee4c +b656cba1999f7759b55d2ba926b4ffa8 +b5656fa0bd9f9d5d5bc110dab997e333 diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/eval_varlength.chk b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/eval_varlength.chk new file mode 100644 index 000000000..e9eb66348 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/eval_varlength.chk @@ -0,0 +1 @@ +part_eval_10k.hdf5 611d8bae26646145e1c33338a27ba124 diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/hdf5_md5.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/hdf5_md5.py new file mode 100644 index 000000000..66345add3 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/hdf5_md5.py @@ -0,0 +1,29 @@ +import argparse +import h5py +import numpy as np +import hashlib +import os + +# Exmaple usage: +# python3 tfrecord_md5sum.py --input_tfrecord=eval_10k --output_md5sum=eval_shard.md5 + +parser = argparse.ArgumentParser( + description="HDF5 variable length to MD5sums for BERT.") +parser.add_argument( + '--input_hdf5', + type=str, + required=True, + help='Input tfrecord path') +args = parser.parse_args() + + +if __name__ == '__main__': + + h = hashlib.md5 + +row_sums=[] +f = h5py.File(args.input_hdf5, 'r') +for i in range(f['input_ids'].shape[0]): + row_sums.append(h(str(f['input_ids'][i].tolist()).encode('utf-8')).hexdigest()) +f.close() 
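+# The line below prints "<input basename>\t<md5 over the per-row md5s of input_ids>",
+# i.e. one fingerprint per HDF5 shard, which appears to be the same "<file> <checksum>"
+# form used by the *.chk files in this directory (e.g. eval_varlength.chk).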
+print("{}\t{}".format(os.path.basename(args.input_hdf5), h(str(row_sums).encode('utf-8')).hexdigest())) \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/parallel_create_hdf5.sh b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/parallel_create_hdf5.sh new file mode 100644 index 000000000..0a59c447c --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/parallel_create_hdf5.sh @@ -0,0 +1,75 @@ +#!/bin/bash +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +CPUS=$( ls -d /sys/devices/system/cpu/cpu[[:digit:]]* | wc -w ) +CPUS=$((CPUS / 2)) +echo "Using ${CPUS} CPU cores" + +function usage() +{ + cat << HEREDOC + + Usage: $progname [-i|--inputdir PATH -o|--outputdir PATH -v|--vocab VOCAB-PATH] [-h|--help TIME_STR] + + optional arguments: + -h, --help show this help message and exit + -o, --outputdir PATH pass in a localization of resulting dataset + -i, --inputdir PATH pass in a localization of resulting hdf5 files + -v, --vocab PATH pass in exact path to vocabulary file + +HEREDOC +} + + +#parse passed arguments +while [[ $# -gt 0 ]]; do + key="$1" + + case $key in + -h|--help) + usage + exit 0 + ;; + -o|--outputdir) + OUTPUTDIR="$2" + shift # past argument + shift # past value + ;; + -i|--inputdir) + INPUTDIR="$2" + shift + shift + ;; + -v|--vocab) + VOCAB="$2" + shift + shift + ;; + *) # unknown option + usage + exit 1 + ;; + esac +done + +# get script reference directory +SCRIPT_DIR="$(dirname "$(readlink -f "$0")")" + + +mkdir -p ${OUTPUTDIR} +find -L ${INPUTDIR} -name "part-00*" | xargs --max-args=1 --max-procs=${CPUS} -I{} bash ${SCRIPT_DIR}/create_pretraining_data_wrapper.sh {} ${OUTPUTDIR} ${VOCAB} + +### If continue, you can try instead of line above something like line below to pick only the files not yet computed +# comm -3 <(ls -1 ${INPUTDIR}/) <(ls -1 ${OUTPUTDIR} | sed 's/\.hdf5$//') | grep -e "^part" | xargs --max-args=1 --max-procs=${CPUS} -I{} ${SCRIPT_DIR}/create_pretraining_data_wrapper.sh ${INPUTDIR}/{} ${OUTPUTDIR} ${VOCAB} + diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/pick_eval_samples.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/pick_eval_samples.py new file mode 100644 index 000000000..b4cab2ed2 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/pick_eval_samples.py @@ -0,0 +1,83 @@ +"""Script for picking certain number of samples. 
+""" + +import argparse +import time +import logging +import collections +import h5py +import numpy as np + +parser = argparse.ArgumentParser( + description="Eval sample picker for BERT.") +parser.add_argument( + '--input_hdf5_file', + type=str, + default='', + help='Input hdf5_file path') +parser.add_argument( + '--output_hdf5_file', + type=str, + default='', + help='Output hdf5_file path') +parser.add_argument( + '--num_examples_to_pick', + type=int, + default=10000, + help='Number of examples to pick') +parser.add_argument( + '--max_seq_length', + type=int, + default=512, + help='The maximum number of tokens within a sequence.') +parser.add_argument( + '--max_predictions_per_seq', + type=int, + default=76, + help='The maximum number of predictions within a sequence.') +args = parser.parse_args() + +max_seq_length = args.max_seq_length +max_predictions_per_seq = args.max_predictions_per_seq +logging.basicConfig(level=logging.INFO) + +if __name__ == '__main__': + tic = time.time() + h5_ifile = h5py.File(args.input_hdf5_file, 'r') + num_examples = h5_ifile.get('next_sentence_labels').shape[0] + + input_ids = np.zeros([args.num_examples_to_pick, max_seq_length], dtype="int16") + input_mask = np.zeros([args.num_examples_to_pick, max_seq_length], dtype="int8") + segment_ids = np.zeros([args.num_examples_to_pick, max_seq_length], dtype="int8") + masked_lm_positions = np.zeros([args.num_examples_to_pick, max_predictions_per_seq], dtype="int16") + masked_lm_ids = np.zeros([args.num_examples_to_pick, max_predictions_per_seq], dtype="int16") + next_sentence_labels = np.zeros(args.num_examples_to_pick, dtype="int8") + +# hdf5_compression_method = "gzip" + hdf5_compression_method = None + i = 0 + pick_ratio = num_examples / args.num_examples_to_pick + num_examples_picked = 0 + for i in range(args.num_examples_to_pick): + idx = int(i * pick_ratio) + input_ids[i,:] = h5_ifile['input_ids'][idx,:] + input_mask[i,:] = h5_ifile['input_mask'][idx,:] + segment_ids[i,:] = h5_ifile['segment_ids'][idx,:] + masked_lm_positions[i,:] = h5_ifile['masked_lm_positions'][idx,:] + masked_lm_ids[i,:] = h5_ifile['masked_lm_ids'][idx,:] + next_sentence_labels[i] = h5_ifile['next_sentence_labels'][idx] + num_examples_picked += 1 + + h5_writer = h5py.File(args.output_hdf5_file+".hdf5", 'w') + h5_writer.create_dataset('input_ids', data=input_ids, dtype='i2', compression=hdf5_compression_method) + h5_writer.create_dataset('input_mask', data=input_mask, dtype='i1', compression=hdf5_compression_method) + h5_writer.create_dataset('segment_ids', data=segment_ids, dtype='i1', compression=hdf5_compression_method) + h5_writer.create_dataset('masked_lm_positions', data=masked_lm_positions, dtype='i2', compression=hdf5_compression_method) + h5_writer.create_dataset('masked_lm_ids', data=masked_lm_ids, dtype='i2', compression=hdf5_compression_method) + h5_writer.create_dataset('next_sentence_labels', data=next_sentence_labels, dtype='i1', compression=hdf5_compression_method) + h5_writer.flush() + h5_writer.close() + + toc = time.time() + logging.info("Picked %d examples out of %d samples in %.2f sec", + args.num_examples_to_pick, num_examples, toc - tic) diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/pick_eval_samples_varlength.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/pick_eval_samples_varlength.py new file mode 100644 index 000000000..380f101b0 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/pick_eval_samples_varlength.py @@ -0,0 +1,76 @@ 
+"""Script for picking certain number of samples. +""" + +import argparse +import time +import logging +import collections +import h5py +import numpy as np + +parser = argparse.ArgumentParser( + description="Eval sample picker for BERT.") +parser.add_argument( + '--input_hdf5_file', + type=str, + default='', + help='Input hdf5_file path') +parser.add_argument( + '--output_hdf5_file', + type=str, + default='', + help='Output hdf5_file path') +parser.add_argument( + '--num_examples_to_pick', + type=int, + default=10000, + help='Number of examples to pick') +parser.add_argument( + '--max_seq_length', + type=int, + default=512, + help='The maximum number of tokens within a sequence.') +parser.add_argument( + '--max_predictions_per_seq', + type=int, + default=76, + help='The maximum number of predictions within a sequence.') +args = parser.parse_args() + +max_seq_length = args.max_seq_length +max_predictions_per_seq = args.max_predictions_per_seq +logging.basicConfig(level=logging.INFO) + +if __name__ == '__main__': + tic = time.time() + h5_ifile = h5py.File(args.input_hdf5_file, 'r') + num_examples = h5_ifile.get('next_sentence_labels').shape[0] + +# hdf5_compression_method = "gzip" + hdf5_compression_method = None + + h5_writer = h5py.File(args.output_hdf5_file+".hdf5", 'w') + input_ids = h5_writer.create_dataset('input_ids', (args.num_examples_to_pick,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method) + segment_ids = h5_writer.create_dataset('segment_ids', (args.num_examples_to_pick,), dtype=h5py.vlen_dtype(np.dtype('int8')), compression=hdf5_compression_method) + masked_lm_positions = h5_writer.create_dataset('masked_lm_positions', (args.num_examples_to_pick,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method) + masked_lm_ids = h5_writer.create_dataset('masked_lm_ids', (args.num_examples_to_pick,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method) + next_sentence_labels = h5_writer.create_dataset('next_sentence_labels', data=np.zeros(args.num_examples_to_pick, dtype="int8"), dtype='i1', compression=hdf5_compression_method) + + i = 0 + pick_ratio = num_examples / args.num_examples_to_pick + num_examples_picked = 0 + for i in range(args.num_examples_to_pick): + idx = int(i * pick_ratio) + input_ids[i] = h5_ifile['input_ids'][idx, :sum(h5_ifile['input_mask'][idx])] + segment_ids[i] = h5_ifile['segment_ids'][idx, :sum(h5_ifile['input_mask'][idx])] + masked_lm_positions[i] = h5_ifile['masked_lm_positions'][idx, :sum(h5_ifile['masked_lm_positions'][idx]!=0)] + masked_lm_ids[i] = h5_ifile['masked_lm_ids'][idx, :sum(h5_ifile['masked_lm_positions'][idx]!=0)] + next_sentence_labels[i] = h5_ifile['next_sentence_labels'][idx] + num_examples_picked += 1 + + h5_writer.flush() + h5_writer.close() + + toc = time.time() + logging.info("Picked %d examples out of %d samples in %.2f sec", + args.num_examples_to_pick, num_examples, toc - tic) \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/prepare_data.sh b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/prepare_data.sh new file mode 100644 index 000000000..e88ad4d03 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/prepare_data.sh @@ -0,0 +1,167 @@ +#!/bin/bash + +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function usage() +{ + cat << HEREDOC + + Usage: $progname [-o|--outputdir PATH] [-h|--help TIME_STR] + + optional arguments: + -h, --help show this help message and exit + -o, --outputdir PATH pass in a localization of resulting dataset + -s, --skip-download skip downloading raw files from GDrive (assuming it already has been done) + -p, --shards number of resulting shards. For small scales (less than 256 nodes) use 2048. For sacles >256 4320 is recommended (default 4320) + +HEREDOC +} + +SCRIPT_DIR="$(dirname "$(readlink -f "$0")")" + +#if no arguments passed +DATADIR=/workspace/bert_data +SKIP=0 +SHARDS=4320 + +#parse passed arguments +while [[ $# -gt 0 ]]; do + key="$1" + + case $key in + -h|--help) + usage + exit 0 + ;; + -o|--outputdir) + DATADIR="$2" + shift # past argument + shift # past value + ;; + -p|--shards) + SHARDS="$2" + shift # past argument + shift # past value + ;; + -s|--skip-download) + SKIP=1 + shift + ;; + *) # unknown option + usage + exit 1 + ;; + esac +done + + +echo "Preparing Mlperf BERT dataset in ${DATADIR}" +mkdir -p ${DATADIR} + +if (( SKIP==0 )) ; then + + mkdir -p ${DATADIR}/phase1 && cd ${DATADIR}/phase1 + ### Download + # bert_config.json + gdown https://drive.google.com/uc?id=1fbGClQMi2CoMv7fwrwTC5YYPooQBdcFW + # vocab.txt + gdown https://drive.google.com/uc?id=1USK108J6hMM_d27xCHi738qBL8_BT1u1 + + ### Download dataset + mkdir -p ${DATADIR}/download && cd ${DATADIR}/download + # md5 sums + gdown https://drive.google.com/uc?id=1tmMgLwoBvbEJEHXh77sqrXYw5RpqT8R_ + # processed chunks + gdown https://drive.google.com/uc?id=14xV2OUGSQDG_yDBrmbSdcDC-QGeqpfs_ + # unpack results and verify md5sums + tar -xzf results_text.tar.gz && (cd results4 && md5sum --check ../bert_reference_results_text_md5.txt) + + + ### Download TF1 checkpoint + mkdir -p ${DATADIR}/phase1 && cd ${DATADIR}/phase1 + # model.ckpt-28252.data-00000-of-00001 + gdown https://drive.google.com/uc?id=1chiTBljF0Eh1U5pKs6ureVHgSbtU8OG_ + # model.ckpt-28252.index + gdown https://drive.google.com/uc?id=1Q47V3K3jFRkbJ2zGCrKkKk-n0fvMZsa0 + # model.ckpt-28252.meta + gdown https://drive.google.com/uc?id=1vAcVmXSLsLeQ1q7gvHnQUSth5W_f_pwv + + cd ${DATADIR} + +fi + +### Create HDF5 files for training +mkdir -p ${DATADIR}/hdf5/training +bash ${SCRIPT_DIR}/parallel_create_hdf5.sh -i ${DATADIR}/download/results4 -o ${DATADIR}/hdf5/training -v ${DATADIR}/phase1/vocab.txt + +### Chop HDF5 files into chunks +python3 ${SCRIPT_DIR}/chop_hdf5_files.py \ + --num_shards ${SHARDS} \ + --input_hdf5_dir ${DATADIR}/hdf5/training \ + --output_hdf5_dir ${DATADIR}/hdf5/training-${SHARDS} + +### Convert fixed length to variable length format +mkdir -p ${DATADIR}/hdf5/training-${SHARDS}/hdf5_${SHARDS}_shards_varlength +CPUS=$( ls -d /sys/devices/system/cpu/cpu[[:digit:]]* | wc -w ) +CPUS=$((CPUS / 2)) +ls -1 ${DATADIR}/hdf5/training-${SHARDS}/hdf5_${SHARDS}_shards_uncompressed | \ + xargs --max-args=1 --max-procs=${CPUS} -I{} python3 ${SCRIPT_DIR}/convert_fixed2variable.py \ + 
--input_hdf5_file ${DATADIR}/hdf5/training-${SHARDS}/hdf5_${SHARDS}_shards_uncompressed/{} \ + --output_hdf5_file ${DATADIR}/hdf5/training-${SHARDS}/hdf5_${SHARDS}_shards_varlength/{} + +### Create full HDF5 files for evaluation +mkdir -p ${DATADIR}/hdf5/eval +python3 ${SCRIPT_DIR}/create_pretraining_data.py \ + --input_file=${DATADIR}/download/results4/eval.txt \ + --output_file=${DATADIR}/hdf5/eval/eval_all \ + --vocab_file=${DATADIR}/phase1/vocab.txt \ + --do_lower_case=True \ + --max_seq_length=512 \ + --max_predictions_per_seq=76 \ + --masked_lm_prob=0.15 \ + --random_seed=12345 \ + --dupe_factor=10 + +### pick 10k samples for evaluation +python3 ${SCRIPT_DIR}/pick_eval_samples.py \ + --input_hdf5_file=${DATADIR}/hdf5/eval/eval_all.hdf5 \ + --output_hdf5_file=${DATADIR}/hdf5/eval/part_eval_10k \ + --num_examples_to_pick=10000 + +### Convert fixed length to variable length format +mkdir -p ${DATADIR}/hdf5/eval_varlength +python3 ${SCRIPT_DIR}/convert_fixed2variable.py --input_hdf5_file ${DATADIR}/hdf5/eval/part_eval_10k.hdf5 \ + --output_hdf5_file ${DATADIR}/hdf5/eval_varlength/part_eval_10k.hdf5 + +### Convert Tensorflow checkpoint to Pytorch one +python3 ${SCRIPT_DIR}/convert_tf_checkpoint.py \ + --tf_checkpoint ${DATADIR}/phase1/model.ckpt-28252 \ + --bert_config_path ${DATADIR}/phase1/bert_config.json \ + --output_checkpoint ${DATADIR}/phase1/model.ckpt-28252.pt + + +### +ln -s ${DATADIR}/phase1/model.ckpt-28252.pt ${DATADIR}/model.ckpt-28252.pt +ln -s ${DATADIR}/phase1/bert_config.json ${DATADIR}/bert_config.json +mkdir ${DATADIR}/eval_set_uncompressed +ln -s ${DATADIR}/hdf5/eval/part_eval_10k.hdf5 ${DATADIR}/eval_set_uncompressed/ +ln -s ${DATADIR}/hdf5/training-2048/hdf5_2048_shards_uncompressed ${DATADIR}/2048_shards_uncompressed + +### Example of how to generate checksums to verify correctness of the process +# for i in `seq -w 0000 04319`; do +# python ${SCRIPT_DIR}/hdf5_md5.py \ +# --input_hdf5 ${DATADIR}/hdf5/training-${SHARDS}/hdf5_${SHARDS}_shards_varlength/part_${i}_of_04320.hdf5 +# done | tee 4320_shards_varlength.chk \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/process_wiki.sh b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/process_wiki.sh new file mode 100644 index 000000000..c2b7da9c4 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/process_wiki.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# invocation script to cleanup the wiki dataset +# Usage: ./process_wiki.sh +# example: ./process_wiki.sh 'sample_data/wiki_??' 
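+# Note: the train/eval split and gather steps below read sentence-segmented shards
+# with the '.3' suffix, so the commented-out cleanup and sentence-segmentation steps
+# further down (or an equivalent preprocessing pass) must already have produced
+# '<input>.3' files before this script is run.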
+# The resulted files will be placed in ./results + +inputs=$1 + +pip install nltk + +# Remove doc tag and title +# python ./cleanup_file.py --data=$inputs --output_suffix='.1' + +# Further clean up files +# for f in ${inputs}; do +# ./clean.sh ${f}.1 ${f}.2 +# done + +# Sentence segmentation +# python ./do_sentence_segmentation.py --data=$inputs --input_suffix='.2' --output_suffix='.3' + +mkdir -p ./results + +# Train/Eval seperation +python ./seperate_test_set.py --data=$inputs --input_suffix='.3' --output_suffix='.4' --num_test_articles=10000 --test_output='./results/eval' + +## Choose file size method or number of packages by uncommenting only one of the following do_gather options +# Gather into fixed size packages +python ./do_gather.py --data=$inputs --input_suffix='.4' --block_size=26.92 --out_dir='./results' + +# Gather into fixed number of packages +#NUM_PACKAGES=512 +#python ./do_gather.py --data=$inputs --input_suffix='.3' --num_outputs=$NUM_PACKAGES --out_dir='./results' diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/seperate_test_set.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/seperate_test_set.py new file mode 100644 index 000000000..eb4192ee0 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/seperate_test_set.py @@ -0,0 +1,120 @@ +"""Script for seperating training and test sets. +""" +import argparse +import glob +import io +import logging +import multiprocessing +import os +import time +import random +import hashlib + +parser = argparse.ArgumentParser( + description='Training and test sets seperator for BERT.') +parser.add_argument( + '--data', + type=str, + default='./*/*.compact', + help='Input files. Default is "./*/*.compact"') +parser.add_argument( + '--input_suffix', + type=str, + default='.3', + help='Suffix for input files. Default is ".3"') +parser.add_argument( + '--output_suffix', + type=str, + default='.4', + help='Suffix for output training files. Default is ".4"') +parser.add_argument( + '--nworker', + type=int, + default=72, + help='Number of workers for parallel processing.') +parser.add_argument( + '--seed', + type=int, + default=12345, + help='Seed for randomization. Default is 12345.') +parser.add_argument( + '--num_test_articles', + type=int, + default=10000, + help='Number of articals withheld in test set. Default is 10k.') +parser.add_argument( + '--test_output', + type=str, + default='./results/eval', + help='Postfix for test set output. 
txt and md5 extensions will be added.') +args = parser.parse_args() + +# arguments +input_files = sorted(glob.glob(os.path.expanduser(args.data))) +num_files = len(input_files) +num_workers = args.nworker +logging.basicConfig(level=logging.INFO) +logging.info('Number of input files to process = %d', num_files) +# test_articles_in_files = [[] for _ in range(num_files)] + +def process_one_file(file_id): + """Seperating train and eval data, for one file.""" + one_input = input_files[file_id] + input_filename = one_input + args.input_suffix + output_filename = one_input + args.output_suffix + num_articles = 0 + num_tests = int((file_id+1) * args.num_test_articles * 1.0 / num_files) \ + - int(file_id * args.num_test_articles * 1.0 / num_files) + file_seed = args.seed + file_id * 13 + rng = random.Random(file_seed) + test_articles = [] + + with io.open(input_filename, 'r', encoding='utf-8', newline='\n') as fin: + with io.open(output_filename, 'w', encoding='utf-8', newline='\n') as fout: + lines = fin.read() + articles = lines.split('\n\n') + num_articles = len(articles) + test_article_ids = [] + while len(test_article_ids) < num_tests: + new_id = int(rng.random() * num_articles) + if new_id in test_article_ids: + continue + test_article_ids.append(new_id) + + for i in range(num_articles): + article = articles[i] + if i in test_article_ids: + # test_articles_in_files[file_id].append(article) + test_articles.append(article) + else: + fout.write(article) + fout.write('\n\n') + + logging.info('Processed %s => %s, %d of %d articals picked into test set. %s', + input_filename, output_filename, num_tests, num_articles, + test_article_ids) + return test_articles + + +if __name__ == '__main__': + tic = time.time() + p = multiprocessing.Pool(num_workers) + file_ids = range(num_files) + test_articles_in_files = p.map(process_one_file, file_ids) + toc = time.time() + logging.info('Processed %s (%d files) in %.2f sec', + args.data, num_files, toc - tic) + + output_filename = args.test_output + '.txt' + hash_filename = args.test_output + '.md5' + with io.open(output_filename, 'w', encoding='utf-8', newline='\n') as fout: + with io.open(hash_filename, 'w', encoding='utf-8', newline='\n') as hashout: + for f in test_articles_in_files: + for article in f: + fout.write(article) + fout.write('\n\n') + + article_hash = hashlib.md5(article.rstrip().encode('utf-8')).hexdigest() + hashout.write(article_hash) + hashout.write('\n') + diff --git a/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/tokenization.py b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/tokenization.py new file mode 100644 index 000000000..4beb5b35c --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/data_preprocessing/tokenization.py @@ -0,0 +1,413 @@ +"""Tokenization classes.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import re +import unicodedata + +from absl import flags +import six +import tensorflow.compat.v1 as tf + +FLAGS = flags.FLAGS + +flags.DEFINE_bool( + "preserve_unused_tokens", False, + "If True, Wordpiece tokenization will not be applied to words in the vocab." 
+) + +_UNUSED_TOKEN_RE = re.compile("^\\[unused\\d+\\]$") + + +def preserve_token(token, vocab): + """Returns True if the token should forgo tokenization and be preserved.""" + if not FLAGS.preserve_unused_tokens: + return False + if token not in vocab: + return False + return bool(_UNUSED_TOKEN_RE.search(token)) + + +def validate_case_matches_checkpoint(do_lower_case, init_checkpoint): + """Checks whether the casing config is consistent with the checkpoint name.""" + + # The casing has to be passed in by the user and there is no explicit check + # as to whether it matches the checkpoint. The casing information probably + # should have been stored in the bert_config.json file, but it's not, so + # we have to heuristically detect it to validate. + + if not init_checkpoint: + return + + m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint) + if m is None: + return + + model_name = m.group(1) + + lower_models = [ + "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12", + "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12" + ] + + cased_models = [ + "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16", + "multi_cased_L-12_H-768_A-12" + ] + + is_bad_config = False + if model_name in lower_models and not do_lower_case: + is_bad_config = True + actual_flag = "False" + case_name = "lowercased" + opposite_flag = "True" + + if model_name in cased_models and do_lower_case: + is_bad_config = True + actual_flag = "True" + case_name = "cased" + opposite_flag = "False" + + if is_bad_config: + raise ValueError( + "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. " + "However, `%s` seems to be a %s model, so you " + "should pass in `--do_lower_case=%s` so that the fine-tuning matches " + "how the model was pre-training. If this error is wrong, please " + "just comment out this check." % (actual_flag, init_checkpoint, + model_name, case_name, opposite_flag)) + + +def convert_to_unicode(text): + """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" + if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text.decode("utf-8", "ignore") + elif isinstance(text, unicode): + return text + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or Python 3?") + + +def printable_text(text): + """Returns text encoded in a way suitable for print or `tf.logging`.""" + + # These functions want `str` for both Python2 and Python3, but in one case + # it's a Unicode string and in the other it's a byte string. 
+ if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return text.encode("utf-8") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or Python 3?") + + +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with tf.gfile.GFile(vocab_file, "r") as reader: + while True: + token = convert_to_unicode(reader.readline()) + if not token: + break + token = token.strip() + if token not in vocab: + vocab[token] = len(vocab) + return vocab + + +def convert_by_vocab(vocab, items): + """Converts a sequence of [tokens|ids] using the vocab.""" + output = [] + for item in items: + output.append(vocab[item]) + return output + + +def convert_tokens_to_ids(vocab, tokens): + return convert_by_vocab(vocab, tokens) + + +def convert_ids_to_tokens(inv_vocab, ids): + return convert_by_vocab(inv_vocab, ids) + + +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +class FullTokenizer(object): + """Runs end-to-end tokenziation.""" + + def __init__(self, vocab_file, do_lower_case=True): + self.vocab = load_vocab(vocab_file) + self.inv_vocab = {v: k for k, v in self.vocab.items()} + self.basic_tokenizer = BasicTokenizer( + do_lower_case=do_lower_case, vocab=self.vocab) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) + + def tokenize(self, text): + split_tokens = [] + for token in self.basic_tokenizer.tokenize(text): + if preserve_token(token, self.vocab): + split_tokens.append(token) + continue + for sub_token in self.wordpiece_tokenizer.tokenize(token): + split_tokens.append(sub_token) + + return split_tokens + + def convert_tokens_to_ids(self, tokens): + return convert_by_vocab(self.vocab, tokens) + + def convert_ids_to_tokens(self, ids): + return convert_by_vocab(self.inv_vocab, ids) + + +class BasicTokenizer(object): + """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" + + def __init__(self, do_lower_case=True, vocab=tuple()): + """Constructs a BasicTokenizer. + + Args: + do_lower_case: Whether to lower case the input. + vocab: A container of tokens to not mutate during tokenization. + """ + self.do_lower_case = do_lower_case + self.vocab = vocab + + def tokenize(self, text): + """Tokenizes a piece of text.""" + text = convert_to_unicode(text) + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). 
+ text = self._tokenize_chinese_chars(text) + + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if preserve_token(token, self.vocab): + split_tokens.append(token) + continue + if self.do_lower_case: + token = token.lower() + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text): + """Splits punctuation on a piece of text.""" + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ((cp >= 0x4E00 and cp <= 0x9FFF) or # + (cp >= 0x3400 and cp <= 0x4DBF) or # + (cp >= 0x20000 and cp <= 0x2A6DF) or # + (cp >= 0x2A700 and cp <= 0x2B73F) or # + (cp >= 0x2B740 and cp <= 0x2B81F) or # + (cp >= 0x2B820 and cp <= 0x2CEAF) or + (cp >= 0xF900 and cp <= 0xFAFF) or # + (cp >= 0x2F800 and cp <= 0x2FA1F)): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xfffd or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +class WordpieceTokenizer(object): + """Runs WordPiece tokenziation.""" + + def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """Tokenizes a piece of text into its word pieces. + + This uses a greedy longest-match-first algorithm to perform tokenization + using the given vocabulary. + + For example: + input = "unaffable" + output = ["un", "##aff", "##able"] + + Args: + text: A single token or whitespace separated tokens. This should have + already been passed through `BasicTokenizer. + + Returns: + A list of wordpiece tokens. 
+ """ + + text = convert_to_unicode(text) + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens + + +def _is_whitespace(char): + """Checks whether `chars` is a whitespace character.""" + # \t, \n, and \r are technically control characters but we treat them + # as whitespace since they are generally considered as such. + if char == " " or char == "\t" or char == "\n" or char == "\r": + return True + cat = unicodedata.category(char) + if cat == "Zs": + return True + return False + + +def _is_control(char): + """Checks whether `chars` is a control character.""" + # These are technically control characters but we count them as whitespace + # characters. + if char == "\t" or char == "\n" or char == "\r": + return False + cat = unicodedata.category(char) + if cat in ("Cc", "Cf"): + return True + return False + + +def _is_punctuation(char): + """Checks whether `chars` is a punctuation character.""" + cp = ord(char) + # We treat all non-letter/number ASCII as punctuation. + # Characters such as "^", "$", and "`" are not in the Unicode + # Punctuation class but we treat them as punctuation anyways, for + # consistency. + if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or + (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): + return True + cat = unicodedata.category(char) + if cat.startswith("P"): + return True + return False diff --git a/nlp/language_model/bert_sample/pytorch/base/dataloaders/__init__.py b/nlp/language_model/bert_sample/pytorch/base/dataloaders/__init__.py new file mode 100644 index 000000000..7c351dd26 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/dataloaders/__init__.py @@ -0,0 +1,3 @@ +from .dataset import PretrainingDataset +from .dataloader import create_train_dataloader, create_eval_dataloader +from .dataloader import WorkerInitializer \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/dataloaders/dataloader.py b/nlp/language_model/bert_sample/pytorch/base/dataloaders/dataloader.py new file mode 100644 index 000000000..0331b50d5 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/dataloaders/dataloader.py @@ -0,0 +1,235 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
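+# Minimal usage sketch for the PretrainingDataloaders class defined below in this
+# file. Illustrative only: `config`, `epoch` and the executor wiring are assumptions
+# and are not part of this patch.
+#
+#   from concurrent.futures import ProcessPoolExecutor
+#   pool = ProcessPoolExecutor(max_workers=1)
+#   dataloaders = PretrainingDataloaders(
+#       train_dir=config.train_dir,
+#       max_predictions_per_seq=config.max_predictions_per_seq,
+#       batch_size=config.train_batch_size,
+#       seed=config.seed,
+#       worker_init=WorkerInitializer.default(config.seed),
+#       pool=pool,
+#   )
+#   dataloaders.set_epoch(epoch)
+#   for dataloader_idx, batch_idx, batch in dataloaders.iter_batchs():
+#       input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch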
+ + +import math +import os +import random +from concurrent.futures import ProcessPoolExecutor, Future +from typing import Union, Tuple, Any, List + +import numpy as np +import torch +import torch.distributed as dist +from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, ConcatDataset +from torch.utils.data.distributed import DistributedSampler + +import utils +from .dataset import PretrainingDataset + + +def get_sampler(dataset, sampler_type): + return dict( + random=RandomSampler, + sequential=SequentialSampler, + distributed=DistributedSampler + )[sampler_type.lower()](dataset) + + +class WorkerInitializer(object): + + _instance = None + + def __init__(self, seed): + self.seed = seed + + def __call__(self, idx): + np.random.seed(seed=self.seed + idx) + random.seed(self.seed + idx) + + @classmethod + def default(cls, seed=0): + if cls._instance is None: + cls._instance = cls(seed) + return cls._instance + + +# sampler: Random | Sequential | Distributed +def create_train_dataloader( + dataset, + batch_size, + worker_init_fn: WorkerInitializer=None, + sampler_type='Random', + pin_memory=True +): + if worker_init_fn is None: + worker_init_fn = WorkerInitializer.default() + sampler = get_sampler(dataset, sampler_type) + dataloader = DataLoader( + dataset, + sampler=sampler, + batch_size=batch_size, + num_workers=0 if batch_size <= 8 else 4, + worker_init_fn=worker_init_fn, + pin_memory=pin_memory, + ) + + return dataloader + + +def create_eval_dataloader(eval_dir, eval_batch_size, max_predictions_per_seq, num_eval_examples, worker_init_fn): + eval_data = [] + for eval_file in sorted(os.listdir(eval_dir)): + eval_file_path = os.path.join(eval_dir, eval_file) + if os.path.isfile(eval_file_path) and 'part' in eval_file_path: + eval_data.extend(PretrainingDataset(eval_file_path, max_pred_length=max_predictions_per_seq)) + if len(eval_data) > num_eval_examples: + eval_data = eval_data[:num_eval_examples] + break + + if torch.distributed.is_initialized(): + chunk_size = num_eval_examples // torch.distributed.get_world_size() + eval_sampler = DistributedSampler(eval_data, shuffle=False) + else: + chunk_size = num_eval_examples + eval_sampler = SequentialSampler(eval_data) + eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=eval_batch_size, + num_workers=0 if min(chunk_size, eval_batch_size) <= 10 else 4, + worker_init_fn=worker_init_fn, pin_memory=True) + return eval_dataloader + + +class PretrainingDataloaders: + + def __init__(self, train_dir: str, + max_predictions_per_seq: int, + batch_size: int=2, + shuffle: bool=True, + seed: Union[int, list]=0, + num_replicas: int=None, + rank: int=None, + num_files_per_iter: int=1, + worker_init: WorkerInitializer=None, + pin_memory: bool=True, + pool: ProcessPoolExecutor=None): + self.train_dir = train_dir + self.max_predictions_per_seq = max_predictions_per_seq + self.batch_size = batch_size + self.shuffle = shuffle + self.seed = seed + self.num_files_per_iter = num_files_per_iter + self.worker_init = worker_init + self.pin_memory = pin_memory + + self.files = self.get_files() + self.num_files = len(self.files) + + if num_replicas is None: + if dist.is_initialized(): + num_replicas = dist.get_world_size() + else: + num_replicas = 1 + self.num_replicas = num_replicas + + if rank is None: + rank = utils.get_rank() + self.rank = rank + + self.num_files_per_replica = int(math.ceil(self.num_files / self.num_replicas)) + self.total_files = self.num_files_per_replica * self.num_replicas + + self.files_per_replica: 
List[str] = None + + # Prefetch dataloader + self.pool = pool + self.prefetched_dataloader_future: Future = None + + self.sub_dataloader: DataLoader = None + + def get_seed(self, epoch=0): + if isinstance(self.seed, (tuple, list)): + # print("self.seed len: ", len(self.seed)) + # print("epoch: ", epoch) + # length = len(self.seed) + # return self.seed[epoch % length] + return self.seed[epoch] + + return self.seed + epoch + + def get_files(self): + join = os.path.join + files = [join(self.train_dir, f) for f in os.listdir(self.train_dir) if + os.path.isfile(join(self.train_dir, f)) and 'part' in f] + + files.sort() + + return files + + def set_epoch(self, epoch): + if self.shuffle: + random.Random(self.get_seed(epoch)).shuffle(self.files) + + files_per_replica = self.files[self.rank: self.total_files: self.num_replicas] + padding_size = self.num_files_per_replica - len(files_per_replica) + if padding_size > 0: + files_per_replica = files_per_replica + self.files[: padding_size] + self.files_per_replica = files_per_replica + + @staticmethod + def next_dataloader(idx: int, max_predictions_per_seq: int, + files_per_replica: List, num_files_per_iter: int, + batch_size: int, shuffle: bool, + worker_init: WorkerInitializer, pin_memory: bool): + # print("files_per_replica = ", files_per_replica) + # print(idx, num_files_per_iter) + files_per_iter = files_per_replica[idx * num_files_per_iter: (idx + 1) * num_files_per_iter] + datasets = [] + for file in files_per_iter: + datasets.append(PretrainingDataset(file, max_predictions_per_seq)) + + datasets = ConcatDataset(datasets) + sampler_type = "Random" if shuffle else "Sequential" + return create_train_dataloader( + datasets, batch_size, worker_init, + sampler_type=sampler_type, pin_memory=pin_memory + ) + + def iter_batchs(self) -> Tuple[int, int, Any]: + for dataloader_idx, sub_dataloader in enumerate(self): + for batch_idx, batch in enumerate(sub_dataloader): + yield dataloader_idx, batch_idx, batch + + def __iter__(self): + self._next_index = 0 + self._num_iters = int(math.ceil(self.num_files_per_replica / self.num_files_per_iter)) + return self + + def __next__(self) -> DataLoader: + if self._next_index < self._num_iters: + next_dataloader_args = dict( + max_predictions_per_seq=self.max_predictions_per_seq, + files_per_replica=self.files_per_replica, + num_files_per_iter=self.num_files_per_iter, + batch_size=self.batch_size, + shuffle=self.shuffle, + worker_init=self.worker_init, + pin_memory=self.pin_memory + ) + if (self._next_index + 1) < len(self.files_per_replica): + self.prefetch_dataloader(self._next_index + 1, **next_dataloader_args) + if self._next_index == 0 or self.pool is None: + data = self.next_dataloader( + idx=self._next_index, + **next_dataloader_args + ) + else: + data = self.prefetched_dataloader_future.result() + self._next_index += 1 + self.sub_dataloader = data + return data + else: + raise StopIteration() + + def prefetch_dataloader(self, idx, *args, **kwargs): + if self.pool is not None: + self.prefetched_dataloader_future = self.pool.submit( + self.next_dataloader, + idx = idx, *args, **kwargs + ) + diff --git a/nlp/language_model/bert_sample/pytorch/base/dataloaders/dataset.py b/nlp/language_model/bert_sample/pytorch/base/dataloaders/dataset.py new file mode 100644 index 000000000..a9b52f8e2 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/dataloaders/dataset.py @@ -0,0 +1,159 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import random + +import h5py +import numpy as np +import os + +import torch +import torch.distributed as dist +import torch.nn.functional as F +from torch.utils.data import Dataset + + +class PretrainingDataset(Dataset): + def __init__(self, input_file, max_pred_length): + self.input_file = input_file + self.max_pred_length = max_pred_length + f = h5py.File(input_file, "r") + keys = ['input_ids', 'input_mask', 'segment_ids', 'masked_lm_positions', 'masked_lm_ids', + 'next_sentence_labels'] + self.inputs = [np.asarray(f[key][:]) for key in keys] + f.close() + + def __len__(self): + 'Denotes the total number of samples' + return len(self.inputs[0]) + + def __getitem__(self, index): + [input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, next_sentence_labels] = [ + torch.from_numpy(input[index].astype(np.int64)) if indice < 5 else torch.from_numpy( + np.asarray(input[index].astype(np.int64))) for indice, input in enumerate(self.inputs)] + + masked_lm_labels = torch.zeros(input_ids.shape, dtype=torch.long) + index = self.max_pred_length + masked_token_count = torch.count_nonzero(masked_lm_positions) + if masked_token_count != 0: + index = masked_token_count + masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index] + + return [input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels] + + +def exchange_padding_fast(device, max_batch_size, input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels): + torch.cuda.nvtx.range_push('exchangepadding') + pad_size = max_batch_size - input_ids.shape[0] + if pad_size > 0: + input_ids = F.pad(input_ids, (0, 0, 0, pad_size)) + segment_ids = F.pad(segment_ids, (0, 0, 0, pad_size)) + input_mask = F.pad(input_mask, (0, 0, 0, pad_size)) + masked_lm_labels = F.pad(masked_lm_labels, (0, 0, 0, pad_size)) + next_sentence_labels = F.pad(next_sentence_labels, (0, pad_size)) + ngpus = torch.distributed.get_world_size() + nseqs = input_mask.shape[0] + ntokensperseq = input_mask.shape[1] + igpu = torch.distributed.get_rank() + + flattened_length_seq = nseqs * ntokensperseq + flattened_length_nsp = nseqs + + def get_local_packet_size(): + return 4 * flattened_length_seq + flattened_length_nsp + + # Storing tensors in same order as arguments + def encode_packet(input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels): + + packet = torch.zeros([get_local_packet_size()], device=device, dtype=torch.int16) + + curr_pos = 0 + + packet[curr_pos:curr_pos + flattened_length_seq] = input_ids.view(-1)[:] + curr_pos += flattened_length_seq + + packet[curr_pos:curr_pos + flattened_length_seq] = segment_ids.view(-1)[:] + curr_pos += flattened_length_seq + + packet[curr_pos:curr_pos + flattened_length_seq] = input_mask.view(-1)[:] + curr_pos += flattened_length_seq + + packet[curr_pos:curr_pos + flattened_length_seq] = masked_lm_labels.view(-1)[:] + curr_pos += flattened_length_seq + + packet[curr_pos:curr_pos + flattened_length_nsp] = 
next_sentence_labels.view(-1)[:] + + return packet + + def decode_packet(flat_packet): + packet = flat_packet.view(ngpus, get_local_packet_size()) + + curr_pos = 0 + + input_ids_ = packet[:, curr_pos:curr_pos + flattened_length_seq].contiguous().view(ngpus, nseqs, ntokensperseq) + curr_pos += flattened_length_seq + + segment_ids_ = packet[:, curr_pos:curr_pos + flattened_length_seq].contiguous().view(ngpus, nseqs, + ntokensperseq) + curr_pos += flattened_length_seq + + input_mask_ = packet[:, curr_pos:curr_pos + flattened_length_seq].contiguous().view(ngpus, nseqs, ntokensperseq) + curr_pos += flattened_length_seq + + masked_lm_labels_ = packet[:, curr_pos:curr_pos + flattened_length_seq].contiguous().view(ngpus, nseqs, + ntokensperseq) + curr_pos += flattened_length_seq + + next_sentence_labels_ = packet[:, curr_pos:curr_pos + flattened_length_nsp].contiguous().view(ngpus, nseqs) + + return input_ids_, segment_ids_, input_mask_, masked_lm_labels_, next_sentence_labels_ + + tensors = encode_packet(input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels) + + tensors_ = torch.zeros([ngpus, get_local_packet_size()], device=device, dtype=torch.float16) + tensors_ = list(torch.split(tensors_, 1)) + # Address valueError: ProcessGroupGloo::allgather: invalid tensor size at index 0 (expected (2049), got (1, 2049)) + torch.distributed.all_gather(tensors_, tensors.view(torch.float16).unsqueeze(0)) + + tensors_ = torch.stack(tensors_).view(torch.int16).long() + input_ids_, segment_ids_, input_mask_, masked_lm_labels_, next_sentence_labels_ = decode_packet(tensors_) + + seqlens_, indices = torch.sort(input_mask_.sum(dim=2).view(-1), descending=True) + + if pad_size > 0: + input_ids_sorted = input_ids_.view(ngpus * nseqs, ntokensperseq)[indices[:], :] + segment_ids_sorted = segment_ids_.view(ngpus * nseqs, ntokensperseq)[indices[:], :] + input_mask_sorted = input_mask_.view(ngpus * nseqs, ntokensperseq)[indices[:], :] + masked_lm_labels_sorted = masked_lm_labels_.view(ngpus * nseqs, ntokensperseq)[indices[:], :] + next_sentence_labels_sorted = next_sentence_labels_.view(ngpus * nseqs)[indices[:]] + # we need to remove the empty sequences we added to the batch + valid_idx = seqlens_.view(nseqs, ngpus)[:, igpu] > 0 + input_ids_sorted = input_ids_sorted.view(nseqs, ngpus, ntokensperseq)[valid_idx, igpu, :].contiguous() + segment_ids_sorted = segment_ids_sorted.view(nseqs, ngpus, ntokensperseq)[valid_idx, igpu, :].contiguous() + input_mask_sorted = input_mask_sorted.view(nseqs, ngpus, ntokensperseq)[valid_idx, igpu, :].contiguous() + masked_lm_labels_sorted = masked_lm_labels_sorted.view(nseqs, ngpus, ntokensperseq)[valid_idx, igpu, + :].contiguous() + next_sentence_labels_sorted = next_sentence_labels_sorted.view(nseqs, ngpus)[valid_idx, igpu].contiguous() + else: + indices_ = indices.view(nseqs, ngpus)[:, igpu] + input_ids_sorted = input_ids_.view(nseqs * ngpus, ntokensperseq)[indices_, :].contiguous() + segment_ids_sorted = segment_ids_.view(nseqs * ngpus, ntokensperseq)[indices_, :].contiguous() + input_mask_sorted = input_mask_.view(nseqs * ngpus, ntokensperseq)[indices_, :].contiguous() + masked_lm_labels_sorted = masked_lm_labels_.view(nseqs * ngpus, ntokensperseq)[indices_, :].contiguous() + next_sentence_labels_sorted = next_sentence_labels_.view(nseqs * ngpus)[indices_].contiguous() + + torch.cuda.nvtx.range_pop() + return input_ids_sorted, segment_ids_sorted, input_mask_sorted, masked_lm_labels_sorted, next_sentence_labels_sorted diff --git 
a/nlp/language_model/bert_sample/pytorch/base/model/__init__.py b/nlp/language_model/bert_sample/pytorch/base/model/__init__.py new file mode 100644 index 000000000..d1311dcd9 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/model/__init__.py @@ -0,0 +1,19 @@ +from model.models.modeling import BertForPretraining +from model.models.modeling import BertConfig, BertForPreTraining + + +def create_model(config): + config.resume_step = 0 + + bert_config = BertConfig.from_json_file(config.bert_config_path) + bert_config.fused_gelu_bias = config.fused_gelu_bias + bert_config.dense_seq_output = config.dense_seq_output + bert_config.fuse_dropout = config.enable_fuse_dropout + bert_config.fused_dropout_add = config.fused_dropout_add + + # Padding for divisibility by 8 + if bert_config.vocab_size % 8 != 0: + bert_config.vocab_size += 8 - (bert_config.vocab_size % 8) + + model = BertForPreTraining(bert_config) + return bert_config, model \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/model/layers/__init__.py b/nlp/language_model/bert_sample/pytorch/base/model/layers/__init__.py new file mode 100644 index 000000000..1a0702a49 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/model/layers/__init__.py @@ -0,0 +1,6 @@ +import torch + +from .activations import bias_gelu_impl + +__all__ = ["bias_gelu_impl"] + diff --git a/nlp/language_model/bert_sample/pytorch/base/model/layers/activations.py b/nlp/language_model/bert_sample/pytorch/base/model/layers/activations.py new file mode 100644 index 000000000..96b48ab66 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/model/layers/activations.py @@ -0,0 +1,82 @@ +# coding=utf-8 +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math + +import torch +from torch import nn + +# Fused GeLU +torch._C._jit_set_profiling_mode(False) +torch._C._jit_set_profiling_executor(False) +torch._C._jit_override_can_fuse_on_cpu(True) +torch._C._jit_override_can_fuse_on_gpu(True) +# 1/sqrt(2*pi)-> 0.3989423 +# 1/sqrt(2) -> 0.70710678 +# sqrt(2/pi) -> 0.79788456 + +# this function is tanh approximation of gelu +# actual gelu is: +# x * 0.5 * (1.0 + torch.erf(x * 0.70710678)) +@torch.jit.script +def bias_gelu(bias, y): + x = bias + y + return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))) + +# gradient of tanh approximation of gelu +# gradient of actual gelu is: +# 0.5 * (1. 
+ torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) +@torch.jit.script +def bias_gelu_back(g, bias, y): + x = bias + y + tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)) + # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243 + ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out) + return ff*g + +class GeLUFunction(torch.autograd.Function): + @staticmethod + # bias is an optional argument + def forward(ctx, input, bias): + ctx.save_for_backward(input, bias) + return bias_gelu(bias, input) + + @staticmethod + def backward(ctx, grad_output): + input, bias = ctx.saved_tensors + tmp = bias_gelu_back(grad_output, bias, input) + return tmp, tmp + +bias_gelu_impl = GeLUFunction.apply + +# Swish +def swish(x): + return x * torch.sigmoid(x) + +# Fast GeLU +def fast_gelu(x): + pi = 3.1415926535897932 + cdf = 0.5 * (1.0 + torch.tanh((math.sqrt(2 / pi) * (x + 0.044715 * torch.pow(x, 3))))) + return x * cdf + +ACT2FN = { + "gelu": fast_gelu, + "bias_gelu": bias_gelu_impl, + "relu": torch.nn.functional.relu, + "swish": swish +} + diff --git a/nlp/language_model/bert_sample/pytorch/base/model/layers/embeddings.py b/nlp/language_model/bert_sample/pytorch/base/model/layers/embeddings.py new file mode 100644 index 000000000..096f76c0f --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/model/layers/embeddings.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from .layernorm import BertLayerNorm + +import torch +from torch import nn + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings. 
+ """ + + def __init__(self, config): + super(BertEmbeddings, self).__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, input_ids, token_type_ids=None): + position_ids = self.get_position_ids(input_ids) + + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + words_embeddings = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = words_embeddings + position_embeddings + token_type_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + def get_position_ids(self, input_ids): + seq_length = input_ids.size(1) + position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) + position_ids = position_ids.unsqueeze(0).expand_as(input_ids) + return position_ids diff --git a/nlp/language_model/bert_sample/pytorch/base/model/layers/layernorm.py b/nlp/language_model/bert_sample/pytorch/base/model/layers/layernorm.py new file mode 100644 index 000000000..936cc0f68 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/model/layers/layernorm.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import torch +from torch import nn + +class BertLayerNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-12): + """Construct a layernorm module in the TF style (epsilon inside the square root). + """ + super(BertLayerNorm, self).__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.bias = nn.Parameter(torch.zeros(hidden_size)) + self.variance_epsilon = eps + + def forward(self, x): + u = x.mean(-1, keepdim=True) + s = (x - u).pow(2).mean(-1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.variance_epsilon) + return self.weight * x + self.bias + diff --git a/nlp/language_model/bert_sample/pytorch/base/model/layers/padding.py b/nlp/language_model/bert_sample/pytorch/base/model/layers/padding.py new file mode 100644 index 000000000..a7aa84be0 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/model/layers/padding.py @@ -0,0 +1,125 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import math + +####################################################################################################################################################################### + +def unpad_input(out_, in_, indices): + out_[:,:] = in_[indices[:],:] + +def pad_input(out_, in_, indices): + out_[indices[:],:] = in_[:,:] + +def unpad_mask(out_, in_, indices): + out_[:] = in_.flatten()[indices[:]] + +####################################################################################################################################################################### + +def generate_mask(attention_mask, heads, pad=False, fuse_mask=True, unpad_fmha=False): + if unpad_fmha: + seqlen = attention_mask.sum(dim=1).to(dtype=torch.int32).flatten() + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + maxseqlen = seqlen.max().item() + b = attention_mask.shape[0] + cu_seqlens = torch.zeros(b+1, device=attention_mask.device, dtype=torch.int32) + cu_seqlens[1:] = torch.cumsum(seqlen, dim=0) + ntokens = cu_seqlens[-1].item() + return indices, attention_mask, seqlen, ntokens, cu_seqlens, seqlen, maxseqlen + + + seqlen = attention_mask.sum(dim=1).float().cpu() + if pad == False: + seqlen[:] = ((seqlen[:] + 16 - 1) / 16).floor()*16 + seqlen[seqlen < 16] = 16 + seqlen = seqlen.int() + ntokens = seqlen.sum().item() + else: + batch = attention_mask.shape[0] + maxseqlen = attention_mask.shape[1] + seqlen.fill_(maxseqlen) + seqlen = seqlen.int() + ntokens = batch * maxseqlen + + padded_mask = attention_mask.clone() + for i in range(len(seqlen)): + padded_mask[i,:seqlen[i]] = 1 + indices = torch.nonzero(padded_mask.flatten(), as_tuple=False).flatten() + + if pad==False and fuse_mask == True: + mask = torch.zeros([ntokens], device="cuda", dtype=torch.float16) + unpad_mask(mask, attention_mask, indices) + mask = (1 - mask) * -10000.0 + elif pad==False and fuse_mask == False: + padded_mask = (padded_mask.unsqueeze(1) * padded_mask.unsqueeze(2)).unsqueeze(1).half().repeat(1, heads, 1, 1) + indices_mask = torch.nonzero(padded_mask.flatten(), as_tuple=False).flatten() + mask = torch.zeros([len(indices_mask)], device="cuda", dtype=torch.float16) + unpad_mask(mask, padded_mask, indices_mask) + mask = (1 - mask) * -10000.0 + elif pad==True and fuse_mask == True: + mask = -10000.0 * (1 - attention_mask).half().view(-1) + elif pad==True and fuse_mask == False: + mask = -10000.0 * (1 - (attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2))).unsqueeze(1).half().repeat(1, heads, 1, 1).view(-1) + + return indices, mask, seqlen, ntokens, None, None, None + +####################################################################################################################################################################### + +class PadInput(torch.autograd.Function): + @staticmethod + def forward(ctx, input, indices, batch, maxseqlen, hidden, ntokens): + ctx.save_for_backward(indices) + ctx.hidden = hidden + ctx.ntokens = ntokens + ntokens = 
batch*maxseqlen + + output = torch.zeros([ntokens,hidden], device="cuda", dtype=torch.float16) + pad_input(output, input, indices) + + return output[:ntokens] + + @staticmethod + def backward(ctx, grad_output): + indices, = ctx.saved_tensors + + grad_input = torch.zeros([ctx.ntokens,ctx.hidden], device="cuda", dtype=torch.float16) + unpad_input(grad_input, grad_output, indices) + + return grad_input[:ctx.ntokens], None, None, None, None, None + +####################################################################################################################################################################### + +class UnpadInput(torch.autograd.Function): + @staticmethod + def forward(ctx, input, indices, batch, maxseqlen, hidden, ntokens): + ctx.save_for_backward(indices) + ctx.hidden = hidden + ctx.ntokens = batch*maxseqlen + + output = torch.zeros([ntokens, hidden], device="cuda", dtype=torch.float16) + unpad_input(output, input, indices) + + return output[:ntokens] + + @staticmethod + def backward(ctx, grad_output): + indices, = ctx.saved_tensors + + grad_input = torch.zeros([ctx.ntokens,ctx.hidden], device="cuda", dtype=torch.float16) + pad_input(grad_input, grad_output, indices) + + return grad_input[:ctx.ntokens], None, None, None, None, None + +####################################################################################################################################################################### diff --git a/nlp/language_model/bert_sample/pytorch/base/model/losses/__init__.py b/nlp/language_model/bert_sample/pytorch/base/model/losses/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nlp/language_model/bert_sample/pytorch/base/model/models/__init__.py b/nlp/language_model/bert_sample/pytorch/base/model/models/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nlp/language_model/bert_sample/pytorch/base/model/models/modeling.py b/nlp/language_model/bert_sample/pytorch/base/model/models/modeling.py new file mode 100644 index 000000000..d7c4e1886 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/model/models/modeling.py @@ -0,0 +1,1394 @@ +# coding=utf-8 +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
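The `unpad_input` / `pad_input` helpers in padding.py above are plain index copies: the first gathers the rows of a flattened [batch*seqlen, hidden] tensor that belong to real (non-padding) tokens into a dense buffer, and the second scatters them back. A minimal CPU round-trip sketch of that behavior follows; the toy shapes and float32 dtype are illustrative assumptions, whereas the originals allocate CUDA float16 buffers.

```python
import torch

def unpad_input(out_, in_, indices):
    # gather the rows of real tokens into a dense [ntokens, hidden] buffer
    out_[:, :] = in_[indices[:], :]

def pad_input(out_, in_, indices):
    # scatter the dense rows back to their original flattened positions
    out_[indices[:], :] = in_[:, :]

batch, seqlen, hidden = 2, 4, 8
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])
hidden_states = torch.randn(batch * seqlen, hidden)

# positions of real tokens in the flattened [batch*seqlen] layout
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
ntokens = indices.numel()

unpadded = torch.zeros(ntokens, hidden)
unpad_input(unpadded, hidden_states, indices)

repadded = torch.zeros(batch * seqlen, hidden)
pad_input(repadded, unpadded, indices)

# every real-token row survives the round trip; padding rows stay zero
assert torch.equal(repadded[indices], hidden_states[indices])
```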
+"""PyTorch BERT model.""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +import copy +import json +import logging +import math +import os +import sys +from functools import reduce +from io import open +from operator import mul + +import torch +import torch.nn.functional as F +from torch import nn +from torch.nn import CrossEntropyLoss +from torch.nn import LayerNorm as BertLayerNorm + +from model.layers.activations import ACT2FN +from model.layers.embeddings import BertEmbeddings +from utils import get_rank + + +logger = logging.getLogger(__name__) + +torch._C._jit_set_profiling_mode(False) +torch._C._jit_set_profiling_executor(False) +torch._C._jit_override_can_fuse_on_cpu(True) +torch._C._jit_override_can_fuse_on_gpu(True) + + +def remap_attn_names_tf(name): + if 'attention' in name: + ind = name.index("attention") + if 'self' in name and 'query' in name and 'kernel' in name: + name = name[:(ind + 1)] + ['multi_head_attention', 'q_weight'] + if 'self' in name and 'query' in name and 'bias' in name: + name = name[:(ind + 1)] + ['multi_head_attention', 'q_bias'] + if 'self' in name and 'key' in name and 'kernel' in name: + name = name[:(ind + 1)] + ['multi_head_attention', 'k_weight'] + if 'self' in name and 'key' in name and 'bias' in name: + name = name[:(ind + 1)] + ['multi_head_attention', 'k_bias'] + if 'self' in name and 'value' in name and 'kernel' in name: + name = name[:(ind + 1)] + ['multi_head_attention', 'v_weight'] + if 'self' in name and 'value' in name and 'bias' in name: + name = name[:(ind + 1)] + ['multi_head_attention', 'v_bias'] + if 'output' in name and 'dense' in name and 'kernel' in name: + name = name[:(ind + 1)] + ['multi_head_attention', 'out_proj_weight'] + if 'output' in name and 'dense' in name and 'bias' in name: + name = name[:(ind + 1)] + ['multi_head_attention', 'out_proj_bias'] + if 'output' in name and 'LayerNorm' in name: + name = name[:(ind + 1)] + ['layer_norm'] + name[-1:] + return name + + +def load_tf_weights_in_bert(model, tf_checkpoint_path, use_fast_mha=False): + """ Load tf checkpoints in a pytorch model + """ + try: + import re + import numpy as np + import tensorflow as tf + except ImportError: + print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see " + "https://www.tensorflow.org/install/ for installation instructions.") + raise + tf_path = os.path.abspath(tf_checkpoint_path) + if get_rank() == 0: + print("Converting TensorFlow checkpoint from {}".format(tf_path)) + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + if get_rank() == 0: + print("Loading TF weight {} with shape {}".format(name, shape)) + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + # MHA params need to be treated separately + if use_fast_mha: + mha_params = ['q_weight', 'q_bias', 'k_weight', 'k_bias', 'v_weight', 'v_bias', 'out_proj_weight', + 'out_proj_bias'] + else: + mha_params = [] + + for name, array in zip(names, arrays): + name = name.split('/') + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any(n in ["adam_v", "adam_m", "global_step", "LAMB", "LAMB_1", "beta1_power", "beta2_power"] for n in name): + if get_rank() == 0: + print("Skipping {}".format("/".join(name))) + continue + + if use_fast_mha: + name = remap_attn_names_tf(name) + + pointer = model + for m_name in name: + if re.fullmatch(r'[A-Za-z]+_\d+', m_name): + l = re.split(r'_(\d+)', m_name) + else: + l = [m_name] + if l[0] in mha_params: + pointer = getattr(pointer, l[0]) + elif l[0] == 'kernel' or l[0] == 'gamma': + pointer = getattr(pointer, 'weight') + elif l[0] == 'output_bias' or l[0] == 'beta': + pointer = getattr(pointer, 'bias') + elif l[0] == 'output_weights': + pointer = getattr(pointer, 'weight') + else: + pointer = getattr(pointer, l[0]) + if len(l) >= 2: + num = int(l[1]) + pointer = pointer[num] + if m_name[-11:] == '_embeddings': + pointer = getattr(pointer, 'weight') + elif m_name == 'kernel' or (m_name in mha_params and 'bias' not in m_name): + array = np.ascontiguousarray(np.transpose(array)) + + try: + assert pointer.shape == array.shape + except AssertionError as e: + # If copying smaller into larger, assume padded and ok + if reduce(mul, pointer.shape) > reduce(mul, array.shape): + if get_rank() == 0: + print("Initialize padded PyTorch weight {}".format(name)) + pointer.data.zero_() + + def generate_slices(): + slices = [] + for i in range(array.ndim): + slices.append(slice(0, array.shape[i], 1)) + return slices + + # pointer.data[generate_slices()] = torch.from_numpy(array) + pointer.data[generate_slices()] = torch.from_numpy(array) + else: + e.args += (pointer.shape, array.shape) + raise + else: + if get_rank() == 0: + print("Initialize PyTorch weight {}".format(name)) + pointer.data = torch.from_numpy(array) + return model + + +@torch.jit.script +def jit_dropout_add(x, residual, prob, is_training): + # type: (Tensor, Tensor, float, bool) -> Tensor + # out = F.dropout(x, p=prob, training=is_training) + out = torch.nn.functional.dropout(x, p=prob, training=is_training) + out = residual + out + return out + + +@torch.jit.script +def jit_bias_dropout_add(x, bias, residual, prob, is_training): + # type: (Tensor, Tensor, Tensor, float, bool) -> Tensor + # out = F.dropout(x, p=prob, training=is_training) + out = torch.nn.functional.dropout(x + bias, p=prob, training=is_training) + out = residual + out + return out + + +class LinearActivation(torch.nn.Linear): + r"""Fused Linear and activation Module. 
+ """ + __constants__ = ['bias'] + + def __init__(self, in_features, out_features, act='gelu', bias=True): + super(LinearActivation, self).__init__(in_features, out_features, bias) + self.act_fn = nn.Identity() # + self.biased_act_fn = None # + if isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode)): # For TorchScript + if bias and not 'bias' in act: # compatibility + act = 'bias_' + act # + self.biased_act_fn = ACT2FN[act] # + + else: + self.act_fn = ACT2FN[act] + else: + self.act_fn = act + + def forward(self, input): + if not self.bias is None: + return self.biased_act_fn(self.bias, nn.functional.linear(input, self.weight, None)) + else: + return self.act_fn(F.linear(input, self.weight, self.bias)) + + +class LinearDropoutAdd(torch.nn.Linear): + r"""Fused Linear and activation Module. + """ + __constants__ = ['bias'] + + def __init__(self, in_features, out_features, bias=True, p=0.1): + super(LinearDropoutAdd, self).__init__(in_features, out_features, bias) + self.p = p + + def forward(self, input, residual): + linear_out = nn.functional.linear(input, self.weight, None) + if self.bias is None: + result = jit_dropout_add(linear_out, residual, self.p, is_training=self.training) + else: + result = jit_bias_dropout_add(linear_out, self.bias.expand_as(residual), residual, self.p, + is_training=self.training) + return result + + +class BertConfig(object): + """Configuration class to store the configuration of a `BertModel`. + """ + + def __init__(self, + vocab_size_or_config_json_file, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02): + """Constructs BertConfig. + + Args: + vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. + hidden_size: Size of the encoder layers and the pooler layer. + num_hidden_layers: Number of hidden layers in the Transformer encoder. + num_attention_heads: Number of attention heads for each attention layer in + the Transformer encoder. + intermediate_size: The size of the "intermediate" (i.e., feed-forward) + layer in the Transformer encoder. + hidden_act: The non-linear activation function (function or string) in the + encoder and pooler. If string, "gelu", "relu" and "swish" are supported. + hidden_dropout_prob: The dropout probabilitiy for all fully connected + layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob: The dropout ratio for the attention + probabilities. + max_position_embeddings: The maximum sequence length that this model might + ever be used with. Typically set this to something large just in case + (e.g., 512 or 1024 or 2048). + type_vocab_size: The vocabulary size of the `token_type_ids` passed into + `BertModel`. + initializer_range: The sttdev of the truncated_normal_initializer for + initializing all weight matrices. 
+ """ + if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 + and isinstance(vocab_size_or_config_json_file, unicode)): + with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: + json_config = json.loads(reader.read()) + for key, value in json_config.items(): + self.__dict__[key] = value + elif isinstance(vocab_size_or_config_json_file, int): + self.vocab_size = vocab_size_or_config_json_file + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + else: + raise ValueError("First argument must be either a vocabulary size (int)" + "or the path to a pretrained model config file (str)") + + @classmethod + def from_dict(cls, json_object): + """Constructs a `BertConfig` from a Python dictionary of parameters.""" + config = BertConfig(vocab_size_or_config_json_file=-1) + for key, value in json_object.items(): + config.__dict__[key] = value + return config + + @classmethod + def from_json_file(cls, json_file): + """Constructs a `BertConfig` from a json file of parameters.""" + with open(json_file, "r", encoding='utf-8') as reader: + text = reader.read() + return cls.from_dict(json.loads(text)) + + def __repr__(self): + return str(self.to_json_string()) + + def to_dict(self): + """Serializes this instance to a Python dictionary.""" + output = copy.deepcopy(self.__dict__) + return output + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" + + +class BertSelfAttention(nn.Module): + def __init__(self, config): + super(BertSelfAttention, self).__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads)) + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.softmax = nn.Softmax(dim=-1) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def transpose_key_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 3, 1) + + def forward(self, hidden_states, attention_mask): + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_key_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + # Take the dot product between "query" and "key" to 
get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(2) + # attention_scores = attention_scores - (1.0 - attention_mask.unsqueeze(1).unsqueeze(2).float()) * 10000.0 + + # Normalize the attention scores to probabilities. + attention_probs = self.softmax(attention_scores) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # context_layer = torch.einsum("bnft,btnh->bfnh", attention_probs, mixed_value_layer.view(1,512,16,64)) + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + return context_layer + # return context_layer.reshape(context_layer.shape[:2] + (self.all_head_size,)) + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super(BertSelfOutput, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config): + super(BertAttention, self).__init__() + self.self = BertSelfAttention(config) + self.output = BertSelfOutput(config) + + def forward(self, input_tensor, attention_mask, *args, **kwargs): + self_output = self.self(input_tensor, attention_mask) + attention_output = self.output(self_output, input_tensor) + return attention_output + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super(BertIntermediate, self).__init__() + self.fused_gelu_bias = config.fused_gelu_bias + if config.fused_gelu_bias: + self.dense = LinearActivation(config.hidden_size, config.intermediate_size, act=config.hidden_act) + else: + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str) or ( + sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + if not self.fused_gelu_bias: + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super(BertOutput, self).__init__() + if not config.fused_dropout_add: + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + else: + self.dense = LinearDropoutAdd(config.intermediate_size, config.hidden_size, bias=True, + p=config.hidden_dropout_prob) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.p = config.hidden_dropout_prob + self.fused_dropout_add = config.fused_dropout_add + + def forward(self, hidden_states, input_tensor): + if 
self.fused_dropout_add: + hidden_states = self.dense(hidden_states, input_tensor) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + else: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config): + super(BertLayer, self).__init__() + self.attention = BertAttention(config) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward(self, hidden_states, attention_mask, seqlen=None, batch=None): + attention_output = self.attention(hidden_states, attention_mask, seqlen, batch) + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + def __init__(self, config): + super(BertEncoder, self).__init__() + layer = BertLayer(config) + self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]) + + def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, checkpoint_activations=False): + all_encoder_layers = [] + for layer_module in self.layer: + hidden_states = layer_module(hidden_states, attention_mask) + if output_all_encoded_layers: + all_encoder_layers.append(hidden_states) + if not output_all_encoded_layers: + all_encoder_layers.append(hidden_states) + return all_encoder_layers + + +class BertPooler(nn.Module): + def __init__(self, config): + super(BertPooler, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super(BertPredictionHeadTransform, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertLMPredictionHead, self).__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. 
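+ # Note: because the decoder weight is tied to the word-embedding matrix below, its output dimension also inherits the pad-to-multiple-of-8 vocab size applied in create_model / BertPreTrainedModel.__init__.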
+ self.decoder = nn.Linear(bert_model_embedding_weights.size(1), + bert_model_embedding_weights.size(0), + bias=False) + self.decoder.weight = bert_model_embedding_weights + self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + self.bias + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertOnlyMLMHead, self).__init__() + self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertOnlyNSPHead(nn.Module): + def __init__(self, config): + super(BertOnlyNSPHead, self).__init__() + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, pooled_output): + seq_relationship_score = self.seq_relationship(pooled_output) + return seq_relationship_score + + +class BertPreTrainingHeads(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertPreTrainingHeads, self).__init__() + self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) + self.seq_relationship = nn.Linear(config.hidden_size, 2) + self.dense_seq_output = config.dense_seq_output + + def forward(self, sequence_output, pooled_output, masked_lm_labels): + if self.dense_seq_output: + batch_size = sequence_output.shape[0] + seq_len = sequence_output.shape[1] + hidden_dim = sequence_output.shape[2] + sequence_flattened = torch.index_select(sequence_output.view(-1, sequence_output.shape[-1]), 0, + torch.nonzero(masked_lm_labels.view(-1) != 0, + as_tuple=False).squeeze()) + sequence_output = sequence_flattened + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + + return prediction_scores, seq_relationship_score + + +class BertPreTrainedModel(nn.Module): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. + """ + + def __init__(self, config, *inputs, **kwargs): + super(BertPreTrainedModel, self).__init__() + if not isinstance(config, BertConfig): + raise ValueError( + "Parameter config in `{}(config)` should be an instance of class `BertConfig`. " + "To create a model from a Google pretrained model use " + "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( + self.__class__.__name__, self.__class__.__name__ + )) + self.config = config + + # we want to make sure vocab size is padded to % 8 == 0 + if self.config.vocab_size % 8 != 0: + self.config.vocab_size += 8 - (self.config.vocab_size % 8) + if get_rank == 0: + print(f'Padded vocab_size to : {self.config.vocab_size}') + + def init_bert_weights(self, module): + """ Initialize the weights. 
+ """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, BertLayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + @classmethod + def from_pretrained(cls, pretrained_checkpoint, state_dict=None, cache_dir=None, + from_tf=False, config=None, *inputs, **kwargs): + """ + Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. + Download and cache the pre-trained model file if needed. + + It is used preprocessing stage. + + Params: + pretrained_model_name_or_path: either: + - a path or url to a pretrained model archive containing: + . `bert_config.json` a configuration file for the model + . `pytorch_model.bin` a PyTorch dump of a BertForPretraining instance + - a path or url to a pretrained model archive containing: + . `bert_config.json` a configuration file for the model + . `model.chkpt` a TensorFlow checkpoint + from_tf: should we load the weights from a locally saved TensorFlow checkpoint + cache_dir: an optional path to a folder in which the pre-trained models will be cached. + state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models + *inputs, **kwargs: additional input for the specific Bert class + (ex: num_labels for BertForSequenceClassification) + """ + logger.info("loading archive file {}".format(pretrained_checkpoint)) + assert config, "BERT configuration file must be provided to from_pretraining()" + logger.info("Model config {}".format(config)) + # Instantiate model. + model = cls(config, *inputs, **kwargs) + if state_dict is None and not from_tf: + state_dict = torch.load(pretrained_checkpoint, + map_location='cpu' if not torch.cuda.is_available() else None) + if from_tf: + # Directly load from a TensorFlow checkpoint + return load_tf_weights_in_bert(model, pretrained_checkpoint, use_fast_mha=config.fused_mha) + # Load from a PyTorch state_dict + old_keys = [] + new_keys = [] + # print(f'loading keys: {state_dict.keys()}') + for key in state_dict.keys(): + new_key = None + if 'gamma' in key: + new_key = key.replace('gamma', 'weight') + if 'beta' in key: + new_key = key.replace('beta', 'bias') + if new_key: + old_keys.append(key) + new_keys.append(new_key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + missing_keys = [] + unexpected_keys = [] + error_msgs = [] + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, '_metadata', None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + def load(module, prefix=''): + local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) + module._load_from_state_dict( + state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + '.') + + start_prefix = '' + if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()): + start_prefix = 'bert.' 
+ load(model, prefix=start_prefix) + if len(missing_keys) > 0: + logger.info("Weights of {} not initialized from pretrained model: {}".format( + model.__class__.__name__, missing_keys)) + if len(unexpected_keys) > 0: + logger.info("Weights from pretrained model not used in {}: {}".format( + model.__class__.__name__, unexpected_keys)) + if len(error_msgs) > 0: + raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( + model.__class__.__name__, "\n\t".join(error_msgs))) + return model + + +class BertModel(BertPreTrainedModel): + """BERT model ("Bidirectional Embedding Representations from a Transformer"). + + Params: + config: a BertConfig class instance with the configuration to build a new model + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`. + + Outputs: Tuple of (encoded_layers, pooled_output) + `encoded_layers`: controled by `output_all_encoded_layers` argument: + - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end + of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each + encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], + - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding + to the last attention block of shape [batch_size, sequence_length, hidden_size], + `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a + classifier pretrained on top of the hidden state associated to the first character of the + input (`CLS`) to train on the Next-Sentence task (see BERT's paper). 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = modeling.BertModel(config=config) + all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) + ``` + """ + + def __init__(self, config): + super(BertModel, self).__init__(config) + self.embeddings = BertEmbeddings(config) + self.encoder = BertEncoder(config) + self.pooler = BertPooler(config) + self.apply(self.init_bert_weights) + self.unpad = False + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True, + checkpoint_activations=False): + if attention_mask is None: + attention_mask = torch.ones_like(input_ids) + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + extended_attention_mask = attention_mask # .unsqueeze(1).unsqueeze(2) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + if self.unpad == False: + extended_attention_mask = extended_attention_mask.to( + dtype=next(self.parameters()).dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + embedding_output = self.embeddings(input_ids, token_type_ids) + encoded_layers = self.encoder(embedding_output, + extended_attention_mask, + output_all_encoded_layers=output_all_encoded_layers, + checkpoint_activations=checkpoint_activations) + sequence_output = encoded_layers[-1] + pooled_output = self.pooler(sequence_output) + if not output_all_encoded_layers: + encoded_layers = encoded_layers[-1] + return encoded_layers, pooled_output + + +class BertForPretraining(BertPreTrainedModel): + """BERT model with pre-training heads. + This module comprises the BERT model followed by the two pre-training heads: + - the masked language modeling head, and + - the next sentence classification head. + + Params: + config: a BertConfig class instance with the configuration to build a new model. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. 
It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] + with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss + is only computed for the labels set in [0, ..., vocab_size] + `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size] + with indices selected in [0, 1]. + 0 => next sentence is the continuation, 1 => next sentence is a random sentence. + + Outputs: + if `masked_lm_labels` and `next_sentence_label` are not `None`: + Outputs the total_loss which is the sum of the masked language modeling loss and the next + sentence classification loss. + if `masked_lm_labels` or `next_sentence_label` is `None`: + Outputs a tuple comprising + - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and + - the next sentence classification logits of shape [batch_size, 2]. + + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = BertForPretraining(config) + masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + + def __init__(self, config): + super(BertForPretraining, self).__init__(config) + self.bert = BertModel(config) + self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) + self.apply(self.init_bert_weights) + self.dense_seq_output = config.dense_seq_output + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, + next_sentence_label=None, checkpoint_activations=False): + sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, + output_all_encoded_layers=False, + checkpoint_activations=checkpoint_activations) + # if dense_seq_output, prediction scores returned by this function is already masked out with masked_lm_labels, and first dimension is flattened + prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output, masked_lm_labels) + + if masked_lm_labels is not None and next_sentence_label is not None: + loss_fct = CrossEntropyLoss(ignore_index=0) + + masked_lm_labels_flat = masked_lm_labels.view(-1) + + if self.dense_seq_output: + masked_lm_labels_dense = masked_lm_labels_flat[masked_lm_labels_flat != 0] + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels_dense) + else: + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) + nsp_loss_fct = CrossEntropyLoss(ignore_index=-1) + next_sentence_loss = nsp_loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) + # print("loss is {} {}".format(masked_lm_loss, next_sentence_loss)) + total_loss = masked_lm_loss + next_sentence_loss + total_loss = total_loss.float() + + # Masked Language Model Accuracy + mlm_labels = masked_lm_labels_flat[masked_lm_labels_flat != 0] + if not 
self.dense_seq_output: + prediction_scores_flat = prediction_scores.view(-1, prediction_scores.shape[-1]) + mlm_predictions_scores = prediction_scores_flat[masked_lm_labels_flat != 0] + mlm_predictions = mlm_predictions_scores.argmax(dim=-1) + else: + mlm_predictions = prediction_scores.argmax(dim=-1) + + mlm_acc = (mlm_predictions == mlm_labels).sum(dtype=torch.float) / mlm_labels.numel() + + return total_loss, mlm_acc, mlm_labels.numel() + else: # TODO: Handle this path for dense sequence output as well + return prediction_scores, seq_relationship_score + + +class BertForPreTrainingModelOnly(nn.Module): + def __init__(self, config): + super(BertForPreTrainingModelOnly, self).__init__() + self.bert = BertModel(config) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, checkpoint_activations=False): + sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, + output_all_encoded_layers=False, + checkpoint_activations=checkpoint_activations) + return sequence_output, pooled_output + + +class BertForPreTrainingHeadsOnly(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertForPreTrainingHeadsOnly, self).__init__() + self.cls = BertPreTrainingHeads(config, bert_model_embedding_weights) + self.dense_seq_output = config.dense_seq_output + self.config = config + + def forward(self, sequence_output, pooled_output, masked_lm_labels=None, next_sentence_label=None): + # if dense_seq_output, prediction scores returned by this function is already masked out with masked_lm_labels, and first dimension is flattened + prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output, masked_lm_labels) + + if masked_lm_labels is not None and next_sentence_label is not None: + loss_fct = CrossEntropyLoss(ignore_index=0) + masked_lm_labels_flat = masked_lm_labels.view(-1) + if self.dense_seq_output: + masked_lm_labels_dense = masked_lm_labels_flat[masked_lm_labels_flat != 0] + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels_dense) + else: + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) + nsp_loss_fct = CrossEntropyLoss(ignore_index=-1) + next_sentence_loss = nsp_loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) + if torch.any(torch.isnan(masked_lm_loss)) or torch.any(torch.isnan(next_sentence_loss)): + print("masked_lm_loss or next_sentence_loss exploded (corrupted by Inf or Nan)") + print(masked_lm_loss, next_sentence_loss) + # return None + sys.exit(1) + # print("loss is {} {}".format(masked_lm_loss, next_sentence_loss)) + total_loss = (masked_lm_loss + next_sentence_loss).float() + # print("masked",masked_lm_loss) + # print("nsp",next_sentence_loss) + # total_loss=total_loss + # Masked Language Model Accuracy + valid_mask = (masked_lm_labels_flat != 0).int() + num_valid = valid_mask.sum(dtype=torch.int64) + + # mlm_labels = masked_lm_labels_flat[masked_lm_labels_flat != 0] + + if not self.dense_seq_output: + mlm_labels = masked_lm_labels_flat + prediction_scores_flat = prediction_scores.view(-1, prediction_scores.shape[-1]) + mlm_predictions_scores = prediction_scores_flat + mlm_predictions = mlm_predictions_scores.argmax(dim=-1) + mlm_acc = ((mlm_predictions == mlm_labels) * valid_mask).sum(dtype=torch.float) / num_valid + + + # prediction_scores_flat = prediction_scores.view(-1, prediction_scores.shape[-1]) + # mlm_predictions_scores = 
prediction_scores_flat[masked_lm_labels_flat != 0] + # mlm_predictions = mlm_predictions_scores.argmax(dim=-1) + else: + # mlm_predictions = prediction_scores.argmax(dim=-1).view(-1) + mlm_labels = masked_lm_labels_flat[masked_lm_labels_flat != 0] + mlm_predictions = prediction_scores.argmax(dim=-1).view(-1) + mlm_acc = (mlm_predictions == mlm_labels).sum(dtype=torch.float) / num_valid + return total_loss, mlm_acc, num_valid + # mlm_acc = (mlm_predictions == mlm_labels).sum(dtype=torch.float) / mlm_labels.numel() + # print((mlm_predictions == mlm_labels).sum(dtype=torch.float),num_valid,mlm_acc) + # return total_loss, mlm_acc, mlm_labels.numel() + + else: # TODO: Handle this path for dense sequence output as well + return prediction_scores, seq_relationship_score + + +class BertForPreTraining(BertPreTrainedModel): + def __init__(self, config): + super(BertForPreTraining, self).__init__(config) + self.bert_model_segment = BertForPreTrainingModelOnly(config) + self.heads_only_segment = BertForPreTrainingHeadsOnly(config, + self.bert_model_segment.bert.embeddings.word_embeddings.weight) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, + next_sentence_label=None, checkpoint_activations=False): + assert checkpoint_activations == False, "cuda-graphs: reattach passing of checkpoint_activations when function.py starts handling non-Tensor inputs" + sequence_output, pooled_output = self.bert_model_segment(input_ids, token_type_ids, attention_mask) + return self.heads_only_segment(sequence_output, pooled_output, masked_lm_labels, next_sentence_label) + + +class BertForMaskedLM(BertPreTrainedModel): + """BERT model with the masked language modeling head. + This module comprises the BERT model followed by the masked language modeling head. + + Params: + config: a BertConfig class instance with the configuration to build a new model. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] + with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss + is only computed for the labels set in [0, ..., vocab_size] + + Outputs: + if `masked_lm_labels` is not `None`: + Outputs the masked language modeling loss. + if `masked_lm_labels` is `None`: + Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = BertForMaskedLM(config) + masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask) + ``` + """ + + def __init__(self, config): + super(BertForMaskedLM, self).__init__(config) + self.bert = BertModel(config) + self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, + checkpoint_activations=False): + sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, + output_all_encoded_layers=False) + prediction_scores = self.cls(sequence_output) + + if masked_lm_labels is not None: + # loss_fct = CrossEntropyLoss(ignore_index=-1) + loss_fct = CrossEntropyLoss(ignore_index=0) + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) + return masked_lm_loss + else: + return prediction_scores + + +class BertForNextSentencePrediction(BertPreTrainedModel): + """BERT model with next sentence prediction head. + This module comprises the BERT model followed by the next sentence classification head. + + Params: + config: a BertConfig class instance with the configuration to build a new model. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] + with indices selected in [0, 1]. + 0 => next sentence is the continuation, 1 => next sentence is a random sentence. + + Outputs: + if `next_sentence_label` is not `None`: + Outputs the total_loss which is the sum of the masked language modeling loss and the next + sentence classification loss. + if `next_sentence_label` is `None`: + Outputs the next sentence classification logits of shape [batch_size, 2]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = BertForNextSentencePrediction(config) + seq_relationship_logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + + def __init__(self, config): + super(BertForNextSentencePrediction, self).__init__(config) + self.bert = BertModel(config) + self.cls = BertOnlyNSPHead(config) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, + checkpoint_activations=False): + _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, + output_all_encoded_layers=False) + seq_relationship_score = self.cls(pooled_output) + + if next_sentence_label is not None: + # loss_fct = CrossEntropyLoss(ignore_index=-1) + loss_fct = CrossEntropyLoss(ignore_index=0) + next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) + return next_sentence_loss + else: + return seq_relationship_score + + +class BertForSequenceClassification(BertPreTrainedModel): + """BERT model for classification. + This module is composed of the BERT model with a linear layer on top of + the pooled output. + + Params: + `config`: a BertConfig class instance with the configuration to build a new model. + `num_labels`: the number of classes for the classifier. Default = 2. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] + with indices selected in [0, ..., num_labels]. + + Outputs: + if `labels` is not `None`: + Outputs the CrossEntropy classification loss of the output with the labels. + if `labels` is `None`: + Outputs the classification logits of shape [batch_size, num_labels]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + num_labels = 2 + + model = BertForSequenceClassification(config, num_labels) + logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + + def __init__(self, config, num_labels): + super(BertForSequenceClassification, self).__init__(config) + self.num_labels = num_labels + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, num_labels) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False): + _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + return loss + else: + return logits + + +class BertForMultipleChoice(BertPreTrainedModel): + """BERT model for multiple choice tasks. + This module is composed of the BERT model with a linear layer on top of + the pooled output. + + Params: + `config`: a BertConfig class instance with the configuration to build a new model. + `num_choices`: the number of classes for the classifier. Default = 2. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] + with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` + and type 1 corresponds to a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] + with indices selected in [0, ..., num_choices]. + + Outputs: + if `labels` is not `None`: + Outputs the CrossEntropy classification loss of the output with the labels. + if `labels` is `None`: + Outputs the classification logits of shape [batch_size, num_labels]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]]) + input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]]) + token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]]) + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + num_choices = 2 + + model = BertForMultipleChoice(config, num_choices) + logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + + def __init__(self, config, num_choices): + super(BertForMultipleChoice, self).__init__(config) + self.num_choices = num_choices + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, 1) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False): + flat_input_ids = input_ids.view(-1, input_ids.size(-1)) + flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) + flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) + _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, + output_all_encoded_layers=False) + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, self.num_choices) + + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + return loss + else: + return reshaped_logits + + +class BertForTokenClassification(BertPreTrainedModel): + """BERT model for token-level classification. + This module is composed of the BERT model with a linear layer on top of + the full hidden state of the last layer. + + Params: + `config`: a BertConfig class instance with the configuration to build a new model. + `num_labels`: the number of classes for the classifier. Default = 2. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length] + with indices selected in [0, ..., num_labels]. + + Outputs: + if `labels` is not `None`: + Outputs the CrossEntropy classification loss of the output with the labels. + if `labels` is `None`: + Outputs the classification logits of shape [batch_size, sequence_length, num_labels]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + num_labels = 2 + + model = BertForTokenClassification(config, num_labels) + logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + + def __init__(self, config, num_labels): + super(BertForTokenClassification, self).__init__(config) + self.num_labels = num_labels + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, num_labels) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False): + sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels)[active_loss] + active_labels = labels.view(-1)[active_loss] + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + return loss + else: + return logits + + +class BertForQuestionAnswering(BertPreTrainedModel): + """BERT model for Question Answering (span extraction). + This module is composed of the BERT model with a linear layer on top of + the sequence output that computes start_logits and end_logits + + Params: + `config`: a BertConfig class instance with the configuration to build a new model. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size]. + Positions are clamped to the length of the sequence and position outside of the sequence are not taken + into account for computing the loss. + `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size]. + Positions are clamped to the length of the sequence and position outside of the sequence are not taken + into account for computing the loss. + + Outputs: + if `start_positions` and `end_positions` are not `None`: + Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions. 
+ if `start_positions` or `end_positions` is `None`: + Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end + position tokens of shape [batch_size, sequence_length]. + + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = BertForQuestionAnswering(config) + start_logits, end_logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + + def __init__(self, config): + super(BertForQuestionAnswering, self).__init__(config) + self.bert = BertModel(config) + # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version + # self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.qa_outputs = nn.Linear(config.hidden_size, 2) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None, + checkpoint_activations=False): + sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1) + end_logits = end_logits.squeeze(-1) + + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions.clamp_(0, ignored_index) + end_positions.clamp_(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + return total_loss + else: + return start_logits, end_logits diff --git a/nlp/language_model/bert_sample/pytorch/base/optimizers/__init__.py b/nlp/language_model/bert_sample/pytorch/base/optimizers/__init__.py new file mode 100644 index 000000000..8ff379afc --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/optimizers/__init__.py @@ -0,0 +1 @@ +from .factory import create_optimizer \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/optimizers/factory.py b/nlp/language_model/bert_sample/pytorch/base/optimizers/factory.py new file mode 100644 index 000000000..5d9a33fda --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/optimizers/factory.py @@ -0,0 +1,33 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. 
No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +from torch.optim import AdamW + +from .lamb import Lamb + + +def create_optimizer(name: str, params, config): + name = name.lower() + + if name == "lamb": + return Lamb( + params, lr=config.learning_rate, + betas=(config.opt_lamb_beta_1, config.opt_lamb_beta_2), eps=1e-6, + weight_decay=config.weight_decay_rate, adam=False + ) + + if name == "adamw": + return AdamW( + params, lr=config.learning_rate, + betas=(config.opt_lamb_beta_1, config.opt_lamb_beta_2), + weight_decay=config.weight_decay_rate + ) + + raise RuntimeError(f"Unknown optimizer {name}.") diff --git a/nlp/language_model/bert_sample/pytorch/base/optimizers/lamb.py b/nlp/language_model/bert_sample/pytorch/base/optimizers/lamb.py new file mode 100644 index 000000000..287deb088 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/optimizers/lamb.py @@ -0,0 +1,102 @@ +import torch +from torch.optim import Optimizer + + +class Lamb(Optimizer): + r"""Implements Lamb algorithm. + It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-6) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + adam (bool, optional): always use trust ratio = 1, which turns this into + Adam. Useful for comparison purposes. + .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes: + https://arxiv.org/abs/1904.00962 + """ + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, + weight_decay=0, adam=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay) + self.adam = adam + super(Lamb, self).__init__(params, defaults) + + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss.
+ """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.') + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + # Decay the first and second moment running average coefficient + # m_t + exp_avg.mul_(beta1).add_(1 - beta1, grad) + # v_t + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + + # Paper v3 does not use debiasing. + # bias_correction1 = 1 - beta1 ** state['step'] + # bias_correction2 = 1 - beta2 ** state['step'] + # Apply bias to lr to avoid broadcast. + step_size = group['lr'] # * math.sqrt(bias_correction2) / bias_correction1 + + weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10) + + adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps']) + if group['weight_decay'] != 0: + adam_step.add_(group['weight_decay'], p.data) + + adam_norm = adam_step.pow(2).sum().sqrt() + if weight_norm == 0 or adam_norm == 0: + trust_ratio = 1 + else: + trust_ratio = weight_norm / adam_norm + state['weight_norm'] = weight_norm + state['adam_norm'] = adam_norm + state['trust_ratio'] = trust_ratio + if self.adam: + trust_ratio = 1 + + p.data.add_(-step_size * trust_ratio, adam_step) + + return loss \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/prepare.py b/nlp/language_model/bert_sample/pytorch/base/prepare.py new file mode 100644 index 000000000..28fa7d254 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/prepare.py @@ -0,0 +1,288 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
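The `create_optimizer` factory and the `Lamb` optimizer above are selected by name from the training config. Below is a minimal usage sketch, not part of the patch: it assumes the `optimizers` package above is importable (e.g. when run from the `base/` directory), a PyTorch version that still accepts the legacy `add_(scalar, tensor)` overloads used in `Lamb.step()`, and placeholder config values rather than the real training hyperparameters.

```python
# Hedged usage sketch for the create_optimizer factory defined above.
# The config fields mirror the ones the factory reads; the values are placeholders.
from types import SimpleNamespace

import torch
import torch.nn as nn

from optimizers import create_optimizer  # assumes base/ is the working directory

config = SimpleNamespace(
    learning_rate=4e-4,
    opt_lamb_beta_1=0.9,
    opt_lamb_beta_2=0.999,
    weight_decay_rate=0.01,
)

model = nn.Linear(768, 2)
optimizer = create_optimizer("lamb", model.parameters(), config)  # or "adamw"

loss = model(torch.randn(4, 768)).sum()
loss.backward()
optimizer.step()  # Lamb.step() scales each layer's update by its trust ratio
```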
+ + +import os +import subprocess +from typing import List, Optional, Union +import os.path as ospath +from argparse import ArgumentParser, REMAINDER +from functools import partial, wraps +from typing import NamedTuple + + +# ========================================================= +# Define arguments +# ========================================================= + +def parse_args(): + parser = ArgumentParser("Prepare") + parser.add_argument("--name", type=str, default=None, help="The name of submitter") + parser.add_argument("--data_dir", type=str, default=None, help="Data direction") + # WARN: Don't delete this argument + parser.add_argument('other_args', nargs=REMAINDER) + args = parser.parse_args() + return args + + +# ========================================================= +# Constants +# ========================================================= +args = parse_args() + +APT = "apt" +PIP_INSTALL = "pip3 install " +PYTHON = "python3" +DOWNLOAD = "wget" + +APT_PKGS = ["numactl"] +SUPPORTED_WHEELS = ["torch", "apex"] + + +MODEL_DIR = ospath.abspath( + ospath.join( + __file__, + "../../" + ) +) +CURRENT_MODEL_NAME = ospath.basename(MODEL_DIR) +PROJ_DIR = ospath.abspath( + ospath.join( + MODEL_DIR, + "../../" + ) +) + +PACKAGE_DIR_NAME = "packages" +SOURCE_DIR_NAME = "csrc" +SDK_DIR_NAME = "sdk_installers" +PACKAGE_LIST_NAME = "files.txt" +SDK_LIST_NAME = "files.txt" + +DATA_SHARDS = 2048 +TRAIN_DIR_NAME = f"{DATA_SHARDS}_shards_uncompressed" +CONVERTED_CHECKPOINT_NAME = "model.ckpt-28252.pt" + +SUBMITTER = args.name +DATA_DIR = args.data_dir +SUBMITTER_DIR = ospath.join(PROJ_DIR, SUBMITTER, CURRENT_MODEL_NAME) +TRAIN_DATA_DIR = ospath.join(DATA_DIR, TRAIN_DIR_NAME) + +EXTENSION_SOURCE_DIR_ENV = "EXTENSION_SOURCE_DIR" +SDK_ARGUMENTS_ENV = "SDK_ARGUMENTS" + + +# ========================================================= +# Helpers +# ========================================================= + +class ShellResult(NamedTuple): + + returncode: int + result: Union[subprocess.CompletedProcess, str] = None + + +def _exec_cmd(cmd: Union[str, List], *args, **kwargs): + args_str = " ".join(args) + args_str += " ".join([f"{name}={value}" for name, value in kwargs.items()]) + cmd_str = cmd + if isinstance(cmd, (tuple, list)): + cmd_str = " ".join(cmd) + print(f"Commands: {cmd_str}") + + result = subprocess.run(cmd, *args, **kwargs, stdout=None, stderr=subprocess.STDOUT) + + if result.returncode > 0: + msg = f"ERROR: {cmd} {args_str}" + return ShellResult(returncode=result.returncode, result=msg) + + return ShellResult(returncode=result.returncode, result=result) + + +def exec_shell_cmd(cmd: str, *args, **kwargs): + return _exec_cmd(cmd, shell=True, *args, **kwargs) + + +def exec_shell_cmds(cmds: List[str], *args, **kwargs): + cmds = "\n".join(cmds) + return exec_shell_cmd(cmds, *args, **kwargs) + + +def warning(*args, **kwargs): + print("WARN:", *args, **kwargs) + + +def find_file_by_match(dir, file_patterns): + if ospath.exists(dir): + dir_files = os.listdir(dir) + else: + return file_patterns + + for i, pattern in enumerate(file_patterns): + pattern = pattern.strip() + if len(pattern) <= 1 or not pattern.endswith("*"): + continue + + pattern = pattern[:-1] + + for dir_file in dir_files: + if dir_file.startswith(pattern): + file_patterns[i] = dir_file + break + return file_patterns + +# ========================================================= +# Pipelines +# ========================================================= + +def install_apt_packages(): + return exec_shell_cmd(f"{APT} install -y {' 
'.join(APT_PKGS)}") + + +def prepare_data(): + checked_files = ["2048_shards_uncompressed", "bert_config.json", "eval_set_uncompressed", "model.ckpt-28252.pt"] + path_join = ospath.join + exist_preprocessed_data = all([ospath.exists(path_join(DATA_DIR, name)) for name in checked_files]) + + if exist_preprocessed_data: + return + + # Check last download file + origin_data = path_join(DATA_DIR, "phase1", "model.ckpt-28252.meta") + need_download_dataset = "-s" if ospath.exists(origin_data) else "" + cmds = [ + f"cd {path_join(MODEL_DIR, 'pytorch/data_preprocessing')}", + f"bash prepare_data.sh -o {DATA_DIR} -p {DATA_SHARDS} {need_download_dataset}" + ] + return exec_shell_cmds(cmds) + + +def install_sdk(): + def get_sdk_args(): + sdk_args = dict() + if SDK_ARGUMENTS_ENV in os.environ: + sdk_args_str = os.environ[SDK_ARGUMENTS_ENV] + + sdk_args_segments = sdk_args_str.split(';') + for sdk_arg in sdk_args_segments: + sdk, arg = sdk_arg.split('=', maxsplit=1) + sdk_args[sdk] = arg + return sdk_args + + sdk_args_dict = get_sdk_args() + print("SDK Arguments:", sdk_args_dict) + + sdk_installer_dir = ospath.join(SUBMITTER_DIR, SDK_DIR_NAME) + if not ospath.exists(sdk_installer_dir): + sdk_installer_dir = ospath.join(PROJ_DIR, SUBMITTER, SDK_DIR_NAME) + if not ospath.exists(sdk_installer_dir): + warning("Not found sdk\'s dir, skip run installer") + return + + # Find sdk installers + sdk_list_file = ospath.join(sdk_installer_dir, SDK_LIST_NAME) + if ospath.exists(sdk_list_file): + with open(sdk_list_file) as f: + sdk_installers = f.readlines() + sdk_installers_pattern = [sdk.strip() for sdk in sdk_installers] + sdk_installers = find_file_by_match(sdk_installer_dir, sdk_installers_pattern) + else: + sdk_installers = os.listdir(sdk_installer_dir) + sdk_installers.sort() + + sdk_installers_cmds = [] + for sdk in sdk_installers: + if sdk.endswith(".run"): + sdk_arg = "" + for sdk_args_key in sdk_args_dict: + if sdk.startswith(sdk_args_key): + sdk_arg = sdk_args_dict[sdk_args_key] + sdk_installers_cmds.append("sh " + ospath.join(sdk_installer_dir, sdk) + f" {sdk_arg}") + + if len(sdk_installers_cmds) == 0: + warning("Not found installer in", sdk_installer_dir, ", skip run installer") + return + + return exec_shell_cmds(sdk_installers_cmds) + + +def install_requirements(): + return exec_shell_cmd( + f"{PIP_INSTALL} -r requirements.txt" + ) + + +def install_wheel_pkgs(filter_packages: bool=False): + wheel_dir = ospath.join(SUBMITTER_DIR, PACKAGE_DIR_NAME) + if not ospath.exists(wheel_dir): + warning("Not found package\'s dir, skip install wheel package") + return + + # Find packages + package_list_file = ospath.join(wheel_dir, PACKAGE_LIST_NAME) + if ospath.exists(package_list_file): + with open(package_list_file) as f: + packages = f.readlines() + packages_pattern = [pkg.strip() for pkg in packages] + packages = find_file_by_match(wheel_dir, packages_pattern) + else: + packages = os.listdir(wheel_dir) + packages.sort() + + def _filter_packages(name: str): + for support_pkg in SUPPORTED_WHEELS: + if name.startswith(support_pkg): + return True + return False + + if filter_packages: + packages = list(filter(_filter_packages, packages)) + + if len(packages) == 0: + warning("Not found wheel packages in", wheel_dir) + return + + install_packages_cmds = [f"{PIP_INSTALL} {ospath.join(wheel_dir, pkg)}" for pkg in packages] + return exec_shell_cmds(install_packages_cmds) + + +def install_extensions(): + source_dir = ospath.join(SUBMITTER_DIR, SOURCE_DIR_NAME) + if not ospath.exists(source_dir): + warning("Not found 
source dir:", source_dir) + return + return exec_shell_cmd( + f"{EXTENSION_SOURCE_DIR_ENV}={source_dir} {PYTHON} setup.py build && cp build/lib*/ext_ops* ./" + ) + + +def pipelines(): + return [ + # TODO: Uncomment + install_apt_packages, + install_requirements, + install_sdk, + partial(install_wheel_pkgs, filter_packages=True), + install_extensions, + # prepare_data, + ] + + +if __name__ == '__main__': + for pipeline in pipelines(): + result = pipeline() + if result is not None and result.returncode > 0: + print(result.result) + print("Fail:", pipeline) + exit(result.returncode) + + + + + diff --git a/nlp/language_model/bert_sample/pytorch/base/requirements.txt b/nlp/language_model/bert_sample/pytorch/base/requirements.txt new file mode 100644 index 000000000..1dd53e662 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/requirements.txt @@ -0,0 +1,3 @@ +# progress bars in model download and training scripts +h5py +psutil \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/run_pretraining.py b/nlp/language_model/bert_sample/pytorch/base/run_pretraining.py new file mode 100644 index 000000000..cb964a0ee --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/run_pretraining.py @@ -0,0 +1,173 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
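prepare.py above drives environment setup as an ordered list of pipeline callables; each returns a `ShellResult` (or `None`), and the first non-zero `returncode` aborts the run. The following self-contained sketch illustrates that pattern with made-up step names; it is not taken from the patch.

```python
# Illustrative sketch of the "pipelines" pattern used by prepare.py above:
# each step is a callable returning an object with a returncode, and the
# driver stops at the first failing step. All names below are hypothetical.
import sys
from typing import Callable, List, NamedTuple, Optional


class StepResult(NamedTuple):
    returncode: int
    message: str = ""


def install_packages() -> StepResult:
    return StepResult(0, "ok")


def download_dataset() -> StepResult:
    return StepResult(1, "simulated failure")


def run_pipeline(steps: List[Callable[[], Optional[StepResult]]]) -> int:
    for step in steps:
        result = step()
        if result is not None and result.returncode > 0:
            print("Fail:", step.__name__, "-", result.message)
            return result.returncode
    return 0


if __name__ == "__main__":
    sys.exit(run_pipeline([install_packages, download_dataset]))
```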
+ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +from copy import copy +import os +import random +import time +from concurrent.futures import ProcessPoolExecutor + +import numpy as np +import torch +from torch.cuda.amp import GradScaler + +import utils +from dataloaders import WorkerInitializer +from dataloaders.dataloader import PretrainingDataloaders +from train.evaluator import Evaluator +from train.trainer import Trainer +from train.training_state import TrainingState +from train.event import TrainingEventCompose, TrainingLogger + + +logger = None + + +def main(): + import config + parser = argparse.ArgumentParser("Bert") + config.activate_config_env(parser=parser, with_config_env_name=True) + + if config.use_env and 'LOCAL_RANK' in os.environ: + config.local_rank = int(os.environ['LOCAL_RANK']) + + device, num_gpus = utils.init_dist_training_env(config) + config.device = device + config.n_gpu = num_gpus + + utils.check_config(config) + + try: + from dltest import show_training_arguments + show_training_arguments(config) + except: + pass + + interface = config.training_event(config) + events = [ + TrainingLogger(config, log_freq=config.log_freq) + ] + training_event = TrainingEventCompose(interface, events) + training_event.launch() + + global logger + logger = events[0].logger + + utils.barrier() + training_event.on_init_start() + init_start_time = logger.previous_log_time + worker_seeds, shuffling_seeds = utils.setup_seeds(config.seed, config.num_epochs_to_generate_seeds_for, device) + if torch.distributed.is_initialized(): + worker_seed = worker_seeds[torch.distributed.get_rank()] + else: + worker_seed = worker_seeds[0] + + random.seed(worker_seed) + np.random.seed(worker_seed) + torch.manual_seed(worker_seed) + worker_init = WorkerInitializer.default(worker_seed) + + pool = ProcessPoolExecutor(1) + evaluator = Evaluator( + config.eval_dir, + proc_pool=pool, + global_batch_size=utils.global_batch_size(config), + max_steps=config.max_steps, + worker_init=worker_init, + use_cache=config.cache_eval_data + ) + grad_scaler = GradScaler(init_scale=float(os.getenv("INIT_LOSS_SCALE", 2 ** 20)), growth_interval=2000) + training_state = TrainingState() + trainer = Trainer(training_event, evaluator, training_state, grad_scaler, device=device) + training_state._trainer = trainer + + utils.barrier() + trainer.init() + + utils.barrier() + init_evaluation_start = time.time() + eval_loss, eval_mlm_acc = evaluator.evaluate(trainer) + training_state.eval_loss = eval_loss + training_state.eval_mlm_accuracy = eval_mlm_acc + init_evaluation_end = time.time() + init_evaluation_info = dict( + eval_loss = eval_loss, + eval_mlm_accuracy = eval_mlm_acc, + time = init_evaluation_end - init_evaluation_start + ) + training_event.on_init_evaluate(init_evaluation_info) + + if not config.do_train: + return config, training_state, init_evaluation_info["time"] + + dataloader = PretrainingDataloaders( + config.train_dir, + max_predictions_per_seq=config.max_predictions_per_seq, + batch_size=config.train_batch_size, + seed=shuffling_seeds, num_files_per_iter=1, + worker_init=worker_init, pool=pool, + ) + + training_event.on_init_end() + init_end_time = logger.previous_log_time + training_state.init_time = (init_end_time - init_start_time) / 1e+3 + + utils.barrier() + + epoch = -1 + training_event.on_train_begin() + raw_train_start_time = logger.previous_log_time + if config.save_checkpoint: + trainer.save() + while 
training_state.global_steps < config.max_steps and not training_state.end_training: + epoch += 1 + training_state.epoch = epoch + dataloader.set_epoch(epoch) + trainer.train_one_epoch(dataloader) + if config.save_checkpoint: + trainer.save() + training_event.on_train_end() + raw_train_end_time = logger.previous_log_time + training_state.raw_train_time = (raw_train_end_time - raw_train_start_time) / 1e+3 + return config, training_state + + +if __name__ == "__main__": + now = time.time() + config, state = main() + + if not utils.is_main_process(): + exit(1) + + gpu_count = config.n_gpu + e2e_time = time.time() - now + training_perf = (utils.global_batch_size(config) * state.global_steps) / (state.raw_train_time + 1e-7) + if config.do_train: + finished_info = { + "e2e_time": e2e_time, + "training_sequences_per_second": training_perf, + "converged": state.converged, + "final_loss": state.eval_loss, + "final_mlm_accuracy": state.eval_mlm_accuracy, + "raw_train_time": state.raw_train_time, + "init_time": state.init_time, + } + else: + finished_info = {"e2e_time": e2e_time} + logger.log("FINISHED", finished_info, stacklevel=0) + if state.converged: + exit(0) + else: + exit(1) diff --git a/nlp/language_model/bert_sample/pytorch/base/run_training.sh b/nlp/language_model/bert_sample/pytorch/base/run_training.sh new file mode 100644 index 000000000..5a6d9edeb --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/run_training.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + +get_lscpu_value() { + awk -F: "(\$1 == \"${1}\"){gsub(/ /, \"\", \$2); print \$2; found=1} END{exit found!=1}" +} +lscpu_out=$(lscpu) + +n_sockets=$(get_lscpu_value 'Socket(s)' <<< "${lscpu_out}") +n_cores_per_socket=$(get_lscpu_value 'Core(s) per socket' <<< "${lscpu_out}") + +echo "Number of CPU sockets on a node: ${n_sockets}" +echo "Number of CPU cores per socket: ${n_cores_per_socket}" + +EXIT_STATUS=0 +check_status() +{ + if ((${PIPESTATUS[0]} != 0)); then + EXIT_STATUS=1 + fi +} + +export PYTHONPATH=../:$PYTHONPATH + +python3 -u -m bind_pyt \ + --nsockets_per_node ${n_sockets} \ + --ncores_per_socket ${n_cores_per_socket} \ + --no_hyperthreads \ + --no_membind "$@" --training_script ./run_pretraining.py --do_train; check_status + +exit ${EXIT_STATUS} \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/run_with_docker.sh b/nlp/language_model/bert_sample/pytorch/base/run_with_docker.sh new file mode 100644 index 000000000..df3c47c26 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/run_with_docker.sh @@ -0,0 +1,208 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. 
and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +# ================================================= +# Constants +# ================================================= + +MODEL="bert" +export MODEL +DOCKER_IMAGE="perf:${MODEL}" +NEXP=1 + +# TODO: Add to Dockerfile +WORK_DIR="/workspace/baai-perf" +MODEL_DIR="${WORK_DIR}/benchmarks/${MODEL}/pytorch" + +CURRENT_DIR=$(cd `dirname $0`; pwd) +PROJ_DIR="${CURRENT_DIR}/../../../" +BUILD_EXTENSION_DIR="${CURRENT_DIR}/build" +BUILD_EXTENSION_PACKAGE_NAME="ext_ops" + +BASE_DOCKERFILE_PATH="${CURRENT_DIR}/BaseDockerfile" +HOST_DOCKERFILE_PATH="${CURRENT_DIR}/Dockerfile" + +SOURCE_DATA_DIR="" +MAP_DATA_DIR="/mnt/dataset/perf/${MODEL}" +SUBMITTER="default" +CONFIG="" + +: "${CLEAR_CACHES:=1}" +SHM_SIZE="32g" + + +# ================================================= +# Parse arguments +# ================================================= + +i=2 +TRAINING_SCRIPT_ARGS="$@" +for arg in "$@" +do + if [[ $arg =~ "--data_dir" ]]; then + if [[ $arg =~ "=" ]]; then + kv=(${arg//=/ }) + SOURCE_DATA_DIR=${kv[1]} + TRAINING_SCRIPT_ARGS=${TRAINING_SCRIPT_ARGS/$arg/"--data_dir ${MAP_DATA_DIR}"} + else + SOURCE_DATA_DIR=${!i} + TRAINING_SCRIPT_ARGS=${TRAINING_SCRIPT_ARGS/"--data_dir ${!i}"/"--data_dir ${MAP_DATA_DIR}"} + fi + + elif [[ $arg =~ "--name" ]]; then + if [[ $arg =~ "=" ]]; then + kv=(${arg//=/ }) + SUBMITTER=${kv[1]} + else + SUBMITTER=${!i} + fi + + elif [[ $arg =~ "--config" ]]; then + if [[ $arg =~ "=" ]]; then + kv=(${arg//=/ }) + CONFIG=${kv[1]} + else + CONFIG=${!i} + fi + fi + + let i++ +done + + +# ================================================= +# Check arguments +# ================================================= + +if [[ "${SOURCE_DATA_DIR}" == "" ]]; then + echo "ERROR: data_dir is not given, please set --data_dir " + exit 1 +fi + +if [[ "${CONFIG}" == "" ]]; then + echo "ERROR: config is not given, please set --config " + exit 1 +fi + +CONTAINER_SUBMITTER_DIR="${WORK_DIR}/${SUBMITTER}" +HOST_SUBMITTER_DIR="${PROJ_DIR}/${SUBMITTER}" + +CONTAINER_ENVIRONMENT_VARIABLES_PATH=${CONTAINER_SUBMITTER_DIR}/${MODEL}/config/environment_variables.sh +HOST_ENVIRONMENT_VARIABLES_PATH="${HOST_SUBMITTER_DIR}/${MODEL}/config/environment_variables.sh" + +HOST_SUBMITTER_DOCKERFILE="${PROJ_DIR}/${SUBMITTER}/${MODEL}/config/Dockerfile" +CONTAINER_NAME="perf-${SUBMITTER}-${MODEL}-container" + +if [ ! 
-f "${HOST_ENVIRONMENT_VARIABLES_PATH}" ]; then + touch "${HOST_ENVIRONMENT_VARIABLES_PATH}" +fi + +source ${HOST_ENVIRONMENT_VARIABLES_PATH} + +RESULTS_DIR="${PROJ_DIR}/${SUBMITTER}/${MODEL}/results" +LOG_FILE_BASE="${RESULTS_DIR}/config_${CONFIG}_experiment" + +echo "======================================" +echo "Arguments" +echo "---------" + +echo "MODEL = ${MODEL}" +echo "CONTAINER_NAME = ${CONTAINER_NAME}" +echo "DOCKER_IMAGE = ${DOCKER_IMAGE}" +echo "MODEL_DIR = ${MODEL_DIR}" +echo "SUBMITTER = ${SUBMITTER}" +echo "CONTAINER_SUBMITTER_DIR = ${CONTAINER_SUBMITTER_DIR}" +echo "HOST_SUBMITTER_DOCKERFILE = ${HOST_SUBMITTER_DOCKERFILE}" +echo "CONFIG = ${CONFIG}" +echo "CONTAINER_MOUNTS = ${CONTAINER_MOUNTS}" +echo "TRAINING_SCRIPT_ARGS = ${TRAINING_SCRIPT_ARGS[*]}" +echo "CURRENT_DIR = ${CURRENT_DIR}" +echo "CONTAINER_ENVIRONMENT_VARIABLES_PATH = ${CONTAINER_ENVIRONMENT_VARIABLES_PATH}" +echo "RESULTS_DIR = ${RESULTS_DIR}" +echo "LOG_FILE_BASE = ${LOG_FILE_BASE}" +echo "SHM_SIZE = ${SHM_SIZE}" +echo "======================================" + + +# ================================================= +# Training +# ================================================= + +# Cleanup container +cleanup_docker() { + docker container rm -f "${CONTAINER_NAME}" || true +} +cleanup_docker +trap 'set -eux; cleanup_docker' EXIT + +# Clean built extension +if [ -d "${BUILD_EXTENSION_DIR}" ]; then + echo "WARN: Delete built extension" + rm -rf "${BUILD_EXTENSION_DIR}" + rm -rf ${CURRENT_DIR}/${BUILD_EXTENSION_PACKAGE_NAME}.*.so + echo "extension file: "${CURRENT_DIR}/${BUILD_EXTENSION_PACKAGE_NAME}.*.so"" +fi + + +# Build image +if [ -f "${HOST_DOCKERFILE_PATH}" ]; then + echo "WARN: Remove previous Dockerfile" + rm -f "${HOST_DOCKERFILE_PATH}" +fi + +echo "WARN: cp BaseDockerfile to Dockerfile" +cp "${BASE_DOCKERFILE_PATH}" "${HOST_DOCKERFILE_PATH}" + +if [ -f "${HOST_SUBMITTER_DOCKERFILE}" ]; then + echo "WARN: Found submitter's Dockerfile, merging submitter's Dockerfile to Dockerfile" + cat "${HOST_SUBMITTER_DOCKERFILE}" >> "${HOST_DOCKERFILE_PATH}" +fi + +docker build -t ${DOCKER_IMAGE} ./ + +# Setup container by Dockerfile +docker run --rm --init --detach \ + --net=host --uts=host --ipc=host --security-opt=seccomp=unconfined \ + --privileged=true \ + --ulimit=stack=67108864 --ulimit=memlock=-1 \ + -w ${MODEL_DIR} \ + --shm-size="${SHM_SIZE}" \ + --volume ${SOURCE_DATA_DIR}:${MAP_DATA_DIR} \ + --volume ${PROJ_DIR}:${WORK_DIR} \ + --name="${CONTAINER_NAME}" ${CONTAINER_MOUNTS} \ + "${DOCKER_IMAGE}" sleep infinity + +# make sure container has time to finish initialization +# TODO: Uncomment +#sleep 30 +docker exec -it "${CONTAINER_NAME}" true + +mkdir -p ${RESULTS_DIR} +docker exec -it "${CONTAINER_NAME}" sh -c "chmod 777 run_training.sh" + +# TODO: Remove pip source +docker exec -it "${CONTAINER_NAME}" /bin/bash -c "pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple" + +docker exec -it "${CONTAINER_NAME}" /bin/bash -c "source ${CONTAINER_ENVIRONMENT_VARIABLES_PATH};python3 prepare.py --name ${SUBMITTER} --data_dir ${MAP_DATA_DIR}" + +# Run experiments +for _experiment_index in $(seq 1 "${NEXP}"); do + ( + echo "Beginning trial ${_experiment_index} of ${NEXP}" + echo "source ${CONTAINER_ENVIRONMENT_VARIABLES_PATH};bash ./run_training.sh ${TRAINING_SCRIPT_ARGS[*]}" + + if [ "${CLEAR_CACHES}" -eq 1 ]; then + sync && sudo /sbin/sysctl vm.drop_caches=3 + fi + + # Run experiment + docker exec -it "${CONTAINER_NAME}" /bin/bash -c "source ${CONTAINER_ENVIRONMENT_VARIABLES_PATH};bash 
./run_training.sh ${TRAINING_SCRIPT_ARGS[*]}" + ) |& tee "${LOG_FILE_BASE}_${_experiment_index}.log" +done \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/schedulers/__init__.py b/nlp/language_model/bert_sample/pytorch/base/schedulers/__init__.py new file mode 100644 index 000000000..53a358824 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/schedulers/__init__.py @@ -0,0 +1 @@ +from .factory import create_scheduler \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/schedulers/base.py b/nlp/language_model/bert_sample/pytorch/base/schedulers/base.py new file mode 100644 index 000000000..ad93dea00 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/schedulers/base.py @@ -0,0 +1,49 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import torch +from torch.optim.optimizer import Optimizer +from torch.optim.lr_scheduler import _LRScheduler + + +class LRScheduler(_LRScheduler): + def __init__(self, optimizer, last_epoch=-1): + # Check if using mixed precision training + self.mixed_training = False + base_optimizer = optimizer + + # Check that optimizer param is valid + if not isinstance(optimizer, Optimizer): + raise TypeError('{} is not an Optimizer'.format( + type(optimizer).__name__)) + + super(LRScheduler, self).__init__(base_optimizer, last_epoch) + + def step(self, epoch=None): + # Set the current training step + # ('epoch' is used to be consistent with _LRScheduler) + if self.mixed_training: + # The assumption is that the step will be constant + state_dict = self.optimizer.state[self.optimizer.param_groups[0]['params'][0]] + if 'step' in state_dict: + self.last_epoch = state_dict['step'] + 1 + else: + self.last_epoch = 1 + else: + self.last_epoch = epoch if epoch is not None else self.last_epoch + 1 + + for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): + param_group['lr'] = lr \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/schedulers/factory.py b/nlp/language_model/bert_sample/pytorch/base/schedulers/factory.py new file mode 100644 index 000000000..a74a242ba --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/schedulers/factory.py @@ -0,0 +1,33 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. 
No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +import config + +from .linear_warmup_poly_scheduler import LinearWarmupPolyDecayScheduler +from .linear_warmup_scheduler import LinearWarmUpScheduler + + +def create_scheduler(optimizer, scheduler="poly"): + if config.warmup_proportion == 0: + warmup_steps = config.warmup_steps + warmup_start = config.start_warmup_step + else: + warmup_steps = int(config.max_steps * config.warmup_proportion) + warmup_start = 0 + + if scheduler == "linear": + return LinearWarmUpScheduler(optimizer, warmup_steps, config.max_steps) + + if scheduler == "poly": + return LinearWarmupPolyDecayScheduler(optimizer, start_warmup_steps=warmup_start, + warmup_steps=warmup_steps, + total_steps=config.max_steps, end_learning_rate=0.0, degree=1.0) + + raise ValueError(f"Unknown scheduler {scheduler}.") \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/schedulers/linear_warmup_poly_scheduler.py b/nlp/language_model/bert_sample/pytorch/base/schedulers/linear_warmup_poly_scheduler.py new file mode 100644 index 000000000..df9d495cf --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/schedulers/linear_warmup_poly_scheduler.py @@ -0,0 +1,57 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import torch +from .base import LRScheduler + + +class LinearWarmupPolyDecayScheduler(LRScheduler): + """ + Linearly warms up the learning rate over the warmup steps, then decays it polynomially to end_learning_rate.
+ """ + def __init__(self, optimizer, start_warmup_steps, warmup_steps, total_steps, end_learning_rate=0.0, degree=1.0, last_epoch=-1): + self.num_warmup_updates = warmup_steps + self.start_warmup_steps = start_warmup_steps + self.total_steps = total_steps + self.end_learning_rate = end_learning_rate + self.degree = degree + super(LinearWarmupPolyDecayScheduler, self).__init__(optimizer, last_epoch) + + param_group = self.optimizer.param_groups[0] + if 'step' in param_group and param_group['step']>0: + self.last_epoch = param_group['step'] + if self.last_epoch <= 0: + self.last_epoch = 0 + + def step(self, epoch=None): + param_group = self.optimizer.param_groups[0] + if 'step' in param_group: + self.last_epoch = param_group['step'] + 1 + else: + self.last_epoch += 1 + + for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): + param_group['lr'] = lr + + def get_lr(self): + mod_step = self.last_epoch - self.start_warmup_steps + if mod_step < self.num_warmup_updates: + progress = mod_step / self.num_warmup_updates + return [(base_lr * progress) for base_lr in self.base_lrs] + else: + progress = min(self.last_epoch / self.total_steps, 1.0) + return [(base_lr - self.end_learning_rate) * (1-progress) ** self.degree + self.end_learning_rate + for base_lr in self.base_lrs] diff --git a/nlp/language_model/bert_sample/pytorch/base/schedulers/linear_warmup_scheduler.py b/nlp/language_model/bert_sample/pytorch/base/schedulers/linear_warmup_scheduler.py new file mode 100644 index 000000000..8e82bf661 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/schedulers/linear_warmup_scheduler.py @@ -0,0 +1,33 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import numpy as np + +from .base import LRScheduler + +class LinearWarmUpScheduler(LRScheduler): + + def __init__(self, optimizer, warmup, total_steps, last_epoch=-1): + self.warmup = warmup + self.total_steps = total_steps + super(LinearWarmUpScheduler, self).__init__(optimizer, last_epoch) + + def get_lr(self): + progress = self.last_epoch / self.total_steps + if progress < self.warmup: + return [base_lr * progress / self.warmup for base_lr in self.base_lrs] + else: + return [base_lr * max(( progress - 1.0)/(self.warmup - 1.0), 0.) 
for base_lr in self.base_lrs] \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/setup.py b/nlp/language_model/bert_sample/pytorch/base/setup.py new file mode 100644 index 000000000..867873cc2 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/setup.py @@ -0,0 +1,98 @@ +import glob +import os +import os.path as ospath + +from setuptools import setup, Extension +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +PACKAGE_NAME = "ext_ops" + +SOURCE_FILE_EXT = ["c", "cpp", "cu"] +HEADER_FILE_EXT = ["h", "hpp", "cuh"] + +SUPPORT_EXTENSIONS = SOURCE_FILE_EXT + HEADER_FILE_EXT + + +def get_value_from_environ(name: str, default=None): + if name in os.environ: + return os.environ[name] + if name.upper() in os.environ: + return os.environ[name.upper()] + + return default + + +def check_source_dir(): + cur_dir = os.path.dirname(os.path.abspath(__file__)) + source_dir = os.path.join(os.path.dirname(cur_dir), 'iluvatar/csrc') + return source_dir + + +def find_source_files() -> dict: + source_dir = check_source_dir() + + if not ospath.exists(source_dir): + return dict() + + # Search source files + sources = dict() + for ext in SOURCE_FILE_EXT: + sources[ext] = glob.glob(ospath.join(source_dir, "**", f"*.{ext}"), recursive=True) + + return sources + + +def find_include_dirs() -> list: + source_dir = check_source_dir() + if not ospath.exists(source_dir): + return [] + return glob.glob(ospath.join(source_dir, "**", "include"), recursive=True) + + +def get_nvcc_arguments() -> list: + return [ + '-U__CUDA_NO_HALF_OPERATORS__', + '-U__CUDA_NO_HALF_CONVERSIONS__', + "-ftemplate-depth=1024", + ] + + +source_files = find_source_files() +include_dirs = find_include_dirs() +c_sources = source_files.pop("c") +other_sources = [] +for _sources in source_files.values(): + other_sources.extend(_sources) + +nvcc_arguments = get_nvcc_arguments() + +ext_modules = [] + +if len(c_sources) != 0: + ext_modules.append(Extension( + name=PACKAGE_NAME, + sources=c_sources, + include_dirs=include_dirs, + extra_compile_args={ + 'c': ['-O3'] + } + )) + +if len(other_sources) != 0: + ext_modules.append(CUDAExtension( + name=PACKAGE_NAME, + sources=other_sources, + extra_compile_args={ + 'cxx': ['-O3', ], + 'nvcc': ['-O3'] + nvcc_arguments + } + )) + +setup( + name=PACKAGE_NAME, + version="0.1", + ext_modules=ext_modules, + cmdclass={ + 'build_ext': BuildExtension + } +) diff --git a/nlp/language_model/bert_sample/pytorch/base/train/__init__.py b/nlp/language_model/bert_sample/pytorch/base/train/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nlp/language_model/bert_sample/pytorch/base/train/evaluator.py b/nlp/language_model/bert_sample/pytorch/base/train/evaluator.py new file mode 100644 index 000000000..f4da8d5f9 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/train/evaluator.py @@ -0,0 +1,99 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. 
No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +from concurrent.futures import ProcessPoolExecutor + +import torch +import torch.distributed as dist + +from dataloaders.dataloader import WorkerInitializer, create_eval_dataloader + +import config + +class Evaluator: + + def __init__(self, eval_dir: str, + proc_pool: ProcessPoolExecutor, + global_batch_size: int, + max_steps: int, + worker_init: WorkerInitializer=None, + use_cache: bool=False): + self.eval_dir = eval_dir + self.proc_pool = proc_pool + self.use_cache = use_cache + + if worker_init is None: + worker_init = WorkerInitializer.default() + self.worker_init = worker_init + + self.eval_count = 0 + self.cached_batches = [] + + self.need_next_training_shard = global_batch_size * max_steps > 10000 + self._dataloader = None + self.fetch_dataloader() + + def fetch_dataloader(self): + if self._dataloader is None: + if self.need_next_training_shard: + if self.eval_count == 0: + self.eval_dataset_future = self.proc_pool.submit( + create_eval_dataloader, + config.eval_dir, config.eval_batch_size, config.max_predictions_per_seq, + config.num_eval_examples, self.worker_init + ) + else: + self._dataloader = self.eval_dataset_future.result(timeout=None) + else: + self._dataloader = create_eval_dataloader( + config.eval_dir, config.eval_batch_size, config.max_predictions_per_seq, + config.num_eval_examples, self.worker_init + ) + + return self._dataloader + + def evaluate(self, trainer): + self.eval_count += 1 + + eval_dataloader = self.fetch_dataloader() + + trainer.model.eval() + + total_eval_loss, total_eval_mlm_acc = 0.0, 0.0 + total_masked = 0 + + # on first eval, load and cache data on GPU + if self.eval_count == 1 and self.use_cache: + for batch in eval_dataloader: + self.cached_batches.append([t.to(trainer.device) for t in batch]) + + with torch.no_grad(): + for batch in self.cached_batches if self.use_cache else eval_dataloader: + if not self.use_cache: + batch = [t.to(trainer.device) for t in batch] + loss, mlm_acc, num_masked = trainer.inference(batch) + total_eval_loss += loss * num_masked + total_eval_mlm_acc += mlm_acc * num_masked + total_masked += num_masked + torch.cuda.synchronize() + trainer.model.train() + + if torch.distributed.is_initialized(): + # Collect total scores from all ranks + torch.distributed.all_reduce(total_eval_mlm_acc, op=torch.distributed.ReduceOp.SUM) + torch.distributed.all_reduce(total_eval_loss, op=torch.distributed.ReduceOp.SUM) + torch.distributed.all_reduce(total_masked, op=torch.distributed.ReduceOp.SUM) + + # Average by number of examples + total_eval_mlm_acc /= total_masked + total_eval_loss /= total_masked + + return total_eval_loss.item(), total_eval_mlm_acc.item() + diff --git a/nlp/language_model/bert_sample/pytorch/base/train/event/__init__.py b/nlp/language_model/bert_sample/pytorch/base/train/event/__init__.py new file mode 100644 index 000000000..2cd8c9e60 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/train/event/__init__.py @@ -0,0 +1,4 @@ +from .base import BaseTrainingEventInterface +from .base_adapter import BaseTrainingEventAdapter +from .compose import TrainingEventCompose +from .log import TrainingLogger \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/train/event/base.py b/nlp/language_model/bert_sample/pytorch/base/train/event/base.py new file mode 100644 index 
000000000..0e5075203 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/train/event/base.py @@ -0,0 +1,65 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +from typing import Tuple, List + +import torch.nn +from torch import Tensor +from torch.cuda.amp import GradScaler +from torch.optim import Optimizer + +BERT_MODEL = torch.nn.Module +BatchType = Tuple[Tensor, Tensor, Tensor, Tensor, Tensor] + +class BaseTrainingEventInterface: + + def __init__(self, config): + self.config = config + + def convert_model(self, model: BERT_MODEL) -> BERT_MODEL: + return model + + def create_optimizer(self, model: BERT_MODEL) -> Optimizer: + raise NotImplementedError() + + def model_to_fp16(self, model: BERT_MODEL, optimizer: Optimizer) -> Tuple[BERT_MODEL, Optimizer]: + return model, optimizer + + def model_to_ddp(self, model: BERT_MODEL) -> BERT_MODEL: + return model + + def on_init_start(self): + pass + + def on_init_end(self): + pass + + def on_backward(self, step: int, loss: Tensor, optimizer: Optimizer, grad_scaler: GradScaler=None): + pass + + def on_train_begin(self): + pass + + def on_train_end(self): + pass + + def on_epoch_begin(self, epoch: int): + pass + + def on_epoch_end(self, epoch: int): + pass + + def on_step_begin(self, step: int): + pass + + def on_step_end(self, step: int): + pass + + diff --git a/nlp/language_model/bert_sample/pytorch/base/train/event/base_adapter.py b/nlp/language_model/bert_sample/pytorch/base/train/event/base_adapter.py new file mode 100644 index 000000000..3d549ff00 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/train/event/base_adapter.py @@ -0,0 +1,40 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
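The interface above is deliberately thin: a backend only overrides the hooks it needs and inherits the no-op defaults for the rest. A minimal sketch of such a subclass follows; the class name, the AdamW choice and the config.learning_rate attribute are illustrative assumptions, not part of this patch.

# Illustrative backend event: only create_optimizer and on_backward are overridden.
import torch
from torch.optim import Optimizer

from train.event.base import BaseTrainingEventInterface, BERT_MODEL


class SimpleTrainingEvent(BaseTrainingEventInterface):

    def create_optimizer(self, model: BERT_MODEL) -> Optimizer:
        # learning rate assumed to live on the config object passed to __init__
        return torch.optim.AdamW(model.parameters(), lr=self.config.learning_rate)

    def on_backward(self, step, loss, optimizer, grad_scaler=None):
        # plain FP32 backward; a real backend would route through grad_scaler for AMP
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()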
+ + +from torch.optim import Optimizer + +from .base import BaseTrainingEventInterface + + +class BaseTrainingEventMix: + + def launch(self): + pass + + def create_optimizer(self, optimizer: Optimizer): + pass + + def on_init_evaluate(self, result: dict): + pass + + def on_evaluate(self, result: dict): + pass + + def on_step_end(self, step: int, result: dict = None): + pass + + +class BaseTrainingEventAdapter(BaseTrainingEventMix, BaseTrainingEventInterface): + pass + + + + + diff --git a/nlp/language_model/bert_sample/pytorch/base/train/event/compose.py b/nlp/language_model/bert_sample/pytorch/base/train/event/compose.py new file mode 100644 index 000000000..0aceb4350 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/train/event/compose.py @@ -0,0 +1,110 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +from typing import List, Union, Callable, Tuple + +from torch import Tensor +from torch.cuda.amp import GradScaler +from torch.optim import Optimizer + +from .base import BaseTrainingEventInterface as TrainingEventInterface, BERT_MODEL +from .base_adapter import BaseTrainingEventMix, BaseTrainingEventAdapter + + +class TrainingEventCompose(BaseTrainingEventAdapter): + + def __init__(self, interface: TrainingEventInterface, events: List[BaseTrainingEventAdapter]): + super(TrainingEventCompose, self).__init__(interface.config) + + self.interface = interface + self.events = events + + def launch(self): + self._call_events_func(self.launch, with_interface=False) + + def convert_model(self, model: BERT_MODEL) -> BERT_MODEL: + model = self.interface.convert_model(model) + self._call_events_func(self.convert_model, with_interface=False, model=model) + return model + + def create_optimizer(self, model: BERT_MODEL) -> Optimizer: + optimizer = self.interface.create_optimizer(model) + self._call_events_func(self.create_optimizer, with_interface=False, optimizer=optimizer) + return optimizer + + def model_to_fp16(self, model: BERT_MODEL, optimizer: Optimizer) -> Tuple[BERT_MODEL, Optimizer]: + model, optimizer = self.interface.model_to_fp16(model, optimizer) + self._call_events_func(self.model_to_fp16, with_interface=False, model=model, optimizer=optimizer) + return model, optimizer + + def model_to_ddp(self, model: BERT_MODEL) -> BERT_MODEL: + model = self.interface.model_to_ddp(model) + self._call_events_func(self.model_to_ddp, with_interface=False, model=model) + return model + + def on_init_evaluate(self, result: dict): + self._call_events_func(self.on_init_evaluate, with_interface=False, result=result) + + def on_evaluate(self, result: dict): + self._call_events_func(self.on_evaluate, with_interface=False, result=result) + + def on_init_start(self): + self._call_events_func(self.on_init_start, with_interface=True) + + def on_init_end(self): + self._call_events_func(self.on_init_end, with_interface=True) + + def on_backward(self, step: int, loss: Tensor, optimizer: 
Optimizer, grad_scaler: GradScaler = None): + return self.interface.on_backward(step, loss, optimizer, grad_scaler) + + def on_train_begin(self): + self._call_events_func(self.on_train_begin, with_interface=True) + + def on_train_end(self): + self._call_events_func(self.on_train_end, with_interface=True) + + def on_epoch_begin(self, epoch: int): + self._call_events_func(self.on_epoch_begin, with_interface=True, epoch=epoch) + + def on_epoch_end(self, epoch: int): + self._call_events_func(self.on_epoch_end, with_interface=True, epoch=epoch) + + def on_step_begin(self, step: int): + self._call_events_func(self.on_step_begin, with_interface=True, step=step) + + def on_step_end(self, step: int, result: dict = None): + self.interface.on_step_end(step) + self._call_events_func(self.on_step_end, with_interface=False, step=step, result=result) + + def _call_events_func(self, func: Union[str, Callable], with_interface=False, *args, **kwargs): + func_name = self._get_func_name(func) + events = self.events + if with_interface: + events = [self.interface] + events + + result = [] + for event in events: + ret = None + if hasattr(event, func_name): + ret = getattr(event, func_name)(*args, **kwargs) + result.append(ret) + return result + + def _get_func_name(self, func: Union[str, Callable]): + if isinstance(func, str): + return func + + if callable(func): + return func.__name__ + + return None + + + diff --git a/nlp/language_model/bert_sample/pytorch/base/train/event/log.py b/nlp/language_model/bert_sample/pytorch/base/train/event/log.py new file mode 100644 index 000000000..c9c107429 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/train/event/log.py @@ -0,0 +1,155 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
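TrainingEventCompose above is a fan-out: each hook goes to the backend interface first and is then replayed on every registered observer, so adding a logger or profiler never requires touching the trainer. A hypothetical wiring sketch, reusing the SimpleTrainingEvent sketched earlier and the TrainingLogger defined in log.py just below:

from torch import nn

from train.event import TrainingEventCompose, TrainingLogger


def build_events_and_model(config, raw_model: nn.Module):
    # SimpleTrainingEvent is the illustrative backend sketched after base.py above;
    # config and raw_model are supplied by the caller.
    interface = SimpleTrainingEvent(config)
    events = TrainingEventCompose(interface, [TrainingLogger(config, log_freq=100)])

    events.on_init_start()                   # reaches the interface and every observer
    model = events.convert_model(raw_model)  # interface converts, observers are notified
    optimizer = events.create_optimizer(model)
    return events, model, optimizer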
+ + +import copy +import inspect +import os +import os.path as ospath +from typing import Tuple, Union, Iterable + +from torch import Tensor +from torch.cuda.amp import GradScaler +from torch.optim import Optimizer + +from config.config_manager import get_properties_from_config +from utils.logging import PerfLogger, LogEvent, PerfLogLevel +from .base import BERT_MODEL +from .base_adapter import BaseTrainingEventAdapter + + +STACKLEVEL = 4 + + +class TrainingLogger(BaseTrainingEventAdapter): + + def __init__(self, config, logger: PerfLogger=None, log_freq: int = 0): + super(TrainingLogger, self).__init__(config) + self.config = config + self.log_freq = log_freq + level = PerfLogLevel.INFO if log_freq > 0 else PerfLogLevel.SUBMITTION + if logger is None: + logger = PerfLogger.get_default_logger(rank=config.local_rank, level=level) + self.logger = logger + + self.model = None + self.submitter = None + + def launch(self): + self.logger.log(LogEvent.launch_training, "Launch training", stacklevel=STACKLEVEL) + config_path: str = self.config.config + config_dict = get_properties_from_config(self.config) + for key, value in config_dict.items(): + if type(value) not in [int, float, str, bool] and not isinstance(value, Iterable): + config_dict[key] = str(value) + + # Extract definition of training event + try: + training_event_class = self.config.training_event + if not inspect.isclass(training_event_class): + training_event_class = training_event_class.__class__ + training_event_class_define = inspect.getabsfile(training_event_class) + training_event_class_define = training_event_class_define.rsplit(".py", maxsplit=1)[0] + training_event_class_define += ":" + training_event_class.__name__ + except: + training_event_class_define = str(self.config.training_event) + config_dict['training_event'] = training_event_class_define + + # Like /path/to/proj/submitter/model/config/config_xxx.py + if config_path.startswith("."): + config_path = ospath.abspath(config_path) + + config_path_nodes = config_path.rsplit(sep="/", maxsplit=4) + submitter = config_path_nodes[1] + model = config_path_nodes[2] + self.logger.init_logger(submitter=submitter, + model=model, + config_path=config_path, + config=config_dict, + stacklevel=STACKLEVEL) + + self.model = model + self.submitter = submitter + + def convert_model(self, model: BERT_MODEL): + model_class = type(model) + model_info = dict( + type = model_class.__name__, + module = model_class.__module__ if hasattr(model_class, "__module__") else "None" + ) + self._log_event(LogEvent.convert_model, model_info) + + def create_optimizer(self, optimizer: Optimizer): + optimizer_class = type(optimizer) + optimizer_info = dict( + type=optimizer_class.__name__, + module=optimizer_class.__module__ if hasattr(optimizer_class, "__module__") else "None" + ) + self._log_event(LogEvent.create_optimizer, optimizer_info) + + def model_to_fp16(self, model: BERT_MODEL, optimizer: Optimizer): + fp16_info = dict( + fp16 = self.config.fp16 if hasattr(self.config, "fp16") else False + ) + self._log_event(LogEvent.model_to_fp16, fp16_info) + + def model_to_ddp(self, model: BERT_MODEL): + model_class = type(model) + model_info = dict( + type=model_class.__name__, + module=model_class.__module__ if hasattr(model_class, "__module__") else None + ) + self._log_event(LogEvent.model_to_ddp, model_info) + + def on_init_evaluate(self, result: dict): + self._log_event(LogEvent.init_evaluation, result) + + def on_evaluate(self, result: dict): + self._log_event(LogEvent.evaluation, result) + + def 
on_init_start(self): + self._log_event(LogEvent.init_start) + + def on_init_end(self): + self._log_event(LogEvent.init_end, "Finish initialization") + + def on_backward(self, step: int, loss: Tensor, optimizer: Optimizer, grad_scaler: GradScaler=None): + pass + + def on_train_begin(self): + self._log_event(LogEvent.train_begin) + + def on_train_end(self): + self._log_event(LogEvent.train_end) + + def on_epoch_begin(self, epoch: int): + epoch_info = dict(epoch=epoch) + self._log_event(LogEvent.epoch_begin, epoch_info) + + def on_epoch_end(self, epoch: int): + epoch_info = dict(epoch=epoch) + self._log_event(LogEvent.epoch_end, epoch_info) + + def on_step_begin(self, step: int): + pass + + def on_step_end(self, step: int, result: dict=None): + if (self.log_freq <= 0 or step % self.log_freq != 0) and step != 1: + return + if result is None: + step_info = dict() + else: + step_info = copy.copy(result) + + step_info['step'] = step + self._log_event(LogEvent.step_end, step_info) + + def _log_event(self, event, *args, **kwargs): + self.logger.log(event, stacklevel=STACKLEVEL, *args, **kwargs) + diff --git a/nlp/language_model/bert_sample/pytorch/base/train/trainer.py b/nlp/language_model/bert_sample/pytorch/base/train/trainer.py new file mode 100644 index 000000000..4384c23c5 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/train/trainer.py @@ -0,0 +1,222 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
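TrainingLogger.on_step_end above throttles its output: a step is reported only when log_freq is positive and the step index is a multiple of log_freq, with step 1 always reported so every run emits at least one STEP_END record. The gate, extracted as a plain predicate for clarity:

# Equivalent of the early-return condition in on_step_end above.
def should_log(step: int, log_freq: int) -> bool:
    return step == 1 or (log_freq > 0 and step % log_freq == 0)

assert should_log(1, 0)          # first step is always reported
assert not should_log(7, 100)
assert should_log(200, 100)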
+ +import numpy as np +import math +import time +import os +import sys + +import torch +from torch.cuda.amp import GradScaler +from torch.types import Device + +import config +import utils +from dataloaders.dataset import exchange_padding_fast +from model import create_model +from schedulers import create_scheduler +from train.evaluator import Evaluator +from train.training_state import TrainingState +from train.event import TrainingEventCompose as TrainingEvent +from utils.checkpoint import remap_segmented_model_parameters + + +class Trainer(): + + def __init__(self, training_event: TrainingEvent, + evaluator: Evaluator, + training_state: TrainingState, + grad_scaler: GradScaler, + device: Device): + super(Trainer, self).__init__() + self.training_event = training_event + self.training_state = training_state + self.grad_scaler = grad_scaler + + self.device = device + self.optimizer = None + self.bert_config = None + self.model = None + self.evaluator = evaluator + self.lr_scheduler = None + self.init_epoch = 0 + self.init_dataloader_idx = 0 + + def init(self): + self.bert_config, self.model = create_model(config) + self.model = self._init_model(self.model, self.device) + self.model = self.training_event.convert_model(self.model) + self.optimizer = self.training_event.create_optimizer(self.model) + self.model, self.optimizer = self.training_event.model_to_fp16(self.model, self.optimizer) + self.model = self.training_event.model_to_ddp(self.model) + self.lr_scheduler = create_scheduler(self.optimizer) + self.load() + + def _init_model(self, model, device): + checkpoint = torch.load(config.init_checkpoint, map_location="cpu") + if self._is_resume_checkpoint(checkpoint): + if "global_steps" in checkpoint['state'] and checkpoint['state']['global_steps'] > 0: + config.learning_rate = checkpoint['state']['learning_rate'] + else: + if "model" in checkpoint: + checkpoint = checkpoint["model"] + checkpoint_remapped = remap_segmented_model_parameters(checkpoint) + model.load_state_dict(checkpoint_remapped, strict=True) + model = model.to(device) + return model + + def _is_resume_checkpoint(self, checkpoint): + return "optimizer" in checkpoint + + def load(self): + checkpoint = torch.load(config.init_checkpoint, map_location="cpu") + + if not self._is_resume_checkpoint(checkpoint): + return + + model_ckpt = checkpoint['model'] + if config.distributed_lamb: + self.model.load_state_dict(model_ckpt,strict=True) + else: + self.model.module.load_state_dict(model_ckpt,strict=True) + + if 'global_steps' in checkpoint['state'] and checkpoint['state']['global_steps'] > 0: + # restore optimizer + optimizer_ckpt = checkpoint['optimizer'] + self.optimizer.load_state_dict(optimizer_ckpt) + + # restore epoch, dataloader_idx + self.init_epoch = checkpoint['state']['epoch'] + self.init_dataloader_idx = checkpoint['state']['iter_dataloader_idx'] + + self.training_state.global_steps = checkpoint['state']['global_steps'] + + + def save(self): + model_dict = self.model.state_dict() + optimizer_dict = self.optimizer.state_dict() + state_dict = self.training_state.to_dict() + + save_dict = {'model':model_dict,'optimizer':optimizer_dict,'state': state_dict} + + save_file = os.path.join(config.output_dir,f'model.ckpt-{self.training_state.global_steps}.pt') + + utils.main_proc_print(f"save for steps:{self.training_state.global_steps}") + + if utils.get_rank() == 0 or utils.get_rank() == -1: + if not os.path.isdir(config.output_dir): + os.makedirs(config.output_dir) + torch.save(save_dict,save_file) + + def 
train_one_epoch(self, dataloader): + state = self.training_state + training_event = self.training_event + + # restore epoch + if state.epoch < self.init_epoch: + return + + training_event.on_epoch_begin(state.epoch) + + step_start_time = time.time() + for dataloader_idx, batch_idx, batch in dataloader.iter_batchs(): + + # restore dataloader + if state.epoch == self.init_epoch and dataloader_idx <= self.init_dataloader_idx: + continue + + state.num_trained_samples = state.global_steps * utils.global_batch_size(config) + + state.global_steps += 1 + state.iter_dataloader_idx = dataloader_idx + self.training_event.on_step_begin(state.global_steps) + self.train_one_step(batch_idx, batch) + + other_state = dict() + if state.global_steps % config.gradient_accumulation_steps == 0: + step_end_time = time.time() + step_total_time = step_end_time - step_start_time + step_start_time = step_end_time + sequences_per_second = (utils.global_batch_size(config) * config.gradient_accumulation_steps) / step_total_time + other_state["seq/s"] = sequences_per_second + + eval_result = None + # if self.can_do_eval(state): + if state.global_steps > 0 and state.global_steps % config.eval_steps == 0: + eval_start = time.time() + state.eval_loss, state.eval_mlm_accuracy = self.evaluator.evaluate(self) + eval_end = time.time() + eval_result = dict(global_steps=state.global_steps, + eval_loss=state.eval_loss, + eval_mlm_accuracy=state.eval_mlm_accuracy, + time=eval_end - eval_start) + if config.save_checkpoint: + self.save() + + end_training = self.detect_training_status(state) + + step_info = state.to_dict(**other_state) + + self.training_event.on_step_end(state.global_steps, result=step_info) + + if eval_result is not None: + self.training_event.on_evaluate(eval_result) + + if end_training: + break + + training_event.on_epoch_end(state.epoch) + + def train_one_step(self, batch_idx, batch): + if config.exchange_padding == True: + batch = [t.to(self.device, non_blocking=True, dtype=torch.int16) for t in batch] + batch = exchange_padding_fast(self.device, config.train_batch_size, *batch) + else: + batch = [t.to(self.device, non_blocking=True) for t in batch] + + state = self.training_state + + self.model.train() + state.loss, state.mlm_acc, _ = self.forward(batch) + if not np.isfinite(state.loss.item()): + print("Loss is {}, stopping training".format(state.loss.item())) + sys.exit(1) + self.training_event.on_backward(state.global_steps, state.loss, self.optimizer, self.grad_scaler) + self.lr_scheduler.step() + + def detect_training_status(self, state: TrainingState): + if state.eval_mlm_accuracy >= config.target_mlm_accuracy: + state.converged_success() + + if state.global_steps > config.max_steps or state.num_trained_samples > config.max_samples_termination: + state.end_training = True + + return state.end_training + + def can_do_eval(self, state: TrainingState): + do_eval = all([ + config.eval_dir is not None, + state.num_trained_samples >= config.eval_iter_start_samples, + state.global_steps % math.ceil(config.eval_interval_samples / utils.global_batch_size(config)) == 0, + config.eval_interval_samples > 0, + state.global_steps > 1, + ]) + + return do_eval or state.global_steps >= config.max_steps + + def forward(self, batch): + input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch + loss, mlm_acc, num_valid = self.model(input_ids, segment_ids, input_mask, + masked_lm_labels, next_sentence_labels) + return loss, mlm_acc, num_valid + + def inference(self, batch): + self.model.eval() + 
return self.forward(batch) diff --git a/nlp/language_model/bert_sample/pytorch/base/train/training_state.py b/nlp/language_model/bert_sample/pytorch/base/train/training_state.py new file mode 100644 index 000000000..ccfac960b --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/train/training_state.py @@ -0,0 +1,73 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +from dataclasses import dataclass + +import torch +import utils + + +@dataclass +class TrainingState: + _trainer = None + _status = 'aborted' # later set to 'success' if termination criteria met + + global_steps = 0 + skipped_steps = 0 + iter_dataloader_idx = 0 + + loss: float = 0.0 + mlm_acc: float = 0.0 + + epoch: int = 1 + num_trained_samples = 0 + end_training: bool = False + converged: bool = False + + eval_loss = 0 + eval_mlm_accuracy = 0 + + init_time = 0 + raw_train_time = 0 + + def status(self): + if self.converged: + self._status = "success" + return self._status + + def converged_success(self): + self.end_training = True + self.converged = True + + def to_dict(self, **kwargs): + state_dict = dict() + + for var_name, value in self.__dict__.items(): + if not var_name.startswith("_") and utils.is_property(value): + state_dict[var_name] = value + + lr = self._trainer.lr_scheduler.get_lr() + if isinstance(lr, (tuple, list)): + lr = lr[0] + state_dict["learning_rate"] = lr + + exclude = ["eval_loss", "eval_mlm_accuracy", "skipped_steps", + "converged", "init_time", "raw_train_time"] + for exkey in exclude: + if exkey in state_dict: + state_dict.pop(exkey) + + state_dict.update(kwargs) + + for k in state_dict.keys(): + if torch.is_tensor(state_dict[k]): + state_dict[k] = state_dict[k].item() + + return state_dict \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/utils/__init__.py b/nlp/language_model/bert_sample/pytorch/base/utils/__init__.py new file mode 100644 index 000000000..9c0ed2c68 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/utils/__init__.py @@ -0,0 +1,17 @@ +import inspect + +from .check import check_config +from .dist import * + +def is_property(value): + status = [ + not callable(value), + not inspect.isclass(value), + not inspect.ismodule(value), + not inspect.ismethod(value), + not inspect.isfunction(value), + not inspect.isbuiltin(value), + "classmethod object" not in str(value) + ] + + return all(status) \ No newline at end of file diff --git a/nlp/language_model/bert_sample/pytorch/base/utils/check.py b/nlp/language_model/bert_sample/pytorch/base/utils/check.py new file mode 100644 index 000000000..41cad658e --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/utils/check.py @@ -0,0 +1,94 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. 
+# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. + + +import os +import os.path as ospath +from .dist import global_batch_size + + +def get_config_arg(config, name): + if hasattr(config, name): + value = getattr(config, name) + if value is not None: + return value + + if name in os.environ: + return os.environ[name] + + return None + + +def check_config(config): + print("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( + config.device, config.n_gpu, config.local_rank != -1, config.fp16)) + + train_dir = get_config_arg(config, "train_dir") + + data_dir = get_config_arg(config, "data_dir") + config.data_dir = data_dir + if data_dir is None and train_dir is None: + raise ValueError("Invalid data_dir and train_dir, should be given a path.") + + if train_dir is None: + config.train_dir = ospath.join(data_dir, "2048_shards_uncompressed") + + init_checkpoint = get_config_arg(config, "init_checkpoint") + config.init_checkpoint = init_checkpoint + if init_checkpoint is None: + if data_dir is None: + raise ValueError("Invalid init_checkpoint and data_dir, should be given a path.") + config.init_checkpoint = ospath.join(data_dir, "model.ckpt-28252.pt") + + bert_config_path = get_config_arg(config, "bert_config_path") + config.bert_config_path = bert_config_path + if bert_config_path is None: + if data_dir is None: + raise ValueError("Invalid bert_config_path and data_dir, should be given a path.") + config.bert_config_path = ospath.join(data_dir, "bert_config.json") + + eval_dir = get_config_arg(config, "eval_dir") + config.eval_dir = eval_dir + if eval_dir is None: + if data_dir is None: + raise ValueError("Invalid eval_dir and data_dir, should be given a path.") + config.eval_dir = ospath.join(data_dir, "eval_set_uncompressed") + + if config.gradient_accumulation_steps < 1: + raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( + config.gradient_accumulation_steps)) + # if args.train_batch_size % args.gradient_accumulation_steps != 0: + # raise ValueError("Invalid gradient_accumulation_steps parameter: {}, batch size {} should be divisible".format( + # args.gradient_accumulation_steps, args.train_batch_size)) + # + # args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps + + if config.eval_interval_samples == 0: + eval_interval_samples = (0.05 * (230.23 * global_batch_size(config) + 3000000)) / 25000 + eval_interval_samples = int(eval_interval_samples) * 25000 + config.eval_interval_samples = eval_interval_samples + + # if not (config.do_train or (config.eval_dir and config.eval_iter_samples <= 0)): + # raise ValueError(" `do_train` or should be in offline eval mode") + + # if not config.resume_from_checkpoint or not os.path.exists(config.output_dir): + # os.makedirs(config.output_dir, exist_ok=True) + + + + + + + + + + + + + diff --git a/nlp/language_model/bert_sample/pytorch/base/utils/checkpoint.py 
b/nlp/language_model/bert_sample/pytorch/base/utils/checkpoint.py new file mode 100644 index 000000000..7926f529d --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/utils/checkpoint.py @@ -0,0 +1,77 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Returns true only if resuming from a checkpoint found in output_dir. +# init_checkpoint and init_tf_checkpoint are not considered +import glob +import os +from collections import OrderedDict + + +def found_resume_checkpoint(args): + if args.phase2: + checkpoint_str = "phase2_ckpt*.pt" + else: + checkpoint_str = "phase1_ckpt*.pt" + return args.resume_from_checkpoint and (args.resume_init_checkpoint is not None or len( + glob.glob(os.path.join(args.output_dir, checkpoint_str)))) > 0 + + +def remap_attn_parameters(model_dict): + res_dict = OrderedDict() + for k in model_dict: + if 'attention' in k: + if 'self.query.weight' in k: + new_k = k.replace('self.query.weight', 'multi_head_attention.q_weight') + elif 'self.key.weight' in k: + new_k = k.replace('self.key.weight', 'multi_head_attention.k_weight') + elif 'self.value.weight' in k: + new_k = k.replace('self.value.weight', 'multi_head_attention.v_weight') + elif 'self.query.bias' in k: + new_k = k.replace('self.query.bias', 'multi_head_attention.q_bias') + elif 'self.key.bias' in k: + new_k = k.replace('self.key.bias', 'multi_head_attention.k_bias') + elif 'self.value.bias' in k: + new_k = k.replace('self.value.bias', 'multi_head_attention.v_bias') + elif 'output.dense.weight' in k: + new_k = k.replace('output.dense.weight', 'multi_head_attention.out_proj_weight') + elif 'output.dense.bias' in k: + new_k = k.replace('output.dense.bias', 'multi_head_attention.out_proj_bias') + elif 'output.LayerNorm.weight' in k: + new_k = k.replace('output.LayerNorm.weight', 'layer_norm.weight') + elif 'output.LayerNorm.bias' in k: + new_k = k.replace('output.LayerNorm.bias', 'layer_norm.bias') + else: + new_k = k + else: + new_k = k + res_dict[new_k] = model_dict[k] + model_dict.clear() + return res_dict + + +def remap_segmented_model_parameters(model_dict): + res_dict = OrderedDict() + for k in model_dict: + if 'bert' in k: + new_k = 'bert_model_segment.' + k + elif 'cls' in k: + new_k = 'heads_only_segment.' + k + else: + assert False, "shouldn't happen" + res_dict[new_k] = model_dict[k] + model_dict.clear() + return res_dict diff --git a/nlp/language_model/bert_sample/pytorch/base/utils/dist.py b/nlp/language_model/bert_sample/pytorch/base/utils/dist.py new file mode 100644 index 000000000..99744372e --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/utils/dist.py @@ -0,0 +1,202 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os + +import torch +import torch.distributed as dist + +from contextlib import contextmanager +import logging.config +import random + + +def generate_seeds(rng, size): + """ + Generate list of random seeds + + :param rng: random number generator + :param size: length of the returned list + """ + seeds = [rng.randint(0, 2**32 - 1) for _ in range(size)] + return seeds + + +def broadcast_seeds(seeds, device): + """ + Broadcasts random seeds to all distributed workers. + Returns list of random seeds (broadcasted from workers with rank 0). + + :param seeds: list of seeds (integers) + :param device: torch.device + """ + if torch.distributed.is_available() and torch.distributed.is_initialized(): + seeds_tensor = torch.LongTensor(seeds).to(device) + torch.distributed.broadcast(seeds_tensor, 0) + seeds = seeds_tensor.tolist() + return seeds + + +def setup_seeds(master_seed, epochs, device): + """ + Generates seeds from one master_seed. + Function returns (worker_seeds, shuffling_seeds), worker_seeds are later + used to initialize per-worker random number generators (mostly for + dropouts), shuffling_seeds are for RNGs resposible for reshuffling the + dataset before each epoch. + Seeds are generated on worker with rank 0 and broadcasted to all other + workers. + + :param master_seed: master RNG seed used to initialize other generators + :param epochs: number of epochs + :param device: torch.device (used for distributed.broadcast) + """ + if master_seed is None: + # random master seed, random.SystemRandom() uses /dev/urandom on Unix + master_seed = random.SystemRandom().randint(0, 2**32 - 1) + if get_rank() == 0: + # master seed is reported only from rank=0 worker, it's to avoid + # confusion, seeds from rank=0 are later broadcasted to other + # workers + logging.info(f'Using random master seed: {master_seed}') + else: + # master seed was specified from command line + logging.info(f'Using master seed from command line: {master_seed}') + + # initialize seeding RNG + seeding_rng = random.Random(master_seed) + + # generate worker seeds, one seed for every distributed worker + worker_seeds = generate_seeds(seeding_rng, get_world_size()) + + # generate seeds for data shuffling, one seed for every epoch + shuffling_seeds = generate_seeds(seeding_rng, epochs) + + # broadcast seeds from rank=0 to other workers + worker_seeds = broadcast_seeds(worker_seeds, device) + shuffling_seeds = broadcast_seeds(shuffling_seeds, device) + return worker_seeds, shuffling_seeds + + +def barrier(): + """ + Works as a temporary distributed barrier, currently pytorch + doesn't implement barrier for NCCL backend. + Calls all_reduce on dummy tensor and synchronizes with GPU. + """ + if torch.distributed.is_available() and torch.distributed.is_initialized(): + torch.distributed.all_reduce(torch.cuda.FloatTensor(1)) + torch.cuda.synchronize() + + +def get_rank(default=0): + """ + Gets distributed rank or returns zero if distributed is not initialized. 
+ """ + if torch.distributed.is_available() and torch.distributed.is_initialized(): + rank = torch.distributed.get_rank() + else: + rank = default + return rank + + +def get_world_size(): + """ + Gets total number of distributed workers or returns one if distributed is + not initialized. + """ + if torch.distributed.is_available() and torch.distributed.is_initialized(): + world_size = torch.distributed.get_world_size() + else: + world_size = 1 + return world_size + + +def main_proc_print(*args, **kwargs): + if is_main_process(): + print(*args, **kwargs) + + +def set_device(cuda, local_rank): + """ + Sets device based on local_rank and returns instance of torch.device. + + :param cuda: if True: use cuda + :param local_rank: local rank of the worker + """ + if cuda: + torch.cuda.set_device(local_rank) + device = torch.device('cuda') + else: + device = torch.device('cpu') + return device + + +def init_dist_training_env(config): + if config.local_rank == -1: + device = torch.device("cuda") + num_gpus = torch.cuda.device_count() + else: + torch.cuda.set_device(config.local_rank) + device = torch.device("cuda", config.local_rank) + host_addr_full = 'tcp://' + os.environ["MASTER_ADDR"] + ':' + os.environ["MASTER_PORT"] + rank = int(os.environ["RANK"]) + world_size = int(os.environ["WORLD_SIZE"]) + + dist_backend = config.dist_backend + DIST_BACKEND_ENV = "PT_DIST_BACKEND" + if DIST_BACKEND_ENV in os.environ: + print("WARN: Use the distributed backend of the environment.") + dist_backend = os.environ[DIST_BACKEND_ENV] + + torch.distributed.init_process_group(backend=dist_backend, init_method=host_addr_full, rank=rank, world_size=world_size) + num_gpus = torch.distributed.get_world_size() + + return device, num_gpus + + +def global_batch_size(config): + return config.train_batch_size * config.n_gpu + + +@contextmanager +def sync_workers(): + """ + Yields distributed rank and synchronizes all workers on exit. + """ + rank = get_rank() + yield rank + barrier() + + +def is_main_process(): + if dist.is_initialized(): + if "LOCAL_RANK" in os.environ: + return int(os.environ["LOCAL_RANK"]) == 0 + else: + return get_rank() == 0 + + return True + + +def format_step(step): + if isinstance(step, str): + return step + s = "" + if len(step) > 0: + s += "Training Epoch: {} ".format(step[0]) + if len(step) > 1: + s += "Training Iteration: {} ".format(step[1]) + if len(step) > 2: + s += "Validation Iteration: {} ".format(step[2]) + return s diff --git a/nlp/language_model/bert_sample/pytorch/base/utils/logging.py b/nlp/language_model/bert_sample/pytorch/base/utils/logging.py new file mode 100644 index 000000000..d38d5c6cb --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/utils/logging.py @@ -0,0 +1,245 @@ +# Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# Copyright Declaration: This software, including all of its code and documentation, +# except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# CoreX. No user of this software shall have any right, ownership or interest in this software and +# any use of this software shall be in compliance with the terms and conditions of the End User +# License Agreement. 
+ + +import os +import sys +import time +import logging +import json +from logging import currentframe +from typing import NamedTuple, Union, Tuple, Optional +from collections import OrderedDict + +from enum import IntEnum + + +_srcfile = os.path.normcase(logging.addLevelName.__code__.co_filename) + + +class LogKeys: + default_logger_name = "PerfLogger" + + # Log format + log_header = "PerfLog" + log_template = "[{header}] {message}" + + # Submitted info + submmiter: str = "submmiter" + model: str = "model" + optimizer_type: str = "optimizer_type" + config: str = "config" + config_path: str = "config_path" + + # Event + event: str = "event" + value: str = "value" + + # Metadata + metadata: str = "metadata" + called_log_file = "file" + called_log_file_lineno = "lineno" + time_ms = "time_ms" + rank = "rank" + + # Other message + other_message: str = "other" + + +class PerfLogLevel(IntEnum): + + INFO = 100 + SUBMITTION = 101 + + @staticmethod + def from_string(level: str): + return PerfLogLevel.__dict__[level.upper()] + + @classmethod + def register_to_logging(cls, logging): + for level_name, level in PerfLogLevel.__dict__.items(): + if isinstance(level, cls): + logging.addLevelName(level.value, level_name) + + +PerfLogLevel.register_to_logging(logging) + + +class LogEventField(NamedTuple): + + name: str + rank: Union[int, list] = -1 + level: PerfLogLevel = PerfLogLevel.SUBMITTION + + +class LogEvent: + + submitted_info = LogEventField("SUBMITTED_INFO", rank=0) + launch_training = LogEventField("LAUNCH_TRAINING") + convert_model = LogEventField("CONVERT_MODEL", rank=0) + create_optimizer = LogEventField("CREATE_OPTIMIZER", rank=0) + model_to_fp16 = LogEventField("MODEL_TO_FP16", rank=0) + model_to_ddp = LogEventField("MODEL_TO_DDP", rank=0) + init_start = LogEventField("INIT_START", rank=0) + init_end = LogEventField("INIT_END", rank=0) + train_begin = LogEventField("TRAIN_BEGIN", rank=0) + train_end = LogEventField("TRAIN_END", rank=0) + epoch_begin = LogEventField("EPOCH_BEGIN", rank=0, level=PerfLogLevel.INFO) + epoch_end = LogEventField("EPOCH_END", rank=0, level=PerfLogLevel.INFO) + step_begin = LogEventField("STEP_BEGIN", rank=0, level=PerfLogLevel.INFO) + step_end = LogEventField("STEP_END", rank=0, level=PerfLogLevel.INFO) + init_evaluation = LogEventField("INIT_EVALUATION", rank=0) + evaluation = LogEventField("EVALUATION", rank=0) + finished = LogEventField("FINISHED", rank=0) + + @staticmethod + def from_string(key: str): + return LogEvent.__dict__[key.lower()] + + +class PerfLogger: + + _singleton = None + + def __init__(self, rank: int, + level: Union[str, PerfLogLevel]=PerfLogLevel.SUBMITTION, + logger: logging.Logger=None): + self.rank = rank + + if isinstance(level, str): + level = PerfLogLevel.from_string(level) + self.level = level + + if logger is None: + logger = logging.Logger(LogKeys.default_logger_name) + + self.logger = logger + + self.previous_log_time = None + + @property + def _current_time_ms(self): + current = int(time.time() * 1e3) + self.previous_log_time = current + return current + + def init_logger(self, submitter: str, model: str, config_path: str, config: dict, *args, **kwargs): + message = { + LogKeys.submmiter: submitter, + LogKeys.model: model, + LogKeys.config_path: config_path, + LogKeys.config: config + } + + self.log(LogEvent.submitted_info, message, *args, **kwargs) + + + def log(self, event: Union[str, LogEventField], message: Optional[Union[str, dict]]=None, *args, **kwargs): + if isinstance(event, str): + event = LogEvent.from_string(event) + + 
show_log = any([ + event.rank == 0 and self.rank == 0, + event.rank == -1, + ]) and any([ + event.level == PerfLogLevel.SUBMITTION, + event.level == self.level + ]) + + if not show_log: + return + + stacklevel = 1 + if "stacklevel" in kwargs: + stacklevel = kwargs.pop("stacklevel") + + call_info = self.get_caller(stacklevel=stacklevel) + + message = self._encode_message(event, message, call_info) + self.logger.log(self.level.value, message, *args, **kwargs) + + def _encode_message(self, event: LogEventField, + message: Union[str, dict], + call_info: Tuple[str, int]) -> str: + if isinstance(message, str): + message ={LogKeys.other_message: message} + message = OrderedDict({ + LogKeys.event: event.name, + LogKeys.value: message + }) + called_file, lineno = call_info + metadata = { + LogKeys.called_log_file: called_file, + LogKeys.called_log_file_lineno: lineno, + LogKeys.time_ms: self._current_time_ms, + LogKeys.rank: self.rank + } + + message[LogKeys.metadata] = metadata + message = json.dumps(message) + + return self._log_template(message) + + def _log_template(self, message: str): + return LogKeys.log_template.format(header=LogKeys.log_header, message=message) + + def get_caller(self, stacklevel=1) -> Tuple[str, int]: + f = currentframe() + + if stacklevel == 0: + default_file_name = f.f_code.co_filename + default_lineno = f.f_lineno + return (default_file_name, default_lineno) + + # On some versions of IronPython, currentframe() returns None if + # IronPython isn't run with -X:Frames. + if f is not None: + f = f.f_back + orig_f = f + while f and stacklevel > 1: + f = f.f_back + stacklevel -= 1 + if not f: + f = orig_f + rv = ("(unknown file)", -1) + + while hasattr(f, "f_code"): + co = f.f_code + filename = os.path.normcase(co.co_filename) + if filename == _srcfile: + f = f.f_back + continue + rv = (co.co_filename, f.f_lineno) + break + return rv + + + @classmethod + def get_default_logger(cls, rank: int=-1, + level: Union[str, PerfLogLevel]=PerfLogLevel.SUBMITTION, + logger: logging.Logger=None): + if cls._singleton is None: + cls._singleton = cls(rank=rank, level=level, logger=logger) + + return cls._singleton + + + + + + + + + + + + + + + diff --git a/nlp/language_model/bert_sample/pytorch/base/utils/paths.py b/nlp/language_model/bert_sample/pytorch/base/utils/paths.py new file mode 100644 index 000000000..e99480ff7 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/utils/paths.py @@ -0,0 +1,18 @@ +import os.path as ospath + + +MODEL_DIR = ospath.abspath( + ospath.join( + __file__, + "../../../" + ) +) + +CURRENT_MODEL_NAME = ospath.basename(MODEL_DIR) + +PROJ_DIR = ospath.abspath( + ospath.join( + MODEL_DIR, + "../../" + ) +) diff --git a/nlp/language_model/bert_sample/pytorch/base/utils/tokenization.py b/nlp/language_model/bert_sample/pytorch/base/utils/tokenization.py new file mode 100644 index 000000000..f9f96f788 --- /dev/null +++ b/nlp/language_model/bert_sample/pytorch/base/utils/tokenization.py @@ -0,0 +1,428 @@ +# coding=utf-8 +# Copyright 2020 MLBenchmark Group. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tokenization classes.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import re +import unicodedata + +from absl import flags +import six +import tensorflow.compat.v1 as tf + +FLAGS = flags.FLAGS + +flags.DEFINE_bool( + "preserve_unused_tokens", False, + "If True, Wordpiece tokenization will not be applied to words in the vocab." +) + +_UNUSED_TOKEN_RE = re.compile("^\\[unused\\d+\\]$") + + +def preserve_token(token, vocab): + """Returns True if the token should forgo tokenization and be preserved.""" + if not FLAGS.preserve_unused_tokens: + return False + if token not in vocab: + return False + return bool(_UNUSED_TOKEN_RE.search(token)) + + +def validate_case_matches_checkpoint(do_lower_case, init_checkpoint): + """Checks whether the casing config is consistent with the checkpoint name.""" + + # The casing has to be passed in by the user and there is no explicit check + # as to whether it matches the checkpoint. The casing information probably + # should have been stored in the bert_config.json file, but it's not, so + # we have to heuristically detect it to validate. + + if not init_checkpoint: + return + + m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint) + if m is None: + return + + model_name = m.group(1) + + lower_models = [ + "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12", + "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12" + ] + + cased_models = [ + "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16", + "multi_cased_L-12_H-768_A-12" + ] + + is_bad_config = False + if model_name in lower_models and not do_lower_case: + is_bad_config = True + actual_flag = "False" + case_name = "lowercased" + opposite_flag = "True" + + if model_name in cased_models and do_lower_case: + is_bad_config = True + actual_flag = "True" + case_name = "cased" + opposite_flag = "False" + + if is_bad_config: + raise ValueError( + "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. " + "However, `%s` seems to be a %s model, so you " + "should pass in `--do_lower_case=%s` so that the fine-tuning matches " + "how the model was pre-training. If this error is wrong, please " + "just comment out this check." % (actual_flag, init_checkpoint, + model_name, case_name, opposite_flag)) + + +def convert_to_unicode(text): + """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" + if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text.decode("utf-8", "ignore") + elif isinstance(text, unicode): + return text + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or Python 3?") + + +def printable_text(text): + """Returns text encoded in a way suitable for print or `tf.logging`.""" + + # These functions want `str` for both Python2 and Python3, but in one case + # it's a Unicode string and in the other it's a byte string. 
+ if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return text.encode("utf-8") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or Python 3?") + + +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with tf.gfile.GFile(vocab_file, "r") as reader: + while True: + token = convert_to_unicode(reader.readline()) + if not token: + break + token = token.strip() + if token not in vocab: + vocab[token] = len(vocab) + return vocab + + +def convert_by_vocab(vocab, items): + """Converts a sequence of [tokens|ids] using the vocab.""" + output = [] + for item in items: + output.append(vocab[item]) + return output + + +def convert_tokens_to_ids(vocab, tokens): + return convert_by_vocab(vocab, tokens) + + +def convert_ids_to_tokens(inv_vocab, ids): + return convert_by_vocab(inv_vocab, ids) + + +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +class FullTokenizer(object): + """Runs end-to-end tokenziation.""" + + def __init__(self, vocab_file, do_lower_case=True): + self.vocab = load_vocab(vocab_file) + self.inv_vocab = {v: k for k, v in self.vocab.items()} + self.basic_tokenizer = BasicTokenizer( + do_lower_case=do_lower_case, vocab=self.vocab) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) + + def tokenize(self, text): + split_tokens = [] + for token in self.basic_tokenizer.tokenize(text): + if preserve_token(token, self.vocab): + split_tokens.append(token) + continue + for sub_token in self.wordpiece_tokenizer.tokenize(token): + split_tokens.append(sub_token) + + return split_tokens + + def convert_tokens_to_ids(self, tokens): + return convert_by_vocab(self.vocab, tokens) + + def convert_ids_to_tokens(self, ids): + return convert_by_vocab(self.inv_vocab, ids) + + +class BasicTokenizer(object): + """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" + + def __init__(self, do_lower_case=True, vocab=tuple()): + """Constructs a BasicTokenizer. + + Args: + do_lower_case: Whether to lower case the input. + vocab: A container of tokens to not mutate during tokenization. + """ + self.do_lower_case = do_lower_case + self.vocab = vocab + + def tokenize(self, text): + """Tokenizes a piece of text.""" + text = convert_to_unicode(text) + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). 
+ text = self._tokenize_chinese_chars(text) + + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if preserve_token(token, self.vocab): + split_tokens.append(token) + continue + if self.do_lower_case: + token = token.lower() + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text): + """Splits punctuation on a piece of text.""" + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ((cp >= 0x4E00 and cp <= 0x9FFF) or # + (cp >= 0x3400 and cp <= 0x4DBF) or # + (cp >= 0x20000 and cp <= 0x2A6DF) or # + (cp >= 0x2A700 and cp <= 0x2B73F) or # + (cp >= 0x2B740 and cp <= 0x2B81F) or # + (cp >= 0x2B820 and cp <= 0x2CEAF) or + (cp >= 0xF900 and cp <= 0xFAFF) or # + (cp >= 0x2F800 and cp <= 0x2FA1F)): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xfffd or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +class WordpieceTokenizer(object): + """Runs WordPiece tokenziation.""" + + def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """Tokenizes a piece of text into its word pieces. + + This uses a greedy longest-match-first algorithm to perform tokenization + using the given vocabulary. + + For example: + input = "unaffable" + output = ["un", "##aff", "##able"] + + Args: + text: A single token or whitespace separated tokens. This should have + already been passed through `BasicTokenizer. + + Returns: + A list of wordpiece tokens. 
+ """ + + text = convert_to_unicode(text) + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens + + +def _is_whitespace(char): + """Checks whether `chars` is a whitespace character.""" + # \t, \n, and \r are technically control characters but we treat them + # as whitespace since they are generally considered as such. + if char == " " or char == "\t" or char == "\n" or char == "\r": + return True + cat = unicodedata.category(char) + if cat == "Zs": + return True + return False + + +def _is_control(char): + """Checks whether `chars` is a control character.""" + # These are technically control characters but we count them as whitespace + # characters. + if char == "\t" or char == "\n" or char == "\r": + return False + cat = unicodedata.category(char) + if cat in ("Cc", "Cf"): + return True + return False + + +def _is_punctuation(char): + """Checks whether `chars` is a punctuation character.""" + cp = ord(char) + # We treat all non-letter/number ASCII as punctuation. + # Characters such as "^", "$", and "`" are not in the Unicode + # Punctuation class but we treat them as punctuation anyways, for + # consistency. + if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or + (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): + return True + cat = unicodedata.category(char) + if cat.startswith("P"): + return True + return False diff --git a/tests/executables/bert/init_torch.sh b/tests/executables/bert/init_torch.sh new file mode 100644 index 000000000..bc93aa7fd --- /dev/null +++ b/tests/executables/bert/init_torch.sh @@ -0,0 +1,72 @@ +#!/bin/bash + +# /*************************************************************************************************** +# * Copyright (c) 2022 Iluvatar CoreX. All rights reserved. +# * Copyright Declaration: This software, including all of its code and documentation, +# * except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX +# * Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright +# * Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar +# * CoreX. No user of this software shall have any right, ownership or interest in this software and +# * any use of this software shall be in compliance with the terms and conditions of the End User +# * License Agreement. 
+# **************************************************************************************************/
+
+CURRENT_DIR=$(cd `dirname $0`; pwd)
+
+set -euox pipefail
+
+# Determine whether this script is running as root; non-root users need the sudo prefix for privileged commands.
+prefix_sudo=""
+current_user=$(whoami)
+if [ "$current_user" != "root" ]; then
+    echo "User $current_user is not root, privileged commands will be prefixed with sudo"
+    prefix_sudo="sudo"
+fi
+
+echo "prefix_sudo= $prefix_sudo"
+
+source $(cd `dirname $0`; pwd)/../_utils/which_install_tool.sh
+if command_exists apt; then
+    $prefix_sudo apt install -y git numactl
+elif command_exists dnf; then
+    $prefix_sudo dnf install -y git numactl
+else
+    $prefix_sudo yum install -y git numactl
+fi
+
+if [ "$(ulimit -n)" -lt "1048576" ]; then
+    ulimit -n 1048576
+fi
+
+# prepare data
+cd ${CURRENT_DIR}/../../data/datasets/bert_mini
+
+if [[ ! -d "${CURRENT_DIR}/../../data/datasets/bert_mini/2048_shards_uncompressed" ]]; then
+    echo "Unarchive 2048_shards_uncompressed_mini"
+    tar -zxf 2048_shards_uncompressed_mini.tar.gz
+fi
+if [[ ! -d "${CURRENT_DIR}/../../data/datasets/bert_mini/eval_set_uncompressed" ]]; then
+    echo "Unarchive eval_set_uncompressed.tar.gz"
+    tar -zxf eval_set_uncompressed.tar.gz
+fi
+
+if [[ "$(uname -m)" == "aarch64" ]]; then
+    set +euox pipefail
+    source /opt/rh/gcc-toolset-11/enable
+    set -euox pipefail
+fi
+
+
+# install sdk
+cd ${CURRENT_DIR}/../../nlp/language_model/bert_sample/pytorch/base
+pip3 install -r requirements.txt
+$prefix_sudo python3 setup.py install
+
+
+
+if [ "$?" != "0" ]; then
+    echo "init torch : failed."
+    exit 1
+fi
+
+echo "init torch : completed."
diff --git a/tests/executables/bert/train_bert_default_amp_dist_1x8_torch.sh b/tests/executables/bert/train_bert_default_amp_dist_1x8_torch.sh
new file mode 100644
index 000000000..311994b05
--- /dev/null
+++ b/tests/executables/bert/train_bert_default_amp_dist_1x8_torch.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+# /***************************************************************************************************
+# * Copyright (c) 2022 Iluvatar CoreX. All rights reserved.
+# * Copyright Declaration: This software, including all of its code and documentation,
+# * except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX
+# * Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright
+# * Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar
+# * CoreX. No user of this software shall have any right, ownership or interest in this software and
+# * any use of this software shall be in compliance with the terms and conditions of the End User
+# * License Agreement.
+# **************************************************************************************************/
+
+set -euox pipefail
+
+source ../_utils/global_environment_variables.sh
+
+: ${BATCH_SIZE:=10}
+
+cd ../../nlp/language_model/bert_sample/pytorch/base
+if [ "$?" != "0" ]; then
+    echo "train status: fail."
+    exit 1
+fi
+
+
+bash run_training.sh \
+--name default \
+--config V100x1x8 \
+--data_dir ../../../../../../data/datasets/bert_mini/ \
+--max_steps 500 \
+--train_batch_size ${BATCH_SIZE} \
+--target_mlm_accuracy 0.33 \
+--init_checkpoint "../../../../../../data/datasets/bert_mini/model.ckpt-28252.apex.pt"
+
+if [ "$?" != "0" ]; then
+    echo "train status: fail."
+    exit 1
+fi
+
+echo "train status: pass."
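The WordPiece tokenizer added earlier in this patch documents a greedy longest-match-first lookup ("unaffable" becomes ["un", "##aff", "##able"]). It can be exercised on its own with a toy vocabulary as a quick sanity check; the snippet below is a minimal sketch, and the import name `tokenization` is an assumption about where the module lands rather than something defined by the patch.

# Minimal sanity check for the greedy longest-match-first WordPiece lookup.
# Assumption: the tokenization module added in this patch is importable as
# `tokenization` (it pulls in six and TensorFlow at import time).
import collections

import tokenization  # hypothetical import path; adjust to the actual location

# Toy vocabulary containing the pieces from the docstring example.
vocab = collections.OrderedDict(
    (tok, idx) for idx, tok in enumerate(["[UNK]", "un", "##aff", "##able"]))

wp = tokenization.WordpieceTokenizer(vocab=vocab)
print(wp.tokenize("unaffable"))  # expected: ['un', '##aff', '##able']
print(wp.tokenize("xyzzy"))      # no piece matches the toy vocab -> ['[UNK]']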
diff --git a/tests/executables/bert/train_bert_pretraining_amp_dist_1x8_torch.sh b/tests/executables/bert/train_bert_pretraining_amp_dist_1x8_torch.sh
new file mode 100644
index 000000000..1a5a8e0e3
--- /dev/null
+++ b/tests/executables/bert/train_bert_pretraining_amp_dist_1x8_torch.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# /***************************************************************************************************
+# * Copyright (c) 2022 Iluvatar CoreX. All rights reserved.
+# * Copyright Declaration: This software, including all of its code and documentation,
+# * except for the third-party software it contains, is a copyrighted work of Shanghai Iluvatar CoreX
+# * Semiconductor Co., Ltd. and its affiliates ("Iluvatar CoreX") in accordance with the PRC Copyright
+# * Law and relevant international treaties, and all rights contained therein are enjoyed by Iluvatar
+# * CoreX. No user of this software shall have any right, ownership or interest in this software and
+# * any use of this software shall be in compliance with the terms and conditions of the End User
+# * License Agreement.
+# **************************************************************************************************/
+
+set -euox pipefail
+
+: ${BATCH_SIZE:=27}
+
+cd ../../nlp/language_model/bert_sample/pytorch/base/
+if [ "$?" != "0" ]; then
+    echo "ERROR: ../../nlp/language_model/bert_sample/pytorch/base/ not exist."
+    exit 1
+fi
+
+master_port=22233
+bash run_training.sh --name iluvatar --config 03V100x1x8 --train_batch_size ${BATCH_SIZE} --data_dir ../../../../../../data/datasets/bert_mini/ --master_port $master_port
+if [ "$?" != "0" ]; then
+    echo "eval result: fail."
+    exit 1
+fi
+
+echo "eval result: pass."
--
Gitee

From 513a6d7a94a8633855807de46e14d8556be5da60 Mon Sep 17 00:00:00 2001
From: "hongliang.yuan"
Date: Fri, 26 Sep 2025 15:31:58 +0800
Subject: [PATCH 17/18] sync yolov5 all

---
 tests/executables/yolov5/init_torch.sh         | 78 +++++++++++++++++++
 .../yolov5/train_yolov5s_coco_amp_torch.sh     | 22 ++++++
 2 files changed, 100 insertions(+)
 create mode 100644 tests/executables/yolov5/init_torch.sh
 create mode 100644 tests/executables/yolov5/train_yolov5s_coco_amp_torch.sh

diff --git a/tests/executables/yolov5/init_torch.sh b/tests/executables/yolov5/init_torch.sh
new file mode 100644
index 000000000..8a26be22b
--- /dev/null
+++ b/tests/executables/yolov5/init_torch.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+# This script checks that the required packages are installed, and downloads the dataset and pre-trained weights if they are missing.
+. ../_utils/install_pip_pkgs.sh
+
+CURRENT_MODEL_DIR=$(cd `dirname $0`; pwd)
+PROJ_DIR="${CURRENT_MODEL_DIR}/../../"
+PROJECT_DATA="${PROJ_DIR}/data/datasets"
+MODEL_ZOO_DIR="${PROJ_DIR}/data/model_zoo"
+
+pkgs=('requests' 'matplotlib' 'numpy' 'Pillow' 'scipy' 'tqdm' 'seaborn' 'pandas' 'thop' 'opencv-python' 'pycocotools' '--ignore-installed PyYAML')
+install_pip_pkgs "${pkgs[@]}"
+
+pip3 install tqdm==4.62.1
+
+git clone https://gitee.com/deep-spark/deepsparkhub-gpl.git
+
+cd ${PROJ_DIR}/deepsparkhub-gpl/cv/detection/yolov5-sample/pytorch
+
+# Remove existing data
+if [[ -d "./datasets/coco128" ]]; then
+    rm -rf ./datasets/coco128
+fi
+
+if [[ -d "./datasets/coco" ]]; then
+    rm -rf ./datasets/coco
+fi
+
+if [[ -d "./weights" ]]; then
+    rm -rf ./weights
+fi
+mkdir "weights"
+
+if [[ -d "./datasets" ]]; then
+    rm ./datasets
+fi
+
+# Build data
+if [[ ! -d "datasets" ]]; then
+    echo "ln -s ${PROJECT_DATA} ./datasets"
+    ln -s ${PROJECT_DATA} ./datasets
+fi
+
+if [[ ! -d "${PROJECT_DATA}/coco128" ]];then
+    if [ -f "${PROJECT_DATA}/coco128.tgz" ]; then
+        echo "Unarchive coco128.tgz"
+        tar zxf "${PROJECT_DATA}/coco128.tgz" -C ./datasets/
+    else
+        echo "Error: Not found ${PROJECT_DATA}/coco128.tgz!"
+    fi
+else
+    echo "Warning: coco128 already exists!"
+fi
+
+if [[ -d "${PROJECT_DATA}/coco2017" ]];then
+    if [[ -f "${PROJECT_DATA}/coco2017labels.zip" ]]; then
+        echo "Unarchive coco2017labels.zip"
+        unzip -q -d ./datasets/ "${PROJECT_DATA}/coco2017labels.zip"
+
+        echo "ln -s ${PROJECT_DATA}/coco2017/train2017 ./datasets/coco/images/"
+        ln -s ${PROJECT_DATA}/coco2017/train2017 ./datasets/coco/images/
+
+        echo "ln -s ${PROJECT_DATA}/coco2017/val2017 ./datasets/coco/images/"
+        ln -s ${PROJECT_DATA}/coco2017/val2017 ./datasets/coco/images/
+    else
+        echo "Error: Not found ${PROJECT_DATA}/coco2017labels.zip!"
+    fi
+else
+    echo "Warning: Not found coco2017!"
+fi
+
+if [[ -f "${MODEL_ZOO_DIR}/yolov5s.pt" ]];then
+    echo "ln -s ${MODEL_ZOO_DIR}/yolov5s.pt ./weights"
+    ln -s ${MODEL_ZOO_DIR}/yolov5s.pt ./weights
+fi
+
+if [[ -f "/opt/rh/gcc-toolset-11/enable" ]];then
+    source /opt/rh/gcc-toolset-11/enable
+fi
\ No newline at end of file
diff --git a/tests/executables/yolov5/train_yolov5s_coco_amp_torch.sh b/tests/executables/yolov5/train_yolov5s_coco_amp_torch.sh
new file mode 100644
index 000000000..471532b6a
--- /dev/null
+++ b/tests/executables/yolov5/train_yolov5s_coco_amp_torch.sh
@@ -0,0 +1,22 @@
+CURRENT_DIR=$(cd `dirname $0`; pwd)
+
+source ../_utils/global_environment_variables.sh
+
+: ${BATCH_SIZE:=8}
+
+ROOT_DIR=${CURRENT_DIR}/../..
+
+EXIT_STATUS=0
+check_status()
+{
+    if ((${PIPESTATUS[0]} != 0)); then
+        EXIT_STATUS=1
+    fi
+}
+
+cd "${ROOT_DIR}/deepsparkhub-gpl/cv/detection/yolov5-sample/pytorch"
+ixdltest-check --nonstrict_mode_args="--epoch 1" -b 0.2 --run_script \
+python3 train.py --img-size 640 --batch-size ${BATCH_SIZE} \
+    --cfg ./models/yolov5s.yaml --weights ./weights/yolov5s.pt --data ./data/coco.yaml --amp ${nonstrict_mode_args} "$@"; check_status
+
+exit ${EXIT_STATUS}
--
Gitee

From c4b5c1b95baf3cd6e1832bab940289b7dd870aab Mon Sep 17 00:00:00 2001
From: "hongliang.yuan"
Date: Fri, 26 Sep 2025 15:47:39 +0800
Subject: [PATCH 18/18] sync _utils script

---
 .../_utils/fix_import_sklearn_error.sh         | 23 +++++++++++
 ...x_import_sklearn_error_libgomp_d22c30c5.sh  | 13 +++++++
 .../_utils/init_classification_paddle.sh       | 29 +++++++++++++++
 .../_utils/init_detection_torch.sh             | 14 +++++++
 .../_utils/init_segmentation_torch.sh          | 14 +++++++
 .../_utils/init_tf_cnn_benckmark.sh            | 26 +++++++++++++
 .../set_paddle_environment_variables.sh        |  2 ++
 .../executables/_utils/which_install_tool.sh   | 24 +++++++++++
 8 files changed, 145 insertions(+)
 create mode 100644 tests/executables/_utils/fix_import_sklearn_error.sh
 create mode 100644 tests/executables/_utils/fix_import_sklearn_error_libgomp_d22c30c5.sh
 create mode 100644 tests/executables/_utils/init_classification_paddle.sh
 create mode 100644 tests/executables/_utils/init_detection_torch.sh
 create mode 100644 tests/executables/_utils/init_segmentation_torch.sh
 create mode 100644 tests/executables/_utils/init_tf_cnn_benckmark.sh
 create mode 100644 tests/executables/_utils/set_paddle_environment_variables.sh
 create mode 100644 tests/executables/_utils/which_install_tool.sh

diff --git a/tests/executables/_utils/fix_import_sklearn_error.sh b/tests/executables/_utils/fix_import_sklearn_error.sh
new file mode 100644
index 000000000..a2662c753
--- /dev/null
+++ b/tests/executables/_utils/fix_import_sklearn_error.sh
@@ -0,0 +1,23 @@
+sys_name_str=`uname -a`
+if [[ "${sys_name_str}" =~ "aarch64" ]]; then
+    if [ -z "$LD_PRELOAD" ]; then
+        ligo=`find /usr -iname "libgomp.so.1"`
+        for path in $ligo; do
+            if [[ "${path}" =~ "libgomp.so.1" ]]; then
+                export LD_PRELOAD="${path}"
+                echo "Set LD_PRELOAD="${path}""
+                break
+            fi
+        done
+
+        ligo=`find /usr -iname "libgomp-d22c30c5.so.1.0.0"`
+        for path in $ligo; do
+            if [[ "${path}" =~ "libgomp-d22c30c5.so.1.0.0" ]]; then
+                export LD_PRELOAD="${path}"
+                echo "Set LD_PRELOAD="${path}""
+                break
+            fi
+        done
+
+    fi
+fi
\ No newline at end of file
diff --git a/tests/executables/_utils/fix_import_sklearn_error_libgomp_d22c30c5.sh b/tests/executables/_utils/fix_import_sklearn_error_libgomp_d22c30c5.sh
new file mode 100644
index 000000000..3bf7013bf
--- /dev/null
+++ b/tests/executables/_utils/fix_import_sklearn_error_libgomp_d22c30c5.sh
@@ -0,0 +1,13 @@
+sys_name_str=`uname -a`
+if [[ "${sys_name_str}" =~ "aarch64" ]]; then
+    if [ -z "$LD_PRELOAD" ]; then
+        ligo=`find /usr -iname "libgomp-d22c30c5.so.1.0.0"`
+        for path in $ligo; do
+            if [[ "${path}" =~ "libgomp-d22c30c5.so.1.0.0" ]]; then
+                export LD_PRELOAD="${path}"
+                echo "Set LD_PRELOAD="${path}""
+                break
+            fi
+        done
+    fi
+fi
\ No newline at end of file
diff --git a/tests/executables/_utils/init_classification_paddle.sh b/tests/executables/_utils/init_classification_paddle.sh
new file mode 100644
index 000000000..52edc3c7d
--- /dev/null
+++ b/tests/executables/_utils/init_classification_paddle.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+if [ -n "$1" ]; then
+    _UTILS_DIR=$1
+else
+    _UTILS_DIR='../_utils'
+fi
+
+# Install packages
+. $_UTILS_DIR/install_pip_pkgs.sh
+
+pkgs=('scipy' 'scikit-learn==0.23.2' 'opencv-python' 'tqdm' "visualdl==2.3.0")
+
+
+PY_VERSION=$(python3 -V 2>&1|awk '{print $2}'|awk -F '.' '{print $2}')
+if [ "$PY_VERSION" == "10" ];
+then
+    pkgs=('scipy' 'scikit-learn==1.1.0' 'opencv-python' 'tqdm' "visualdl==2.3.0")
+    echo "$pkgs"
+elif [ "$PY_VERSION" == "11" ];
+then
+    pkgs=('scipy' 'scikit-learn==1.3.1' 'opencv-python' 'tqdm' "visualdl==2.3.0")
+    echo "$pkgs"
+else
+    pkgs=('scipy' 'scikit-learn==0.24.0' 'opencv-python' 'tqdm' "visualdl==2.3.0")
+    echo "$pkgs"
+fi
+
+install_pip_pkgs "${pkgs[@]}"
diff --git a/tests/executables/_utils/init_detection_torch.sh b/tests/executables/_utils/init_detection_torch.sh
new file mode 100644
index 000000000..8661bebc9
--- /dev/null
+++ b/tests/executables/_utils/init_detection_torch.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+if [ -n "$1" ]; then
+    _UTILS_DIR=$1
+else
+    _UTILS_DIR='../_utils'
+fi
+
+# Install packages
+. $_UTILS_DIR/install_pip_pkgs.sh
+
+pkgs=('scipy' 'matplotlib' 'pycocotools' 'opencv-python' 'easydict' 'tqdm')
+
+install_pip_pkgs "${pkgs[@]}"
\ No newline at end of file
diff --git a/tests/executables/_utils/init_segmentation_torch.sh b/tests/executables/_utils/init_segmentation_torch.sh
new file mode 100644
index 000000000..d3bae05d0
--- /dev/null
+++ b/tests/executables/_utils/init_segmentation_torch.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+if [ -n "$1" ]; then
+    _UTILS_DIR=$1
+else
+    _UTILS_DIR='../_utils'
+fi
+
+# Install packages
+. $_UTILS_DIR/install_pip_pkgs.sh
+
+pkgs=('scipy==1.12.0' 'matplotlib' 'pycocotools' 'opencv-python' 'easydict' 'tqdm')
+
+install_pip_pkgs "${pkgs[@]}"
\ No newline at end of file
diff --git a/tests/executables/_utils/init_tf_cnn_benckmark.sh b/tests/executables/_utils/init_tf_cnn_benckmark.sh
new file mode 100644
index 000000000..105325e96
--- /dev/null
+++ b/tests/executables/_utils/init_tf_cnn_benckmark.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+CURRENT_DIR=$(cd `dirname $0`; pwd)
+ROOT_DIR=${CURRENT_DIR}/../..
+ROOT_DIR=${CURRENT_DIR}/../..
+DATA_DIR=${ROOT_DIR}/data/datasets/imagenette_tfrecord
+
+if [ -n "$1" ]; then
+    _UTILS_DIR=$1
+else
+    _UTILS_DIR='../_utils'
+fi
+
+# Install packages
+. $_UTILS_DIR/install_pip_pkgs.sh
+
+pkgs=('absl-py')
+
+install_pip_pkgs "${pkgs[@]}"
+pip3 install ${_UTILS_DIR}/../../data/packages/DLLogger-1.0.0-py3-none-any.whl
+
+
+if [ ! -d "${DATA_DIR}" ]; then
+    cd ${ROOT_DIR}/data/datasets/
+    tar -xzvf imagenette_tfrecord.tgz
+fi
diff --git a/tests/executables/_utils/set_paddle_environment_variables.sh b/tests/executables/_utils/set_paddle_environment_variables.sh
new file mode 100644
index 000000000..f01b471b2
--- /dev/null
+++ b/tests/executables/_utils/set_paddle_environment_variables.sh
@@ -0,0 +1,2 @@
+export FLAGS_cudnn_exhaustive_search=True
+export FLAGS_cudnn_batchnorm_spatial_persistent=True
diff --git a/tests/executables/_utils/which_install_tool.sh b/tests/executables/_utils/which_install_tool.sh
new file mode 100644
index 000000000..1110638d2
--- /dev/null
+++ b/tests/executables/_utils/which_install_tool.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+command_exists() {
+    command -v "$@" > /dev/null 2>&1
+}
+
+# install template
+# Determine whether the script is running as root; non-root users need the sudo prefix.
+# prefix_sudo=""
+# current_user=$(whoami)
+# if [ "$current_user" != "root" ]; then
+#     echo "User $current_user is not root, privileged commands will be prefixed with sudo"
+#     prefix_sudo="sudo"
+# fi
+#
+# echo "prefix_sudo= $prefix_sudo"
+#
+# if command_exists apt; then
+#     $prefix_sudo apt install -y
+# elif command_exists dnf; then
+#     $prefix_sudo dnf install -y
+# else
+#     $prefix_sudo yum install -y
+# fi
--
Gitee
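The init scripts above share one pattern: detect an available package manager with command_exists (apt, then dnf, then yum) and prefix privileged commands with sudo when not running as root. The Python sketch below expresses the same fallback for reference only; it is not part of the patch, and the helper names detect_install_tool and install_packages are made up here for illustration.

# Illustrative sketch of the apt -> dnf -> yum fallback used by the shell helpers above.
# Not part of the patch; detect_install_tool / install_packages are hypothetical names.
import os
import shutil
import subprocess

def detect_install_tool():
    """Return the first available package manager, mirroring which_install_tool.sh."""
    for tool in ("apt", "dnf", "yum"):
        if shutil.which(tool):
            return tool
    raise RuntimeError("No supported package manager found (apt/dnf/yum)")

def install_packages(*pkgs):
    """Install packages with the detected tool, prefixing sudo for non-root users."""
    cmd = [detect_install_tool(), "install", "-y", *pkgs]
    if os.geteuid() != 0:  # same intent as the `whoami` check in the scripts
        cmd.insert(0, "sudo")
    subprocess.run(cmd, check=True)

# Example: install_packages("git", "numactl")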